repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---|
THUMNLab/AutoGL | [
"7b551961e90f5042d9b91d92c083f3f09dd9dbdd",
"7b551961e90f5042d9b91d92c083f3f09dd9dbdd"
] | [
"autogl/module/nas/estimator/one_shot.py",
"autogl/solver/classifier/graph_classifier.py"
] | [
"import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom . import register_nas_estimator\nfrom ..space import BaseSpace\nfrom .base import BaseEstimator\n\n\n@register_nas_estimator(\"oneshot\")\nclass OneShotEstimator(BaseEstimator):\n \"\"\"\n One shot estimator.\n\n Use model directly to get estimations.\n \"\"\"\n\n def infer(self, model: BaseSpace, dataset, mask=\"train\"):\n device = next(model.parameters()).device\n dset = dataset[0].to(device)\n pred = model(dset)[getattr(dset, f\"{mask}_mask\")]\n y = dset.y[getattr(dset, f\"{mask}_mask\")]\n loss = getattr(F, self.loss_f)(pred, y)\n # acc=sum(pred.max(1)[1]==y).item()/y.size(0)\n probs = F.softmax(pred, dim=1).detach().cpu().numpy()\n y = y.cpu()\n metrics = [eva.evaluate(probs, y) for eva in self.evaluation]\n return metrics, loss\n",
"\"\"\"\nAuto Classfier for Graph Node Classification\n\"\"\"\nimport time\nimport json\n\nfrom copy import deepcopy\n\nimport torch\nimport numpy as np\nimport yaml\n\nfrom .base import BaseClassifier\nfrom ...module.feature import FEATURE_DICT\nfrom ...module.model import BaseModel, MODEL_DICT\nfrom ...module.train import TRAINER_DICT, get_feval, BaseGraphClassificationTrainer\nfrom ..base import _initialize_single_model, _parse_hp_space\nfrom ..utils import LeaderBoard, set_seed\nfrom ...datasets import utils\nfrom ...utils import get_logger\n\nLOGGER = get_logger(\"GraphClassifier\")\n\n\nclass AutoGraphClassifier(BaseClassifier):\n \"\"\"\n Auto Multi-class Graph Classifier.\n\n Used to automatically solve the graph classification problems.\n\n Parameters\n ----------\n feature_module: autogl.module.feature.BaseFeatureEngineer or str or None\n The (name of) auto feature engineer used to process the given dataset.\n Disable feature engineer by setting it to ``None``. Default ``deepgl``.\n\n graph_models: list of autogl.module.model.BaseModel or list of str\n The (name of) models to be optimized as backbone. Default ``['gat', 'gcn']``.\n\n hpo_module: autogl.module.hpo.BaseHPOptimizer or str or None\n The (name of) hpo module used to search for best hyper parameters.\n Disable hpo by setting it to ``None``. Default ``anneal``.\n\n ensemble_module: autogl.module.ensemble.BaseEnsembler or str or None\n The (name of) ensemble module used to ensemble the multi-models found.\n Disable ensemble by setting it to ``None``. Default ``voting``.\n\n max_evals: int (Optional)\n If given, will set the number eval times the hpo module will use.\n Only be effective when hpo_module is ``str``. Default ``None``.\n\n trainer_hp_space: Iterable[dict] (Optional)\n trainer hp space or list of trainer hp spaces configuration.\n If a single trainer hp is given, will specify the hp space of trainer for\n every model. If a list of trainer hp is given, will specify every model\n with corrsponding trainer hp space. Default ``None``.\n\n model_hp_spaces: Iterable[Iterable[dict]] (Optional)\n model hp space configuration.\n If given, will specify every hp space of every passed model. Default ``None``.\n\n size: int (Optional)\n The max models ensemble module will use. Default ``None``.\n\n device: torch.device or str\n The device where model will be running on. If set to ``auto``, will use gpu\n when available. You can also specify the device by directly giving ``gpu`` or\n ``cuda:0``, etc. 
Default ``auto``.\n \"\"\"\n\n # pylint: disable=W0102\n\n def __init__(\n self,\n feature_module=None,\n graph_models=[\"gin\", \"topkpool\"],\n # nas_algorithms=None,\n # nas_spaces=None,\n # nas_estimators=None,\n hpo_module=\"anneal\",\n ensemble_module=\"voting\",\n max_evals=50,\n default_trainer=None,\n trainer_hp_space=None,\n model_hp_spaces=None,\n size=4,\n device=\"auto\",\n ):\n\n super().__init__(\n feature_module=feature_module,\n graph_models=graph_models,\n nas_algorithms=None, # nas_algorithms,\n nas_spaces=None, # nas_spaces,\n nas_estimators=None, # nas_estimators,\n hpo_module=hpo_module,\n ensemble_module=ensemble_module,\n max_evals=max_evals,\n default_trainer=default_trainer or \"GraphClassificationFull\",\n trainer_hp_space=trainer_hp_space,\n model_hp_spaces=model_hp_spaces,\n size=size,\n device=device,\n )\n\n self.dataset = None\n\n def _init_graph_module(\n self,\n graph_models,\n num_classes,\n num_features,\n feval,\n device,\n loss,\n num_graph_features,\n ) -> \"AutoGraphClassifier\":\n # load graph network module\n self.graph_model_list = []\n if isinstance(graph_models, (list, tuple)):\n for model in graph_models:\n if isinstance(model, str):\n if model in MODEL_DICT:\n self.graph_model_list.append(\n MODEL_DICT[model](\n num_classes=num_classes,\n num_features=num_features,\n num_graph_features=num_graph_features,\n device=device,\n init=False,\n )\n )\n else:\n raise KeyError(\"cannot find model %s\" % (model))\n elif isinstance(model, type) and issubclass(model, BaseModel):\n self.graph_model_list.append(\n model(\n num_classes=num_classes,\n num_features=num_features,\n num_graph_features=num_graph_features,\n device=device,\n init=False,\n )\n )\n elif isinstance(model, BaseModel):\n # setup the hp of num_classes and num_features\n model.set_num_classes(num_classes)\n model.set_num_features(num_features)\n model.set_num_graph_features(num_graph_features)\n self.graph_model_list.append(model.to(device))\n elif isinstance(model, BaseGraphClassificationTrainer):\n # receive a trainer list, put trainer to list\n assert (\n model.get_model() is not None\n ), \"Passed trainer should contain a model\"\n model.model.set_num_classes(num_classes)\n model.model.set_num_features(num_features)\n model.model.set_num_graph_features(num_graph_features)\n model.update_parameters(\n num_classes=num_classes,\n num_features=num_features,\n num_graph_features=num_graph_features,\n loss=loss,\n feval=feval,\n device=device,\n )\n self.graph_model_list.append(model)\n else:\n raise KeyError(\"cannot find graph network %s.\" % (model))\n else:\n raise ValueError(\n \"need graph network to be (list of) str or a BaseModel class/instance, get\",\n graph_models,\n \"instead.\",\n )\n\n # wrap all model_cls with specified trainer\n for i, model in enumerate(self.graph_model_list):\n # set model hp space\n if self._model_hp_spaces is not None:\n if self._model_hp_spaces[i] is not None:\n if isinstance(model, BaseGraphClassificationTrainer):\n model.model.hyper_parameter_space = self._model_hp_spaces[i]\n else:\n model.hyper_parameter_space = self._model_hp_spaces[i]\n # initialize trainer if needed\n if isinstance(model, BaseModel):\n name = (\n self._default_trainer\n if isinstance(self._default_trainer, str)\n else self._default_trainer[i]\n )\n model = TRAINER_DICT[name](\n model=model,\n num_features=num_features,\n num_classes=num_classes,\n loss=loss,\n feval=feval,\n device=device,\n num_graph_features=num_graph_features,\n init=False,\n )\n # set trainer hp space\n if 
self._trainer_hp_space is not None:\n if isinstance(self._trainer_hp_space[0], list):\n current_hp_for_trainer = self._trainer_hp_space[i]\n else:\n current_hp_for_trainer = self._trainer_hp_space\n model.hyper_parameter_space = current_hp_for_trainer\n self.graph_model_list[i] = model\n\n return self\n\n \"\"\"\n # currently disabled\n def _init_nas_module(\n self, num_features, num_classes, num_graph_features, feval, device, loss\n ):\n for algo, space, estimator in zip(\n self.nas_algorithms, self.nas_spaces, self.nas_estimators\n ):\n # TODO: initialize important parameters\n pass\n \"\"\"\n\n # pylint: disable=arguments-differ\n def fit(\n self,\n dataset,\n time_limit=-1,\n inplace=False,\n train_split=None,\n val_split=None,\n evaluation_method=\"infer\",\n seed=None,\n ) -> \"AutoGraphClassifier\":\n \"\"\"\n Fit current solver on given dataset.\n\n Parameters\n ----------\n dataset: torch_geometric.data.dataset.Dataset\n The multi-graph dataset needed to fit on.\n\n time_limit: int\n The time limit of the whole fit process (in seconds). If set below 0, will ignore\n time limit. Default ``-1``.\n\n inplace: bool\n Whether we process the given dataset in inplace manner. Default ``False``.\n Set it to True if you want to save memory by modifying the given dataset directly.\n\n train_split: float or int (Optional)\n The train ratio (in ``float``) or number (in ``int``) of dataset. If you want to use\n default train/val/test split in dataset, please set this to ``None``.\n Default ``None``.\n\n val_split: float or int (Optional)\n The validation ratio (in ``float``) or number (in ``int``) of dataset. If you want to\n use default train/val/test split in dataset, please set this to ``None``.\n Default ``None``.\n\n evaluation_method: (list of) str autogl.module.train.evaluation\n A (list of) evaluation method for current solver. If ``infer``, will automatically\n determine. Default ``infer``.\n\n seed: int (Optional)\n The random seed. If set to ``None``, will run everything at random.\n Default ``None``.\n\n Returns\n -------\n self: autogl.solver.AutoGraphClassifier\n A reference of current solver.\n \"\"\"\n\n set_seed(seed)\n\n if time_limit < 0:\n time_limit = 3600 * 24\n time_begin = time.time()\n\n # initialize leaderboard\n if evaluation_method == \"infer\":\n if hasattr(dataset, \"metric\"):\n evaluation_method = [dataset.metric]\n else:\n num_of_label = dataset.num_classes\n if num_of_label == 2:\n evaluation_method = [\"auc\"]\n else:\n evaluation_method = [\"acc\"]\n assert isinstance(evaluation_method, list)\n evaluator_list = get_feval(evaluation_method)\n\n self.leaderboard = LeaderBoard(\n [e.get_eval_name() for e in evaluator_list],\n {e.get_eval_name(): e.is_higher_better() for e in evaluator_list},\n )\n\n # set up the dataset\n if train_split is None and val_split is None:\n assert hasattr(dataset, \"train_split\") and hasattr(dataset, \"val_split\"), (\n \"The dataset has no default train/val split! \"\n \"Please manually pass train and val ratio.\"\n )\n LOGGER.info(\"Use the default train/val/test ratio in given dataset\")\n # if hasattr(dataset.train_split, \"n_splits\"):\n # cross_validation = True\n\n elif train_split is not None and val_split is not None:\n utils.graph_random_splits(dataset, train_split, val_split, seed=seed)\n else:\n LOGGER.error(\n \"Please set both train_split and val_split explicitly. Detect %s is None.\",\n \"train_split\" if train_split is None else \"val_split\",\n )\n raise ValueError(\n \"In consistent setting of train/val split. 
Detect {} is None.\".format(\n \"train_split\" if train_split is None else \"val_split\"\n )\n )\n\n # feature engineering\n if self.feature_module is not None:\n self.feature_module.fit(dataset.train_split)\n dataset = self.feature_module.transform(dataset, inplace=inplace)\n\n self.dataset = dataset\n assert dataset[0].x is not None, (\n \"Does not support fit on non node-feature dataset!\"\n \" Please add node features to dataset or specify feature engineers that generate\"\n \" node features.\"\n )\n\n # initialize graph networks\n self._init_graph_module(\n self.gml,\n num_features=dataset.num_node_features,\n num_classes=dataset.num_classes,\n feval=evaluator_list,\n device=self.runtime_device,\n loss=\"cross_entropy\" if not hasattr(dataset, \"loss\") else dataset.loss,\n num_graph_features=0\n if not hasattr(dataset.data, \"gf\")\n else dataset.data.gf.size(1),\n )\n\n # currently disabled\n \"\"\"\n self._init_nas_module(\n num_features=dataset.num_node_features,\n num_classes=dataset.num_classes,\n feval=evaluator_list,\n device=self.runtime_device,\n loss=\"cross_entropy\" if not hasattr(dataset, \"loss\") else dataset.loss,\n num_graph_features=0\n if not hasattr(dataset.data, \"gf\")\n else dataset.data.gf.size(1),\n )\n\n # neural architecture search\n if self.nas_algorithms is not None:\n # perform nas and add them to trainer list\n for algo, space, estimator in zip(\n self.nas_algorithms, self.nas_spaces, self.nas_estimators\n ):\n trainer = algo.search(space, self.dataset, estimator)\n self.graph_model_list.append(trainer)\n \"\"\"\n\n # train the models and tune hpo\n result_valid = []\n names = []\n for idx, model in enumerate(self.graph_model_list):\n if time_limit < 0:\n time_for_each_model = None\n else:\n time_for_each_model = (time_limit - time.time() + time_begin) / (\n len(self.graph_model_list) - idx\n )\n if self.hpo_module is None:\n model.initialize()\n model.train(dataset, True)\n optimized = model\n else:\n optimized, _ = self.hpo_module.optimize(\n trainer=model, dataset=dataset, time_limit=time_for_each_model\n )\n # to save memory, all the trainer derived will be mapped to cpu\n optimized.to(torch.device(\"cpu\"))\n name = str(optimized)\n names.append(name)\n performance_on_valid, _ = optimized.get_valid_score(return_major=False)\n result_valid.append(\n optimized.get_valid_predict_proba().detach().cpu().numpy()\n )\n self.leaderboard.insert_model_performance(\n name,\n dict(\n zip(\n [e.get_eval_name() for e in evaluator_list],\n performance_on_valid,\n )\n ),\n )\n self.trained_models[name] = optimized\n\n # fit the ensemble model\n if self.ensemble_module is not None:\n performance = self.ensemble_module.fit(\n result_valid,\n dataset.data.y[dataset.val_index].cpu().detach().numpy(),\n names,\n evaluator_list,\n n_classes=dataset.num_classes,\n )\n self.leaderboard.insert_model_performance(\n \"ensemble\",\n dict(zip([e.get_eval_name() for e in evaluator_list], performance)),\n )\n\n return self\n\n def fit_predict(\n self,\n dataset,\n time_limit=-1,\n inplace=False,\n train_split=None,\n val_split=None,\n evaluation_method=\"infer\",\n seed=None,\n use_ensemble=True,\n use_best=True,\n name=None,\n ) -> np.ndarray:\n \"\"\"\n Fit current solver on given dataset and return the predicted value.\n\n Parameters\n ----------\n dataset: torch_geometric.data.dataset.Dataset\n The dataset needed to fit on. This dataset must have only one graph.\n\n time_limit: int\n The time limit of the whole fit process (in seconds). 
If set below 0, will\n ignore time limit. Default ``-1``.\n\n inplace: bool\n Whether we process the given dataset in inplace manner. Default ``False``.\n Set it to True if you want to save memory by modifying the given dataset directly.\n\n train_split: float or int (Optional)\n The train ratio (in ``float``) or number (in ``int``) of dataset. If you want to\n use default train/val/test split in dataset, please set this to ``None``.\n Default ``None``.\n\n val_split: float or int (Optional)\n The validation ratio (in ``float``) or number (in ``int``) of dataset. If you want\n to use default train/val/test split in dataset, please set this to ``None``.\n Default ``None``.\n\n evaluation_method: (list of) str or autogl.module.train.evaluation\n A (list of) evaluation method for current solver. If ``infer``, will automatically\n determine. Default ``infer``.\n\n seed: int (Optional)\n The random seed. If set to ``None``, will run everything at random.\n Default ``None``.\n\n use_ensemble: bool\n Whether to use ensemble to do the predict. Default ``True``.\n\n use_best: bool\n Whether to use the best single model to do the predict. Will only be effective when\n ``use_ensemble`` is ``False``. Default ``True``.\n\n name: str or None\n The name of model used to predict. Will only be effective when ``use_ensemble`` and\n ``use_best`` both are ``False``. Default ``None``.\n\n Returns\n -------\n result: np.ndarray\n An array of shape ``(N,)``, where ``N`` is the number of test nodes. The prediction\n on given dataset.\n \"\"\"\n self.fit(\n dataset=dataset,\n time_limit=time_limit,\n inplace=inplace,\n train_split=train_split,\n val_split=val_split,\n evaluation_method=evaluation_method,\n seed=seed,\n )\n return self.predict(\n dataset=dataset,\n inplaced=inplace,\n inplace=inplace,\n use_ensemble=use_ensemble,\n use_best=use_best,\n name=name,\n )\n\n def predict_proba(\n self,\n dataset=None,\n inplaced=False,\n inplace=False,\n use_ensemble=True,\n use_best=True,\n name=None,\n mask=\"test\",\n ) -> np.ndarray:\n \"\"\"\n Predict the node probability.\n\n Parameters\n ----------\n dataset: torch_geometric.data.dataset.Dataset or None\n The dataset needed to predict. If ``None``, will use the processed dataset\n passed to ``fit()`` instead. Default ``None``.\n\n inplaced: bool\n Whether the given dataset is processed. Only be effective when ``dataset``\n is not ``None``. If you pass the dataset to ``fit()`` with ``inplace=True``,\n and you pass the dataset again to this method, you should set this argument\n to ``True``. Otherwise ``False``. Default ``False``.\n\n inplace: bool\n Whether we process the given dataset in inplace manner. Default ``False``.\n Set it to True if you want to save memory by modifying the given dataset directly.\n\n use_ensemble: bool\n Whether to use ensemble to do the predict. Default ``True``.\n\n use_best: bool\n Whether to use the best single model to do the predict. Will only be effective when\n ``use_ensemble`` is ``False``. Default ``True``.\n\n name: str or None\n The name of model used to predict. Will only be effective when ``use_ensemble`` and\n ``use_best`` both are ``False``. Default ``None``.\n\n mask: str\n The data split to give prediction on. Default ``test``.\n\n Returns\n -------\n result: np.ndarray\n An array of shape ``(N,C,)``, where ``N`` is the number of test nodes and ``C`` is\n the number of classes. 
The prediction on given dataset.\n \"\"\"\n if dataset is None:\n dataset = self.dataset\n elif not inplaced:\n if self.feature_module is not None:\n dataset = self.feature_module.transform(dataset, inplace=inplace)\n\n if use_ensemble:\n LOGGER.info(\"Ensemble argument on, will try using ensemble model.\")\n\n if not use_ensemble and use_best:\n LOGGER.info(\n \"Ensemble argument off and best argument on, will try using best model.\"\n )\n\n if (use_ensemble and self.ensemble_module is not None) or (\n not use_best and name == \"ensemble\"\n ):\n # we need to get the predictions of every trained model\n predict_result = []\n names = []\n for model_name in self.trained_models:\n predict_result.append(\n self._predict_proba_by_name(dataset, model_name, mask)\n )\n names.append(model_name)\n return self.ensemble_module.ensemble(predict_result, names)\n\n if use_ensemble and self.ensemble_module is None:\n LOGGER.warning(\n \"Cannot use ensemble because no ensemble module is given. \"\n \"Will use best model instead.\"\n )\n\n if use_best or (use_ensemble and self.ensemble_module is None):\n # just return the best model we have found\n best_model_name = self.leaderboard.get_best_model()\n return self._predict_proba_by_name(dataset, best_model_name, mask)\n\n if name is not None:\n # return the prediction of the model with the given name\n return self._predict_proba_by_name(dataset, name, mask)\n\n LOGGER.error(\n \"No model name is given while ensemble and best arguments are off.\"\n )\n raise ValueError(\n \"You need to specify a model name if you do not want to use the ensemble or best model.\"\n )\n\n def _predict_proba_by_name(self, dataset, name, mask):\n self.trained_models[name].to(self.runtime_device)\n predicted = (\n self.trained_models[name]\n .predict_proba(dataset, mask=mask)\n .detach()\n .cpu()\n .numpy()\n )\n self.trained_models[name].to(torch.device(\"cpu\"))\n return predicted\n\n def predict(\n self,\n dataset=None,\n inplaced=False,\n inplace=False,\n use_ensemble=True,\n use_best=True,\n name=None,\n mask=\"test\",\n ) -> np.ndarray:\n \"\"\"\n Predict the node class number.\n\n Parameters\n ----------\n dataset: torch_geometric.data.dataset.Dataset or None\n The dataset needed to predict. If ``None``, will use the processed dataset passed\n to ``fit()`` instead. Default ``None``.\n\n inplaced: bool\n Whether the given dataset is processed. Only be effective when ``dataset``\n is not ``None``. If you pass the dataset to ``fit()`` with ``inplace=True``, and\n you pass the dataset again to this method, you should set this argument to ``True``.\n Otherwise ``False``. Default ``False``.\n\n inplace: bool\n Whether we process the given dataset in inplace manner. Default ``False``.\n Set it to True if you want to save memory by modifying the given dataset directly.\n\n use_ensemble: bool\n Whether to use ensemble to do the predict. Default ``True``.\n\n use_best: bool\n Whether to use the best single model to do the predict. Will only be effective\n when ``use_ensemble`` is ``False``. Default ``True``.\n\n name: str or None\n The name of model used to predict. Will only be effective when ``use_ensemble``\n and ``use_best`` both are ``False``. 
Default ``None``.\n\n Returns\n -------\n result: np.ndarray\n An array of shape ``(N,)``, where ``N`` is the number of test nodes.\n The prediction on given dataset.\n \"\"\"\n proba = self.predict_proba(\n dataset, inplaced, inplace, use_ensemble, use_best, name, mask\n )\n return np.argmax(proba, axis=1)\n\n @classmethod\n def from_config(cls, path_or_dict, filetype=\"auto\") -> \"AutoGraphClassifier\":\n \"\"\"\n Load solver from config file.\n\n You can use this function to directly load a solver from predefined config dict\n or config file path. Currently, only support file type of ``json`` or ``yaml``,\n if you pass a path.\n\n Parameters\n ----------\n path_or_dict: str or dict\n The path to the config file or the config dictionary object\n\n filetype: str\n The filetype the given file if the path is specified. Currently only support\n ``json`` or ``yaml``. You can set to ``auto`` to automatically detect the file\n type (from file name). Default ``auto``.\n\n Returns\n -------\n solver: autogl.solver.AutoGraphClassifier\n The solver that is created from given file or dictionary.\n \"\"\"\n assert filetype in [\"auto\", \"yaml\", \"json\"], (\n \"currently only support yaml file or json file type, but get type \"\n + filetype\n )\n if isinstance(path_or_dict, str):\n if filetype == \"auto\":\n if path_or_dict.endswith(\".yaml\") or path_or_dict.endswith(\".yml\"):\n filetype = \"yaml\"\n elif path_or_dict.endswith(\".json\"):\n filetype = \"json\"\n else:\n LOGGER.error(\n \"cannot parse the type of the given file name, \"\n \"please manually set the file type\"\n )\n raise ValueError(\n \"cannot parse the type of the given file name, \"\n \"please manually set the file type\"\n )\n if filetype == \"yaml\":\n path_or_dict = yaml.load(\n open(path_or_dict, \"r\").read(), Loader=yaml.FullLoader\n )\n else:\n path_or_dict = json.load(open(path_or_dict, \"r\"))\n\n # load the dictionary\n path_or_dict = deepcopy(path_or_dict)\n solver = cls(None, [], None, None)\n fe_list = path_or_dict.pop(\"feature\", None)\n if fe_list is not None:\n fe_list_ele = []\n for feature_engineer in fe_list:\n name = feature_engineer.pop(\"name\")\n if name is not None:\n fe_list_ele.append(FEATURE_DICT[name](**feature_engineer))\n if fe_list_ele != []:\n solver.set_feature_module(fe_list_ele)\n\n models = path_or_dict.pop(\"models\", [{\"name\": \"gin\"}, {\"name\": \"topkpool\"}])\n model_hp_space = [\n _parse_hp_space(model.pop(\"hp_space\", None)) for model in models\n ]\n model_list = [\n _initialize_single_model(model.pop(\"name\"), model) for model in models\n ]\n\n trainer = path_or_dict.pop(\"trainer\", None)\n default_trainer = \"GraphClassificationFull\"\n trainer_space = None\n if isinstance(trainer, dict):\n # global default\n default_trainer = trainer.pop(\"name\", \"GraphClassificationFull\")\n trainer_space = _parse_hp_space(trainer.pop(\"hp_space\", None))\n default_kwargs = {\"num_features\": None, \"num_classes\": None}\n default_kwargs.update(trainer)\n default_kwargs[\"init\"] = False\n for i in range(len(model_list)):\n model = model_list[i]\n trainer_wrapper = TRAINER_DICT[default_trainer](\n model=model, **default_kwargs\n )\n model_list[i] = trainer_wrapper\n elif isinstance(trainer, list):\n # sequential trainer definition\n assert len(trainer) == len(\n model_list\n ), \"The number of trainer and model does not match\"\n trainer_space = []\n for i in range(len(model_list)):\n train, model = trainer[i], model_list[i]\n default_trainer = train.pop(\"name\", 
\"GraphClassificationFull\")\n trainer_space.append(_parse_hp_space(train.pop(\"hp_space\", None)))\n default_kwargs = {\"num_features\": None, \"num_classes\": None}\n default_kwargs.update(train)\n default_kwargs[\"init\"] = False\n trainer_wrap = TRAINER_DICT[default_trainer](\n model=model, **default_kwargs\n )\n model_list[i] = trainer_wrap\n\n solver.set_graph_models(\n model_list, default_trainer, trainer_space, model_hp_space\n )\n\n hpo_dict = path_or_dict.pop(\"hpo\", {\"name\": \"anneal\"})\n if hpo_dict is not None:\n name = hpo_dict.pop(\"name\")\n solver.set_hpo_module(name, **hpo_dict)\n\n ensemble_dict = path_or_dict.pop(\"ensemble\", {\"name\": \"voting\"})\n if ensemble_dict is not None:\n name = ensemble_dict.pop(\"name\")\n solver.set_ensemble_module(name, **ensemble_dict)\n\n return solver\n"
] | [
[
"torch.nn.functional.softmax"
],
[
"torch.device",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
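The solver code in the row above wires `fit`, `predict_proba`, and `predict` together: `fit` engineers features, trains each backbone under HPO, and optionally fits a voting ensemble, after which `predict` is just `np.argmax` over `predict_proba`. A minimal usage sketch under assumptions: the `build_dataset_from_name` helper and the `"mutag"` dataset name come from AutoGL's documentation, not from this row, so treat them as unverified.

```python
# Hypothetical end-to-end run of the AutoGraphClassifier shown above.
from autogl.datasets import build_dataset_from_name  # assumed helper
from autogl.solver import AutoGraphClassifier

dataset = build_dataset_from_name("mutag")  # assumed dataset name
solver = AutoGraphClassifier(
    graph_models=["gin", "topkpool"],  # defaults from __init__ above
    hpo_module="anneal",
    ensemble_module="voting",
    max_evals=10,
)
solver.fit(dataset, train_split=0.8, val_split=0.1, seed=42)
proba = solver.predict_proba()  # (N, C) probabilities on the test mask
pred = solver.predict()         # np.argmax(proba, axis=1), shape (N,)
```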
kumagai-group/vise | [
"8adfe61ad8f31767ec562f02f271e2495f357cd4",
"8adfe61ad8f31767ec562f02f271e2495f357cd4",
"8adfe61ad8f31767ec562f02f271e2495f357cd4"
] | [
"vise/analyzer/dielectric_function.py",
"vise/util/plotly_util.py",
"vise/analyzer/vasp/make_diele_func.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright (c) 2020. Distributed under the terms of the MIT License.\nfrom dataclasses import dataclass\nfrom math import sqrt, pi\nfrom typing import List\n\nimport numpy as np\nfrom monty.json import MSONable\nfrom tqdm import tqdm\nfrom vise.util.mix_in import ToJsonFileMixIn\nfrom scipy.constants import physical_constants as pc\n\neV_to_inv_cm = pc[\"electron volt-inverse meter relationship\"][0] / 100\n\n\ndef diele_func_to_coeff(freq, real, imag):\n return (2 * sqrt(2) * pi * sqrt(sqrt(real ** 2 + imag ** 2) - real)\n * freq * eV_to_inv_cm)\n\n\n@dataclass\nclass DieleFuncData(MSONable, ToJsonFileMixIn):\n energies: List[float] # in eV\n diele_func_real: List[List[float]] # [xx, yy, zz, xy, yz, xz]\n diele_func_imag: List[List[float]] # [xx, yy, zz, xy, yz, xz]\n band_gap: float # in eV\n\n @property\n def ave_absorption_coeff(self):\n reals = [sum(self.diele_func_real[i][:3]) / 3\n for i in range(len(self.energies))]\n imags = [sum(self.diele_func_imag[i][:3]) / 3\n for i in range(len(self.energies))]\n return [diele_func_to_coeff(freq, real, imag)\n for freq, real, imag in zip(self.energies, reals, imags)]\n\n def target_coeff_min_e(self, target_coeff: float = 10**4):\n for e, coeff in zip(self.energies, self.ave_absorption_coeff):\n if coeff > target_coeff:\n return e\n return None\n\n\ndef make_shifted_diele_func(diele_func_data: DieleFuncData,\n original_band_gap: float,\n shift: float) -> DieleFuncData:\n imag = imag_shift(diele_func_data.diele_func_imag,\n diele_func_data.energies,\n original_band_gap + shift, shift)\n real = kramers_kronig_trans(imag, diele_func_data.energies)\n return DieleFuncData(diele_func_data.energies,\n real.tolist(),\n imag.tolist(),\n original_band_gap + shift)\n\n\ndef imag_shift(diele_func_imag: List[List[float]],\n energies: List[float],\n band_gap: float,\n shift: float) -> np.ndarray:\n energies = np.array(energies)\n assert shift > 0\n result = []\n for energy_grid in energies:\n old_e = energy_grid - shift\n right_idx = np.argwhere(energies > old_e)[0][0]\n left_e, right_e = energies[right_idx - 1], energies[right_idx]\n # linear interpolation\n left_ratio = (right_e - old_e) / (right_e - left_e)\n\n inner_result = []\n for imag_idx in range(6):\n if energy_grid < band_gap:\n inner_result.append(0.0)\n else:\n old_diele = \\\n diele_func_imag[right_idx - 1][imag_idx] * left_ratio + \\\n diele_func_imag[right_idx][imag_idx] * (1 - left_ratio)\n inner_result.append(\n old_diele * (energy_grid - shift) / energy_grid)\n\n result.append(inner_result)\n\n return np.array(result)\n\n\ndef kramers_kronig_trans(diele_func_imag: np.array,\n energies: List[float],\n ita: float = 0.01) -> np.ndarray:\n mesh = energies[1] - energies[0]\n result = []\n ee2ss = [[e ** 2 - energy_grid ** 2 for e in energies]\n for energy_grid in energies]\n for imag_idx in tqdm(range(6)):\n imags = diele_func_imag[:, imag_idx]\n if imag_idx == 0 or \\\n (imag_idx > 0\n and np.allclose(\n imags, diele_func_imag[:, imag_idx - 1]) is False):\n if np.count_nonzero(imags) == 0:\n inner_result = [0.0] * len(energies)\n else:\n inner_result = []\n for ee2s in ee2ss:\n integrals = [e * imag * ee2 / (ee2 ** 2 + ita ** 2)\n for e, ee2, imag in zip(energies, ee2s, imags)]\n integral = sum(integrals) * mesh * 2 / pi\n if imag_idx < 3:\n integral += 1\n inner_result.append(integral)\n\n result.append(inner_result)\n\n return np.array(result).T",
"# Copyright (c) 2020. Distributed under the terms of the MIT License.\n\nimport numpy as np\nfrom numpy import concatenate, clip, dot, arctan2\nfrom numpy.linalg import det\n\n\ndef sort_coords(coords: np.ndarray) -> np.ndarray:\n \"\"\"Sort coordinates based on the angle with first coord from the center.\n\n Args:\n coords (np.ndarray):\n Coordinates to be sorted. The format of coords is as follows.\n np.array([[x1, y1, z1], [x2, y2, z2], [x3, y3, z3]]\n\n Returns:\n np.ndarray for sorted coordinates.\n \"\"\"\n if len(coords[0]) != 3:\n raise ValueError(\"Only valid for 3D vector\")\n\n center = np.average(coords, axis=0)\n relative_coords = coords - center\n external_prod = np.cross(relative_coords[0], relative_coords[1])\n\n # Skip parallel vectors.\n if abs(np.linalg.norm(external_prod)) < 1e-8 and len(relative_coords) > 2:\n external_prod = np.cross(relative_coords[0], relative_coords[2])\n normal_to_12_plane = external_prod / np.linalg.norm(external_prod)\n\n v0 = relative_coords[0] / np.linalg.norm(relative_coords[0])\n\n def angle_between_v0(index: int) -> float:\n \"\"\"\n Args:\n index (int): index of coords.\n\n Returns (float):\n Angle between rays from the center to rel_coords[0] and\n rel_coords[int].\n \"\"\"\n v = relative_coords[index] / np.linalg.norm(relative_coords[index])\n matrix = concatenate(([v0], [v], [normal_to_12_plane]), axis=0)\n determinant = det(matrix)\n angle = arctan2(clip(dot(v0, v), -1.0, 1.0), determinant)\n return angle\n\n indices = [i for i in range(len(coords))]\n indices.sort(key=angle_between_v0)\n return coords[indices]\n\n\ndef make_triangles(vertices):\n x = [v[0] for v in vertices]\n y = [v[1] for v in vertices]\n z = [v[2] for v in vertices]\n n_vertices = len(x)\n i = [0] * (n_vertices - 2)\n j = [x for x in range(1, n_vertices - 1)]\n k = [x for x in range(2, n_vertices)]\n return dict(x=x, y=y, z=z, i=i, j=j, k=k)",
"# -*- coding: utf-8 -*-\n# Copyright (c) 2020. Distributed under the terms of the MIT License.\n\nfrom pymatgen.io.vasp import Vasprun, Outcar\nfrom vise.analyzer.dielectric_function import DieleFuncData, \\\n kramers_kronig_trans\nfrom vise.analyzer.vasp.band_edge_properties import VaspBandEdgeProperties\n\nimport numpy as np\n\n\ndef make_diele_func(vasprun: Vasprun,\n outcar: Outcar,\n use_vasp_real: bool = True,\n ita: float = 0.01) -> DieleFuncData:\n\n energies, real, imag = vasprun.dielectric_data[\"density\"]\n imag = np.array(imag)\n if use_vasp_real:\n real = np.array(real)\n else:\n # When CSHIFT = 0.0, the first component becomes 99999.0\n # the following lines are copied from the vasprun.xml\n # <dielectricfunction>\n # <imag>\n # <array>\n # <dimension dim=“1”>gridpoints</dimension>\n # <field>energy</field>\n # <field>xx</field>\n # <field>yy</field>\n # <field>zz</field>\n # <field>xy</field>\n # <field>yz</field>\n # <field>zx</field>\n # <set>\n # <r> 0.0000 0.0000 0.0000 -0.0000 99999.0000 99999.0000 99999.0000 </r>\n imag[0] = 0.0\n real = kramers_kronig_trans(imag, energies, ita)\n band_gap = VaspBandEdgeProperties(vasprun, outcar).band_gap\n return DieleFuncData(energies, real.tolist(), imag.tolist(), band_gap)\n\n\n"
] | [
[
"numpy.array",
"numpy.allclose",
"numpy.count_nonzero",
"numpy.argwhere"
],
[
"numpy.dot",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.linalg.det",
"numpy.cross",
"numpy.average"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
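In the row above, `kramers_kronig_trans` recovers the real part of the dielectric tensor from its imaginary part by a damped discrete Kramers-Kronig integral over a uniform energy mesh, adding 1 to the three diagonal components. A self-contained numpy sketch of that integral for a single component, with a made-up Lorentzian-like imaginary part:

```python
# One-component sketch of the discrete Kramers-Kronig sum used in
# kramers_kronig_trans above; grid and peak values are toy numbers.
import numpy as np

energies = np.linspace(0.01, 20.0, 500)      # eV, uniform mesh
mesh = energies[1] - energies[0]
imag = 1.0 / ((energies - 5.0) ** 2 + 0.25)  # toy imaginary part peaked at 5 eV
ita = 0.01                                   # damping, as in the source

real = np.empty_like(energies)
for i, e0 in enumerate(energies):
    ee2 = energies ** 2 - e0 ** 2
    integrand = energies * imag * ee2 / (ee2 ** 2 + ita ** 2)
    # the "+ 1" mirrors the diagonal (imag_idx < 3) branch in the source
    real[i] = 1.0 + integrand.sum() * mesh * 2 / np.pi
```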
RobinCondat/pytorch-retinanet | [
"14a2085cd3785a667454898dc65f5324b1b9c6b8"
] | [
"retinanet/losses_vehicle.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom retinanet.config_experiment_2 import INDEXES_MIX, VEHICLE_INDEXES\n\ndef calc_iou(a, b):\n area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n\n iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])\n ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])\n\n iw = torch.clamp(iw, min=0)\n ih = torch.clamp(ih, min=0)\n\n ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih\n\n ua = torch.clamp(ua, min=1e-8)\n\n intersection = iw * ih\n\n IoU = intersection / ua\n\n return IoU\n\ndef cal_ioa(a, b):\n # Intersection over Area (for ignore regions)\n area = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]),dim=1)\n area = torch.clamp(area, min=1e-8)\n\n iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])\n ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])\n\n iw = torch.clamp(iw, min=0)\n ih = torch.clamp(ih, min=0)\n\n intersection = iw * ih\n\n IoA = intersection / area\n\n return IoA\n\n\nclass FocalLoss(nn.Module):\n #def __init__(self):\n\n def forward(self, classifications, regressions, anchors, annotations, dataset, ignore_index=None, merge_index=None):\n\n classes_from_other_datasets = [i for i in range(classifications.shape[-1]+1) if i not in INDEXES_MIX[dataset]]\n alpha = 0.25\n gamma = 2.0\n batch_size = classifications.shape[0]\n classification_losses = []\n regression_losses = []\n\n anchor = anchors[0, :, :]\n num_anchors = anchor.shape[0]\n\n anchor_widths = anchor[:, 2] - anchor[:, 0]\n anchor_heights = anchor[:, 3] - anchor[:, 1]\n anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths\n anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights\n if merge_index is not None:\n classifications = torch.cat((classifications,torch.zeros((classifications.shape[0],classifications.shape[1],1)).cuda()),2)\n print(classifications.shape)\n for j in range(batch_size):\n classification = classifications[j, :, :]\n regression = regressions[j, :, :]\n\n bbox_annotation = annotations[j, :, :]\n bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]\n \n # Merge vehicle detections in vehicle class\n if merge_index is not None:\n if merge_index not in classes_from_other_datasets:\n #print(torch.max(classification[:,VEHICLE_INDEXES], dim=1)[0].shape)\n classification[:,merge_index] = torch.max(classification[:,VEHICLE_INDEXES], dim=1)[0]\n\n # Ignore class from other datasets\n classification[:,classes_from_other_datasets]=0\n\n classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)\n\n if bbox_annotation.shape[0] == 0:\n if torch.cuda.is_available():\n alpha_factor = torch.ones(classification.shape).cuda() * alpha\n\n alpha_factor = 1. - alpha_factor\n focal_weight = classification\n\n focal_weight = alpha_factor * torch.pow(focal_weight, gamma)\n\n bce = -(torch.log(1.0 - classification))\n\n cls_loss = focal_weight * bce\n classification_losses.append(cls_loss.sum())\n regression_losses.append(torch.tensor(0).float().cuda())\n \n else:\n alpha_factor = torch.ones(classification.shape) * alpha\n\n alpha_factor = 1. 
- alpha_factor\n focal_weight = classification\n\n focal_weight = alpha_factor * torch.pow(focal_weight, gamma)\n\n bce = -(torch.log(1.0 - classification))\n\n cls_loss = focal_weight * bce\n classification_losses.append(cls_loss.sum())\n regression_losses.append(torch.tensor(0).float())\n \n continue\n\n # Filter ignore class (via ignore_index)\n if ignore_index is not None:\n # Split the annotations into two objects:\n # - bbox_annotation (all objects to be detected)\n # - ignore_annotation (all regions to be ignored)\n ignore_annotation = bbox_annotation[bbox_annotation[:,4] == ignore_index]\n bbox_annotation = bbox_annotation[bbox_annotation[:,4] != ignore_index]\n\n if bbox_annotation.shape[0] != 0:\n IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4]) # num_anchors x num_annotations_to_detect\n IoU_max, IoU_argmax = torch.max(IoU, dim=1) # num_anchors x 1\n else:\n IoU_max = None\n IoU_argmax = None\n \n if ignore_index is not None:\n # Compute the intersection over area here:\n # any anchor whose IoA with a region to ignore exceeds 0.5 is ignored from here on\n if ignore_annotation.shape[0] !=0:\n IoA = cal_ioa(anchors[0, :, :], ignore_annotation[:, :4]) # num_anchors x num_annotations_to_ignore \n IoA_max, IoA_argmax = torch.max(IoA, dim=1) # num_anchors x 1\n else:\n IoA_max = None\n IoA_argmax = None\n \n # compute the loss for classification\n targets = torch.ones(classification.shape) * -1\n\n if torch.cuda.is_available():\n targets = targets.cuda()\n\n if IoU_max is not None:\n targets[torch.lt(IoU_max, 0.4), :] = 0\n else:\n targets = targets*0\n \n if ignore_index is not None:\n if IoA_max is not None:\n ignore_indices = torch.ge(IoA_max, 0.5)\n else:\n ignore_indices = (torch.ones((num_anchors)) * 0).type(torch.ByteTensor)\n if IoU_max is not None:\n positive_indices = torch.ge(IoU_max, 0.5)\n num_positive_anchors = positive_indices.sum()\n \n else:\n positive_indices = (torch.ones((num_anchors)) * 0).type(torch.ByteTensor)\n num_positive_anchors = torch.tensor(0)\n \n if ignore_index is not None:\n if ignore_indices is not None:\n targets[ignore_indices, :] = -1\n \n if IoU_argmax is not None:\n assigned_annotations = bbox_annotation[IoU_argmax, :]\n targets[positive_indices, :] = 0\n targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1\n \n if torch.cuda.is_available():\n alpha_factor = torch.ones(targets.shape).cuda() * alpha\n else:\n alpha_factor = torch.ones(targets.shape) * alpha\n\n alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)\n focal_weight = torch.where(torch.eq(targets, 1.), 1. 
- classification, classification)\n \n focal_weight = alpha_factor * torch.pow(focal_weight, gamma)\n\n bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))\n\n cls_loss = focal_weight * bce\n\n if torch.cuda.is_available():\n cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape).cuda())\n else:\n cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape))\n classification_losses.append(cls_loss.sum()/torch.clamp(num_positive_anchors.float(), min=1.0))\n \n # compute the loss for regression\n\n if num_positive_anchors > 0:\n assigned_annotations = assigned_annotations[positive_indices, :]\n\n anchor_widths_pi = anchor_widths[positive_indices]\n anchor_heights_pi = anchor_heights[positive_indices]\n anchor_ctr_x_pi = anchor_ctr_x[positive_indices]\n anchor_ctr_y_pi = anchor_ctr_y[positive_indices]\n\n gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]\n gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]\n gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths\n gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights\n\n # clip widths to 1\n gt_widths = torch.clamp(gt_widths, min=1)\n gt_heights = torch.clamp(gt_heights, min=1)\n\n targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi\n targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi\n targets_dw = torch.log(gt_widths / anchor_widths_pi)\n targets_dh = torch.log(gt_heights / anchor_heights_pi)\n\n targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))\n targets = targets.t()\n\n if torch.cuda.is_available():\n targets = targets/torch.Tensor([[0.1, 0.1, 0.2, 0.2]]).cuda()\n else:\n targets = targets/torch.Tensor([[0.1, 0.1, 0.2, 0.2]])\n\n negative_indices = 1 + (~positive_indices)\n\n regression_diff = torch.abs(targets - regression[positive_indices, :])\n\n regression_loss = torch.where(\n torch.le(regression_diff, 1.0 / 9.0),\n 0.5 * 9.0 * torch.pow(regression_diff, 2),\n regression_diff - 0.5 / 9.0\n )\n regression_losses.append(regression_loss.mean())\n else:\n if torch.cuda.is_available():\n regression_losses.append(torch.tensor(0).float().cuda())\n else:\n regression_losses.append(torch.tensor(0).float())\n\n return torch.stack(classification_losses).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0, keepdim=True)\n\n \n"
] | [
[
"torch.abs",
"torch.ge",
"torch.ones",
"torch.max",
"torch.Tensor",
"torch.zeros",
"torch.eq",
"torch.lt",
"torch.unsqueeze",
"torch.tensor",
"torch.le",
"torch.log",
"torch.cuda.is_available",
"torch.stack",
"torch.clamp",
"torch.pow",
"torch.ne"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
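The focal-loss row above assumes boxes in `[x1, y1, x2, y2]` corner format; `calc_iou` broadcasts every anchor against every annotation and clamps the union away from zero. A quick hand-checked example of that convention:

```python
# Toy check of calc_iou from the module above: a 10x10 anchor against a
# 10x10 annotation shifted by (5, 5) overlaps in a 5x5 square.
import torch
from retinanet.losses_vehicle import calc_iou

a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])  # one anchor, xyxy
b = torch.tensor([[5.0, 5.0, 15.0, 15.0]])  # one annotation, xyxy
# intersection 25, union 100 + 100 - 25 = 175, IoU = 25 / 175
print(calc_iou(a, b))  # tensor([[0.1429]])
```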
PeterouZh/PyTorch-StudioGAN | [
"faef6048d25dadee4fa31b2955f16f7d1ca8e1e2"
] | [
"src/main.py"
] | [
"# PyTorch StudioGAN: https://github.com/POSTECH-CVLab/PyTorch-StudioGAN\n# The MIT License (MIT)\n# See license file or visit https://github.com/POSTECH-CVLab/PyTorch-StudioGAN for details\n\n# src/main.py\n\n\nimport json\nimport os\nimport sys\nimport random\nimport warnings\nfrom argparse import ArgumentParser\n\nfrom utils.misc import *\nfrom utils.make_hdf5 import make_hdf5\nfrom utils.log import make_run_name\nfrom loader import prepare_train_eval\n\nimport torch\nfrom torch.backends import cudnn\nimport torch.multiprocessing as mp\n\n\n\nRUN_NAME_FORMAT = (\n \"{framework}-\"\n \"{phase}-\"\n \"{timestamp}\"\n)\n\n\ndef main():\n parser = ArgumentParser(add_help=False)\n parser.add_argument('-c', '--config_path', type=str, default='./src/configs/CIFAR10/ContraGAN.json')\n parser.add_argument('--checkpoint_folder', type=str, default=None)\n parser.add_argument('-current', '--load_current', action='store_true', help='whether you load the current or best checkpoint')\n parser.add_argument('--log_output_path', type=str, default=None)\n\n parser.add_argument('-DDP', '--distributed_data_parallel', action='store_true')\n parser.add_argument('-n', '--nodes', default=1, type=int, metavar='N')\n parser.add_argument('-nr', '--nr', default=0, type=int, help='ranking within the nodes')\n\n parser.add_argument('--seed', type=int, default=-1, help='seed for generating random numbers')\n parser.add_argument('--num_workers', type=int, default=8, help='')\n parser.add_argument('-sync_bn', '--synchronized_bn', action='store_true', help='whether turn on synchronized batchnorm')\n parser.add_argument('-mpc', '--mixed_precision', action='store_true', help='whether turn on mixed precision training')\n parser.add_argument('-LARS', '--LARS_optimizer', action='store_true', help='whether turn on LARS optimizer')\n parser.add_argument('-rm_API', '--disable_debugging_API', action='store_true', help='whether disable pytorch autograd debugging mode')\n\n parser.add_argument('--reduce_train_dataset', type=float, default=1.0, help='control the number of train dataset')\n parser.add_argument('--truncated_factor', type=float, default=-1.0, help='factor for truncation trick')\n parser.add_argument('-stat_otf', '--bn_stat_OnTheFly', action='store_true', help='when evaluating, use the statistics of a batch')\n parser.add_argument('-std_stat', '--standing_statistics', action='store_true')\n parser.add_argument('--standing_step', type=int, default=-1, help='# of steps for accumulation batchnorm')\n parser.add_argument('--freeze_layers', type=int, default=-1, help='# of layers for freezing discriminator')\n\n parser.add_argument('-l', '--load_all_data_in_memory', action='store_true')\n parser.add_argument('-t', '--train', action='store_true')\n parser.add_argument('-e', '--eval', action='store_true')\n parser.add_argument('-s', '--save_images', action='store_true')\n parser.add_argument('-iv', '--image_visualization', action='store_true', help='select whether conduct image visualization')\n parser.add_argument('-knn', '--k_nearest_neighbor', action='store_true', help='select whether conduct k-nearest neighbor analysis')\n parser.add_argument('-itp', '--interpolation', action='store_true', help='whether conduct interpolation analysis')\n parser.add_argument('-fa', '--frequency_analysis', action='store_true', help='whether conduct frequency analysis')\n parser.add_argument('-tsne', '--tsne_analysis', action='store_true', help='whether conduct tsne analysis')\n parser.add_argument('--nrow', type=int, default=10, help='number 
of rows to plot image canvas')\n parser.add_argument('--ncol', type=int, default=8, help='number of cols to plot image canvas')\n\n parser.add_argument('--print_every', type=int, default=100, help='control log interval')\n parser.add_argument('--save_every', type=int, default=2000, help='control evaluation and save interval')\n parser.add_argument('--eval_type', type=str, default='test', help='[train/valid/test]')\n\n from template_lib.v2.config_cfgnode import update_parser_defaults_from_yaml, global_cfg\n update_parser_defaults_from_yaml(parser=parser)\n args = parser.parse_args()\n\n if not args.train and \\\n not args.eval and \\\n not args.save_images and \\\n not args.image_visualization and \\\n not args.k_nearest_neighbor and \\\n not args.interpolation and \\\n not args.frequency_analysis and \\\n not args.tsne_analysis:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n if args.config_path is not None:\n with open(args.config_path) as f:\n model_configs = json.load(f)\n train_configs = vars(args)\n else:\n raise NotImplementedError\n\n hdf5_path_train = make_hdf5(model_configs['data_processing'], train_configs, mode=\"train\") \\\n if train_configs['load_all_data_in_memory'] else None\n\n if train_configs['seed'] == -1:\n train_configs['seed'] = random.randint(1,4096)\n cudnn.benchmark, cudnn.deterministic = True, False\n else:\n cudnn.benchmark, cudnn.deterministic = False, True\n\n fix_all_seed(train_configs['seed'])\n gpus_per_node, rank = torch.cuda.device_count(), torch.cuda.current_device()\n world_size = gpus_per_node*train_configs['nodes']\n if world_size == 1:\n warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')\n\n run_name = make_run_name(RUN_NAME_FORMAT, framework=train_configs['config_path'].split('/')[-1][:-5], phase='train')\n if train_configs['disable_debugging_API']: torch.autograd.set_detect_anomaly(False)\n check_flags(train_configs, model_configs, world_size)\n\n if train_configs['distributed_data_parallel'] and world_size > 1:\n print(\"Train the models through DistributedDataParallel (DDP) mode.\")\n mp.spawn(prepare_train_eval, nprocs=gpus_per_node, args=(gpus_per_node, world_size, run_name,\n train_configs, model_configs, hdf5_path_train))\n else:\n prepare_train_eval(rank, gpus_per_node, world_size, run_name, train_configs, model_configs, hdf5_path_train=hdf5_path_train)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.cuda.device_count",
"torch.multiprocessing.spawn",
"torch.autograd.set_detect_anomaly",
"torch.cuda.current_device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
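In the DDP branch of `main()` above, `torch.multiprocessing.spawn` launches one process per GPU and calls the target as `fn(rank, *args)`, which is why `prepare_train_eval` receives the local rank as its first parameter. A minimal CPU-only sketch of that calling convention (the worker here is a stand-in, not StudioGAN's):

```python
# Toy illustration of the mp.spawn convention used by main() above.
import torch.multiprocessing as mp

def worker(rank, world_size, run_name):
    # spawn injects `rank` as the first positional argument
    print(f"[{run_name}] process {rank} of {world_size}")

if __name__ == "__main__":
    world_size = 2  # stands in for gpus_per_node * nodes
    mp.spawn(worker, nprocs=world_size, args=(world_size, "demo-run"))
```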
anigasan/tensorflow | [
"5b780b4983007661ca479bf4d7ed9a260d8ce43f"
] | [
"tensorflow/lite/python/convert.py"
] | [
"# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Converts a frozen graph into a TFLite FlatBuffer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport enum # pylint: disable=g-bad-import-order\nimport os as _os\nimport platform as _platform\nimport subprocess as _subprocess\nimport tempfile as _tempfile\n\nimport six\nfrom six.moves import map\n\nfrom tensorflow.lite.python import lite_constants\nfrom tensorflow.lite.python import util\nfrom tensorflow.lite.python import wrap_toco\nfrom tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2\nfrom tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2\nfrom tensorflow.lite.toco import types_pb2 as _types_pb2\nfrom tensorflow.python.platform import resource_loader as _resource_loader\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export as _tf_export\n\n\n# Find the toco_from_protos binary using the resource loader if using from\n# bazel, otherwise we are in a pip where console_scripts already has\n# the toco_from_protos tool.\nif lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:\n _toco_from_proto_bin = \"\"\nelse:\n _toco_from_proto_bin = _resource_loader.get_path_to_datafile(\n \"../toco/python/toco_from_protos\")\n\nif _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):\n _toco_from_proto_bin = \"toco_from_protos\"\n\n\ndef _try_convert_to_unicode(output):\n if output is None:\n return u\"\"\n\n if isinstance(output, bytes):\n try:\n return six.ensure_text(output)\n except UnicodeDecodeError:\n pass\n return output\n\n\n@_tf_export(\"lite.OpsSet\")\nclass OpsSet(enum.Enum):\n \"\"\"Enum class defining the sets of ops available to generate TFLite models.\n\n WARNING: Experimental interface, subject to change.\n \"\"\"\n # Convert model using TensorFlow Lite builtin ops.\n TFLITE_BUILTINS = \"TFLITE_BUILTINS\"\n\n # Convert model using TensorFlow ops. 
Not all TensorFlow ops are available.\n # WARNING: Experimental interface, subject to change.\n SELECT_TF_OPS = \"SELECT_TF_OPS\"\n\n # Convert model using only TensorFlow Lite quantized int8 operations.\n # Specifying this will throw an error for operations that do not yet have\n # quantized implementations.\n TFLITE_BUILTINS_INT8 = \"TFLITE_BUILTINS_INT8\"\n\n def __str__(self):\n return self.value\n\n @staticmethod\n def get_options():\n \"\"\"Returns a list of OpsSet options as a list of strings.\"\"\"\n return [str(option) for option in list(OpsSet)]\n\n\nclass ConverterError(Exception):\n \"\"\"Raised when an error occurs during model conversion.\"\"\"\n pass\n\n\ndef toco_convert_protos(model_flags_str,\n toco_flags_str,\n input_data_str,\n debug_info_str=None,\n enable_mlir_converter=False):\n \"\"\"Convert `input_data_str` according to model and toco parameters.\n\n Unless you know what you are doing consider using\n the more friendly `tf.compat.v1.lite.toco_convert`.\n\n Args:\n model_flags_str: Serialized proto describing model properties, see\n `toco/model_flags.proto`.\n toco_flags_str: Serialized proto describing conversion properties, see\n `toco/toco_flags.proto`.\n input_data_str: Input data in serialized form (e.g. a graphdef is common)\n debug_info_str: Serialized `GraphDebugInfo` proto describing logging\n information. (default None)\n enable_mlir_converter: Enables MLIR-based conversion instead of the default\n TOCO conversion. (default False)\n Returns:\n Converted model in serialized form (e.g. a TFLITE model is common).\n Raises:\n ConverterError: When conversion fails in TFLiteConverter, usually due to\n ops not being supported.\n RuntimeError: When conversion fails, an exception is raised with the error\n message embedded.\n \"\"\"\n # TODO(aselle): When toco does not use fatal errors for failure, we can\n # switch this on.\n if not _toco_from_proto_bin:\n try:\n model_str = wrap_toco.wrapped_toco_convert(model_flags_str,\n toco_flags_str, input_data_str,\n debug_info_str,\n enable_mlir_converter)\n return model_str\n except Exception as e:\n raise ConverterError(str(e))\n\n # Windows and TemporaryFile are not that useful together,\n # since you cannot have two readers/writers. So we have to\n # make the temporaries and close and delete them explicitly.\n toco_filename, model_filename, input_filename, output_filename = (\n None, None, None, None)\n try:\n # Build all input files\n with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \\\n _tempfile.NamedTemporaryFile(delete=False) as fp_model, \\\n _tempfile.NamedTemporaryFile(delete=False) as fp_input, \\\n _tempfile.NamedTemporaryFile(delete=False) as fp_debug:\n toco_filename = fp_toco.name\n input_filename = fp_input.name\n model_filename = fp_model.name\n debug_filename = fp_debug.name\n\n fp_model.write(model_flags_str)\n fp_toco.write(toco_flags_str)\n fp_input.write(six.ensure_binary(input_data_str))\n debug_info_str = debug_info_str if debug_info_str else \"\"\n # if debug_info_str contains a \"string value\", then the call to\n # fp_debug.write(debug_info_str) will fail with the following error\n #\n # TypeError: a bytes-like object is required, not 'str'\n #\n # Some of the subtests within the \"convert_test\" unit-test fail\n # with the error shown above. 
So watch out for that scenario and\n # convert debug_info_str to bytes where needed\n if not isinstance(debug_info_str, bytes):\n fp_debug.write(debug_info_str.encode(\"utf-8\"))\n else:\n fp_debug.write(debug_info_str)\n\n # Reserve an output file\n with _tempfile.NamedTemporaryFile(delete=False) as fp:\n output_filename = fp.name\n\n # Run\n cmd = [\n _toco_from_proto_bin,\n model_filename,\n toco_filename,\n input_filename,\n output_filename,\n \"--debug_proto_file={}\".format(debug_filename),\n ]\n if enable_mlir_converter:\n cmd.append(\"--enable_mlir_converter\")\n cmdline = \" \".join(cmd)\n is_windows = _platform.system() == \"Windows\"\n proc = _subprocess.Popen(\n cmdline,\n shell=True,\n stdout=_subprocess.PIPE,\n stderr=_subprocess.STDOUT,\n close_fds=not is_windows)\n stdout, stderr = proc.communicate()\n exitcode = proc.returncode\n if exitcode == 0:\n with open(output_filename, \"rb\") as fp:\n return fp.read()\n else:\n stdout = _try_convert_to_unicode(stdout)\n stderr = _try_convert_to_unicode(stderr)\n raise ConverterError(\"See console for info.\\n%s\\n%s\\n\" % (stdout, stderr))\n finally:\n # Must manually cleanup files.\n for filename in [\n toco_filename, input_filename, model_filename, output_filename]:\n try:\n _os.unlink(filename)\n except (OSError, TypeError):\n pass\n\n\ndef build_toco_convert_protos(input_tensors,\n output_tensors,\n inference_type=lite_constants.FLOAT,\n inference_input_type=None,\n input_format=lite_constants.TENSORFLOW_GRAPHDEF,\n input_shapes=None,\n output_format=lite_constants.TFLITE,\n quantized_input_stats=None,\n default_ranges_stats=None,\n drop_control_dependency=True,\n reorder_across_fake_quant=False,\n allow_custom_ops=False,\n custom_opdefs=None,\n change_concat_input_ranges=False,\n post_training_quantize=False,\n quantize_to_float16=False,\n dump_graphviz_dir=None,\n dump_graphviz_video=False,\n target_ops=None,\n allow_nonexistent_arrays=False,\n debug_info=None,\n conversion_summary_dir=None):\n \"\"\"Builds protocol buffers describing a conversion of a model using TOCO.\n\n Typically this is to convert from TensorFlow GraphDef to TFLite, in which\n case the default `input_format` and `output_format` are sufficient.\n\n Args:\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n inference_type: Target data type of real-number arrays in the output file.\n Must be `{tf.float32, tf.uint8}`. (default tf.float32)\n Must be `{tf.float32, tf.uint8}`. (default `inference_type`)\n inference_input_type: Target data type of real-number input arrays. Allows\n for a different type for input arrays in the case of quantization.\n input_format: Type of data to read Currently must be\n `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)\n input_shapes: Input array shape. It needs to be a list of the same length\n as `input_tensors`, or None. (default None)\n output_format: Output file format. Currently must be `{TFLITE,\n GRAPHVIZ_DOT}`. (default TFLITE)\n quantized_input_stats: List of tuples of floats representing the mean and\n standard deviation. Each tuple maps to the corresponding input tensor.\n Only need if `inference_input_type` is `QUANTIZED_UINT8`.\n real_input_value = (quantized_input_value - mean_value) / std_dev_value.\n (default None)\n default_ranges_stats: Tuple of integers representing (min, max) range values\n for all arrays without a specified range. 
Intended for experimenting with\n quantization via \"dummy quantization\". (default None)\n drop_control_dependency: Boolean indicating whether to drop control\n dependencies silently. This is due to TFLite not supporting control\n dependencies. (default True)\n reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant\n nodes in unexpected locations. Used when the location of the FakeQuant\n nodes is preventing graph transformations necessary to convert the graph.\n Results in a graph that differs from the quantized training graph,\n potentially causing differing arithmetic behavior. (default False)\n allow_custom_ops: Boolean indicating whether to allow custom operations.\n When false any unknown operation is an error. When true, custom ops are\n created for any op that is unknown. The developer will need to provide\n these to the TensorFlow Lite runtime with a custom resolver.\n (default False)\n custom_opdefs: List of strings representing custom ops OpDefs that are\n included in the GraphDef. Required when using custom operations with the\n MLIR-based converter. (default None)\n change_concat_input_ranges: Boolean to change behavior of min/max ranges for\n inputs and outputs of the concat operator for quantized models. Changes\n the ranges of concat operator overlap when true. (default False)\n post_training_quantize: Boolean indicating whether to quantize the weights\n of the converted float model. Model size will be reduced and there will be\n latency improvements (at the cost of accuracy).\n (default False)\n quantize_to_float16: Boolean indicating whether to convert float buffers\n to float16. (default False)\n dump_graphviz_dir: Full filepath of folder to dump the graphs at various\n stages of processing GraphViz .dot files. Preferred over\n --output_format=GRAPHVIZ_DOT in order to keep the requirements of the\n output file. (default None)\n dump_graphviz_video: Boolean indicating whether to dump the graph after\n every graph transformation. (default False)\n target_ops: Experimental flag, subject to change. Set of OpsSet\n options indicating which converter to use.\n (default set([OpsSet.TFLITE_BUILTINS]))\n allow_nonexistent_arrays: Allow specifying array names that don't exist\n or are unused in the final graph. 
(default False)\n debug_info: `GraphDebugInfo` proto containing the stack traces for the\n original nodes referred by the converted graph.\n conversion_summary_dir: A string, the path to the generated conversion logs.\n\n Returns:\n model_flags, toco_flags, debug_info: three protocol buffers describing the\n conversion process and debug information.\n\n Raises:\n ValueError:\n If the input tensor type is unknown\n Missing mean_values or std_dev_values\n RuntimeError: If TOCO fails to convert (in which case the runtime error's\n error text will contain the TOCO error log)\n \"\"\"\n toco = _toco_flags_pb2.TocoFlags()\n toco.input_format = input_format\n toco.output_format = output_format\n toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)\n if inference_input_type:\n toco.inference_input_type = util.convert_dtype_to_tflite_type(\n inference_input_type)\n else:\n toco.inference_input_type = toco.inference_type\n toco.drop_control_dependency = drop_control_dependency\n toco.reorder_across_fake_quant = reorder_across_fake_quant\n toco.allow_custom_ops = allow_custom_ops\n if custom_opdefs:\n toco.custom_opdefs.extend(custom_opdefs)\n toco.post_training_quantize = post_training_quantize\n toco.quantize_to_float16 = quantize_to_float16\n if default_ranges_stats:\n toco.default_ranges_min = default_ranges_stats[0]\n toco.default_ranges_max = default_ranges_stats[1]\n if dump_graphviz_dir:\n toco.dump_graphviz_dir = dump_graphviz_dir\n toco.dump_graphviz_include_video = dump_graphviz_video\n if conversion_summary_dir:\n toco.conversion_summary_dir = conversion_summary_dir\n if target_ops:\n if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):\n toco.enable_select_tf_ops = True\n elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):\n toco.enable_select_tf_ops = True\n toco.force_select_tf_ops = True\n\n model = _model_flags_pb2.ModelFlags()\n model.change_concat_input_ranges = change_concat_input_ranges\n for idx, input_tensor in enumerate(input_tensors):\n input_array = model.input_arrays.add()\n input_array.name = util.get_tensor_name(input_tensor)\n input_array.data_type = util.convert_dtype_to_tflite_type(\n input_tensor.dtype)\n\n if toco.inference_input_type in \\\n [_types_pb2.QUANTIZED_UINT8, _types_pb2.INT8]:\n if not quantized_input_stats:\n raise ValueError(\"std_dev and mean must be defined when \"\n \"inference_input_type is QUANTIZED_UINT8.\")\n input_array.mean_value, input_array.std_value = quantized_input_stats[idx]\n if input_shapes is None:\n shape = input_tensor.shape\n else:\n shape = input_shapes[idx]\n input_array.shape.dims.extend(list(map(int, shape)))\n\n for output_tensor in output_tensors:\n model.output_arrays.append(util.get_tensor_name(output_tensor))\n\n model.allow_nonexistent_arrays = allow_nonexistent_arrays\n\n return model, toco, debug_info\n\n\ndef toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,\n enable_mlir_converter, *args, **kwargs):\n \"\"\"\"Convert a model using TOCO.\n\n This function is used to convert GraphDefs that cannot be loaded into\n TensorFlow to TFLite. Conversion can be customized by providing arguments\n that are forwarded to `build_toco_convert_protos` (see documentation for\n details).\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_arrays_with_shape: Tuple of strings representing input tensor names\n and list of integers representing input shapes\n (e.g., [(\"foo\" : [1, 16, 16, 3])]). 
Use only when graph cannot be loaded\n into TensorFlow and when `input_tensors` is None. (default None)\n output_arrays: List of output tensors to freeze graph with. Use only when\n graph cannot be loaded into TensorFlow and when `output_tensors` is None.\n (default None)\n enable_mlir_converter: Enables MLIR-based conversion instead of TOCO\n conversion.\n *args: See `build_toco_convert_protos`,\n **kwargs: See `build_toco_convert_protos`.\n\n Returns:\n The converted data. For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_toco_convert_protos`.\n \"\"\"\n model_flags, toco_flags, _ = build_toco_convert_protos(\n input_tensors=[], output_tensors=[], *args, **kwargs)\n\n for idx, (name, shape) in enumerate(input_arrays_with_shape):\n input_array = model_flags.input_arrays.add()\n if toco_flags.inference_input_type == _types_pb2.QUANTIZED_UINT8:\n if ((\"quantized_input_stats\" not in kwargs) or\n (not kwargs[\"quantized_input_stats\"])):\n raise ValueError(\"std_dev and mean must be defined when \"\n \"inference_input_type is QUANTIZED_UINT8.\")\n input_array.mean_value, input_array.std_value = kwargs[\n \"quantized_input_stats\"][idx]\n input_array.name = name\n input_array.shape.dims.extend(list(map(int, shape)))\n\n for name in output_arrays:\n model_flags.output_arrays.append(name)\n\n data = toco_convert_protos(\n model_flags.SerializeToString(),\n toco_flags.SerializeToString(),\n input_data.SerializeToString(),\n enable_mlir_converter=enable_mlir_converter)\n return data\n\n\ndef toco_convert_impl(input_data, input_tensors, output_tensors,\n enable_mlir_converter, *args, **kwargs):\n \"\"\"\"Convert a model using TOCO.\n\n Typically this function is used to convert from TensorFlow GraphDef to TFLite.\n Conversion can be customized by providing arguments that are forwarded to\n `build_toco_convert_protos` (see documentation for details).\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n enable_mlir_converter: Enables MLIR-based conversion instead of TOCO\n conversion.\n *args: See `build_toco_convert_protos`,\n **kwargs: See `build_toco_convert_protos`.\n\n Returns:\n The converted data. For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_toco_convert_protos`.\n \"\"\"\n model_flags, toco_flags, debug_info = build_toco_convert_protos(\n input_tensors, output_tensors, *args, **kwargs)\n debug_info_str = debug_info.SerializeToString() if debug_info else None\n data = toco_convert_protos(\n model_flags.SerializeToString(),\n toco_flags.SerializeToString(),\n input_data.SerializeToString(),\n debug_info_str=debug_info_str,\n enable_mlir_converter=enable_mlir_converter)\n return data\n\n\n@_tf_export(v1=[\"lite.toco_convert\"])\[email protected](None, \"Use `lite.TFLiteConverter` instead.\")\ndef toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):\n \"\"\"Convert a model using TOCO.\n\n Typically this function is used to convert from TensorFlow GraphDef to TFLite.\n Conversion can be customized by providing arguments that are forwarded to\n `build_toco_convert_protos` (see documentation for details). This function has\n been deprecated. 
Please use `lite.TFLiteConverter` instead.\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`).\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n *args: See `build_toco_convert_protos`.\n **kwargs: See `build_toco_convert_protos`.\n\n Returns:\n The converted data. For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_toco_convert_protos`.\n \"\"\"\n enable_mlir_converter = kwargs.get(\"enable_mlir_converter\", False)\n return toco_convert_impl(input_data, input_tensors, output_tensors,\n enable_mlir_converter, *args, **kwargs)\n"
] | [
[
"tensorflow.python.platform.resource_loader.get_path_to_datafile",
"tensorflow.lite.python.util.convert_dtype_to_tflite_type",
"tensorflow.lite.python.wrap_toco.wrapped_toco_convert",
"tensorflow.lite.python.util.get_tensor_name",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.lite.toco.model_flags_pb2.ModelFlags",
"tensorflow.lite.toco.toco_flags_pb2.TocoFlags",
"tensorflow.python.util.deprecation.deprecated"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
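A minimal usage sketch for the converter module above, assuming a 1.x-era TensorFlow where the deprecated `tf.compat.v1.lite.toco_convert` entry point (defined at the end of that file) is still available; the tiny variable-free graph is purely illustrative.

import tensorflow.compat.v1 as tf
from tensorflow.lite.python.convert import OpsSet

tf.disable_eager_execution()

with tf.Graph().as_default():
    inp = tf.placeholder(tf.float32, shape=[1, 4], name="input")
    out = tf.nn.relu(inp, name="output")  # no variables, so no freezing step needed
    with tf.Session() as sess:
        # toco_convert forwards *args/**kwargs to build_toco_convert_protos
        tflite_model = tf.lite.toco_convert(sess.graph_def, [inp], [out])

with open("model.tflite", "wb") as f:
    f.write(tflite_model)

print(OpsSet.get_options())  # e.g. ['TFLITE_BUILTINS', 'SELECT_TF_OPS', ...]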
cverluise/parseEPO | [
"be1171a0f8e6fcafa711fa291aebb1fc2260d5e6"
] | [
"parseepo/serialize.py"
] | [
"import html2text\nimport pandas as pd\nfrom wasabi import Printer\n\nfrom parseepo import validate\nfrom parseepo.exception import SingleAttrException\nfrom parseepo.utils import prepare_name\n\nh = html2text.HTML2Text()\nmsg = Printer()\nNAMES = [\"EP\", \"Num\", \"Ext\", \"publication_date\", \"language\", \"attr\", \"text\"]\nNESTED_ATTR = [\"TITLE\", \"CLAIM\", \"AMEND\", \"title\", \"claims\", \"amendment\"]\n\n\ndef format_patent_df(\n data: list, prepare_names: bool = False, handle_html: bool = False\n):\n \"\"\"\n Return data as a prepared DataFrame from a list of rows\n Nb: Input is [publication_number[Row]].\n E.g. [['EP','0700059 A1','1996-03-06','de','TITLE',' Elektroma...'],\n ['EP','0700059 A1','1996-03-06','en','TITLE',' Electroma...'],\n ...\n :param data: List[List]\n :param prepare_names: bool, True if you want to prepare names for BQ compatibility\n :param handle_html: bool, True if you want to handle html\n :return: pd.DataFrame\n publication_date language attr text publication_number\n 0 1996-03-06 ... ... ... EP-0700059-A1\n 1 1996-03-06 ... ... ... EP-0700059-A1\n 2 1996-03-06 ... ... ... EP-0700059-A1\n 3 1996-03-06 ... ... ... EP-0700059-A1\n 4 1996-03-06 ... ... ... EP-0700059-A1\n 5 1996-03-06 ... ... ... EP-0700059-A1\n 6 1996-03-06 ... ... ... EP-0700059-A1\n \"\"\"\n\n df_ = pd.DataFrame(data, columns=NAMES)\n df_[\"publication_number\"] = df_[\"EP\"] + \"-\" + df_[\"Num\"] + \"-\" + df_[\"Ext\"]\n df_ = df_.drop([\"EP\", \"Num\", \"Ext\"], axis=1)\n\n if prepare_names:\n df_[\"attr\"] = df_[\"attr\"].apply(lambda x: prepare_name(x, True))\n if handle_html:\n df_[\"text\"] = df_[\"text\"].apply(lambda x: h.handle(x))\n return df_\n\n\ndef unnest_attr(patent_dict: dict, publication_number: str):\n \"\"\"\n Unnest flat attributes returned as nested by the batch aggregation operation in\n serialize_patent.\n Raises warning if expected flat attributes has multiple values.\n :param patent_dict: dict, returned by serialize_patent\n :param publication_number: str, e.g. 'EP-0600083-A1'\n :return: dict\n In:\n { ...,\n 'PDFEP': {'language': ['en'],\n 'text': ['https://data.epo.org/publication-server/...']},\n }\n Out:\n {...,\n 'PDFEP': 'https://data.epo.org/publication-server/...',}\n\n \"\"\"\n attrs = list(filter(lambda x: x not in NESTED_ATTR, patent_dict.keys()))\n for attr in attrs:\n val = patent_dict[attr][\"text\"]\n try:\n validate.single_attr(val, attr, publication_number)\n except SingleAttrException:\n msg.warn(\n f\"{publication_number}: {attr} has more than 1 value. Only the first value \"\n f\"was kept. 
Add {attr} to the list NESTED_ATTR to fix this behavior.\"\n )\n patent_dict.update(\n {\n attr: {\n \"text\": patent_dict[attr][\"text\"][0],\n \"language\": patent_dict[attr][\"language\"][0],\n }\n }\n )\n\n\ndef serialize_patent_df(patent_df: pd.DataFrame):\n \"\"\"\n Return the serialized patent\n :param patent_df: pd.DataFrame, returned by format_patent_df\n :return: dict\n {'ABSTR': '<p id=\"pa01\" num=\"0001\">A device ...',\n 'CLAIM': {'language': ['en'],\n 'text': ['<claim id=\"c-en-0001\" ...']},\n 'DESCR': '<heading id=\"h0001\">Field of ...',\n 'PDFEP': 'https://data.epo.org/publication-server/...',\n 'TITLE': {'language': ['de', 'en', 'fr'],\n 'text': ['VORRICHTUNG ZUM ...',\n 'DEVICE FOR CONVEYING ...',\n \"DISPOSITIF D'ACHEMINEMENT ...']},\n 'publication_date': '1994-06-08',\n 'publication_number': 'EP-0600083-A1'}\n \"\"\"\n publication_number = patent_df[\"publication_number\"].values[0]\n publication_date = patent_df[\"publication_date\"].values[0]\n\n out = (\n patent_df.drop([\"publication_number\", \"publication_date\"], axis=1)\n .groupby(\"attr\")\n .aggregate(list)\n .T.to_dict()\n )\n\n unnest_attr(out, publication_number)\n out.update({\"publication_number\": publication_number})\n out.update({\"publication_date\": publication_date})\n return out\n\n\ndef serialize_patent(\n data: list, prepare_names: bool = False, handle_html: bool = False\n):\n \"\"\"\n Return the serialized patent\n :param data: List[List[str]], E.g.\n [['EP','0700059 A1','1996-03-06','de','TITLE',' Elektroma...'],\n ['EP','0700059 A1','1996-03-06','en','TITLE',' Electroma...'],\n :param prepare_names: bool, True if you want to prepare names for BQ compatibility\n :param handle_html: bool, True if you want to handle html\n :return: dict\n \"\"\"\n out = format_patent_df(data, prepare_names, handle_html)\n out = serialize_patent_df(out)\n return out\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
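A hypothetical call into parseepo's serializer with toy rows (the seven columns follow NAMES in the module above; the publication values are made up):

from parseepo.serialize import serialize_patent

rows = [
    ["EP", "0700059", "A1", "1996-03-06", "de", "TITLE", "Elektromagnetische Vorrichtung"],
    ["EP", "0700059", "A1", "1996-03-06", "en", "TITLE", "Electromagnetic device"],
    ["EP", "0700059", "A1", "1996-03-06", "en", "ABSTR", "<p>A device ...</p>"],
]

patent = serialize_patent(rows, prepare_names=False, handle_html=True)
print(patent["publication_number"])  # EP-0700059-A1
print(patent["TITLE"]["language"])   # ['de', 'en']  (TITLE is in NESTED_ATTR, so it stays nested)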
luigiluz/pyampd | [
"cd247030f5a4ccd971da837b9b873cacbd7adfb3"
] | [
"pyampd/ampd.py"
] | [
import numpy as np\nfrom scipy.ndimage import uniform_filter1d\nfrom scipy.signal import detrend\n\n\ndef find_peaks_original(x, scale=None, debug=False):\n \"\"\"Find peaks in quasi-periodic noisy signals using AMPD algorithm.\n\n Automatic Multi-Scale Peak Detection originally proposed in\n \"An Efficient Algorithm for Automatic Peak Detection in\n Noisy Periodic and Quasi-Periodic Signals\", Algorithms 2012, 5, 588-603\n https://doi.org/10.3390/a5040588\n\n Optimized implementation by Igor Gotlibovych, 2018\n\n\n Parameters\n ----------\n x : ndarray\n 1-D array on which to find peaks\n scale : int, optional\n specify maximum scale window size of (2 * scale + 1)\n debug : bool, optional\n if set to True, return the Local Scalogram Matrix, `LSM`,\n and scale with most local maxima, `l`,\n together with peak locations\n\n Returns\n -------\n pks: ndarray\n The ordered array of peak indices found in `x`\n\n \"\"\"\n x = detrend(x)\n N = len(x)\n L = N // 2\n if scale:\n L = min(scale, L)\n\n # create LSM matrix\n LSM = np.zeros((L, N), dtype=bool)\n for k in np.arange(1, L):\n LSM[k - 1, k:N - k] = (\n (x[0:N - 2 * k] < x[k:N - k]) & (x[k:N - k] > x[2 * k:N])\n )\n\n # Find scale with most maxima\n G = LSM.sum(axis=1)\n l_scale = np.argmax(G)\n\n # find peaks that persist on all scales up to l\n pks_logical = np.min(LSM[0:l_scale, :], axis=0)\n pks = np.flatnonzero(pks_logical)\n if debug:\n return pks, LSM, l_scale\n return pks\n\n\ndef find_peaks(x, scale=None, debug=False):\n \"\"\"Find peaks in quasi-periodic noisy signals using AMPD algorithm.\n\n Extended implementation handles peaks near start/end of the signal.\n\n Optimized implementation by Igor Gotlibovych, 2018\n\n\n Parameters\n ----------\n x : ndarray\n 1-D array on which to find peaks\n scale : int, optional\n specify maximum scale window size of (2 * scale + 1)\n debug : bool, optional\n if set to True, return the Local Scalogram Matrix, `LSM`,\n weighted number of maxima, 'G',\n and scale at which G is maximized, `l`,\n together with peak locations\n\n Returns\n -------\n pks: ndarray\n The ordered array of peak indices found in `x`\n\n \"\"\"\n x = detrend(x)\n N = len(x)\n L = N // 2\n if scale:\n L = min(scale, L)\n\n # create LSM matrix\n LSM = np.ones((L, N), dtype=bool)\n for k in np.arange(1, L + 1):\n LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]\n ) # compare to right neighbours\n LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k]) # compare to left neighbours\n\n # Find scale with most maxima\n G = LSM.sum(axis=1)\n G = G * np.arange(\n N // 2, N // 2 - L, -1\n ) # normalize to adjust for new edge regions\n l_scale = np.argmax(G)\n\n # find peaks that persist on all scales up to l\n pks_logical = np.min(LSM[0:l_scale, :], axis=0)\n pks = np.flatnonzero(pks_logical)\n if debug:\n return pks, LSM, G, l_scale\n return pks\n\n\ndef find_peaks_adaptive(x, window=None, debug=False):\n \"\"\"Find peaks in quasi-periodic noisy signals using ASS-AMPD algorithm.\n\n Adaptive Scale Selection Automatic Multi-Scale Peak Detection,\n an extension of AMPD -\n \"An Efficient Algorithm for Automatic Peak Detection in\n Noisy Periodic and Quasi-Periodic Signals\", Algorithms 2012, 5, 588-603\n https://doi.org/10.3390/a5040588\n\n Optimized implementation by Igor Gotlibovych, 2018\n\n\n Parameters\n ----------\n x : ndarray\n 1-D array on which to find peaks\n window : int, optional\n sliding window size for adaptive scale selection\n debug : bool, optional\n if set to True, return the Local Scalogram Matrix, `LSM`,\n and 
`adaptive_scale`,\n together with peak locations\n\n Returns\n -------\n pks: ndarray\n The ordered array of peak indices found in `x`\n\n \"\"\"\n x = detrend(x)\n N = len(x)\n if not window:\n window = N\n if window > N:\n window = N\n L = window // 2\n\n # create LSM matrix\n LSM = np.ones((L, N), dtype=bool)\n for k in np.arange(1, L + 1):\n LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]\n ) # compare to right neighbours\n LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k]) # compare to left neighbours\n\n # Create continuous adaptive LSM\n ass_LSM = uniform_filter1d(LSM * window, window, axis=1, mode='nearest')\n normalization = np.arange(L, 0, -1) # scale normalization weight\n ass_LSM = ass_LSM * normalization.reshape(-1, 1)\n\n # Find adaptive scale at each point\n adaptive_scale = ass_LSM.argmax(axis=0)\n\n # construct reduced LSM\n LSM_reduced = LSM[:adaptive_scale.max(), :]\n mask = (np.indices(LSM_reduced.shape)[0] > adaptive_scale\n ) # these elements are outside scale of interest\n LSM_reduced[mask] = 1\n\n # find peaks that persist on all scales up to l\n pks_logical = np.min(LSM_reduced, axis=0)\n pks = np.flatnonzero(pks_logical)\n if debug:\n return pks, ass_LSM, adaptive_scale\n return pks\n"
] | [
[
"numpy.min",
"numpy.arange",
"numpy.indices",
"numpy.flatnonzero",
"numpy.ones",
"numpy.argmax",
"scipy.ndimage.uniform_filter1d",
"numpy.zeros",
"scipy.signal.detrend"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
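A quick self-contained check of the extended AMPD implementation above on a noisy sine wave (synthetic data, illustrative only):

import numpy as np
from pyampd.ampd import find_peaks

t = np.linspace(0, 20 * np.pi, 2000)            # ten periods over 2000 samples
x = np.sin(t) + 0.1 * np.random.randn(t.size)   # add some noise
peaks = find_peaks(x)                           # indices of detected maxima
print(peaks)
print("median peak spacing:", np.median(np.diff(peaks)))  # ~200 samples per period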
bionlplab/heart_failure_mortality | [
"f3bbfe65fe6f2c2a076acb38697133b472bf2231"
] | [
"extract_features.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom utils import *\nfrom sklearn.preprocessing import StandardScaler\nfrom collections import defaultdict\nimport re\n\ndef format_labels(file_path, timelines, mapping):\n\tmost_recent = mapping.sort_values([\"subject_id\", \"ordering_date\"], ascending=False).drop_duplicates(\"subject_id\", keep=\"first\")\n\n\tlabel_features = pd.read_csv(file_path)\n\tformatted_features = reformat4pycox([\"report_id\"], label_features)\n\n\t#Connect subject to report\n\tdata_frames = [timelines, most_recent]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"subject_id\"), data_frames)\n\n\t#Connect report to labels\n\tdata_frames = [data_df, formatted_features]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"report_id\"), data_frames)\n\n\tfor i in [\"ordering_date\", \"report_id\"]:\n\t del data_df[i]\n\n\treturn data_df\n\ndef format_hidden_features(file_path, timelines, mapping):\n\tloaded = np.load(file_path)\n\n\tmost_recent = mapping.sort_values([\"subject_id\", \"ordering_date\"], ascending=False).drop_duplicates(\"subject_id\", keep=\"first\")\n\treport_ids = list(most_recent['report_id'])\n\n\tmutable_file = {} \n\tfor id in report_ids:\n\t mutable_file[id] = loaded[id].flatten()\n\tloaded = mutable_file\n\n\tlabel_features = pd.DataFrame(loaded.values(), index=loaded)\n\n\tcols = list(label_features.columns)\n\txcols = [\"x\" + str(i) for i in cols]\n\trename_dict = dict(zip(cols,xcols))\n\trename_dict[\"index\"] = \"report_id\"\n\n\tlabel_features = label_features.reset_index().rename(columns=rename_dict)\n\n\t#Connect subject to report\n\tdata_frames = [timelines, most_recent]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"subject_id\"), data_frames)\n\n\t#Connect report to labels\n\tdata_frames = [data_df, label_features]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"report_id\"), data_frames)\n\n\tfor i in [\"ordering_date\", \"report_id\"]:\n\t del data_df[i]\n\n\treturn data_df\n\ndef format_hf_sequence(file_path, timelines, mapping):\n\tloaded = np.load(file_path)\n\t \n\ttop3_reports = mapping.sort_values([\"subject_id\", \"ordering_date\"], ascending=True).groupby(\"subject_id\").tail(3)\n\n\t#Create a list of report ids\n\treport_dict = top3_reports.groupby(\"subject_id\")[\"report_id\"].apply(list).to_dict()\n\n\t#Create a dict of report arrays. Format: key: array of report embeddings\n\tembedding_dict = defaultdict(list)\n\n\tfor k,v in report_dict.items():\n\t\tfor vi in v:\n\t\t embedding_dict[k].append(loaded[vi])\n\n\t\tembedding_dict[k] = np.vstack(embedding_dict[k])\n\n\t#Converting embedding dict into dataframe\n\tlabel_features = pd.DataFrame(embedding_dict.values(), index=embedding_dict)\n\n\tlabel_features[0] = label_features[0].apply(lambda x: add_paddings(x))\n\n\tlist2d = label_features[0]\n\n\tmerged = list(itertools.chain(*list2d))\n\n\tscaler = StandardScaler()\n\tscaler.fit(merged)\n\n\tlabel_features[0] = label_features[0].apply(lambda x: scaler.transform(x))\n\n\tcols = list(label_features.columns)\n\txcols = [\"x\" + str(i) for i in cols]\n\trename_dict = dict(zip(cols,xcols))\n\n\tlabel_features = label_features.rename(columns=rename_dict)\n\tlabel_features = label_features.reset_index().rename(columns={\"index\": \"subject_id\"})\n\n\tdata_frames = [timelines, label_features]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"subject_id\"), data_frames)\n\n\treturn data_df\n"
] | [
[
"pandas.merge",
"pandas.read_csv",
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
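The three format_* functions above all rely on the same chain-merge idiom; below is a minimal stand-alone sketch of that pattern with toy frames (`reformat4pycox` and `add_paddings` come from the project's own utils module and are not needed for this illustration):

from functools import reduce
import pandas as pd

timelines = pd.DataFrame({"subject_id": [1, 2], "duration": [30, 12]})
mapping = pd.DataFrame({
    "subject_id": [1, 1, 2],
    "ordering_date": ["2020-01-01", "2020-06-01", "2020-03-01"],
    "report_id": ["r1", "r2", "r3"],
})

# keep each subject's most recent report, as in format_labels
most_recent = (mapping.sort_values(["subject_id", "ordering_date"], ascending=False)
                      .drop_duplicates("subject_id", keep="first"))

data_df = reduce(lambda left, right: pd.merge(left, right, on="subject_id"),
                 [timelines, most_recent])
print(data_df)  # one row per subject, joined to its latest report_id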
derekmpham/mindmeld | [
"18189f956e4e3eb92df61fde95ec82f73b9efa91"
] | [
"mindmeld/converter/dialogflow.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This module contains the DialogflowConverter class used to convert Dialogflow projects\ninto Mindmeld projects\"\"\"\n\nimport json\nimport logging\nimport os\nimport re\n\nfrom sklearn.model_selection import train_test_split\n\nfrom mindmeld.converter.converter import Converter\n\nlogger = logging.getLogger(__name__)\n\n\nclass DialogflowConverter(Converter):\n \"\"\"The class is a sub class of the abstract Converter class. This class\n contains the methods required to convert a Dialogflow project into a MindMeld project\n \"\"\"\n\n sys_entity_map = {\n \"@sys.date-time\": \"sys_interval\",\n \"@sys.date\": \"sys_time\",\n \"@sys.date-period\": \"sys_interval\",\n \"@sys.time\": \"sys_time\",\n \"@sys.time-period\": \"sys_duration\",\n \"@sys.duration\": \"sys_duration\",\n \"@sys.number\": \"sys_number\",\n \"@sys.cardinal\": \"sys_number\",\n \"@sys.ordinal\": \"sys_ordinal\",\n \"@sys.unit-currency\": \"sys_amount-of-money\",\n \"@sys.unit-volume\": \"sys_volume\",\n \"@sys.email\": \"sys_email\",\n \"@sys.phone-number\": \"sys_phone-number\",\n \"@sys.url\": \"sys_url\",\n }\n\n # TODO: provide support for entities listed in sys_entity_map_todo\n sys_entity_map_todo = [\n \"@sys.number-integer\",\n \"@sys.number-sequence\",\n \"@sys.flight-number\",\n \"@sys.unit-area\",\n \"@sys.unit-length\",\n \"@sys.unit-speed\",\n \"@sys.unit-information\",\n \"@sys.percentage\",\n \"@sys.temperature\",\n \"@sys.duration\",\n \"@sys.age\",\n \"@sys.currency-name\",\n \"@sys.unit-area-name\",\n \"@sys.unit-length-name\",\n \"@sys.unit-speed-name\",\n \"@sys.unit-volume-name\",\n \"@sys.unit-weight-name\",\n \"@sys.unit-information-name\",\n \"@sys.address\",\n \"@sys.zip-code\",\n \"@sys.geo-capital\",\n \"@sys.geo-country\",\n \"@sys.geo-country-code\",\n \"@sys.geo-city\",\n \"@sys.geo-state\",\n \"@sys.geo-city\",\n \"@sys.geo-state\",\n \"@sys.place-attraction\",\n \"@sys.airport\",\n \"@sys.location\",\n \"@sys.given-name\",\n \"@sys.last-name\",\n \"@sys.person\",\n \"@sys.music-artist\",\n \"@sys.music-genre\",\n \"@sys.color\",\n \"@sys.language\",\n \"@sys.any\",\n ]\n\n def __init__(self, dialogflow_project_directory, mindmeld_project_directory):\n if os.path.exists(os.path.dirname(dialogflow_project_directory)):\n self.dialogflow_project_directory = dialogflow_project_directory\n self.mindmeld_project_directory = mindmeld_project_directory\n self.directory = os.path.dirname(os.path.realpath(__file__))\n self.entities_list = set()\n self.intents_list = set()\n else:\n msg = \"`{dialogflow_project_directory}` does not exist. 
Please verify.\"\n msg = msg.format(dialogflow_project_directory=dialogflow_project_directory)\n raise FileNotFoundError(msg)\n\n def create_mindmeld_directory(self):\n self.create_directory(self.mindmeld_project_directory)\n self.create_directory(os.path.join(self.mindmeld_project_directory, \"data\"))\n self.create_directory(os.path.join(self.mindmeld_project_directory, \"domains\"))\n self.create_directory(\n os.path.join(self.mindmeld_project_directory, \"domains\", \"general\")\n )\n self.create_directory(os.path.join(self.mindmeld_project_directory, \"entities\"))\n\n # =========================\n # create training data (entities, intents)\n # =========================\n\n def _create_entities_directories(self, entities):\n \"\"\" Creates directories + files for all languages/files.\n Currently does not use meta data in entityName.json files (the keys in var entities).\n \"\"\"\n for languages in entities.values():\n for sub in languages.values():\n dialogflow_entity_file = os.path.join(\n self.dialogflow_project_directory, \"entities\", sub + \".json\"\n )\n\n mindmeld_entity_directory_name = self.clean_check(\n sub, self.entities_list\n )\n\n mindmeld_entity_directory = os.path.join(\n self.mindmeld_project_directory,\n \"entities\",\n mindmeld_entity_directory_name,\n )\n\n self.create_directory(mindmeld_entity_directory)\n\n self._create_entity_file(\n dialogflow_entity_file, mindmeld_entity_directory\n )\n\n @staticmethod\n def _create_entity_file(dialogflow_entity_file, mindmeld_entity_directory):\n source_en = open(dialogflow_entity_file, \"r\")\n target_gazetteer = open(\n os.path.join(mindmeld_entity_directory, \"gazetteer.txt\"), \"w\"\n )\n target_mapping = open(\n os.path.join(mindmeld_entity_directory, \"mapping.json\"), \"w\"\n )\n\n datastore = json.load(source_en)\n mapping_dict = {\"entities\": []}\n\n for item in datastore:\n new_dict = {}\n while (\"value\" in item) and (item[\"value\"] in item[\"synonyms\"]):\n item[\"synonyms\"].remove(item[\"value\"])\n new_dict[\"whitelist\"] = item[\"synonyms\"]\n new_dict[\"cname\"] = item[\"value\"]\n mapping_dict[\"entities\"].append(new_dict)\n\n target_gazetteer.write(item[\"value\"] + \"\\n\")\n\n json.dump(mapping_dict, target_mapping, ensure_ascii=False, indent=2)\n\n source_en.close()\n target_gazetteer.close()\n target_mapping.close()\n\n def _create_intents_directories(self, intents):\n \"\"\" Creates directories + files for all languages/files.\"\"\"\n\n for languages in intents.values():\n for language, sub in languages.items():\n dialogflow_intent_file = os.path.join(\n self.dialogflow_project_directory, \"intents\", sub + \".json\"\n )\n\n mindmeld_intent_directory_name = self.clean_check(\n sub, self.intents_list\n )\n mindmeld_intent_directory = os.path.join(\n self.mindmeld_project_directory,\n \"domains\",\n \"general\",\n mindmeld_intent_directory_name,\n )\n\n self.create_directory(mindmeld_intent_directory)\n\n self._create_intent_file(\n dialogflow_intent_file, mindmeld_intent_directory, language\n )\n\n def _create_intent_file(\n self, dialogflow_intent_file, mindmeld_intent_directory, language\n ):\n source_en = open(dialogflow_intent_file, \"r\")\n target_test = open(os.path.join(mindmeld_intent_directory, \"test.txt\"), \"w\")\n target_train = open(os.path.join(mindmeld_intent_directory, \"train.txt\"), \"w\")\n\n datastore = json.load(source_en)\n all_text = []\n\n for usersay in datastore:\n sentence = \"\"\n for texts in usersay[\"data\"]:\n df_text = texts[\"text\"]\n if \"meta\" in texts and 
texts[\"meta\"] != \"@sys.ignore\":\n df_meta = texts[\"meta\"]\n\n if re.match(\n \"(@sys.).+\", df_meta\n ): # if text is a dialogflow sys entity\n if df_meta in DialogflowConverter.sys_entity_map:\n mm_meta = DialogflowConverter.sys_entity_map[df_meta]\n else:\n mm_meta = \"[DNE: {sysEntity}]\".format(sysEntity=df_meta[1:])\n logger.info(\n \"Unfortunately mindmeld does not currently support\"\n \"%s as a sys entity.\"\n \"Please create an entity for this.\",\n df_meta[1:],\n )\n\n entity_type = self.clean_name(mm_meta) + \"_entries_\" + language\n part = \"{\" + df_text + \"|\" + entity_type + \"}\"\n else:\n entity_type = (\n self.clean_name(df_meta[1:]) + \"_entries_\" + language\n )\n part = \"{\" + df_text + \"|\" + entity_type + \"}\"\n else:\n part = df_text\n\n sentence += part\n all_text.append(sentence)\n\n train, test = train_test_split(all_text, test_size=0.2)\n\n target_test.write(\"\\n\".join(test))\n target_train.write(\"\\n\".join(train))\n\n source_en.close()\n target_test.close()\n target_train.close()\n\n def _get_file_names(self, level):\n \"\"\" Gets the names of the entities from Dialogflow as a dictionary.\n levels (str): either \"entities\" or \"intents\"\n\n ex. if we had the following files in our entities directory:\n [\"test.json\", \"test_entries_en.json\", \"test_entries_de.json\"]\n it returns:\n {'test': {'en': 'test_entries_en', 'de': 'test_entries_de'}} \"\"\"\n\n directory = os.path.join(self.dialogflow_project_directory, level)\n files = os.listdir(directory)\n\n w = {\"entities\": \"entries\", \"intents\": \"usersays\"}\n p = r\".+(?<=(_\" + w[level] + \"_))(.*)(?=(.json))\"\n\n info = {}\n for name in files:\n match = re.match(p, name)\n\n if match:\n isbase = False\n base = name[: match.start(1)]\n language = match.group(2)\n else:\n isbase = True\n base = name[:-5]\n\n if base not in info:\n info[base] = {}\n\n if not isbase:\n info[base][language] = name[:-5]\n\n return info\n\n def create_mindmeld_training_data(self):\n entities = self._get_file_names(\"entities\")\n self._create_entities_directories(entities)\n\n intents = self._get_file_names(\"intents\")\n self._create_intents_directories(intents)\n\n # =========================\n # create init\n # =========================\n\n @staticmethod\n def create_handle(params):\n return \"@app.handle(\" + params + \")\"\n\n @staticmethod\n def create_header(function_name):\n return \"def \" + function_name + \"(request, responder):\"\n\n @staticmethod\n def create_function(handles, function_name, replies):\n assert isinstance(handles, list)\n\n result = \"\"\n for handle in handles:\n result += DialogflowConverter.create_handle(handle) + \"\\n\"\n result += DialogflowConverter.create_header(function_name) + \"\\n\"\n result += \" \" + \"replies = {}\".format(replies) + \"\\n\"\n result += \" \" + \"responder.reply(replies)\"\n return result\n\n @staticmethod\n def clean_name(name):\n \"\"\" Takes in a string and returns a valid folder name (no spaces, all lowercase).\"\"\"\n name = re.sub(r\"[^\\w\\s-]\", \"\", name).strip().lower()\n name = re.sub(r\"[-\\s]+\", \"_\", name)\n return name\n\n def clean_check(self, name, lst):\n \"\"\" Takes in a list of strings and a name.\n Returns name cleaned if the cleaned name is not found in lst.\"\"\"\n cleaned = self.clean_name(name)\n\n if cleaned not in lst:\n lst.add(cleaned)\n return cleaned\n else:\n logger.error(\n \"%s name has been created twice. 
Please ensure there \"\n \"are no duplicate names in the dialogflow files and \"\n \"filenames are valid (no spaces or special characters)\",\n cleaned,\n )\n\n def create_mindmeld_init(self):\n with open(\n os.path.join(self.mindmeld_project_directory, \"__init__.py\"), \"w\"\n ) as target:\n begin_info = [\n \"# -*- coding: utf-8 -*-\",\n '\"\"\"This module contains the MindMeld application\"\"\"',\n \"from mindmeld import Application\",\n \"app = Application(__name__)\",\n \"__all__ = ['app']\",\n ]\n\n for info, spacing in zip(begin_info, [1, 2, 1, 1, 0]):\n target.write(info + \"\\n\" * spacing)\n\n intents = self._get_file_names(\"intents\")\n\n for i, main in enumerate(intents.keys()):\n\n df_main = os.path.join(\n self.dialogflow_project_directory, \"intents\", main + \".json\"\n )\n\n with open(df_main) as source:\n if \"usersays\" in df_main:\n logger.error(\n \"Please check if your intent file\"\n \"names are correctly labeled.\"\n )\n\n datastore = json.load(source)\n replies = []\n\n for response in datastore[\"responses\"]:\n for message in response[\"messages\"]:\n language = message[\"lang\"]\n\n if \"speech\" in message:\n data = message[\"speech\"]\n\n replies = data if isinstance(data, list) else [data]\n\n if datastore[\"fallbackIntent\"]:\n function_name = \"default\" + \"_\" + language\n if language == \"en\":\n # TODO: support multiple defaults for languages\n handles = [\n \"default=True\",\n \"intent='unsupported'\",\n ]\n else:\n handles = [\"intent='unsupported'\"]\n else:\n function_name = \"renameMe\" + str(i) + \"_\" + language\n handles = [\n \"intent=\"\n + \"'\"\n + self.clean_name(datastore[\"name\"])\n + \"_usersays_\"\n + language\n + \"'\"\n ]\n\n target.write(\n \"\\n\\n\\n\"\n + self.create_function(\n handles=handles,\n function_name=function_name,\n replies=replies,\n )\n )\n target.write(\"\\n\")\n\n # =========================\n # convert project\n # =========================\n\n def convert_project(self):\n \"\"\" Converts a Dialogflow project into a MindMeld project.\n\n Dialogflow projects consist of entities and intents.\n note on languages:\n Dialogflow supports multiple languages and locales. They store their training\n data for different languages in different files. So, the name of each training\n file ends with a meta tag, two letters long for language, and an additional\n two letters for dialect (if applicable). For example, a file ending in \"_en-au\"\n indicates it's in English (Australia). Below we use \"la\" to represent this\n meta tag.\n\n entities folder contains:\n entityName.json - Meta data about entityName for all languages.\n entityName_entries_la.json - One for each language, contains entitiy mappings.\n\n intents folder contain:\n intentName.json - Contains rules, information about conversation flow, meta data.\n Contains previously mentioned information and responses for all languages.\n intentName_usersays_la.json - one for each language,\n contains training data to recognize intentName\n\n Limitations:\n - The converter is unable to create an entity when it encounters an\n unrecognized entity (an entity not defined under entities folder\n or system entities), and labels such entities as DNE in training data.\n - The converter currently does not automatically convert features like\n slot filling, contexts, and follow-up intents. Users can still implement such\n features and more.\n - Information in agent.json are not copied over.\n - There is no official support for different languages. Users can still\n implement this. 
The converter is able to successfully convert dialogflow\n bots that support multiple languages.\n\n Mindmeld:\n - Users can store data locally\n - Users can build a knowledge base (currently beta in Dialogflow).\n - Users can configure the machine learning models to best suit their needs.\n - Users have more flexibility in defining their own features, including\n ones like slot filling, contexts, and follow-up intents.\n \"\"\"\n\n logger.info(\"Converting project.\")\n\n # Create project directory with sub folders\n self.create_mindmeld_directory()\n\n # Transfer over test data from Dialogflow project and reformat to Mindmeld project\n self.create_mindmeld_training_data()\n file_loc = os.path.dirname(os.path.realpath(__file__))\n\n self.create_config(self.mindmeld_project_directory, file_loc)\n self.create_main(self.mindmeld_project_directory, file_loc)\n self.create_mindmeld_init()\n\n logger.info(\"Project converted.\")\n"
] | [
[
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
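A hypothetical end-to-end invocation of the converter class above; both directory paths are placeholders (the first must point at an unzipped Dialogflow agent export, or the constructor raises FileNotFoundError):

import logging
from mindmeld.converter.dialogflow import DialogflowConverter

logging.basicConfig(level=logging.INFO)

converter = DialogflowConverter(
    dialogflow_project_directory="my_dialogflow_export/",
    mindmeld_project_directory="my_mindmeld_project/",
)
converter.convert_project()  # writes domains/, entities/, config and __init__.py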
offy284/Keras-GAN | [
"6652c626ba584ffd1c25ca4e925e6f131077395c"
] | [
"music_preprocessor/music_preprocessor.py"
] | [
import itertools\nimport shutil\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nfrom tqdm import tqdm\nimport numpy as np\nimport scipy\nfrom scipy.io.wavfile import write, read\nfrom scipy.fftpack import fft\nfrom scipy import signal\nfrom scipy.fft import fftshift\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\n\nRESOLUTION_SCALE = 10\n\n\ndef flatten_dir(dir):\n print(\"Flattening MusicData directory...\")\n all_files = []\n dups = 0\n\n for root, _dirs, files in itertools.islice(os.walk(dir), 1, None):\n try:\n for filename in files:\n all_files.append(os.path.join(root, filename))\n except:\n dups += 1\n for filename in all_files:\n try:\n shutil.move(filename, dir)\n except:\n dups += 1\n\n print(f\"{dups} duplicate files removed\")\n\n\ndef generate_big_music(resolution_scale=RESOLUTION_SCALE):\n print(\"Generating big_music from MusicData directory...\")\n onlyfiles = [f for f in listdir(\"MusicData/\") if isfile(join(\"MusicData/\", f))]\n\n print(\"Normalizing big_music...\")\n square_size = 28 * resolution_scale\n big_music = np.empty((1)) # np.empty((len(onlyfiles), square_size, square_size, 1))\n\n for i in tqdm(range(len(onlyfiles))):\n file = onlyfiles[i]\n if \"-converted\" in file:\n x = scipy.io.wavfile.read(f\"MusicData/{file}\")\n x = x[1]\n\n #big_music = big_music.reshape(-1)\n\n '''\n print(f\"Building spectrogram...\")\n \n plt.specgram(x, Fs=44100)\n plt.savefig(f'MusicImageData/{file}.png')\n \n x = x.reshape(-1, 1)\n\n min_max_scaler = MinMaxScaler()\n x = (min_max_scaler.fit_transform(x) - .5) * 2\n\n samples = list(np.empty((int(x.shape[0] / square_size / square_size), square_size, square_size, 1)))\n rows = np.zeros((square_size, square_size, 1))\n cols = np.zeros((square_size, 1))\n\n for samplei in tqdm(range(len(samples))):\n for yi in range(square_size):\n for xi in range(square_size):\n cols[xi] = x[xi + yi * square_size + samplei * square_size * square_size]\n rows[yi] = cols\n samples[samplei] = rows\n '''\n\n print(\"Numpyifying x...\")\n big_music = np.concatenate([big_music, x])\n\n print(f\"big_music is of shape {big_music.shape}\")\n\n freqs, times, spectrogram = signal.spectrogram(big_music, 44100)\n spectrogram = spectrogram.reshape((spectrogram.shape[1], spectrogram.shape[0]))\n\n print(spectrogram.shape)\n\n filename = f\"spectrogram.npy\"\n print(f\"Saving {filename}...\")\n np.save(f\"{filename}\", spectrogram)\n\n filename = f\"freqs.npy\"\n print(f\"Saving {filename}...\")\n np.save(f\"{filename}\", freqs)\n\n filename = f\"times.npy\"\n print(f\"Saving {filename}...\")\n np.save(f\"{filename}\", times)\n\n\nif __name__ == '__main__':\n print(\"Music Preprocessor v0.1\")\n #flatten_dir()\n generate_big_music()"
] | [
[
"scipy.signal.spectrogram",
"numpy.save",
"numpy.concatenate",
"scipy.io.wavfile.read",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
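A stand-alone sketch of the core spectrogram step from `generate_big_music`, run on one second of synthetic audio instead of the MusicData/ wavs. Note that if the intent of the original `reshape` call is to swap the two axes, `.T` is the safe choice: `reshape` reinterprets the buffer row by row rather than transposing it.

import numpy as np
from scipy import signal

fs = 44100
big_music = np.sin(2 * np.pi * 440 * np.arange(fs) / fs)  # 1 s of a 440 Hz tone

freqs, times, spectrogram = signal.spectrogram(big_music, fs)
print(spectrogram.shape)  # (n_freqs, n_times)

np.save("spectrogram.npy", spectrogram.T)  # transpose swaps axes without scrambling values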
Didou09/tofu | [
"4a4e1f058bab8e7556ed9d518f90807cec605476"
] | [
"tofu/geom/_core_optics.py"
] | [
"\n\"\"\"\nThis module is the geometrical part of the ToFu general package\nIt includes all functions and object classes necessary for tomography on Tokamaks\n\"\"\"\n\n# Built-in\nimport sys\nimport os\nimport warnings\nimport copy\n\n\n# Common\nimport numpy as np\nimport scipy.interpolate as scpinterp\nimport scipy.stats as scpstats\nimport datetime as dtm\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n# ToFu-specific\nfrom tofu import __version__ as __version__\nimport tofu.pathfile as tfpf\nimport tofu.utils as utils\nfrom . import _def as _def\nfrom . import _GG as _GG\nfrom . import _core\nfrom . import _check_optics\nfrom . import _comp_optics as _comp_optics\nfrom . import _plot_optics as _plot_optics\nimport tofu.spectro._rockingcurve as _rockingcurve\n\n\n__all__ = ['CrystalBragg']\n\n\n_Type = 'Tor'\n_NTHREADS = 16\n\n# rotate / translate instance\n_RETURN_COPY = False\n_USE_NON_PARALLELISM = True\n\n\n\"\"\"\n###############################################################################\n###############################################################################\n Ves class and functions\n###############################################################################\n###############################################################################\n\"\"\"\n\n\nclass CrystalBragg(utils.ToFuObject):\n \"\"\" A class defining crystals for Bragg diffraction\n\n A crystal can be of Type flat, cylindrical or spherical\n It is characterized by its:\n - geometry (Type, dimensions, curvature radii and position/orientation)\n - Material and lattice\n - Bragg parameters (angle vs lambda)\n\n\n Parameters\n ----------\n Id : str / tfpf.ID\n A name string or a pre-built tfpf.ID class to be used to identify this\n particular instance, if a string is provided, it is fed to tfpf.ID()\n dgeom : dict\n An array (2,N) or (N,2) defining the contour of the vacuum vessel in a\n cross-section, if not closed, will be closed automatically\n dspectral: str\n Flag indicating whether the vessel will be a torus ('Tor') or a linear\n device ('Lin')\n SavePath : None / str\n If provided, forces the default saving path of the object to the\n provided value\n\n \"\"\"\n\n # Fixed (class-wise) dictionary of default properties\n _ddef = {\n 'Id': {\n 'shot': 0, 'Exp': 'dummy', 'Diag': 'dummy',\n 'include': [\n 'Mod', 'Cls', 'Exp', 'Diag', 'Name', 'shot', 'version',\n ],\n },\n 'dgeom': {'Type': 'sph', 'Typeoutline': 'rect'},\n 'dmat': {},\n 'dbragg': {'braggref': np.pi/4.},\n 'dmisc': {'color': 'k'},\n }\n _dplot = {'cross':{'Elt':'P',\n 'dP':{'color':'k','lw':2},\n 'dI':{'color':'k','ls':'--','marker':'x','ms':8,'mew':2},\n 'dBs':{'color':'b','ls':'--','marker':'x','ms':8,'mew':2},\n 'dBv':{'color':'g','ls':'--','marker':'x','ms':8,'mew':2},\n 'dVect':{'color':'r','scale':10}},\n 'hor':{'Elt':'P',\n 'dP':{'color':'k','lw':2},\n 'dI':{'color':'k','ls':'--'},\n 'dBs':{'color':'b','ls':'--'},\n 'dBv':{'color':'g','ls':'--'},\n 'Nstep':50},\n '3d':{}}\n # _DEFLAMB = 3.971561e-10\n # _DEFNPEAKS = 12\n # _DREFLECT_DTYPES = {'specular':0, 'diffusive':1, 'ccube':2}\n\n\n # Does not exist beofre Python 3.6 !!!\n def __init_subclass__(cls, color='k', **kwdargs):\n # Python 2\n super(CrystalBragg,cls).__init_subclass__(**kwdargs)\n # Python 3\n #super().__init_subclass__(**kwdargs)\n cls._ddef = copy.deepcopy(CrystalBragg._ddef)\n cls._dplot = copy.deepcopy(CrystalBragg._dplot)\n cls._set_color_ddef(cls._color)\n\n @classmethod\n def _set_color_ddef(cls, color):\n cls._ddef['dmisc']['color'] = 
mpl.colors.to_rgba(color)\n\n def __init__(self, dgeom=None, dmat=None, dbragg=None,\n Id=None, Name=None, Exp=None, Diag=None, shot=None,\n fromdict=None, sep=None,\n SavePath=os.path.abspath('./'),\n SavePath_Include=tfpf.defInclude, color=None):\n\n # To replace __init_subclass__ for Python 2\n if sys.version[0]=='2':\n self._dstrip = utils.ToFuObjectBase._dstrip.copy()\n self.__class__._strip_init()\n\n # Create a dplot at instance level\n self._dplot = copy.deepcopy(self.__class__._dplot)\n\n kwdargs = locals()\n del kwdargs['self']\n # super()\n super(CrystalBragg,self).__init__(**kwdargs)\n\n def _reset(self):\n # super()\n super(CrystalBragg,self)._reset()\n self._dgeom = dict.fromkeys(self._get_keys_dgeom())\n self._dmat = dict.fromkeys(self._get_keys_dmat())\n self._dbragg = dict.fromkeys(self._get_keys_dbragg())\n self._dmisc = dict.fromkeys(self._get_keys_dmisc())\n #self._dplot = copy.deepcopy(self.__class__._ddef['dplot'])\n\n @classmethod\n def _checkformat_inputs_Id(cls, Id=None, Name=None,\n Exp=None, Diag=None, shot=None, Type=None,\n include=None,\n **kwdargs):\n if Id is not None:\n assert isinstance(Id,utils.ID)\n Name, Exp, Type = Id.Name, Id.Exp, Id.Type\n if Type is None:\n Type = cls._ddef['dgeom']['Type']\n if Exp is None:\n Exp = cls._ddef['Id']['Exp']\n if Diag is None:\n Diag = cls._ddef['Id']['Diag']\n if shot is None:\n shot = cls._ddef['Id']['shot']\n if include is None:\n include = cls._ddef['Id']['include']\n\n dins = {'Name':{'var':Name, 'cls':str},\n 'Exp': {'var':Exp, 'cls':str},\n 'Diag': {'var':Diag, 'cls':str},\n 'shot': {'var':shot, 'cls':int},\n 'Type': {'var':Type, 'in':['sph']},\n 'include':{'var':include, 'listof':str}}\n dins, err, msg = cls._check_InputsGeneric(dins)\n if err:\n raise Exception(msg)\n\n kwdargs.update({'Name':Name, 'shot':shot,\n 'Exp':Exp, 'Diag':Diag, 'Type':Type,\n 'include':include})\n return kwdargs\n\n ###########\n # Get largs\n ###########\n\n @staticmethod\n def _get_largs_dgeom(sino=True):\n largs = ['dgeom']\n return largs\n\n @staticmethod\n def _get_largs_dmat():\n largs = ['dmat']\n return largs\n\n @staticmethod\n def _get_largs_dbragg():\n largs = ['dbragg']\n return largs\n\n @staticmethod\n def _get_largs_dmisc():\n largs = ['color']\n return largs\n\n ###########\n # Get keys of dictionnaries\n ###########\n\n @staticmethod\n def _get_keys_dgeom():\n lk = ['Type', 'Typeoutline',\n 'summit', 'center', 'extenthalf', 'surface',\n 'nin', 'nout', 'e1', 'e2', 'rcurve',\n 'move', 'move_param', 'move_kwdargs']\n return lk\n\n @staticmethod\n def _get_keys_dmat():\n lk = ['formula', 'density', 'symmetry',\n 'lengths', 'angles', 'cut', 'd',\n 'alpha', 'beta', 'nin', 'nout', 'e1', 'e2']\n return lk\n\n @staticmethod\n def _get_keys_dbragg():\n lk = ['rockingcurve', 'lambref', 'braggref']\n return lk\n\n @staticmethod\n def _get_keys_dmisc():\n lk = ['color']\n return lk\n\n ###########\n # _init\n ###########\n\n def _init(self, dgeom=None, dmat=None, dbragg=None,\n color=None, **kwdargs):\n allkwds = dict(locals(), **kwdargs)\n largs = self._get_largs_dgeom()\n kwds = self._extract_kwdargs(allkwds, largs)\n self.set_dgeom(**kwds)\n largs = self._get_largs_dmat()\n kwds = self._extract_kwdargs(allkwds, largs)\n self.set_dmat(**kwds)\n largs = self._get_largs_dbragg()\n kwds = self._extract_kwdargs(allkwds, largs)\n self.set_dbragg(**kwds)\n largs = self._get_largs_dmisc()\n kwds = self._extract_kwdargs(allkwds, largs)\n self._set_dmisc(**kwds)\n self._dstrip['strip'] = 0\n\n ###########\n # set dictionaries\n 
###########\n\n def set_dgeom(self, dgeom=None):\n self._dgeom = _check_optics._checkformat_dgeom(\n dgeom=dgeom, ddef=self._ddef['dgeom'],\n valid_keys=self._get_keys_dgeom(),\n )\n if self._dgeom['move'] is not None:\n self.set_move(\n move=self._dgeom['move'],\n param=self._dgeom['move_param'],\n **self._dgeom['move_kwdargs'],\n )\n\n def set_dmat(self, dmat=None):\n self._dmat = _check_optics._checkformat_dmat(\n dmat=dmat, dgeom=self._dgeom,\n ddef=self._ddef['dmat'],\n valid_keys=self._get_keys_dmat()\n )\n\n def set_dbragg(self, dbragg=None):\n self._dbragg = _check_optics._checkformat_dbragg(\n dbragg=dbragg,\n ddef=self._ddef['dbragg'],\n valid_keys=self._get_keys_dbragg(),\n dmat=self._dmat,\n )\n\n def _set_color(self, color=None):\n color = _check_optics._checkformat_inputs_dmisc(\n color=color, ddef=self._ddef,\n )\n self._dmisc['color'] = color\n self._dplot['cross']['dP']['color'] = color\n self._dplot['hor']['dP']['color'] = color\n # self._dplot['3d']['dP']['color'] = color\n\n def _set_dmisc(self, color=None):\n self._set_color(color)\n\n ###########\n # strip dictionaries\n ###########\n\n def _strip_dgeom(self, lkeep=None):\n lkeep = self._get_keys_dgeom()\n utils.ToFuObject._strip_dict(self._dgeom, lkeep=lkeep)\n\n def _strip_dmat(self, lkeep=None):\n lkeep = self._get_keys_dmat()\n utils.ToFuObject._strip_dict(self._dmat, lkeep=lkeep)\n\n def _strip_dbragg(self, lkeep=None):\n lkeep = self._get_keys_dbragg()\n utils.ToFuObject._strip_dict(self._dbragg, lkeep=lkeep)\n\n def _strip_dmisc(self, lkeep=['color']):\n utils.ToFuObject._strip_dict(self._dmisc, lkeep=lkeep)\n\n ###########\n # rebuild dictionaries\n ###########\n\n def _rebuild_dgeom(self, lkeep=None):\n lkeep = self._get_keys_dgeom()\n reset = utils.ToFuObject._test_Rebuild(self._dgeom, lkeep=lkeep)\n if reset:\n utils.ToFuObject._check_Fields4Rebuild(self._dgeom,\n lkeep=lkeep, dname='dgeom')\n self._set_dgeom(dgeom=self._dgeom)\n\n def _rebuild_dmat(self, lkeep=None):\n lkeep = self._get_keys_dmat()\n reset = utils.ToFuObject._test_Rebuild(self._dmat, lkeep=lkeep)\n if reset:\n utils.ToFuObject._check_Fields4Rebuild(self._dmat,\n lkeep=lkeep, dname='dmat')\n self.set_dmat(self._dmat)\n\n def _rebuild_dbragg(self, lkeep=None):\n lkeep = self._get_keys_dbragg()\n reset = utils.ToFuObject._test_Rebuild(self._dbragg, lkeep=lkeep)\n if reset:\n utils.ToFuObject._check_Fields4Rebuild(self._dbragg,\n lkeep=lkeep, dname='dbragg')\n self.set_dbragg(self._dbragg)\n\n def _rebuild_dmisc(self, lkeep=['color']):\n reset = utils.ToFuObject._test_Rebuild(self._dmisc, lkeep=lkeep)\n if reset:\n utils.ToFuObject._check_Fields4Rebuild(self._dmisc,\n lkeep=lkeep, dname='dmisc')\n self._set_dmisc(color=self.dmisc['color'])\n\n ###########\n # _strip and get/from dict\n ###########\n\n @classmethod\n def _strip_init(cls):\n cls._dstrip['allowed'] = [0,1]\n nMax = max(cls._dstrip['allowed'])\n doc = \"\"\"\n 1: Remove nothing\"\"\"\n doc = utils.ToFuObjectBase.strip.__doc__.format(doc,nMax)\n if sys.version[0]=='2':\n cls.strip.__func__.__doc__ = doc\n else:\n cls.strip.__doc__ = doc\n\n def strip(self, strip=0):\n # super()\n super(CrystalBragg, self).strip(strip=strip)\n\n def _strip(self, strip=0):\n if strip==0:\n self._rebuild_dgeom()\n self._rebuild_dmat()\n self._rebuild_dbragg()\n self._rebuild_dmisc()\n else:\n self._strip_dgeom()\n self._strip_dmat()\n self._strip_dbragg()\n self._strip_dmisc()\n\n def _to_dict(self):\n dout = {'dgeom':{'dict':self._dgeom, 'lexcept':None},\n 'dmat':{'dict':self._dmat, 
'lexcept':None},\n 'dbragg':{'dict':self._dbragg, 'lexcept':None},\n 'dmisc':{'dict':self._dmisc, 'lexcept':None},\n 'dplot':{'dict':self._dplot, 'lexcept':None}}\n return dout\n\n def _from_dict(self, fd):\n self._dgeom.update(**fd.get('dgeom', {}))\n self._dmat.update(**fd.get('dmat', {}))\n self._dbragg.update(**fd.get('dbragg', {}))\n self._dmisc.update(**fd.get('dmisc', {}))\n self._dplot.update(**fd.get('dplot', {}))\n\n # -----------\n # Properties\n # -----------\n\n @property\n def Type(self):\n \"\"\"Return the type of structure \"\"\"\n return self._Id.Type\n\n @property\n def dgeom(self):\n return self._dgeom\n\n @property\n def dmat(self):\n \"\"\"Return the polygon defining the structure cross-section\"\"\"\n return self._dmat\n\n @property\n def dbragg(self):\n \"\"\"Return the polygon defining the structure cross-section\"\"\"\n return self._dbragg\n\n @property\n def dmisc(self):\n return self._dmisc\n\n # @property\n # def nin(self):\n # return self._dgeom['nin']\n\n # @property\n # def nout(self):\n # return self._dgeom['nout']\n\n # @property\n # def e1(self):\n # return self._dgeom['e1']\n\n # @property\n # def e2(self):\n # return self._dgeom['e2']\n\n @property\n def summit(self):\n return self._dgeom['summit']\n\n @property\n def center(self):\n return self._dgeom['center']\n\n @property\n def ismobile(self):\n return self._dgeom['move'] not in [None, False]\n\n @property\n def rockingcurve(self):\n if self._dbragg.get('rockingcurve') is not None:\n if self._dbragg['rockingcurve'].get('type') is not None:\n return self._dbragg['rockingcurve']\n raise Exception(\"rockingcurve was not set!\")\n\n # --------------------------------------\n # methods for getting unit vectors basis\n # --------------------------------------\n\n def get_unit_vectors(self, use_non_parallelism=None):\n \"\"\" Return the unit vectors (direct orthonormal basis)\n\n Depending on:\n use_non_parallelism: True => return the geometrical basis\n use_non_parallelism: False => return the mesh basis\n\n \"\"\"\n if use_non_parallelism is None:\n use_non_parallelism = _USE_NON_PARALLELISM\n\n if use_non_parallelism is True:\n nout = self._dmat['nout']\n e1 = self._dmat['e1']\n e2 = self._dmat['e2']\n else:\n nout = self._dgeom['nout']\n e1 = self._dgeom['e1']\n e2 = self._dgeom['e2']\n return nout, e1, e2, use_non_parallelism\n\n # -----------------\n # methods for color\n # -----------------\n\n def set_color(self, col):\n self._set_color(col)\n\n def get_color(self):\n return self._dmisc['color']\n\n # -----------------\n # methods for printing\n # -----------------\n\n def get_summary(self, sep=' ', line='-', just='l',\n table_sep=None, verb=True, return_=False):\n \"\"\" Summary description of the object content \"\"\"\n\n # -----------------------\n # Build material\n col0 = [\n 'formula', 'symmetry', 'cut', 'density',\n 'd (A)',\n 'bragg({:9.6} A) (deg)'.format(self._dbragg['lambref']*1e10),\n 'Type', 'outline', 'surface (cm²)', 'rcurve', 'rocking curve',\n ]\n ar0 = [self._dmat['formula'], self._dmat['symmetry'],\n str(self._dmat['cut']), str(self._dmat['density']),\n '{0:5.3f}'.format(self._dmat['d']*1.e10),\n str(self._dbragg['braggref']*180./np.pi),\n self._dgeom['Type'], self._dgeom['Typeoutline'],\n '{0:5.1f}'.format(self._dgeom['surface']*1.e4),\n '{0:6.3f}'.format(self._dgeom['rcurve'])]\n try:\n ar0.append(self.rockingcurve['type'])\n except Exception as err:\n ar0.append('None')\n\n\n # -----------------------\n # Build geometry\n col1 = ['half-extent', 'summit', 'center', 'nout', 
'e1',\n 'alpha', 'beta']\n ar1 = [\n str(np.round(self._dgeom['extenthalf'], decimals=3)),\n str(np.round(self._dgeom['summit'], decimals=2)),\n str(np.round(self._dgeom['center'], decimals=2)),\n str(np.round(self._dmat['nout'], decimals=3)),\n str(np.round(self._dmat['e1'], decimals=3)),\n str(np.round(self._dmat['alpha'], decimals=6)),\n str(np.round(self._dmat['beta'], decimals=6)),\n ]\n if self._dgeom.get('move') not in [None, False]:\n col1 += ['move', 'param']\n ar1 += [self._dgeom['move'],\n str(np.round(self._dgeom['move_param'], decimals=5))]\n\n if self._dmisc.get('color') is not None:\n col1.append('color')\n ar1.append(str(self._dmisc['color']))\n\n lcol = [col0, col1]\n lar = [ar0, ar1]\n return self._get_summary(lar, lcol,\n sep=sep, line=line, table_sep=table_sep,\n verb=verb, return_=return_)\n\n # -----------------\n # methods for moving\n # -----------------\n\n def _update_or_copy(self, dgeom, pinhole=None,\n return_copy=None,\n name=None, diag=None, shot=None):\n if return_copy is None:\n return_copy = _RETURN_COPY\n for kk, vv in self._dgeom.items():\n if kk not in dgeom.keys():\n dgeom[kk] = vv\n if return_copy is True:\n if name is None:\n name = self.Id.Name + 'copy'\n if diag is None:\n diag = self.Id.Diag\n if shot is None:\n shot = self.Id.shot\n return self.__class__(dgeom=dgeom,\n dbragg=self._dbragg,\n dmat=self._dmat,\n color=self._dmisc['color'],\n Exp=self.Id.Exp,\n Diag=diag,\n Name=name,\n shot=shot,\n SavePath=self.Id.SavePath)\n else:\n dgeom0 = self.dgeom\n try:\n self.set_dgeom(dgeom=dgeom)\n self._dmat = _check_optics._checkformat_dmat(\n dmat={\n k0: v0 for k0, v0 in self._dmat.items()\n if k0 not in ['nin', 'nout', 'e1', 'e2']\n },\n dgeom=self._dgeom,\n ddef=self._ddef['dmat'],\n valid_keys=self._get_keys_dmat()\n )\n except Exception as err:\n # Make sure instance does not move\n self.set_dgeom(dgeom=dgeom0)\n msg = (str(err)\n + "\nAn exception occurred during updating\n"\n + " => instance unmoved")\n raise Exception(msg)\n\n def _rotate_or_translate(self, func, **kwdargs):\n pts = np.array([self._dgeom['summit'], self._dgeom['center']]).T\n if 'rotate' in func.__name__:\n vect = np.array([\n self._dgeom['nout'],\n self._dgeom['e1'],\n self._dgeom['e2']\n ]).T\n pts, vect = func(pts=pts, vect=vect, **kwdargs)\n return {'summit': pts[:, 0], 'center': pts[:, 1],\n 'nout': vect[:, 0], 'nin': -vect[:, 0],\n 'e1': vect[:, 1], 'e2': vect[:, 2]}\n else:\n pts = func(pts=pts, **kwdargs)\n return {'summit': pts[:, 0], 'center': pts[:, 1]}\n\n def translate_in_cross_section(self, distance=None, direction_rz=None,\n phi=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n """ Translate the instance in the cross-section """\n if phi is None:\n phi = np.arctan2(*self.summit[1::-1])\n msg = ("Poloidal plane was not explicitly specified\n"\n + " => phi set to self.summit's phi ({})".format(phi))\n warnings.warn(msg)\n dgeom = self._rotate_or_translate(\n self._translate_pts_poloidal_plane,\n phi=phi, direction_rz=direction_rz, distance=distance)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n def translate_3d(self, distance=None, direction=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n """ Translate the instance in provided direction """\n dgeom = self._rotate_or_translate(\n self._translate_pts_3d,\n direction=direction, distance=distance)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n 
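# Usage sketch (hypothetical values): translate a copy of the instance\n # 1 cm vertically, leaving the original unchanged:\n # cryst2 = cryst.translate_3d(distance=0.01, direction=[0., 0., 1.],\n # return_copy=True)\n\n 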
def rotate_in_cross_section(self, angle=None, axis_rz=None,\n phi=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n """ Rotate the instance in the cross-section """\n if phi is None:\n phi = np.arctan2(*self.summit[1::-1])\n msg = ("Poloidal plane was not explicitly specified\n"\n + " => phi set to self.summit's phi ({})".format(phi))\n warnings.warn(msg)\n dgeom = self._rotate_or_translate(\n self._rotate_pts_vectors_in_poloidal_plane,\n axis_rz=axis_rz, angle=angle, phi=phi)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n def rotate_around_torusaxis(self, angle=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n """ Rotate the instance around the torus axis """\n dgeom = self._rotate_or_translate(\n self._rotate_pts_vectors_around_torusaxis,\n angle=angle)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n def rotate_around_3daxis(self, angle=None, axis=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n """ Rotate the instance around the provided 3d axis """\n dgeom = self._rotate_or_translate(\n self._rotate_pts_vectors_around_3daxis,\n axis=axis, angle=angle)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n def set_move(self, move=None, param=None, **kwdargs):\n """ Set the default movement parameters\n\n A default movement can be set for the instance, it can be any of the\n pre-implemented movements (rotations or translations)\n This default movement is the one that will be called when using\n self.move()\n\n Specify the type of movement via the name of the method (passed as a\n str to move)\n\n Specify, for the geometry of the instance at the time of defining this\n default movement, the current value of the associated movement\n parameter (angle / distance). This is used to set an arbitrary\n reference for users who want to use absolute position values\n The desired incremental movement to be performed when calling self.move\n will be deduced by subtracting the stored param value from the provided\n param value. 
Just set the current param value to 0 if you don't care\n about a custom absolute reference.\n\n kwdargs must be a parameters relevant to the chosen method (axis,\n direction...)\n\n e.g.:\n self.set_move(move='rotate_around_3daxis',\n param=0.,\n axis=([0.,0.,0.], [1.,0.,0.]))\n self.set_move(move='translate_3d',\n param=0.,\n direction=[0.,1.,0.])\n \"\"\"\n move, param, kwdargs = self._checkformat_set_move(move, param, kwdargs)\n self._dgeom['move'] = move\n self._dgeom['move_param'] = param\n if isinstance(kwdargs, dict) and len(kwdargs) == 0:\n kwdargs = None\n self._dgeom['move_kwdargs'] = kwdargs\n\n def move(self, param):\n \"\"\" Set new position to desired param according to default movement\n\n Can only be used if default movement was set before\n See self.set_move()\n \"\"\"\n param = self._move(param, dictname='_dgeom')\n self._dgeom['move_param'] = param\n\n # -----------------\n # methods for rocking curve\n # -----------------\n\n def get_rockingcurve_func(self, lamb=None, n=None):\n \"\"\" Return the rocking curve function\n\n Also return the wavelength (lamb) (in meters) for which it was computed\n and the associated reference bragg angle (in rad)\n\n \"\"\"\n drock = self.rockingcurve\n if drock['type'] == 'tabulated-1d':\n if lamb is not None and lamb != drock['lamb']:\n msg = (\"rocking curve was tabulated only for:\\n\"\n + \"\\tlamb = {} m\\n\".format(lamb)\n + \" => Please let lamb=None\")\n raise Exception(msg)\n lamb = drock['lamb']\n bragg = self._checkformat_bragglamb(lamb=lamb, n=n)\n func = scpinterp.interp1d(drock['dangle'] + bragg, drock['value'],\n kind='linear', bounds_error=False,\n fill_value=0, assume_sorted=True)\n\n elif drock['type'] == 'tabulated-2d':\n lmin, lmax = drock['lamb'].min(), drock['lamb'].max()\n if lamb is None:\n lamb = drock['lamb']\n if lamb < lmin or lamb > lmax:\n msg = (\"rocking curve was tabulated only in interval:\\n\"\n + \"\\tlamb in [{}; {}] m\\n\".format(lmin, lmax)\n + \" => Please set lamb accordingly\")\n raise Exception(msg)\n bragg = self._checkformat_bragglamb(lamb=lamb, n=n)\n\n def func(angle, lamb=lamb, bragg=bragg, drock=drock):\n return scpinterp.interp2d(drock['dangle']+bragg, drock['lamb'],\n drock['value'], kind='linear',\n bounds_error=False, fill_value=0,\n assume_sorted=True)(angle, lamb)\n\n else:\n # TBC\n raise NotImplementedError\n def func(angle, d=d, delta_bragg=delta_bragg,\n Rmax=drock['Rmax'], sigma=drock['sigma']):\n core = sigma**2/((angle - (bragg+delta_bragg))**2 + sigma**2)\n if Rmax is None:\n return core/(sigma*np.pi)\n else:\n return Rmax*core\n return func, lamb, bragg\n\n def plot_rockingcurve(self, lamb=None, n=None, sigma=None,\n npts=None, color=None, ang_units=None,\n dmargin=None, fs=None, ax=None, legend=None):\n drock = self.rockingcurve\n func, lamb, bragg = self.get_rockingcurve_func(lamb=lamb, n=n)\n axtit = 'Rocking curve for ' + self.Id.Name\n return _plot_optics.CrystalBragg_plot_rockingcurve(\n func=func, bragg=bragg, lamb=lamb,\n sigma=sigma, npts=npts,\n ang_units=ang_units, axtit=axtit, color=color,\n fs=fs, ax=ax, legend=legend)\n\n def compute_rockingcurve(\n self, ih=None, ik=None, il=None, lamb=None,\n use_non_parallelism=None, na=None,\n alpha_limits=None,\n therm_exp=None, plot_therm_exp=None,\n plot_asf=None, plot_power_ratio=None,\n plot_asymmetry=None, plot_cmaps=None,\n verb=None, returnas=None,\n ):\n return _rockingcurve.compute_rockingcurve(\n ih=ih, ik=ik, il=il, lamb=lamb,\n use_non_parallelism=use_non_parallelism, na=na,\n alpha_limits=alpha_limits,\n 
therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,\n plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,\n plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,\n verb=verb, returnas=returnas,\n )\n\n def plot_var_temp_changes_wavelengths(\n self, ih=None, ik=None, il=None, lambdas=None,\n use_non_parallelism=None, na=None,\n alpha_limits=None,\n therm_exp=None, plot_therm_exp=None,\n plot_asf=None, plot_power_ratio=None,\n plot_asymmetry=None, plot_cmaps=None,\n quantity=None,\n curv_radius=None, pixel_size=None,\n ):\n return _rockingcurve.plot_var_temp_changes_wavelengths(\n ih=ih, ik=ik, il=il, lambdas=lambdas,\n use_non_parallelism=use_non_parallelism, na=na,\n alpha_limits=alpha_limits,\n therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,\n plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,\n plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,\n quantity=quantity,\n curv_radius=curv_radius, pixel_size=pixel_size,\n )\n\n # -----------------\n # methods for surface and contour sampling\n # -----------------\n\n def sample_outline_plot(self, use_non_parallelism=None, res=None):\n if self._dgeom['Type'] == 'sph':\n if self._dgeom['Typeoutline'] == 'rect':\n nout, e1, e2, use_non_parallelism = self.get_unit_vectors(\n use_non_parallelism=use_non_parallelism,\n )\n outline = _comp_optics.CrystBragg_sample_outline_plot_sphrect(\n self._dgeom['summit'] - nout*self._dgeom['rcurve'],\n nout,\n e1,\n e2,\n self._dgeom['rcurve'],\n self._dgeom['extenthalf'],\n res,\n )\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError\n return outline\n\n # -----------------\n # methods for checking inputs\n # -----------------\n\n def _checkformat_bragglamb(self, bragg=None, lamb=None, n=None):\n lc = [lamb is not None, bragg is not None]\n if not any(lc):\n lamb = self._dbragg['lambref']\n lc[0] = True\n assert np.sum(lc) == 1, "Provide lamb xor bragg!"\n if lc[0]:\n bragg = self.get_bragg_from_lamb(\n np.atleast_1d(lamb), n=n,\n )\n else:\n bragg = np.atleast_1d(bragg)\n return bragg\n\n def _checkformat_get_Rays_from(self, phi=None, bragg=None):\n assert phi is not None\n assert bragg is not None\n bragg = np.atleast_1d(bragg)\n phi = np.atleast_1d(phi)\n nrays = max(phi.size, bragg.size)\n if not phi.shape == bragg.shape:\n if phi.size == 1:\n phi = np.full(bragg.shape, phi[0])\n elif bragg.size == 1:\n bragg = np.full(phi.shape, bragg[0])\n else:\n msg = "phi and bragg/lamb must have the same shape!\n"\n msg += " phi.shape: %s\n"%str(phi.shape)\n msg += " bragg/lamb.shape: %s\n"%str(bragg.shape)\n raise Exception(msg)\n return phi, bragg\n\n def _get_rays_from_cryst(\n self,\n phi=None, bragg=None,\n lamb=None, n=None,\n dtheta=None, psi=None,\n ntheta=None, npsi=None,\n use_non_parallelism=None,\n include_summit=None,\n grid=None,\n ):\n\n # Get phi, bragg\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb)\n phi, bragg = self._checkformat_get_Rays_from(phi=phi, bragg=bragg)\n # assert phi.ndim == 1\n\n # Get local summits, nout, e1, e2\n pts_start, nout, e1, e2 = self.get_local_noute1e2(\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n )\n nin = -nout\n # reshape for broadcast\n if grid is True:\n nin = nin[..., None]\n e1 = e1[..., None]\n e2 = e2[..., None]\n else:\n assert bragg.shape == nin.shape[1:]\n\n # Compute start point (D) and unit vectors (us)\n vect = (\n np.sin(bragg)*nin\n + np.cos(bragg)*(np.cos(phi)*e1 + np.sin(phi)*e2)\n )\n return pts_start, 
vect\n\n def get_rays_from_cryst(\n self,\n phi=None, bragg=None,\n lamb=None, n=None,\n dtheta=None, psi=None,\n use_non_parallelism=None,\n ntheta=None, npsi=None,\n include_summit=None,\n det=None, config=None, length=None,\n returnas=None,\n return_xixj=None,\n grid=None,\n ):\n \"\"\" Return rays stemming from the crystal\n\n The rays are defined by a start point (on the crystal surface) and\n either an end point or a unit vector\n\n Start points\n ------------\n The start point is the crystal summit by default\n But that can be changed using:\n - ('dtheta', 'psi'): can be arbitrary but with same shape\n up to 4 dimensions\n - ('ntheta', 'npsi', 'include_summit'): will be used to\n compute the envelop (contour) of the crystal, as 2 1d arrays\n\n These arguments are fed to self.get_local_noute1e2() which will compute\n the start points and return them as shape (3, psi.shape)\n\n End point or unit vector\n ------------------------\n End point are computed automatically if:\n - 'config' is provided: ray-tracing is done like for any camera\n - 'det' is provided: xi and xj can be computed\n\n Returning format\n ----------------\n\n The rays can be returned as:\n - '(pts, vect, length)': a tuple of:\n - pts: array of start points on the crystal\n (only the summit by default)\n - vect: array\n - length:\n - '(pts, vect)': a tuple with only pts and vect\n - 'pts': a tuple, where both start and end points are returned\n All arrays represent (X, Y, Z) cartesian coordinates in the tokamak's\n frame\n\n Optionally, can return the (xi, xj) coordinates of points if a detector\n (det) is provided.\n\n \"\"\"\n\n # -----------\n # Check input\n if returnas is None:\n returnas = 'pts'\n if return_xixj is None:\n return_xixj = False\n\n lret = ['(pts, vect, length)', '(pts, vect)', 'pts'] # , object]\n if returnas not in lret:\n msg = (\n \"Arg returnas must be in:\\n\"\n + \"\\t- '(pts, vect, length)': starting points, unit vector,\"\n + \" length\\n\"\n + \"\\t- 'pts': starting and ending points\\n\"\n # + \"\\t- object: CamLOS1D instance\\n\"\n )\n raise Exception(msg)\n\n det = self._checkformat_det(det)\n if length is None:\n length = 10.\n\n if grid is None:\n try:\n grid = bragg.shape != dtheta.shape\n except Exception as err:\n grid = True\n\n # -----------\n # Get starting point and vectors\n pts_start, vect = self._get_rays_from_cryst(\n phi=phi, bragg=bragg,\n lamb=lamb, n=n,\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n grid=grid,\n )\n\n if returnas == '(pts, vect)':\n return pts_start, vect\n\n # -----------\n # Get length (minimum between conf, det, length)\n vshape = vect.shape\n dk = {\n k0: np.full(vshape[1:], np.nan)\n for k0 in ['config', 'det', 'length']\n }\n xi, xj = None, None\n if config is not None:\n # Here insert ray-tracing from config!\n if vshape != pts_start.shape:\n if len(vshape) == 3 and len(pts_start.shape) == 2:\n D = np.reshape(\n np.repeat(pts_start[..., None], vshape[-1], axis=-1),\n (3, -1),\n )\n u = vect.reshape((3, -1))\n else:\n msg = (\n \"Not treated case!\\n\"\n f\"\\t- pts_start.shape: {pts_start.shape}\\n\"\n f\"\\t- vect.shape: {vshape}\\n\"\n )\n raise Exception(msg)\n else:\n if len(vshape) > 2:\n D = pts_start.reshape((3, -1))\n u = vect.reshape((3, -1))\n else:\n D = pts_start\n u = vect\n\n rays = _core.Rays(\n dgeom=(D, u),\n config=config,\n strict=False,\n Name='dummy',\n Diag='dummy',\n Exp='dummy',\n )\n if u.shape != vshape:\n kout = 
rays.dgeom['kOut'].reshape(vshape[1:])\n else:\n kout = rays.dgeom['kOut']\n dk['config'] = kout\n\n if det is not None and det is not False:\n shape = tuple([3] + [1 for ii in range(vect.ndim-1)])\n cent = det['cent'].reshape(shape)\n nout = det['nout'].reshape(shape)\n if grid is True:\n k = (\n np.sum((cent-pts_start[..., None])*nout, axis=0)\n / np.sum(vect*nout, axis=0)\n )\n else:\n k = (\n np.sum((cent-pts_start)*nout, axis=0)\n / np.sum(vect*nout, axis=0)\n )\n dk['det'][k >= 0.] = k[k >= 0.]\n if return_xixj is True:\n if grid:\n pts_end = pts_start[..., None] + dk['det'][None, ...]*vect\n else:\n pts_end = pts_start + dk['det'][None, ...]*vect\n ei = det['ei'].reshape(shape)\n ej = det['ej'].reshape(shape)\n xi = np.sum((pts_end - cent)*ei, axis=0)\n xj = np.sum((pts_end - cent)*ej, axis=0)\n\n if length is not None:\n dk['length'][:] = length\n\n k = np.nanmin([vv for vv in dk.values() if vv is not None], axis=0)\n\n # -----------\n # return\n if returnas == 'pts':\n if grid:\n pts_end = pts_start[..., None] + k[None, ...]*vect\n if return_xixj:\n return pts_start, pts_end, xi, xj\n else:\n return pts_start, pts_end\n else:\n pts_end = pts_start + k[None, ...]*vect\n if return_xixj:\n return pts_start, pts_end, xi, xj\n else:\n return pts_start, pts_end\n elif returnas == '(pts, vect, length)':\n if return_xixj:\n return pts_start, vect, k, xi, xj\n else:\n return pts_start, vect, k\n\n # -----------------\n # methods for crystal splitting\n # -----------------\n\n def split(self, direction=None, nb=None):\n\n # ------------\n # check inputs\n if direction is None:\n direction = 'e1'\n if direction not in ['e1', 'e2']:\n msg = (\n \"Arg direction must be either:\\n\"\n \"\\t- 'e1': split along vector 'e1' (~horizontally)\\n\"\n \"\\t- 'e2': split along vector 'e2' (~vertically)\\n\"\n f\"You provided: {direction}\"\n )\n raise Exception(msg)\n\n if nb is None:\n nb = 2\n if not (isinstance(nb, int) and nb > 1):\n msg = (\n \"Arg nb must be a int > 1 !\\n\"\n \"It specifies the number of equal parts desired\\n\"\n f\"You provided: {nb}\"\n )\n raise Exception(msg)\n\n # ---------------\n # split\n\n edges = np.linspace(-1, 1, nb+1)\n mid = 0.5*(edges[1:] + edges[:-1])[None, :]\n if direction == 'e2':\n dtheta = mid*self._dgeom['extenthalf'][1]\n psi = np.zeros((1, nb), dtype=float)\n extenthalf = [\n self._dgeom['extenthalf'][0],\n self._dgeom['extenthalf'][1]/nb,\n ]\n else:\n dtheta = np.zeros((1, nb), dtype=float)\n psi = mid*self._dgeom['extenthalf'][0]\n extenthalf = [\n self._dgeom['extenthalf'][0]/nb,\n self._dgeom['extenthalf'][1],\n ]\n\n nouts = (\n np.cos(dtheta)*(\n self._dgeom['nout'][:, None]*np.cos(psi)\n + self._dgeom['e1'][:, None]*np.sin(psi)\n )\n + np.sin(dtheta)*self._dgeom['e2'][:, None]\n )\n e1s = (\n -self._dgeom['nout'][:, None]*np.sin(psi)\n + self._dgeom['e1'][:, None]*np.cos(psi)\n )\n e2s = np.array([\n nouts[1, :]*e1s[2, :] - nouts[2, :]*e1s[1, :],\n nouts[2, :]*e1s[0, :] - nouts[0, :]*e1s[2, :],\n nouts[0, :]*e1s[1, :] - nouts[1, :]*e1s[0, :],\n\n ])\n\n # -----------\n # Construct list of instances\n\n lobj = [\n self.__class__(\n dgeom={\n 'rcurve': self._dgeom['rcurve'],\n 'center': self._dgeom['center'],\n 'nout': nouts[:, ii],\n 'e1': e1s[:, ii],\n 'e2': e2s[:, ii],\n 'extenthalf': extenthalf,\n },\n dmat={\n k0: v0 for k0, v0 in self._dmat.items()\n if k0 not in ['nin', 'nout', 'e1', 'e2']\n },\n dbragg=dict(self._dbragg),\n Name=f\"{self.Id.Name}{ii}\",\n Exp=self.Id.Exp,\n )\n for ii in range(nb)\n ]\n\n return lobj\n\n\n\n # 
-----------------\n # methods for general plotting\n # -----------------\n\n def plot(\n self, dcryst=None,\n phi=None, bragg=None, lamb=None, pts=None,\n n=None, config=None, det=None, length=None,\n dtheta=None, psi=None,\n ntheta=None, npsi=None,\n include_summit=None,\n dax=None, proj=None, res=None, element=None,\n color=None, ddet=None,\n dleg=None, draw=True, dmargin=None,\n use_non_parallelism=None, grid=None,\n rays_npts=None, rays_color=None,\n fs=None, wintit=None, tit=None,\n ):\n \"\"\" Plot the crystal in desired projeection\n\n The projection is 3d, cross-section or horizontal\n Optionaly add rays reflected on cryst at:\n - lamb / phi: desired wavelength and incidence angle\n and either:\n - psi, dtheta : desired pts on the crystal surface\n - pts: emitted from desired pts (e.g.: in the plasma)\n (need to be refresh with get_rays_from_cryst method\n if new pts are wanted)\n\n Parameters\n ----------\n dax: None / dict\n dict of axes to be used, with keys:\n - 'cross': axe where to plot cross-section view\n - 'hor': axe where to plot horizontal (from top) view\n - '3d': axe where to plot 3d view\n if None, a new figure and axes are created\n proj: None / str\n key indicating which plot to make:\n - 'cross': cross-section projection\n - 'hor': horizontal projection\n - 'all': cross-section + horizontal view\n - '3d': 3d view\n element: None / str\n char string where each letter indicates an element to plot\n - 'o': outline (edges of crystal)\n - 's': summit (geometrical center of the crystal)\n - 'c': center (of the sphere of curvature)\n - 'r': rowland circle (plotted in e1 direction)\n - 'v': local unit vectors e1, e2, nout\n If None, default to 'oscvr'\n res: None / float\n Resolution for the discretization of the outline\n dcryst: None / dict\n dict of dict for plotting the various elements of the crystal:\n - 'outline': dict of properties fed to plot()\n - 'cent': dict of properties fed to plot()\n - 'summit': dict of properties fed to plot()\n - 'rowland': dict of properties fed to plot()\n - 'vectors': dict of properties fed to quiver()\n ddet: None / dict\n dict of dict for plotting the various elements of the det:\n - 'outline': dict of properties fed to plot()\n - 'cent': dict of properties fed to plot()\n - 'vectors': dict of properties fed to quiver()\n color: None / str / tuple\n color to be used for plotting\n Overwrites all colors in dcryst and ddet\n det: None / dict\n Optionnal associated detector to be plotted, as a dict with keys:\n - 'cent': 1d array of cartesian coordinates of the center\n - 'nout': 1d array of cartesian coordinates of unit vector\n oriented towards the crystal\n - 'ei': 1d array of cartesian coordinates of unit vector\n - 'ej': 1d array of cartesian coordinates of unit vector\n - 'outline': 2d array of outline coordinates in (ei, ej)\n dleg: None / dict\n dict of properties to be passed to plt.legend()\n if False legend is not plotted\n use_non_parallelism: None / str\n Return the unit vectors (direct orthonormal basis)\n Depending on:\n - use_non_parallelism: True => return the geometrical basis\n - use_non_parallelism: False => return the mesh basis\n \"\"\"\n if det is None:\n det = False\n det = self._checkformat_det(det)\n\n lc = [\n dtheta is not None or psi is not None or phi is not None,\n pts is not None\n ]\n if np.sum(lc) == 2:\n msg = (\n \"For ray tracing, please provide either:\\n\"\n + \"\\t- dtheta, psi, phi, lamb/bragg\\n\"\n + \"\\t- pts, lamb/bragg\\n\"\n )\n raise Exception(msg)\n\n # Add rays?\n if lc[0]:\n # Get one 
way\n # pts.shape = (3, nlamb, npts, ndtheta)\n pts_summit, pts1 = self.get_rays_from_cryst(\n phi=phi, lamb=lamb, bragg=bragg,\n n=n, use_non_parallelism=use_non_parallelism,\n dtheta=dtheta, psi=psi,\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n config=config, det=det,\n returnas='pts', return_xixj=False,\n grid=grid,\n )\n # Get the other way\n pts2, xi, xj = self.get_rays_from_cryst(\n phi=phi+np.pi, lamb=lamb, bragg=bragg,\n n=n, use_non_parallelism=use_non_parallelism,\n dtheta=dtheta, psi=psi,\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n config=config, det=det,\n returnas='pts', return_xixj=True,\n grid=grid,\n )[1:]\n elif lc[1]:\n c0 = (\n isinstance(pts, np.ndarray)\n and pts.ndim == 2\n and pts.shape[0] == 3\n )\n if not c0:\n msg = (\"Arg pts must be a (3, npts) np.array!\")\n raise Exception(msg)\n\n # pts.shape = (nlamb, npts, ndtheta)\n dtheta, psi, phi, bragg, _, _ = self.calc_raytracing_from_lambpts(\n pts=pts,\n lamb=lamb,\n ndtheta=ntheta,\n )\n pts_summit, pts2, xi, xj = self.get_rays_from_cryst(\n phi=phi+np.pi, lamb=None, bragg=bragg,\n n=n, use_non_parallelism=use_non_parallelism,\n dtheta=dtheta, psi=psi,\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n config=config, det=det,\n returnas='pts', return_xixj=True,\n grid=grid,\n )\n pts1 = np.repeat(\n np.repeat(\n np.repeat(\n pts[:, None, :], dtheta.shape[0], axis=1,\n )[..., None],\n dtheta.shape[2],\n axis=-1,\n )[..., None],\n 2,\n axis=-1,\n )\n else:\n pts_summit, pts1, pts2, xi, xj = None, None, None, None, None\n return _plot_optics.CrystalBragg_plot(\n cryst=self, dcryst=dcryst,\n det=det, ddet=ddet,\n dax=dax, proj=proj, res=res, element=element,\n color=color,\n pts_summit=pts_summit, pts1=pts1, pts2=pts2,\n xi=xi, xj=xj,\n rays_color=rays_color, rays_npts=rays_npts,\n dleg=dleg, draw=draw, fs=fs, dmargin=dmargin,\n use_non_parallelism=use_non_parallelism,\n wintit=wintit, tit=tit,\n )\n\n # -----------------\n # methods for generic first-approx\n # -----------------\n\n def get_phi_from_magaxis_summit(\n self,\n axis_r,\n axis_z,\n axis_npts=None,\n lamb=None,\n lamb_tol=None,\n bragg=None,\n n=None,\n use_non_parallelism=None,\n ):\n \"\"\" Return phi of a magnteic axis (at lamb with tolerance)\n\n axis_r and axis_z must be np.ndarrays of the same shape\n The magnetic axis is discretized toroidally in axis_npts (def: 1000)\n\n The pts closest to the chosen lamb are picked\n If no pts is found within tolerance, an error is raised\n\n \"\"\"\n\n # --------------------\n # Check / format input\n\n if axis_npts is None:\n axis_npts = 1000\n\n axis_r = np.atleast_1d(axis_r)\n axis_z = np.atleast_1d(axis_z)\n assert axis_r.shape == axis_z.shape\n\n if lamb_tol is None:\n lamb_tol = 0.01e-10\n\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)\n\n # --------------\n # Disretize axis\n\n shaperz = axis_r.shape\n phi_ax = np.full(shaperz, np.nan)\n\n # Compute phi\n theta_cryst = np.arctan2(\n self._dgeom['summit'][1],\n self._dgeom['summit'][0],\n )\n\n theta_ax = theta_cryst + np.pi/2*np.linspace(-1, 1, axis_npts)\n shapetheta = np.r_[[1 for ii in shaperz], axis_npts]\n theta_ax = theta_ax.reshape(shapetheta)\n\n axis_x = (axis_r[..., None] * np.cos(theta_ax)).ravel()\n axis_y = (axis_r[..., None] * np.sin(theta_ax)).ravel()\n axis_z = (np.repeat(axis_z[..., None], axis_npts, axis=-1)).ravel()\n\n # ----------------------------------------------\n # Compute bragg, phi, lamb of each point on 
axis\n\n (\n bragg_ax_full, phi_ax_full, lamb_ax_full,\n ) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n pts=np.array([axis_x, axis_y, axis_z]),\n dtheta=None, psi=None,\n ntheta=None, npsi=None,\n n=None,\n use_non_parallelism=use_non_parallelism,\n grid=None,\n return_lamb=True,\n )\n\n # -------------------------------------\n # Select points on axis closest to lamb\n\n # lamb_ax_full = self.get_lamb_from_bragg(bragg_ax_full)\n shape_full = tuple(np.r_[shaperz, axis_npts])\n lamb_ax_full = lamb_ax_full.reshape(shape_full)\n phi_ax_full = phi_ax_full.reshape(shape_full)\n dlamb = np.abs(lamb_ax_full - lamb)\n\n indok = np.any(dlamb <= lamb_tol, axis=-1)\n indmin = np.nanargmin(dlamb[indok, :], axis=-1)\n indtup = tuple([iii for iii in indok.nonzero()] + [indmin])\n phi_ax[indok] = phi_ax_full[indtup]\n\n return phi_ax\n\n def get_bragg_from_lamb(self, lamb=None, n=None):\n """ Bragg's law: n*lamb = 2*d*sin(bragg) """\n if self._dmat['d'] is None:\n msg = "Interplane distance d not set!\n"\n msg += " => self.set_dmat({'d':...})"\n raise Exception(msg)\n if lamb is None:\n lamb = self._dbragg['lambref']\n return _comp_optics.get_bragg_from_lamb(\n np.atleast_1d(lamb), self._dmat['d'], n=n,\n )\n\n def get_lamb_from_bragg(self, bragg=None, n=None):\n """ Bragg's law: n*lamb = 2*d*sin(bragg) """\n if self._dmat['d'] is None:\n msg = "Interplane distance d not set!\n"\n msg += " => self.set_dmat({'d':...})"\n raise Exception(msg)\n if bragg is None:\n bragg = self._dbragg['braggref']\n return _comp_optics.get_lamb_from_bragg(np.atleast_1d(bragg),\n self._dmat['d'], n=n)\n\n def update_non_parallelism(self, alpha=None, beta=None):\n """ Compute new values of unit vectors nout, e1 and e2 in the\n dmat basis, due to non-parallelism\n\n Update the new values in the dmat dict\n """\n if alpha is None:\n alpha = 0\n if beta is None:\n beta = 0\n\n (self._dmat['nin'], self._dmat['nout'], self._dmat['e1'],\n self._dmat['e2']) = _comp_optics.get_vectors_from_angles(\n alpha, beta,\n self._dgeom['nout'], self._dgeom['e1'],\n self._dgeom['e2'],\n )\n self._dmat['alpha'], self._dmat['beta'] = alpha, beta\n\n def calc_meridional_sagital_focus(\n self,\n rcurve=None,\n bragg=None,\n alpha=None,\n use_non_parallelism=None,\n verb=None,\n ):\n """ Compute the meridional and sagittal focus distances.\n Optionally accounts for non-parallelism, provided the\n update_non_parallelism method was called first.\n\n parameters\n ----------\n rcurve: float\n in dgeom dict., curvature radius of the crystal.\n bragg: float\n in dbragg dict., reference bragg angle of the crystal.\n alpha: float\n in dmat dict., amplitude of the non-parallelism\n as an angle defined by the user, in radians.\n use_non_parallelism: bool\n Must be True to use the new alpha angle\n\n Return\n ------\n merid_ref: float\n Distance crystal-meridional focus (m), for a perfect crystal\n sagit_ref: float\n Distance crystal-sagittal focus (m), for a perfect crystal\n merid_unp: float\n Distance crystal-meridional focus (m), using non_parallelism\n sagit_unp: float\n Distance crystal-sagittal focus (m), using non_parallelism\n\n """\n # Check inputs\n if rcurve is None:\n rcurve = self._dgeom['rcurve']\n if bragg is None:\n bragg = self._dbragg['braggref']\n if use_non_parallelism is True:\n alpha = self._dmat['alpha']\n if use_non_parallelism is False:\n alpha = 0.0\n\n # Compute\n return _comp_optics.calc_meridional_sagital_focus(\n rcurve=rcurve,\n bragg=bragg,\n alpha=alpha,\n use_non_parallelism=use_non_parallelism,\n verb=verb,\n )\n\n 
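# Usage sketch (hypothetical instance 'cryst'): per the docstring above,\n # four distances (m) are returned, for a perfect and a non-parallel crystal:\n # m_ref, s_ref, m_unp, s_unp = cryst.calc_meridional_sagital_focus(\n # use_non_parallelism=True,\n # )\n\n 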
def get_rowland_dist_from_lambbragg(self, bragg=None, lamb=None, n=None):\n """ Return the array of dist from cryst summit to pts on rowland """\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n if np.all(np.isnan(bragg)):\n msg = ("There is no available bragg angle!\n"\n + " => Check the value of self.dmat['d'] vs lamb")\n raise Exception(msg)\n return _comp_optics.get_rowland_dist_from_bragg(\n bragg=bragg, rcurve=self._dgeom['rcurve'],\n )\n\n def get_detector_ideal(\n self,\n bragg=None, lamb=None,\n rcurve=None, n=None,\n ddist=None, di=None, dj=None,\n dtheta=None, dpsi=None, tilt=None,\n lamb0=None, lamb1=None, dist01=None,\n use_non_parallelism=None,\n tangent_to_rowland=None, plot=False,\n ):\n """ Return approximate ideal detector geometry\n\n Assumes an infinitesimal and ideal crystal\n Returns a dict containing the position and orientation of a detector if\n it was placed ideally on the rowland circle, centered on the\n desired bragg angle (in rad) or wavelength (in m)\n The detector can be tangential to the Rowland circle or perpendicular\n to the line between the crystal and the detector\n Assumes the detector center matches lamb (m) / bragg (rad)\n\n The detector can be translated towards / away from the crystal\n to make sure the distance between 2 spectral lines\n (lamb0 and lamb1) on the detector's plane matches\n a desired distance (dist01, in m)\n\n Finally, a desired offset (translation) can be added\n via (ddist, di, dj), in m\n Similarly, an extra rotation can be added via (dtheta, dpsi, tilt)\n\n The detector is described by its center position\n and (nout, ei, ej) unit vectors\n By convention, nout = np.cross(ei, ej)\n Vectors (ei, ej) define an orthogonal frame in the detector's plane\n All coordinates are 3d (X, Y, Z in the tokamak's frame)\n\n Return:\n -------\n det: dict\n dict of detector geometrical characteristics:\n 'cent': np.ndarray\n (3,) array of (x, y, z) coordinates of detector center\n 'nout': np.ndarray\n (3,) array of (x, y, z) coordinates of unit vector\n perpendicular to detector's surface\n oriented towards crystal\n 'ei': np.ndarray\n (3,) array of (x, y, z) coordinates of unit vector\n defining first coordinate in detector's plane\n 'ej': np.ndarray\n (3,) array of (x, y, z) coordinates of unit vector\n defining second coordinate in detector's plane\n 'outline': np.ndarray\n (2, N) array to build detector's contour\n where the last point is identical to the first.\n (for example for WEST X2D spectrometer:\n x*np.r_[-1,-1,1,1,-1], y*np.r_[-1,1,1,-1,-1])\n """\n\n # ---------------------\n # Check / format inputs\n\n if rcurve is None:\n rcurve = self._dgeom['rcurve']\n\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n if np.all(np.isnan(bragg)):\n msg = ("There is no available bragg angle!\n"\n + " => Check the value of self.dmat['d'] vs lamb")\n raise Exception(msg)\n\n lc = [lamb0 is not None, lamb1 is not None, dist01 is not None]\n if any(lc) and not all(lc):\n msg = (\n "Arg lamb0, lamb1 and dist01 must be provided together:\n"\n + "\t- lamb0: line0 wavelength ({})\n".format(lamb0)\n + "\t- lamb1: line1 wavelength ({})\n".format(lamb1)\n + "\t- dist01: distance (m) on detector between lines "\n + "({})".format(dist01)\n )\n raise Exception(msg)\n bragg01 = None\n if all(lc):\n bragg01 = self._checkformat_bragglamb(\n lamb=np.r_[lamb0, lamb1], n=n,\n )\n\n 
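# bragg01: Bragg angles of the two reference lines (lamb0, lamb1);\n # together with dist01 they constrain the crystal-detector distance so\n # that both lines land dist01 apart on the detector (see docstring above)\n\n 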
# split into 2 different conditions because of dmat\n lc = [rcurve is None, self._dgeom['summit'] is None]\n if any(lc):\n msg = (\n "Some missing fields in dgeom for computation:"\n + "\n\t-" + "\n\t-".join(['rcurve', 'summit'])\n )\n raise Exception(msg)\n\n nout, e1, e2, use_non_parallelism = self.get_unit_vectors(\n use_non_parallelism=use_non_parallelism,\n )\n\n lc = [cc is None for cc in [nout, e1, e2]]\n if any(lc):\n msg = (\n """\n Field 'nout', 'e1', 'e2' missing!\n """\n )\n raise Exception(msg)\n\n # Compute crystal-centered parameters in (nout, e1, e2)\n (det_dist, n_crystdet_rel,\n det_nout_rel, det_ei_rel) = _comp_optics.get_approx_detector_rel(\n rcurve, bragg,\n bragg01=bragg01, dist01=dist01,\n tangent_to_rowland=tangent_to_rowland)\n\n # Deduce absolute position in (x, y, z)\n det_cent, det_nout, det_ei, det_ej = _comp_optics.get_det_abs_from_rel(\n det_dist, n_crystdet_rel, det_nout_rel, det_ei_rel,\n self._dgeom['summit'], nout, e1, e2,\n ddist=ddist, di=di, dj=dj,\n dtheta=dtheta, dpsi=dpsi, tilt=tilt)\n\n if plot:\n dax = self.plot()\n p0 = np.repeat(det_cent[:, None], 3, axis=1)\n vv = np.vstack((det_nout, det_ei, det_ej)).T\n dax['cross'].plot(np.hypot(det_cent[0], det_cent[1]),\n det_cent[2], 'xb')\n dax['hor'].plot(det_cent[0], det_cent[1], 'xb')\n dax['cross'].quiver(np.hypot(p0[0, :], p0[1, :]), p0[2, :],\n np.hypot(vv[0, :], vv[1, :]), vv[2, :],\n units='xy', color='b')\n dax['hor'].quiver(p0[0, :], p0[1, :], vv[0, :], vv[1, :],\n units='xy', color='b')\n return {'cent': det_cent, 'nout': det_nout,\n 'ei': det_ei, 'ej': det_ej}\n\n def _checkformat_det(self, det=None):\n lc = [det is None, det is False, isinstance(det, dict)]\n msg = ("det must be:\n"\n + "\t- False: no det provided\n"\n + "\t- None: use default approx det from:\n"\n + "\t self.get_detector_ideal()\n"\n + "\t- dict: a dictionary of 3d (x,y,z) coordinates of a point"\n + " (local frame center) and 3 unit vectors forming a direct "\n + "orthonormal basis attached to the detector's frame\n"\n + "\t\t\t\t- 'cent': detector center\n"\n + "\t\t\t\t- 'nout': unit vector perpendicular to surface, "\n + "in direction of the crystal\n"\n + "\t\t\t\t- 'ei': unit vector, first coordinate on surface\n"\n + "\t\t\t\t- 'ej': unit vector, second coordinate on surface\n"\n + " You provided: {}".format(det))\n if not any(lc):\n raise Exception(msg)\n if lc[0]:\n det = self.get_detector_ideal(lamb=self._dbragg['lambref'])\n elif lc[2]:\n lk = ['cent', 'nout', 'ei', 'ej']\n c0 = (isinstance(det, dict)\n and all([(kk in det.keys()\n and hasattr(det[kk], '__iter__')\n and np.atleast_1d(det[kk]).size == 3\n and not np.any(np.isnan(det[kk])))\n for kk in lk]))\n if not c0:\n raise Exception(msg)\n for k0 in lk:\n det[k0] = np.atleast_1d(det[k0]).ravel()\n return det\n\n def get_local_noute1e2(\n self,\n dtheta=None, psi=None,\n ntheta=None, npsi=None,\n use_non_parallelism=None,\n include_summit=None,\n ):\n """ Return (vout, ve1, ve2) associated to pts on the crystal's surface\n\n All points on the spherical crystal's surface are identified\n by (dtheta, psi) coordinates, where:\n - theta = np.pi/2 + dtheta (dtheta=0 default) for the center\n (for the diffracted beam), from frame's basis vector ez\n - psi = 0 for the center, positive in direction of e1\n They are the spherical coordinates from a sphere centered on the\n crystal's center of curvature.\n\n Args (dtheta, psi) can be:\n - arbitrary: same shape and dimension up to 4\n - 'envelop': will be computed to represent the crystal contour\n will be returned as 2 1d arrays\n\n Return the pts themselves and the 3 
perpendicular local unit vectors\n (nout, e1, e2), where nout is towards the outside of the sphere and\n nout = np.cross(e1, e2)\n\n In all cases, the output have shape (3, psi.shape)\n\n Return:\n -------\n summ: np.ndarray\n coordinates of the points on the surface\n vout: np.ndarray\n coordinates of outward unit vector\n ve1: np.ndarray\n coordinates of first tangential unit vector\n ve2: np.ndarray\n coordinates of second tangential unit vector\n\n All are cartesian (X, Y, Z) coordinates in the tokamak's frame\n\n \"\"\"\n # Get local basis at crystal summit\n nout, e1, e2, use_non_parallelism = self.get_unit_vectors(\n use_non_parallelism=use_non_parallelism,\n )\n nin = -nout\n\n # Get vectors at any points from psi & dtheta\n vout, ve1, ve2 = _comp_optics.CrystBragg_get_noute1e2_from_psitheta(\n nout, e1, e2,\n psi=psi, dtheta=dtheta,\n e1e2=True, sameshape=False,\n extenthalf_psi=self._dgeom['extenthalf'][0],\n extenthalf_dtheta=self._dgeom['extenthalf'][1],\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n )\n vin = -vout\n # cent no longer dgeom['center'] because no longer a fixed point\n cent = self._dgeom['summit'] + self._dgeom['rcurve']*nin\n reshape = np.r_[3, [1 for ii in range(vout.ndim - 1)]]\n cent = cent.reshape(reshape)\n\n # Redefining summit according to nout at each point at crystal\n summ = cent + self._dgeom['rcurve']*vout\n return summ, vout, ve1, ve2\n\n def calc_xixj_from_braggphi(\n self,\n phi=None,\n bragg=None,\n lamb=None,\n n=None,\n dtheta=None,\n psi=None,\n det=None,\n use_non_parallelism=None,\n strict=None,\n return_strict=None,\n data=None,\n plot=True,\n dax=None,\n ):\n \"\"\" Assuming crystal's summit as frame origin\n\n According to [1], this assumes a local frame centered on the crystal\n\n These calculations are independent from the tokamak's frame:\n The origin of the local frame is the crystal's summit\n The (O, ez) axis is the crystal's normal\n The crystal is tangent to (O, ex, ey)\n\n [1] tofu/Notes_Upgrades/SpectroX2D/SpectroX2D_EllipsesOnPlane.pdf\n\n Parameters:\n -----------\n Z: float\n Detector's plane intersection with (O, ez) axis\n n: np.ndarray\n (3,) array containing local (x,y,z) coordinates of the plane's\n normal vector\n \"\"\"\n if return_strict is None:\n return_strict = False\n\n # Check / format inputs\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n phi = np.atleast_1d(phi)\n\n # Check / get det\n det = self._checkformat_det(det)\n\n # Get local summit nout, e1, e2 if non-centered\n if dtheta is None:\n dtheta = 0.\n if psi is None:\n psi = 0.\n\n # Probably to update with use_non_parallelism?\n # Get back summit & vectors at any point at the crystal surface,\n # according to parallelism properties\n summit, nout, e1, e2 = self.get_local_noute1e2(\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n ntheta=None, npsi=None,\n include_summit=False,\n )\n\n # Compute\n xi, xj, strict = _comp_optics.calc_xixj_from_braggphi(\n det_cent=det['cent'],\n det_nout=det['nout'], det_ei=det['ei'], det_ej=det['ej'],\n det_outline=det.get('outline'),\n summit=summit, nout=nout, e1=e1, e2=e2,\n bragg=bragg, phi=phi, strict=strict,\n )\n\n if plot:\n dax = _plot_optics.CrystalBragg_plot_approx_detector_params(\n bragg, xi, xj, data, dax,\n )\n if return_strict is True:\n return xi, xj, strict\n else:\n return xi, xj\n\n def plot_line_on_det_tracing(\n self, lamb=None, n=None,\n nphi=None,\n det=None, johann=None,\n use_non_parallelism=None,\n lpsi=None, ldtheta=None,\n strict=None,\n 
ax=None, dleg=None,\n rocking=None, fs=None, dmargin=None,\n wintit=None, tit=None,\n ):\n \"\"\" Visualize the de-focusing by ray-tracing of chosen lamb\n Possibility to plot few wavelength' arcs on the same plot.\n Args:\n - lamb: array of min size 1, in 1e-10 [m]\n - det: dict\n - xi_bounds: np.min & np.max of _XI\n - xj_bounds: np.min & np.max of _XJ\n (from \"inputs_temp/XICS_allshots_C34.py\" l.649)\n - johann: True or False\n \"\"\"\n # Check / format inputs\n if lamb is None:\n lamb = self._dbragg['lambref']\n lamb = np.atleast_1d(lamb).ravel()\n nlamb = lamb.size\n\n if johann is None:\n johann = lpsi is not None or ldtheta is not None\n if rocking is None:\n rocking = False\n\n if det is None or det.get('outline') is None:\n msg = (\"Please provide det as a dict with 'outline'!\")\n raise Exception(msg)\n\n # Get local basis\n nout, e1, e2, use_non_parallelism = self.get_unit_vectors(\n use_non_parallelism=use_non_parallelism,\n )\n nin = -nout\n\n # Compute lamb / phi\n _, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=det['outline'][0, :], xj=det['outline'][1, :], det=det,\n dtheta=0, psi=0,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=False,\n )\n phimin, phimax = np.nanmin(phi), np.nanmax(phi)\n phimin, phimax = phimin-(phimax-phimin)/10, phimax+(phimax-phimin)/10\n\n # Get reference ray-tracing\n bragg = self._checkformat_bragglamb(lamb=lamb, n=n)\n if nphi is None:\n nphi = 100\n phi = np.linspace(phimin, phimax, nphi)\n\n xi = np.full((nlamb, nphi), np.nan)\n xj = np.full((nlamb, nphi), np.nan)\n for ll in range(nlamb):\n xi[ll, :], xj[ll, :] = self.calc_xixj_from_braggphi(\n bragg=np.full(phi.shape, bragg[ll]),\n phi=phi,\n dtheta=0.,\n psi=0.,\n n=n,\n det=det,\n use_non_parallelism=use_non_parallelism,\n strict=strict,\n plot=False,\n )\n\n # Get johann-error raytracing (multiple positions on crystal)\n xi_er, xj_er = None, None\n if johann and not rocking:\n if lpsi is None:\n lpsi = np.linspace(-1., 1., 15)\n if ldtheta is None:\n ldtheta = np.linspace(-1., 1., 15)\n lpsi, ldtheta = np.meshgrid(lpsi, ldtheta)\n lpsi = lpsi.ravel()\n ldtheta = ldtheta.ravel()\n\n lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi]\n ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta]\n npsi = lpsi.size\n assert npsi == ldtheta.size\n\n xi_er = np.full((nlamb, npsi*nphi), np.nan)\n xj_er = np.full((nlamb, npsi*nphi), np.nan)\n for l in range(nlamb):\n for ii in range(npsi):\n i0 = np.arange(ii*nphi, (ii+1)*nphi)\n xi_er[l, i0], xj_er[l, i0] = self.calc_xixj_from_braggphi(\n phi=phi, bragg=bragg[l], lamb=None, n=n,\n dtheta=ldtheta[ii], psi=lpsi[ii],\n det=det, plot=False,\n use_non_parallelism=use_non_parallelism,\n strict=strict,\n )\n\n # Get rocking curve error\n if rocking:\n pass\n\n # Plot\n return _plot_optics.CrystalBragg_plot_line_tracing_on_det(\n lamb, xi, xj, xi_er, xj_er,\n det=det, ax=ax, dleg=dleg,\n johann=johann, rocking=rocking,\n fs=fs, dmargin=dmargin, wintit=wintit, tit=tit)\n\n def calc_johannerror(\n self,\n xi=None, xj=None, err=None,\n det=None, n=None,\n lpsi=None, ldtheta=None,\n lambda_interval_min=None,\n lambda_interval_max=None,\n use_non_parallelism=None,\n plot=True, fs=None, cmap=None,\n vmin=None, vmax=None, tit=None, wintit=None,\n ):\n \"\"\" Plot the johann error\n\n The johann error is the error (scattering) induced by defocalization\n due to finite crystal dimensions\n There is a johann error on wavelength (lamb => loss of spectral\n resolution) and on directionality (phi)\n If provided, lpsi and ldtheta are 
taken as normalized variations with\n respect to the crystal summit and to its extenthalf.\n Typical values are:\n - lpsi = [-1, 1, 1, -1]\n - ldtheta = [-1, -1, 1, 1]\n They must have the same len()\n\n First affecting a reference lambda according to:\n - pixel's position\n - crystal's summit\n Then, computing error on bragg and phi angles on each pixels by\n computing lambda and phi from the crystal's outline\n Provide lambda_interval_min/max to ensure the given wavelength interval\n is detected over the whole surface area.\n A True/False boolean is then returned.\n \"\"\"\n\n # Check xi, xj once before to avoid doing it twice\n if err is None:\n err = 'abs'\n if lambda_interval_min is None:\n lambda_interval_min = 3.93e-10\n if lambda_interval_max is None:\n lambda_interval_max = 4.00e-10\n\n xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)\n\n # Check / format inputs\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xii, xj=xjj, det=det,\n dtheta=0, psi=0,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n\n # Only one summit was selected\n bragg, phi, lamb = bragg[..., 0], phi[..., 0], lamb[..., 0]\n\n # Check lambda interval into lamb array\n c0 = (\n np.min(lamb) < lambda_interval_min\n and np.max(lamb) > lambda_interval_max\n )\n if c0:\n test_lambda_interv = True\n else:\n test_lambda_interv = False\n\n # Get err from multiple ldtheta, lpsi\n if lpsi is None:\n lpsi = np.r_[-1., 0., 1., 1., 1., 0., -1, -1]\n lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi]\n if ldtheta is None:\n ldtheta = np.r_[-1., -1., -1., 0., 1., 1., 1., 0.]\n ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta]\n npsi = lpsi.size\n assert npsi == ldtheta.size\n\n (\n braggerr, phierr, lamberr,\n ) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xii, xj=xjj, det=det,\n dtheta=ldtheta, psi=lpsi,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n err_lamb = np.nanmax(np.abs(lamb[..., None] - lamberr), axis=-1)\n err_phi = np.nanmax(np.abs(phi[..., None] - phierr), axis=-1)\n\n # absolute vs relative error\n if 'rel' in err:\n if err == 'rel':\n err_lamb = 100.*err_lamb / (np.nanmax(lamb) - np.nanmin(lamb))\n err_phi = 100.*err_phi / (np.nanmax(phi) - np.nanmin(phi))\n elif err == 'rel2':\n err_lamb = 100.*err_lamb / np.mean(lamb)\n err_phi = 100.*err_phi / np.mean(phi)\n err_lamb_units = '%'\n err_phi_units = '%'\n else:\n err_lamb_units = 'm'\n err_phi_units = 'rad'\n\n if plot is True:\n ax = _plot_optics.CrystalBragg_plot_johannerror(\n xi, xj, lamb, phi,\n err_lamb, err_phi,\n err_lamb_units=err_lamb_units,\n err_phi_units=err_phi_units,\n cmap=cmap, vmin=vmin, vmax=vmax,\n fs=fs, tit=tit, wintit=wintit,\n )\n return (\n err_lamb, err_phi, err_lamb_units, err_phi_units,\n test_lambda_interv,\n )\n\n def plot_focal_error_summed(\n self,\n dist_min=None, dist_max=None,\n di_min=None, di_max=None,\n ndist=None, ndi=None,\n lamb=None, bragg=None,\n xi=None, xj=None,\n err=None,\n use_non_parallelism=None,\n tangent_to_rowland=None, n=None,\n plot=None,\n pts=None,\n det_ref=None, plot_dets=None, nsort=None,\n dcryst=None,\n lambda_interval_min=None,\n lambda_interval_max=None,\n contour=None,\n fs=None,\n ax=None,\n cmap=None,\n vmin=None,\n vmax=None,\n return_ax=None,\n ):\n \"\"\"\n Using the calc_johannerror method, computing the sum of the\n focalization error over the whole detector for different positions\n characterized by the translations ddist and di in the equatorial plane\n (dist_min, 
dist_max, ndist) (di_min, di_max, ndi).\n\n Parameters:\n -----------\n - lamb/bragg : float\n Automatically set to crystal's references\n - xi, xj : np.ndarray\n pixelization of the detector\n (from \"inputs_temp/XICS_allshots_C34.py\" l.649)\n - alpha, beta : float\n Values of Non Parallelism references angles\n - use_non_parallelism : str\n - tangent_to_rowland : str\n - plot_dets : str\n Possibility to plot the nsort- detectors with the lowest\n summed focalization error, next to the Best Approximate Real\n detector\n dict(np.load('det37_CTVD_incC4_New.npz', allow_pickle=True))\n - nsort : float\n Number of best detector's position to plot\n - lambda_interv_min/max : float\n To ensure the given wavelength interval is detected over the whole\n surface area. A True/False boolean is then returned.\n \"\"\"\n\n # Check / format inputs\n if dist_min is None:\n dist_min = -0.15\n if dist_max is None:\n dist_max = 0.15\n if di_min is None:\n di_min = -0.40\n if di_max is None:\n di_max = 0.40\n if ndist is None:\n ndist = 21\n if ndi is None:\n ndi = 21\n if err is None:\n err = 'rel'\n if plot is None:\n plot = True\n if plot_dets is None:\n plot_dets = det_ref is not None\n if nsort is None:\n nsort = 5\n if return_ax is None:\n return_ax = True\n if lambda_interval_min is None:\n lambda_interval_min = 3.93e-10\n if lambda_interval_max is None:\n lambda_interval_max = 4.00e-10\n\n l0 = [dist_min, dist_max, ndist, di_min, di_max, ndi]\n c0 = any([l00 is not None for l00 in l0])\n if not c0:\n msg = (\n \"Please give the ranges of ddist and di translations\\n\"\n \"\\t to compute the different detector's position\\n\"\n \"\\t Provided:\\n\"\n \"\\t\\t- dist_min, dist_max, ndist: ({}, {}, {})\\n\".format(\n dist_min, dist_max, ndist,\n )\n + \"\\t\\t- di_min, di_max, ndi: ({}, {}, {})\\n\".format(\n di_min, di_max, ndi,\n )\n )\n raise Exception(msg)\n\n # ------------\n # Compute local coordinates of det_ref\n (\n ddist0, di0, dj0,\n dtheta0, dpsi0, tilt0,\n ) = self._get_local_coordinates_of_det(\n bragg=bragg,\n lamb=lamb,\n det_ref=det_ref,\n use_non_parallelism=use_non_parallelism,\n )\n\n # angle between nout vectors from get_det_approx() &\n ## get_det_approx(tangent=False)\n\n det1 = self.get_detector_ideal(\n lamb=lamb,\n bragg=bragg,\n use_non_parallelism=use_non_parallelism,\n tangent_to_rowland=True,\n )\n det2 = self.get_detector_ideal(\n lamb=lamb,\n bragg=bragg,\n use_non_parallelism=use_non_parallelism,\n tangent_to_rowland=False,\n )\n cos_angle_nout = np.sum(\n det1['nout'] * det2['nout']\n ) / (\n np.linalg.norm(det1['nout'] * np.linalg.norm(det2['nout']))\n )\n angle_nout = np.arccos(cos_angle_nout)\n\n # Compute\n ddist = np.linspace(dist_min, dist_max, int(ndist))\n di = np.linspace(di_min, di_max, int(ndi))\n error_lambda = np.full((di.size, ddist.size), np.nan)\n test_lamb_interv = np.zeros((di.size, ddist.size), dtype='bool')\n end = '\\r'\n for ii in range(ddist.size):\n for jj in range(di.size):\n\n # print progression\n if ii == ndist-1 and jj == ndi-1:\n end = '\\n'\n msg = (\n \"Computing mean focal error for det \"\n f\"({ii+1}, {jj+1})/({ndist}, {ndi})\"\n ).ljust(60)\n print(msg, end=end, flush=True)\n\n # Get det\n dpsi0bis = float(dpsi0)\n if tangent_to_rowland:\n dpsi0bis = dpsi0 - angle_nout\n\n det = self.get_detector_ideal(\n ddist=ddist[ii],\n di=di[jj],\n dj=dj0,\n dtheta=dtheta0,\n dpsi=dpsi0bis,\n tilt=tilt0,\n lamb=lamb,\n bragg=bragg,\n use_non_parallelism=use_non_parallelism,\n tangent_to_rowland=False,\n )\n\n # Integrate error\n (\n 
error_lambda_temp, test_lamb_interv[jj, ii],\n ) = self.calc_johannerror(\n xi=xi, xj=xj,\n det=det,\n err=err,\n lambda_interval_min=lambda_interval_min,\n lambda_interval_max=lambda_interval_max,\n plot=False,\n )[::4]\n error_lambda[jj, ii] = np.nanmean(error_lambda_temp)\n\n if 'rel' in err:\n units = '%'\n else:\n units = 'm'\n\n if plot:\n ax = _plot_optics.CrystalBragg_plot_focal_error_summed(\n cryst=self, dcryst=dcryst,\n lamb=lamb, bragg=bragg,\n error_lambda=error_lambda,\n ddist=ddist, di=di,\n ddist0=ddist0, di0=di0, dj0=dj0,\n dtheta0=dtheta0, dpsi0=dpsi0, tilt0=tilt0,\n angle_nout=angle_nout,\n det_ref=det_ref,\n units=units,\n plot_dets=plot_dets, nsort=nsort,\n tangent_to_rowland=tangent_to_rowland,\n use_non_parallelism=use_non_parallelism,\n pts=pts,\n test_lamb_interv=test_lamb_interv,\n contour=contour,\n fs=fs,\n ax=ax,\n cmap=cmap,\n vmin=vmin,\n vmax=vmax,\n )\n if return_ax:\n return error_lambda, ddist, di, test_lamb_interv, ax\n else:\n return error_lambda, ddist, di, test_lamb_interv\n\n def _get_local_coordinates_of_det(\n self,\n bragg=None,\n lamb=None,\n det_ref=None,\n use_non_parallelism=None,\n ):\n """\n Computation of the translation (ddist, di, dj) and angular\n (dtheta, dpsi, tilt) properties of an arbitrary detector chosen by\n the user.\n """\n\n # ------------\n # check inputs\n\n if det_ref is None:\n msg = (\n "You need to provide your arbitrary detector\n"\n + "\t in order to compute its spatial properties!\n"\n + "\t You provided: {}".format(det_ref)\n )\n raise Exception(msg)\n\n # Checkformat det\n det_ref = self._checkformat_det(det=det_ref)\n\n # ------------\n # get approx detector\n\n det_approx = self.get_detector_ideal(\n bragg=bragg, lamb=lamb,\n tangent_to_rowland=False,\n use_non_parallelism=use_non_parallelism,\n )\n\n # ------------\n # get vector delta between centers\n\n delta = det_ref['cent'] - det_approx['cent']\n ddist = np.sum(delta * (-det_approx['nout']))\n di = np.sum(delta * det_approx['ei'])\n dj = np.sum(delta * det_approx['ej'])\n\n # ---------------\n # get angles from unit vectors\n dtheta, dpsi, tilt = None, None, None\n\n # use formulas in _comp_optics.get_det_abs_from_rel()\n sindtheta = np.sum(det_approx['ej'] * det_ref['nout'])\n costheta_cospsi = np.sum(det_approx['nout'] * det_ref['nout'])\n costheta_sinpsi = np.sum(det_approx['ei'] * det_ref['nout'])\n costheta = np.sqrt(costheta_cospsi**2 + costheta_sinpsi**2)\n dtheta = np.arctan2(sindtheta, costheta)\n dpsi = np.arctan2(\n costheta_sinpsi / costheta,\n costheta_cospsi / costheta,\n )\n\n # ---------\n # tilt\n det_ei2 = (\n np.cos(dpsi)*det_approx['ei'] - np.sin(dpsi)*det_approx['nout']\n )\n det_ej2 = np.cross(det_ref['nout'], det_ei2)\n costilt = np.sum(det_ref['ei']*det_ei2)\n sintilt = np.sum(det_ref['ei']*det_ej2)\n tilt = np.arctan2(sintilt, costilt)\n\n return ddist, di, dj, dtheta, dpsi, tilt\n\n def get_lambbraggphi_from_ptsxixj_dthetapsi(\n self,\n pts=None,\n xi=None, xj=None, det=None,\n dtheta=None, psi=None,\n ntheta=None, npsi=None,\n n=None,\n use_non_parallelism=None,\n grid=None,\n return_lamb=None,\n ):\n """ Return the lamb, bragg and phi for provided pts and dtheta/psi\n\n if grid = True:\n compute all pts / dtheta/psi combinations\n => return (npts, ndtheta) arrays\n else:\n each pts is associated to a single dtheta/psi\n => assumes npts == ndtheta == npsi\n => return (npts,) arrays\n\n """\n\n # Check / Format inputs\n if return_lamb is None:\n return_lamb = True\n det = self._checkformat_det(det)\n\n # Get local 
basis\n summ, vout, ve1, ve2 = self.get_local_noute1e2(\n dtheta=dtheta, psi=psi,\n ntheta=ntheta, npsi=npsi,\n use_non_parallelism=use_non_parallelism,\n include_summit=True,\n )\n\n # Derive bragg, phi\n bragg, phi = _comp_optics.calc_braggphi_from_xixjpts(\n pts=pts,\n xi=xi, xj=xj, det=det,\n summit=summ, nin=-vout, e1=ve1, e2=ve2,\n grid=grid,\n )\n\n # Derive lamb\n if return_lamb is True:\n lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)\n return bragg, phi, lamb\n else:\n return bragg, phi\n\n def get_lamb_avail_from_pts(\n self,\n pts=None,\n n=None, ndtheta=None,\n det=None, nlamb=None, klamb=None,\n use_non_parallelism=None,\n strict=None,\n return_phidtheta=None,\n return_xixj=None,\n ):\n \"\"\" Return the wavelength accessible from plasma points on the crystal\n\n For a given plasma point, only a certain lambda interval can be\n bragg-diffracted on the crystal (due to bragg's law and the crystal's\n dimensions)\n\n Beware, for a given pts and lamb, there can be up to 2 sets of\n solutions\n All non-valid solutions are set to nans, such that most of the time\n there is only one\n\n For a set of given:\n - pts (3, npts) array, (x, y, z) coordinates\n Using:\n - nlamb: sampling of the lamb interval (default: 100)\n - ndtheta: sampling of the lamb interval (default: 20)\n - det: (optional) a detector dict, for xi and xj\n Returns:\n - lamb: (npts, nlamb) array of sampled valid wavelength interval\n - phi: (npts, nlamb, ndtheta, 2) array of phi\n - dtheta: (npts, nlamb, ndtheta, 2) array of dtheta\n - psi: (npts, nlamb, ndtheta, 2) array of psi\n And optionally (return_xixj=True and det provided as dict):\n - xi: (npts, nlamb, ndtheta, 2) array of xi\n - xj: (npts, nlamb, ndtheta, 2) array of xj\n\n The result is computed with or w/o taking into account non-parallelism\n\n \"\"\"\n # Check / format\n if ndtheta is None:\n ndtheta = 20\n if nlamb is None:\n nlamb = 100\n assert nlamb >= 2, \"nlamb must be >= 2\"\n if return_phidtheta is None:\n return_phidtheta = True\n if return_xixj is None:\n return_xixj = det is not None\n if det is None:\n return_xixj = False\n if det is None:\n strict = False\n\n # Get lamb min / max\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n pts=pts,\n dtheta='envelop', psi='envelop',\n ntheta=None, npsi=None,\n n=n, grid=True,\n use_non_parallelism=use_non_parallelism,\n return_lamb=True,\n )\n lambmin = np.nanmin(lamb, axis=1)\n lambmax = np.nanmax(lamb, axis=1)\n if klamb is None:\n klamb = np.linspace(0, 1, nlamb)\n elif not (isinstance(klamb, np.ndarray) and klamb.ndim == 1):\n msg = \"Please provide klamb as a 1d vector!\"\n raise Exception(msg)\n nlamb = klamb.size\n lamb = lambmin[:, None] + (lambmax-lambmin)[:, None]*klamb\n\n return _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(\n cryst=self,\n lamb=lamb,\n n=n,\n ndtheta=ndtheta,\n pts=pts,\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=return_phidtheta,\n return_xixj=return_xixj,\n strict=strict,\n det=det,\n )\n\n def _calc_dthetapsiphi_from_lambpts(\n self,\n pts=None, bragg=None, lamb=None,\n n=None, ndtheta=None,\n use_non_parallelism=None,\n grid=None,\n ):\n\n # Check / Format inputs\n pts = _comp_optics._checkformat_pts(pts)\n npts = pts.shape[1]\n\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n\n # get nout, e1, e2\n nout, e1, e2, use_non_parallelism = self.get_unit_vectors(\n use_non_parallelism=use_non_parallelism\n )\n\n # Compute dtheta, psi, indnan (nlamb, npts, ndtheta)\n # In general there are 2 solutions! 
(only close to rowland in practice)\n dtheta, psi, indok, grid = _comp_optics.calc_dthetapsiphi_from_lambpts(\n pts,\n bragg,\n summit=self._dgeom['summit'], # To be updated (non-paralellism)?\n rcurve=self._dgeom['rcurve'],\n nout=nout, e1=e1, e2=e2,\n extenthalf=self._dgeom['extenthalf'],\n ndtheta=ndtheta,\n grid=grid,\n )\n\n # reshape bragg for matching dtheta.shape\n if grid is True:\n bragg = np.repeat(\n np.repeat(\n np.repeat(bragg[:, None], npts, axis=-1)[..., None],\n dtheta.shape[2],\n axis=-1,\n )[..., None],\n 2,\n axis=-1,\n )\n pts = pts[:, None, :, None, None]\n else:\n bragg = np.repeat(\n np.repeat(bragg[:, None], dtheta.shape[1], axis=1)[..., None],\n 2,\n axis=-1,\n )\n pts = pts[..., None, None]\n bragg[~indok] = np.nan\n\n # Get corresponding phi and re-check bragg, for safety\n bragg2, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n pts=pts,\n dtheta=dtheta, psi=psi,\n grid=False,\n use_non_parallelism=use_non_parallelism,\n return_lamb=False,\n )\n\n c0 = (\n bragg2.shape == bragg.shape\n and np.allclose(bragg, bragg2, equal_nan=True)\n )\n if not c0:\n try:\n plt.figure()\n plt.plot(bragg, bragg2, '.')\n except Exception as err:\n pass\n msg = (\n \"Inconsistency detected in bragg angle computations:\\n\"\n + \"\\t- from the points and lamb\\n\"\n + \"\\t- from the points and (dtheta, psi)\\n\"\n + \"\\nContext:\\n\"\n + \"\\t- use_non_parallelism: {}\\n\".format(use_non_parallelism)\n + \"\\t- bragg.shape = {}\\n\".format(bragg.shape)\n + \"\\t- bragg2.shape = {}\\n\".format(bragg2.shape)\n )\n raise Exception(msg)\n\n return dtheta, psi, phi, bragg\n\n def calc_raytracing_from_lambpts(\n self,\n lamb=None, bragg=None, pts=None,\n xi_bounds=None, xj_bounds=None, nphi=None,\n det=None, n=None, ndtheta=None,\n johann=False, lpsi=None, ldtheta=None,\n rocking=False, strict=None, plot=None, fs=None,\n dmargin=None, wintit=None,\n tit=None, proj=None,\n legend=None, draw=None, returnas=None,\n ):\n \"\"\" Visualize the de-focusing by ray-tracing of chosen lamb\n\n If plot, 3 different plots can be produced:\n - det: plots the intersection of rays with detector plane\n - '2d': plots the geometry of the rays in 2d cross and hor\n - '3d': plots the geometry of the rays in 3d\n Specify the plotting option by setting plot to any of these (or a list)\n \"\"\"\n # Check / format inputs\n if returnas is None:\n returnas = 'data'\n if plot is None or plot is True:\n plot = ['det', '3d']\n if isinstance(plot, str):\n plot = plot.split('+')\n assert all([ss in ['det', '2d', '3d'] for ss in plot])\n assert returnas in ['data', 'ax']\n\n pts = _comp_optics._checkformat_pts(pts)\n npts = pts.shape[1]\n\n # Get dtheta, psi and phi from pts/lamb\n dtheta, psi, phi, bragg = self._calc_dthetapsiphi_from_lambpts(\n pts=pts, lamb=lamb, bragg=bragg, n=n, ndtheta=ndtheta,\n )\n ndtheta = dtheta.shape[-1]\n # assert dtheta.shape == (nlamb, npts, ndtheta)\n\n # Check / get det\n det = self._checkformat_det(det)\n\n # Compute xi, xj of reflexion (phi -> phi + np.pi)\n xi, xj = self.calc_xixj_from_braggphi(\n bragg=bragg, phi=phi+np.pi, n=n,\n dtheta=dtheta, psi=psi,\n det=det, strict=strict, plot=False,\n )\n\n # Plot to be checked - unnecessary ?\n plot = False\n if plot is not False:\n ptscryst, ptsdet = None, None\n if '2d' in plot or '3d' in plot:\n ptscryst = self.get_local_noute1e2(dtheta, psi)[0]\n ptsdet = (det['cent'][:, None, None, None]\n + xi[None, ...]*det['ei'][:, None, None, None]\n + xj[None, ...]*det['ej'][:, None, None, None])\n\n ax = 
_plot_optics.CrystalBragg_plot_raytracing_from_lambpts(\n xi=xi, xj=xj, lamb=lamb,\n xi_bounds=xi_bounds, xj_bounds=xj_bounds,\n pts=pts, ptscryst=ptscryst, ptsdet=ptsdet,\n det_cent=det['cent'], det_nout=det['nout'],\n det_ei=det['ei'], det_ej=det['ej'],\n cryst=self, proj=plot, fs=fs, dmargin=dmargin,\n wintit=wintit, tit=tit, legend=legend, draw=draw)\n if returnas == 'ax':\n return ax\n return dtheta, psi, phi, bragg, xi, xj\n\n def _calc_spect1d_from_data2d(self, data, lamb, phi,\n nlambfit=None, nphifit=None,\n nxi=None, nxj=None,\n spect1d=None, mask=None, vertsum1d=None):\n if nlambfit is None:\n nlambfit = nxi\n if nphifit is None:\n nphifit = nxj\n return _comp_optics._calc_spect1d_from_data2d(\n data, lamb, phi,\n nlambfit=nlambfit,\n nphifit=nphifit,\n spect1d=spect1d,\n mask=mask,\n vertsum1d=vertsum1d,\n )\n\n def plot_data_vs_lambphi(\n self,\n xi=None, xj=None, data=None, mask=None,\n det=None, dtheta=None, psi=None, n=None,\n use_non_parallelism=None,\n nlambfit=None, nphifit=None,\n magaxis=None, npaxis=None,\n dlines=None, spect1d='mean',\n lambmin=None, lambmax=None,\n xjcut=None, dxj=None,\n plot=True, fs=None, tit=None, wintit=None,\n cmap=None, vmin=None, vmax=None,\n returnas=None,\n ):\n # Check / format inputs\n assert data is not None\n if returnas is None:\n returnas = 'spect'\n lreturn = ['ax', 'spect']\n if returnas not in lreturn:\n msg = (\"Arg returnas must be in {}:\\n\".format(lreturn)\n + \"\\t- 'spect': return a 1d vertically averaged spectrum\\n\"\n + \"\\t- 'ax' : return a list of axes instances\")\n raise Exception(msg)\n\n xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)\n nxi = xi.size if xi is not None else np.unique(xii).size\n nxj = xj.size if xj is not None else np.unique(xjj).size\n\n # Compute lamb / phi\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xii, xj=xjj, det=det,\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n\n # Compute lambfit / phifit and spectrum1d\n (spect1d, lambfit, phifit,\n vertsum1d, phiminmax) = self._calc_spect1d_from_data2d(\n data, lamb, phi,\n nlambfit=nlambfit, nphifit=nphifit, nxi=nxi, nxj=nxj,\n spect1d=spect1d, mask=mask, vertsum1d=True\n )\n\n # Get phiref from mag axis\n lambax, phiax = None, None\n if magaxis is not None:\n if npaxis is None:\n npaxis = 1000\n thetacryst = np.arctan2(self._dgeom['summit'][1],\n self._dgeom['summit'][0])\n thetaax = thetacryst + np.pi/2*np.linspace(-1, 1, npaxis)\n pts = np.array([magaxis[0]*np.cos(thetaax),\n magaxis[0]*np.sin(thetaax),\n np.full((npaxis,), magaxis[1])])\n braggax, phiax = self.calc_braggphi_from_pts(pts)\n lambax = self.get_lamb_from_bragg(braggax)\n phiax = np.arctan2(np.sin(phiax-np.pi), np.cos(phiax-np.pi))\n ind = ((lambax >= lambfit[0]) & (lambax <= lambfit[-1])\n & (phiax >= phifit[0]) & (phiax <= phifit[-1]))\n lambax, phiax = lambax[ind], phiax[ind]\n ind = np.argsort(lambax)\n lambax, phiax = lambax[ind], phiax[ind]\n\n # Get lamb / phi for xj\n lambcut, phicut, spectcut = None, None, None\n if xjcut is not None:\n if dxj is None:\n dxj = 0.002\n xjcut = np.sort(np.atleast_1d(xjcut).ravel())\n xicutf = np.tile(xi, (xjcut.size, 1))\n xjcutf = np.repeat(xjcut[:, None], nxi, axis=1)\n (\n braggcut, phicut, lambcut,\n ) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xicutf, xj=xjcutf, det=det,\n dtheta=0, psi=0,\n use_non_parallelism=use_non_parallelism,\n n=1,\n grid=True,\n return_lamb=True,\n )\n indxj = [(np.abs(xj-xjc) <= dxj).nonzero()[0] for xjc in xjcut]\n spectcut = 
np.array([np.nanmean(data[ixj, :], axis=0)\n for ixj in indxj])\n\n # plot\n ax = None\n if plot:\n ax = _plot_optics.CrystalBragg_plot_data_vs_lambphi(\n xi, xj, bragg, lamb, phi, data,\n lambfit=lambfit, phifit=phifit, spect1d=spect1d,\n vertsum1d=vertsum1d, lambax=lambax, phiax=phiax,\n lambmin=lambmin, lambmax=lambmax, phiminmax=phiminmax,\n xjcut=xjcut, lambcut=lambcut, phicut=phicut, spectcut=spectcut,\n cmap=cmap, vmin=vmin, vmax=vmax, dlines=dlines,\n tit=tit, wintit=wintit, fs=fs)\n if returnas == 'spect':\n return spect1d, lambfit\n elif returnas == 'ax':\n return ax\n\n def get_plasmadomain_at_lamb(\n self,\n config=None,\n struct=None,\n domain=None,\n res=None,\n det=None,\n xixj_lim=None,\n strict=None,\n bragg=None,\n lamb=None,\n # for available lamb determination\n ndtheta=None,\n nlamb=None,\n n=None,\n use_non_parallelism=None,\n # plotting\n plot=None,\n dax=None,\n plot_as=None,\n lcolor=None,\n return_dax=None,\n ):\n \"\"\" Return pts in the plasma domain and a mask\n\n The mask is True only for points for which the desired wavelength is\n accesible from the crystal (and from the detector if strict=True and\n det is provided)\n\n More than one value of lamb can be provided (nlamb >= 1)\n\n pts is returned as a (3, npts) array\n lambok is returned as a (nlamb, npts) array\n\n \"\"\"\n\n # ------------\n # check inputs\n\n struct = _check_optics._check_config_get_Ves(\n config=config, struct=struct,\n )\n\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)\n\n # To be refined if xjlim is narrow\n if ndtheta is None:\n ndtheta = 5\n # To be refined if xilim is narrow\n if nlamb is None:\n nlamb = 11\n if strict is None:\n strict = True\n\n if plot is None:\n plot = True\n if return_dax is None:\n return_dax = plot is True\n\n # -------------\n # sample volume\n\n (\n pts, dV, ind, (resR, resZ, resPhi),\n ) = config.dStruct['dObj']['Ves'][struct].get_sampleV(\n res=res,\n domain=domain,\n returnas='(R, Z, Phi)',\n )\n\n # ------------------------------\n # check access from crystal only\n\n ptsXYZ = np.array([\n pts[0, :]*np.cos(pts[2, :]),\n pts[0, :]*np.sin(pts[2, :]),\n pts[1, :],\n ])\n\n lamb_access = self.get_lamb_avail_from_pts(\n pts=ptsXYZ,\n nlamb=2,\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=False,\n return_xixj=False,\n strict=False,\n )\n\n lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool)\n for ii, ll in enumerate(lamb):\n lambok[ii, :] = (\n (lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1])\n )\n\n # ---------------\n # refactor pts and lambok\n\n indok = np.any(lambok, axis=0)\n pts = pts[:, indok]\n ptsXYZ = ptsXYZ[:, indok]\n lambok = lambok[:, indok]\n\n # ---------------\n # check strict\n if strict is True:\n\n # det vs detbis if xixj_lim\n detbis = dict(det)\n if xixj_lim is not None:\n detbis['outline'] = np.array([\n np.r_[\n xixj_lim[0][0],\n xixj_lim[0][1]*np.r_[1, 1],\n xixj_lim[0][0],\n ],\n np.r_[\n xixj_lim[1][0]*np.r_[1, 1],\n xixj_lim[1][1]*np.r_[1, 1],\n ],\n ])\n detbis['outline'] = np.concatenate(\n (detbis['outline'], detbis['outline'][:, 0:1]),\n axis=1,\n )\n\n # intersection with detbis\n for kk, ll in enumerate(lamb):\n lambi = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(\n cryst=self,\n lamb=np.full((lambok[kk, :].sum(), 1), ll),\n n=n,\n ndtheta=ndtheta,\n pts=ptsXYZ[:, lambok[kk, :]],\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=False,\n return_xixj=False,\n strict=strict,\n det=detbis,\n )\n lambok[kk, 
lambok[kk, :]] = ~np.isnan(lambi[:, 0])\n\n # -------\n # return\n\n if plot:\n dax = _plot_optics.CrystalBragg_plot_plasma_domain_at_lamb(\n cryst=self,\n det=det,\n xixj_lim=xixj_lim,\n config=config,\n lamb=lamb,\n pts=pts,\n reseff=[resR, resZ, resPhi],\n lambok=lambok,\n dax=dax,\n plot_as=plot_as,\n lcolor=lcolor,\n )\n\n # ---------------\n # return\n\n if return_dax is True:\n return pts, lambok, dax\n else:\n return pts, lambok\n\n def calc_signal_from_emissivity(\n self,\n emis=None,\n config=None,\n struct=None,\n domain=None,\n res=None,\n det=None,\n xixj_lim=None,\n strict=None,\n bragg=None,\n lamb=None,\n binning=None,\n # for available lamb determination\n ndtheta=None,\n nlamb=None,\n n=None,\n use_non_parallelism=None,\n # plotting\n plot=None,\n vmin=None,\n vmax=None,\n vmin_bin=None,\n vmax_bin=None,\n cmap=None,\n dax=None,\n fs=None,\n dmargin=None,\n tit=None,\n return_dax=None,\n ):\n \"\"\" Return pts in the plasma domain and a mask\n\n The mask is True only for points for which the desired wavelength is\n accesible from the crystal (and from the detector if strict=True and\n det is provided)\n\n More than one value of lamb can be provided (nlamb >= 1)\n\n pts is returned as a (3, npts) array\n lambok is returned as a (nlamb, npts) array\n\n \"\"\"\n\n # ------------\n # check inputs\n\n (\n struct, lamb, binning,\n ) = _check_optics._check_calc_signal_from_emissivity(\n emis=emis, config=config, struct=struct,\n lamb=lamb, det=det, binning=binning,\n )\n\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)\n\n # To be refined if xjlim is narrow\n if ndtheta is None:\n ndtheta = 5\n # To be refined if xilim is narrow\n if nlamb is None:\n nlamb = 11\n if strict is None:\n strict = True\n\n if plot is None:\n plot = True\n if return_dax is None:\n return_dax = plot is True\n\n # -------------\n # sample volume\n\n (\n pts, dV, ind, (resR, resZ, resPhi),\n ) = config.dStruct['dObj']['Ves'][struct].get_sampleV(\n res=res,\n domain=domain,\n returnas='(R, Z, Phi)',\n )\n\n # ------------------------------\n # check access from crystal only\n\n ptsXYZ = np.array([\n pts[0, :]*np.cos(pts[2, :]),\n pts[0, :]*np.sin(pts[2, :]),\n pts[1, :],\n ])\n\n lamb_access = self.get_lamb_avail_from_pts(\n pts=ptsXYZ,\n nlamb=2,\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=False,\n return_xixj=False,\n strict=False,\n )\n\n lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool)\n for ii, ll in enumerate(lamb):\n lambok[ii, :] = (\n (lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1])\n )\n\n # ---------------\n # refactor pts and lambok\n\n indok = np.any(lambok, axis=0)\n pts = pts[:, indok]\n ptsXYZ = ptsXYZ[:, indok]\n lambok = lambok[:, indok]\n\n # ---------------\n # check strict\n\n # det vs detbis if xixj_lim\n detbis = dict(det)\n if xixj_lim is not None:\n detbis['outline'] = np.array([\n np.r_[\n xixj_lim[0][0],\n xixj_lim[0][1]*np.r_[1, 1],\n xixj_lim[0][0],\n ],\n np.r_[\n xixj_lim[1][0]*np.r_[1, 1],\n xixj_lim[1][1]*np.r_[1, 1],\n ],\n ])\n detbis['outline'] = np.concatenate(\n (detbis['outline'], detbis['outline'][:, 0:1]),\n axis=1,\n )\n\n # intersection with detbis\n shape = tuple(np.r_[pts.shape[1], lamb.size, ndtheta, 2])\n xi = np.full(shape, np.nan)\n xj = np.full(shape, np.nan)\n val = np.full(shape, np.nan)\n for kk, ll in enumerate(lamb):\n (\n lambi, xii, xji,\n ) = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(\n cryst=self,\n lamb=np.full((lambok[kk, :].sum(), 1), 
ll),\n n=n,\n ndtheta=ndtheta,\n pts=ptsXYZ[:, lambok[kk, :]],\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=False,\n return_xixj=True,\n strict=True,\n det=detbis,\n )\n\n iok = ~np.isnan(lambi[:, 0])\n iokf = lambok[kk, :].nonzero()[0][iok]\n lambok[kk, lambok[kk, :]] = iok\n xi[iokf, kk, :, :] = xii[iok, 0, :, :]\n xj[iokf, kk, :, :] = xji[iok, 0, :, :]\n val[iokf, kk, :, :] = emis(\n r=pts[0, iokf],\n z=pts[1, iokf],\n phi=pts[2, iokf],\n lamb=lamb[kk:kk+1],\n t=None,\n )[:, 0, None, None]\n\n # -------\n # Optional binning\n\n binned = None\n if binning is not False:\n iok = np.isfinite(val)\n binned = scpstats.binned_statistic_2d(\n xi[iok].ravel(),\n xj[iok].ravel(),\n val[iok].ravel(),\n statistic='mean',\n bins=binning,\n expand_binnumbers=False,\n )[0]\n\n # -------\n # return\n\n if plot:\n dax = _plot_optics.CrystalBragg_plot_signal_from_emissivity(\n cryst=self,\n det=det,\n xixj_lim=xixj_lim,\n config=config,\n lamb=lamb,\n pts=pts,\n reseff=[resR, resZ, resPhi],\n xi=xi,\n xj=xj,\n val=val,\n lambok=lambok,\n binning=binning,\n binned=binned,\n # plotting\n vmin=vmin,\n vmax=vmax,\n vmin_bin=vmin_bin,\n vmax_bin=vmax_bin,\n cmap=cmap,\n dax=dax,\n fs=fs,\n dmargin=dmargin,\n tit=tit,\n )\n\n # ---------------\n # return\n\n if return_dax is True:\n return pts, val, xi, xj, binned, dax\n else:\n return pts, val, xi, xj, binned\n\n @staticmethod\n def fit1d_dinput(\n dlines=None, dconstraints=None, dprepare=None,\n data=None, lamb=None,\n mask=None, domain=None, pos=None, subset=None,\n same_spectrum=None, same_spectrum_dlamb=None,\n focus=None, valid_fraction=None, valid_nsigma=None,\n focus_half_width=None, valid_return_fract=None,\n ):\n \"\"\" Return a formatted dict of lines and constraints\n\n To be fed to _fit12d.multigausfit1d_from_dlines()\n Provides a user-friendly way of defining constraints\n \"\"\"\n\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit1d_dinput(\n dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,\n data=data, lamb=lamb,\n mask=mask, domain=domain, pos=pos, subset=subset,\n same_spectrum=same_spectrum,\n same_spectrum_dlamb=same_spectrum_dlamb,\n focus=focus, valid_fraction=valid_fraction,\n valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,\n valid_return_fract=valid_return_fract)\n\n def fit1d(\n self,\n # Input data kwdargs\n data=None, lamb=None,\n dinput=None, dprepare=None, dlines=None, dconstraints=None,\n mask=None, domain=None, subset=None, pos=None,\n same_spectrum=None, same_spectrum_dlamb=None,\n focus=None, valid_fraction=None, valid_nsigma=None,\n focus_half_width=None,\n # Optimization kwdargs\n dx0=None, dscales=None, x0_scale=None, bounds_scale=None,\n method=None, tr_solver=None, tr_options=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n loss=None, verbose=None, chain=None, jac=None, showonly=None,\n # Results extraction kwdargs\n amp=None, coefs=None, ratio=None,\n Ti=None, width=None, vi=None, shift=None,\n pts_lamb_total=None, pts_lamb_detail=None,\n # Saving and plotting kwdargs\n save=None, name=None, path=None,\n plot=None, fs=None, dmargin=None,\n tit=None, wintit=None, returnas=None,\n ):\n\n # ----------------------\n # Get dinput for 1d fitting from dlines, dconstraints, dprepare...\n if dinput is None:\n dinput = self.fit1d_dinput(\n dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,\n data=data, lamb=lamb,\n mask=mask, domain=domain, pos=pos, subset=subset,\n focus=focus, valid_fraction=valid_fraction,\n valid_nsigma=valid_nsigma, 
focus_half_width=focus_half_width,\n same_spectrum=same_spectrum,\n same_spectrum_dlamb=same_spectrum_dlamb)\n\n # ----------------------\n # return\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit1d(\n # Input data kwdargs\n data=data, lamb=lamb,\n dinput=dinput, dprepare=dprepare,\n dlines=dlines, dconstraints=dconstraints,\n mask=mask, domain=domain, subset=subset, pos=pos,\n # Optimization kwdargs\n method=method, tr_solver=tr_solver, tr_options=tr_options,\n xtol=xtol, ftol=ftol, gtol=gtol,\n max_nfev=max_nfev, loss=loss, chain=chain,\n dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,\n jac=jac, verbose=verbose,\n save=save, name=name, path=path,\n amp=amp, coefs=coefs, ratio=ratio,\n Ti=Ti, width=width, vi=vi, shift=shift,\n pts_lamb_total=pts_lamb_total,\n pts_lamb_detail=pts_lamb_detail,\n plot=plot, fs=fs, wintit=wintit, tit=tit)\n\n @staticmethod\n def fit1d_extract(\n dfit1d=None,\n amp=None, coefs=None, ratio=None,\n Ti=None, width=None,\n vi=None, shift=None,\n pts_lamb_total=None, pts_lamb_detail=None,\n ):\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit1d_extract(\n dfit1d=dfit1d,\n amp=amp, coefs=coefs, ratio=ratio,\n Ti=Ti, width=width,\n vi=vi, shift=shift,\n pts_lamb_total=pts_lamb_total, pts_lamb_detail=pts_lamb_detail)\n\n def fit1d_from2d(self):\n \"\"\" Useful for optimizing detector or crystal position\n\n Given a set of 2d images on a detector\n Transform the 2d (xi, xj) image into (lamb, phi)\n Slice nphi 1d spectra\n Fit them using a dict of reference lines (dlines)\n Optionally provide constraints for the fitting\n Return the vertical profiles of the wavelength shift of each line\n To be used as input for a cost function and optimization\n\n 1d fitting is used instead of 2d because:\n - faster (for optimization)\n - does not require a choice of nbsplines\n - easier to understand and decide for user\n\n \"\"\"\n # NOTE: draft method - the names used below (lphi, dprepare, dinput,\n # data, xi, xj, ...) are placeholders for keyword arguments that are\n # not yet part of the signature\n # Check / format inputs\n if lphi is None:\n msg = (\"Arg lphi must be provided !\")\n raise Exception(msg)\n\n # ----------------------\n # Prepare input data\n # (geometrical transform, domain, binning, subset, noise...)\n if dprepare is None:\n dprepare = self.fit2d_prepare(\n data=data, xi=xi, xj=xj, n=n,\n det=det, dtheta=dtheta, psi=psi,\n mask=mask, domain=domain,\n pos=pos, binning=binning,\n nbsplines=False, subset=False,\n lphi=lphi, lphi_tol=lphi_tol)\n\n # ----------------------\n # Get dinput for 2d fitting from dlines, and dconstraints\n if dinput is None:\n dinput = self.fit2d_dinput(\n dlines=dlines, dconstraints=dconstraints,\n deg=deg, knots=knots, nbsplines=nbsplines,\n domain=dprepare['domain'],\n dataphi1d=dprepare['dataphi1d'], phi1d=dprepare['phi1d'])\n\n # ----------------------\n # fit\n out = self.fit1d(\n xi=None, xj=None, data=None, mask=None,\n det=None, dtheta=None, psi=None, n=None,\n nlambfit=None, nphifit=None,\n lambmin=None, lambmax=None,\n dlines=None, spect1d=None,\n dconstraints=None, dx0=None,\n same_spectrum=None, dlamb=None,\n double=None,\n dscales=None, x0_scale=None, bounds_scale=None,\n method=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n loss=None, verbose=0, chain=None,\n jac=None, showonly=None,\n plot=None, fs=None, dmargin=None,\n tit=None, wintit=None, returnas=None,\n )\n pass\n\n def fit2d_dinput(\n self, dlines=None, dconstraints=None, dprepare=None,\n data=None, xi=None, xj=None, n=None,\n det=None, dtheta=None, psi=None,\n mask=None, domain=None, pos=None, binning=None, subset=None,\n # lphi=None, lphi_tol=None,\n deg=None, knots=None, nbsplines=None,\n 
focus=None, valid_fraction=None, valid_nsigma=None,\n focus_half_width=None, valid_return_fract=None,\n use_non_parallelism=None,\n ):\n \"\"\" Return a formatted dict of lines and constraints\n\n To be fed to _fit12d.multigausfit2d_from_dlines()\n Provides a user-friendly way of defining constraints\n \"\"\"\n\n import tofu.spectro._fit12d as _fit12d\n if dprepare is None:\n # ----------------------\n # Geometrical transform\n xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)\n nxi = xi.size if xi is not None else np.unique(xii).size\n nxj = xj.size if xj is not None else np.unique(xjj).size\n\n # Compute lamb / phi\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xii, xj=xjj, det=det,\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n\n # ----------------------\n # Prepare input data (domain, binning, subset, noise...)\n dprepare = _fit12d.multigausfit2d_from_dlines_prepare(\n data, lamb, phi,\n mask=mask, domain=domain,\n pos=pos, binning=binning,\n nbsplines=nbsplines, subset=subset,\n nxi=nxi, nxj=nxj,\n ) # , lphi=lphi, lphi_tol=lphi_tol)\n return _fit12d.fit2d_dinput(\n dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,\n deg=deg, knots=knots, nbsplines=nbsplines,\n focus=focus, valid_fraction=valid_fraction,\n valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,\n valid_return_fract=valid_return_fract)\n\n def fit2d(\n self,\n # Input data kwdargs\n data=None, xi=None, xj=None,\n det=None, dtheta=None, psi=None, n=None,\n dinput=None, dprepare=None, dlines=None, dconstraints=None,\n mask=None, domain=None, subset=None, pos=None, binning=None,\n focus=None, valid_fraction=None, valid_nsigma=None,\n focus_half_width=None,\n deg=None, knots=None, nbsplines=None,\n # Optimization kwdargs\n dx0=None, dscales=None, x0_scale=None, bounds_scale=None,\n method=None, tr_solver=None, tr_options=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n loss=None, verbose=None, chain=None, jac=None, showonly=None,\n predeclare=None, debug=None,\n # Results extraction kwdargs\n amp=None, coefs=None, ratio=None,\n Ti=None, width=None, vi=None, shift=None,\n pts_lamb_total=None, pts_lamb_detail=None,\n # Saving and plotting kwdargs\n save=None, name=None, path=None,\n plot=None, fs=None, dmargin=None,\n tit=None, wintit=None, returnas=None,\n ):\n\n # npts=None, dax=None,\n # spect1d=None, nlambfit=None,\n # plotmode=None, angunits=None, indspect=None,\n # cmap=None, vmin=None, vmax=None):\n \"\"\" Perform 2d fitting of a 2d spectrometer image\n\n Fit the spectrum by a sum of gaussians\n Modulate each gaussian's parameters by bsplines in the spatial direction\n\n data must be provided in shape (nt, nxi, nxj), where:\n - nt is the number of time steps\n - nxi is the nb. of pixels in the horizontal / spectral direction\n - nxj is the nb. 
of pixels in the vertical / spatial direction\n\n \"\"\"\n\n # ----------------------\n # Geometrical transform in dprepare\n if dinput is None:\n dinput = self.fit2d_dinput(\n dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,\n data=data, xi=xi, xj=xj, n=n,\n det=det, dtheta=dtheta, psi=psi,\n mask=mask, domain=domain,\n pos=pos, binning=binning, subset=subset,\n deg=deg, knots=knots, nbsplines=nbsplines,\n focus=focus, valid_fraction=valid_fraction,\n valid_nsigma=valid_nsigma, focus_half_width=focus_half_width)\n\n # ----------------------\n # return\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit2d(\n dinput=dinput, dprepare=dprepare,\n dlines=dlines, dconstraints=dconstraints,\n # lamb / phi assumed stored in dinput['dprepare'] (like nxi / nxj)\n lamb=dinput['dprepare']['lamb'], phi=dinput['dprepare']['phi'],\n data=data, mask=mask,\n nxi=dinput['dprepare']['nxi'], nxj=dinput['dprepare']['nxj'],\n domain=domain, pos=pos, binning=binning, subset=subset,\n deg=deg, knots=knots, nbsplines=nbsplines,\n method=method, tr_solver=tr_solver, tr_options=tr_options,\n xtol=xtol, ftol=ftol, gtol=gtol,\n max_nfev=max_nfev, loss=loss, chain=chain,\n dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,\n jac=jac, verbose=verbose,\n save=save, name=name, path=path,\n plot=plot)\n\n @staticmethod\n def fit2d_extract(dfit2d=None,\n amp=None, Ti=None, vi=None,\n pts_phi=None, npts_phi=None,\n pts_lamb_phi_total=None,\n pts_lamb_phi_detail=None):\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit2d_extract_data(\n dfit2d=dfit2d,\n amp=amp, Ti=Ti, vi=vi,\n pts_phi=pts_phi, npts_phi=npts_phi,\n pts_lamb_phi_total=pts_lamb_phi_total,\n pts_lamb_phi_detail=pts_lamb_phi_detail)\n\n def fit2d_plot(self, dfit2d=None, ratio=None,\n amp=None, Ti=None, vi=None,\n pts_lamb_phi_total=None, pts_lamb_phi_detail=None,\n dax=None, plotmode=None, angunits=None,\n cmap=None, vmin=None, vmax=None,\n dmargin=None, tit=None, wintit=None, fs=None):\n dout = self.fit2d_extract(\n dfit2d,\n amp=amp, Ti=Ti, vi=vi,\n pts_lamb_phi_total=pts_lamb_phi_total,\n pts_lamb_phi_detail=pts_lamb_phi_detail)\n return _plot_optics.CrystalBragg_plot_data_fit2d(\n dfit2d=dfit2d, dout=dout, ratio=ratio,\n dax=dax, plotmode=plotmode, angunits=angunits,\n cmap=cmap, vmin=vmin, vmax=vmax,\n dmargin=dmargin, tit=tit, wintit=wintit, fs=fs)\n\n def noise_analysis(\n self, data=None, xi=None, xj=None, n=None,\n det=None, dtheta=None, psi=None,\n use_non_parallelism=None,\n mask=None, valid_fraction=None, nxerrbin=None,\n margin=None, domain=None, nlamb=None,\n deg=None, knots=None, nbsplines=None,\n loss=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n method=None, tr_solver=None, tr_options=None,\n verbose=None, plot=None,\n ms=None, dcolor=None,\n dax=None, fs=None, dmargin=None,\n wintit=None, tit=None, sublab=None,\n save_fig=None, name_fig=None, path_fig=None,\n fmt=None, return_dax=None,\n ):\n\n # ----------------------\n # Geometrical transform\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xi, xj=xj, det=det,\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.noise_analysis_2d(\n data, lamb, phi,\n mask=mask, valid_fraction=valid_fraction,\n margin=margin, nxerrbin=nxerrbin,\n nlamb=nlamb, deg=deg, knots=knots, nbsplines=nbsplines,\n loss=loss, max_nfev=max_nfev,\n xtol=xtol, ftol=ftol, gtol=gtol,\n method=method, tr_solver=tr_solver, tr_options=tr_options,\n verbose=verbose, plot=plot,\n ms=ms, dcolor=dcolor,\n dax=dax, fs=fs, dmargin=dmargin,\n wintit=wintit, tit=tit, sublab=sublab,\n save_fig=save_fig, name_fig=name_fig, path_fig=path_fig,\n fmt=fmt, 
return_dax=return_dax)\n\n @staticmethod\n def noise_analysis_plot(\n dnoise=None, margin=None, valid_fraction=None,\n ms=None, dcolor=None,\n dax=None, fs=None, dmargin=None,\n wintit=None, tit=None, sublab=None,\n save=None, name=None, path=None, fmt=None,\n ):\n import tofu.spectro._plot as _plot_spectro\n return _plot_spectro.plot_noise_analysis(\n dnoise=dnoise, margin=margin, valid_fraction=valid_fraction,\n ms=ms, dcolor=dcolor,\n dax=dax, fs=fs, dmargin=dmargin,\n wintit=wintit, tit=tit, sublab=sublab,\n save=save, name=name, path=path, fmt=fmt)\n\n def noise_analysis_scannbs(\n self, data=None, xi=None, xj=None, n=None,\n det=None, dtheta=None, psi=None,\n use_non_parallelism=None,\n mask=None, nxerrbin=None,\n domain=None, nlamb=None,\n deg=None, knots=None, nbsplines=None, lnbsplines=None,\n loss=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n method=None, tr_solver=None, tr_options=None,\n verbose=None, plot=None,\n ms=None, dax=None, fs=None, dmargin=None,\n wintit=None, tit=None, sublab=None,\n save_fig=None, name_fig=None, path_fig=None,\n fmt=None, return_dax=None,\n ):\n\n # ----------------------\n # Geometrical transform\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xi, xj=xj, det=det,\n dtheta=0, psi=0,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.noise_analysis_2d_scannbs(\n data, lamb, phi,\n mask=mask, nxerrbin=nxerrbin, nlamb=nlamb,\n deg=deg, knots=knots, nbsplines=nbsplines, lnbsplines=lnbsplines,\n loss=loss, max_nfev=max_nfev,\n xtol=xtol, ftol=ftol, gtol=gtol,\n method=method, tr_solver=tr_solver, tr_options=tr_options,\n verbose=verbose, plot=plot,\n ms=ms, dax=dax, fs=fs, dmargin=dmargin,\n wintit=wintit, tit=tit, sublab=sublab,\n save_fig=save_fig, name_fig=name_fig, path_fig=path_fig,\n fmt=fmt, return_dax=return_dax)\n\n @staticmethod\n def noise_analysis_scannbs_plot(\n dnoise_scan=None, ms=None,\n dax=None, fs=None, dmargin=None,\n wintit=None, tit=None, sublab=None,\n save=None, name=None, path=None, fmt=None,\n ):\n import tofu.spectro._plot as _plot_spectro\n return _plot_spectro.plot_noise_analysis_scannbs(\n dnoise=dnoise_scan, ms=ms,\n dax=dax, fs=fs, dmargin=dmargin,\n wintit=wintit, tit=tit, sublab=sublab,\n save=save, name=name, path=path, fmt=fmt)\n"
] | [
[
"numpy.nanmax",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.colors.to_rgba",
"numpy.nanmin",
"numpy.arctan2",
"numpy.concatenate",
"numpy.round",
"numpy.nanargmin",
"numpy.max",
"numpy.any",
"numpy.nanmean",
"numpy.cross",
"matplotlib.pyplot.plot",
"numpy.mean",
"scipy.interpolate.interp2d",
"numpy.hypot",
"numpy.allclose",
"numpy.unique",
"numpy.arange",
"numpy.full",
"numpy.atleast_1d",
"numpy.sin",
"scipy.interpolate.interp1d",
"numpy.repeat",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.isnan",
"numpy.arccos",
"numpy.argsort",
"numpy.array",
"numpy.meshgrid",
"numpy.sum",
"numpy.abs",
"numpy.isfinite",
"numpy.tile",
"numpy.cos",
"numpy.linalg.norm",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
srihari-nagaraj/anuvaad | [
"b09b01a033a033e97db6e404c088e0e6332053e4",
"b09b01a033a033e97db6e404c088e0e6332053e4",
"b09b01a033a033e97db6e404c088e0e6332053e4"
] | [
"anuvaad-etl/anuvaad-extractor/document-processor/evaluator/evaluator_string/src/notebooks/tesseract_ocr_evaluation_local.py",
"anuvaad-etl/anuvaad-extractor/document-processor/block-segmenter/src/utilities/yolov5/datasets.py",
"anuvaad-etl/anuvaad-extractor/document-processor/layout-detector/prima/src/utilities/table/table.py"
] | [
"import glob\nimport uuid\nimport json\nimport requests\nimport copy,time\nimport os\nimport cv2\nimport numpy as np\nfrom time import sleep\nimport pandas as pd\nimport logging\nfrom collections import Counter\nimport pytesseract\nfrom pytesseract import Output\n#from pytesseract import pytesseract\nfrom difflib import SequenceMatcher\nfrom io import StringIO\nfrom dynamic_adjustment import coord_adjustment\nimport ast\nfrom leven import levenshtein\nfrom horizontal_merging import horzontal_merging\n\nocr_level = \"LINE\"\ntext_processing = True\nREJECT_FILTER = 2\n#crop_factor= 5\n#crop_factor_y= 4\ncrop_factor= 5\ncrop_factor_y= 0\ncrop_save = True\ndigitization = True\nvis_thresh=0.90\nLANG_MAPPING = {\n \"en\" : [\"Latin\",\"eng\"],\n \"kn\" : ['Kannada',\"kan\"],\n \"gu\": [\"guj\"],\n \"or\": [\"ori\"],\n \"hi\" : [\"Devanagari\",\"hin\",\"eng\"],\n \"bn\" : [\"Bengali\",\"ben\"],\n \"mr\": [\"Devanagari\",\"hin\",\"eng\"],\n \"ta\": ['Tamil',\"tam\"],\n \"te\" : [\"Telugu\",\"tel\"],\n \"ml\" :[\"Malayalam\"],\n \"ma\" :[\"Marathi\"]\n}\n\n\n#path = '/home/ubuntu/tesseract_evaluation/data/'\n#output_path = '/home/ubuntu/tesseract_evaluation/result/'\n#output_path_boxes= '/home/ubuntu/tesseract_evaluation/test_word_boxes/'\n#base_path = '/home/ubuntu/tesseract_evaluation/test_word_boxes/'\npath = '/home/naresh/Tarento/testing_document_processor/test_pipeline/data/'\noutput_path = '/home/naresh/Tarento/testing_document_processor/test_pipeline/result/'\noutput_path_boxes= '/home/naresh/Tarento/testing_document_processor/test_word_boxes/'\nbase_path= '/home/naresh/Tarento/testing_document_processor/test_word_boxes/'\n\n\npsms = [6,7,8,9,10,11]\ntoken = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyTmFtZSI6ImRoaXJhai5kYWdhQHRhcmVudG8uY29tIiwicGFzc3dvcmQiOiJiJyQyYiQxMiRuTXdNcHpCVlBXVVUvSlVLWXBKYWkuQUd2SUNJalJVcUdIbnBPenRzai5VRU55emlSZmk1TyciLCJleHAiOjE2MTk3Njg2NjN9.14IL5_kw83F5gxjUMSw6kCDLYQhjAg306AwJj0DsxWc'\n\n\nword_url = \"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate\"\ngoogle_url = \"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate\"\nlayout_url = \"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate\"\nsegmenter_url = \"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate\"\nbs_url =\"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/jobs/search/bulk\"\n\nevaluator_url = \"https://auth.anuvaad.org/anuvaad-etl/document-processor/evaluator/v0/process\"\n\n#evaluator_url = 'http://0.0.0.0:5001/anuvaad-etl/document-processor/evaluator/v0/process'\n\ndownload_url =\"https://auth.anuvaad.org/download/\"\nupload_url = 'https://auth.anuvaad.org/anuvaad-api/file-uploader/v0/upload-file'\n\n\nheaders = {\n 'auth-token' :token }\n\n\n\n\n\nclass Draw:\n \n def __init__(self,input_json,save_dir,regions,prefix='',color= (255,0,0),thickness=5): \n self.json = input_json\n self.save_dir = save_dir\n self.regions = regions\n self.prefix = prefix\n self.color = color\n self.thickness=thickness\n if self.prefix == 'seg':\n #print('drawing children')\n self.draw_region_children()\n else:\n self.draw_region__sub_children()\n \n def get_coords(self,page_index):\n return self.json['outputs'][0]['pages'][page_index][self.regions]\n \n def get_page_count(self):\n return(self.json['outputs'][0]['page_info'])\n \n def get_page(self,page_index):\n page_path = self.json['outputs'][0]['page_info'][page_index]\n page_path = page_path.split('upload')[1]#'/'.join(page_path.split('/')[1:])\n 
#print(page_path) \n return download_file(download_url,headers,page_path,f_type='image')\n\n def draw_region(self):\n font = cv2.FONT_HERSHEY_SIMPLEX \n for page_index in range(len(self.get_page_count())) :\n nparr = np.frombuffer(self.get_page(page_index), np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n for region in self.get_coords(page_index) :\n ground = region['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n cv2.polylines(image, [np.array(pts)],True, self.color, self.thickness)\n if 'class' not in region.keys():\n region['class'] = 'TEXT'\n cv2.putText(image, str(region['class']), (pts[0][0],pts[0][1]), font, \n 2, (0,125,255), 3, cv2.LINE_AA)\n \n image_path = os.path.join(self.save_dir , '{}_{}_{}.png'.format(self.regions,self.prefix,page_index)) \n cv2.imwrite(image_path , image)\n \n def draw_region_children(self):\n font = cv2.FONT_HERSHEY_SIMPLEX \n fontScale = 2\n thickness =3\n\n\n for page_index in range(len(self.get_page_count())) :\n nparr = np.frombuffer(self.get_page(page_index), np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n for region_index,region in enumerate(self.get_coords(page_index)) :\n try:\n ground = region['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n #print(pts)\n region_color = (0 ,0,125+ 130*(region_index/ len(self.get_coords(page_index))))\n cv2.polylines(image, [np.array(pts)],True, region_color, self.thickness)\n cv2.putText(image, str(region_index), (pts[0][0],pts[0][1]), font, \n fontScale, region_color, thickness, cv2.LINE_AA)\n for line_index, line in enumerate(region['children']):\n ground = line['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n\n line_color = (125 + 130*(region_index/ len(self.get_coords(page_index))) ,0,0)\n cv2.polylines(image, [np.array(pts)],True, line_color, self.thickness -2)\n cv2.putText(image, str(line_index), (pts[0][0],pts[0][1]), font, \n fontScale, line_color, thickness, cv2.LINE_AA)\n except Exception as e:\n print(str(e))\n print(region)\n \n image_path = os.path.join(self.save_dir , '{}_{}.png'.format(self.prefix,page_index))\n cv2.imwrite(image_path , image)\n def draw_region__sub_children(self): \n for page_index in range(len(self.get_page_count())) :\n nparr = np.frombuffer(self.get_page(page_index), np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n \n font = cv2.FONT_HERSHEY_SIMPLEX \n fontScale = 2\n\n # Blue color in BGR \n color = (0 ,255,0) \n\n # Line thickness of 2 px \n thickness = 3\n\n # Using cv2.putText() method \n \n for region_index,region in enumerate(self.get_coords(page_index)) :\n try:\n ground = region['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n #print(pts)\n region_color = (0,0,255)\n cv2.polylines(image, [np.array(pts)],True, region_color, self.thickness)\n for line_index, line in enumerate(region['regions']):\n ground = line['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x'])-1 ,int(pt['y']) -1 ])\n\n line_color = (255,0,0)\n cv2.polylines(image, [np.array(pts)],True, line_color, self.thickness -2)\n \n cv2.putText(image, str(line_index), (pts[0][0],pts[0][1]), font, \n fontScale, (255,0,0), thickness, cv2.LINE_AA)\n for word_index, word in enumerate(line['regions']):\n ground = word['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) -3,int(pt['y'])-3])\n\n word_color = 
(0,255,0)\n cv2.polylines(image, [np.array(pts)],True, word_color, self.thickness -2)\n\n cv2.putText(image, str(word_index), (pts[0][0],pts[0][1]), font, \n fontScale-1,(0,255,0), thickness, cv2.LINE_AA)\n except Exception as e:\n print(str(e))\n print(region)\n \n \n \n #print(self.prefix)\n image_path = os.path.join(self.save_dir , '{}_{}_{}.png'.format(self.prefix,self.regions,page_index))\n cv2.imwrite(image_path , image)\n\n\n\n\n\n# # google vision pipeline\n\n\ndef google_ocr_v15(url,headers,pdf_name):\n \n file = {\n \"files\": [\n {\n \"locale\": \"hi\",\n \"path\": pdf_name,\n \"type\": \"pdf\",\n \"config\":{\n \"OCR\": {\n \"option\": \"HIGH_ACCURACY\",\n \"language\": \"hi\",\n \"top_correction\":\"True\",\n \"craft_word\": \"True\",\n \"craft_line\": \"True\",\n }\n }}\n ],\n \"workflowCode\": \"WF_A_FCWDLDBSOD15GV\"\n }\n res = requests.post(url,json=file,headers=headers)\n return res.json()\n\n\n\n\n\ndef upload_file(pdf_file,headers,url):\n #url = 'https://auth.anuvaad.org/anuvaad-api/file-uploader/v0/upload-file'\n files = [\n ('file',(open(pdf_file,'rb')))] \n\n response = requests.post(url, headers=headers, files=files)\n \n return response.json()\n\n\n\n\n\ndef download_file(download_url,headers,outputfile,f_type='json'):\n download_url =download_url+str(outputfile)\n res = requests.get(download_url,headers=headers)\n if f_type == 'json':\n return res.json()\n else :\n return res.content\n\n\n\n\n\ndef save_json(path,res):\n with open(path, \"w\", encoding='utf8') as write_file:\n json.dump(res, write_file,ensure_ascii=False )\n\n\n\n\n\ndef bulk_search(job_id,bs_url,headers):\n bs_request = {\n \"jobIDs\": [job_id],\n \"taskDetails\":\"true\"\n }\n print(job_id)\n res = requests.post(bs_url,json=bs_request,headers=headers, timeout = 10000)\n print(res.json())\n \n \n while(1):\n \n in_progress = res.json()['jobs'][0]['status']\n \n if in_progress == 'COMPLETED':\n outputfile = res.json()['jobs'][0]['output'][0]['outputFile']\n print(in_progress)\n return outputfile\n break\n sleep(0.5)\n print(in_progress)\n res = requests.post(bs_url,json=bs_request,headers=headers, timeout = 10000)\n \n \n\n\n\n\n\ndef execute_module(module,url,input_file,module_code,pdf_dir,overwirte=True , draw=True):\n \n \n \n output_path = os.path.join(pdf_dir,'{}.json'.format(module_code))\n if os.path.exists(output_path) and not overwirte:\n print(' loading *****************{}'.format(module_code ))\n with open(output_path,'r') as wd_file :\n response = json.load(wd_file)\n \n wf_res = pdf_dir + '/{}_wf.json'.format(module_code)\n with open(wf_res,'r') as wd_file :\n json_file = json.load(wd_file) \n #json_file = upload_file(output_path,headers,upload_url)['data']\n else :\n if module_code in ['wd','gv']:\n res = upload_file(input_file,headers,upload_url)\n print('upload response **********', res)\n pdf_name = res['data']\n response = module(url,headers,pdf_name)\n \n else : \n response = module(url,headers,input_file)\n \n if 'eval' in module_code :\n json_file = response['outputFile']\n response = download_file(download_url,headers,json_file)\n save_json(output_path,response)\n return json_file,response\n \n \n print(' response *****************{} {}'.format(module_code ,response ))\n job_id = response['jobID']\n json_file = bulk_search(job_id,bs_url,headers)\n save_json(pdf_dir + '/{}_wf.json'.format(module_code),json_file) \n print('bulk search response **************',json_file )\n response = download_file(download_url,headers,json_file)\n save_json(output_path,response)\n if draw :\n 
if module_code in ['wd','gv']:\n Draw(response,pdf_dir,regions='lines',prefix=module_code)\n else :\n Draw(response,pdf_dir,regions='regions',prefix=module_code)\n \n return json_file,response\n \n\n\n\ndef evaluate__and_save_input(pdf_files,output_dir,headers,word_url,layout_url,download_url,upload_url,bs_url):\n word_responses = {}\n layout_responses = {}\n segmenter_responses = []\n for pdf in pdf_files:\n #try :\n pdf_name = pdf.split('/')[-1].split('.')[0]\n print(pdf , ' is being processed')\n pdf_output_dir = os.path.join(output_dir,pdf_name)\n os.system('mkdir -p \"{}\"'.format(pdf_output_dir))\n\n\n wd_json,_ = execute_module(google_ocr_v15,word_url,input_file=pdf,module_code='gv',pdf_dir=pdf_output_dir,overwirte=False , draw=False)\n\n\n\n\ndef main(path,headers,word_url,layout_url,download_url,upload_url,bs_url):\n pdf_names = glob.glob(path + '/*.pdf')\n \n \n return evaluate__and_save_input(pdf_names,output_path,headers,word_url,layout_url,download_url,upload_url,bs_url)\n \n\nif digitization:\n main(path,headers,word_url,layout_url,download_url,upload_url,bs_url)\n\n\ndef bound_coordinate(corrdinate,max):\n if corrdinate < 0 :\n corrdinate = 0\n if corrdinate > max:\n corrdinate = max - 2\n return int(corrdinate)\ndef get_image_from_box(image, box, height=140):\n #box = data['box']\n #scale = np.sqrt((box[1, 1] - box[2, 1])**2 + (box[0, 1] - box[3, 1])**2) / height\n #print(\"scale is \",scale)\n #w = int(np.sqrt((box[0, 0] - box[1, 0])**2 + (box[2, 0] - box[3, 0])**2) / scale)\n w = max(abs(box[0, 0] - box[1, 0]),abs(box[2, 0] - box[3, 0]))\n height = max(abs(box[0, 1] - box[3, 1]),abs(box[1, 1] - box[2, 1]))\n pts1 = np.float32(box)\n #w=2266-376\n pts2 = np.float32([[0, 0], [int(w), 0],[int(w),int(height)],[0,int(height)]])\n M = cv2.getPerspectiveTransform(pts1, pts2)\n result_img = cv2.warpPerspective(image,M,(int(w), int(height))) #flags=cv2.INTER_NEAREST\n return result_img\n\ndef process_dfs(temp_df):\n\ttemp_df = temp_df[temp_df.text.notnull()]\n\ttext = \"\"\n\tconf=0\n\ttemp_dict1 = []\n\tfor index, row in temp_df.iterrows():\n\t\ttemp_dict2 = {}\n\t\tconf = conf + row[\"conf\"]\n\t\ttemp_dict2[\"text\"]=row['text']\n\t\ttemp_dict2[\"conf\"]=row['conf']\n\t\ttext = text +\" \"+ str(row['text'])\n\t\ttemp_dict1.append(temp_dict2)\n\treturn text,temp_dict1\ndef process_dfs_updated(temp_df,language,psm_val,image):\n\ttemp_df = temp_df[temp_df.text.notnull()]\n\ttext = \"\"\n\tconf=0\n\ttemp_dict1 = []\n\tif len(temp_df)>0:\n\t\tfor index, row in temp_df.iterrows():\n\t\t\ttemp_dict2 = {}\n\t\t\torg_conf = row[\"conf\"]\n\t\t\torg_text = row['text']\n\t\t\tflag = True\n\t\t\tif row[\"conf\"]<50:\n\t\t\t\tprint(row[\"top\"],row[\"height\"],row[\"left\"],row[\"width\"])\n\t\t\t\tcrop_image = image[ int(row[\"top\"]):int(row[\"top\"]+row[\"height\"]), int(row[\"left\"]):int(row[\"left\"]+row[\"width\"])]\n\t\t\t\tfor psm in psms:\n\t\t\t\t\t\n\t\t\t\t\tdf2 = pytesseract.image_to_data(crop_image,config='--psm '+str(psm), lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)\n\t\t\t\t\ttemp_df2 = df2[df2.text.notnull()]\n\t\t\t\t\tif len(temp_df2)>0:\n\t\t\t\t\t\tnew_conf = temp_df2.iloc[0].conf\n\t\t\t\t\t\tif org_conf<new_conf:\n\t\t\t\t\t\t\torg_conf = new_conf\n\t\t\t\t\t\t\torg_text = temp_df2.iloc[0].text\n\t\t\t\t\t\n\t\t\tif flag:\n\t\t\t\tprint(\"old text\", row['text'])\n\t\t\t\tprint(\"new text\", org_text)\t\t\n\t\t\tconf = conf + org_conf\n\t\t\ttemp_dict2[\"text\"]=org_text\n\t\t\ttemp_dict2[\"conf\"]=org_conf\n\t\t\ttext = text +\" \"+ 
str(org_text)\n\t\t\ttemp_dict1.append(temp_dict2)\n\treturn text,temp_dict1\n \ndef check_psm(path,coord,language,mode_height,save_base_path,psm_val,org_score,org_text,line_text,org_conf):\n\tfor psm in psms:\n\t\ttext,conf_dict = get_text(path,coord,language,mode_height,save_base_path,psm)\n\t\tif text_processing:\n\t\t\ttext_list = text.split()\n\t\t\ttext = \" \".join(text_list)\n\t\t\tscore,message,match_count = seq_matcher(text,line_text)\n\t\t\tif score==1.0 or score==1:\n\t\t\t\torg_score = score\n\t\t\t\torg_text = text\n\t\t\t\torg_conf = conf_dict\n\t\t\t\tbreak\n\t\t\telif score>org_score:\n\t\t\t\torg_score =score\n\t\t\t\torg_text = text\n\t\t\t\torg_conf = conf_dict\n\t\t\t\t\n\treturn org_text, org_conf,org_score\n\t\t\n \n \n\t\t\n\t\ndef get_text(path,coord,language,mode_height,save_base_path,psm_val):\n #try:\n\n\tpath = path.split('upload')[1]\n\n\timage = download_file(download_url,headers,path,f_type='image')\n\tnparr = np.frombuffer(image, np.uint8)\n\timage = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\t#image = cv2.imread(\"/home/naresh/crop.jpeg\",0)\n\theight, width,channel = image.shape\n\n\t# left = bound_coordinate(coord[0] , width)\n\t# top = bound_coordinate(coord[1],height )\n\t# right = bound_coordinate(coord[2] ,width)\n\t# bottom = bound_coordinate(coord[3], height)\n\t# region_width = abs(right-left)\n\t# region_height = abs(bottom-top)\n\n\t# if left==right==top==bottom==0 or region_width==0 or region_height==0:\n\t# return \"\"\t\n\n\tcrop_image = get_image_from_box(image, coord, height=abs(coord[0,1]-coord[2,1]))\n\t#crop_image = image[ top:bottom, left:right]\n\t#crop_image_cv = image[ coord[0,1]:coord[2,1], coord[0,0]:coord[1,0]]\n\tsave_path = save_base_path+\"/\"+\"_psm_pers\"+str(psm_val)+\"--\"+str(uuid.uuid4()) + '.jpg'\n\n\tif crop_save:\n\t cv2.imwrite(save_path,crop_image)\n\n\t#if abs(bottom-top) > 3*mode_height:\n\t#print(LANG_MAPPING[language][0])\n\tif abs(coord[1,1]-coord[2,1])>mode_height:\n\t #text = pytesseract.image_to_string(crop_image,config='--psm 6', lang=LANG_MAPPING[language][1])\n\t dfs = pytesseract.image_to_data(crop_image,config='--psm 6', lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)\n\t #text,conf_dict = process_dfs(dfs)\n\t text,conf_dict = process_dfs_updated(dfs,language,6,crop_image)\n\t \n\telse:\n\t #text = pytesseract.image_to_string(crop_image,config='--psm '+str(psm_val), lang=LANG_MAPPING[language][1])\n\t dfs = pytesseract.image_to_data(crop_image,config='--psm '+str(psm_val), lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)\n\t #text,conf_dict = process_dfs(dfs)\n\t text,conf_dict = process_dfs_updated(dfs,language,psm_val,crop_image)\n\treturn text,conf_dict\n #except:\n\n #print(\"xxxxxxxxxxxxxxxxxxxxxxxxxx\",coord)\n #print([0.0])\n #return \"\",[0.0]\n\n\ndef merger_text(line):\n text = \"\"\n word_count=0\n for word_idx, word in enumerate(line['regions']):\n if \"text\" in word.keys() and word[\"text\"].replace(\" \", \"\") != \"\":\n text = text+\" \"+ word[\"text\"]\n word_count=word_count+1\n return text, word_count\n\n\n\ndef get_coord(bbox):\n temp_box = []\n temp_box_cv = []\n temp_box.append([bbox[\"boundingBox\"]['vertices'][0]['x'],bbox[\"boundingBox\"]['vertices'][0]['y']])\n temp_box.append([bbox[\"boundingBox\"]['vertices'][1]['x'],bbox[\"boundingBox\"]['vertices'][1]['y']])\n temp_box.append([bbox[\"boundingBox\"]['vertices'][2]['x'],bbox[\"boundingBox\"]['vertices'][2]['y']])\n 
temp_box.append([bbox[\"boundingBox\"]['vertices'][3]['x'],bbox[\"boundingBox\"]['vertices'][3]['y']])\n \n temp_box_cv.append(bbox[\"boundingBox\"]['vertices'][0]['x'])\n temp_box_cv.append(bbox[\"boundingBox\"]['vertices'][0]['y'])\n temp_box_cv.append(bbox[\"boundingBox\"]['vertices'][2]['x'])\n temp_box_cv.append(bbox[\"boundingBox\"]['vertices'][2]['y'])\n temp_box = np.array(temp_box)\n return temp_box,temp_box_cv\ndef frequent_height(page_info):\n text_height = []\n if len(page_info) > 0 :\n for idx, level in enumerate(page_info):\n coord_crop,coord = get_coord(level)\n if len(coord)!=0:\n text_height.append(abs(coord[3]-coord[1]))\n occurence_count = Counter(text_height)\n return occurence_count.most_common(1)[0][0]\n else :\n return 0\ndef remove_space(a):\n return a.replace(\" \", \"\")\n\ndef seq_matcher(tgt_text,gt_text):\n tgt_text = remove_space(tgt_text)\n gt_text = remove_space(gt_text)\n score = SequenceMatcher(None, gt_text, tgt_text).ratio()\n mismatch_count = levenshtein(tgt_text, gt_text)\n match_count = abs(len(gt_text)-mismatch_count)\n score = match_count/len(gt_text)\n \n\n# matchs = list(SequenceMatcher(None, gt_text, tgt_text).get_matching_blocks())\n# match_count=0\n## match_lis = []\n# for match in matchs:\n# match_count = match_count + match.size\n \n message = {\"ground\":True,\"input\":True}\n if score==0.0:\n if len(gt_text)>0 and len(tgt_text)==0:\n message['input'] = \"text missing in tesseract\"\n if len(gt_text)==0 and len(tgt_text)>0:\n message['ground'] = \"text missing in google vision\"\n if score==1.0 and len(gt_text)==0 and len(tgt_text)==0:\n message['ground'] = \"text missing in google vision\"\n message['input'] = \"text missing in tesseract\"\n return score,message,match_count\n\ndef count_mismatch_char(gt ,tgt) :\n count=0\n gt_count = len(gt)\n for i,j in zip(gt,tgt):\n if i==j:\n count=count+1\n mismatch_char = abs(gt_count-count)\n return mismatch_char\ndef correct_region(region):\n box = region['boundingBox']['vertices']\n tmp=0\n \n region['boundingBox']= {'vertices' : [{'x':box[0]['x']-crop_factor,'y':box[0]['y']-crop_factor_y},\\\n {'x':box[1]['x']+crop_factor+tmp,'y':box[1]['y']-crop_factor_y},\\\n {'x':box[2]['x']+crop_factor+tmp,'y':box[2]['y']+crop_factor_y},\\\n {'x':box[3]['x']-crop_factor,'y': box[3]['y']+crop_factor_y}]}\n return region\n \n\n\ndef sort_line(line):\n line['regions'].sort(key=lambda x: x['boundingBox']['vertices'][0]['x'],reverse=False)\n return line\n\n\ndef cell_ocr_word(lang, page_path, line,save_base_path,mode_height):\n cell_text =\"\"\n conf_dicts=[]\n #updated_lines = horzontal_merging(line['regions'])\n dynamic_line = coord_adjustment(page_path,line['regions'] ,save_base_path)\n for word_idx, word in enumerate(dynamic_line):\n word = correct_region(word)\n coord_crop, coord = get_coord(word)\n if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8) \n cell_text = cell_text +\" \" +text\n conf_dicts.extend(conf_dict)\n return cell_text,conf_dicts\n\ndef cell_text_ocr(lang, page_path, line,save_base_path,mode_height):\n cell_text =\"\"\n cell_regions = []\n #updated_lines = horzontal_merging(line['regions'])\n for word_idx, word in enumerate(line['regions']):\n word = correct_region(word)\n coord_crop, coord = get_coord(word)\n if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8) \n cell_text = 
cell_text +\" \" +text\n return cell_text\n\ndef cell_ocr(lang, page_path, line,save_base_path,mode_height,psm):\n text =\"\"\n cell_google_text = \"\"\n conf_dicts = []\n updated_lines = horzontal_merging(line['regions'])\n dynamic_line = coord_adjustment(page_path,updated_lines ,save_base_path)\n \n for updated_line in dynamic_line:\n line_text = updated_line['text']\n cell_google_text= cell_google_text + \" \"+line_text\n corrected_line = correct_region(updated_line)\n coord_crop, coord = get_coord(corrected_line)\n if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n tess_text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,psm) \n text = text + \" \" + tess_text\n conf_dicts.extend(conf_dict)\n \n return cell_google_text,text,conf_dicts\n\ndef text_extraction(df,lang, page_path, regions,save_base_path):\n final_score = 0\n total_words = 0\n total_lines = 0\n total_chars = 0\n total_match_chars = 0\n for idx, level in enumerate(regions):\n mode_height = frequent_height(level['regions'])\n\n if ocr_level==\"WORD\":\n for line_idx, line in enumerate(level['regions']):\n #word_regions = coord_adjustment(page_path, line['regions'],save_base_path)\n for word_idx, word in enumerate(line['regions']):\n word = correct_region(word)\n coord_crop, coord = get_coord(word)\n word_text = word['text']\n if len(word_text)>0 and len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8)\n if text_processing:\n text_list = text.split()\n text = \" \".join(text_list)\n score,message,match_count = seq_matcher(text,word['text'])\n final_score = final_score+score\n total_words = total_words+1\n total_chars = total_chars+len(remove_space(word['text']))\n total_match_chars= total_match_chars+match_count\n word['char_match'] = match_count\n word['tess_text'] = text\n word['conf_dict'] = conf_dict\n word['score'] = score\n word['message'] = message\n columns = word.keys()\n df2 = pd.DataFrame([word],columns=columns)\n df = df.append(df2, ignore_index=True)\n elif len(word_text)>0:\n score,message,match_count = seq_matcher(\"\",word['text'])\n word['char_match'] = match_count\n word['tess_text'] = \" \"\n word['conf_dict'] = None\n word['score'] = score\n word['message'] = message\n columns = word.keys()\n df2 = pd.DataFrame([word],columns=columns)\n df = df.append(df2, ignore_index=True)\n if ocr_level==\"LINE\":\n lines_adjusted = coord_adjustment(page_path, level['regions'],save_base_path)\n for line_idx, line_org in enumerate(lines_adjusted):\n line_sorted = copy.deepcopy(sort_line(line_org))\n line_text,total_word = merger_text(line_sorted)\n line = copy.deepcopy(correct_region(line_sorted))\n psm = 7\n \n if total_word<2:\n #print(line_text)\n psm=8\n coord_crop, coord = get_coord(line)\n\n print(\"line text\",line_text)\n if len(remove_space(line_text))>0 and len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n if 'class' in line.keys() and line['class']==\"CELL\":\n line_text,text,conf_dict = cell_ocr(lang, page_path, line,save_base_path,mode_height,psm)\n elif 'class' in line.keys() and line['class']==\"CELL_TEXT\":\n text,conf_dict = cell_ocr_word(lang, page_path, line,save_base_path,mode_height)\n else:\n \n text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,psm)\n \n if text_processing:\n text_list = text.split()\n text = \" \".join(text_list)\n score,message,match_count = 
seq_matcher(text,line_text)\n #if score < 1.0:\n #text, conf_dict,score = check_psm(page_path,coord_crop,lang,mode_height,save_base_path,psm,score,text,line_text,conf_dict)\n final_score = final_score+score\n total_lines = total_lines+1\n total_chars = total_chars+len(remove_space(line_text))\n total_match_chars= total_match_chars+match_count\n line['char_match'] = match_count\n line['tess_text'] = text\n line['text'] = line_text \n line['conf_dict'] = conf_dict\n line['score'] = score\n line['message'] = message\n columns = line.keys()\n df2 = pd.DataFrame([line],columns=columns)\n df = df.append(df2, ignore_index=True)\n elif len(remove_space(line_text))>0:\n score,message,match_count = seq_matcher(\"\",line_text)\n line['char_match'] = match_count\n line['tess_text'] = \" \"\n line['conf_dict'] = None\n line['text'] = line_text\n line['score'] = score\n line['message'] = message\n columns = line.keys()\n df2 = pd.DataFrame([line],columns=columns)\n df = df.append(df2, ignore_index=True)\n\n #return regions,final_score/total_words,df,total_chars,total_match_chars\n return regions,final_score/total_lines,df,total_chars,total_match_chars\n\n\njson_files_path = glob.glob(output_path+\"/*/gv.json\")\n\n\ndef tesseract(json_files):\n \n output = []\n dfs =[]\n for json_file in json_files:\n file_name = json_file.split('/')[-1].split('.json')[0]\n pdf_name = json_file.split('/')[-2]\n print(\"file name--------------------->>>>>>>>>>>>>>>>>>\",pdf_name)\n if not os.path.exists(base_path+pdf_name):\n os.mkdir(base_path+pdf_name)\n save_base_path = base_path+pdf_name\n with open(json_file,'r+') as f:\n data = json.load(f)\n columns = [\"page_path\",\"page_data\",\"file_eval_info\"]\n final_df = pd.DataFrame(columns=columns)\n Draw(data,save_base_path,regions='regions')\n lang = data['outputs'][0]['config']['OCR']['language']\n total_page = len(data['outputs'][0]['pages'])\n file_score = 0; total_chars_file = 0\n file_data = []; total_match_chars_file = 0\n page_paths = []\n page_data_counts = []\n for idx,page_data in enumerate(data['outputs'][0]['pages']):\n t1 = time.time()\n print(\"processing started for page no. 
\",idx)\n page_path = page_data['path']\n regions = page_data['regions'][1:]\n df = pd.DataFrame()\n regions,score,df,total_chars,total_match_chars = text_extraction(df,lang, page_path, regions,save_base_path)\n file_score = file_score + score\n total_chars_file =total_chars_file +total_chars\n total_match_chars_file = total_match_chars_file+total_match_chars\n file_data.append(df.to_csv())\n page_paths.append(page_path)\n char_details = {\"total_chars\":total_chars,\"total_match_chars\":total_match_chars}\n page_data_counts.append(char_details)\n data['outputs'][0]['pages'][idx][\"regions\"][1:] = copy.deepcopy(regions)\n t2 = t1+time.time()\n print(\"processing completed for page in {}\".format(t2))\n file_eval_info = {\"total_chars\":total_chars_file,\"total_match_chars\":total_match_chars_file,\"score\":total_match_chars_file/total_chars_file}\n\n print(file_eval_info)\n final_df[\"page_path\"] = page_paths\n final_df[\"page_data\"] = file_data\n final_df[\"file_eval_info\"] = [file_eval_info]*len(page_paths)\n \n print(\"file level evaluation result------------------->>>>>>>>>>>>>>>>>>>>>>>>>>>\",file_eval_info)\n data['outputs'][0]['score'] = file_score/total_page\n with open(save_base_path+\"/\"+file_name+\".json\", 'w') as outfile:\n json.dump(data, outfile)\n final_df.to_csv(save_base_path+\"/\"+file_name+'.csv')\n return output,final_df\n \n\noutput,dfs = tesseract(json_files_path)\n\n\n\ndef draw_thresh_box(df,path,page_index,save_path):\n path = path.split('upload')[1]\n \n image = download_file(download_url,headers,path,f_type='image')\n nparr = np.frombuffer(image, np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n font = cv2.FONT_HERSHEY_SIMPLEX \n color= (255,0,0);thickness=5\n df =df.reset_index()\n for row in df.iterrows():\n row2 = row[1].to_dict()\n boxes = row2['boundingBox']\n boxes2 = ast.literal_eval(boxes)\n ground = boxes2['vertices']\n \n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n cv2.polylines(image, [np.array(pts)],True, color, thickness)\n cv2.putText(image, str(row2['text']), (pts[0][0],pts[0][1]), font, \n 2, (0,0,255), 2, cv2.LINE_AA)\n cv2.putText(image, str(row2['tess_text']), (pts[1][0],pts[1][1]), font, \n 2, (0,255,0), 2, cv2.LINE_AA)\n\n image_path = os.path.join(save_path , '{}.png'.format(page_index)) \n cv2.imwrite(image_path , image)\n\ndef visualize_results(df_paths,thresh):\n for df_path in glob.glob(df_paths+\"*/*.csv\"):\n save_path = base_path + df_path.split('/')[-2]+\"/\"\n \n df = pd.read_csv(df_path)\n for idx,(page_path,page_data) in enumerate(zip(df['page_path'],df['page_data'])):\n df_string = StringIO(page_data)\n page_df = pd.read_csv(df_string, sep=\",\")\n filtered_df = page_df[page_df['score']<thresh]\n draw_thresh_box(filtered_df,page_path,idx,save_path)\n \nvisualize_results(base_path,vis_thresh)\n\n\n\n\n\n",
"# Dataset utils and dataloaders\n\nimport glob\nimport logging\nimport math\nimport os\nimport random\nimport shutil\nimport time\nfrom itertools import repeat\nfrom multiprocessing.pool import ThreadPool\nfrom pathlib import Path\nfrom threading import Thread\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image, ExifTags\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\nfrom src.utilities.yolov5.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \\\n clean_str\nfrom src.utilities.yolov5.torch_utils import torch_distributed_zero_first\n\n# Parameters\nhelp_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'\nimg_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp'] # acceptable image suffixes\nvid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes\nlogger = logging.getLogger(__name__)\n\n# Get orientation exif tag\nfor orientation in ExifTags.TAGS.keys():\n if ExifTags.TAGS[orientation] == 'Orientation':\n break\n\n\ndef get_hash(files):\n # Returns a single hash value of a list of files\n return sum(os.path.getsize(f) for f in files if os.path.isfile(f))\n\n\ndef exif_size(img):\n # Returns exif-corrected PIL size\n s = img.size # (width, height)\n try:\n rotation = dict(img._getexif().items())[orientation]\n if rotation == 6: # rotation 270\n s = (s[1], s[0])\n elif rotation == 8: # rotation 90\n s = (s[1], s[0])\n except:\n pass\n\n return s\n\n\ndef create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,\n rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):\n # Make sure only the first process in DDP process the dataset first, and the following others can use the cache\n with torch_distributed_zero_first(rank):\n dataset = LoadImagesAndLabels(path, imgsz, batch_size,\n augment=augment, # augment images\n hyp=hyp, # augmentation hyperparameters\n rect=rect, # rectangular training\n cache_images=cache,\n single_cls=opt.single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None\n loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader\n # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()\n dataloader = loader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)\n return dataloader, dataset\n\n\nclass InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):\n \"\"\" Dataloader that reuses workers\n\n Uses same syntax as vanilla DataLoader\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))\n self.iterator = super().__iter__()\n\n def __len__(self):\n return len(self.batch_sampler.sampler)\n\n def __iter__(self):\n for i in range(len(self)):\n yield next(self.iterator)\n\n\nclass _RepeatSampler(object):\n \"\"\" Sampler that repeats forever\n\n Args:\n sampler (Sampler)\n \"\"\"\n\n def __init__(self, sampler):\n 
self.sampler = sampler\n\n def __iter__(self):\n while True:\n yield from iter(self.sampler)\n\n\nclass LoadImages: # for inference\n def __init__(self, path, img_size=640, stride=32):\n\n if os.path.isfile(path):\n files = [path] # files\n else:\n raise Exception(f'ERROR: {path} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in img_formats]\n videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {path}. ' \\\n f'Supported formats are:\\nimages: {img_formats}\\nvideos: {vid_formats}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, 'Image Not Found ' + path\n print(f'image {self.count}/{self.nf} {path}: ', end='')\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride)[0]\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files\n\n\nclass LoadWebcam: # for inference\n def __init__(self, pipe='0', img_size=640, stride=32):\n self.img_size = img_size\n self.stride = stride\n\n if pipe.isnumeric():\n pipe = eval(pipe) # local camera\n # pipe = 'rtsp://192.168.1.64/1' # IP camera\n # pipe = 'rtsp://username:[email protected]/1' # IP camera with login\n # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera\n\n self.pipe = pipe\n self.cap = cv2.VideoCapture(pipe) # video capture object\n self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if cv2.waitKey(1) == ord('q'): # q to quit\n self.cap.release()\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Read frame\n if self.pipe == 0: # local camera\n ret_val, img0 = self.cap.read()\n img0 = cv2.flip(img0, 1) # flip left-right\n else: # IP camera\n n = 0\n while True:\n n += 1\n self.cap.grab()\n if n % 30 == 0: # skip frames\n ret_val, img0 = self.cap.retrieve()\n if ret_val:\n break\n\n # Print\n assert ret_val, f'Camera Error {self.pipe}'\n img_path = 'webcam.jpg'\n print(f'webcam {self.count}: ', end='')\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride)[0]\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n\n return img_path, img, img0, None\n\n def __len__(self):\n return 0\n\n\nclass LoadStreams: # multiple IP or RTSP cameras\n def __init__(self, sources='streams.txt', img_size=640, stride=32):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources, 'r') as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs = [None] * n\n 
self.sources = [clean_str(x) for x in sources] # clean source names for later\n for i, s in enumerate(sources):\n # Start the thread to read frames from the video stream\n print(f'{i + 1}/{n}: {s}... ', end='')\n cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)\n assert cap.isOpened(), f'Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = cap.get(cv2.CAP_PROP_FPS) % 100\n _, self.imgs[i] = cap.read() # guarantee first frame\n thread = Thread(target=self.update, args=([i, cap]), daemon=True)\n print(f' success ({w}x{h} at {fps:.2f} FPS).')\n thread.start()\n print('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')\n\n def update(self, index, cap):\n # Read next stream frame in a daemon thread\n n = 0\n while cap.isOpened():\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n == 4: # read every 4th frame\n success, im = cap.retrieve()\n self.imgs[index] = im if success else self.imgs[index] * 0\n n = 0\n time.sleep(0.01) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n img0 = self.imgs.copy()\n if cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None\n\n def __len__(self):\n return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years\n\n\ndef img2label_paths(img_paths):\n # Define label paths as a function of image paths\n sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings\n return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]\n\n\nclass LoadImagesAndLabels(Dataset): # for training/testing\n def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,\n cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):\n self.img_size = img_size\n self.augment = augment\n self.hyp = hyp\n self.image_weights = image_weights\n self.rect = False if image_weights else rect\n self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)\n self.mosaic_border = [-img_size // 2, -img_size // 2]\n self.stride = stride\n self.path = path\n\n try:\n f = [] # image files\n for p in path if isinstance(path, list) else [path]:\n p = Path(p) # os-agnostic\n if p.is_dir(): # dir\n f += glob.glob(str(p / '**' / '*.*'), recursive=True)\n # f = list(p.rglob('**/*.*')) # pathlib\n elif p.is_file(): # file\n with open(p, 'r') as t:\n t = t.read().strip().splitlines()\n parent = str(p.parent) + os.sep\n f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path\n # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)\n else:\n raise Exception(f'{prefix}{p} does not exist')\n self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])\n 
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib\n assert self.img_files, f'{prefix}No images found'\n except Exception as e:\n raise Exception(f'{prefix}Error loading data from {path}: {e}\\nSee {help_url}')\n\n # Check cache\n self.label_files = img2label_paths(self.img_files) # labels\n cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels\n if cache_path.is_file():\n cache, exists = torch.load(cache_path), True # load\n if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed\n cache, exists = self.cache_labels(cache_path, prefix), False # re-cache\n else:\n cache, exists = self.cache_labels(cache_path, prefix), False # cache\n\n # Display cache\n nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total\n if exists:\n d = f\"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted\"\n tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results\n assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'\n\n # Read cache\n cache.pop('hash') # remove hash\n cache.pop('version') # remove version\n labels, shapes, self.segments = zip(*cache.values())\n self.labels = list(labels)\n self.shapes = np.array(shapes, dtype=np.float64)\n self.img_files = list(cache.keys()) # update\n self.label_files = img2label_paths(cache.keys()) # update\n if single_cls:\n for x in self.labels:\n x[:, 0] = 0\n\n n = len(shapes) # number of images\n bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index\n nb = bi[-1] + 1 # number of batches\n self.batch = bi # batch index of image\n self.n = n\n self.indices = range(n)\n\n # Rectangular Training\n if self.rect:\n # Sort by aspect ratio\n s = self.shapes # wh\n ar = s[:, 1] / s[:, 0] # aspect ratio\n irect = ar.argsort()\n self.img_files = [self.img_files[i] for i in irect]\n self.label_files = [self.label_files[i] for i in irect]\n self.labels = [self.labels[i] for i in irect]\n self.shapes = s[irect] # wh\n ar = ar[irect]\n\n # Set training image shapes\n shapes = [[1, 1]] * nb\n for i in range(nb):\n ari = ar[bi == i]\n mini, maxi = ari.min(), ari.max()\n if maxi < 1:\n shapes[i] = [maxi, 1]\n elif mini > 1:\n shapes[i] = [1, 1 / mini]\n\n self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride\n\n # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)\n self.imgs = [None] * n\n if cache_images:\n gb = 0 # Gigabytes of cached images\n self.img_hw0, self.img_hw = [None] * n, [None] * n\n results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads\n pbar = tqdm(enumerate(results), total=n)\n for i, x in pbar:\n self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)\n gb += self.imgs[i].nbytes\n pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'\n\n def cache_labels(self, path=Path('./labels.cache'), prefix=''):\n # Cache dataset labels, check images and read shapes\n x = {} # dict\n nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate\n pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))\n for i, (im_file, lb_file) in enumerate(pbar):\n try:\n # verify images\n im = Image.open(im_file)\n im.verify() # PIL verify\n shape = exif_size(im) # 
image size\n segments = [] # instance segments\n assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'\n assert im.format.lower() in img_formats, f'invalid image format {im.format}'\n\n # verify labels\n if os.path.isfile(lb_file):\n nf += 1 # label found\n with open(lb_file, 'r') as f:\n l = [x.split() for x in f.read().strip().splitlines()]\n if any([len(x) > 8 for x in l]): # is segment\n classes = np.array([x[0] for x in l], dtype=np.float32)\n segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)\n l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)\n l = np.array(l, dtype=np.float32)\n if len(l):\n assert l.shape[1] == 5, 'labels require 5 columns each'\n assert (l >= 0).all(), 'negative labels'\n assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'\n assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'\n else:\n ne += 1 # label empty\n l = np.zeros((0, 5), dtype=np.float32)\n else:\n nm += 1 # label missing\n l = np.zeros((0, 5), dtype=np.float32)\n x[im_file] = [l, shape, segments]\n except Exception as e:\n nc += 1\n print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')\n\n pbar.desc = f\"{prefix}Scanning '{path.parent / path.stem}' for images and labels... \" \\\n f\"{nf} found, {nm} missing, {ne} empty, {nc} corrupted\"\n\n if nf == 0:\n print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')\n\n x['hash'] = get_hash(self.label_files + self.img_files)\n x['results'] = nf, nm, ne, nc, i + 1\n x['version'] = 0.1 # cache version\n torch.save(x, path) # save for next time\n logging.info(f'{prefix}New cache created: {path}')\n return x\n\n def __len__(self):\n return len(self.img_files)\n\n # def __iter__(self):\n # self.count = -1\n # print('ran dataset iter')\n # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)\n # return self\n\n def __getitem__(self, index):\n index = self.indices[index] # linear, shuffled, or image_weights\n\n hyp = self.hyp\n mosaic = self.mosaic and random.random() < hyp['mosaic']\n if mosaic:\n # Load mosaic\n img, labels = load_mosaic(self, index)\n shapes = None\n\n # MixUp https://arxiv.org/pdf/1710.09412.pdf\n if random.random() < hyp['mixup']:\n img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))\n r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0\n img = (img * r + img2 * (1 - r)).astype(np.uint8)\n labels = np.concatenate((labels, labels2), 0)\n\n else:\n # Load image\n img, (h0, w0), (h, w) = load_image(self, index)\n\n # Letterbox\n shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape\n img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)\n shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling\n\n labels = self.labels[index].copy()\n if labels.size: # normalized xywh to pixel xyxy format\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])\n\n if self.augment:\n # Augment imagespace\n if not mosaic:\n img, labels = random_perspective(img, labels,\n degrees=hyp['degrees'],\n translate=hyp['translate'],\n scale=hyp['scale'],\n shear=hyp['shear'],\n perspective=hyp['perspective'])\n\n # Augment colorspace\n augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])\n\n # Apply cutouts\n # if random.random() < 0.9:\n # labels = cutout(img, labels)\n\n nL = len(labels) # 
number of labels\n if nL:\n labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh\n labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1\n labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1\n\n if self.augment:\n # flip up-down\n if random.random() < hyp['flipud']:\n img = np.flipud(img)\n if nL:\n labels[:, 2] = 1 - labels[:, 2]\n\n # flip left-right\n if random.random() < hyp['fliplr']:\n img = np.fliplr(img)\n if nL:\n labels[:, 1] = 1 - labels[:, 1]\n\n labels_out = torch.zeros((nL, 6))\n if nL:\n labels_out[:, 1:] = torch.from_numpy(labels)\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n\n return torch.from_numpy(img), labels_out, self.img_files[index], shapes\n\n @staticmethod\n def collate_fn(batch):\n img, label, path, shapes = zip(*batch) # transposed\n for i, l in enumerate(label):\n l[:, 0] = i # add target image index for build_targets()\n return torch.stack(img, 0), torch.cat(label, 0), path, shapes\n\n @staticmethod\n def collate_fn4(batch):\n img, label, path, shapes = zip(*batch) # transposed\n n = len(shapes) // 4\n img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]\n\n ho = torch.tensor([[0., 0, 0, 1, 0, 0]])\n wo = torch.tensor([[0., 0, 1, 0, 0, 0]])\n s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale\n for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW\n i *= 4\n if random.random() < 0.5:\n im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[\n 0].type(img[i].type())\n l = label[i]\n else:\n im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)\n l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s\n img4.append(im)\n label4.append(l)\n\n for i, l in enumerate(label4):\n l[:, 0] = i # add target image index for build_targets()\n\n return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4\n\n\n# Ancillary functions --------------------------------------------------------------------------------------------------\ndef load_image(self, index):\n # loads 1 image from dataset, returns img, original hw, resized hw\n img = self.imgs[index]\n if img is None: # not cached\n path = self.img_files[index]\n img = cv2.imread(path) # BGR\n assert img is not None, 'Image Not Found ' + path\n h0, w0 = img.shape[:2] # orig hw\n r = self.img_size / max(h0, w0) # resize image to img_size\n if r != 1: # always resize down, only resize up if training with augmentation\n interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR\n img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)\n return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized\n else:\n return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized\n\n\ndef augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))\n dtype = img.dtype # uint8\n\n x = np.arange(0, 256, dtype=np.int16)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)\n cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed\n\n\ndef hist_equalize(img, clahe=True, bgr=False):\n # 
Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255\n yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)\n if clahe:\n c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n yuv[:, :, 0] = c.apply(yuv[:, :, 0])\n else:\n yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram\n return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB\n\n\ndef load_mosaic(self, index):\n # loads images in a 4-mosaic\n\n labels4, segments4 = [], []\n s = self.img_size\n yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y\n indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices\n for i, index in enumerate(indices):\n # Load image\n img, _, (h, w) = load_image(self, index)\n\n # place img in img4\n if i == 0: # top left\n img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles\n x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)\n elif i == 1: # top right\n x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n elif i == 2: # bottom left\n x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)\n elif i == 3: # bottom right\n x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]\n padw = x1a - x1b\n padh = y1a - y1b\n\n # Labels\n labels, segments = self.labels[index].copy(), self.segments[index].copy()\n if labels.size:\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format\n segments = [xyn2xy(x, w, h, padw, padh) for x in segments]\n labels4.append(labels)\n segments4.extend(segments)\n\n # Concat/clip labels\n labels4 = np.concatenate(labels4, 0)\n for x in (labels4[:, 1:], *segments4):\n np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()\n # img4, labels4 = replicate(img4, labels4) # replicate\n\n # Augment\n img4, labels4 = random_perspective(img4, labels4, segments4,\n degrees=self.hyp['degrees'],\n translate=self.hyp['translate'],\n scale=self.hyp['scale'],\n shear=self.hyp['shear'],\n perspective=self.hyp['perspective'],\n border=self.mosaic_border) # border to remove\n\n return img4, labels4\n\n\ndef load_mosaic9(self, index):\n # loads images in a 9-mosaic\n\n labels9, segments9 = [], []\n s = self.img_size\n indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices\n for i, index in enumerate(indices):\n # Load image\n img, _, (h, w) = load_image(self, index)\n\n # place img in img9\n if i == 0: # center\n img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles\n h0, w0 = h, w\n c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates\n elif i == 1: # top\n c = s, s - h, s + w, s\n elif i == 2: # top right\n c = s + wp, s - h, s + wp + w, s\n elif i == 3: # right\n c = s + w0, s, s + w0 + w, s + h\n elif i == 4: # bottom right\n c = s + w0, s + hp, s + w0 + w, s + hp + h\n elif i == 5: # bottom\n c = s + w0 - w, s + h0, s + w0, s + h0 + h\n elif 
i == 6: # bottom left\n c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h\n elif i == 7: # left\n c = s - w, s + h0 - h, s, s + h0\n elif i == 8: # top left\n c = s - w, s + h0 - hp - h, s, s + h0 - hp\n\n padx, pady = c[:2]\n x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords\n\n # Labels\n labels, segments = self.labels[index].copy(), self.segments[index].copy()\n if labels.size:\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format\n segments = [xyn2xy(x, w, h, padx, pady) for x in segments]\n labels9.append(labels)\n segments9.extend(segments)\n\n # Image\n img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]\n hp, wp = h, w # height, width previous\n\n # Offset\n yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y\n img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]\n\n # Concat/clip labels\n labels9 = np.concatenate(labels9, 0)\n labels9[:, [1, 3]] -= xc\n labels9[:, [2, 4]] -= yc\n c = np.array([xc, yc]) # centers\n segments9 = [x - c for x in segments9]\n\n for x in (labels9[:, 1:], *segments9):\n np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()\n # img9, labels9 = replicate(img9, labels9) # replicate\n\n # Augment\n img9, labels9 = random_perspective(img9, labels9, segments9,\n degrees=self.hyp['degrees'],\n translate=self.hyp['translate'],\n scale=self.hyp['scale'],\n shear=self.hyp['shear'],\n perspective=self.hyp['perspective'],\n border=self.mosaic_border) # border to remove\n\n return img9, labels9\n\n\ndef replicate(img, labels):\n # Replicate labels\n h, w = img.shape[:2]\n boxes = labels[:, 1:].astype(int)\n x1, y1, x2, y2 = boxes.T\n s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)\n for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices\n x1b, y1b, x2b, y2b = boxes[i]\n bh, bw = y2b - y1b, x2b - x1b\n yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y\n x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]\n img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]\n labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)\n\n return img, labels\n\n\ndef letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n # Resize and pad image while meeting stride-multiple constraints\n shape = img.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add 
border\n return img, ratio, (dw, dh)\n\n\ndef random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,\n border=(0, 0)):\n # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))\n # targets = [cls, xyxy]\n\n height = img.shape[0] + border[0] * 2 # shape(h,w,c)\n width = img.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -img.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -img.shape[0] / 2 # y translation (pixels)\n\n # Perspective\n P = np.eye(3)\n P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)\n P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(1 - scale, 1 + scale)\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)\n T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n if perspective:\n img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))\n else: # affine\n img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n # Visualize\n # import matplotlib.pyplot as plt\n # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n # ax[0].imshow(img[:, :, ::-1]) # base\n # ax[1].imshow(img2[:, :, ::-1]) # warped\n\n # Transform label coordinates\n n = len(targets)\n if n:\n use_segments = any(x.any() for x in segments)\n new = np.zeros((n, 4))\n if use_segments: # warp segments\n segments = resample_segments(segments) # upsample\n for i, segment in enumerate(segments):\n xy = np.ones((len(segment), 3))\n xy[:, :2] = segment\n xy = xy @ M.T # transform\n xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine\n\n # clip\n new[i] = segment2box(xy, width, height)\n\n else: # warp boxes\n xy = np.ones((n * 4, 3))\n xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = xy @ M.T # transform\n xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # clip\n new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)\n new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)\n\n # filter candidates\n i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)\n targets = targets[i]\n targets[:, 1:5] = new[i]\n\n return img, targets\n\n\ndef box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)\n # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio\n w1, h1 = box1[2] - 
box1[0], box1[3] - box1[1]\n w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio\n return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates\n\n\ndef cutout(image, labels):\n # Applies image cutout augmentation https://arxiv.org/abs/1708.04552\n h, w = image.shape[:2]\n\n def bbox_ioa(box1, box2):\n # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2\n box2 = box2.transpose()\n\n # Get the coordinates of bounding boxes\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n\n # Intersection area\n inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \\\n (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)\n\n # box2 area\n box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16\n\n # Intersection over box2 area\n return inter_area / box2_area\n\n # create random masks\n scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction\n for s in scales:\n mask_h = random.randint(1, int(h * s))\n mask_w = random.randint(1, int(w * s))\n\n # box\n xmin = max(0, random.randint(0, w) - mask_w // 2)\n ymin = max(0, random.randint(0, h) - mask_h // 2)\n xmax = min(w, xmin + mask_w)\n ymax = min(h, ymin + mask_h)\n\n # apply random color mask\n image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]\n\n # return unobscured labels\n if len(labels) and s > 0.03:\n box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n labels = labels[ioa < 0.60] # remove >60% obscured labels\n\n return labels\n\n\ndef create_folder(path='./new'):\n # Create folder\n if os.path.exists(path):\n shutil.rmtree(path) # delete output folder\n os.makedirs(path) # make new output folder\n\n\ndef flatten_recursive(path='../coco128'):\n # Flatten a recursive directory by bringing all files to top level\n new_path = Path(path + '_flat')\n create_folder(new_path)\n for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):\n shutil.copyfile(file, new_path / Path(file).name)\n\n\ndef extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')\n # Convert detection dataset into classification dataset, with one directory per class\n\n path = Path(path) # images dir\n shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing\n files = list(path.rglob('*.*'))\n n = len(files) # number of files\n for im_file in tqdm(files, total=n):\n if im_file.suffix[1:] in img_formats:\n # image\n im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB\n h, w = im.shape[:2]\n\n # labels\n lb_file = Path(img2label_paths([str(im_file)])[0])\n if Path(lb_file).exists():\n with open(lb_file, 'r') as f:\n lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels\n\n for j, x in enumerate(lb):\n c = int(x[0]) # class\n f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename\n if not f.parent.is_dir():\n f.parent.mkdir(parents=True)\n\n b = x[1:] * [w, h, w, h] # box\n # b[2:] = b[2:].max() # rectangle to square\n b[2:] = b[2:] * 1.2 + 3 # pad\n b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)\n\n b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image\n b[[1, 3]] = np.clip(b[[1, 3]], 0, h)\n assert 
cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'\n\n\ndef autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')\n \"\"\" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files\n # Arguments\n path: Path to images directory\n weights: Train, val, test weights (list)\n \"\"\"\n path = Path(path) # images dir\n files = list(path.rglob('*.*'))\n n = len(files) # number of files\n indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split\n txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files\n [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing\n for i, img in tqdm(zip(indices, files), total=n):\n if img.suffix[1:] in img_formats:\n with open(path / txt[i], 'a') as f:\n f.write(str(img) + '\\n') # add image to txt file\n",
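The letterbox() function defined above is the core pre-processing step of this loader: it scales the image so it fits new_shape while preserving aspect ratio, then pads the borders so both output dimensions are stride multiples. A minimal usage sketch; 'sample.jpg' is a hypothetical path used only for illustration.

import cv2

img0 = cv2.imread('sample.jpg')  # HWC, BGR
img, ratio, (dw, dh) = letterbox(img0, new_shape=640, stride=32)
# With auto=True (the default) both output dimensions are multiples of the
# stride, and (dw, dh) is the padding that was added on each side.
print(img.shape, ratio, (dw, dh))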
"import cv2\nimport numpy as np\nimport pandas as pd\n#import logging\n#import os\n#import config\n#import uuid\n\n\nclass TableRepositories:\n def __init__(self, filepath, rect=None, SORT_METHOD='top-to-bottom', MAX_THRESHOLD_VALUE=255, BLOCK_SIZE=15,\n THRESHOLD_CONSTANT=0, SCALE=10):\n '''\n :param filepath: absolute path of input image file , or a grayscale image as a numpy array\n :param SORT_METHOD: order of indexing of cells in a table\n :param BLOCK_SIZE: size of neighbourhood taken in account for calculating adaptive threshold\n :param THRESHOLD_CONSTANT: offset used for adaptive thresholding\n :param SCALE: size of pattern finding kernel (line elements in this case)\n '''\n\n self.image_path = filepath\n self.rect = rect\n self.response = {\"response\": {\"tables\": []}}\n self.MAX_THRESHOLD_VALUE = MAX_THRESHOLD_VALUE\n self.BLOCK_SIZE = BLOCK_SIZE\n self.THRESHOLD_CONSTANT = THRESHOLD_CONSTANT\n self.SCALE = SCALE\n self.SORT_METHOD = SORT_METHOD\n\n self.load_image ()\n self.get_table_mask ()\n self.table_indexing ()\n\n def load_image(self):\n\n IMAGE_BUFFER = 10\n if type (self.image_path) == str:\n image = cv2.imread (self.image_path, 0)\n else:\n image = self.image_path\n self.input_image = image # [self.rect['y']-IMAGE_BUFFER:self.rect['y']+self.rect['h']+IMAGE_BUFFER,self.rect['x']-IMAGE_BUFFER:self.rect['x']+self.rect['w']+IMAGE_BUFFER]\n self.slate = np.zeros (self.input_image.shape)\n\n def get_table_mask(self):\n #binarization of image\n filtered = cv2.adaptiveThreshold (~self.input_image, self.MAX_THRESHOLD_VALUE, cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY, self.BLOCK_SIZE, self.THRESHOLD_CONSTANT)\n self.filtered = filtered\n # Finding srtuctre elements (horizontal and vertical lines)\n horizontal = filtered.copy()\n vertical = filtered.copy()\n\n horizontal_size = int (horizontal.shape [1] / self.SCALE)\n horizontal_structure = cv2.getStructuringElement (cv2.MORPH_RECT, (horizontal_size, 1))\n horizontal = cv2.erode (horizontal, horizontal_structure)\n horizontal = cv2.dilate (horizontal, horizontal_structure)\n\n horizontal_size = 5 # int(horizontal.shape[1] / (self.SCALE * 2))\n horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))\n horizontal = cv2.dilate(horizontal, horizontal_structure)\n # horizontal = cv2.dilate(horizontal, horizontal_structure)\n #horizontal = cv2.dilate(horizontal, horizontal_structure)\n\n #height_to_width_ratio = self.input_image.shape[0] / float(self.input_image.shape[1])\n #print(height_to_width_ratio)\n #vertical_size = int (vertical.shape [0] / (self.SCALE * height_to_width_ratio))\n #print(vertical_size , 'vetical_size')\n vertical_size = int (vertical.shape [0] / (self.SCALE * 4 ))\n vertical_structure = cv2.getStructuringElement (cv2.MORPH_RECT, (1, vertical_size))\n vertical = cv2.erode (vertical, vertical_structure)\n vertical = cv2.dilate (vertical, vertical_structure)\n #vertical = cv2.dilate(vertical, vertical_structure)\n\n\n # generating table borders\n self.mask = horizontal + vertical\n self.intersections = cv2.bitwise_and(horizontal, vertical)\n\n def sort_contours(self, cnts):\n contours_list =[]\n for c in cnts:\n x, y, w, h = cv2.boundingRect(c)\n contours_list.append([x, y, w, h])\n contours_df = pd.DataFrame(contours_list, columns=['left', 'top', 'width', 'height'])\n contours_df = contours_df.sort_values(by=['top'])\n sorted_contours = self.sort_contours_helper(contours_df, [])\n return sorted_contours\n\n\n def sort_contours_helper(self,contours_df, sorted_contours=[]):\n\n 
check_y = contours_df.iloc[0]['top']\n spacing_threshold = 10 #contours_df.iloc[0]['height'] *0.5\n\n same_line = contours_df[abs(contours_df['top'] - check_y) < spacing_threshold ]\n next_lines = contours_df[abs(contours_df['top'] - check_y) >=spacing_threshold]\n sort_lines = same_line.sort_values(by=['left'])\n for index, row in sort_lines.iterrows():\n sorted_contours.append(row)\n if len(next_lines) > 0:\n self.sort_contours_helper(next_lines, sorted_contours)\n\n return sorted_contours\n\n\n\n\n\n\n\n def draw_contours_index(self, contours, img):\n '''\n\n :param contours: contours present cropped fraction of mask image\n :param img: cropped portion of mask image having one table (in case when input image has multiple tables )\n :return: image indexed with cell location, list of bounding box coordinates of every individual cell\n '''\n image_area = img.shape [0] * img.shape [1]\n draw_conts = np.zeros (img.shape)\n # margin = 10\n midpoints = []\n rects = []\n xi, yi = 0, 0\n #count_contours = len (contours)\n #for i in range (count_contours):\n for contour in contours:\n #cont_area = cv2.contourArea (contours [count_contours - i -1])\n #x1, y1, w1, h1 = cv2.boundingRect (contours [count_contours - i - 1])\n\n cont_area = contour['height'] * contour['width']\n x1, y1, w1, h1 = contour['left'] ,contour['top'] , contour['width'] , contour['height']\n\n\n area_ratio = cont_area / float(image_area)\n #print(area_ratio, i)\n\n # filtering out lines and noise\n if (area_ratio < 0.8) & (h1 > 5 ):\n midpoint = [int (x1 + w1 / 2), int (y1 + h1 / 2)] # np.mean(contours[i],axis=0)\n midpoints.append (midpoint)\n if len (midpoints) > 1:\n shift = midpoints [-1] [1] - midpoints [-2] [1]\n shift = abs(shift)\n\n # Detecting change in column by measuring difference in x coordinate of current and previous cell\n # (cells already sored based on their coordinates)\n if shift < 10: # h1*0.5:\n xi = xi + 1\n else:\n xi = 0\n yi = yi + 1\n rects.append ({\"x\": int(x1), \"y\": int(y1), \"w\": int(w1), \"h\": int(h1), \"index\": (int(yi), int(xi))})\n cv2.rectangle (draw_conts, (x1, y1), (x1 + w1, y1 + h1), 255, 1)\n cv2.putText (draw_conts, str ((xi, yi)), (int (midpoint [0]), int (midpoint [1])),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.3, 255, 1, cv2.LINE_AA)\n #cv2.imwrite('out/slate' + str(i) + '.png' , draw_conts)\n return draw_conts, rects\n\n def end_point_correction(self,x,y,w,h,margin):\n #check if after adding margin the endopints are still inside the image\n\n ymax = self.input_image.shape [0]\n xmax = self.input_image.shape [1]\n\n if (y - margin) < 0:\n ystart = 0\n else :\n ystart = y - margin\n if (y + h + margin) > ymax :\n yend = ymax\n else :\n yend = y + h + margin\n if (x - margin) < 0:\n xstart = 0\n else :\n xstart = x - margin\n if (x + w + margin) > xmax :\n xend = xmax\n else :\n xend = x + w + margin\n\n return ystart,yend, xstart,xend\n\n def table_indexing(self):\n\n # list_of_tables = []\n image_area = float (self.input_image.shape [0] * self.input_image.shape [1])\n\n # finding all the tables in the image, cv2.RETR_EXTERNAL gives only the outermost border of an\n # enclosed figure.\n contours = cv2.findContours (self.mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = contours [0] if len (contours) == 2 else contours [1]\n\n if len (contours) > 0:\n # Indexing one table at a time\n for c in contours:\n x, y, w, h = cv2.boundingRect (c)\n area_ratio = (w * h) / image_area\n\n # Filtering for noise\n if (area_ratio < 0.9) & (area_ratio > 0.005):\n #if (area_ratio < 1.0) & 
(area_ratio > 0.01):\n margin = 2\n #check if after adding margin the endopints are still inside the image\n ystart,yend, xstart,xend= self.end_point_correction(x,y,w,h,margin)\n table_dic = {\"x\": int(xstart), \"y\": int(ystart), \"w\": int(xend-xstart), \"h\": int(yend-ystart)}\n\n crop_fraction = self.mask[ystart: yend, xstart:xend]\n\n sub_contours = cv2.findContours (crop_fraction, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n sub_contours = sub_contours [0] if len (sub_contours) == 2 else sub_contours [1]\n sorted_conts = self.sort_contours(sub_contours)\n\n indexed_sub_image, rects = self.draw_contours_index (sorted_conts, img=crop_fraction)\n table_dic ['rect'] = rects\n if len(rects) > 0 :\n self.response [\"response\"] [\"tables\"].append (table_dic)\n\n\n # self.slate stores an image indexed with cell location for all available tables\n self.slate[ystart: yend, xstart:xend] = indexed_sub_image\n\n #cv2.imwrite ('out/slate.png', self.slate)\n #cv2.imwrite ('out/mask.png', self.mask)\n #cv2.imwrite ('out/filtered.png', self.filtered)\n"
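A minimal usage sketch for TableRepositories above; 'page.png' is a hypothetical scanned-page path. Detection and indexing run in the constructor, so the results are read straight from the response attribute.

repos = TableRepositories('page.png', SCALE=10)
for table in repos.response['response']['tables']:
    # outer bounding box of the detected table
    print('table at', table['x'], table['y'], table['w'], table['h'])
    for cell in table['rect']:
        # 'index' is the (row, column) tuple assigned by draw_contours_index
        print('  cell', cell['index'], cell['x'], cell['y'], cell['w'], cell['h'])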
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.frombuffer",
"numpy.float32",
"numpy.array"
],
[
"numpy.minimum",
"torch.zeros",
"torch.cat",
"torch.load",
"numpy.flipud",
"numpy.concatenate",
"torch.save",
"numpy.random.beta",
"torch.utils.data.distributed.DistributedSampler",
"numpy.clip",
"numpy.fliplr",
"numpy.arange",
"numpy.eye",
"numpy.unique",
"torch.from_numpy",
"numpy.stack",
"torch.tensor",
"numpy.full",
"numpy.zeros",
"numpy.ascontiguousarray",
"numpy.append",
"torch.stack",
"numpy.array",
"numpy.maximum",
"numpy.ones",
"numpy.mod",
"numpy.random.uniform"
],
[
"numpy.zeros",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
phanvanthinh98/keras_LSTM | [
"b22cff1e9fd762226ec3dc9d3af3e300484dd833",
"2d551d31915906120f3f6a3ed3c4de94ba4bb288",
"2d551d31915906120f3f6a3ed3c4de94ba4bb288",
"b22cff1e9fd762226ec3dc9d3af3e300484dd833"
] | [
"keras/wrappers/scikit_learn.py",
"keras/distribute/custom_training_loop_optimizer_test.py",
"keras/mixed_precision/loss_scale_optimizer.py",
"keras/legacy_tf_layers/core.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Wrapper for using the Scikit-Learn API with Keras models.\"\"\"\n# pylint: disable=g-classes-have-attributes\n\nimport copy\nimport types\n\nimport numpy as np\n\nfrom keras import losses\nfrom keras.models import Sequential\nfrom keras.utils.generic_utils import has_arg\nfrom keras.utils.np_utils import to_categorical\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nclass BaseWrapper(object):\n \"\"\"Base class for the Keras scikit-learn wrapper.\n\n Warning: This class should not be used directly.\n Use descendant classes instead.\n\n Args:\n build_fn: callable function or class instance\n **sk_params: model parameters & fitting parameters\n\n The `build_fn` should construct, compile and return a Keras model, which\n will then be used to fit/predict. One of the following\n three values could be passed to `build_fn`:\n 1. A function\n 2. An instance of a class that implements the `__call__` method\n 3. None. This means you implement a class that inherits from either\n `KerasClassifier` or `KerasRegressor`. The `__call__` method of the\n present class will then be treated as the default `build_fn`.\n\n `sk_params` takes both model parameters and fitting parameters. Legal model\n parameters are the arguments of `build_fn`. Note that like all other\n estimators in scikit-learn, `build_fn` should provide default values for\n its arguments, so that you could create the estimator without passing any\n values to `sk_params`.\n\n `sk_params` could also accept parameters for calling `fit`, `predict`,\n `predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`).\n fitting (predicting) parameters are selected in the following order:\n\n 1. Values passed to the dictionary arguments of\n `fit`, `predict`, `predict_proba`, and `score` methods\n 2. Values passed to `sk_params`\n 3. 
The default values of the `keras.models.Sequential`\n `fit`, `predict`, `predict_proba` and `score` methods\n\n When using scikit-learn's `grid_search` API, legal tunable parameters are\n those you could pass to `sk_params`, including fitting parameters.\n In other words, you could use `grid_search` to search for the best\n `batch_size` or `epochs` as well as the model parameters.\n \"\"\"\n\n def __init__(self, build_fn=None, **sk_params):\n self.build_fn = build_fn\n self.sk_params = sk_params\n self.check_params(sk_params)\n\n def check_params(self, params):\n \"\"\"Checks for user typos in `params`.\n\n Args:\n params: dictionary; the parameters to be checked\n\n Raises:\n ValueError: if any member of `params` is not a valid argument.\n \"\"\"\n legal_params_fns = [\n Sequential.fit, Sequential.predict, Sequential.predict_classes,\n Sequential.evaluate\n ]\n if self.build_fn is None:\n legal_params_fns.append(self.__call__)\n elif (not isinstance(self.build_fn, types.FunctionType) and\n not isinstance(self.build_fn, types.MethodType)):\n legal_params_fns.append(self.build_fn.__call__)\n else:\n legal_params_fns.append(self.build_fn)\n\n for params_name in params:\n for fn in legal_params_fns:\n if has_arg(fn, params_name):\n break\n else:\n if params_name != 'nb_epoch':\n raise ValueError('{} is not a legal parameter'.format(params_name))\n\n def get_params(self, **params): # pylint: disable=unused-argument\n \"\"\"Gets parameters for this estimator.\n\n Args:\n **params: ignored (exists for API compatibility).\n\n Returns:\n Dictionary of parameter names mapped to their values.\n \"\"\"\n res = self.sk_params.copy()\n res.update({'build_fn': self.build_fn})\n return res\n\n def set_params(self, **params):\n \"\"\"Sets the parameters of this estimator.\n\n Args:\n **params: Dictionary of parameter names mapped to their values.\n\n Returns:\n self\n \"\"\"\n self.check_params(params)\n self.sk_params.update(params)\n return self\n\n def fit(self, x, y, **kwargs):\n \"\"\"Constructs a new model with `build_fn` & fit the model to `(x, y)`.\n\n Args:\n x : array-like, shape `(n_samples, n_features)`\n Training samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`\n True labels for `x`.\n **kwargs: dictionary arguments\n Legal arguments are the arguments of `Sequential.fit`\n\n Returns:\n history : object\n details about the training history at each epoch.\n \"\"\"\n if self.build_fn is None:\n self.model = self.__call__(**self.filter_sk_params(self.__call__))\n elif (not isinstance(self.build_fn, types.FunctionType) and\n not isinstance(self.build_fn, types.MethodType)):\n self.model = self.build_fn(\n **self.filter_sk_params(self.build_fn.__call__))\n else:\n self.model = self.build_fn(**self.filter_sk_params(self.build_fn))\n\n if (losses.is_categorical_crossentropy(self.model.loss) and\n len(y.shape) != 2):\n y = to_categorical(y)\n\n fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))\n fit_args.update(kwargs)\n\n history = self.model.fit(x, y, **fit_args)\n\n return history\n\n def filter_sk_params(self, fn, override=None):\n \"\"\"Filters `sk_params` and returns those in `fn`'s arguments.\n\n Args:\n fn : arbitrary function\n override: dictionary, values to override `sk_params`\n\n Returns:\n res : dictionary containing variables\n in both `sk_params` and `fn`'s arguments.\n \"\"\"\n override = override or {}\n res = {}\n for name, value in 
self.sk_params.items():\n if has_arg(fn, name):\n res.update({name: value})\n res.update(override)\n return res\n\n\n@keras_export('keras.wrappers.scikit_learn.KerasClassifier')\nclass KerasClassifier(BaseWrapper):\n \"\"\"Implementation of the scikit-learn classifier API for Keras.\n \"\"\"\n\n def fit(self, x, y, **kwargs):\n \"\"\"Constructs a new model with `build_fn` & fit the model to `(x, y)`.\n\n Args:\n x : array-like, shape `(n_samples, n_features)`\n Training samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`\n True labels for `x`.\n **kwargs: dictionary arguments\n Legal arguments are the arguments of `Sequential.fit`\n\n Returns:\n history : object\n details about the training history at each epoch.\n\n Raises:\n ValueError: In case of invalid shape for `y` argument.\n \"\"\"\n y = np.array(y)\n if len(y.shape) == 2 and y.shape[1] > 1:\n self.classes_ = np.arange(y.shape[1])\n elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:\n self.classes_ = np.unique(y)\n y = np.searchsorted(self.classes_, y)\n else:\n raise ValueError('Invalid shape for y: ' + str(y.shape))\n self.n_classes_ = len(self.classes_)\n return super(KerasClassifier, self).fit(x, y, **kwargs)\n\n def predict(self, x, **kwargs):\n \"\"\"Returns the class predictions for the given test data.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n **kwargs: dictionary arguments\n Legal arguments are the arguments\n of `Sequential.predict_classes`.\n\n Returns:\n preds: array-like, shape `(n_samples,)`\n Class predictions.\n \"\"\"\n kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)\n classes = self.model.predict_classes(x, **kwargs)\n return self.classes_[classes]\n\n def predict_proba(self, x, **kwargs):\n \"\"\"Returns class probability estimates for the given test data.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n **kwargs: dictionary arguments\n Legal arguments are the arguments\n of `Sequential.predict_classes`.\n\n Returns:\n proba: array-like, shape `(n_samples, n_outputs)`\n Class probability estimates.\n In the case of binary classification,\n to match the scikit-learn API,\n will return an array of shape `(n_samples, 2)`\n (instead of `(n_sample, 1)` as in Keras).\n \"\"\"\n kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)\n probs = self.model.predict(x, **kwargs)\n\n # check if binary classification\n if probs.shape[1] == 1:\n # first column is probability of class 0 and second is of class 1\n probs = np.hstack([1 - probs, probs])\n return probs\n\n def score(self, x, y, **kwargs):\n \"\"\"Returns the mean accuracy on the given test data and labels.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`\n True labels for `x`.\n **kwargs: dictionary arguments\n Legal arguments are the arguments of `Sequential.evaluate`.\n\n Returns:\n score: float\n Mean accuracy of predictions on `x` wrt. `y`.\n\n Raises:\n ValueError: If the underlying model isn't configured to\n compute accuracy. 
You should pass `metrics=[\"accuracy\"]` to\n            the `.compile()` method of the model.\n    \"\"\"\n    y = np.searchsorted(self.classes_, y)\n    kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)\n\n    loss_name = self.model.loss\n    if hasattr(loss_name, '__name__'):\n      loss_name = loss_name.__name__\n    if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:\n      y = to_categorical(y)\n\n    outputs = self.model.evaluate(x, y, **kwargs)\n    if not isinstance(outputs, list):\n      outputs = [outputs]\n    for name, output in zip(self.model.metrics_names, outputs):\n      if name in ['accuracy', 'acc']:\n        return output\n    raise ValueError('The model is not configured to compute accuracy. '\n                     'You should pass `metrics=[\"accuracy\"]` to '\n                     'the `model.compile()` method.')\n\n\n@keras_export('keras.wrappers.scikit_learn.KerasRegressor')\nclass KerasRegressor(BaseWrapper):\n  \"\"\"Implementation of the scikit-learn regressor API for Keras.\n  \"\"\"\n\n  def predict(self, x, **kwargs):\n    \"\"\"Returns predictions for the given test data.\n\n    Args:\n        x: array-like, shape `(n_samples, n_features)`\n            Test samples where `n_samples` is the number of samples\n            and `n_features` is the number of features.\n        **kwargs: dictionary arguments\n            Legal arguments are the arguments of `Sequential.predict`.\n\n    Returns:\n        preds: array-like, shape `(n_samples,)`\n            Predictions.\n    \"\"\"\n    kwargs = self.filter_sk_params(Sequential.predict, kwargs)\n    return np.squeeze(self.model.predict(x, **kwargs))\n\n  def score(self, x, y, **kwargs):\n    \"\"\"Returns the mean loss on the given test data and labels.\n\n    Args:\n        x: array-like, shape `(n_samples, n_features)`\n            Test samples where `n_samples` is the number of samples\n            and `n_features` is the number of features.\n        y: array-like, shape `(n_samples,)`\n            True labels for `x`.\n        **kwargs: dictionary arguments\n            Legal arguments are the arguments of `Sequential.evaluate`.\n\n    Returns:\n        score: float\n            Negative mean loss of predictions on `x` wrt. `y`\n            (negated so that larger scores indicate a better fit).\n    \"\"\"\n    kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)\n    loss = self.model.evaluate(x, y, **kwargs)\n    if isinstance(loss, list):\n      return -loss[0]\n    return -loss\n",
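The wrapper file above is what makes a Keras `Sequential` model usable as a scikit-learn estimator. Below is a minimal usage sketch of the grid-search workflow that the `BaseWrapper` docstring describes. It assumes the TF-1.x-era `keras.wrappers.scikit_learn` module; `make_model`, the synthetic data, and the grid values are illustrative assumptions, not part of the wrapper's API.

```python
# Minimal sketch (assumed setup): tuning both a fit parameter (batch_size)
# and a build_fn parameter (hidden_units) with scikit-learn's GridSearchCV.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

def make_model(hidden_units=32):  # illustrative build_fn
    # Any argument of build_fn becomes a legal tunable parameter,
    # because check_params() accepts arguments of build_fn.
    model = Sequential([
        Dense(hidden_units, activation='relu', input_shape=(20,)),
        Dense(3, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])  # score() requires an accuracy metric
    return model

x = np.random.rand(100, 20)
y = np.random.randint(0, 3, size=100)  # 1-D labels; fit() one-hot encodes them

clf = KerasClassifier(build_fn=make_model, epochs=5, verbose=0)
# epochs/verbose are arguments of Sequential.fit, so check_params allows them.
grid = GridSearchCV(clf, param_grid={'hidden_units': [16, 32],
                                     'batch_size': [8, 16]}, cv=3)
grid.fit(x, y)  # clones clf via get_params()/set_params()
print(grid.best_params_)
```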
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for custom training loops that involves advanced optimizer usage.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nfrom absl.testing import parameterized\nfrom tensorflow.python.distribute import values\nfrom keras.distribute import strategy_combinations as keras_strategy_combinations\nfrom keras.optimizer_v2 import gradient_descent\n\n\nclass OptimizerTest(tf.test.TestCase, parameterized.TestCase):\n\n @tf.__internal__.distribute.combinations.generate(\n tf.__internal__.test.combinations.times(\n tf.__internal__.test.combinations.combine(\n distribution=keras_strategy_combinations.multidevice_strategies,\n mode=[\"eager\"],\n ),\n tf.__internal__.test.combinations.combine(\n experimental_aggregate_gradients=True,\n expected=[[[-0.3, -0.3], [-0.3, -0.3]]]) +\n tf.__internal__.test.combinations.combine(\n experimental_aggregate_gradients=False,\n expected=[[[-0.1, -0.1], [-0.2, -0.2]]])\n ))\n def test_custom_aggregation(self, distribution,\n experimental_aggregate_gradients, expected):\n\n with distribution.scope():\n v = tf.Variable([0., 0.])\n optimizer = gradient_descent.SGD(0.1)\n\n class PerReplica(values.DistributedValues):\n \"\"\"Holds a map from replica to unsynchronized values.\"\"\"\n\n @property\n def values(self):\n \"\"\"Returns the per replica values.\"\"\"\n return self._values\n\n @tf.function\n def optimize():\n with tf.compat.v1.device(distribution.extended.worker_devices[0]):\n v1 = tf.convert_to_tensor([1., 1.])\n with tf.compat.v1.device(distribution.extended.worker_devices[1]):\n v2 = tf.convert_to_tensor([2., 2.])\n grads = PerReplica([v1, v2])\n def step_fn(grads):\n optimizer.apply_gradients(\n [(grads, v)],\n experimental_aggregate_gradients=experimental_aggregate_gradients)\n return v.read_value()\n\n return distribution.experimental_local_results(\n distribution.run(step_fn, args=(grads,)))\n\n self.assertAllClose(optimize(), expected)\n\n @tf.__internal__.distribute.combinations.generate(\n tf.__internal__.test.combinations.combine(\n distribution=tf.__internal__.distribute.combinations.one_device_strategy,\n mode=[\"eager\"],\n experimental_aggregate_gradients=[True, False]))\n def test_custom_aggregation_one_device(self, distribution,\n experimental_aggregate_gradients):\n\n with distribution.scope():\n v = tf.Variable([0., 0.])\n optimizer = gradient_descent.SGD(0.1)\n\n @tf.function\n def optimize():\n grads = tf.convert_to_tensor([1., 1.])\n\n def step_fn(grads):\n optimizer.apply_gradients(\n [(grads, v)],\n experimental_aggregate_gradients=experimental_aggregate_gradients)\n return v.read_value()\n\n return distribution.experimental_local_results(\n distribution.run(step_fn, args=(grads,)))\n\n self.assertAllClose(optimize(), [[-0.1, -0.1]])\n\n @tf.__internal__.distribute.combinations.generate(\n 
tf.__internal__.test.combinations.combine(distribution=[\n tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu\n ]))\n def test_custom_aggregation_central_storage(self, distribution):\n with distribution.scope():\n v = tf.Variable([0., 0.])\n optimizer = gradient_descent.SGD(0.1)\n\n grads = tf.convert_to_tensor([1., 1.])\n\n def step_fn(grads):\n with self.assertRaises(NotImplementedError):\n optimizer.apply_gradients([(grads, v)],\n experimental_aggregate_gradients=False)\n\n return distribution.run(step_fn, args=(grads,))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
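The expected values in `test_custom_aggregation` follow from simple arithmetic. The sketch below (plain Python, no TensorFlow) recomputes them under the test's setup: SGD with learning rate 0.1 and per-replica gradients `[1., 1.]` and `[2., 2.]` on a variable initialized to `[0., 0.]`.

```python
# Recomputing the test's `expected` values by hand.
lr = 0.1
g_replica0, g_replica1 = 1.0, 2.0

# experimental_aggregate_gradients=True: gradients are summed across
# replicas before the update, so both replicas take the same step.
step_aggregated = -lr * (g_replica0 + g_replica1)   # -0.3 on both replicas

# experimental_aggregate_gradients=False: each replica applies only its
# own unaggregated gradient.
steps_local = (-lr * g_replica0, -lr * g_replica1)  # (-0.1, -0.2)
```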
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the loss scaling optimizer class.\"\"\"\n\nimport tensorflow.compat.v2 as tf\nfrom keras import backend\nfrom keras import optimizers\nfrom keras.mixed_precision import loss_scale as keras_loss_scale_module\nfrom keras.optimizer_v2 import optimizer_v2\nfrom keras.optimizer_v2 import utils as optimizer_utils\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nclass _UnwrapPreventer(object):\n \"\"\"Wrapper that DistributionStrategy will not unwrap.\n\n Typically, DistributionStrategy will unwrap values when going from a cross-\n replica context to a replica context via `call_for_each_replica`. This class\n is a wrapper that DistributionStrategy will not unwrap, so it can be used to\n prevent it from unwrapping a value.\n\n TODO(reedwm): Find/implement a better way of preventing values from being\n unwrapped by DistributionStrategy\n \"\"\"\n\n __slots__ = ['value']\n\n def __init__(self, value):\n self.value = value\n\n\nclass _DelegatingTrackableMixin(object):\n \"\"\"A mixin that delegates all Trackable methods to another trackable object.\n\n This class must be used with multiple inheritance. A class that subclasses\n Trackable can also subclass this class, which causes all Trackable methods to\n be delegated to the trackable object passed in the constructor.\n\n A subclass can use this mixin to appear as if it were the trackable passed to\n the constructor, from a Checkpoint's perspective. LossScaleOptimizer uses this\n mixin, so that the checkpoint format for a LossScaleOptimizer is identical to\n the checkpoint format for a normal optimizer. 
This allows a model to be saved\n  with a normal Optimizer and restored with a LossScaleOptimizer, or vice versa.\n  The only difference in checkpoint format is that the loss scale is also saved\n  with a LossScaleOptimizer.\n  \"\"\"\n\n  def __init__(self, trackable_obj):\n    self._trackable = trackable_obj\n\n  # pylint: disable=protected-access\n  @property\n  def _setattr_tracking(self):\n    return self._trackable._setattr_tracking\n\n  @_setattr_tracking.setter\n  def _setattr_tracking(self, value):\n    self._trackable._setattr_tracking = value\n\n  @property\n  def _update_uid(self):\n    return self._trackable._update_uid\n\n  @_update_uid.setter\n  def _update_uid(self, value):\n    self._trackable._update_uid = value\n\n  @property\n  def _unconditional_checkpoint_dependencies(self):\n    return self._trackable._unconditional_checkpoint_dependencies\n\n  @property\n  def _unconditional_dependency_names(self):\n    return self._trackable._unconditional_dependency_names\n\n  @property\n  def _name_based_restores(self):\n    return self._trackable._name_based_restores\n\n  def _maybe_initialize_trackable(self):\n    return self._trackable._maybe_initialize_trackable()\n\n  @property\n  def _object_identifier(self):\n    return self._trackable._object_identifier\n\n  @property\n  def _tracking_metadata(self):\n    return self._trackable._tracking_metadata\n\n  def _no_dependency(self, value):\n    return self._trackable._no_dependency(value)\n\n  def _name_based_attribute_restore(self, checkpoint):\n    return self._trackable._name_based_attribute_restore(checkpoint)\n\n  @property\n  def _checkpoint_dependencies(self):\n    return self._trackable._checkpoint_dependencies\n\n  @property\n  def _deferred_dependencies(self):\n    return self._trackable._deferred_dependencies\n\n  def _lookup_dependency(self, name):\n    # A delegation method must return the looked-up dependency.\n    return self._trackable._lookup_dependency(name)\n\n  def _add_variable_with_custom_getter(self,\n                                       name,\n                                       shape=None,\n                                       dtype=tf.float32,\n                                       initializer=None,\n                                       getter=None,\n                                       overwrite=False,\n                                       **kwargs_for_getter):\n    return self._trackable._add_variable_with_custom_getter(\n        name, shape, dtype, initializer, getter, overwrite, **kwargs_for_getter)\n\n  def _preload_simple_restoration(self, name):\n    return self._trackable._preload_simple_restoration(name)\n\n  def _track_trackable(self, trackable, name, overwrite=False):  # pylint: disable=redefined-outer-name\n    return self._trackable._track_trackable(trackable, name, overwrite)\n\n  def _handle_deferred_dependencies(self, name, trackable):  # pylint: disable=redefined-outer-name\n    return self._trackable._handle_deferred_dependencies(name, trackable)\n\n  def _restore_from_checkpoint_position(self, checkpoint_position):\n    return self._trackable._restore_from_checkpoint_position(\n        checkpoint_position)\n\n  def _single_restoration_from_checkpoint_position(self, checkpoint_position,\n                                                   visit_queue):\n    return self._trackable._single_restoration_from_checkpoint_position(\n        checkpoint_position, visit_queue)\n\n  def _gather_saveables_for_checkpoint(self):\n    return self._trackable._gather_saveables_for_checkpoint()\n\n  def _list_extra_dependencies_for_serialization(self, serialization_cache):\n    return self._trackable._list_extra_dependencies_for_serialization(\n        serialization_cache)\n\n  def _list_functions_for_serialization(self, serialization_cache):\n    return self._trackable._list_functions_for_serialization(\n        serialization_cache)\n  # pylint: enable=protected-access\n\n\ndef _is_all_finite(grads):\n  \"\"\"Returns a scalar boolean tensor indicating if all gradients are finite.\"\"\"\n  is_finite_per_grad = [\n      
tf.reduce_all(tf.math.is_finite(g)) for g in grads if g is not None\n ]\n return tf.reduce_all(is_finite_per_grad)\n\n\ndef _op_in_graph_mode(tensor):\n \"\"\"Returns the tensor's op in graph mode, or the tensor in eager mode.\n\n This is useful because sometimes an op is needed in graph mode instead of a\n tensor. In eager mode, there are no ops.\n\n Args:\n tensor: A tensor.\n\n Returns:\n The tensor's op in graph mode. The tensor in eager mode.\n \"\"\"\n if tf.executing_eagerly():\n return tensor\n return tensor.op\n\n\ndef _assign_if_finite(var, value):\n \"\"\"Assigns a value to a variable if the value is finite.\"\"\"\n return tf.compat.v1.cond(\n tf.math.is_finite(value), lambda: _op_in_graph_mode(var.assign(value)),\n tf.no_op)\n\n\nclass _DynamicLossScaleState(tf.__internal__.tracking.Trackable):\n \"\"\"The state of a dynamic loss scale.\"\"\"\n\n def __init__(self,\n initial_loss_scale,\n growth_steps,\n multiplier):\n \"\"\"Creates the dynamic loss scale.\"\"\"\n super(_DynamicLossScaleState, self).__init__()\n self._initial_loss_scale = float(initial_loss_scale)\n self._growth_steps = int(growth_steps)\n self._multiplier = float(multiplier)\n\n self._weights = {}\n self._current_loss_scale = self._add_weight(\n name='current_loss_scale',\n dtype=tf.float32,\n initial_value=self._initial_loss_scale)\n # The number of consecutive steps with finite gradients since the last\n # nonfinite gradient or change in loss scale. The name is 'good_steps' for\n # backwards compatibility with older checkpoints.\n self._counter = self._add_weight(\n name='good_steps', dtype=tf.int64, initial_value=0)\n\n def _add_weight(self, name, initial_value, dtype=None):\n \"\"\"Adds a weight to this loss scale.\n\n Args:\n name: Variable name.\n initial_value: The variable's initial value.\n dtype: The type of the variable.\n\n Returns:\n A variable.\n\n Raises:\n RuntimeError: If a weight with `name` has already been added.\n \"\"\"\n variable = tf.compat.v1.Variable(\n initial_value=initial_value,\n name=name,\n dtype=dtype,\n trainable=False,\n use_resource=True,\n synchronization=tf.VariableSynchronization.AUTO,\n # Set aggregation to NONE, as loss scaling variables should never be\n # aggregated.\n aggregation=tf.compat.v1.VariableAggregation.NONE)\n if tf.executing_eagerly():\n graph_key = None\n else:\n graph = tf.compat.v1.get_default_graph()\n graph_key = graph._graph_key # pylint: disable=protected-access\n\n key = (name, graph_key)\n self._weights[key] = variable\n self._handle_deferred_dependencies(name=name, trackable=variable)\n backend.track_variable(variable)\n return variable\n\n @property\n def _checkpoint_dependencies(self):\n \"\"\"From Trackable. Gather graph-specific weights to save.\"\"\"\n if tf.executing_eagerly():\n graph_key = None\n else:\n graph = tf.compat.v1.get_default_graph()\n graph_key = graph._graph_key # pylint: disable=protected-access\n weights = []\n for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):\n if g == graph_key:\n weights.append(tf.__internal__.tracking.TrackableReference(name=name, ref=v))\n return (super(_DynamicLossScaleState, self)._checkpoint_dependencies +\n weights)\n\n def _lookup_dependency(self, name):\n \"\"\"From Trackable. 
Find a weight in the current graph.\"\"\"\n    unconditional = super(_DynamicLossScaleState, self)._lookup_dependency(name)\n    if unconditional is not None:\n      return unconditional\n    if tf.executing_eagerly():\n      graph_key = None\n    else:\n      graph = tf.compat.v1.get_default_graph()\n      graph_key = graph._graph_key  # pylint: disable=protected-access\n    return self._weights.get((name, graph_key), None)\n\n  @property\n  def initial_loss_scale(self):\n    return self._initial_loss_scale\n\n  @property\n  def growth_steps(self):\n    return self._growth_steps\n\n  @property\n  def multiplier(self):\n    return self._multiplier\n\n  @property\n  def current_loss_scale(self):\n    \"\"\"Returns the current loss scale as a float32 `tf.Variable`.\"\"\"\n    return self._current_loss_scale\n\n  @property\n  def counter(self):\n    \"\"\"Returns the counter as an int64 `tf.Variable`.\"\"\"\n    return self._counter\n\n  def __call__(self):\n    \"\"\"Returns the current loss scale as a scalar `float32` tensor.\"\"\"\n    return tf.convert_to_tensor(self._current_loss_scale)\n\n  def update(self, grads):\n    \"\"\"Updates the value of the loss scale.\n\n    Args:\n      grads: A nested structure of unscaled gradients, each of which is an\n        all-reduced gradient of the loss with respect to a weight.\n\n    Returns:\n      update_op: In eager mode, None. In graph mode, an op to update the loss\n        scale.\n      should_apply_gradients: Either a bool or a scalar boolean tensor. If\n        False, the caller should skip applying `grads` to the variables this\n        step.\n    \"\"\"\n    grads = tf.nest.flatten(grads)\n    if tf.distribute.has_strategy(\n    ) and tf.distribute.in_cross_replica_context():\n      distribution = tf.distribute.get_strategy()\n      is_finite_per_replica = distribution.extended.call_for_each_replica(\n          _is_all_finite, args=(grads,))\n      # Each replica computed the same `is_finite` value, since `grads` is\n      # all-reduced across replicas. Arbitrarily take `is_finite` from the first\n      # replica.\n      is_finite = (\n          distribution.experimental_local_results(is_finite_per_replica)[0])\n    else:\n      is_finite = _is_all_finite(grads)\n\n    def update_if_finite_grads():\n      \"\"\"Update assuming the gradients are finite.\"\"\"\n\n      def incr_loss_scale():\n        new_loss_scale = self.current_loss_scale * self.multiplier\n        return tf.group(\n            _assign_if_finite(self.current_loss_scale, new_loss_scale),\n            self.counter.assign(0))\n\n      return tf.compat.v1.cond(\n          self.counter + 1 >= self.growth_steps,\n          incr_loss_scale,\n          lambda: _op_in_graph_mode(self.counter.assign_add(1)))\n\n    def update_if_not_finite_grads():\n      \"\"\"Update assuming the gradients are nonfinite.\"\"\"\n\n      new_loss_scale = tf.maximum(\n          self.current_loss_scale / self.multiplier, 1)\n      return tf.group(\n          self.counter.assign(0),\n          self.current_loss_scale.assign(new_loss_scale))\n\n    update_op = tf.compat.v1.cond(is_finite, update_if_finite_grads,\n                                  update_if_not_finite_grads)\n    should_apply_gradients = is_finite\n    return update_op, should_apply_gradients\n\n\n# See LossScaleOptimizer docstring for why this is so big\n_DEFAULT_INITIAL_SCALE = 2 ** 15\n_DEFAULT_GROWTH_STEPS = 2000\n\n\n# pylint: disable=g-classes-have-attributes\n@keras_export('keras.mixed_precision.LossScaleOptimizer')\nclass LossScaleOptimizer(_DelegatingTrackableMixin, optimizer_v2.OptimizerV2):\n  \"\"\"An optimizer that applies loss scaling to prevent numeric underflow.\n\n  Loss scaling is a technique to prevent numeric underflow in intermediate\n  gradients when float16 is used. 
To prevent underflow, the loss is multiplied\n  (or \"scaled\") by a certain factor called the \"loss scale\", which causes\n  intermediate gradients to be scaled by the loss scale as well. The final\n  gradients are divided (or \"unscaled\") by the loss scale to bring them back to\n  their original value.\n\n  `LossScaleOptimizer` wraps another optimizer and applies loss scaling to it.\n  By default, the loss scale is dynamically updated over time so you do not have\n  to choose the loss scale. The `minimize` method automatically scales the loss,\n  unscales the gradients, and updates the loss scale so all you have to do is\n  wrap your optimizer with a `LossScaleOptimizer` if you use `minimize`. For\n  example:\n\n  >>> opt = tf.keras.optimizers.SGD(0.25)\n  >>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)\n  >>> var = tf.Variable(1.)\n  >>> loss_fn = lambda: var ** 2\n  >>> # 'minimize' applies loss scaling and updates the loss scale.\n  >>> opt.minimize(loss_fn, var_list=var)\n  >>> var.numpy()\n  0.5\n\n  If a `tf.GradientTape` is used to compute gradients instead of `minimize`, you\n  must scale the loss and gradients manually. This can be done with the\n  `LossScaleOptimizer.get_scaled_loss` and\n  `LossScaleOptimizer.get_unscaled_gradients` methods. For example:\n\n  >>> with tf.GradientTape() as tape:\n  ...   loss = loss_fn()\n  ...   scaled_loss = opt.get_scaled_loss(loss)\n  >>> scaled_grad = tape.gradient(scaled_loss, var)\n  >>> (grad,) = opt.get_unscaled_gradients([scaled_grad])\n  >>> opt.apply_gradients([(grad, var)])  # Loss scale is updated here\n  >>> var.numpy()\n  0.25\n\n  Warning: If you forget to call `get_scaled_loss` or `get_unscaled_gradients`\n  (or both) when using a `tf.GradientTape`, the model will likely converge to a\n  worse quality. Please make sure you call each function exactly once.\n\n  When mixed precision with float16 is used, there is typically no risk of\n  underflow affecting model quality if loss scaling is properly used. See\n  [the mixed precision guide](\n  https://www.tensorflow.org/guide/keras/mixed_precision) for more information\n  on how to use mixed precision.\n\n  Args:\n    inner_optimizer: The `tf.keras.optimizers.Optimizer` instance to wrap.\n    dynamic: Bool indicating whether dynamic loss scaling is used. Defaults to\n      True. If True, the loss scale will be dynamically updated over time using\n      an algorithm that keeps the loss scale at approximately its optimal value.\n      If False, a single fixed loss scale is used and `initial_scale` must be\n      specified, which is used as the loss scale. Recommended to keep as True,\n      as choosing a fixed loss scale can be tricky. Currently, there is a small\n      performance overhead to dynamic loss scaling compared to fixed loss\n      scaling.\n    initial_scale: The initial loss scale. If `dynamic` is True, this defaults\n      to `2 ** 15`. If `dynamic` is False, this must be specified and acts as\n      the sole loss scale, as the loss scale does not change over time. When\n      dynamic loss scaling is used, it is better for this to be a very high\n      number, because a loss scale that is too high gets lowered far more\n      quickly than a loss scale that is too low gets raised.\n    dynamic_growth_steps: With dynamic loss scaling, every\n      `dynamic_growth_steps` steps with finite gradients, the loss scale is\n      doubled. Defaults to 2000. If a nonfinite gradient is encountered, the\n      count is reset back to zero, gradients are skipped that step, and the loss\n      scale is halved. The count can be queried with\n      `LossScaleOptimizer.dynamic_counter`. 
This argument can only be specified\n      if `dynamic` is True.\n\n  `LossScaleOptimizer` will occasionally skip applying gradients to the\n  variables, in which case the trainable variables will not change that step.\n  This is done because the dynamic loss scale will sometimes be raised too\n  high, causing overflow in the gradients. Typically, the first 2 to 15 steps of\n  the model are skipped as the initial loss scale is very high, but afterwards\n  steps will only be skipped on average 0.05% of the time (the fraction of steps\n  skipped is `1 / dynamic_growth_steps`).\n\n  `LossScaleOptimizer` delegates all public `Optimizer` methods to the inner\n  optimizer. Additionally, in methods `minimize` and `get_gradients`, it scales\n  the loss and unscales the gradients. In methods `minimize` and\n  `apply_gradients`, it additionally updates the loss scale and skips applying\n  gradients if any gradient has a nonfinite value.\n\n  ### Hyperparameters\n\n  Hyperparameters can be accessed and set on the LossScaleOptimizer, which will\n  be delegated to the wrapped optimizer.\n\n  >>> opt = tf.keras.optimizers.Adam(beta_1=0.8, epsilon=1e-5)\n  >>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)\n  >>> opt.beta_1  # Equivalent to `opt.inner_optimizer.beta_1`\n  0.8\n  >>> opt.beta_1 = 0.7  # Equivalent to `opt.inner_optimizer.beta_1 = 0.7`\n  >>> opt.beta_1\n  0.7\n  >>> opt.inner_optimizer.beta_1\n  0.7\n\n  However, accessing or setting non-hyperparameters is not delegated to the\n  LossScaleOptimizer. In an Adam optimizer, `beta_1` is a hyperparameter but\n  `epsilon` is not, as the Adam optimizer only calls `Optimizer._set_hyper` on\n  `beta_1`.\n\n  >>> opt.inner_optimizer.epsilon\n  1e-5\n  >>> opt.epsilon\n  Traceback (most recent call last):\n  ...\n  AttributeError: 'LossScaleOptimizer' object has no attribute 'epsilon'\n  >>> opt.epsilon = 1e-4  # This does NOT set epsilon on `opt.inner_optimizer`\n  >>> opt.inner_optimizer.epsilon\n  1e-5\n\n  In the above example, despite epsilon being set on the LossScaleOptimizer, the\n  old epsilon value will still be used when training as epsilon was not set on\n  the inner optimizer.\n  \"\"\"\n\n  _HAS_AGGREGATE_GRAD = True\n\n  def __init__(self, inner_optimizer, dynamic=True, initial_scale=None,\n               dynamic_growth_steps=None):\n    if not isinstance(inner_optimizer, optimizer_v2.OptimizerV2):\n      raise TypeError('\"inner_optimizer\" must be an instance of OptimizerV2, '\n                      'but got: %s' % inner_optimizer)\n    if not isinstance(dynamic, bool):\n      # Catch errors if a user incorrectly passes a string or float to the\n      # second argument, as this is commonly done for\n      # LossScaleOptimizerV1.\n      raise TypeError('\"dynamic\" argument to LossScaleOptimizer.__init__ must '\n                      'be a bool, but got: %r' % (dynamic,))\n    if isinstance(inner_optimizer, LossScaleOptimizer):\n      raise TypeError('LossScaleOptimizer cannot wrap another '\n                      'LossScaleOptimizer, but got: %s' % (inner_optimizer,))\n    self._raise_if_strategy_unsupported()\n    if getattr(inner_optimizer, '_is_wrapped_by_loss_scale_optimizer', False):\n      # TODO(reedwm): Maybe support this. The difficulty is that LSO has the\n      # same checkpoint format as the inner optimizer, so multiple LSOs wrapping\n      # the same optimizer causes the checkpointing logic to become confused.\n      raise ValueError('\"inner_optimizer\" is already wrapped by a '\n                       'LossScaleOptimizer. 
An optimizer can only be wrapped '\n 'by a single LossScaleOptimizer')\n self._optimizer = inner_optimizer\n self._optimizer._is_wrapped_by_loss_scale_optimizer = True\n\n # We don't call super().__init__, since we do not want to call OptimizerV2's\n # constructor.\n _DelegatingTrackableMixin.__init__(self, self._optimizer)\n\n if dynamic:\n if initial_scale is None:\n initial_scale = _DEFAULT_INITIAL_SCALE\n if dynamic_growth_steps is None:\n dynamic_growth_steps = _DEFAULT_GROWTH_STEPS\n self._loss_scale = _DynamicLossScaleState(\n initial_scale, dynamic_growth_steps, multiplier=2)\n self._track_trackable(self._loss_scale, 'loss_scale')\n else:\n if initial_scale is None:\n raise ValueError('\"initial_scale\" must be specified if \"dynamic\" is '\n 'False')\n self._loss_scale = float(initial_scale)\n if dynamic_growth_steps is not None:\n raise ValueError('\"dynamic_growth_steps\" must be None if \"dynamic\" '\n 'is False, but got: %s' % (dynamic_growth_steps,))\n\n # To support restoring TensorFlow 2.2 checkpoints.\n self._track_trackable(FakeOptimizerForRestoration(self._optimizer),\n 'base_optimizer')\n\n @property\n def dynamic(self):\n \"\"\"Bool indicating whether dynamic loss scaling is used.\"\"\"\n return isinstance(self._loss_scale, _DynamicLossScaleState)\n\n @property\n def loss_scale(self):\n \"\"\"The current loss scale as a float32 scalar tensor.\"\"\"\n if isinstance(self._loss_scale, _DynamicLossScaleState):\n return tf.convert_to_tensor(\n self._loss_scale.current_loss_scale)\n else:\n return tf.convert_to_tensor(self._loss_scale)\n\n @property\n def dynamic_counter(self):\n \"\"\"The number of steps since the loss scale was last increased or decreased.\n\n This is None if `LossScaleOptimizer.dynamic` is False.\n\n The counter is incremented every step. Once it reaches\n `LossScaleOptimizer.dynamic_growth_steps`, the loss scale will be doubled\n and the counter will be reset back to zero. If nonfinite gradients are\n encountered, the loss scale will be halved and the counter will be reset\n back to zero.\n \"\"\"\n if isinstance(self._loss_scale, _DynamicLossScaleState):\n return self._loss_scale.counter\n else:\n return None\n\n @property\n def initial_scale(self):\n \"\"\"The initial loss scale.\n\n If `LossScaleOptimizer.dynamic` is False, this is the same number as\n `LossScaleOptimizer.loss_scale`, as the loss scale never changes.\n \"\"\"\n if isinstance(self._loss_scale, _DynamicLossScaleState):\n return self._loss_scale.initial_loss_scale\n else:\n return self._loss_scale\n\n @property\n def dynamic_growth_steps(self):\n \"\"\"The number of steps it takes to increase the loss scale.\n\n This is None if `LossScaleOptimizer.dynamic` is False.\n\n Every `dynamic_growth_steps` consecutive steps with finite gradients, the\n loss scale is increased.\n \"\"\"\n if isinstance(self._loss_scale, _DynamicLossScaleState):\n return self._loss_scale.growth_steps\n else:\n return None\n\n @property\n def inner_optimizer(self):\n \"\"\"The optimizer that this LossScaleOptimizer is wrapping.\"\"\"\n return self._optimizer\n\n def get_scaled_loss(self, loss):\n \"\"\"Scales the loss by the loss scale.\n\n This method is only needed if you compute gradients manually, e.g. with\n `tf.GradientTape`. In that case, call this method to scale the loss before\n passing the loss to `tf.GradientTape`. 
If you use\n    `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss\n    scaling is automatically applied and this method is unneeded.\n\n    If this method is called, `get_unscaled_gradients` should also be called.\n    See the `tf.keras.mixed_precision.LossScaleOptimizer` doc for\n    an example.\n\n    Args:\n      loss: The loss, which will be multiplied by the loss scale. Can either be\n        a tensor or a callable returning a tensor.\n\n    Returns:\n      `loss` multiplied by `LossScaleOptimizer.loss_scale`.\n    \"\"\"\n    if callable(loss):\n      def new_loss():\n        loss_val = loss()\n        return loss_val * tf.cast(self.loss_scale, loss_val.dtype)\n      return new_loss\n    else:\n      return loss * tf.cast(self.loss_scale, loss.dtype)\n\n  def get_unscaled_gradients(self, grads):\n    \"\"\"Unscales the gradients by the loss scale.\n\n    This method is only needed if you compute gradients manually, e.g. with\n    `tf.GradientTape`. In that case, call this method to unscale the gradients\n    after computing them with `tf.GradientTape`. If you use\n    `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss\n    scaling is automatically applied and this method is unneeded.\n\n    If this method is called, `get_scaled_loss` should also be called. See\n    the `tf.keras.mixed_precision.LossScaleOptimizer` doc for an\n    example.\n\n    Args:\n      grads: A list of tensors, each of which will be divided by the loss\n        scale. Can have None values, which are ignored.\n\n    Returns:\n      A new list the same size as `grads`, where every non-None value in `grads`\n      is divided by `LossScaleOptimizer.loss_scale`.\n    \"\"\"\n    loss_scale_reciprocal = 1. / self.loss_scale\n    return [\n        _multiply_gradient(g, loss_scale_reciprocal) if g is not None else None\n        for g in grads\n    ]\n\n  def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):\n    tape = tf.GradientTape() if tape is None else tape\n    with tape:\n      loss = self.get_scaled_loss(loss)\n    grads_and_vars = self._optimizer._compute_gradients(  # pylint: disable=protected-access\n        loss,\n        var_list,\n        grad_loss,\n        tape=tape)\n    grads = [g for g, _ in grads_and_vars]\n    weights = [v for _, v in grads_and_vars]\n    unscaled_grads = self.get_unscaled_gradients(grads)\n    return list(zip(unscaled_grads, weights))\n\n  def get_gradients(self, loss, params):\n    loss = self.get_scaled_loss(loss)\n    grads = self._optimizer.get_gradients(loss, params)\n    return self.get_unscaled_gradients(grads)\n\n  def _create_all_weights(self, var_list):\n    self._optimizer._create_all_weights(var_list)  # pylint: disable=protected-access\n\n  def apply_gradients(self,\n                      grads_and_vars,\n                      name=None,\n                      experimental_aggregate_gradients=True):\n    if tf.distribute.in_cross_replica_context():\n      raise ValueError('apply_gradients() must be called in a replica context.')\n    # We check for the strategy here despite already checking in the constructor\n    # as frequently the optimizer is created outside the strategy's scope.\n    self._raise_if_strategy_unsupported()\n\n    grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)\n    if experimental_aggregate_gradients:\n      # We must aggregate the gradients here instead of in\n      # self.optimizer.apply_gradients, so that any NaN or Inf gradients are\n      # propagated to each replica. 
If any replica has a NaN or Inf gradient,\n # they must all have a NaN or Inf gradient so that they all skip the step.\n # pylint: disable=protected-access\n grads_and_vars = self._optimizer._transform_unaggregated_gradients(\n grads_and_vars)\n grads_and_vars = self._optimizer._aggregate_gradients(grads_and_vars)\n # pylint: enable=protected-access\n\n grads_and_vars = tuple(grads_and_vars)\n grads = [g for g, _ in grads_and_vars]\n # We do not want DistributionStrategy to unwrap any MirroredVariables in\n # grads_and_vars, because even in a replica context, the wrapped\n # optimizer expects mirrored variables. So we wrap the variables with an\n # _UnwrapPreventer, preventing DistributionStrategy from unwrapping the\n # MirroredVariables.\n wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars])\n\n def do_not_apply_fn():\n # Normally self._optimizer.iterations is incremented in\n # self._optimizer.apply_gradients(). Since that is not called in this\n # branch, we increment it here instead.\n return self._optimizer.iterations.assign_add(1, read_value=False)\n\n def _if_should_apply_grads(grads):\n if isinstance(self._loss_scale, _DynamicLossScaleState):\n return self._loss_scale.update(grads)\n else:\n return (tf.no_op(), True)\n\n if optimizer_utils.strategy_supports_no_merge_call():\n loss_scale_update_op, should_apply_grads = _if_should_apply_grads(grads)\n def apply_fn():\n return self._apply_gradients(grads, wrapped_vars, name)\n\n maybe_apply_op = tf.__internal__.smart_cond.smart_cond(should_apply_grads, apply_fn,\n do_not_apply_fn)\n return tf.group(maybe_apply_op, loss_scale_update_op)\n\n else:\n\n def _apply_gradients_cross_replica(distribution, grads, wrapped_vars,\n name):\n loss_scale_update_op, should_apply_grads = _if_should_apply_grads(grads)\n\n def apply_fn():\n return distribution.extended.call_for_each_replica(\n self._apply_gradients,\n args=(grads, wrapped_vars, name))\n\n # Note: We must call this cond() in a cross-replica context.\n # DistributionStrategy does not support having a cond in a replica\n # context with a branch that calls `merge_call`, and\n # self._optimizer.apply_gradients calls `merge_call`.\n maybe_apply_op = tf.__internal__.smart_cond.smart_cond(should_apply_grads, apply_fn,\n do_not_apply_fn)\n return tf.group(maybe_apply_op, loss_scale_update_op)\n return tf.distribute.get_replica_context().merge_call(\n _apply_gradients_cross_replica,\n args=(grads, wrapped_vars, name))\n\n def _apply_gradients(self, grads, wrapped_vars, name):\n # Pass experimental_aggregate_gradients=False since LossScaleOptimizer\n # already aggregated the gradients.\n # TODO(reedwm): This will raise a fairly cryptic error message if\n # self._optimizer.apply_gradients does not take\n # experimental_aggregate_gradients.\n return self._optimizer.apply_gradients(\n list(zip(grads, wrapped_vars.value)), name,\n experimental_aggregate_gradients=False)\n\n def get_config(self):\n serialized_optimizer = optimizers.serialize(self._optimizer)\n return {\n 'inner_optimizer': serialized_optimizer,\n 'dynamic': self.dynamic,\n 'initial_scale': self.initial_scale,\n 'dynamic_growth_steps': self.dynamic_growth_steps,\n }\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n config = config.copy() # Make a copy, since we mutate config\n if 'loss_scale' in config:\n # If loss_scale is in config, we assume we are deserializing a\n # LossScaleOptimizer from TF 2.3 or below. 
We convert the config so it\n # can be deserialized in the current LossScaleOptimizer.\n loss_scale = keras_loss_scale_module.deserialize(\n config.pop('loss_scale'))\n if isinstance(loss_scale, tf.mixed_precision.experimental.FixedLossScale):\n config['dynamic'] = False\n config['initial_scale'] = loss_scale._loss_scale_value # pylint: disable=protected-access\n elif isinstance(loss_scale, tf.mixed_precision.experimental.DynamicLossScale):\n config['dynamic'] = True\n config['initial_scale'] = loss_scale.initial_loss_scale\n config['dynamic_growth_steps'] = loss_scale.increment_period\n if loss_scale.multiplier != 2:\n raise ValueError('Cannot deserialize LossScaleOptimizer with a '\n 'DynamicLossScale whose multiplier is not 2. Got '\n 'DynamicLossScale: %s' % (loss_scale,))\n else:\n raise ValueError(\n 'Serialized LossScaleOptimizers with a LossScale that is neither a '\n 'FixedLossScale nor a DynamicLossScale can no longer be '\n 'deserialized')\n config['inner_optimizer'] = config.pop('optimizer')\n config['inner_optimizer'] = optimizers.deserialize(\n config['inner_optimizer'], custom_objects=custom_objects)\n return cls(**config)\n\n def _raise_if_strategy_unsupported(self):\n if not strategy_supports_loss_scaling():\n strategy = tf.distribute.get_strategy()\n if isinstance(strategy,\n (tf.distribute.experimental.TPUStrategy, tf.compat.v1.distribute.experimental.TPUStrategy,\n tf.distribute.TPUStrategy)):\n raise ValueError(\n 'Loss scaling is not supported with TPUStrategy. Loss scaling is '\n 'unnecessary with TPUs, since they support bfloat16 instead of '\n 'float16 and bfloat16 does not require loss scaling. You should '\n 'remove the use of the LossScaleOptimizer when TPUs are used.')\n else:\n raise ValueError('Loss scaling is not supported with the '\n 'tf.distribute.Strategy: %s. Try using a different '\n 'Strategy, e.g. 
a MirroredStrategy' %\n strategy.__class__.__name__)\n\n # Delegations: We delegate most OptimizerV2 methods to the wrapped optimizer\n # below.\n\n @property\n def iterations(self):\n return self._optimizer.iterations\n\n @iterations.setter\n def iterations(self, variable):\n self._optimizer.iterations = variable\n\n def get_slot_names(self):\n return self._optimizer.get_slot_names()\n\n def variables(self):\n return self._optimizer.variables()\n\n @property\n def weights(self):\n return self._optimizer.weights\n\n def get_weights(self):\n return self._optimizer.get_weights()\n\n def set_weights(self, weights):\n return self._optimizer.set_weights(weights)\n\n @property\n def clipnorm(self):\n return self._optimizer.clipnorm\n\n @clipnorm.setter\n def clipnorm(self, val):\n self._optimizer.clipnorm = val\n\n @property\n def global_clipnorm(self):\n return self._optimizer.global_clipnorm\n\n @global_clipnorm.setter\n def global_clipnorm(self, val):\n self._optimizer.global_clipnorm = val\n\n @property\n def clipvalue(self):\n return self._optimizer.clipvalue\n\n @clipvalue.setter\n def clipvalue(self, val):\n self._optimizer.clipvalue = val\n\n def _aggregate_gradients(self, grads_and_vars):\n return self._optimizer._aggregate_gradients(grads_and_vars) # pylint: disable=protected-access\n\n def _restore_slot_variable(self, slot_name, variable, slot_variable):\n return self._optimizer._restore_slot_variable(slot_name, variable, # pylint: disable=protected-access\n slot_variable)\n\n def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,\n variable):\n return self._optimizer._create_or_restore_slot_variable( # pylint: disable=protected-access\n slot_variable_position, slot_name, variable)\n\n def get_slot(self, var, slot_name):\n return self._optimizer.get_slot(var, slot_name)\n\n def add_slot(self, var, slot_name, initializer='zeros'):\n return self._optimizer.add_slot(var, slot_name, initializer)\n\n def __getattribute__(self, name):\n try:\n return object.__getattribute__(self, name)\n except AttributeError as e:\n if name == '_optimizer' or name == '_hyper':\n # Avoid infinite recursion\n raise e\n\n # Delegate hyperparameter accesses to inner optimizer.\n if name == 'lr':\n name = 'learning_rate'\n if name in self._optimizer._hyper:\n return self._optimizer._get_hyper(name)\n raise e\n\n def __dir__(self):\n result = set(super(LossScaleOptimizer, self).__dir__())\n if '_optimizer' in result:\n result |= self._optimizer._hyper.keys()\n if 'learning_rate' in self._optimizer._hyper.keys():\n result.add('lr')\n return list(result)\n\n def __setattr__(self, name, value):\n if name == 'lr':\n name = 'learning_rate'\n # Delegate setting hyperparameter to inner optimizer if the attribute does\n # not exist on the LossScaleOptimizer\n try:\n # We cannot check for the 'iterations' attribute as it cannot be set after\n # it is accessed.\n if name != 'iterations':\n object.__getattribute__(self, name)\n has_attribute = True\n except AttributeError:\n has_attribute = False\n if (name != '_optimizer' and name in self._optimizer._hyper\n and not has_attribute):\n self._optimizer._set_hyper(name, value)\n else:\n super(LossScaleOptimizer, self).__setattr__(name, value)\n\n # Explicitly delegate learning_rate. Normally hyperparameters are delegated in\n # __getattribute__, but if a hyperparameter is not in self._optimizer._hyper\n # (e.g. because self._optimizer itself wraps another optimizer), then it won't\n # be delegated. 
Since learning_rate is a very commonly accessed\n  # hyperparameter, we delegate it here.\n  @property\n  def learning_rate(self):\n    return self._optimizer.learning_rate\n\n  @learning_rate.setter\n  def learning_rate(self, value):\n    self._optimizer.learning_rate = value\n\n  @property\n  def lr(self):\n    return self._optimizer.learning_rate\n\n  @lr.setter\n  def lr(self, value):\n    self._optimizer.lr = value\n\n  # We do not override some OptimizerV2 methods. For each, we describe why we do\n  # not delegate them to self._optimizer:\n  # * get_updates: get_updates() calls get_gradients(). Since we override\n  #   get_gradients(), we cannot delegate get_updates() to self._optimizer,\n  #   otherwise the overridden get_gradients() method would not be called.\n  #   Luckily, get_updates() does not access any OptimizerV2 fields, so\n  #   inheriting the OptimizerV2 version works fine.\n  # * minimize: We don't delegate for a similar reason as get_updates(): it\n  #   calls both self._compute_gradients() and self.apply_gradients(), and both\n  #   need to have the LossScaleOptimizer version called.\n\n  # TODO(reedwm): Maybe throw an error if mixed precision is used without this\n  # optimizer being used.\n\n\n@keras_export('keras.mixed_precision.experimental.LossScaleOptimizer')\nclass LossScaleOptimizerV1(LossScaleOptimizer):\n  \"\"\"A deprecated optimizer that applies loss scaling.\n\n  Warning: This class is deprecated and will be removed in a future version of\n  TensorFlow. Please use the non-experimental class\n  `tf.keras.mixed_precision.LossScaleOptimizer` instead.\n\n  This class is identical to the non-experimental\n  `keras.mixed_precision.LossScaleOptimizer` except its constructor takes\n  different arguments. For this class (the experimental version), the\n  constructor takes a `loss_scale` argument. For the non-experimental class,\n  the constructor encodes the loss scaling information in multiple arguments.\n  Note that unlike this class, the non-experimental class does not accept a\n  `tf.compat.v1.mixed_precision.LossScale`, which is deprecated.\n\n  If you currently use this class, you should switch to the non-experimental\n  `tf.keras.mixed_precision.LossScaleOptimizer` instead. We show several\n  examples of converting the use of the experimental class to the equivalent\n  non-experimental class.\n\n  >>> # In all of the examples below, `opt1` and `opt2` are identical\n  >>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(\n  ...     tf.keras.optimizers.SGD(), loss_scale='dynamic')\n  >>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(\n  ...     tf.keras.optimizers.SGD())\n  >>> assert opt1.get_config() == opt2.get_config()\n\n  >>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(\n  ...     tf.keras.optimizers.SGD(), loss_scale=123)\n  >>> # dynamic=False indicates to use fixed loss scaling. initial_scale=123\n  >>> # refers to the initial loss scale, which is the single fixed loss scale\n  >>> # when dynamic=False.\n  >>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(\n  ...     tf.keras.optimizers.SGD(), dynamic=False, initial_scale=123)\n  >>> assert opt1.get_config() == opt2.get_config()\n\n  >>> loss_scale = tf.compat.v1.mixed_precision.experimental.DynamicLossScale(\n  ...     initial_loss_scale=2048, increment_period=500)\n  >>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(\n  ...     tf.keras.optimizers.SGD(), loss_scale=loss_scale)\n  >>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(\n  ...     tf.keras.optimizers.SGD(), initial_scale=2048,\n  ...     
dynamic_growth_steps=500)\n  >>> assert opt1.get_config() == opt2.get_config()\n\n  Make sure to also switch from this class to the non-experimental class in\n  isinstance checks, if you have any. If you do not do this, your model may run\n  into hard-to-debug issues, as the experimental `LossScaleOptimizer` subclasses\n  the non-experimental `LossScaleOptimizer`, but not vice versa. It is safe to\n  switch isinstance checks to the non-experimental `LossScaleOptimizer` even\n  before using the non-experimental `LossScaleOptimizer`.\n\n  >>> opt1 = tf.keras.mixed_precision.experimental.LossScaleOptimizer(\n  ...     tf.keras.optimizers.SGD(), loss_scale='dynamic')\n  >>> # The experimental class subclasses the non-experimental class\n  >>> isinstance(opt1, tf.keras.mixed_precision.LossScaleOptimizer)\n  True\n  >>> opt2 = tf.keras.mixed_precision.LossScaleOptimizer(\n  ...     tf.keras.optimizers.SGD())\n  >>> # The non-experimental class does NOT subclass the experimental class.\n  >>> isinstance(opt2, tf.keras.mixed_precision.experimental.LossScaleOptimizer)\n  False\n\n  Args:\n    optimizer: The Optimizer instance to wrap.\n    loss_scale: The loss scale to scale the loss and gradients. This can\n      either be an int/float to use a fixed loss scale, the string \"dynamic\"\n      to use dynamic loss scaling, or an instance of a LossScale. The string\n      \"dynamic\" is equivalent to passing `DynamicLossScale()`, and passing an\n      int/float is equivalent to passing a FixedLossScale with the given loss\n      scale. If a DynamicLossScale is passed, DynamicLossScale.multiplier must\n      be 2 (the default).\n  \"\"\"\n\n  def __init__(self, optimizer, loss_scale):\n    warn_msg_prefix = (\n        'tf.keras.mixed_precision.experimental.LossScaleOptimizer is '\n        'deprecated. Please use tf.keras.mixed_precision.LossScaleOptimizer '\n        'instead. ')\n\n    if isinstance(loss_scale, dict):\n      loss_scale = keras_loss_scale_module.deserialize(loss_scale)\n\n    if isinstance(loss_scale, (int, float)):\n      tf_logging.warning(\n          warn_msg_prefix + 'For example:\\n'\n          '  opt = tf.keras.mixed_precision.LossScaleOptimizer('\n          'opt, dynamic=False, initial_scale={})'.format(loss_scale))\n      super(LossScaleOptimizerV1, self).__init__(optimizer, dynamic=False,\n                                                 initial_scale=loss_scale)\n    elif isinstance(loss_scale, tf.mixed_precision.experimental.FixedLossScale):\n      ls_val = loss_scale._loss_scale_value  # pylint: disable=protected-access\n      tf_logging.warning(\n          warn_msg_prefix + 'For example:\\n'\n          '  opt = tf.keras.mixed_precision.LossScaleOptimizer('\n          'opt, dynamic=False, initial_scale={})'.format(ls_val))\n      super(LossScaleOptimizerV1, self).__init__(optimizer, dynamic=False,\n                                                 initial_scale=ls_val)\n    elif loss_scale == 'dynamic':\n      tf_logging.warning(\n          warn_msg_prefix + 'For example:\\n'\n          '  opt = tf.keras.mixed_precision.LossScaleOptimizer('\n          'opt)')\n      super(LossScaleOptimizerV1, self).__init__(optimizer)\n    elif isinstance(loss_scale, tf.mixed_precision.experimental.DynamicLossScale):\n      kwargs = {}\n      extra_arguments = ''\n      if loss_scale.initial_loss_scale != _DEFAULT_INITIAL_SCALE:\n        kwargs['initial_scale'] = loss_scale.initial_loss_scale\n        extra_arguments += (', initial_scale=%s' %\n                            loss_scale.initial_loss_scale)\n      if loss_scale.increment_period != _DEFAULT_GROWTH_STEPS:\n        kwargs['dynamic_growth_steps'] = loss_scale.increment_period\n        extra_arguments += (', dynamic_growth_steps=%s' %\n                            loss_scale.increment_period)\n      if loss_scale.multiplier != 2:\n        raise ValueError('When passing a DynamicLossScale to \"loss_scale\", '\n                         'DynamicLossScale.multiplier must be 2. 
Got: %s'\n                         % (loss_scale,))\n      tf_logging.warning(\n          warn_msg_prefix +\n          'Note that the non-experimental LossScaleOptimizer does not take a '\n          'DynamicLossScale but instead takes the dynamic configuration '\n          'directly in the constructor. For example:\\n'\n          '  opt = tf.keras.mixed_precision.LossScaleOptimizer('\n          'opt{})\\n'.format(extra_arguments))\n      super(LossScaleOptimizerV1, self).__init__(optimizer, **kwargs)\n    elif isinstance(loss_scale, tf.mixed_precision.experimental.LossScale):\n      raise TypeError('Passing a LossScale that is not a FixedLossScale or a '\n                      'DynamicLossScale is no longer supported. Got: {}'\n                      .format(loss_scale))\n    else:\n      raise ValueError('Invalid value passed to loss_scale. loss_scale '\n                       'must be the string \"dynamic\" (recommended), an int, '\n                       'a float, a FixedLossScale, or a DynamicLossScale. Got '\n                       'value: {}'.format(loss_scale))\n\n  @classmethod\n  def from_config(cls, config, custom_objects=None):\n    config = config.copy()  # Make a copy, since we mutate config\n\n    # If loss_scale is in config, we assume we are deserializing a\n    # LossScaleOptimizer from TF 2.3 or below. Otherwise, we assume we are\n    # deserializing a LossScaleOptimizer from TF 2.4 or above.\n    if 'loss_scale' in config:\n      config['loss_scale'] = keras_loss_scale_module.deserialize(\n          config['loss_scale'])\n      if (isinstance(config['loss_scale'], tf.mixed_precision.experimental.DynamicLossScale)\n          and config['loss_scale'].multiplier != 2):\n        raise ValueError('Cannot deserialize LossScaleOptimizer with a '\n                         'DynamicLossScale whose multiplier is not 2. Got '\n                         'DynamicLossScale: %s' % (config['loss_scale'],))\n      config['optimizer'] = optimizers.deserialize(\n          config['optimizer'], custom_objects=custom_objects)\n      return cls(**config)\n\n    # We convert the config, as generated by LossScaleOptimizer.get_config, to a\n    # version that can be passed to LossScaleOptimizerV1.__init__\n    if config['dynamic']:\n      config['loss_scale'] = tf.mixed_precision.experimental.DynamicLossScale(\n          config['initial_scale'], config['dynamic_growth_steps'], multiplier=2)\n    else:\n      config['loss_scale'] = tf.mixed_precision.experimental.FixedLossScale(\n          config['initial_scale'])\n\n    del config['dynamic']\n    del config['initial_scale']\n    del config['dynamic_growth_steps']\n    config['optimizer'] = optimizers.deserialize(\n        config.pop('inner_optimizer'), custom_objects=custom_objects)\n    return cls(**config)\n\n\nclass FakeOptimizerForRestoration(tf.__internal__.tracking.Trackable):\n  \"\"\"A fake optimizer used to support restoring TensorFlow 2.2 checkpoints.\n\n  The checkpoint format for LossScaleOptimizers changed after TF 2.2. This class\n  exists to support restoring TF 2.2 checkpoints in newer versions of TensorFlow.\n\n  In TF 2.2, LossScaleOptimizer would track the wrapped optimizer by calling the\n  following in LossScaleOptimizer.__init__\n\n  ```\n  self._track_trackable(self._optimizer, 'base_optimizer')\n  ```\n\n  This means a dependency from the LossScaleOptimizer to the wrapped optimizer\n  would be stored in the checkpoint. However now, the checkpoint format with a\n  LossScaleOptimizer is the same as the format without a LossScaleOptimizer,\n  except the loss scale is also stored. This means there is no dependency from\n  the LossScaleOptimizer to the wrapped optimizer. Instead, the\n  LossScaleOptimizer acts as if it is the wrapped optimizer, from a checkpoint's\n  perspective, by overriding all Trackable methods and delegating them to the\n  wrapped optimizer.\n\n  To allow restoring TF 2.2 
checkpoints, LossScaleOptimizer adds a dependency\n on this class instead of the inner optimizer. When restored, this class will\n instead restore the slot variables of the inner optimizer. Since this class\n has no variables, it does not affect the checkpoint when saved.\n \"\"\"\n\n def __init__(self, optimizer):\n self._optimizer = optimizer\n\n def get_slot_names(self):\n return self._optimizer.get_slot_names()\n\n def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,\n variable):\n return self._optimizer._create_or_restore_slot_variable( # pylint: disable=protected-access\n slot_variable_position, slot_name, variable)\n\n\ntf.__internal__.mixed_precision.register_loss_scale_wrapper(optimizer_v2.OptimizerV2,\n LossScaleOptimizerV1)\n\n\ndef _multiply_gradient(gradient, scale):\n \"\"\"Multiply a (possibly sparse) gradient by the given scale factor.\"\"\"\n scale = tf.cast(scale, gradient.dtype)\n if isinstance(gradient, tf.IndexedSlices):\n return tf.IndexedSlices(\n gradient.values * scale,\n gradient.indices,\n dense_shape=gradient.dense_shape)\n else:\n return gradient * scale\n\n\ndef strategy_supports_loss_scaling():\n \"\"\"Returns True if the current Strategy supports loss scaling.\"\"\"\n if not tf.distribute.has_strategy():\n return True\n strategy = tf.distribute.get_strategy()\n # Strategies are supported if either there is only one replica or if variables\n # are replicated per device. Otherwise, the current model.fit() implementation\n # and most custom training loops incorrectly unscale the gradients. Currently,\n # gradients are unscaled once per compute replica, but they should be unscaled\n # once per variable replica. When there is one variable replica for each\n # compute replica, this works fine, but otherwise issues will occur.\n # TODO(reedwm): Support all strategies.\n return isinstance(strategy, (\n tf.distribute.MultiWorkerMirroredStrategy,\n tf.compat.v1.distribute.experimental.MultiWorkerMirroredStrategy,\n tf.distribute.OneDeviceStrategy,\n tf.compat.v1.distribute.OneDeviceStrategy,\n tf.distribute.MirroredStrategy,\n tf.compat.v1.distribute.MirroredStrategy,\n ))\n",
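For readers skimming `_DynamicLossScaleState.update` above, the control flow reduces to a small state machine. The following is a pure-Python sketch of that rule only; it deliberately omits graph mode, `tf.distribute` handling, and the `_assign_if_finite` guard the real code uses to avoid writing a non-finite doubled scale. `ToyDynamicLossScale` is an illustrative name, not part of the Keras API.

```python
import math

class ToyDynamicLossScale:
    """Illustrative re-statement of the dynamic loss-scale update rule."""

    def __init__(self, initial_scale=2.0 ** 15, growth_steps=2000,
                 multiplier=2.0):
        self.scale = initial_scale
        self.growth_steps = growth_steps
        self.multiplier = multiplier
        self.counter = 0  # consecutive steps with all-finite gradients

    def update(self, grads):
        """Returns True if the caller should apply `grads` this step."""
        finite = all(math.isfinite(g) for g in grads)
        if finite:
            self.counter += 1
            if self.counter >= self.growth_steps:
                # Enough consecutive finite steps: double the loss scale.
                self.scale *= self.multiplier
                self.counter = 0
        else:
            # Nonfinite gradients: halve the scale (never below 1),
            # reset the streak, and skip applying gradients this step.
            self.scale = max(self.scale / self.multiplier, 1.0)
            self.counter = 0
        return finite
```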
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n# pylint: disable=g-classes-have-attributes\n\"\"\"Contains the core layers: Dense, Dropout.\n\nAlso contains their functional aliases.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport warnings\n\nfrom keras import layers as keras_layers\nfrom keras.legacy_tf_layers import base\nfrom tensorflow.python.util.tf_export import keras_export\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.Dense'])\n@tf_export(v1=['layers.Dense'])\nclass Dense(keras_layers.Dense, base.Layer):\n \"\"\"Densely-connected layer class.\n\n This layer implements the operation:\n `outputs = activation(inputs * kernel + bias)`\n Where `activation` is the activation function passed as the `activation`\n argument (if not `None`), `kernel` is a weights matrix created by the layer,\n and `bias` is a bias vector created by the layer\n (only if `use_bias` is `True`).\n\n Args:\n units: Integer or Long, dimensionality of the output space.\n activation: Activation function (callable). Set it to None to maintain a\n linear activation.\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: Initializer function for the weight matrix.\n If `None` (default), weights are initialized using the default\n initializer used by `tf.compat.v1.get_variable`.\n bias_initializer: Initializer function for the bias.\n kernel_regularizer: Regularizer function for the weight matrix.\n bias_regularizer: Regularizer function for the bias.\n activity_regularizer: Regularizer function for the output.\n kernel_constraint: An optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). Constraints are\n not safe to use when doing asynchronous distributed training.\n bias_constraint: An optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: String, the name of the layer. 
Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such cases.\n _reuse: Boolean, whether to reuse the weights of a previous layer\n by the same name.\n\n Properties:\n units: Python integer, dimensionality of the output space.\n activation: Activation function (callable).\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: Initializer instance (or name) for the kernel matrix.\n bias_initializer: Initializer instance (or name) for the bias.\n kernel_regularizer: Regularizer instance for the kernel matrix (callable)\n bias_regularizer: Regularizer instance for the bias (callable).\n activity_regularizer: Regularizer instance for the output (callable)\n kernel_constraint: Constraint function for the kernel matrix.\n bias_constraint: Constraint function for the bias.\n kernel: Weight matrix (TensorFlow variable or tensor).\n bias: Bias vector, if applicable (TensorFlow variable or tensor).\n \"\"\"\n\n def __init__(self, units,\n activation=None,\n use_bias=True,\n kernel_initializer=None,\n bias_initializer=tf.compat.v1.zeros_initializer(),\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n **kwargs):\n super(Dense, self).__init__(units=units,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n trainable=trainable,\n name=name,\n **kwargs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.dense'])\n@tf_export(v1=['layers.dense'])\ndef dense(\n inputs, units,\n activation=None,\n use_bias=True,\n kernel_initializer=None,\n bias_initializer=tf.compat.v1.zeros_initializer(),\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n reuse=None):\n \"\"\"Functional interface for the densely-connected layer.\n\n This layer implements the operation:\n `outputs = activation(inputs * kernel + bias)`\n where `activation` is the activation function passed as the `activation`\n argument (if not `None`), `kernel` is a weights matrix created by the layer,\n and `bias` is a bias vector created by the layer\n (only if `use_bias` is `True`).\n\n Args:\n inputs: Tensor input.\n units: Integer or Long, dimensionality of the output space.\n activation: Activation function (callable). Set it to None to maintain a\n linear activation.\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: Initializer function for the weight matrix.\n If `None` (default), weights are initialized using the default\n initializer used by `tf.compat.v1.get_variable`.\n bias_initializer: Initializer function for the bias.\n kernel_regularizer: Regularizer function for the weight matrix.\n bias_regularizer: Regularizer function for the bias.\n activity_regularizer: Regularizer function for the output.\n kernel_constraint: An optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). 
Constraints are\n not safe to use when doing asynchronous distributed training.\n bias_constraint: An optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: String, the name of the layer.\n reuse: Boolean, whether to reuse the weights of a previous layer\n by the same name.\n\n Returns:\n Output tensor the same shape as `inputs` except the last dimension is of\n size `units`.\n\n Raises:\n ValueError: if eager execution is enabled.\n \"\"\"\n warnings.warn('`tf.layers.dense` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.Dense` instead.')\n layer = Dense(units,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n kernel_constraint=kernel_constraint,\n bias_constraint=bias_constraint,\n trainable=trainable,\n name=name,\n _scope=name,\n _reuse=reuse)\n return layer.apply(inputs)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.Dropout'])\n@tf_export(v1=['layers.Dropout'])\nclass Dropout(keras_layers.Dropout, base.Layer):\n \"\"\"Applies Dropout to the input.\n\n Dropout consists in randomly setting a fraction `rate` of input units to 0\n at each update during training time, which helps prevent overfitting.\n The units that are kept are scaled by `1 / (1 - rate)`, so that their\n sum is unchanged at training time and inference time.\n\n Args:\n rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out\n 10% of input units.\n noise_shape: 1D tensor of type `int32` representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)`, and you want the dropout mask\n to be the same for all timesteps, you can use\n `noise_shape=[batch_size, 1, features]`.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed`.\n for behavior.\n name: The name of the layer (string).\n \"\"\"\n\n def __init__(self, rate=0.5,\n noise_shape=None,\n seed=None,\n name=None,\n **kwargs):\n super(Dropout, self).__init__(rate=rate,\n noise_shape=noise_shape,\n seed=seed,\n name=name,\n **kwargs)\n\n def call(self, inputs, training=False):\n return super(Dropout, self).call(inputs, training=training)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.dropout'])\n@tf_export(v1=['layers.dropout'])\ndef dropout(inputs,\n rate=0.5,\n noise_shape=None,\n seed=None,\n training=False,\n name=None):\n \"\"\"Applies Dropout to the input.\n\n Dropout consists in randomly setting a fraction `rate` of input units to 0\n at each update during training time, which helps prevent overfitting.\n The units that are kept are scaled by `1 / (1 - rate)`, so that their\n sum is unchanged at training time and inference time.\n\n Args:\n inputs: Tensor input.\n rate: The dropout rate, between 0 and 1. E.g. 
\"rate=0.1\" would drop out\n 10% of input units.\n noise_shape: 1D tensor of type `int32` representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)`, and you want the dropout mask\n to be the same for all timesteps, you can use\n `noise_shape=[batch_size, 1, features]`.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed`\n for behavior.\n training: Either a Python boolean, or a TensorFlow boolean scalar tensor\n (e.g. a placeholder). Whether to return the output in training mode\n (apply dropout) or in inference mode (return the input untouched).\n name: The name of the layer (string).\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if eager execution is enabled.\n \"\"\"\n warnings.warn('`tf.layers.dropout` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.Dropout` instead.')\n layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)\n return layer.apply(inputs, training=training)\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.Flatten'])\n@tf_export(v1=['layers.Flatten'])\nclass Flatten(keras_layers.Flatten, base.Layer):\n \"\"\"Flattens an input tensor while preserving the batch axis (axis 0).\n\n Args:\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, ...)`.\n\n Examples:\n\n ```\n x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')\n y = Flatten()(x)\n # now `y` has shape `(None, 16)`\n\n x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')\n y = Flatten()(x)\n # now `y` has shape `(None, None)`\n ```\n \"\"\"\n pass\n\n\n@keras_export(v1=['keras.__internal__.legacy.layers.flatten'])\n@tf_export(v1=['layers.flatten'])\ndef flatten(inputs, name=None, data_format='channels_last'):\n \"\"\"Flattens an input tensor while preserving the batch axis (axis 0).\n\n Args:\n inputs: Tensor input.\n name: The name of the layer (string).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, height, width)`.\n\n Returns:\n Reshaped tensor.\n\n Examples:\n\n ```\n x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')\n y = flatten(x)\n # now `y` has shape `(None, 16)`\n\n x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')\n y = flatten(x)\n # now `y` has shape `(None, None)`\n ```\n \"\"\"\n warnings.warn('`tf.layers.flatten` is deprecated and '\n 'will be removed in a future version. '\n 'Please use `tf.keras.layers.Flatten` instead.')\n layer = Flatten(name=name, data_format=data_format)\n return layer.apply(inputs)\n\n\n# Aliases\n\nFullyConnected = Dense\nfully_connected = dense\n"
] | [
[
"numpy.hstack",
"numpy.unique",
"numpy.arange",
"tensorflow.python.util.tf_export.keras_export",
"numpy.searchsorted",
"numpy.array"
],
[
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.__internal__.test.combinations.combine",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.compat.v1.device"
],
[
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.__internal__.mixed_precision.register_loss_scale_wrapper",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.compat.v2.math.is_finite",
"tensorflow.compat.v2.mixed_precision.experimental.FixedLossScale",
"tensorflow.compat.v2.group",
"tensorflow.compat.v2.__internal__.smart_cond.smart_cond",
"tensorflow.compat.v2.distribute.get_replica_context",
"tensorflow.compat.v2.reduce_all",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.compat.v2.compat.v1.cond",
"tensorflow.compat.v2.distribute.in_cross_replica_context",
"tensorflow.compat.v2.__internal__.tracking.TrackableReference",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.compat.v1.get_default_graph",
"tensorflow.compat.v2.compat.v1.Variable",
"tensorflow.compat.v2.no_op",
"tensorflow.compat.v2.mixed_precision.experimental.DynamicLossScale",
"tensorflow.compat.v2.maximum",
"tensorflow.compat.v2.GradientTape",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.distribute.has_strategy",
"tensorflow.compat.v2.distribute.get_strategy",
"tensorflow.compat.v2.IndexedSlices"
],
[
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.compat.v2.compat.v1.zeros_initializer",
"tensorflow.python.util.tf_export.tf_export"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
}
] |
daniil-lyakhov/deep-object-reid | [
"b0f7d6a2d4cff8c417a66d82c09d16788d81ec67",
"b0f7d6a2d4cff8c417a66d82c09d16788d81ec67"
] | [
"torchreid/models/mobilenetv3.py",
"torchreid/engine/image/multilabel.py"
] | [
"import math\n\nimport torch\nimport torch.nn as nn\nfrom torch.cuda.amp import autocast\n\nfrom torchreid.losses import AngleSimpleLinear\nfrom torchreid.ops import Dropout, EvalModeSetter, rsc\nfrom .common import HSigmoid, HSwish, ModelInterface, make_divisible\nimport timm\n\nfrom torchreid.integration.nncf.compression import get_no_nncf_trace_context_manager, nullcontext\n\n__all__ = ['mobilenetv3_large', 'mobilenetv3_large_075', 'mobilenetv3_small', 'mobilenetv3_large_150',\n 'mobilenetv3_large_125']\n\npretrained_urls = {\n 'mobilenetv3_small':\n 'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-small-55df8e1f.pth?raw=true',\n 'mobilenetv3_large':\n 'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-1cd25616.pth?raw=true',\n 'mobilenetv3_large_075':\n 'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-0.75-9632d2a8.pth?raw=true',\n 'mobilenetv3_large_21k':\n 'https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/mobilenetv3_large_100_miil_21k.pth'\n}\n\n\nSHOULD_NNCF_SKIP_SE_LAYERS = False\nSHOULD_NNCF_SKIP_HEAD = False\nno_nncf_se_layer_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_SE_LAYERS else nullcontext\nno_nncf_head_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_HEAD else nullcontext\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=4):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, make_divisible(channel // reduction, 8)),\n nn.ReLU(inplace=True),\n nn.Linear(make_divisible(channel // reduction, 8), channel),\n HSigmoid()\n )\n\n def forward(self, x):\n with no_nncf_se_layer_context():\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y\n\n\ndef conv_3x3_bn(inp, oup, stride, IN_conv1=False):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n nn.BatchNorm2d(oup) if not IN_conv1 else nn.InstanceNorm2d(oup, affine=True),\n HSwish()\n )\n\n\ndef conv_1x1_bn(inp, oup, loss='softmax'):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n HSwish() if loss == 'softmax' else nn.PReLU()\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):\n super(InvertedResidual, self).__init__()\n assert stride in [1, 2]\n\n self.identity = stride == 1 and inp == oup\n\n if inp == hidden_dim:\n self.conv = nn.Sequential(\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n HSwish() if use_hs else nn.ReLU(inplace=True),\n # Squeeze-and-Excite\n SELayer(hidden_dim) if use_se else nn.Identity(),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n else:\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),\n nn.BatchNorm2d(hidden_dim),\n HSwish() if use_hs else nn.ReLU(inplace=True),\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n # Squeeze-and-Excite\n SELayer(hidden_dim) if use_se else nn.Identity(),\n HSwish() if use_hs else nn.ReLU(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n\n def forward(self, x):\n if 
self.identity:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass MobileNetV3(ModelInterface):\n def __init__(self,\n cfgs,\n mode,\n IN_conv1=False,\n num_classes=1000,\n width_mult=1.,\n in_channels=3,\n input_size=(224, 224),\n dropout_cls = None,\n pooling_type='avg',\n IN_first=False,\n self_challenging_cfg=False,\n **kwargs):\n\n super().__init__(**kwargs)\n self.in_size = input_size\n self.num_classes = num_classes\n self.input_IN = nn.InstanceNorm2d(in_channels, affine=True) if IN_first else None\n self.pooling_type = pooling_type\n self.self_challenging_cfg = self_challenging_cfg\n self.width_mult = width_mult\n self.dropout_cls = dropout_cls\n # setting of inverted residual blocks\n self.cfgs = cfgs\n assert mode in ['large', 'small']\n # building first layer\n input_channel = make_divisible(16 * self.width_mult, 8)\n stride = 1 if self.in_size[0] < 100 else 2\n layers = [conv_3x3_bn(3, input_channel, stride, IN_conv1)]\n # building inverted residual blocks\n block = InvertedResidual\n flag = True\n for k, t, c, use_se, use_hs, s in self.cfgs:\n if (self.in_size[0] < 100) and (s == 2) and flag:\n s = 1\n flag = False\n output_channel = make_divisible(c * self.width_mult, 8)\n exp_size = make_divisible(input_channel * t, 8)\n layers.append(block(input_channel, exp_size, output_channel, k, s, use_se, use_hs))\n input_channel = output_channel\n self.features = nn.Sequential(*layers)\n self.num_features = exp_size\n # building last several layers\n self.conv = conv_1x1_bn(input_channel, exp_size, self.loss)\n output_channel = {'large': 1280, 'small': 1024}\n output_channel = make_divisible(output_channel[mode] * self.width_mult, 8) if self.width_mult > 1.0 else output_channel[mode]\n\n if self.loss == 'softmax' or self.loss == 'asl':\n self.classifier = nn.Sequential(\n nn.Linear(exp_size, output_channel),\n nn.BatchNorm1d(output_channel),\n HSwish(),\n Dropout(**self.dropout_cls),\n nn.Linear(output_channel, self.num_classes),\n )\n else:\n assert self.loss in ['am_softmax', 'am_binary']\n self.classifier = nn.Sequential(\n nn.Linear(exp_size, output_channel),\n nn.BatchNorm1d(output_channel),\n nn.PReLU(),\n Dropout(**self.dropout_cls),\n AngleSimpleLinear(output_channel, self.num_classes),\n )\n self._initialize_weights()\n self.forward = autocast(self.mix_precision)(self.forward)\n\n def extract_features(self, x):\n y = self.conv(self.features(x))\n return y\n\n def infer_head(self, x, skip_pool=False):\n if not skip_pool:\n glob_features = self._glob_feature_vector(x, self.pooling_type, reduce_dims=False)\n else:\n glob_features = x\n\n logits = self.classifier(glob_features.view(x.shape[0], -1))\n return glob_features, logits\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def forward(self, x, return_featuremaps=False, get_embeddings=False, gt_labels=None):\n if self.input_IN is not None:\n x = self.input_IN(x)\n\n y = self.extract_features(x)\n if return_featuremaps:\n return y\n\n with no_nncf_head_context():\n glob_features, logits = self.infer_head(y, skip_pool=False)\n if self.training and self.self_challenging_cfg.enable and gt_labels is not None:\n glob_features = rsc(\n features = glob_features,\n scores = logits,\n labels = gt_labels,\n retain_p = 1.0 - self.self_challenging_cfg.drop_p,\n retain_batch = 1.0 - self.self_challenging_cfg.drop_batch_p\n )\n\n with EvalModeSetter([self.output], m_type=(nn.BatchNorm1d, nn.BatchNorm2d)):\n _, logits = self.infer_head(x, skip_pool=True)\n\n if not self.training and self.is_classification():\n return [logits]\n\n if get_embeddings:\n out_data = [logits, glob_features]\n elif self.loss in ['softmax', 'am_softmax', 'asl', 'am_binary']:\n out_data = [logits]\n elif self.loss in ['triplet']:\n out_data = [logits, glob_features]\n else:\n raise KeyError(\"Unsupported loss: {}\".format(self.loss))\n\n return tuple(out_data)\n\n\ndef init_pretrained_weights(model, key='', **kwargs):\n \"\"\"Initializes model with pretrained weights.\n Layers that don't match with pretrained layers in name or size are kept unchanged.\n \"\"\"\n import os\n import errno\n import gdown\n\n from torchreid.utils import load_pretrained_weights\n\n def _get_torch_home():\n ENV_TORCH_HOME = 'TORCH_HOME'\n ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'\n DEFAULT_CACHE_DIR = '~/.cache'\n torch_home = os.path.expanduser(\n os.getenv(\n ENV_TORCH_HOME,\n os.path.join(\n os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'\n )\n )\n )\n return torch_home\n\n torch_home = _get_torch_home()\n model_dir = os.path.join(torch_home, 'checkpoints')\n try:\n os.makedirs(model_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n filename = key + '_imagenet.pth'\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file):\n gdown.download(pretrained_urls[key], cached_file)\n model = load_pretrained_weights(model, cached_file, **kwargs)\n\n\ndef mobilenetv3_large_075(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Large model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1],\n [5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1],\n [3, 6, 80, 0, 1, 2],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [5, 6, 160, 1, 1, 2],\n [5, 6, 160, 1, 1, 1],\n [5, 6, 160, 1, 1, 1]\n ]\n\n net = MobileNetV3(cfgs, mode='large', width_mult =.75, **kwargs)\n if pretrained:\n init_pretrained_weights(net, key='mobilenetv3_large_075')\n\n return net\n\n\ndef mobilenetv3_large(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Large model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1],\n [5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1],\n [3, 6, 80, 0, 1, 2],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [5, 6, 160, 1, 1, 2],\n [5, 6, 160, 
1, 1, 1],\n [5, 6, 160, 1, 1, 1]\n ]\n\n net = MobileNetV3(cfgs, mode='large', width_mult = 1., **kwargs)\n if pretrained:\n init_pretrained_weights(net, key='mobilenetv3_large')\n\n return net\n\n\ndef mobilenetv3_large_150(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Large model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1],\n [5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1],\n [3, 6, 80, 0, 1, 2],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [5, 6, 160, 1, 1, 2],\n [5, 6, 160, 1, 1, 1],\n [5, 6, 160, 1, 1, 1]\n ]\n\n net = MobileNetV3(cfgs, mode='large', width_mult = 1.5, **kwargs)\n if pretrained:\n raise NotImplementedError(\"The weights for this configuration are not available\")\n\n return net\n\n\ndef mobilenetv3_large_125(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Large model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1],\n [5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1],\n [3, 6, 80, 0, 1, 2],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [5, 6, 160, 1, 1, 2],\n [5, 6, 160, 1, 1, 1],\n [5, 6, 160, 1, 1, 1]\n ]\n\n net = MobileNetV3(cfgs, mode='large', width_mult = 1.25, **kwargs)\n if pretrained:\n raise NotImplementedError(\"The weights for this configuration are not available\")\n\n return net\n\n\ndef mobilenetv3_small(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Small model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 1, 0, 2],\n [3, 4.5, 24, 0, 0, 2],\n [3, 3.67, 24, 0, 0, 1],\n [5, 4, 40, 1, 1, 2],\n [5, 6, 40, 1, 1, 1],\n [5, 6, 40, 1, 1, 1],\n [5, 3, 48, 1, 1, 1],\n [5, 3, 48, 1, 1, 1],\n [5, 6, 96, 1, 1, 2],\n [5, 6, 96, 1, 1, 1],\n [5, 6, 96, 1, 1, 1],\n ]\n net = MobileNetV3(cfgs, mode='small', width_mult = 1., **kwargs)\n if pretrained:\n init_pretrained_weights(net, key='mobilenetv3_small')\n\n return net\n",
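At the core of the model code above, `SELayer` is a squeeze-and-excite channel gate. A self-contained PyTorch sketch of the same idea, using the built-in `nn.Hardsigmoid` in place of the repository's `HSigmoid` and omitting the `make_divisible` channel rounding (so the bottleneck width here is only approximate):

```
import torch
import torch.nn as nn

class SqueezeExcite(nn.Module):
    """Channel gate: global average pool, bottleneck MLP, hard-sigmoid."""

    def __init__(self, channels, reduction=4):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Hardsigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        gate = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
        return x * gate  # reweight each channel of the input

x = torch.randn(2, 16, 8, 8)
print(SqueezeExcite(16)(x).shape)  # torch.Size([2, 16, 8, 8])
```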
"from __future__ import absolute_import, division, print_function\nfrom enum import auto\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.cuda.amp import GradScaler, autocast\n\nfrom torchreid import metrics\nfrom torchreid.losses import AsymmetricLoss, AMBinaryLoss\nfrom torchreid.metrics.accuracy import accuracy\nfrom torchreid.optim import SAM\nfrom ..engine import Engine\n\nclass MultilabelEngine(Engine):\n r\"\"\"Multilabel classification engine. It supports ASL, BCE and Angular margin loss with binary classification.\"\"\"\n def __init__(self, datamanager, models, optimizers, schedulers, use_gpu, save_all_chkpts,\n train_patience, early_stoping, lr_decay_factor, loss_name, label_smooth,\n lr_finder, m, s, sym_adjustment, auto_balance, amb_k, amb_t, clip_grad,\n should_freeze_aux_models, nncf_metainfo, initial_lr,\n target_metric, use_ema_decay, ema_decay, asl_gamma_pos, asl_gamma_neg, asl_p_m,\n mix_precision, **kwargs):\n\n super().__init__(datamanager,\n models=models,\n optimizers=optimizers,\n schedulers=schedulers,\n use_gpu=use_gpu,\n save_all_chkpts=save_all_chkpts,\n train_patience=train_patience,\n lr_decay_factor=lr_decay_factor,\n early_stoping=early_stoping,\n should_freeze_aux_models=should_freeze_aux_models,\n nncf_metainfo=nncf_metainfo,\n initial_lr=initial_lr,\n lr_finder=lr_finder,\n target_metric=target_metric,\n use_ema_decay=use_ema_decay,\n ema_decay=ema_decay)\n\n self.main_losses = nn.ModuleList()\n self.clip_grad = clip_grad\n num_classes = self.datamanager.num_train_pids\n if not isinstance(num_classes, (list, tuple)):\n num_classes = [num_classes]\n self.num_classes = num_classes\n\n for _ in enumerate(self.num_classes):\n if loss_name == 'asl':\n self.main_losses.append(AsymmetricLoss(\n gamma_neg=asl_gamma_neg,\n gamma_pos=asl_gamma_pos,\n probability_margin=asl_p_m,\n label_smooth=label_smooth,\n ))\n elif loss_name == 'bce':\n self.main_losses.append(AsymmetricLoss(\n gamma_neg=0,\n gamma_pos=0,\n probability_margin=0,\n label_smooth=label_smooth,\n ))\n elif loss_name == 'am_binary':\n self.main_losses.append(AMBinaryLoss(\n m=m,\n k=amb_k,\n t=amb_t,\n s=s,\n sym_adjustment=sym_adjustment,\n auto_balance=auto_balance,\n gamma_neg=asl_gamma_neg,\n gamma_pos=asl_gamma_pos,\n label_smooth=label_smooth,\n ))\n\n num_classes = self.datamanager.num_train_pids\n if not isinstance(num_classes, (list, tuple)):\n num_classes = [num_classes]\n self.num_classes = num_classes\n self.num_targets = len(self.num_classes)\n self.enable_sam = isinstance(self.optims[self.main_model_name], SAM)\n\n for model_name in self.get_model_names():\n assert isinstance(self.optims[model_name], SAM) == self.enable_sam, \"SAM must be enabled \\\n for all models or none of them\"\n self.scaler = GradScaler(enabled=mix_precision)\n self.prev_smooth_top1 = 0.\n self.forward_backward = autocast(mix_precision)(self.forward_backward)\n\n def forward_backward(self, data):\n n_iter = self.epoch * self.num_batches + self.batch_idx\n\n train_records = self.parse_data_for_train(data, output_dict=True, use_gpu=self.use_gpu)\n imgs, obj_ids = train_records['img'], train_records['obj_id']\n\n model_names = self.get_model_names()\n num_models = len(model_names)\n steps = [1,2] if self.enable_sam and not self.lr_finder else [1]\n # forward pass\n for step in steps:\n # if sam is enabled then statistics will be written each step, but will be saved only the second time\n # this is made just for convinience\n avg_acc = 0.0\n out_logits = [[] for _ in 
range(self.num_targets)]\n total_loss = torch.zeros([], dtype=imgs.dtype, device=imgs.device)\n loss_summary = dict()\n\n for model_name in model_names:\n self.optims[model_name].zero_grad()\n model_loss, model_loss_summary, model_avg_acc, model_logits = self._single_model_losses(\n self.models[model_name], train_records, imgs, obj_ids, n_iter, model_name)\n avg_acc += model_avg_acc / float(num_models)\n total_loss += model_loss / float(num_models)\n loss_summary.update(model_loss_summary)\n\n for trg_id in range(self.num_targets):\n if model_logits[trg_id] is not None:\n out_logits[trg_id].append(model_logits[trg_id])\n model_num = len(model_names)\n # compute mutual loss\n if len(model_names) > 1:\n mutual_loss = torch.zeros([], dtype=imgs.dtype, device=imgs.device)\n for trg_id in range(self.num_targets):\n if len(out_logits[trg_id]) <= 1:\n continue\n for model_i, logits_i in enumerate(out_logits[trg_id]):\n probabilities_i = torch.sigmoid(logits_i)\n kl_loss = 0\n for model_j, logits_j in enumerate(out_logits[trg_id]):\n if model_i != model_j:\n probabilities_j = torch.sigmoid(logits_j)\n kl_loss += self.kl_div_binary(probabilities_i, probabilities_j)\n mutual_loss += kl_loss / (model_num - 1)\n loss_summary['mutual_{}/{}'.format(trg_id, model_names[model_i])] = mutual_loss.item()\n\n should_turn_off_mutual_learning = self._should_turn_off_mutual_learning(self.epoch)\n coeff_mutual_learning = int(not should_turn_off_mutual_learning)\n\n total_loss += coeff_mutual_learning * mutual_loss\n # backward pass\n self.scaler.scale(total_loss).backward(retain_graph=False)\n for model_name in model_names:\n if self.clip_grad != 0 and step == 1:\n self.scaler.unscale_(self.optims[model_name])\n torch.nn.utils.clip_grad_norm_(self.models[model_name].parameters(), self.clip_grad)\n if not self.enable_sam and step == 1:\n self.scaler.step(self.optims[model_name])\n self.scaler.update()\n elif step == 1:\n assert self.enable_sam\n if self.clip_grad == 0:\n # if self.clip_grad == 0 this means that unscale_ wasn't applied\n # unscale parameters to perform SAM manipulations with parameters\n self.scaler.unscale_(self.optims[model_name]) \n overflow = self.optims[model_name].first_step(self.scaler)\n self.scaler.update() # update scaler after first step\n if overflow:\n print(\"Overflow occurred. 
Skipping step ...\")\n loss_summary['loss'] = total_loss.item()\n # skip second step if overflow occurred \n return loss_summary, avg_acc\n else:\n assert self.enable_sam and step==2\n if self.clip_grad == 0:\n self.scaler.unscale_(self.optims[model_name]) \n self.optims[model_name].second_step()\n self.scaler.update()\n\n loss_summary['loss'] = total_loss.item()\n\n return loss_summary, avg_acc\n\n def _single_model_losses(self, model, train_records, imgs, obj_ids, n_iter, model_name):\n model_output = model(imgs)\n all_logits = self._parse_model_output(model_output)\n\n total_loss = torch.zeros([], dtype=imgs.dtype, device=imgs.device)\n out_logits = []\n loss_summary = dict()\n\n num_trg_losses = 0\n avg_acc = 0\n\n for trg_id in range(self.num_targets):\n trg_mask = train_records['dataset_id'] == trg_id\n\n trg_obj_ids = obj_ids[trg_mask]\n trg_num_samples = trg_obj_ids.numel()\n if trg_num_samples == 0:\n out_logits.append(None)\n continue\n\n trg_logits = all_logits[trg_id][trg_mask]\n main_loss = self.main_losses[trg_id](trg_logits, trg_obj_ids)\n avg_acc += metrics.accuracy_multilabel(trg_logits, trg_obj_ids).item()\n loss_summary['main_{}/{}'.format(trg_id, model_name)] = main_loss.item()\n\n scaled_trg_logits = self.main_losses[trg_id].get_last_scale() * trg_logits\n out_logits.append(scaled_trg_logits)\n\n total_loss += main_loss\n num_trg_losses += 1\n\n total_loss /= float(num_trg_losses)\n avg_acc /= float(num_trg_losses)\n\n return total_loss, loss_summary, avg_acc, out_logits\n\n def kl_div_binary(self, x, y):\n ''' compute KL divergence between two tensors represented\n independent binary distributions'''\n # get binary distributions for two models with shape = (BxCx2)\n p = torch.stack((x, (1-x))).permute(1,2,0)\n q = torch.stack((y, (1-y))).permute(1,2,0)\n # log probabilities\n p_log = torch.log(p.add_(1e-8))\n # compute true KLDiv for each sample, than do the batchmean reduction\n return F.kl_div(p_log, q, reduction='none').sum(2).div_(x.size(1)).sum().div_(x.size(0))\n\n def _parse_model_output(self, model_output):\n all_logits = model_output[0] if isinstance(model_output, (tuple, list)) else model_output\n all_logits = all_logits if isinstance(all_logits, (tuple, list)) else [all_logits]\n\n return all_logits\n\n def exit_on_plateau_and_choose_best(self, top1, smooth_top1):\n '''\n The function returns a pair (should_exit, is_candidate_for_best).\n\n The function sets this checkpoint as a candidate for best if either it is the first checkpoint\n for this LR or this checkpoint is better then the previous best.\n\n The function sets should_exit = True if the overfitting is observed or the metric\n doesn't improves for a predetermined number of epochs.\n '''\n\n should_exit = False\n is_candidate_for_best = False\n current_metric = round(top1, 4)\n if smooth_top1 <= self.prev_smooth_top1:\n self.iter_to_wait += 1\n if self.iter_to_wait >= self.train_patience:\n print(\"The training should be stopped due to no improvements for {} epochs\".format(self.train_patience))\n should_exit = True\n else:\n self.iter_to_wait = 0\n\n if current_metric >= self.best_metric:\n self.best_metric = current_metric\n is_candidate_for_best = True\n\n self.prev_smooth_top1 = smooth_top1\n return should_exit, is_candidate_for_best\n"
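The `kl_div_binary` helper in the engine above treats each class logit as an independent Bernoulli and averages the per-class KL over classes and batch. A standalone sketch of the same computation (the epsilon and tensor shapes are illustrative):

```
import torch
import torch.nn.functional as F

def kl_div_binary(p, q, eps=1e-8):
    # Stack each probability with its complement: (B, C) -> (B, C, 2).
    p2 = torch.stack((p, 1.0 - p), dim=-1)
    q2 = torch.stack((q, 1.0 - q), dim=-1)
    # F.kl_div(log_p, q) computes KL(q || p) elementwise here.
    kl = F.kl_div(torch.log(p2 + eps), q2, reduction='none')
    # Sum the two Bernoulli bins, then average over batch and classes.
    return kl.sum(-1).mean()

p = torch.sigmoid(torch.randn(4, 10))
q = torch.sigmoid(torch.randn(4, 10))
print(kl_div_binary(p, q).item())  # ~0 when p == q
```

Note the direction: with `F.kl_div(log_p, q)` PyTorch evaluates KL(q ‖ p), which is what the mutual-learning term above accumulates between each pair of models.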
] | [
[
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.PReLU",
"torch.nn.Conv2d",
"torch.cuda.amp.autocast",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.InstanceNorm2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.nn.functional.kl_div",
"torch.sigmoid",
"torch.zeros",
"torch.nn.ModuleList",
"torch.cuda.amp.autocast",
"torch.cuda.amp.GradScaler",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
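The multilabel engine in this entry takes two optimizer steps per batch when SAM is enabled. Stripped of the `GradScaler`/overflow bookkeeping the engine adds, the first-step/second-step pattern looks roughly like this. A simplified sketch, not the repository's `SAM` class; `rho`, the toy model, and the loss are illustrative:

```
import torch

def sam_step(model, loss_fn, optimizer, rho=0.05):
    optimizer.zero_grad()
    loss_fn(model).backward()                # gradient at the current weights
    with torch.no_grad():
        grad_norm = torch.norm(torch.stack(
            [p.grad.norm() for p in model.parameters() if p.grad is not None]))
        eps = {}
        for p in model.parameters():
            if p.grad is None:
                continue
            e = rho * p.grad / (grad_norm + 1e-12)
            p.add_(e)                        # "first step": move to a nearby worst case
            eps[p] = e
    optimizer.zero_grad()
    loss_fn(model).backward()                # gradient at the perturbed weights
    with torch.no_grad():
        for p, e in eps.items():
            p.sub_(e)                        # restore the original weights
    optimizer.step()                         # "second step": apply the SAM gradient
    optimizer.zero_grad()

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
x, y = torch.randn(8, 4), torch.randn(8, 2)
sam_step(model, lambda m: torch.nn.functional.mse_loss(m(x), y), opt)
```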
Harshs27/lingvo | [
"bd396e651488b2e2c4a7416be077b4a0226c87c8",
"bd396e651488b2e2c4a7416be077b4a0226c87c8",
"bd396e651488b2e2c4a7416be077b4a0226c87c8",
"bd396e651488b2e2c4a7416be077b4a0226c87c8",
"bd396e651488b2e2c4a7416be077b4a0226c87c8"
] | [
"lingvo/core/conv_layers_builder_test.py",
"lingvo/tools/audio_lib.py",
"lingvo/core/cluster.py",
"lingvo/core/steps/attention_steps_test.py",
"lingvo/core/test_utils.py"
] | [
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for conv layers builder.\"\"\"\n\nfrom absl.testing import parameterized\nfrom lingvo import compat as tf\nfrom lingvo.core import bn_layers\nfrom lingvo.core import conv_layers_builder\nfrom lingvo.core import conv_layers_with_time_padding\nfrom lingvo.core import layers\nfrom lingvo.core import test_utils\nimport numpy as np\n\n\nclass ConvPaddedLayersTest(test_utils.TestCase):\n\n def _ConvTestHelper(self, dilation, stride, activation, batch_norm,\n weight_norm, in_dim, out_dim, filter_shape, conv_last,\n causal_conv):\n with self.session(use_gpu=True) as sess:\n p1 = layers.Conv2DLayer.Params().Set(\n name='conv_2d01',\n filter_shape=filter_shape + [in_dim, out_dim],\n filter_stride=stride,\n dilation_rate=dilation,\n activation=activation,\n batch_norm=batch_norm,\n weight_norm=weight_norm,\n bias=not batch_norm,\n conv_last=conv_last,\n causal_convolution=causal_conv)\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=weight_norm)\n if batch_norm:\n norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(\n decay=0.999)\n builder_params.norm_layer_tpl = norm_p\n else:\n builder_params.norm_layer_tpl = None\n p2 = builder_params.Instantiate().Conv2D(\n 'conv_2d02',\n in_dim,\n out_dim,\n filter_shape,\n stride=stride,\n dilation=dilation,\n activation=activation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l1 = p1.Instantiate()\n l2 = p2.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n l1_theta = l1.theta.Transform(tf.identity)\n l2_theta = l2.theta.Transform(tf.identity)\n conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)\n conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)\n\n tf.logging.info(l1_theta)\n tf.logging.info(l2_theta)\n l1_num_vars = l1_theta.Flatten()\n l2_num_var2 = l2_theta.Flatten()\n if len(l1_num_vars) != len(l2_num_var2):\n tf.logging.info(\n 'Mismatched number of vars: l1: %d vars, l2: %d vars',\n len(l1_num_vars), len(l2_num_var2))\n\n w1 = l1_theta.w\n w2 = l2_theta.conv_2d.w\n # b1 = l1_theta.b\n # b2 = l2_theta.bn_or_bias.b\n\n tf.global_variables_initializer().run()\n v1, p1 = sess.run([conv_out1, out1_padding])\n w1_v = sess.run(w1)\n v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})\n\n self.assertAllClose(v1, v2)\n self.assertAllClose(p1, p2)\n\n def testConvBasic(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'NONE'\n batch_norm = False\n weight_norm = False\n in_dim = 3\n out_dim = 3\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,\n in_dim, out_dim, filter_shape, 
conv_last, causal_conv)\n\n def testConvBnWnTanh(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n out_dim = 3\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,\n in_dim, out_dim, filter_shape, conv_last, causal_conv)\n\n def testConvGn(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'TANH'\n in_dim = 3\n out_dim = 4\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n\n with self.session(use_gpu=True) as sess:\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=True)\n builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(\n num_groups=2)\n p = builder_params.Instantiate().Conv2D(\n 'conv_2d02',\n in_dim,\n out_dim,\n filter_shape,\n stride=stride,\n dilation=dilation,\n activation=activation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l = p.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)\n tf.global_variables_initializer().run()\n v = sess.run(tf.reduce_sum(conv_out, 0))\n\n expected_out = [[[-0.35070014, -1.7821487, 0.8349923, 1.1709788],\n [-0.18872532, 0.9702145, 0.5534694, -1.1386856]],\n [[0.34970748, -0.5403709, -0.9809327, -2.0930214],\n [0.54232424, 1.1565661, 1.0349312, 1.3458138]],\n [[0, 0, 0, 0], [0, 0, 0, 0]]]\n\n self.assertAllClose(v, expected_out)\n\n def testConvLastWnTanh(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'TANH'\n batch_norm = False\n weight_norm = True\n in_dim = 3\n out_dim = 3\n filter_shape = [2, 2]\n conv_last = True\n causal_conv = False\n self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,\n in_dim, out_dim, filter_shape, conv_last, causal_conv)\n\n def testConvLastCausal(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n out_dim = 3\n filter_shape = [2, 1]\n conv_last = True\n causal_conv = True\n self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,\n in_dim, out_dim, filter_shape, conv_last, causal_conv)\n\n def _DepthwiseConvTestHelper(self, dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n filter_shape, conv_last, causal_conv):\n with self.session(use_gpu=True) as sess:\n p1 = layers.DepthwiseConv2DLayer.Params().Set(\n name='conv_2d01',\n filter_shape=filter_shape + [in_dim, depth_multiplier],\n filter_stride=stride,\n dilation_rate=dilation,\n activation=activation,\n batch_norm=batch_norm,\n weight_norm=weight_norm,\n bias=not batch_norm,\n conv_last=conv_last,\n causal_convolution=causal_conv)\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=weight_norm)\n if batch_norm:\n norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(\n decay=0.999)\n builder_params.norm_layer_tpl = norm_p\n else:\n builder_params.norm_layer_tpl = None\n\n p2 = builder_params.Instantiate().DepthwiseConv2D(\n 'conv_2d02',\n in_dim,\n depth_multiplier,\n filter_shape,\n stride=stride,\n activation=activation,\n dilation=dilation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l1 = p1.Instantiate()\n l2 = p2.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)\n conv_pad 
= np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n l1_theta = l1.theta.Transform(tf.identity)\n l2_theta = l2.theta.Transform(tf.identity)\n conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)\n conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)\n\n tf.logging.info(l1_theta)\n tf.logging.info(l2_theta)\n l1_num_vars = l1_theta.Flatten()\n l2_num_var2 = l2_theta.Flatten()\n if len(l1_num_vars) != len(l2_num_var2):\n tf.logging.info(\n 'Mismatched number of vars: l1: %d vars, l2: %d vars',\n len(l1_num_vars), len(l2_num_var2))\n\n w1 = l1_theta.w\n w2 = l2_theta.conv_2d.w\n # b1 = l1_theta.b\n # b2 = l2_theta.bn_or_bias.b\n\n tf.global_variables_initializer().run()\n v1, p1 = sess.run([conv_out1, out1_padding])\n w1_v = sess.run([w1])[0]\n v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})\n\n self.assertAllClose(v1, v2)\n self.assertAllClose(p1, p2)\n\n def testDepthConvBasic(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'NONE'\n batch_norm = False\n weight_norm = False\n in_dim = 3\n depth_multiplier = 2\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n filter_shape, conv_last, causal_conv)\n\n def testDepthConvBnWnTanh(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n filter_shape, conv_last, causal_conv)\n\n def testDepthConvGn(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n in_dim = 4\n depth_multiplier = 1\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n\n with self.session(use_gpu=True) as sess:\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=True)\n builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(\n num_groups=2)\n p = builder_params.Instantiate().DepthwiseConv2D(\n 'conv_2d02',\n in_dim,\n depth_multiplier,\n filter_shape,\n stride=stride,\n activation=activation,\n dilation=dilation,\n conv_last=conv_last,\n is_causal=causal_conv)\n l = p.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)\n tf.global_variables_initializer().run()\n v = sess.run(tf.reduce_sum(conv_out, 0))\n\n expected_out = [[[-0.77095497, 0.30285388, -0.05714864, 1.0386012],\n [0.74034333, 0.04982221, -0.41769135, -2.9531932],\n [-0.2647084, -0.1936804, 0.6598473, 0.42537105]],\n [[1.3095646, -0.85996866, 2.2734299, -1.8457952],\n [-0.9542263, -0.14199251, 0.51472515, 0.91931283],\n [0.47267163, 1.4824618, 0.4548889, 0.93488806]],\n [[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]]\n\n self.assertAllClose(expected_out, v)\n\n def testDepthConvLastWnTanh(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = False\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n filter_shape = [2, 2]\n conv_last = True\n causal_conv = False\n self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n 
filter_shape, conv_last, causal_conv)\n\n def testDepthConvLastCausal(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n filter_shape = [2, 1]\n conv_last = True\n causal_conv = True\n self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n filter_shape, conv_last, causal_conv)\n\n def _SeparableConvTestHelper(self, dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier, out_dim,\n filter_shape, conv_last, causal_conv,\n assert_equality=True):\n with self.session(use_gpu=True) as sess:\n p1 = layers.SeparableConv2DLayer.Params().Set(\n name='conv_2d01',\n filter_shape=filter_shape + [in_dim, out_dim],\n depth_multiplier=depth_multiplier,\n filter_stride=stride,\n dilation_rate=dilation,\n activation=activation,\n batch_norm=batch_norm,\n weight_norm=weight_norm,\n bias=not batch_norm,\n conv_last=conv_last,\n causal_convolution=causal_conv)\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=weight_norm)\n if batch_norm:\n norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(\n decay=0.999)\n builder_params.norm_layer_tpl = norm_p\n else:\n builder_params.norm_layer_tpl = None\n p2 = builder_params.Instantiate().SeparableConv2D(\n 'conv_2d02',\n in_dim,\n out_dim,\n depth_multiplier,\n filter_shape,\n stride=stride,\n activation=activation,\n dilation=dilation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l1 = p1.Instantiate()\n l2 = p2.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n l1_theta = l1.theta.Transform(tf.identity)\n l2_theta = l2.theta.Transform(tf.identity)\n conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)\n conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)\n\n tf.logging.info(l1_theta)\n tf.logging.info(l2_theta)\n l1_num_vars = l1_theta.Flatten()\n l2_num_var2 = l2_theta.Flatten()\n if len(l1_num_vars) != len(l2_num_var2):\n tf.logging.info(\n 'Mismatched number of vars: l1: %d vars, l2: %d vars',\n len(l1_num_vars), len(l2_num_var2))\n\n pointwise_conv_w1 = l1_theta.w\n depth_conv_w1 = l1_theta.depthwise_conv.w\n pointwise_conv_w2 = l2_theta.conv_1x1.w\n depth_conv_w2 = l2_theta.conv_2d.w\n # b1 = l1_theta.b\n # b2 = l2_theta.bn_or_bias.b\n tf.global_variables_initializer().run()\n v1, p1 = sess.run([conv_out1, out1_padding])\n p_w1_v, d_w1_v = sess.run([pointwise_conv_w1, depth_conv_w1])\n v2, p2 = sess.run([conv_out2, out2_padding],\n feed_dict={\n pointwise_conv_w2: p_w1_v,\n depth_conv_w2: d_w1_v\n })\n\n if assert_equality:\n self.assertAllClose(v1, v2)\n self.assertAllClose(p1, p2)\n\n def testSeparableConv2DLayerBasic(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'NONE'\n batch_norm = False\n weight_norm = False\n in_dim = 3\n depth_multiplier = 3\n out_dim = 2\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n out_dim, filter_shape, conv_last, causal_conv)\n\n def testSeparableConvWnWnTanh(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = False\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n out_dim = 2\n filter_shape = [2, 1]\n conv_last = False\n causal_conv 
= True\n self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n out_dim, filter_shape, conv_last, causal_conv)\n\n def testSeparableConvLastBnWnTanh(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n out_dim = 2\n filter_shape = [2, 1]\n conv_last = True\n causal_conv = True\n # New implementation is not equivallent to the old.\n self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n out_dim, filter_shape, conv_last, causal_conv,\n assert_equality=False)\n\n def testSeparableConvGn(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n in_dim = 4\n depth_multiplier = 1\n out_dim = 2\n filter_shape = [2, 1]\n conv_last = True\n causal_conv = True\n\n with self.session(use_gpu=True) as sess:\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=True)\n builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(\n num_groups=2)\n p = builder_params.Instantiate().SeparableConv2D(\n 'conv_2d02',\n in_dim,\n out_dim,\n depth_multiplier,\n filter_shape,\n stride=stride,\n activation=activation,\n dilation=dilation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l = p.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)\n tf.global_variables_initializer().run()\n v = sess.run(tf.reduce_sum(conv_out, 0))\n\n expected_out = [[[0.00963847, -0.04019006], [0.36265337, -0.06592329],\n [0.65582913, -0.1533944]],\n [[0.7512939, -0.7282307], [0.96100605, -1.9509676],\n [0.4639647, 0.2485837]], [[0., 0.], [0., 0.], [0., 0.]]]\n\n self.assertAllClose(expected_out, v)\n\n\nclass CausalPoolingLayerTest(test_utils.TestCase, parameterized.TestCase):\n \"\"\"Tests for CausalPoolingLayer.\"\"\"\n\n @parameterized.named_parameters(\n {\n 'testcase_name': 'max_pooling',\n 'pooling_type': 'MAX',\n 'left_context': 2,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, 0, 2, 4, 0, 0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n }, {\n 'testcase_name': 'avg_pooling',\n 'pooling_type': 'AVG',\n 'left_context': 2,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, -1, 1, 3, 0, 0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n }, {\n 'testcase_name': 'max_pooling_large_window',\n 'pooling_type': 'MAX',\n 'left_context': 10,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, 0, 2, 4, 0, 0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n }, {\n 'testcase_name': 'avg_pooling_large_window',\n 'pooling_type': 'AVG',\n 'left_context': 10,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, -1, 0, 1, 0, 0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n }, {\n 'testcase_name': 'avg_pooling_infinte_window',\n 'pooling_type': 'AVG',\n 'left_context': -1,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, -1, 0, 1, 0, 
0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n })\n def testSimpleCase(self, pooling_type, left_context, inputs, input_paddings,\n expected_output, expected_output_padding):\n inputs = inputs[np.newaxis, :, np.newaxis, np.newaxis]\n input_paddings = input_paddings[np.newaxis, :]\n param = conv_layers_builder.CausalPoolingLayer.Params().Set(\n name='test_layer', pooling_type=pooling_type, left_context=left_context)\n pooling_layer = param.Instantiate()\n with self.session(use_gpu=True) as sess:\n inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)\n input_paddings = tf.convert_to_tensor(input_paddings, dtype=tf.float32)\n output, output_paddings = pooling_layer.FPropDefaultTheta(\n inputs, input_paddings)\n tf.global_variables_initializer().run()\n output_val, output_paddings_val = sess.run([output, output_paddings])\n\n self.assertAllClose(expected_output, output_val.flatten())\n self.assertAllEqual(expected_output_padding, output_paddings_val.flatten())\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
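The parameterized `CausalPoolingLayer` cases above are easy to re-derive by hand. A toy NumPy check of the expected AVG values, with the windowing convention inferred from the test expectations (each frame sees itself plus `left_context - 1` past frames, and `-1` means an unbounded past):

```
import numpy as np

def causal_avg_pool(x, left_context):
    # Frame t averages x[t] and up to left_context - 1 earlier frames;
    # left_context == -1 means an unbounded window over the past.
    out = np.zeros_like(x, dtype=float)
    for t in range(len(x)):
        start = 0 if left_context < 0 else max(0, t - left_context + 1)
        out[t] = x[start:t + 1].mean()
    return out

print(causal_avg_pool(np.array([-2., 0., 2., 4.]), left_context=2))
# [-2. -1.  1.  3.]  -- the 'avg_pooling' expectation above
print(causal_avg_pool(np.array([-2., 0., 2., 4.]), left_context=-1))
# [-2. -1.  0.  1.]  -- the 'avg_pooling_infinte_window' expectation
```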
"# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Audio library.\"\"\"\n\nimport subprocess\nimport lingvo.compat as tf\nfrom lingvo.core import py_utils\nfrom lingvo.tasks.asr import frontend as asr_frontend\n\nfrom tensorflow.python.ops import gen_audio_ops as audio_ops # pylint: disable=g-direct-tensorflow-import\n\n\n# There are two ways to decode a wav in tensorflow:\n# Through the tensorflow native audio decoder, exported\n# via framework, or via tf.contrib.ffmpeg.decode_audio.\n# While the latter could technically support FLAC, it does\n# not. It also adds an extra dependency on ffmpeg.\n\n\ndef DecodeFlacToWav(input_bytes):\n \"\"\"Decode a FLAC byte string to WAV.\"\"\"\n p = subprocess.Popen(\n ['sox', '-t', 'flac', '-', '-t', 'wav', '-'],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate(input=input_bytes)\n assert p.returncode == 0, err\n return out\n\n\ndef DecodeWav(input_bytes):\n \"\"\"Decode a wav file from its contents.\n\n Args:\n input_bytes: a byte array or Tensor with the wav file contents.\n\n Returns:\n A pair of Tensor for sample rate, decoded samples.\n \"\"\"\n result = tf.audio.decode_wav(input_bytes)\n return result.sample_rate, result.audio\n\n\ndef AudioToMfcc(sample_rate, audio, window_size_ms, window_stride_ms,\n num_coefficients):\n window_size_samples = sample_rate * window_size_ms // 1000\n window_stride_samples = sample_rate * window_stride_ms // 1000\n spectrogram = audio_ops.audio_spectrogram(\n audio,\n window_size=window_size_samples,\n stride=window_stride_samples,\n magnitude_squared=True)\n mfcc = audio_ops.mfcc(\n spectrogram, sample_rate, dct_coefficient_count=num_coefficients)\n return mfcc\n\n\ndef ExtractLogMelFeatures(wav_bytes_t):\n \"\"\"Create Log-Mel Filterbank Features from raw bytes.\n\n Args:\n wav_bytes_t: Tensor representing raw wav file as a string of bytes. 
It is\n currently assumed that the wav file is encoded at 16KHz (see DecodeWav,\n below).\n\n Returns:\n A Tensor representing three stacked log-Mel filterbank energies, sub-sampled\n every three frames.\n \"\"\"\n\n def _CreateAsrFrontend():\n \"\"\"Parameters corresponding to default ASR frontend.\"\"\"\n p = asr_frontend.MelAsrFrontend.Params()\n p.sample_rate = 16000.\n p.frame_size_ms = 25.\n p.frame_step_ms = 10.\n p.num_bins = 80\n p.lower_edge_hertz = 125.\n p.upper_edge_hertz = 7600.\n p.preemph = 0.97\n p.noise_scale = 0.\n p.pad_end = False\n return p.Instantiate()\n\n sample_rate, audio = DecodeWav(wav_bytes_t)\n audio *= 32768\n # Remove channel dimension, since we have a single channel.\n audio = tf.squeeze(audio, axis=1)\n # TODO(drpng): make batches.\n audio = tf.expand_dims(audio, axis=0)\n static_sample_rate = 16000\n mel_frontend = _CreateAsrFrontend()\n with tf.control_dependencies(\n [tf.assert_equal(sample_rate, static_sample_rate)]):\n outputs = mel_frontend.FPropDefaultTheta(\n py_utils.NestedMap(src_inputs=audio, paddings=tf.zeros_like(audio)))\n log_mel = outputs.src_inputs\n return log_mel\n",
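`ExtractLogMelFeatures` above assumes 16 kHz input and rescales the [-1, 1) floats returned by `tf.audio.decode_wav` back to int16 range before the Mel frontend. The decode-and-check prelude in isolation (the filename is a placeholder, not from this file):

```
import tensorflow as tf

wav_bytes = tf.io.read_file('speech_16k.wav')         # placeholder path
decoded = tf.audio.decode_wav(wav_bytes)
audio = tf.squeeze(decoded.audio, axis=-1) * 32768.0  # undo the [-1, 1) scaling
tf.debugging.assert_equal(decoded.sample_rate, 16000)
print(audio.shape)  # (num_samples,) for a mono file
```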
"# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Specification of a training cluster.\"\"\"\n\nimport heapq\nimport lingvo.compat as tf\nfrom lingvo.core import hyperparams\nfrom lingvo.core import py_utils\nimport numpy as np\n\n\n_CLUSTER_STACK = py_utils.ThreadLocalStack()\n\n\nclass _Cluster:\n \"\"\"The whole training cluster from a single task's point of view.\"\"\"\n\n @classmethod\n def _JobSpec(cls, replicas):\n \"\"\"Construct a job spec param with the given number of replicas.\"\"\"\n p = hyperparams.Params()\n # By default, we use /job:localhost so that most of tests can just\n # work out of the box. trainer.py will then set job names accordingly.\n p.Define('name', '/job:localhost',\n 'TensorFlow job spec, e.g., /job:trainer, /job:ps')\n p.Define('replicas', replicas, 'The number of tasks of a job.')\n p.Define(\n 'targets', '', 'The target network address(es) to which we can '\n 'create tf sessions. E.g., a single ip:port, or a list of '\n 'comma-separated grpc://ip:port, etc.')\n p.Define('cpus_per_replica', 1, 'The number of CPU devices to use per '\n 'replica.')\n p.Define('gpus_per_replica', 0, 'The number of GPU devices to use per '\n 'replica.')\n p.Define(\n 'devices_per_split', 1, 'Devices of a replica are grouped into '\n 'splits. Each split contains these many devices. One split is a '\n 'group of devices on which the computation nodes of a graph is '\n 'placed upon.E.g., one can place the forward lstm on device 0 of '\n 'a split and place the backward lstm on device 1. etc.')\n p.Define('tpus_per_replica', 0,\n 'The number of tpu cores to use per replica.')\n p.Define('num_tpu_hosts', 0, 'The number of tpu hosts.')\n return p\n\n @classmethod\n def Params(cls):\n \"\"\"Defaults parameters for a cluster.\"\"\"\n p = hyperparams.InstantiableParams(cls)\n p.Define(\n 'mode', 'async', 'A string noting the overall training method. '\n 'Valid values: sync, async.')\n p.Define(\n 'job', 'trainer', 'The role of this job in the training cluster. '\n 'E.g., trainer_client, trainer, controller, etc.')\n p.Define('task', 0, 'This process is the task-th task in the job.')\n p.Define('logdir', '', 'The log directory.')\n\n # How the cluster is composed.\n #\n # A typical training cluster has a few jobs (controller, worker, ps, etc).\n # One can potentially place computation on any device of these jobs.\n # Here, we specify how each job is configured. E.g., number of GPUs each\n # task is equipped with, the number of replicas, etc.\n #\n # Note that trainer client may dispatch operations on just a\n # smaller subset of jobs. For example, the controller only places\n # computations onto the controller and ps devices; while evaler\n # only places computations on the evaler devices.\n #\n # cluster.job refers to the role of a client process performs. 
It\n # can be 'controller', 'trainer', 'trainer_client', 'evaler' and\n # 'decoder', etc. Often, a client can be the same process as one\n # of the compute devices (e.g., controller). Sometimes, they can\n # be a separate processes. E.g., trainer_client is a separate\n # standalone process. It places computations on the worker and\n # ps devices, while itself does not host any.\n p.Define('controller', cls._JobSpec(1), 'The controller job.')\n p.Define('worker', cls._JobSpec(1), 'The worker job.')\n p.Define('ps', cls._JobSpec(1), 'The ps job.')\n p.Define('input', cls._JobSpec(0), 'The input job.')\n p.Define('evaler', cls._JobSpec(0), 'The evaler job.')\n p.Define('decoder', cls._JobSpec(0), 'The decoder job.')\n\n # A few 'global' knobs.\n p.Define(\n 'add_summary', None, 'Whether to add summaries. If None, '\n 'decides based on the job type.')\n p.Define('do_eval', None, 'Whether to do eval.')\n p.Define('split_id', 0, 'Split id for the model.')\n p.Define('immediately_create_variables', True,\n 'Whether to create variables immediately.')\n return p\n\n @classmethod\n def _MakeDeviceString(cls, job_name, task_id, device_name, device_id):\n del cls\n return '%s/replica:0/task:%d/device:%s:%d' % (job_name, task_id,\n device_name, device_id)\n\n @classmethod\n def ListDevices(cls, job_spec):\n \"\"\"Lists devices in the job.\n\n Args:\n job_spec: A param object specifying a job in a training cluster.\n\n Returns:\n Returns a 2D np string array. ret[i, j] is the i-th replica's j-th\n devices.\n \"\"\"\n if not job_spec.gpus_per_replica:\n cpus = job_spec.cpus_per_replica\n ret = np.empty((job_spec.replicas, cpus), np.object)\n for i in range(job_spec.replicas):\n for j in range(cpus):\n ret[i, j] = cls._MakeDeviceString(job_spec.name, i, 'CPU', j)\n else:\n ret = np.empty((job_spec.replicas, job_spec.gpus_per_replica), np.object)\n for i in range(job_spec.replicas):\n for j in range(job_spec.gpus_per_replica):\n ret[i, j] = cls._MakeDeviceString(job_spec.name, i, 'GPU', j)\n return ret\n\n def __enter__(self):\n _CLUSTER_STACK.stack.append(self)\n return self\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n assert _CLUSTER_STACK.stack\n assert _CLUSTER_STACK.stack[-1] is self\n _CLUSTER_STACK.stack.pop()\n\n @staticmethod\n def Top():\n return _CLUSTER_STACK.stack[-1] if _CLUSTER_STACK.stack else None\n\n def __init__(self, params):\n self._params = params.Copy()\n p = self.params\n\n # A set of invariants about the setup of the cluster.\n #\n # NOTE. Two job specs can be identical. E.g., if p.worker.name is\n # the same as p.ps.name, that means ps is colocated with worker.\n assert p.ps.replicas >= 0\n assert p.ps.gpus_per_replica >= 0\n if p.mode == 'async' and p.job == 'controller':\n # There is only 1 controller.\n assert p.controller.replicas == 1\n assert p.task == 0\n elif p.mode == 'async' and p.job == 'trainer':\n assert p.worker.replicas >= 1\n assert p.worker.gpus_per_replica >= 0\n assert p.worker.devices_per_split >= 1\n # In async mode, trainers colocate with workers.\n assert 0 <= p.task and p.task < p.worker.replicas\n if p.ps.replicas == 0:\n # There is no ps. 
We are doing single-replica training.\n assert p.worker.replicas == 1\n elif p.mode == 'async' and p.job == 'evaler':\n assert 0 <= p.task and p.task < p.evaler.replicas\n elif p.mode == 'async' and p.job == 'decoder':\n assert 0 <= p.task and p.task < p.decoder.replicas\n elif p.mode == 'sync' and p.job == 'controller':\n # There is only 1 controller.\n assert p.controller.replicas == 1\n assert p.task == 0\n elif p.mode == 'sync' and p.job == 'trainer_client':\n assert p.worker.replicas >= 1\n assert p.worker.gpus_per_replica >= 0\n assert p.worker.devices_per_split >= 1\n elif p.mode == 'sync' and p.job == 'evaler':\n assert 0 <= p.task and p.task < p.evaler.replicas\n elif p.mode == 'sync' and p.job == 'decoder':\n assert 0 <= p.task and p.task < p.decoder.replicas\n elif p.mode == 'sync' and p.job == 'executor_tpu':\n assert p.worker.replicas >= 1\n else:\n assert False, (p.mode, p.job)\n\n if p.job == 'controller':\n self._job_spec = p.controller\n elif p.job in ('trainer', 'worker', 'trainer_client'):\n self._job_spec = p.worker\n elif p.job == 'evaler':\n self._job_spec = p.evaler\n elif p.job == 'decoder':\n self._job_spec = p.decoder\n elif p.job == 'executor_tpu':\n self._job_spec = p.worker\n\n @property\n def params(self):\n return self._params\n\n @property\n def mode(self):\n return self.params.mode\n\n @property\n def job(self):\n return self.params.job\n\n @property\n def logdir(self):\n return self.params.logdir\n\n @property\n def task(self):\n return self.params.task\n\n @property\n def job_spec(self):\n return self._job_spec\n\n @property\n def asynchronous(self):\n \"\"\"Returns True if configured for asynchronous training.\"\"\"\n return self.params.mode == 'async'\n\n @property\n def synchronous(self):\n \"\"\"Returns True if configured for synchronous training.\"\"\"\n return self.params.mode == 'sync'\n\n @property\n def num_replicas(self):\n return self._job_spec.replicas\n\n @property\n def tpus_per_replica(self):\n return self._job_spec.tpus_per_replica\n\n @property\n def num_tpu_hosts(self):\n return self._job_spec.num_tpu_hosts\n\n @property\n def num_devices_per_replica(self):\n return (self._job_spec.gpus_per_replica or\n self._job_spec.tpus_per_replica or self._job_spec.cpus_per_replica)\n\n @property\n def total_worker_devices(self):\n \"\"\"Return the total number of discrete worker devices in the cluster.\"\"\"\n worker_spec = self.params.worker\n devices_per_replica = (\n worker_spec.gpus_per_replica or worker_spec.tpus_per_replica or\n self._job_spec.cpus_per_replica)\n num_replicas = worker_spec.replicas\n return devices_per_replica * num_replicas\n\n @property\n def num_devices_per_split(self):\n \"\"\"Return number of accelerators to use per split.\"\"\"\n return self._job_spec.devices_per_split\n\n @property\n def num_splits_per_replica(self):\n # Note that a split must be within a replica.\n assert self.num_devices_per_replica % self.num_devices_per_split == 0\n return int(self.num_devices_per_replica / self.num_devices_per_split)\n\n @property\n def num_splits_per_client(self):\n \"\"\"The number of splits visible by one trainer client.\"\"\"\n if self.synchronous and self.job == 'trainer_client':\n # One client drives all the workers.\n return self.num_splits_per_replica * self.num_replicas\n elif self.synchronous and self.job == 'executor_tpu':\n # One client drives all the workers.\n return self.num_splits_per_replica * self.num_replicas\n else:\n # One client colocates with one worker and drives the worker only.\n return 
self.num_splits_per_replica\n\n @property\n def available_devices(self):\n \"\"\"Returns all compute devices available in a 2D array.\n\n Returns:\n A 2D array (python list of python lists) of strings. ret[i, j]\n is the j-th visible device on i-th visible replica.\n \"\"\"\n if self._job_spec.tpus_per_replica:\n ret = np.empty((1, self.num_devices_per_split), np.object)\n for i in range(self.num_devices_per_split):\n ret[0, i] = tf.tpu.core(i)\n return ret\n\n if self.job == 'trainer' and self.asynchronous:\n # In async mode, each trainer task can only use its own devices.\n return self.ListDevices(self._job_spec)[self.task:(self.task + 1), :]\n\n if self.job == 'trainer_client' and self.synchronous:\n # In sync mode, trainer_client can use every device.\n return self.ListDevices(self._job_spec)\n\n if self.job == 'executor_tpu' and self.synchronous:\n # executor_tpu can use every device.\n return self.ListDevices(self._job_spec)\n\n if self.job in ('controller', 'evaler', 'decoder'):\n # Our current policy is that each controller/evaler/decoder task\n # only uses 1 replica.\n return self.ListDevices(self._job_spec)[self.task:(self.task + 1), :]\n\n assert False, (self.job, self.mode)\n\n @property\n def input_device(self):\n \"\"\"Returns the tensorflow device name to place input op on.\"\"\"\n p = self.params\n if self.synchronous and p.input.replicas > 0:\n # Uses a separate job for input processing.\n assert p.input.replicas == 1\n return self.ListDevices(p.input)[0, 0]\n else:\n return ''\n\n def PlaceInput(self, input_params):\n \"\"\"Applies a placement policy on the given input generator params.\n\n By default, the policy is to place the input generator onto the input\n device. Subclass can override PlaceInput method to implement more advanced\n placement policy.\n\n Args:\n input_params: An input generator params.\n\n Returns:\n An input params which places the input generator on the input device.\n \"\"\"\n\n class _UseInputDevice(input_params.cls):\n \"\"\"Places the input generator on the input device.\"\"\"\n\n def __init__(self, params):\n with tf.device(self.cluster.input_device):\n super().__init__(params)\n\n def SplitInputBatch(self, num_splits):\n with tf.device(self.cluster.input_device):\n return super().SplitInputBatch(num_splits)\n\n return input_params.Copy().Set(cls=_UseInputDevice)\n\n @property\n def input_targets(self):\n \"\"\"Returns a list of network addresses of the input job.\"\"\"\n p = self.params.input\n if not p.targets:\n return []\n targets = p.targets.split(',')\n assert p.replicas == len(targets), '{} vs. {}'.format(p.replicas, targets)\n return targets\n\n def WorkerDeviceInModelSplit(self, device_index):\n \"\"\"Returns the device to use for 'device_index' for the current model split.\n\n Args:\n device_index: An int, the device index within 'model_split'.\n\n Returns:\n A string. The device to place ops onto.\n\n Raises:\n ValueError: if split_id of cluster is incorrectly set.\n \"\"\"\n devices = self.available_devices.reshape([-1]).tolist()\n if not devices:\n return ''\n else:\n split_id = self.params.split_id\n if split_id < 0 or split_id >= self.num_splits_per_client:\n raise ValueError('split_id (%d) not in [0, %d)' %\n (split_id, self.num_splits_per_client))\n devices_per_split = self.num_devices_per_split\n return devices[devices_per_split * split_id +\n device_index % devices_per_split]\n\n def GetPlacer(self, strategy=None):\n \"\"\"Returns a device function for placing ops within the cluster.\n\n Args:\n strategy: A string. 
Identifier for a placement strategy. By default, we\n use a least loaded policy to place variables.\n\n Returns:\n Returns a device function can be used in tf.device().\n\n Raises:\n ValueError: when strategy is not supported.\n \"\"\"\n if self.job == 'evaler' or self.job == 'decoder':\n # Currently, we only support evaler/decoder uses 1 accelerator.\n return self.ListDevices(self.job_spec)[self.task, 0]\n elif strategy is None:\n return _LeastLoadedPlacer(self).DeviceFunction\n raise ValueError('Unsupported placement policy: ', strategy)\n\n @property\n def add_summary(self):\n p = self.params\n if p.add_summary is None:\n return self.job in ['controller', 'decoder']\n else:\n return p.add_summary\n\n @property\n def do_eval(self):\n return self.params.do_eval\n\n @property\n def immediately_create_variables(self):\n return self.params.immediately_create_variables\n\n @property\n def worker_cluster_def(self):\n \"\"\"Returns a tf.train.ClusterDef representing the worker cluster.\"\"\"\n p = self.params.worker\n\n if not p.targets:\n return None\n\n job = p.name.replace('/job:', '', 1)\n workers = [addr.replace('grpc://', '', 1) for addr in p.targets.split(',')]\n\n return tf.train.ClusterSpec({job: workers}).as_cluster_def()\n\n\n# Ops that must be placed on the 'ps' devices.\n_VAR_OPS = ['Variable', 'VariableV2', 'AutoReloadVariable', 'VarHandleOp']\n\n\nclass VarPlacer:\n \"\"\"Placer which places variables across a set of devices.\n\n VarPlacer places non-variable ops on the worker device.\n \"\"\"\n\n def __init__(self, cluster):\n self._cluster = cluster\n self._devices = cluster.ListDevices(cluster.job_spec)\n\n def _AssignVar(self, _):\n raise ValueError('Unimplemented')\n\n def DeviceFunction(self, op):\n \"\"\"Choose a device for 'op'.\n\n Args:\n op: an Operation.\n\n Returns:\n The device to use for the Operation.\n \"\"\"\n # Op has already assigned to a device explicitly. 
Don't change it.\n if op.device:\n return op.device\n\n # Place vars according our policy.\n if op.type in _VAR_OPS:\n return self._AssignVar(op)\n\n # The default policy is to place the op on the 1st device visible\n # to this task.\n assert self._devices is not None, ('Unexpected job: %s' % self._cluster.job)\n task = self._cluster.params.task\n assert 0 <= task and task < len(self._devices)\n return self._devices[task, 0]\n\n\nclass _LeastLoadedPlacer(VarPlacer):\n \"\"\"Placer which places a variable on the least loaded var device.\n\n We use total byte sizes of variables placed on a device to indicate\n the device's load.\n\n \"\"\"\n\n def __init__(self, cluster):\n super().__init__(cluster)\n # A min heap of (size, device)\n var_devices = cluster.ListDevices(cluster.params.ps).flatten().tolist()\n tf.logging.info('_LeastLoadedPlacer : %s', var_devices)\n self._var_space_pq = [(0, d) for d in var_devices]\n\n def _AssignVar(self, var_op):\n size = var_op.get_attr('dtype').size\n shape = tf.TensorShape(var_op.get_attr('shape'))\n assert self._var_space_pq, ('No ps devices to use.')\n allocated, device = heapq.heappop(self._var_space_pq)\n if shape.num_elements() is None:\n assert var_op.name.endswith(\n 'wb/var'), 'Unexpected name pattern: %s' % var_op.name\n # CuDNN RNN vars shape aren't known statically, decide to make a constant\n # estimate to avoid introducing more complexities.\n allocated += 10 * 1024**2 * size\n else:\n allocated += shape.num_elements() * size\n heapq.heappush(self._var_space_pq, (allocated, device))\n tf.logging.info('Place variable %s on %s %d', var_op.name, device,\n allocated)\n return device\n",
"# Lint as: python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for third_party.py.lingvo.core.steps.attention_steps.\"\"\"\n\nfrom lingvo import compat as tf\nfrom lingvo.core import attention\nfrom lingvo.core import py_utils\nfrom lingvo.core import test_utils\nfrom lingvo.core.steps import attention_steps\nimport numpy as np\n\n\nclass AttentionStepsTest(test_utils.TestCase):\n\n def testAttentionStep(self):\n with self.session(use_gpu=False):\n np.random.seed(12345)\n src_batch_size = 3\n target_batch_size = 6\n src_length = 5\n src_context_dim = 4\n query_dim = 5\n src_dim = 4\n source_vecs = tf.constant(\n np.random.rand(src_length, src_batch_size, src_dim), dtype=tf.float32)\n source_contexts = tf.constant(\n np.random.rand(src_length, src_batch_size, src_context_dim),\n dtype=tf.float32)\n source_padding = tf.zeros([src_length, target_batch_size],\n dtype=tf.float32)\n query_vec = tf.constant(\n np.random.rand(target_batch_size, query_dim), dtype=tf.float32)\n\n p = attention_steps.AttentionStep.Params()\n p.atten.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n p.atten.source_dim = src_dim\n p.atten.query_dim = query_dim\n p.atten.hidden_dim = query_dim\n p.atten.vn.global_vn = False\n p.atten.vn.per_step_vn = False\n p.atten.packed_input = True\n step = p.Instantiate()\n\n external_inputs = py_utils.NestedMap(\n src=source_vecs,\n context=source_contexts,\n padding=source_padding)\n packed = step.PrepareExternalInputs(step.theta, external_inputs)\n state0 = step.ZeroState(step.theta, packed, target_batch_size)\n\n step_inputs = py_utils.NestedMap(inputs=[query_vec])\n step_padding = tf.zeros([target_batch_size, 1], dtype=tf.float32)\n output, state1 = step.FProp(step.theta, packed, step_inputs, step_padding,\n state0)\n\n self.evaluate(tf.global_variables_initializer())\n output, state1 = self.evaluate([output, state1])\n\n self.assertAllClose(\n output, {\n 'context': [[0.41788787, 0.5865286, 0.58267754, 0.21218117],\n [0.42178467, 0.5067202, 0.5413259, 0.6616881],\n [0.71586907, 0.6303425, 0.52290946, 0.694283],\n [0.41789612, 0.58647645, 0.5826333, 0.21220288],\n [0.421697, 0.5068262, 0.5411844, 0.66167986],\n [0.7156511, 0.63033843, 0.5228955, 0.69437]],\n 'probs':\n [[0.20118009, 0.19332525, 0.20120151, 0.2022583, 0.20203482],\n [0.20019522, 0.20133461, 0.19572362, 0.2025276, 0.2002189],\n [0.20116101, 0.20004824, 0.20221081, 0.19645905, 0.20012087],\n [0.20123273, 0.19319996, 0.20131132, 0.20220752, 0.2020485],\n [0.2002011, 0.2015253, 0.19534773, 0.20260131, 0.20032457],\n [0.20097165, 0.19993119, 0.20225787, 0.19671878, 0.20012051]]\n })\n self.assertAllClose(\n state1, {\n 'atten_state': [[0.], [0.], [0.], [0.], [0.], [0.]],\n 'atten_context': [[0.41788787, 0.5865286, 0.58267754, 0.21218117],\n [0.42178467, 0.5067202, 0.5413259, 0.6616881],\n [0.71586907, 0.6303425, 0.52290946, 0.694283],\n [0.41789612, 0.58647645, 0.5826333, 0.21220288],\n [0.421697, 0.5068262, 0.5411844, 
0.66167986],\n [0.7156511, 0.63033843, 0.5228955, 0.69437]]\n })\n\n def testAttentionStepMultiSourceSame(self):\n with self.session(use_gpu=False):\n np.random.seed(12345)\n src_batch_size = 3\n target_batch_size = 6\n src_length = 5\n query_dim = 5\n src_dim = 4\n source_vecs_0 = tf.constant(\n np.random.rand(src_length, src_batch_size, src_dim), dtype=tf.float32)\n source_vecs_1 = tf.constant(\n np.random.rand(src_length, src_batch_size, src_dim), dtype=tf.float32)\n sources = py_utils.NestedMap(\n source_0=source_vecs_0, source_1=source_vecs_1)\n\n source_padding_0 = tf.zeros([src_length, src_batch_size],\n dtype=tf.float32)\n source_padding_1 = tf.zeros([src_length, src_batch_size],\n dtype=tf.float32)\n source_paddings = py_utils.NestedMap(\n source_0=source_padding_0, source_1=source_padding_1)\n query_vec = tf.constant(\n np.random.rand(target_batch_size, query_dim), dtype=tf.float32)\n\n p = attention_steps.AttentionStep.Params()\n\n # Setup MultiSourceAttention\n p.atten = attention.MultiSourceAttention.Params()\n p.atten.source_dim = src_dim\n p.atten.query_dim = query_dim\n\n add_atten_params = attention.AdditiveAttention.Params()\n add_atten_params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n add_atten_params.source_dim = src_dim\n add_atten_params.query_dim = query_dim\n add_atten_params.hidden_dim = query_dim\n add_atten_params.vn.global_vn = False\n add_atten_params.vn.per_step_vn = False\n add_atten_params.packed_input = True\n\n p.atten.source_atten_tpls = [('source_0', add_atten_params),\n ('source_1', add_atten_params)]\n\n step = p.Instantiate()\n\n external_inputs = py_utils.NestedMap(src=sources, padding=source_paddings)\n packed = step.PrepareExternalInputs(step.theta, external_inputs)\n state0 = step.ZeroState(step.theta, packed, target_batch_size)\n\n step_inputs = py_utils.NestedMap(inputs=[query_vec])\n step_padding = tf.zeros([target_batch_size, 1], dtype=tf.float32)\n output, state1 = step.FProp(step.theta, packed, step_inputs, step_padding,\n state0)\n\n self.evaluate(tf.global_variables_initializer())\n output, state1 = self.evaluate([output, state1])\n\n self.assertAllClose(\n output, {\n 'context': [[0.9590156, 0.8653384, 1.1668519, 0.697219],\n [1.175648, 1.1199431, 1.2219069, 1.1452408],\n [1.3191833, 1.0350775, 1.1315871, 1.3297331],\n [0.95910096, 0.86546516, 1.1669571, 0.6971649],\n [1.175647, 1.1201943, 1.222264, 1.1451368],\n [1.3188481, 1.034915, 1.1314276, 1.3297772]],\n 'probs':\n [[0.20118009, 0.19332525, 0.20120151, 0.2022583, 0.20203482],\n [0.20019522, 0.20133461, 0.19572362, 0.2025276, 0.2002189],\n [0.20116101, 0.20004824, 0.20221081, 0.19645905, 0.20012087],\n [0.20123273, 0.19319996, 0.20131132, 0.20220752, 0.2020485],\n [0.2002011, 0.2015253, 0.19534773, 0.20260131, 0.20032457],\n [0.20097165, 0.19993119, 0.20225787, 0.19671878, 0.20012051]]\n })\n self.assertAllClose(\n state1, {\n 'atten_state': {\n 'source_0': [[0.], [0.], [0.], [0.], [0.], [0.]],\n 'source_1': [[0.], [0.], [0.], [0.], [0.], [0.]]\n },\n 'atten_context': [[0.9590156, 0.8653384, 1.1668519, 0.697219],\n [1.175648, 1.1199431, 1.2219069, 1.1452408],\n [1.3191833, 1.0350775, 1.1315871, 1.3297331],\n [0.95910096, 0.86546516, 1.1669571, 0.6971649],\n [1.175647, 1.1201943, 1.222264, 1.1451368],\n [1.3188481, 1.034915, 1.1314276, 1.3297772]]\n })\n\n def testAttentionStepMultiSourceSameWithGmmAttention(self):\n with self.session(use_gpu=False):\n np.random.seed(12345)\n src_batch_size = 3\n target_batch_size = 6\n src_length = 5\n query_dim = 5\n src_dim = 4\n 
source_vecs_0 = tf.constant(\n np.random.rand(src_length, src_batch_size, src_dim), dtype=tf.float32)\n source_vecs_1 = tf.constant(\n np.random.rand(src_length, src_batch_size, src_dim), dtype=tf.float32)\n sources = py_utils.NestedMap(\n source_0=source_vecs_0, source_1=source_vecs_1)\n\n source_padding_0 = tf.zeros([src_length, src_batch_size],\n dtype=tf.float32)\n source_padding_1 = tf.zeros([src_length, src_batch_size],\n dtype=tf.float32)\n source_paddings = py_utils.NestedMap(\n source_0=source_padding_0, source_1=source_padding_1)\n query_vec = tf.constant(\n np.random.rand(target_batch_size, query_dim), dtype=tf.float32)\n\n p = attention_steps.AttentionStep.Params()\n\n # Setup MultiSourceAttention\n p.atten = attention.MultiSourceAttention.Params()\n p.atten.source_dim = src_dim\n p.atten.query_dim = query_dim\n\n gmm_atten_params = attention.GmmMonotonicAttention.Params()\n gmm_atten_params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n gmm_atten_params.source_dim = src_dim\n gmm_atten_params.query_dim = query_dim\n gmm_atten_params.hidden_dim = query_dim\n gmm_atten_params.vn.global_vn = False\n gmm_atten_params.vn.per_step_vn = False\n gmm_atten_params.packed_input = True\n\n p.atten.source_atten_tpls = [('source_0', gmm_atten_params),\n ('source_1', gmm_atten_params)]\n\n step = p.Instantiate()\n\n external_inputs = py_utils.NestedMap(src=sources, padding=source_paddings)\n packed = step.PrepareExternalInputs(step.theta, external_inputs)\n state0 = step.ZeroState(step.theta, packed, target_batch_size)\n\n step_inputs = py_utils.NestedMap(inputs=[query_vec])\n step_padding = tf.zeros([target_batch_size, 1], dtype=tf.float32)\n output, state1 = step.FProp(step.theta, packed, step_inputs, step_padding,\n state0)\n\n self.evaluate(tf.global_variables_initializer())\n output, state1 = self.evaluate([output, state1])\n\n self.assertAllClose(\n output, {\n 'context': [[0.8048796, 0.9554154, 1.2422264, 0.82598877],\n [1.1976988, 0.9226365, 1.1311831, 1.1287751],\n [1.2583418, 0.96984935, 0.8972859, 1.2939383],\n [0.8055052, 0.9545301, 1.2421954, 0.824931],\n [1.1980952, 0.9227077, 1.1313919, 1.13009],\n [1.2582378, 0.96980226, 0.8973369, 1.2938937]],\n 'probs':\n [[0.05302628, 0.20965888, 0.3661108, 0.26998273, 0.08293614],\n [0.05321905, 0.20958655, 0.36570197, 0.270003, 0.08308904],\n [0.05327733, 0.20919749, 0.36514452, 0.27033207, 0.08349889],\n [0.05328987, 0.20906723, 0.3648241, 0.27042356, 0.08376145],\n [0.05301215, 0.21013679, 0.36650375, 0.26960865, 0.08261178],\n [0.05328071, 0.20917267, 0.36505368, 0.27032903, 0.08357814]]\n })\n self.assertAllClose(\n state1, {\n 'atten_state': {\n 'source_0': [[[2.4243412, 1.2218076, 1.0122609, 0.18427502],\n [1.9546769, 0.9721461, 1.0469768, 0.19244196],\n [1.7934805, 0.8947478, 1.2158467, 0.18101364],\n [2.2727895, 1.1433213, 0.969053, 0.21098366],\n [2.1986299, 1.0997422, 1.3713341, 0.23128569]],\n [[2.4298353, 1.227302, 1.0116383, 0.18391277],\n [1.9476058, 0.96507514, 1.0462759, 0.19275317],\n [1.793545, 0.89481235, 1.220826, 0.1817861],\n [2.2800756, 1.1506072, 0.96794796, 0.21093304],\n [2.194984, 1.0960963, 1.3741415, 0.23061496]],\n [[2.4273272, 1.2247936, 1.0106387, 0.18302175],\n [1.9522938, 0.96976304, 1.0510013, 0.19241981],\n [1.7976122, 0.8988795, 1.2246737, 0.18208173],\n [2.2875524, 1.1580843, 0.97309643, 0.21170339],\n [2.1904838, 1.0915961, 1.3786552, 0.23077331]],\n [[2.4339817, 1.2314482, 1.0118915, 0.18239658],\n [1.9538436, 0.9713129, 1.050209, 0.19243228],\n [1.7997689, 0.90103614, 1.2248727, 
0.18208562],\n [2.286818, 1.15735, 0.9776513, 0.2125737],\n [2.1872034, 1.0883157, 1.3807379, 0.23051178]],\n [[2.4258854, 1.223352, 1.0136935, 0.18414007],\n [1.9573982, 0.9748675, 1.0445031, 0.19239089],\n [1.7965381, 0.89780533, 1.2112961, 0.18159895],\n [2.2637806, 1.1343125, 0.9743988, 0.21178932],\n [2.1948628, 1.0959752, 1.366173, 0.23008086]],\n [[2.435421, 1.2328876, 1.0118036, 0.18307444],\n [1.9479709, 0.96544015, 1.0476727, 0.19277772],\n [1.795729, 0.8969963, 1.224472, 0.18180896],\n [2.2865427, 1.1570745, 0.9713619, 0.211611],\n [2.1911612, 1.0922736, 1.3791639, 0.2307278]]],\n 'source_1': [[[2.4243412, 1.2218076, 1.0122609, 0.18427502],\n [1.9546769, 0.9721461, 1.0469768, 0.19244196],\n [1.7934805, 0.8947478, 1.2158467, 0.18101364],\n [2.2727895, 1.1433213, 0.969053, 0.21098366],\n [2.1986299, 1.0997422, 1.3713341, 0.23128569]],\n [[2.4298353, 1.227302, 1.0116383, 0.18391277],\n [1.9476058, 0.96507514, 1.0462759, 0.19275317],\n [1.793545, 0.89481235, 1.220826, 0.1817861],\n [2.2800756, 1.1506072, 0.96794796, 0.21093304],\n [2.194984, 1.0960963, 1.3741415, 0.23061496]],\n [[2.4273272, 1.2247936, 1.0106387, 0.18302175],\n [1.9522938, 0.96976304, 1.0510013, 0.19241981],\n [1.7976122, 0.8988795, 1.2246737, 0.18208173],\n [2.2875524, 1.1580843, 0.97309643, 0.21170339],\n [2.1904838, 1.0915961, 1.3786552, 0.23077331]],\n [[2.4339817, 1.2314482, 1.0118915, 0.18239658],\n [1.9538436, 0.9713129, 1.050209, 0.19243228],\n [1.7997689, 0.90103614, 1.2248727, 0.18208562],\n [2.286818, 1.15735, 0.9776513, 0.2125737],\n [2.1872034, 1.0883157, 1.3807379, 0.23051178]],\n [[2.4258854, 1.223352, 1.0136935, 0.18414007],\n [1.9573982, 0.9748675, 1.0445031, 0.19239089],\n [1.7965381, 0.89780533, 1.2112961, 0.18159895],\n [2.2637806, 1.1343125, 0.9743988, 0.21178932],\n [2.1948628, 1.0959752, 1.366173, 0.23008086]],\n [[2.435421, 1.2328876, 1.0118036, 0.18307444],\n [1.9479709, 0.96544015, 1.0476727, 0.19277772],\n [1.795729, 0.8969963, 1.224472, 0.18180896],\n [2.2865427, 1.1570745, 0.9713619, 0.211611],\n [2.1911612, 1.0922736, 1.3791639, 0.2307278]]]\n },\n 'atten_context': [[0.8048796, 0.9554154, 1.2422264, 0.82598877],\n [1.1976988, 0.9226365, 1.1311831, 1.1287751],\n [1.2583418, 0.96984935, 0.8972859, 1.2939383],\n [0.8055052, 0.9545301, 1.2421954, 0.824931],\n [1.1980952, 0.9227077, 1.1313919, 1.13009],\n [1.2582378, 0.96980226, 0.8973369, 1.2938937]]\n })\n\n def testAttentionStepMultiSourceDifferent(self):\n with self.session(use_gpu=False):\n np.random.seed(12345)\n src_batch_size = 3\n target_batch_size = 6\n src_length = 5\n query_dim = 5\n src_dim = 4\n source_vecs_0 = tf.constant(\n np.random.rand(src_length, src_batch_size, src_dim), dtype=tf.float32)\n source_vecs_1 = tf.constant(\n np.random.rand(src_length, src_batch_size, src_dim), dtype=tf.float32)\n sources = py_utils.NestedMap(\n source_0=source_vecs_0, source_1=source_vecs_1)\n\n source_padding_0 = tf.zeros([src_length, src_batch_size],\n dtype=tf.float32)\n source_padding_1 = tf.zeros([src_length, src_batch_size],\n dtype=tf.float32)\n source_paddings = py_utils.NestedMap(\n source_0=source_padding_0, source_1=source_padding_1)\n query_vec = tf.constant(\n np.random.rand(target_batch_size, query_dim), dtype=tf.float32)\n\n p = attention_steps.AttentionStep.Params()\n\n # Setup MultiSourceAttention\n p.atten = attention.MultiSourceAttention.Params()\n p.atten.source_dim = src_dim\n p.atten.query_dim = query_dim\n\n add_atten_params = attention.AdditiveAttention.Params()\n add_atten_params.params_init = 
py_utils.WeightInit.Gaussian(0.1, 12345)\n add_atten_params.source_dim = src_dim\n add_atten_params.query_dim = query_dim\n add_atten_params.hidden_dim = query_dim\n add_atten_params.vn.global_vn = False\n add_atten_params.vn.per_step_vn = False\n add_atten_params.packed_input = True\n\n gmm_atten_params = attention.GmmMonotonicAttention.Params()\n gmm_atten_params.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n gmm_atten_params.source_dim = src_dim\n gmm_atten_params.query_dim = query_dim\n gmm_atten_params.hidden_dim = query_dim\n gmm_atten_params.vn.global_vn = False\n gmm_atten_params.vn.per_step_vn = False\n gmm_atten_params.packed_input = True\n\n p.atten.source_atten_tpls = [('source_0', add_atten_params),\n ('source_1', gmm_atten_params)]\n\n step = p.Instantiate()\n\n external_inputs = py_utils.NestedMap(src=sources, padding=source_paddings)\n packed = step.PrepareExternalInputs(step.theta, external_inputs)\n state0 = step.ZeroState(step.theta, packed, target_batch_size)\n\n step_inputs = py_utils.NestedMap(inputs=[query_vec])\n step_padding = tf.zeros([target_batch_size, 1], dtype=tf.float32)\n output, state1 = step.FProp(step.theta, packed, step_inputs, step_padding,\n state0)\n\n self.evaluate(tf.global_variables_initializer())\n output, state1 = self.evaluate([output, state1])\n\n self.assertAllClose(\n output, {\n 'context': [[0.9140804, 0.8979037, 1.1033492, 0.70460725],\n [1.1748682, 1.0488822, 1.2771418, 1.0938747],\n [1.2568944, 1.0808113, 0.9878455, 1.4196949],\n [0.9142588, 0.8978502, 1.1039352, 0.7042637],\n [1.174994, 1.0493405, 1.2779118, 1.0942582],\n [1.2567302, 1.0806134, 0.98783255, 1.4195559]],\n 'probs':\n [[0.20118009, 0.19332525, 0.20120151, 0.2022583, 0.20203482],\n [0.20019522, 0.20133461, 0.19572362, 0.2025276, 0.2002189],\n [0.20116101, 0.20004824, 0.20221081, 0.19645905, 0.20012087],\n [0.20123273, 0.19319996, 0.20131132, 0.20220752, 0.2020485],\n [0.2002011, 0.2015253, 0.19534773, 0.20260131, 0.20032457],\n [0.20097165, 0.19993119, 0.20225787, 0.19671878, 0.20012051]]\n })\n self.assertAllClose(\n state1, {\n 'atten_state': {\n 'source_0': [[0.], [0.], [0.], [0.], [0.], [0.]],\n 'source_1': [[[2.4243412, 1.2218076, 1.0122609, 0.18427502],\n [1.9546769, 0.9721461, 1.0469768, 0.19244196],\n [1.7934805, 0.8947478, 1.2158467, 0.18101364],\n [2.2727895, 1.1433213, 0.969053, 0.21098366],\n [2.1986299, 1.0997422, 1.3713341, 0.23128569]],\n [[2.4298353, 1.227302, 1.0116383, 0.18391277],\n [1.9476058, 0.96507514, 1.0462759, 0.19275317],\n [1.793545, 0.89481235, 1.220826, 0.1817861],\n [2.2800756, 1.1506072, 0.96794796, 0.21093304],\n [2.194984, 1.0960963, 1.3741415, 0.23061496]],\n [[2.4273272, 1.2247936, 1.0106387, 0.18302175],\n [1.9522938, 0.96976304, 1.0510013, 0.19241981],\n [1.7976122, 0.8988795, 1.2246737, 0.18208173],\n [2.2875524, 1.1580843, 0.97309643, 0.21170339],\n [2.1904838, 1.0915961, 1.3786552, 0.23077331]],\n [[2.4339817, 1.2314482, 1.0118915, 0.18239658],\n [1.9538436, 0.9713129, 1.050209, 0.19243228],\n [1.7997689, 0.90103614, 1.2248727, 0.18208562],\n [2.286818, 1.15735, 0.9776513, 0.2125737],\n [2.1872034, 1.0883157, 1.3807379, 0.23051178]],\n [[2.4258854, 1.223352, 1.0136935, 0.18414007],\n [1.9573982, 0.9748675, 1.0445031, 0.19239089],\n [1.7965381, 0.89780533, 1.2112961, 0.18159895],\n [2.2637806, 1.1343125, 0.9743988, 0.21178932],\n [2.1948628, 1.0959752, 1.366173, 0.23008086]],\n [[2.435421, 1.2328876, 1.0118036, 0.18307444],\n [1.9479709, 0.96544015, 1.0476727, 0.19277772],\n [1.795729, 0.8969963, 1.224472, 0.18180896],\n 
[2.2865427, 1.1570745, 0.9713619, 0.211611],\n [2.1911612, 1.0922736, 1.3791639, 0.2307278]]]\n },\n 'atten_context': [[0.9140804, 0.8979037, 1.1033492, 0.70460725],\n [1.1748682, 1.0488822, 1.2771418, 1.0938747],\n [1.2568944, 1.0808113, 0.9878455, 1.4196949],\n [0.9142588, 0.8978502, 1.1039352, 0.7042637],\n [1.174994, 1.0493405, 1.2779118, 1.0942582],\n [1.2567302, 1.0806134, 0.98783255, 1.419555]]\n })\n\n def testAttentionBlockStep(self):\n with self.session(use_gpu=False):\n np.random.seed(12345)\n src_batch_size = 3\n target_batch_size = 6\n src_length = 5\n query_dim = 5\n context_dim = 8\n hidden_dim = 7\n src_dim = context_dim\n source_vecs = tf.constant(\n np.random.rand(src_length, src_batch_size, src_dim), dtype=tf.float32)\n source_padding = tf.zeros([src_length, target_batch_size],\n dtype=tf.float32)\n\n p = attention_steps.AttentionBlockStep.Params()\n p.attention.atten.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)\n p.attention.atten.source_dim = src_dim\n p.attention.atten.query_dim = query_dim\n p.attention.atten.hidden_dim = hidden_dim\n p.attention.atten.vn.global_vn = False\n p.attention.atten.vn.per_step_vn = False\n p.attention.atten.packed_input = True\n p.query_generator.step_input_dim = context_dim\n p.query_generator.rnn_cell_dim = query_dim\n step = p.Instantiate()\n\n external_inputs = py_utils.NestedMap(\n attention=py_utils.NestedMap(src=source_vecs, padding=source_padding))\n packed = step.PrepareExternalInputs(step.theta, external_inputs)\n state0 = step.ZeroState(step.theta, packed, target_batch_size)\n\n step_padding = tf.zeros([target_batch_size, 1], dtype=tf.float32)\n output, state1 = step.FProp(step.theta, packed, None, step_padding,\n state0)\n\n self.evaluate(tf.global_variables_initializer())\n output, state1 = self.evaluate([output, state1])\n\n self.assertAllClose(\n output, {\n 'atten_query':\n np.array([\n [\n 0.1142175, 0.00020437, 0.02718649, -0.06030316,\n 0.02916641\n ],\n [\n 0.09362462, 0.07093287, 0.10184045, -0.0228882,\n 0.06189567\n ],\n [\n 0.12866478, 0.0121689, 0.05557573, -0.04107622,\n 0.0543875\n ],\n [\n 0.1142175, 0.00020437, 0.02718649, -0.06030316,\n 0.02916641\n ],\n [\n 0.09362462, 0.07093287, 0.10184045, -0.0228882,\n 0.06189567\n ],\n [\n 0.12866478, 0.0121689, 0.05557573, -0.04107622,\n 0.0543875\n ],\n ]),\n 'atten_context':\n np.array([\n [\n 0.55453926, 0.55162865, 0.62239933, 0.26001987,\n 0.51269007, 0.555924, 0.54857075, 0.51340824\n ],\n [\n 0.6495046, 0.42096642, 0.605386, 0.79519784,\n 0.39852753, 0.30938083, 0.53797, 0.43651274\n ],\n [\n 0.66645885, 0.56522155, 0.67393464, 0.6224826,\n 0.66094846, 0.6098963, 0.52270895, 0.5319694\n ],\n [\n 0.55453926, 0.55162865, 0.62239933, 0.26001987,\n 0.51269007, 0.555924, 0.54857075, 0.51340824\n ],\n [\n 0.6495046, 0.42096642, 0.605386, 0.79519784,\n 0.39852753, 0.30938083, 0.53797, 0.43651274\n ],\n [\n 0.66645885, 0.56522155, 0.67393464, 0.6224826,\n 0.66094846, 0.6098963, 0.52270895, 0.5319694\n ],\n ]),\n 'atten_probs':\n np.array([\n [\n 0.20132412, 0.19545832, 0.20277032, 0.19362292,\n 0.20682438\n ],\n [\n 0.20172212, 0.20001633, 0.20166671, 0.20218876,\n 0.19440602\n ],\n [\n 0.20540778, 0.20792785, 0.19377577, 0.19288684,\n 0.20000176\n ],\n [\n 0.20132412, 0.19545832, 0.20277032, 0.19362292,\n 0.20682438\n ],\n [\n 0.20172212, 0.20001633, 0.20166671, 0.20218876,\n 0.19440602\n ],\n [\n 0.20540778, 0.20792785, 0.19377577, 0.19288684,\n 0.20000176\n ],\n ])\n })\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helpers for unittests.\"\"\"\n\nimport inspect\nimport re\n\nimport lingvo.compat as tf\nfrom lingvo.core import cluster_factory\nfrom lingvo.core import py_utils\nimport numpy as np\n\nFLAGS = tf.flags.FLAGS\n\n# Enable tf.function when eager execution is on-by-default, which is the case\n# when:\n# - the test target doesn't depend on the disable_tf2 target, and\n# - --define=tf_api_version=1 is not specified during the build.\n#\n# TODO(laigd): remove TF version check when 312743821 and 313682500 are in the\n# release.\nif tf.executing_eagerly() and tf.compat.v1.__version__ >= '2.3.0':\n try:\n FLAGS.if_use_tf_function = True\n FLAGS.while_loop_use_tf_function = True\n FLAGS.call_defun_use_tf_function = True\n except tf.flags.UnrecognizedFlagError:\n pass\n\n# Disable eager execution for all tests.\ntf.disable_eager_execution()\n\ntf.flags.DEFINE_boolean(\n 'update_goldens', False,\n 'Update the goldens, rather than diffing against them.')\n\n\nclass TestCase(tf.test.TestCase):\n \"\"\"TestCase that performs Lingvo-specific setup.\"\"\"\n\n def setUp(self):\n super().setUp()\n # Ensure the global_step variable is created in the default graph.\n py_utils.GetOrCreateGlobalStepVar()\n\n def _create_session(self, *args, **kwargs):\n sess = super()._create_session(*args, **kwargs)\n with sess.graph.as_default():\n # Ensure the global_step variable is created in every new session.\n py_utils.GetOrCreateGlobalStepVar()\n return sess\n\n def SetEval(self, mode):\n return cluster_factory.SetEval(mode=mode)\n\n\ndef _ReplaceOneLineInFile(fpath, linenum, old, new):\n \"\"\"Replaces a line for the input file.\"\"\"\n lines = []\n lines = open(fpath).readlines()\n assert lines[linenum] == old, (\n 'Expected \"%s\" at line %d in file %s, but got \"%s\"' %\n (lines[linenum], linenum + 1, fpath, old))\n tf.logging.info('Replacing {}:{}.'.format(fpath, linenum))\n lines[linenum] = new\n with open(fpath, 'w') as f:\n for l in lines:\n f.write(l)\n\n\ndef ReplaceGoldenSingleFloat(old, float_value):\n m = re.match(\n r'(?P<prefix>.*)\\bCompareToGoldenSingleFloat\\('\n r'(?P<testobj>[^,]+), *'\n r'[-+]?(?:\\d+(?:\\.\\d*)?|\\.\\d+)(?:[eE][-+]?\\d+)?, *'\n r'(?P<v2>.*)\\)(?P<postfix>.*)\\n', old)\n assert m\n return ('%sCompareToGoldenSingleFloat(%s, %f, %s)%s\\n' %\n (m.group('prefix'), m.group('testobj'), float_value, m.group('v2'),\n m.group('postfix')))\n\n\ndef ReplaceGoldenStackAnalysis(new_float_value):\n \"\"\"Analyze the stack trace to figure out how to update the golden value.\"\"\"\n src_file_frame = None\n for frame in inspect.stack():\n if frame[-2] and 'CompareToGoldenSingleFloat' in frame[-2][0]:\n src_file_frame = frame\n break\n assert src_file_frame\n runfiles_pattern = 'runfiles/[^/]+/'\n fpath = src_file_frame[1].split(runfiles_pattern)[-1]\n line_num = src_file_frame[2] - 
1\n old_line = src_file_frame[4][0]\n new_line = ReplaceGoldenSingleFloat(old_line, new_float_value)\n return fpath, line_num, old_line, new_line\n\n\ndef CompareToGoldenSingleFloat(testobj, v1, v2, *args, **kwargs):\n \"\"\"Compare golden value with real value.\n\n When running the bazel tests with FLAGS.update_goldens to be True, this\n function automatically updates the golden value in the test file if there is a\n mismatch and the calling site of CompareToGoldenSingleFloat is a 1-liner. E.g.\n Code::\n\n test_utils.CompareToGoldenSingleFloat(self, 0.3232, input_batch.label)\n\n works but this will not::\n\n test_utils.CompareToGoldenSingleFloat(self,\n 0.3232,\n input_batch.label)\n\n Args:\n testobj: A test object, such as tf.test.TestCase or test_utils.TestCase.\n v1: the golden value to compare against.\n v2: the returned value.\n *args: extra args\n **kwargs: extra args\n \"\"\"\n if not FLAGS.update_goldens:\n testobj.assertAllClose(v1, v2, *args, **kwargs)\n else:\n _ReplaceOneLineInFile(*ReplaceGoldenStackAnalysis(v2))\n\n\ndef PickEveryN(np_arr, step=1):\n \"\"\"Flattens `np_arr` and keeps one value every step values.\"\"\"\n return np_arr.flatten()[::step]\n\n\ndef ComputeNumericGradient(sess,\n y,\n x,\n delta=1e-4,\n step=1,\n extra_feed_dict=None):\n \"\"\"Compute the numeric gradient of y wrt to x.\n\n Args:\n sess: The TF session constructed with a graph containing x and y.\n y: A scalar TF Tensor in the graph constructed in sess.\n x: A TF Tensor in the graph constructed in sess.\n delta: Gradient checker's small perturbation of x[i].\n step: Only compute numerical gradients for a subset of x values. I.e.\n dy/dx[i] is computed if i % step == 0.\n extra_feed_dict: Additional feed_dict of tensors to keep fixed during the\n gradient checking.\n\n Returns:\n A Tensor of the same shape and dtype as x. If x[i] is not chosen\n to compute the numerical gradient dy/x[i], the corresponding\n value is set to 0.\n \"\"\"\n\n x_data = sess.run(x)\n x_size = x_data.size\n x_shape = x_data.shape\n\n numeric_grad = np.zeros(x_size, dtype=x_data.dtype)\n\n # For variables we need to issue an assignment operation in order to update\n # the value of the variable. This is because with resource variables x will be\n # pointing to the handle rather than its value.\n feed_dict = extra_feed_dict or {}\n ph = tf.placeholder(x_data.dtype, x_shape)\n x_assign = x.assign(ph) if isinstance(x, tf.Variable) else None\n\n for i in range(0, x_size, step):\n x_pos = x_data.copy()\n if x_size == 1:\n x_pos += delta\n else:\n x_pos.flat[i] += delta\n if x_assign is None:\n feed_dict.update(dict([(x, x_pos)]))\n else:\n sess.run(x_assign, feed_dict={ph: x_pos})\n y_pos = sess.run(y, feed_dict=feed_dict)\n\n x_neg = x_data.copy()\n if x_size == 1:\n x_neg -= delta\n else:\n x_neg.flat[i] -= delta\n if x_assign is None:\n feed_dict.update(dict([(x, x_neg)]))\n else:\n sess.run(x_assign, feed_dict={ph: x_neg})\n y_neg = sess.run(y, feed_dict=feed_dict)\n numeric_grad[i] = (y_pos - y_neg) / (2 * delta)\n\n # Restore the variable back to its original value to avoid breaking any\n # further test code that operates on the graph.\n if x_assign is not None:\n sess.run(x_assign, feed_dict={ph: x_data})\n\n return numeric_grad.reshape(x_shape)\n"
] | [
[
"numpy.random.normal",
"numpy.array",
"numpy.full"
],
[
"tensorflow.python.ops.gen_audio_ops.mfcc",
"tensorflow.python.ops.gen_audio_ops.audio_spectrogram"
],
[
"numpy.empty"
],
[
"numpy.array",
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
541867329/pydata-notebook | [
"867f204d7abac96dbae80e6cdd2e3661e554d1dd"
] | [
"mydemo/matplotlibDemo/clickEvent.py"
] | [
"from matplotlib.pyplot import figure, show\nimport numpy as npy\nfrom numpy.random import rand\n\nif 1: # picking on a scatter plot (matplotlib.collections.RegularPolyCollection)\n\n x, y, c, s = rand(4, 100)\n\n\n def onpick3(event):\n ind = event.ind\n print('onpick3 scatter:', ind, npy.take(x, ind), npy.take(y, ind))\n\n\n fig = figure()\n ax1 = fig.add_subplot(111)\n col = ax1.scatter(x, y, 100 * s, c, picker=True)\n # fig.savefig('pscoll.eps')\n fig.canvas.mpl_connect('pick_event', onpick3)\n\nshow()\n"
] | [
[
"numpy.take",
"matplotlib.pyplot.show",
"numpy.random.rand",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
suresh-guttikonda/iGibson | [
"a69e623058180146466cd52d4bb3c00d1facdacf",
"a69e623058180146466cd52d4bb3c00d1facdacf",
"a69e623058180146466cd52d4bb3c00d1facdacf",
"a69e623058180146466cd52d4bb3c00d1facdacf",
"a69e623058180146466cd52d4bb3c00d1facdacf"
] | [
"igibson/robots/jr2_robot.py",
"igibson/utils/data_utils/ext_object/scripts_wip/get_obj_stable_rotations.py",
"igibson/test/test_render_tensor.py",
"igibson/robots/fetch_robot.py",
"igibson/metrics/agent.py"
] | [
"import gym\nimport numpy as np\n\nfrom igibson.robots.robot_locomotor import LocomotorRobot\n\n\nclass JR2(LocomotorRobot):\n \"\"\"\n JR2 robot (no arm)\n Reference: https://cvgl.stanford.edu/projects/jackrabbot/\n Uses joint velocity control\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.velocity = config.get(\"velocity\", 1.0)\n LocomotorRobot.__init__(\n self,\n \"jr2_urdf/jr2.urdf\",\n action_dim=4,\n scale=config.get(\"robot_scale\", 1.0),\n is_discrete=config.get(\"is_discrete\", True),\n control=\"velocity\",\n )\n\n def set_up_continuous_action_space(self):\n \"\"\"\n Set up continuous action space\n \"\"\"\n self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)\n self.action_high = self.velocity * np.ones([self.action_dim])\n self.action_low = -self.action_high\n\n def set_up_discrete_action_space(self):\n \"\"\"\n Set up discrete action space\n \"\"\"\n self.action_list = [\n [self.velocity, self.velocity, 0, self.velocity],\n [-self.velocity, -self.velocity, 0, -self.velocity],\n [self.velocity, -self.velocity, -self.velocity, 0],\n [-self.velocity, self.velocity, self.velocity, 0],\n [0, 0, 0, 0],\n ]\n self.action_space = gym.spaces.Discrete(len(self.action_list))\n self.setup_keys_to_action()\n\n def setup_keys_to_action(self):\n self.keys_to_action = {\n (ord(\"w\"),): 0, # forward\n (ord(\"s\"),): 1, # backward\n (ord(\"d\"),): 2, # turn right\n (ord(\"a\"),): 3, # turn left\n (): 4,\n }\n",
"\"\"\"\nCredit: Andrey Kurenkov\n\"\"\"\n\nimport argparse\nimport json\nimport math\nimport os\nimport signal\nimport xml.etree.ElementTree as ET\n\nimport numpy as np\nimport pybullet as p\nimport trimesh\nfrom pyquaternion import Quaternion\n\n\"\"\"\nAnalyzes a model for possible ways to place it flat on a surface.\nUse by running without --ask_probs or --save_json to see rotations, and then with to save them with probabilities.\n\"\"\"\n\n\ndef viz_transform(body_id, mesh, transform):\n quat = Quaternion(matrix=transform)\n r = quat.real\n v = quat.vector\n p.resetBasePositionAndOrientation(\n body_id, [0, 0, mesh.extents[2] / 2.0], [quat.vector[0], quat.vector[1], quat.vector[2], quat.real]\n )\n return r, v\n\n\ndef compute_poses(mesh, threshold):\n poses = trimesh.poses.compute_stable_poses(mesh, n_samples=5, threshold=threshold)\n return poses\n\n\nclass TimeoutError(Exception):\n pass\n\n\ndef handler(signum, frame):\n raise TimeoutError()\n\n\ndef main(args):\n p.connect(p.GUI)\n cat_ids = []\n if args.object_cat is not None:\n cat_dir = \"data/ig_dataset/objects/%s\" % (args.object_cat)\n for obj_id in os.listdir(cat_dir):\n cat_ids.append((args.object_cat, obj_id))\n elif args.cat_file is not None:\n with open(args.cat_file, \"r\") as f:\n for line in f:\n cat = line.strip()\n cat_dir = \"data/ig_dataset/objects/%s\" % (cat)\n for obj_id in os.listdir(cat_dir):\n cat_ids.append((cat, obj_id))\n else:\n with open(args.cat_id_file, \"r\") as f:\n for line in f:\n cat_ids.append(line.strip().split(\"/\"))\n for object_cat, object_id in cat_ids:\n print(\"Processing %s %s\" % (object_cat, object_id))\n metadata_file = \"data/ig_dataset/objects/%s/%s/misc/metadata.json\" % (object_cat, object_id)\n with open(metadata_file, \"r\") as f:\n metadata = json.load(f)\n\n if args.skip_processed and \"orientations\" in metadata and len(metadata[\"orientations\"]) > 0:\n continue\n\n urdf_file = \"data/ig_dataset/objects/%s/%s/%s.urdf\" % (object_cat, object_id, object_id)\n tree = ET.parse(urdf_file)\n joints = tree.findall(\"joint\")\n links = tree.findall(\"link\")\n # Find the base link\n children_links = [joint.find(\"child\").attrib[\"link\"] for joint in joints]\n base_link = [link for link in links if link.attrib[\"name\"] not in children_links][0]\n object_file = base_link.find(\"collision/geometry/mesh\").attrib[\"filename\"]\n # Use the collision mesh of the base link\n object_file = \"data/ig_dataset/objects/%s/%s/%s\" % (object_cat, object_id, object_file)\n\n mesh = trimesh.load(object_file, force=\"mesh\")\n\n poses = [np.eye(4)]\n # set the timeout handler\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(10)\n try:\n new_poses = compute_poses(mesh, args.threshold)\n for pose in new_poses[0]:\n poses.append(pose)\n except TimeoutError as exc:\n pass\n finally:\n signal.alarm(0)\n visualShapeId = p.createVisualShape(shapeType=p.GEOM_MESH, fileName=object_file)\n collisionShapeId = p.createCollisionShape(shapeType=p.GEOM_MESH, fileName=object_file)\n body_id = p.createMultiBody(baseCollisionShapeIndex=collisionShapeId, baseVisualShapeIndex=visualShapeId)\n\n info_dict = {}\n aabb = p.getAABB(body_id)\n print(\"Showing all stable placement rotations:\")\n dicts = []\n\n for i in range(len(poses)):\n print(\"Num poses:\", len(poses))\n rotation_dict = {}\n transform = poses[i]\n r, v = viz_transform(body_id, mesh, transform)\n prob = 0\n if args.save_json:\n inp = input(\"Enter probability of rotation, or +x/-x or/ +y/-y or +z/-z to rotate:\")\n if inp == \"\":\n 
continue\n while inp[0] == \"+\" or inp[0] == \"-\":\n rot_num = float(inp.split()[1])\n if inp[0] == \"-\":\n rot_num *= -1\n if inp[1] == \"z\":\n rot = np.array(\n [\n [math.cos(math.pi * rot_num), -math.sin(math.pi * rot_num), 0.0, 0.0],\n [math.sin(math.pi * rot_num), math.cos(math.pi * rot_num), 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n )\n elif inp[1] == \"y\":\n rot = np.array(\n [\n [math.cos(math.pi * rot_num), 0.0, math.sin(math.pi * rot_num), 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [-math.sin(math.pi * rot_num), 0.0, math.cos(math.pi * rot_num), 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n )\n elif inp[1] == \"x\":\n rot = np.array(\n [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, math.cos(math.pi * rot_num), -math.sin(math.pi * rot_num), 0.0],\n [0.0, math.sin(math.pi * rot_num), math.cos(math.pi * rot_num), 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n )\n transform = np.matmul(rot, transform)\n r, v = viz_transform(body_id, mesh, transform)\n inp = input(\"Enter probability of rotation, or +/- to rotate about Z:\")\n prob = float(inp)\n variation = float(input(\"Enter variation about Z (0-1):\"))\n rotation_dict[\"prob\"] = prob\n rotation_dict[\"variation\"] = variation\n else:\n skip = input(\"Rotation %d: (press enter to continue)\" % (i + 1))\n rotation_dict[\"rotation\"] = [float(v[0]), float(v[1]), float(v[2]), float(r)]\n aabb = p.getAABB(body_id)\n size = [aabb[1][0] - aabb[0][0], aabb[1][1] - aabb[0][1], aabb[1][2] - aabb[0][2]]\n print(\"Bounding box size=%s\" % str(size))\n rotation_dict[\"size\"] = size\n if prob > 0:\n dicts.append(rotation_dict)\n\n print(\"Summary:\")\n for d in dicts:\n print(d)\n\n if args.save_json:\n metadata[\"orientations\"] = dicts\n\n with open(metadata_file, \"w\") as f:\n documents = json.dump(metadata, f)\n\n p.removeBody(body_id)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Analyze objects that can be placed in a container for their plausible rotations.\"\n )\n parser.add_argument(\"--object_cat\", type=str, default=None, help=\"A category to set rotation for\")\n parser.add_argument(\n \"--cat_id_file\",\n type=str,\n default=None,\n help=\"A text file containing category and id of each object to set rotation for, one per line\",\n )\n parser.add_argument(\n \"--cat_file\",\n type=str,\n default=None,\n help=\"A text file containing category and id of each object to set rotation for, one per line\",\n )\n parser.add_argument(\n \"--save_json\", action=\"store_true\", help=\"Whether to ask for and save orientations and probabilities to json.\"\n )\n parser.add_argument(\"--threshold\", type=float, default=0.03, help=\"Threshold for including orientations or not.\")\n parser.add_argument(\"--skip_processed\", action=\"store_true\")\n args = parser.parse_args()\n if args.object_cat is None and args.cat_id_file is None and args.cat_file is None:\n raise ValueError(\"Either object_cat or cat_id_file or cat_file must be set\")\n main(args)\n",
"import os\n\nimport numpy as np\n\nimport igibson\nfrom igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings\nfrom igibson.render.mesh_renderer.mesh_renderer_tensor import MeshRendererG2G\n\n\ndef test_tensor_render_rendering():\n w = 800\n h = 600\n setting = MeshRendererSettings(enable_pbr=False, msaa=True)\n renderer = MeshRendererG2G(w, h, rendering_settings=setting)\n test_dir = os.path.join(igibson.assets_path, \"test\")\n renderer.load_object(os.path.join(test_dir, \"mesh/bed1a77d92d64f5cbbaaae4feed64ec1_new.obj\"))\n renderer.add_instance(0)\n\n renderer.set_camera([0, 0, 1.2], [0, 1, 1.2], [0, 1, 0])\n renderer.set_fov(90)\n tensor, tensor2 = renderer.render(modes=(\"rgb\", \"normal\"))\n\n img_np = tensor.flip(0).data.cpu().numpy().reshape(h, w, 4)\n img_np2 = tensor2.flip(0).data.cpu().numpy().reshape(h, w, 4)\n\n # plt.subplot(1,2,1)\n # plt.imshow(img_np)\n # plt.subplot(1,2,2)\n # plt.imshow(img_np2)\n # plt.show()\n assert np.allclose(\n np.mean(img_np.astype(np.float32), axis=(0, 1)),\n np.array([131.71548, 128.34981, 121.81708, 255.86292]),\n rtol=1e-3,\n )\n\n # print(np.mean(img_np.astype(np.float32), axis = (0,1)))\n # print(np.mean(img_np2.astype(np.float32), axis = (0,1)))\n renderer.release()\n",
"import gym\nimport numpy as np\nimport pybullet as p\n\nfrom igibson.external.pybullet_tools.utils import joints_from_names, set_joint_positions\nfrom igibson.robots.robot_locomotor import LocomotorRobot\n\n\nclass Fetch(LocomotorRobot):\n \"\"\"\n Fetch Robot\n Reference: https://fetchrobotics.com/robotics-platforms/fetch-mobile-manipulator/\n Uses joint velocity control\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.wheel_velocity = config.get(\"wheel_velocity\", 1.0)\n self.torso_lift_velocity = config.get(\"torso_lift_velocity\", 1.0)\n self.arm_velocity = config.get(\"arm_velocity\", 1.0)\n self.wheel_dim = 2\n self.torso_lift_dim = 1\n self.arm_dim = 7\n LocomotorRobot.__init__(\n self,\n \"fetch/fetch.urdf\",\n action_dim=self.wheel_dim + self.torso_lift_dim + self.arm_dim,\n scale=config.get(\"robot_scale\", 1.0),\n is_discrete=config.get(\"is_discrete\", False),\n control=\"velocity\",\n self_collision=True,\n )\n\n def set_up_continuous_action_space(self):\n \"\"\"\n Set up continuous action space\n \"\"\"\n self.action_high = np.array(\n [self.wheel_velocity] * self.wheel_dim\n + [self.torso_lift_velocity] * self.torso_lift_dim\n + [self.arm_velocity] * self.arm_dim\n )\n self.action_low = -self.action_high\n self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)\n\n def set_up_discrete_action_space(self):\n \"\"\"\n Set up discrete action space\n \"\"\"\n assert False, \"Fetch does not support discrete actions\"\n\n def robot_specific_reset(self):\n \"\"\"\n Fetch robot specific reset.\n Reset the torso lift joint and tuck the arm towards the body\n \"\"\"\n super(Fetch, self).robot_specific_reset()\n\n # roll the arm to its body\n robot_id = self.robot_ids[0]\n arm_joints = joints_from_names(\n robot_id,\n [\n \"torso_lift_joint\",\n \"shoulder_pan_joint\",\n \"shoulder_lift_joint\",\n \"upperarm_roll_joint\",\n \"elbow_flex_joint\",\n \"forearm_roll_joint\",\n \"wrist_flex_joint\",\n \"wrist_roll_joint\",\n ],\n )\n\n rest_position = (0.02, np.pi / 2.0 - 0.4, np.pi / 2.0 - 0.1, -0.4, np.pi / 2.0 + 0.1, 0.0, np.pi / 2.0, 0.0)\n # might be a better pose to initiate manipulation\n # rest_position = (0.30322468280792236, -1.414019864768982,\n # 1.5178184935241699, 0.8189625336474915,\n # 2.200358942909668, 2.9631312579803466,\n # -1.2862852996643066, 0.0008453550418615341)\n\n set_joint_positions(robot_id, arm_joints, rest_position)\n\n def get_end_effector_position(self):\n \"\"\"\n Get end-effector position\n \"\"\"\n return self.parts[\"gripper_link\"].get_position()\n\n def end_effector_part_index(self):\n \"\"\"\n Get end-effector link id\n \"\"\"\n return self.parts[\"gripper_link\"].body_part_index\n\n def load(self):\n \"\"\"\n Load the robot into pybullet. Filter out unnecessary self collision\n due to modeling imperfection in the URDF\n \"\"\"\n ids = super(Fetch, self).load()\n robot_id = self.robot_ids[0]\n\n disable_collision_names = [\n [\"torso_lift_joint\", \"shoulder_lift_joint\"],\n [\"torso_lift_joint\", \"torso_fixed_joint\"],\n [\"caster_wheel_joint\", \"estop_joint\"],\n [\"caster_wheel_joint\", \"laser_joint\"],\n [\"caster_wheel_joint\", \"torso_fixed_joint\"],\n [\"caster_wheel_joint\", \"l_wheel_joint\"],\n [\"caster_wheel_joint\", \"r_wheel_joint\"],\n ]\n for names in disable_collision_names:\n link_a, link_b = joints_from_names(robot_id, names)\n p.setCollisionFilterPair(robot_id, robot_id, link_a, link_b, 0)\n\n return ids\n",
"import copy\n\nimport numpy as np\nimport pybullet as p\n\nfrom igibson.metrics.metric_base import MetricBase\n\n\nclass AgentMetric(MetricBase):\n def __init__(self):\n self.initialized = False\n\n self.state_cache = {}\n self.next_state_cache = {}\n\n self.agent_pos = {part: [] for part in [\"left_hand\", \"right_hand\", \"body\"]}\n self.agent_grasping = {part: [] for part in [\"left_hand\", \"right_hand\"]}\n\n self.agent_local_pos = {part: [] for part in [\"left_hand\", \"right_hand\"]}\n\n self.agent_reset = {part: [] for part in [\"left_hand\", \"right_hand\", \"body\"]}\n\n self.delta_agent_work = {part: [] for part in [\"left_hand\", \"right_hand\", \"body\"]}\n self.delta_agent_distance = {part: [] for part in [\"left_hand\", \"right_hand\", \"body\"]}\n self.delta_agent_grasp_distance = {part: [] for part in [\"left_hand\", \"right_hand\"]}\n\n self.clip = 0.2\n\n def step_callback(self, igbhvr_act_inst, _):\n robot = igbhvr_act_inst.simulator.robots[0]\n agent_work = {part: 0 for part in [\"left_hand\", \"right_hand\", \"body\"]}\n agent_distance = {part: 0 for part in [\"left_hand\", \"right_hand\", \"body\"]}\n\n for part in [\"left_hand\", \"right_hand\", \"body\"]:\n self.next_state_cache[part] = {\n \"position\": np.array(p.getBasePositionAndOrientation(robot.parts[part].body_id)[0]),\n }\n\n if not self.initialized:\n self.state_cache = copy.deepcopy(self.next_state_cache)\n self.initialized = True\n\n if robot.action[19] > 0 and robot.action[27] > 0:\n self.agent_reset[\"left_hand\"].append(True)\n self.agent_reset[\"right_hand\"].append(True)\n self.agent_reset[\"body\"].append(True)\n if robot.action[19] > 0:\n self.agent_reset[\"left_hand\"].append(True)\n self.agent_reset[\"right_hand\"].append(False)\n self.agent_reset[\"body\"].append(True)\n elif robot.action[27] > 0:\n self.agent_reset[\"left_hand\"].append(False)\n self.agent_reset[\"right_hand\"].append(True)\n self.agent_reset[\"body\"].append(True)\n else:\n self.agent_reset[\"left_hand\"].append(False)\n self.agent_reset[\"right_hand\"].append(False)\n self.agent_reset[\"body\"].append(False)\n\n for part in self.state_cache:\n delta_pos = np.linalg.norm(self.next_state_cache[part][\"position\"] - self.state_cache[part][\"position\"])\n self.agent_pos[part].append(list(self.state_cache[part][\"position\"]))\n # Exclude agent teleports\n delta_pos = np.clip(delta_pos, -self.clip, self.clip)\n if robot.parts[part].movement_cid is None:\n force = 0\n work = 0\n else:\n force = p.getConstraintState(robot.parts[part].movement_cid)\n work = np.abs((delta_pos * np.linalg.norm(force)))\n\n distance = np.abs(delta_pos)\n if part in [\"left_hand\", \"right_hand\"]:\n self.agent_local_pos[part].append(list(robot.parts[part].local_pos))\n if part in [\"left_hand\", \"right_hand\"] and (\n len(p.getContactPoints(robot.parts[part].body_id)) > 0 or robot.parts[part].object_in_hand is not None\n ):\n self.delta_agent_grasp_distance[part].append(distance)\n self.agent_grasping[part].append(True)\n elif part in [\"left_hand\", \"right_hand\"]:\n self.delta_agent_grasp_distance[part].append(0)\n self.agent_grasping[part].append(False)\n\n agent_work[part] = work\n agent_distance[part] = distance\n\n self.delta_agent_work[part].append(work)\n self.delta_agent_distance[part].append(distance)\n\n self.state_cache = copy.deepcopy(self.next_state_cache)\n\n def gather_results(self):\n return {\n \"agent_distance\": {\n \"timestep\": self.delta_agent_distance,\n },\n \"grasp_distance\": {\n \"timestep\": 
self.delta_agent_grasp_distance,\n },\n \"work\": {\n \"timestep\": self.delta_agent_work,\n },\n \"pos\": {\n \"timestep\": self.agent_pos,\n },\n \"local_pos\": {\n \"timestep\": self.agent_local_pos,\n },\n \"grasping\": {\n \"timestep\": self.agent_grasping,\n },\n \"reset\": {\n \"timestep\": self.agent_reset,\n },\n }\n"
] | [
[
"numpy.ones"
],
[
"numpy.eye",
"numpy.matmul"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.abs",
"numpy.linalg.norm",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
teja-ambati1202/Insurance-Fraud-Detection | [
"a9bbdd5a2af68e0e90f8e16ba43129bab709614b"
] | [
"Training_Raw_data_validation/rawValidation.py"
] | [
"import sqlite3\r\nfrom datetime import datetime\r\nfrom os import listdir\r\nimport os\r\nimport re\r\nimport json\r\nimport shutil\r\nimport pandas as pd\r\nfrom application_logging.logger import App_Logger\r\n\r\n\r\n\r\n\r\n\r\nclass Raw_Data_validation:\r\n\r\n \"\"\"\r\n This class shall be used for handling all the validation done on the Raw Training Data!!.\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n def __init__(self,path):\r\n self.Batch_Directory = path\r\n self.schema_path = 'schema_training.json'\r\n self.logger = App_Logger()\r\n\r\n\r\n def valuesFromSchema(self):\r\n \"\"\"\r\n Method Name: valuesFromSchema\r\n Description: This method extracts all the relevant information from the pre-defined \"Schema\" file.\r\n Output: LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, Number of Columns\r\n On Failure: Raise ValueError,KeyError,Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n with open(self.schema_path, 'r') as f:\r\n dic = json.load(f)\r\n f.close()\r\n pattern = dic['SampleFileName']\r\n LengthOfDateStampInFile = dic['LengthOfDateStampInFile']\r\n LengthOfTimeStampInFile = dic['LengthOfTimeStampInFile']\r\n column_names = dic['ColName']\r\n NumberofColumns = dic['NumberofColumns']\r\n\r\n file = open(\"Training_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n message =\"LengthOfDateStampInFile:: %s\" %LengthOfDateStampInFile + \"\\t\" + \"LengthOfTimeStampInFile:: %s\" % LengthOfTimeStampInFile +\"\\t \" + \"NumberofColumns:: %s\" % NumberofColumns + \"\\n\"\r\n self.logger.log(file,message)\r\n\r\n file.close()\r\n\r\n\r\n\r\n except ValueError:\r\n file = open(\"Training_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n self.logger.log(file,\"ValueError:Value not found inside schema_training.json\")\r\n file.close()\r\n raise ValueError\r\n\r\n except KeyError:\r\n file = open(\"Training_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n self.logger.log(file, \"KeyError:Key value error incorrect key passed\")\r\n file.close()\r\n raise KeyError\r\n\r\n except Exception as e:\r\n file = open(\"Training_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n self.logger.log(file, str(e))\r\n file.close()\r\n raise e\r\n\r\n return LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, NumberofColumns\r\n\r\n\r\n def manualRegexCreation(self):\r\n \"\"\"\r\n Method Name: manualRegexCreation\r\n Description: This method contains a manually defined regex based on the \"FileName\" given in \"Schema\" file.\r\n This Regex is used to validate the filename of the training data.\r\n Output: Regex pattern\r\n On Failure: None\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n regex = \"['fraudDetection']+['\\_'']+[\\d_]+[\\d]+\\.csv\"\r\n return regex\r\n\r\n def createDirectoryForGoodBadRawData(self):\r\n\r\n \"\"\"\r\n Method Name: createDirectoryForGoodBadRawData\r\n Description: This method creates directories to store the Good Data and Bad Data\r\n after validating the training data.\r\n\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n try:\r\n path = os.path.join(\"Training_Raw_files_validated/\", \"Good_Raw/\")\r\n if not os.path.isdir(path):\r\n os.makedirs(path)\r\n path = os.path.join(\"Training_Raw_files_validated/\", \"Bad_Raw/\")\r\n if not os.path.isdir(path):\r\n 
os.makedirs(path)\r\n\r\n except OSError as ex:\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Error while creating Directory %s:\" % ex)\r\n file.close()\r\n raise OSError\r\n\r\n def deleteExistingGoodDataTrainingFolder(self):\r\n\r\n \"\"\"\r\n Method Name: deleteExistingGoodDataTrainingFolder\r\n Description: This method deletes the directory made to store the Good Data\r\n after loading the data in the table. Once the good files are\r\n loaded in the DB,deleting the directory ensures space optimization.\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n try:\r\n path = 'Training_Raw_files_validated/'\r\n # if os.path.isdir(\"ids/\" + userName):\r\n # if os.path.isdir(path + 'Bad_Raw/'):\r\n # shutil.rmtree(path + 'Bad_Raw/')\r\n if os.path.isdir(path + 'Good_Raw/'):\r\n shutil.rmtree(path + 'Good_Raw/')\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"GoodRaw directory deleted successfully!!!\")\r\n file.close()\r\n except OSError as s:\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Error while Deleting Directory : %s\" %s)\r\n file.close()\r\n raise OSError\r\n\r\n def deleteExistingBadDataTrainingFolder(self):\r\n\r\n \"\"\"\r\n Method Name: deleteExistingBadDataTrainingFolder\r\n Description: This method deletes the directory made to store the bad Data.\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n try:\r\n path = 'Training_Raw_files_validated/'\r\n if os.path.isdir(path + 'Bad_Raw/'):\r\n shutil.rmtree(path + 'Bad_Raw/')\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"BadRaw directory deleted before starting validation!!!\")\r\n file.close()\r\n except OSError as s:\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Error while Deleting Directory : %s\" %s)\r\n file.close()\r\n raise OSError\r\n\r\n def moveBadFilesToArchiveBad(self):\r\n\r\n \"\"\"\r\n Method Name: moveBadFilesToArchiveBad\r\n Description: This method deletes the directory made to store the Bad Data\r\n after moving the data in an archive folder. 
We archive the bad\r\n files to send them back to the client for the invalid data issue.\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n now = datetime.now()\r\n date = now.date()\r\n time = now.strftime(\"%H%M%S\")\r\n try:\r\n\r\n source = 'Training_Raw_files_validated/Bad_Raw/'\r\n if os.path.isdir(source):\r\n path = \"TrainingArchiveBadData\"\r\n if not os.path.isdir(path):\r\n os.makedirs(path)\r\n dest = 'TrainingArchiveBadData/BadData_' + str(date)+\"_\"+str(time)\r\n if not os.path.isdir(dest):\r\n os.makedirs(dest)\r\n files = os.listdir(source)\r\n for f in files:\r\n if f not in os.listdir(dest):\r\n shutil.move(source + f, dest)\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Bad files moved to archive\")\r\n path = 'Training_Raw_files_validated/'\r\n if os.path.isdir(path + 'Bad_Raw/'):\r\n shutil.rmtree(path + 'Bad_Raw/')\r\n self.logger.log(file,\"Bad Raw Data Folder Deleted successfully!!\")\r\n file.close()\r\n except Exception as e:\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file, \"Error while moving bad files to archive:: %s\" % e)\r\n file.close()\r\n raise e\r\n\r\n\r\n\r\n\r\n def validationFileNameRaw(self,regex,LengthOfDateStampInFile,LengthOfTimeStampInFile):\r\n \"\"\"\r\n Method Name: validationFileNameRaw\r\n Description: This function validates the name of the training csv files as per the given name in the schema!\r\n Regex pattern is used to do the validation. If the name format does not match, the file is moved\r\n to the Bad Raw Data folder; otherwise to the Good Raw Data folder.\r\n Output: None\r\n On Failure: Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n\r\n # delete the directories for good and bad data in case last run was unsuccessful and folders were not deleted.\r\n self.deleteExistingBadDataTrainingFolder()\r\n self.deleteExistingGoodDataTrainingFolder()\r\n #create new directories\r\n self.createDirectoryForGoodBadRawData()\r\n onlyfiles = [f for f in listdir(self.Batch_Directory)]\r\n try:\r\n f = open(\"Training_Logs/nameValidationLog.txt\", 'a+')\r\n for filename in onlyfiles:\r\n if (re.match(regex, filename)):\r\n splitAtDot = re.split('.csv', filename)\r\n splitAtDot = (re.split('_', splitAtDot[0]))\r\n if len(splitAtDot[1]) == LengthOfDateStampInFile:\r\n if len(splitAtDot[2]) == LengthOfTimeStampInFile:\r\n shutil.copy(\"Training_Batch_Files/\" + filename, \"Training_Raw_files_validated/Good_Raw\")\r\n self.logger.log(f,\"Valid File name!! File moved to GoodRaw Folder :: %s\" % filename)\r\n\r\n else:\r\n shutil.copy(\"Training_Batch_Files/\" + filename, \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f,\"Invalid File Name!! File moved to Bad Raw Folder :: %s\" % filename)\r\n else:\r\n shutil.copy(\"Training_Batch_Files/\" + filename, \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f,\"Invalid File Name!! File moved to Bad Raw Folder :: %s\" % filename)\r\n else:\r\n shutil.copy(\"Training_Batch_Files/\" + filename, \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f, \"Invalid File Name!! 
File moved to Bad Raw Folder :: %s\" % filename)\r\n\r\n f.close()\r\n\r\n except Exception as e:\r\n f = open(\"Training_Logs/nameValidationLog.txt\", 'a+')\r\n self.logger.log(f, \"Error occurred while validating FileName %s\" % e)\r\n f.close()\r\n raise e\r\n\r\n\r\n\r\n\r\n def validateColumnLength(self,NumberofColumns):\r\n \"\"\"\r\n Method Name: validateColumnLength\r\n Description: This function validates the number of columns in the csv files.\r\n It should be the same as given in the schema file.\r\n If not, the file is not suitable for processing and is thus moved to the Bad Raw Data folder.\r\n If the column number matches, the file is kept in Good Raw Data for processing.\r\n\r\n Output: None\r\n On Failure: Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n f = open(\"Training_Logs/columnValidationLog.txt\", 'a+')\r\n self.logger.log(f,\"Column Length Validation Started!!\")\r\n for file in listdir('Training_Raw_files_validated/Good_Raw/'):\r\n csv = pd.read_csv(\"Training_Raw_files_validated/Good_Raw/\" + file)\r\n if csv.shape[1] == NumberofColumns:\r\n pass\r\n else:\r\n shutil.move(\"Training_Raw_files_validated/Good_Raw/\" + file, \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f, \"Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s\" % file)\r\n self.logger.log(f, \"Column Length Validation Completed!!\")\r\n except OSError:\r\n f = open(\"Training_Logs/columnValidationLog.txt\", 'a+')\r\n self.logger.log(f, \"Error Occurred while moving the file :: %s\" % OSError)\r\n f.close()\r\n raise OSError\r\n except Exception as e:\r\n f = open(\"Training_Logs/columnValidationLog.txt\", 'a+')\r\n self.logger.log(f, \"Error Occurred:: %s\" % e)\r\n f.close()\r\n raise e\r\n f.close()\r\n\r\n def validateMissingValuesInWholeColumn(self):\r\n \"\"\"\r\n Method Name: validateMissingValuesInWholeColumn\r\n Description: This function validates if any column in the csv file has all values missing.\r\n If all the values are missing, the file is not suitable for processing.\r\n Such files are moved to bad raw data.\r\n Output: None\r\n On Failure: Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n f = open(\"Training_Logs/missingValuesInColumn.txt\", 'a+')\r\n self.logger.log(f,\"Missing Values Validation Started!!\")\r\n\r\n for file in listdir('Training_Raw_files_validated/Good_Raw/'):\r\n csv = pd.read_csv(\"Training_Raw_files_validated/Good_Raw/\" + file)\r\n count = 0\r\n for columns in csv:\r\n if (len(csv[columns]) - csv[columns].count()) == len(csv[columns]):\r\n count+=1\r\n shutil.move(\"Training_Raw_files_validated/Good_Raw/\" + file,\r\n \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f,\"Invalid Column for the file!! File moved to Bad Raw Folder :: %s\" % file)\r\n break\r\n if count==0:\r\n csv.rename(columns={\"Unnamed: 0\": \"Wafer\"}, inplace=True)\r\n csv.to_csv(\"Training_Raw_files_validated/Good_Raw/\" + file, index=None, header=True)\r\n except OSError:\r\n f = open(\"Training_Logs/missingValuesInColumn.txt\", 'a+')\r\n self.logger.log(f, \"Error Occurred while moving the file :: %s\" % OSError)\r\n f.close()\r\n raise OSError\r\n except Exception as e:\r\n f = open(\"Training_Logs/missingValuesInColumn.txt\", 'a+')\r\n self.logger.log(f, \"Error Occurred:: %s\" % e)\r\n f.close()\r\n raise e\r\n f.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
inqlee0704/pyqct | [
"304612ed558e7c46fe987ecfea8145cbc5721700"
] | [
"QCT/get_S_norm.py"
] | [
"# ##############################################################################\n# Usage: python get_S_norm.py Subj I1 I2\n# Time: ~ 20s\n# Ref: \n# ##############################################################################\n# 20220118, In Kyu Lee\n# No version suffix\n# ##############################################################################\n# v1c: 08/11/2021, In Kyu Lee\n# - Fixed: when V_IN < V_EX, s_norm returns nan issue.\n# - ownpow is used\n# v1b: 08/10/2021, In Kyu Lee\n# - S* stat is added\n# 03/18/2021, In Kyu Lee\n# Calculate S*\n# ##############################################################################\n# Input: \n# - displacement img, ex) PMSN03001_EX0-TO-PMSN03001_IN0-SSTVD_disp_resample.mhd'\n# - IN lobe mask, ex) PMSN03001_IN0_vida-lobes.img\n# Output:\n# - s* image, ex) PMSN03001_EX0-TO-PMSN03001_IN0-SSTVD_s_norm.img\n# - s* stat, ex) PMSN03001_EX0-TO-PMSN03001_IN0-SSTVD_lobar_s_norm.txt\n# ##############################################################################w\n\n# import libraries\nimport os\nimport sys\nimport numpy as np\nimport time\nimport pandas as pd\nfrom medpy.io import load, save\nimport SimpleITK as sitk\nsitk.ProcessObject_SetGlobalWarningDisplay(False)\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef ownpow(a, b):\n if a > 0:\n return a**b\n if a < 0:\n temp = abs(a)**b\n return -1*temp\n\nstart = time.time()\nSubj = str(sys.argv[1]) # PMSN03001\nI1 = str(sys.argv[2]) # 'IN0'\nI2 = str(sys.argv[3]) # 'EX0'\n\ndisp_path = f'{Subj}_{I2}-TO-{Subj}_{I1}-SSTVD_disp_resample.mhd'\nhisto_EX = pd.read_csv(f'{Subj}_{I2}_vida-histo.csv')\nhisto_IN = pd.read_csv(f'{Subj}_{I1}_vida-histo.csv')\ns_norm_stat_path = f'{Subj}_{I2}-TO-{Subj}_{I1}-SSTVD_lobar_s_norm.txt'\n\nIN_lobe_path = f'{Subj}_{I1}_vida-lobes.img'\nif not os.path.exists(IN_lobe_path):\n IN_lobe_path = f'{Subj}_{I1}_vida-lobes.img.gz'\n\ns_norm_img_path = f'{Subj}_{I2}-TO-{Subj}_{I1}-SSTVD_s_norm.img'\n# V_cm3_IN \nV_EX = histo_EX.loc[histo_EX.location=='both', 'total-volume-cm3'].values[0]\nV_IN = histo_IN.loc[histo_IN.location=='both', 'total-volume-cm3'].values[0]\n# cm^3 -> mm^3\nV_EX = V_EX * 1000\nV_IN = V_IN * 1000\n\n# Data Loading . . 
.\ndisp, disp_h = load(disp_path)\nIN_lobe_img, IN_lobe_header = load(IN_lobe_path)\ns_norm_h = disp_h\n# [mm]\ns = (disp[:,:,:,0]**2+disp[:,:,:,1]**2+disp[:,:,:,2]**2)**0.5\n# This doesn't work if V_IN- V_EX is negative\n# s_norm = s/((V_IN-V_EX)**(1/3))\ns_norm = s/ownpow(V_IN-V_EX,1/3)\n\n# Prep stat\ns_norm_l0 = np.mean(s_norm[IN_lobe_img==8])\ns_norm_l1 = np.mean(s_norm[IN_lobe_img==16])\ns_norm_l2 = np.mean(s_norm[IN_lobe_img==32])\ns_norm_l3 = np.mean(s_norm[IN_lobe_img==64])\ns_norm_l4 = np.mean(s_norm[IN_lobe_img==128])\ns_norm_mean = (s_norm_l0 + s_norm_l1 + s_norm_l2 + s_norm_l3 + s_norm_l4)/5\n\ns_norm_l0_sd = np.std(s_norm[IN_lobe_img==8])\ns_norm_l1_sd = np.std(s_norm[IN_lobe_img==16])\ns_norm_l2_sd = np.std(s_norm[IN_lobe_img==32])\ns_norm_l3_sd = np.std(s_norm[IN_lobe_img==64])\ns_norm_l4_sd = np.std(s_norm[IN_lobe_img==128])\ns_norm_sd = np.std(s_norm[IN_lobe_img!=0])\n\n# CV = std/mean\ns_norm_l0_cv = s_norm_l0_sd/s_norm_l0\ns_norm_l1_cv = s_norm_l1_sd/s_norm_l1\ns_norm_l2_cv = s_norm_l2_sd/s_norm_l2\ns_norm_l3_cv = s_norm_l3_sd/s_norm_l3\ns_norm_l4_cv = s_norm_l4_sd/s_norm_l4\ns_norm_cv = s_norm_sd/s_norm_mean\n\ns_norm_stat = pd.DataFrame({'Lobes':['Lobe0','Lobe1','Lobe2','Lobe3','Lobe4','All'],\n 'sStar_m':np.float16([s_norm_l0,s_norm_l1,s_norm_l2,s_norm_l3,s_norm_l4,s_norm_mean]),\n 'sStar_sd':np.float16([s_norm_l0_sd,s_norm_l1_sd,s_norm_l2_sd,s_norm_l3_sd,s_norm_l4_sd,s_norm_sd]),\n 'sStar_cv':np.float16([s_norm_l0_cv,s_norm_l1_cv,s_norm_l2_cv,s_norm_l3_cv,s_norm_l4_cv,s_norm_cv])})\n\n\n# Save\nsave(s_norm,s_norm_img_path,hdr=s_norm_h)\ns_norm_stat.to_csv(s_norm_stat_path, index=False, sep=' ')\nend = time.time()\nprint(f'Elapsed time: {end-start}s')\n"
] | [
[
"numpy.float16",
"numpy.std",
"pandas.read_csv",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
a-maumau/pixel_objectness.pytorch | [
"f5acb972be694662d839b99eb33e66a807d6031e"
] | [
"trainer.py"
] | [
"import os\nimport math\nimport argparse\nfrom datetime import datetime\n\nimport torch \nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom PIL import Image\n\nimport data_loader\nfrom mau_ml_util.train_logger import TrainLogger\n#from mau_ml_util.metric import SegmentationMetric\nfrom metric_from_latest_mmu import SegmentationMetric\nfrom templates import Template_Trainer\n\ntorch.backends.cudnn.benchmark = True\n\nclass ColorMap(object):\n def __init__(self, base_color=[[0,0,1], [0,1,1], [0,1,0], [1,1,0], [1,0,0]]):\n \"\"\"\n color_points: list of [int, int, int]\n each value of component represent R,G,B.\n \"\"\"\n\n self.base_color = base_color\n self.num_color_min1 = len(self.base_color)-1\n\n def __call__(self, val):\n return self.to_colormap(val)\n\n def to_colormap(self, val):\n \"\"\"\n returns tpule of (R,G,B) value in range [0,1].\n \"\"\"\n\n fract_between = 0\n\n if val <= 0:\n idx1 = idx2 = 0\n elif val >= 1:\n idx1 = idx2 = self.num_color_min1\n else:\n val = val * (self.num_color_min1)\n idx1 = math.floor(val);\n idx2 = idx1+1;\n fract_between = val - idx1\n \n r = (self.base_color[idx2][0] - self.base_color[idx1][0])*fract_between + self.base_color[idx1][0]\n g = (self.base_color[idx2][1] - self.base_color[idx1][1])*fract_between + self.base_color[idx1][1]\n b = (self.base_color[idx2][2] - self.base_color[idx1][2])*fract_between + self.base_color[idx1][2]\n\n return (r,g,b) \n\nclass Trainer_PixelObjectness(Template_Trainer):\n def __init__(self, args, model, optimizer, lr_policy):\n self.args = args \n self.lr_policy = lr_policy\n self.iter_wise = self.lr_policy.iteration_wise\n\n # for loggin the training\n val_head = [\"iter\" if self.iter_wise else \"epoch\", \"mean_pixel_accuracy\"]\n for i in range(self.args.class_num):\n val_head.append(\"mean_precision_class_{}\".format(i))\n for i in range(self.args.class_num):\n val_head.append(\"mean_IoU_class_{}\".format(i))\n self.tlog = self.get_train_logger({\"train\":[\"iter\" if self.iter_wise else \"epoch\", \"batch_mean_total_loss\"], \"val\":val_head},\n save_dir=self.args.save_dir, save_name=self.args.save_name, arguments=self.get_argparse_arguments(self.args),\n use_http_server=self.args.use_http_server, use_msg_server=self.args.use_msg_server, notificate=False,\n visualize_fetch_stride=self.args.viz_fetch_stride, http_port=self.args.http_server_port, msg_port=self.args.msg_server_port)\n \n\n\n # paths\n self.save_dir = self.tlog.log_save_path\n self.model_param_dir = self.tlog.mkdir(\"model_param\")\n\n if torch.cuda.is_available() and not self.args.nogpu:\n self.map_device = torch.device('cuda:{}'.format(self.args.gpu_device_num))\n else:\n self.map_device = torch.device('cpu')\n\n self.model = model\n if torch.cuda.is_available() and not args.nogpu:\n self.model = self.model.to(self.map_device)\n\n self.optimizer = optimizer\n\n self.train_loader = data_loader.get_train_loader(self.args, [(0.5, 0.5, 0.5),(0.5, 0.5, 0.5)])#[(0.485, 0.456, 0.406),(0.229, 0.224, 0.225)])\n self.val_loader = data_loader.get_val_loader(self.args, [(0.5, 0.5, 0.5),(0.5, 0.5, 0.5)])\n\n self.cmap = self._gen_cmap()\n\n if self.args.show_parameters:\n for idx, m in enumerate(model.modules()):\n print(idx, '->', m)\n print(args)\n\n print(\"\\nsaving at {}\\n\".format(self.save_dir))\n\n # PASCAL VOC color maps\n # borrowed from https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae\n def _gen_cmap_voc(self, class_num=255):\n def bitget(byteval, idx):\n return ((byteval & (1 
<< idx)) != 0)\n\n cmap = np.zeros((class_num+1, 3), dtype='uint8')\n for i in range(class_num+1):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7-j)\n g = g | (bitget(c, 1) << 7-j)\n b = b | (bitget(c, 2) << 7-j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n return cmap\n\n def _gen_cmap(self, max_value=255):\n mapper = ColorMap()\n cmap = []\n\n for v in range(max_value+1):\n cmap.append(np.uint8(np.array(mapper(v/max_value))*255))\n\n return cmap\n\n def convert_to_color_map(self, img_array, color_map=None, class_num=255):\n \"\"\"\n img_array: numpy.ndarray\n shape must be (width, height)\n \"\"\"\n\n if color_map is None:\n color_map = self._gen_cmap()\n\n new_img = np.empty(shape=(img_array.shape[0], img_array.shape[1], 3), dtype='uint8')\n\n for c in range(class_num+1):\n index = np.where(img_array == c)\n new_img[index] = color_map[c]\n\n return new_img\n\n def validate(self, count):\n with torch.no_grad():\n self.model.eval()\n\n # logging\n pix_acc = 0.0\n precision_class = []\n jaccard_class = []\n\n #data_count_precision = [0 for i in range(self.args.class_num)]\n #data_count_jaccard = [0 for i in range(self.args.class_num)]\n \n metric = SegmentationMetric(self.args.class_num, map_device=self.map_device)\n\n if self.args.quiet:\n _trainval_loader = self.val_loader\n else:\n _trainval_loader = self.to_tqdm(self.val_loader, desc=\"train val\")\n\n for b, (image, mask, original_image) in enumerate(_trainval_loader):\n batch_size = image.shape[0]\n\n img = self.format_tensor(image, requires_grad=False, map_device=self.map_device)\n mask = self.format_tensor(mask, requires_grad=False, map_device=self.map_device)\n\n outputs, prob_maps = self.model.inference(img)\n outputs = F.interpolate(outputs, size=[self.args.crop_size, self.args.crop_size], mode='bilinear', align_corners=False)\n prob_maps = F.interpolate(prob_maps, size=[self.args.crop_size, self.args.crop_size], mode='bilinear', align_corners=False)\n\n metric(outputs, mask)\n \n # save only few batch for sample\n if b < 1:\n self.tlog.setup_output(\"{}_{}_batch_{}_sample\".format(\"iter\" if self.iter_wise else \"epoch\", count, b))\n\n # test color image\n #test_img = np.ones((256,256))\n #for i in range(256):\n # test_img[i] = test_img[i]*i\n # \n #self.tlog.pack_output(Image.fromarray(self.convert_to_color_map(np.uint8(test_img))))\n \n for n in range(batch_size):\n self.tlog.pack_output(Image.fromarray(np.uint8(original_image[n].detach().numpy())))\n\n pred_img = np.uint8(outputs[n].squeeze(0).cpu().detach().numpy())\n prob_img = prob_maps[n].squeeze(0).cpu().detach().numpy()\n self.tlog.pack_output(Image.fromarray(pred_img*255), not_in_schema=True)\n self.tlog.pack_output(Image.fromarray(self.convert_to_color_map(np.uint8(prob_img[1]*255), self.cmap)))\n\n gt_img = np.uint8(mask[n].cpu().detach().numpy())\n self.tlog.pack_output(Image.fromarray(gt_img*255), not_in_schema=True)\n\n self.tlog.pack_output(None, \" \")\n\n self.tlog.pack_output(None, \"validation sample\", [\"left: input\", \"center: pred cmap\", \"right: output mask\"])\n self.tlog.flush_output()\n\n pix_acc = metric.calc_pix_acc()\n precision = metric.calc_mean_precision()\n jaccard_index = metric.calc_mean_jaccard_index()\n\n # might I should return the non evaluated class with nan and filter the list\n # by filter(lambda n: n!=float(\"nan\"), items)\n\n for class_id in range(self.args.class_num):\n precision_class.append(precision[\"class_{}\".format(class_id)])\n 
jaccard_class.append(jaccard_index[\"class_{}\".format(class_id)])\n\n #data_count_precision[class_id] += len(precision[\"class_{}\".format(str(class_id))])\n #data_count_jaccard[class_id] += len(jaccard_index[\"class_{}\".format(str(class_id))])\n\n # logging; this implementation does not account for missing values\n #mean_precision_classes = [y/x if x > 0 else 0 for y, x in zip(precision_class, data_count_precision)]\n #mean_iou_classes = [y/x if x > 0 else 0 for y, x in zip(jaccard_class, data_count_jaccard)]\n \n # calc. without background\n log_msg_data = [count, pix_acc, np.mean(precision_class[1:]), np.mean(jaccard_class[1:])]\n\n self.tlog.log(\"val\", [count, pix_acc]+precision_class+jaccard_class)\n self.tlog.log_message(\"[{}] mean pix acc.:{:.5f}, precision:{:.5f}, IoU:{:.5f}\".format(*log_msg_data), \"LOG\", \"validation\")\n\n if not self.args.quiet:\n tqdm.write(\"[{}] mean pix acc.:{:.5f}, precision:{:.5f}, IoU:{:.5f}\".format(*log_msg_data))\n\n self.model.train()\n\n def train(self):\n train_finish = False\n \n if self.args.quiet:\n epochs = range(1, self.args.epochs+1)\n else:\n epochs = self.to_tqdm(range(1, self.args.epochs+1), desc=\"train\")\n\n curr_iter = 0\n epoch = 0\n\n total_loss = 0.0\n data_num = 0\n\n # for epoch wise and iter wise\n decay_arg = {\"curr_iter\":curr_iter, \"curr_epoch\":epoch}\n\n for epoch in epochs:\n if not self.iter_wise:\n total_loss = 0.0\n data_num = 0\n\n if self.args.quiet:\n _train_loader = self.train_loader\n else:\n _train_loader = self.to_tqdm(self.train_loader)\n\n for img, mask in _train_loader:\n # loss log will be shown size averaged\n data_num += 1\n\n self.optimizer.zero_grad()\n\n images = self.format_tensor(img, map_device=self.map_device)\n masks = self.format_tensor(mask, map_device=self.map_device)\n\n output = self.model(images)\n output = F.interpolate(output, size=[self.args.crop_size, self.args.crop_size], mode='bilinear', align_corners=False)\n\n batch_loss = self.model.loss(output, masks)\n total_loss += batch_loss.item()\n \n batch_loss.backward()\n self.optimizer.step()\n\n curr_iter += 1\n\n if not self.args.quiet:\n _train_loader.set_description(\"{: 3d}: train[{}] loss: {:.5f}\".format(curr_iter if self.iter_wise else epoch, self.args.save_name, total_loss/data_num))\n\n if self.iter_wise:\n self.lr_policy.decay_lr(**decay_arg)\n \n if curr_iter % self.args.trainval_every == 0:\n self.validate(curr_iter)\n\n if curr_iter % self.args.save_every == 0:\n state = {'iter': curr_iter,\n 'optimizer_state_dict' : self.optimizer.state_dict()}\n self.model.save(add_state=state, file_name=os.path.join(self.model_param_dir,'model_param_iter{}.pth'.format(curr_iter)))\n \n self.tlog.log_message(\"[iter:{}] model saved.\".format(curr_iter), \"LOG\", \"train\")\n\n if curr_iter % self.args.log_every == 0:\n if not self.args.quiet:\n tqdm.write(\"[#{: 3d}] {} iter mean loss: {:.5f}\".format(curr_iter, self.args.log_every, total_loss/data_num))\n \n self.tlog.log(\"train\", [curr_iter, float(total_loss/data_num)])\n self.tlog.log_message(\"[{}] {} iter mean loss:{:.5f}\".format(\"iter:{}\".format(curr_iter), self.args.log_every, float(total_loss/data_num)), \"LOG\", \"train\")\n\n total_loss = 0\n data_num = 0\n\n if curr_iter == self.args.max_iter:\n train_finish = True\n _train_loader.close()\n break\n \n if train_finish:\n epochs.close()\n break\n\n if not self.iter_wise:\n if not self.args.quiet:\n tqdm.write(\"[# {: 3d}] batch mean loss: {:.5f}\".format(epoch, total_loss/data_num))\n \n if epoch % self.args.log_every == 
0:\n self.tlog.log(\"train\", [epoch, float(total_loss/data_num)])\n self.tlog.log_message(\"[{}] batch mean loss:{:.5f}\".format(\"epoch:{}\".format(epoch), float(total_loss/data_num)), \"LOG\", \"train\")\n\n # check train validation\n if epoch % self.args.trainval_every == 0:\n self.validate(epoch)\n\n self.lr_policy.decay_lr(**decay_arg)\n #if epoch % self.args.decay_every == 0:\n # for param_group in self.optimizer.param_groups:\n # param_group['lr'] *= self.args.decay_value\n #\n # self.tlog.log_message(\"[epoch:{}] decay learning rate by {}\".format(epoch, self.args.decay_value), \"LOG\", \"train\")\n \n # save model\n if epoch % self.args.save_every == 0:\n state = {'epoch': epoch,\n 'optimizer_state_dict' : self.optimizer.state_dict()}\n self.model.save(add_state=state, file_name=os.path.join(self.model_param_dir,'model_param_e{}.pth'.format(epoch)))\n \n self.tlog.log_message(\"[epoch:{}] model saved.\".format(epoch), \"LOG\", \"train\")\n\n self.model.save(add_state={'optimizer_state_dict' : self.optimizer.state_dict()},\n file_name=os.path.join(self.model_param_dir, 'model_param_fin_{}.pth'.format(datetime.now().strftime(\"%Y%m%d_%H-%M-%S\"))))\n\n print(\"data is saved at {}\".format(self.save_dir))\n\n def test_loader(self):\n from matplotlib import pylab as plt\n import time\n\n if self.args.quiet:\n epochs = range(1, self.args.epochs+1)\n else:\n epochs = self.to_tqdm(range(1, self.args.epochs+1), desc=\"train\")\n\n for epoch in epochs:\n if self.args.quiet:\n _train_loader = self.train_loader\n else:\n _train_loader = self.to_tqdm(self.train_loader)\n\n for img, mask in _train_loader:\n batch_size = img.shape[0]\n\n img = img.numpy()\n mask = mask.numpy()\n\n for i in range(batch_size):\n _img = np.uint8(img[i]*255).transpose(1,2,0)\n _mask = self.convert_to_color_map(np.uint8(mask[i]), self.cmap)\n\n merged_img = np.concatenate([_img, _mask], axis=1)\n\n plt.imshow(merged_img)\n plt.show()\n\n"
] | [
[
"matplotlib.pylab.show",
"numpy.uint8",
"numpy.concatenate",
"torch.no_grad",
"numpy.mean",
"torch.cuda.is_available",
"torch.nn.functional.interpolate",
"torch.device",
"matplotlib.pylab.imshow",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
solad5/acgan-gpt2 | [
"52901a996fd235355f8c3f6b83037c85b1fdb415",
"52901a996fd235355f8c3f6b83037c85b1fdb415"
] | [
"gpt2_model.py",
"embedders.py"
] | [
"'''\n code by TaeHwan Jung(@graykode)\n Original Paper and repository here : https://github.com/openai/gpt-2\n GPT2 Pytorch Model : https://github.com/huggingface/pytorch-pretrained-BERT\n'''\n\nimport copy\nimport torch\nimport math\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\n\ndef gelu(x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\ndef load_weight(model, state_dict):\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if key.endswith(\".g\"):\n new_key = key[:-2] + \".weight\"\n elif key.endswith(\".b\"):\n new_key = key[:-2] + \".bias\"\n elif key.endswith(\".w\"):\n new_key = key[:-2] + \".weight\"\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n start_model = model\n if hasattr(model, \"transformer\") and all(not s.startswith('transformer.') for s in state_dict.keys()):\n start_model = model.transformer\n load(start_model, prefix=\"\")\n\n # Make sure we are still sharing the output and input embeddings after loading weights\n model.set_tied()\n return model\n\nclass LayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(LayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\n\nclass Conv1D(nn.Module):\n def __init__(self, nf, nx):\n super(Conv1D, self).__init__()\n self.nf = nf\n w = torch.empty(nx, nf)\n nn.init.normal_(w, std=0.02)\n self.weight = Parameter(w)\n self.bias = Parameter(torch.zeros(nf))\n\n def forward(self, x):\n size_out = x.size()[:-1] + (self.nf,)\n x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)\n x = x.view(*size_out)\n return x\n\n\nclass Attention(nn.Module):\n def __init__(self, nx, n_ctx, config, scale=False):\n super(Attention, self).__init__()\n n_state = nx # in Attention: n_state=768 (nx=n_embd)\n # [switch nx => n_state from Block to Attention to keep identical to TF implem]\n assert n_state % config.n_head == 0\n self.register_buffer(\"bias\", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))\n self.n_head = config.n_head\n self.split_size = n_state\n self.scale = scale\n self.c_attn = Conv1D(n_state * 3, nx)\n self.c_proj = Conv1D(n_state, nx)\n\n def _attn(self, q, k, v):\n w = torch.matmul(q, k)\n if self.scale:\n w = w / math.sqrt(v.size(-1))\n nd, ns = w.size(-2), w.size(-1)\n b = self.bias[:, :, ns - nd:ns, :ns]\n # Here the bias b also serves as the mask to remove future 
information\n w = w * b - 1e10 * (1 - b)\n w = nn.Softmax(dim=-1)(w)\n return torch.matmul(w, v)\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def forward(self, x, layer_past=None):\n x = self.c_attn(x)\n query, key, value = x.split(self.split_size, dim=2)\n query = self.split_heads(query)\n key = self.split_heads(key, k=True)\n value = self.split_heads(value)\n if layer_past is not None:\n past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below\n key = torch.cat((past_key, key), dim=-1)\n value = torch.cat((past_value, value), dim=-2)\n present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking\n a = self._attn(query, key, value)\n a = self.merge_heads(a)\n a = self.c_proj(a)\n return a, present\n\n\nclass MLP(nn.Module):\n def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)\n super(MLP, self).__init__()\n nx = config.n_embd\n self.c_fc = Conv1D(n_state, nx)\n self.c_proj = Conv1D(nx, n_state)\n self.act = gelu\n\n def forward(self, x):\n h = self.act(self.c_fc(x))\n h2 = self.c_proj(h)\n return h2\n\n\nclass Block(nn.Module):\n def __init__(self, n_ctx, config, scale=False):\n super(Block, self).__init__()\n nx = config.n_embd\n self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)\n self.attn = Attention(nx, n_ctx, config, scale)\n self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)\n self.mlp = MLP(4 * nx, config)\n\n def forward(self, x, layer_past=None):\n a, present = self.attn(self.ln_1(x), layer_past=layer_past)\n x = x + a\n m = self.mlp(self.ln_2(x))\n x = x + m\n return x, present\n\n\nclass Transformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.n_layer = config.n_layer\n self.n_embd = config.n_embd\n self.n_vocab = config.vocab_size\n\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n block = Block(config.n_ctx, config, scale=True)\n self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])\n self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n\n def set_embeddings_weights(self, model_embeddings_weights):\n embed_shape = model_embeddings_weights.shape\n self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)\n self.decoder.weight = model_embeddings_weights # Tied weights\n\n def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):\n if past is None:\n past_length = 0\n past = [None] * len(self.h)\n else:\n past_length = past[0][0].size(-2)\n if position_ids is None:\n position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long,\n device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_ids.size(-1))\n position_ids = position_ids.view(-1, position_ids.size(-1))\n\n inputs_embeds = self.wte(input_ids)\n position_embeds = self.wpe(position_ids)\n if token_type_ids is not 
None:\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))\n token_type_embeds = self.wte(token_type_ids)\n else:\n token_type_embeds = 0\n hidden_states = inputs_embeds + position_embeds + token_type_embeds\n presents = []\n for block, layer_past in zip(self.h, past):\n hidden_states, present = block(hidden_states, layer_past)\n presents.append(present)\n hidden_states = self.ln_f(hidden_states)\n output_shape = input_shape + (hidden_states.size(-1),)\n return hidden_states.view(*output_shape), presents\n\n\nclass LinearReadoutHead(nn.Module):\n def __init__(self, model_embeddings_weights, config):\n super().__init__()\n self.n_embd = config.n_embd\n self.set_embeddings_weights(model_embeddings_weights)\n\n def set_embeddings_weights(self, model_embeddings_weights):\n embed_shape = model_embeddings_weights.shape\n self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)\n self.decoder.weight = model_embeddings_weights # Tied weights\n\n def forward(self, hidden_state):\n # Truncated Language modeling logits (we remove the last token)\n # h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)\n lm_logits = self.decoder(hidden_state)\n return lm_logits\n\n\nclass GPT2(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transformer = Transformer(config)\n self.readout_head = LinearReadoutHead(self.transformer.wte.weight, config)\n\n def set_tied(self):\n \"\"\" Make sure we are sharing the embeddings\n \"\"\"\n self.readout_head.set_embeddings_weights(self.transformer.wte.weight)\n\n def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):\n hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)\n return hidden_states",
"from transformers import *\nimport pdb\nfrom torch.nn.utils import weight_norm as wn\nfrom tqdm import tqdm\nfrom torch.nn.utils.rnn import pad_sequence\nimport torch.nn as nn\nimport torch\nfrom bpe_encoder import get_codec\nfrom gpt2_model import GPT2\nfrom utils import parse_config\n\ndef bert_encoder():\n return BERTEncoder()\n\ndef gpt2_encoder():\n return GPT2Encoder()\n\ndef class_embedding(n_classes, embedding_dim):\n return nn.Embedding(n_classes, embedding_dim)\n\n\ndef unconditional(n_classes, embedding_dim):\n return nn.Embedding(n_classes, embedding_dim)\n\n\nclass Embedder(nn.Module):\n def __init__(self, embed_size):\n super(Embedder, self).__init__()\n self.embed_size = embed_size\n\n def forward(self, class_labels, captions):\n raise NotImplementedError\n\n\nclass BERTEncoder(Embedder):\n '''\n pretrained model used to embed text to a 768 dimensional vector\n '''\n\n def __init__(self):\n super(BERTEncoder, self).__init__(embed_size=768)\n self.pretrained_weights = 'bert-base-uncased'\n self.tokenizer = BertTokenizer.from_pretrained(self.pretrained_weights)\n self.model = BertModel.from_pretrained(self.pretrained_weights)\n self.max_len = 50\n\n def tokenize(self, text_batch):\n text_token_ids = [\n torch.tensor(self.tokenizer.encode(string_, add_special_tokens=False, max_length=self.max_len)) for\n string_ in text_batch]\n padded_input = pad_sequence(text_token_ids, batch_first=True, padding_value=0)\n return padded_input\n\n def forward(self, class_labels, captions):\n '''\n :param class_labels : torch.LongTensor, class ids\n :param list captions: list of strings, sentences to embed\n :return: torch.tensor embeddings: embeddings of shape (batch_size,embed_size=768)\n '''\n\n padded_input = self.tokenize(captions)\n device = list(self.parameters())[0].device\n padded_input = padded_input.to(device)\n # takes the mean of the last hidden states computed by the pre-trained BERT encoder and return it\n return self.model(padded_input)[0].mean(dim=1)\n\n\nclass GPT2Encoder(Embedder):\n '''\n pretrained model used to embed text to a 768 dimensional vector\n '''\n def __init__(self):\n super(GPT2Encoder, self).__init__(embed_size=768)\n self.codec = get_codec()\n self.gpt2_config = parse_config()\n self.gpt2_model = GPT2(self.gpt2_config)\n\n def forward(self, class_labels, captions):\n '''\n :param class_labels: torch.LongTensor, class ids\n :param list captions: list of strings, sentences to embed\n :return: torch.tensor embeddings: embeddungs of shape (batch_size, embed_size=768)\n '''\n count = 0\n for caption in captions:\n curembedding = self.gpt2_model(self.codec.encode(caption))\n curembedding = torch.mean(curembedding, dim=1)\n if count == 0:\n res = curembedding\n count += 1\n else:\n res = torch.cat((res, curembedding), dim=0)\n return res\n"
] | [
[
"torch.nn.Softmax",
"torch.ones",
"torch.empty",
"torch.zeros",
"torch.sqrt",
"torch.cat",
"torch.nn.Embedding",
"torch.matmul",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.parameter.Parameter",
"torch.pow"
],
[
"torch.mean",
"torch.nn.utils.rnn.pad_sequence",
"torch.nn.Embedding",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ManuLado/Enviar-comandos-a-marlin | [
"5ba596c9b0db47125e2e29ed8084e61d326e8777",
"5ba596c9b0db47125e2e29ed8084e61d326e8777"
] | [
"take_images.py",
"pano_libs/P0.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Graba video leido desde la arducam\n# Se le debe indicar el archivo de video a grabar y\n# la duración de la captura en segundos.\n\n# SINTAXIS: python capturar_video.py VIDEO TIEMPO\n# 1- Ruta del video\n# 2- Tiempo de grabacion en segundos\n\nfrom ctypes import *\n\nimport ctypes\n\nimport sys\nimport os\n\nimport time\nfrom PIL import Image\nimport numpy as np\nimport thread as thread\nimport math\n\nfrom select import select\nfrom evdev import InputDevice\nfrom evdev import ecodes\nfrom astropy.io import fits\nimport ArducamSDK\n\n# Analisis de argumentos\nif (len(sys.argv)==3):\n NOMBREIMG = sys.argv[1];\n NUMIMG = int(sys.argv[2]);\nelse:\n print (\"Se requieren 2 argumentos: NOMBRE_IMAGENES NUMERO_IMAGENES\")\n exit()\n\n#### CONFIGURACION ARDUCAMSDK ################\nCOLOR_BYTE2RGB = 47 # No se modifico del original\nCAMERA_MT9M001 = 0x4D091031 # No se modifico del original\nSensorShipAddr = 186\nI2C_MODE_8_16 = 1\nusbVid = 0x52CB # No se modifico del original\nWidth = 1280 #1280\nHeight = 1024 #1024\ncfg ={\"u32CameraType\":CAMERA_MT9M001,\n \"u32Width\":Width,\"u32Height\":Height,\n \"u32UsbVersion\":1,\n \"u8PixelBytes\":1,\n \"u16Vid\":0x52cb,\n \"u8PixelBits\":8,\n \"u32SensorShipAddr\":SensorShipAddr,\n \"emI2cMode\":I2C_MODE_8_16 }\n\n# FLAGS\nglobal saveFlag,downFlag,flag,H_value,V_value,lx,ly,mx,my,dx,dy,W_zoom,H_zooM,handle,openFlag,initTime,storeFlag,bufferData,globalGain\nglobal testPatternFlag\nglobal integrationTime\nglobal shutterWidth\n\nopenFlag = False\nhandle = {}\ndownFlag = False\nflag = True\nsaveFlag = False\nstoreFlag = False\nsaveNum=0\nH_value = 0\nV_value = 0\nW_zoom = 0\nH_zoom = 0\nlx = 0\nly = 0\nmx = 0\nmy = 0\ndx = 0\ndy = 0\ntestPatternFlag = False;\n\nregArr=[[0x01, 0x000C], # Row Start\n [0x02, 0x0014], # Column Start\n [0x03, Height - 1], # Window Height 0x03FF\n [0x04, Width - 1], # Window Width 0x04FF\n [0x05, 0x0009], # Horizontal Blanking\n [0x06, 0x0019], # Vertical Blanking\n [0x07, 0x0002], # Output Control\n [0x09, 0x0419], # Shutter Width 0x0419 (max: 0x3FFF)\n [0x0B, 0x0000], # Frame Restart\n [0x0C, 0x0000],#0x0100], \n [0x0D, 0x0000], \n [0x1E, 0x8000], # Read Mode 1 0x8000\n [0x20, 0x1104], \n [0x2B, 0x0008], \n [0x2C, 0x0008], \n [0x2D, 0x0008], \n [0x2E, 0x0008],\n [0x32, 0x0FFC], # Test Data Register\n [0x35, 0x0067], # Global Gain 0x0008 (max: 0x0067)\n [0x5F, 0x0904], \n #[0x60, 0x0000], # BLC offset: Even row, even column\n #[0x61, 0x0000], # BLC offset: Odd row, odd column\n #[0x62, 0x049F], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)\n #[0x63, 0x0000], # BLC offset: Even row, odd column\n #[0x64, 0x0000], # BLC offset: Odd row, Even column\n [0x60, 0x002F], # BLC offset: Even row, even column\n [0x61, 0x002F], # BLC offset: Odd row, odd column\n [0x62, 0x0499], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)\n [0x63, 0x000F], # BLC offset: Even row, odd column\n [0x64, 0x000F], # BLC offset: Odd row, Even column\n [0xF1, 0x0001], \n [0xFFFF, 0xFFFF]\n]\n\nglobalGain = regArr[18][1];\n\n# Cálculo del tiempo de integración inicial (pag 16 del datasheet)\nrowTime = regArr[3][1] + 1 + 244 + regArr[4][1] - 19; #[pixel clock periods] default: 1514\nresetDelay = 4*regArr[9][1] #[pixel clock periods] default: 0\noverheadTime = 180; #[pixel clock periods]\nshutterWidth = regArr[7][1]\nintegrationPeriods = shutterWidth*rowTime - overheadTime - resetDelay;\nclockPeriod = 1000.0/24e6; 
#[ms]\nintegrationTime = integrationPeriods * clockPeriod; #[ms]\nwith open('integrationtime.txt','w') as it:\n it.write(str(integrationTime)+\"\\n\")\n\nprint (\"Initial integration time: %.3fms\"%(integrationTime));\nprint (\"Initial gain: 0x%02x\"%(globalGain));\n\na_lock = thread.allocate_lock();\n\ndef readThread(threadName,read_Flag):\n global flag,handle,storeFlag,bufferData,openFlag\n global a_lock\n count = 0\n time0 = time.time()\n time1 = time.time()\n data = {}\n # Wait for the arducam object to be ready\n while openFlag == False:\n time1 = time.time();\n if time1 - time0 > 20:\n #timeout\n return\n\n while flag:\n res = ArducamSDK.Py_ArduCam_available(handle)\n #~ print \"Available frames %d\"%(res)\n if res > 0:\n \n res,data = ArducamSDK.Py_ArduCam_read(handle,Width * Height)\n if res == 0:\n count += 1\n time1 = time.time()\n ArducamSDK.Py_ArduCam_del(handle)\n else:\n print (\"read data fail!\")\n \n else:\n #print \"No data available\"\n time.sleep(.01);\n \n if len(data) >= Width * Height:\n if time1 - time0 >= 5:\n print (\"%s %f %s\\n\"%(\"fps:\",count*1.0/(time1-time0),\"/s\"))\n count = 0\n time0 = time1\n \n a_lock.acquire();\n bufferData = data;\n data = [];\n storeFlag = True;\n a_lock.release();\n #show(data)\n\t\t#else:\n\t\t#\tprint \"data length is not enough!\"\n if flag == False:\n break\n \nthread.start_new_thread( readThread,(\"Thread-2\", flag,))\n\npass\n\ndef showAndSave(threadName,algoquenoseusa):\n global flag,W_zoom,H_zoom,V_value,H_value,lx,ly,downFlag,saveFlag,saveNum,bufferData,storeFlag\n global a_lock\n global hist_ax\n global NOMBREIMG\n img = np.zeros((Height, Width), dtype=np.uint8);\n while flag:\n a_lock.acquire();\n if storeFlag == True:\n storeFlag = False;\n img = np.frombuffer(bufferData, np.uint8)\n img = np.reshape(img, (Height, Width));\n\n saveNum += 1\n #name = NOMBREIMG + str(saveNum) + \".fits\"\n\t #name = NOMBREIMG + \"_\" + str(saveNum) + \".jpeg\"\n name = NOMBREIMG + \".fits\"\n hdu=fits.PrimaryHDU()\n hdu.data=img\n hdu.writeto(name,overwrite=True)\n print (\"Frame saved to %s\"%(name))\n \n a_lock.release();\n \n if saveNum == NUMIMG:\n flag=False;\n print (\"Total number of adq images = %d\"%(saveNum))\n \n if flag == False:\n break\nthread.start_new_thread( showAndSave,(\"Thread-3\",flag))\npass\n\ndef init_and_read_arducam():\n\tglobal flag,regArr,handle,openFlag\n\tregNum = 0\n\tres,handle = ArducamSDK.Py_ArduCam_autoopen(cfg)\n\tif res == 0:\n\t\topenFlag = True\n\t\tprint (\"device open success!\")\n\t\twhile (regArr[regNum][0] != 0xFFFF):\n\t\t\tArducamSDK.Py_ArduCam_writeSensorReg(handle,regArr[regNum][0],regArr[regNum][1])\n\t\t\tregNum = regNum + 1\n\t\tres = ArducamSDK.Py_ArduCam_beginCapture(handle)\n\t\t\n\t\tif res == 0:\n\t\t\tprint (\"transfer task create success!\")\n\t\t\twhile flag :\t\t\n\t\t\t\tres = ArducamSDK.Py_ArduCam_capture(handle)\n\t\t\t\tif res != 0:\n\t\t\t\t\tprint (\"capture failed!\")\n\t\t\t\t\tflag = False;\n\t\t\t\t\tbreak;\t\t\t\t\t\n\t\t\t\ttime.sleep(0.1)\n\t\t\t\tif flag == False:\t\t\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\tprint (\"transfer task create fail!\")\n\t\t\n\t\ttime.sleep(2);\n\t\tres = ArducamSDK.Py_ArduCam_close(handle)\n\t\tif res == 0:\n\t\t\topenFlag = False\n\t\t\tprint (\"device close success!\")\n\t\telse:\n\t\t\tprint (\"device close fail!\")\n\telse:\n\t\tprint (\"device open fail!\")\n\nif __name__ == \"__main__\":\n\tinitTime = time.time();\n\tinit_and_read_arducam();\n\n",
"from random import randrange\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport cv2\r\nimport argparse\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"echo\", help=\"echo the string you use here\")\r\nparser.add_argument(\"nombre1\", help=\"echo the string you use here\")\r\nparser.add_argument(\"nombre2\", help=\"echo the string you use here\")\r\nparser.add_argument(\"nombre3\", help=\"echo the string you use here\")\r\nargs = parser.parse_args()\r\n\r\ndire=args.echo\r\n\r\n\r\nname1=dire+'/'+args.nombre1\r\nname2=dire+'/'+args.nombre2\r\nk=args.nombre3\r\n\r\nfigsize = (10, 10)\r\nrgb_l = cv2.cvtColor(cv2.imread(name1), cv2.COLOR_BGR2RGB)\r\ngray_l = cv2.cvtColor(rgb_l, cv2.COLOR_RGB2GRAY)\r\nrgb_r = cv2.cvtColor(cv2.imread(name2), cv2.COLOR_BGR2RGB)\r\ngray_r = cv2.cvtColor(rgb_r, cv2.COLOR_RGB2GRAY)\r\n# use orb if sift is not installed\r\nfeature_extractor = cv2.ORB_create()\r\n\r\n# find the keypoints and descriptors with chosen feature_extractor\r\nkp_l, desc_l = feature_extractor.detectAndCompute(gray_l, None)\r\nkp_r, desc_r = feature_extractor.detectAndCompute(gray_r, None)\r\n\r\ntest = cv2.drawKeypoints(rgb_l, kp_l, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n\r\nplt.figure(figsize=figsize)\r\nplt.imshow(test)\r\nplt.title(\"keypoints\")\r\n#plt.show()\r\nbf = cv2.BFMatcher()\r\nmatches = bf.knnMatch(desc_l, desc_r, k=2)\r\n\r\n# Apply ratio test\r\ngood_match = []\r\nfor m in matches:\r\n if m[0].distance/m[1].distance < 0.5:\r\n good_match.append(m)\r\ngood_match_arr = np.asarray(good_match)\r\n\r\n# show only 30 matches\r\nim_matches = cv2.drawMatchesKnn(rgb_l, kp_l, rgb_r, kp_r,\r\n good_match[0:30], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\r\n\r\nplt.figure(figsize=(20, 20))\r\nplt.imshow(im_matches)\r\nplt.title(\"keypoints matches\")\r\n#plt.show()\r\ngood_kp_l = np.array([kp_l[m.queryIdx].pt for m in good_match_arr[:, 0]]).reshape(-1, 1, 2)\r\ngood_kp_r = np.array([kp_r[m.trainIdx].pt for m in good_match_arr[:, 0]]).reshape(-1, 1, 2)\r\nH, masked = cv2.findHomography(good_kp_r, good_kp_l, cv2.RANSAC, 5.0)\r\n\r\nprint(H)\r\nrgb_r_warped = cv2.warpPerspective(rgb_r, H, (rgb_l.shape[1] + rgb_r.shape[1], rgb_l.shape[0]))\r\nrgb_r_warped[0:rgb_l.shape[0], 0:rgb_l.shape[1]] = rgb_l\r\n\r\nplt.figure(figsize=figsize)\r\nplt.imshow(rgb_r_warped)\r\nplt.title(\"naive warping\")\r\n#plt.show()\r\ndef warpTwoImages(img1, img2, H):\r\n '''warp img2 to img1 with homograph H\r\n from: https://stackoverflow.com/questions/13063201/how-to-show-the-whole-image-when-using-opencv-warpperspective\r\n '''\r\n h1, w1 = img1.shape[:2]\r\n h2, w2 = img2.shape[:2]\r\n pts1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)\r\n pts2 = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)\r\n pts2_ = cv2.perspectiveTransform(pts2, H)\r\n pts = np.concatenate((pts1, pts2_), axis=0)\r\n [xmin, ymin] = np.int32(pts.min(axis=0).ravel() - 0.5)\r\n [xmax, ymax] = np.int32(pts.max(axis=0).ravel() + 0.5)\r\n t = [-xmin, -ymin]\r\n Ht = np.array([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]]) # translate\r\n\r\n result = cv2.warpPerspective(img2, Ht@H, (xmax-xmin, ymax-ymin))\r\n result[t[1]:h1+t[1], t[0]:w1+t[0]] = img1\r\n return result\r\n\r\n\r\nresult = warpTwoImages(rgb_l, rgb_r, H)\r\n\r\nplt.figure(figsize=figsize)\r\nplt.imshow(result)\r\nplt.title(\"better warping\")\r\n#plt.show()\r\ncv2.imwrite(dire+\"_P0/\"+str(k)+\".jpg\",result)"
] | [
[
"numpy.reshape",
"numpy.frombuffer",
"numpy.zeros"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"numpy.asarray",
"numpy.concatenate",
"numpy.float32",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
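The capture script in the record above reads raw sensor bytes from the Arducam SDK and decodes them with numpy before writing FITS frames. A minimal sketch of just that decode step, with placeholder `Width`/`Height` values and a zero-filled buffer standing in for the SDK read payload (neither comes from the record):

```python
# Sketch of the frame decode in the capture thread: raw bytes returned by
# ArducamSDK.Py_ArduCam_read become an 8-bit grayscale frame.
import numpy as np

Width, Height = 1280, 964             # placeholder sensor dimensions
buffer_data = bytes(Width * Height)   # stand-in for the SDK read payload

img = np.frombuffer(buffer_data, np.uint8)
img = np.reshape(img, (Height, Width))
assert img.shape == (Height, Width)
```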
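The stitching script in the same record builds a panorama from ORB keypoints, a ratio-tested brute-force match, and a RANSAC homography. A condensed sketch of that pipeline with hypothetical input filenames; note that for ORB's binary descriptors `cv2.NORM_HAMMING` is the appropriate BFMatcher norm (the original uses the default L2 norm):

```python
import cv2
import numpy as np

# "left.jpg"/"right.jpg" are placeholder inputs, not from the record.
gray_l = cv2.imread("left.jpg", cv2.IMREAD_GRAYSCALE)
gray_r = cv2.imread("right.jpg", cv2.IMREAD_GRAYSCALE)

orb = cv2.ORB_create()
kp_l, desc_l = orb.detectAndCompute(gray_l, None)
kp_r, desc_r = orb.detectAndCompute(gray_r, None)

# Brute-force matching with Lowe's ratio test (ratio 0.5, as in the record).
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
pairs = matcher.knnMatch(desc_l, desc_r, k=2)
good = [p[0] for p in pairs
        if len(p) == 2 and p[0].distance < 0.5 * p[1].distance]

# Homography mapping the right image onto the left, robust to outliers.
src = np.float32([kp_r[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
dst = np.float32([kp_l[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
H, _mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)

warped = cv2.warpPerspective(
    gray_r, H, (gray_l.shape[1] + gray_r.shape[1], gray_l.shape[0]))
```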
AndrewFalkowski/SODIS_SIM | [
"4d5da3e0872ee747d399d66fdee1633e7d2b8ab1"
] | [
"BoxThermal.py"
] | [
"import numpy as np\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport numba\nimport time\nfrom scipy.integrate import odeint\n\n\n\n# a sample differential equation dy/dx = (x-y)/2\n\n# def dydx(x,y):\n# return ((x-y)/2)\n\n# # find the value of y for a given x using step size h\n# # and an initial value y0 at x0\n\n# def rungeKutta(x0, y0, x, h):\n# #count num iteratings using step size or step height h\n# n = int(((x - x0)/h))\n# # iterate for number of iterations\n# y = y0\n# for i in range(1, n + 1):\n# # apply runge kutta formulas to find the next value of y\n# k1 = h * dydx(x0, y)\n# k2 = h * dydx(x0 + 0.5 * h, y + 0.5 * k1)\n# k3 = h * dydx(x0 + 0.5 * h, y + 0.5 * k2)\n# k4 = h * dydx(x0 + h, y + k3)\n\n# # update the next value of y\n# y = y + (1.0 / 6.0) * (k1 + 2*k2 + 2*k3 + k4)\n\n# # update the next value of x\n# x0 = x0 + h\n\n# return y\n\n\n# # driver method\n# x0 = 0\n# y = 1\n# x = 2\n# h = 0.2\n# print('The value of y at x is:', rungeKutta(x0, y, x, h))\n\ndef box_dim(A_c, h, prct_f):\n # all dimensions in meters\n box_vol = A_c * h\n vol_f = box_vol * prct_f # L\n m_a = box_vol * (1-prct_f) * 1.225\n m_f = vol_f * 997 # kg\n print('Contained Water: ', m_f, 'Liters')\n A_s = 4 * h * np.sqrt(A_c)\n return m_f, m_a, A_s\n\n# m_f, m_a, A_s = box_dim(0.25, 0.15, 0.9)\n\n\ndef boxODE(x, t, m_f, m_a, A_s):\n\n # constants\n A_c = 0.25 # square meters\n A_s = A_s\n A_f = A_c # square meters\n T_amb = 298 # kelvin\n T_sky = T_amb - 6 # kelvin\n alpha_g = 0.02 # %\n alpha_p = 0.98\n t_g = 0.9 # %\n t_f = 0.85 # %\n # print(t)\n Irr = 0.0426*(t) + 1.38E-6*(t)**2 - 7.94E-11*(t)**3 + 7.3E-16*(t)**4\n # Irr = 600\n x_b = 0.065 # insulation thickness meters\n x_s = 0.065 # insulation thickness meters\n\n k_i = 1.0 # thermal conductivity of side materials, foamed glass # W/mK\n h_rad_g2_g1 = 8\n h_cov_g2_g1 = 20\n h_rad_g1_sky = 8\n h_rad_g1_amb = 8\n h_rad_p_g2 = 20\n h_cov_a_g2 = 8\n h_cov_f_a = 8\n h_cov_p_f = 30\n h_cov_g1_amb = 65\n\n M_f = m_f * 4.187\n M_g1 = 1150 * (A_c * 0.001) * 1.67 # assuming acrylic\n M_g2 = M_g1\n M_p = 8960 * (A_c * 0.065) * 1.0\n # assuming coper\n M_a = 0.718 * m_a\n\n # assign each ODE to a vector element\n T_g1 = x[0]\n T_g2 = x[1]\n T_a = x[2]\n T_p = x[3]\n T_f = x[4]\n\n Q_rad_g2_g1 = h_rad_g2_g1 * A_c * (T_g2 - T_g1)\n Q_cov_g2_g1 = h_cov_g2_g1 * A_c * (T_g2 - T_g1)\n Q_rad_g1_sky = h_rad_g1_sky * A_c * (T_g1 - T_sky)\n Q_cov_g1_amb = h_rad_g1_amb * A_c * (T_g1 - T_amb)\n Q_rad_p_g2 = h_rad_p_g2 * A_c * (T_p - T_g2)\n Q_cov_a_g2 = h_cov_a_g2 * A_c * (T_a - T_g2)\n Q_cov_f_a = h_cov_f_a * (A_c) * (T_f - T_a)\n Q_cov_p_f = h_cov_p_f * A_c * (T_p - T_f)\n U_base = ((x_b/k_i) + 1/(h_cov_g1_amb))**(-1)\n U_side = ((x_s/k_i) + 1/(h_cov_g1_amb))**(-1)\n Q_amb_loss = (U_base*A_c + U_side*A_s)*(T_p - T_amb)\n\n\n\n # define each ODE\n dT_g1dt = (Irr * alpha_g * A_c + Q_rad_g2_g1 + Q_cov_g2_g1 - Q_rad_g1_sky - Q_cov_g1_amb) / M_g1\n dT_g2dt = (Irr * alpha_g * t_g * A_c + Q_rad_p_g2 + Q_cov_a_g2 - Q_rad_g2_g1) / M_g2\n dT_adt = (Q_cov_f_a - Q_cov_a_g2)/M_a\n dT_pdt = (Irr * alpha_p * t_g**2 * t_f * A_c - Q_rad_p_g2 - Q_amb_loss - Q_cov_p_f) / M_p\n dT_fdt = (Q_cov_p_f + Q_cov_f_a) / M_f\n\n return [dT_g1dt, dT_g2dt, dT_adt, dT_pdt, dT_fdt]\n\n# x0 = [298, 298, 298, 298, 285]\n\n\n# # test the defined ODES\n# print(boxODE(x=x0, t=0, m_f=m_f, m_a=m_a, A_s=A_s))\n\n\n# # declare a time vector (time window)\n# t = np.linspace(0,54000,1000)\n# x = odeint(boxODE,x0,t, args=(m_f, m_a, A_s))\n\n# Tf= x[:,4]\n# Tp = x[:,3]\n\n# # plot the results\n# 
plt.plot((t/3600)+5.8,Tf_2, label='fluid')\n# # plt.plot(t/3600,Tp, label='plate')\n# plt.legend()\n# plt.ylim(298, 340)\n# plt.xlim(0,24)\n# plt.show()\n\n#%%\n\n# xs = np.arange(27000,28201,1)\n# ys = 0.0226*xs - 295\n\n# #%%\n\n# fig = plt.figure(figsize=(5,5))\n# fig, ax1 = plt.subplots()\n\n# plt.plot((t/3600)+5.8,Tf, color='r')\n# plt.plot(xs/3600 + 5.8, ys, color='r')\n# plt.plot(np.arange(27000,27601,1)/3600+5.8, )\n# plt.hlines(338, -100, 100, linestyle=':', color='k')\n# plt.text(6.5, 339, 'Pasteurization Temperature')\n\n# ax1.tick_params(direction='in', length=7,top=True, right=True, left=True)\n# minor_locator_x = AutoMinorLocator(2)\n# minor_locator_y = AutoMinorLocator(2)\n# ax1.get_xaxis().set_minor_locator(minor_locator_x)\n# ax1.get_yaxis().set_minor_locator(minor_locator_y)\n# # rotate and align the tick labels so they look better\n# plt.tick_params(which='minor',\n# direction='in',\n# length=4,\n# right=True,\n# left=True,\n# top=True)\n# plt.xlim(6,21)\n# plt.xlabel('Hour of Day')\n# plt.ylim(298, 350)\n# plt.ylabel('Water Temperature (K)')\n\n# plt.savefig('Figures/comb_img.png', dpi=300)"
] | [
[
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
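The driver for `BoxThermal.py` is commented out in the record. Reconstructed from those comments, a sketch of how `box_dim` and `boxODE` would be integrated with scipy's `odeint` over a 15-hour window:

```python
import numpy as np
from scipy.integrate import odeint

# box_dim / boxODE are the functions defined in BoxThermal.py above.
m_f, m_a, A_s = box_dim(A_c=0.25, h=0.15, prct_f=0.9)

x0 = [298, 298, 298, 298, 285]    # T_g1, T_g2, T_a, T_p, T_f in kelvin
t = np.linspace(0, 54000, 1000)   # 15 h expressed in seconds

x = odeint(boxODE, x0, t, args=(m_f, m_a, A_s))
T_f = x[:, 4]                     # water temperature trace over time
```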
rivei/pm4py_with_dash | [
"05ed524c11b44932783864a4465d400ea1300910"
] | [
"python/pm4pyPlus.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 1 22:17:20 2019\n\n@author: Wei\n\"\"\"\n\n#from dash_app import default_log as log\nimport pandas as pd\nimport numpy as np\n#import pytz\nfrom datetime import datetime, tzinfo,timedelta\n\nfrom pm4py.statistics.traces.log import case_statistics\nfrom pm4py.algo.filtering.log.attributes import attributes_filter\n\nMAX_TRACES = 9999\n\ndef filtered_log_df(log, top_trace_n = MAX_TRACES):\n# if top_trace_n == MAX_TRACES:\n# traces_with_count = case_statistics.get_variant_statistics(log) #parameters=(\"max_variants_to_return\":5)\n# #df = pd.DataFrame.from_dict([dict(x) for x in traces_with_count])\n# df = pd.DataFrame()\n# df.columns = ['caseid','actid','actseq','resid','ts','sT']\n# else:\n n_cases = 0\n caseid = []\n actid = []\n actseq = []\n resid = []\n ts = []\n startTime = []\n for case in log:\n actidx = 0\n startT = case[0]['time:timestamp'].timestamp()\n for event in case:\n caseid.append(n_cases)\n actid.append(event['concept:name'])\n actseq.append(actidx)\n resid.append(event['org:resource'])\n ts.append(event['time:timestamp'].timestamp())\n startTime.append(event['time:timestamp'].timestamp() - startT)\n actidx = actidx + 1\n n_cases = n_cases + 1\n df = pd.DataFrame({'caseid': caseid, \n 'actid':actid, \n 'actseq':actseq, \n 'resid':resid, \n 'ts':ts, \n 'sT': startTime}) \n df['preid'] = df['actid'].shift(1)\n df['preid'] = df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)\n\n return df\n\ndef n_cases(log, top_trace_n = MAX_TRACES):\n if top_trace_n == MAX_TRACES:\n df = filtered_log_df(log)\n else:\n df = filtered_log_df(log, top_trace_n)\n return len(df['caseid'].unique())\n \n\ndef n_events(log):\n df = filtered_log_df(log)\n return len(df)\n \ndef n_activities(log):\n df = filtered_log_df(log)\n return len(df['actid'].unique())\n\ndef n_resources(log):\n df = filtered_log_df(log)\n return len(df['resid'].unique())\n\ndef n_traces(log, top_trace_n = MAX_TRACES):\n if top_trace_n == MAX_TRACES:\n traces_with_count = case_statistics.get_variant_statistics(log) #parameters=(\"max_variants_to_return\":5)\n else:\n traces_with_count = case_statistics.get_variant_statistics(log, parameters={\"max_variants_to_return\":top_trace_n})\n \n df = pd.DataFrame.from_dict([dict(x) for x in traces_with_count])\n return len(df)\n\ndef acts_df(log):\n activities = attributes_filter.get_attribute_values(log, \"concept:name\")\n actid = []\n cnt = []\n for act0 in activities.items():\n actid.append(act0[0])\n cnt.append(act0[1]) \n return pd.DataFrame({'id':actid, 'cnt':cnt})\n\ndef traces_df(log):\n traces = case_statistics.get_variant_statistics(log) \n tid = []\n actid = []\n actseq = []\n cnt = []\n n_traces = 0\n for trace in traces:\n actidx = 0\n acts = trace['variant']\n for s in acts.split(','):\n tid.append(n_traces)\n actid.append(s)\n actseq.append(actidx)\n cnt.append(trace['count'])\n actidx = actidx+1\n n_traces = n_traces + 1\n \n trace_df = pd.DataFrame({'id': tid, 'actid': actid, 'actseq':actseq, 'cnt':cnt})\n trace_df['preid'] = trace_df['actid'].shift(1)\n trace_df['preid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1) \n trace_df['pre_post'] = trace_df.apply(lambda row: row['preid']+\"@@\"+row['actid'], axis = 1)\n \n# def actid2num(sactid, df):\n# nactid = -1\n# for i in range(0, len(df)):\n# if df['id'][i] == sactid:\n# nactid = i/len(df)\n# return nactid\n# \n# act_df = acts_df(log)\n# trace_df['nactid'] = trace_df['actid'].apply(lambda 
i:actid2num(i, act_df))\n return trace_df\n \n \ndef sort_df(log):\n df = filtered_log_df(log) \n dur = np.zeros(len(df))\n evS = 0\n evE = -1\n for i in range(0, len(df)):\n if df['actseq'][i] == 0:\n evS = i\n if i < len(df) - 1:\n if df['actseq'][i + 1] == 0:\n evE = i\n else:\n evE = i\n \n if evE >= evS:\n for j in range(evS, evE+1):\n dur[j] = df['sT'][evE-1]\n\n df['dur'] = dur\n \n sort_df = df.sort_values(by=['dur','caseid', 'actseq'], ascending = [0,1,1])\n \n sortid = 0\n sid = np.zeros(len(sort_df))\n for i in range(1, len(sort_df)):\n if i < len(sort_df) - 1:\n if sort_df.iloc[i,:]['caseid'] != sort_df.iloc[i-1,:]['caseid']:\n sortid = sortid + 1\n \n sid[i] = sortid\n \n sort_df['sid'] = sid\n return sort_df\n\ndef mtx_df(log):\n df = traces_df(log)\n prelist = (df['preid'].unique())\n actlist = (df['actid'].unique())\n dff = pd.DataFrame(columns=prelist,index = actlist)\n# dff.columns = actlist\n# dff.index = prelist\n\n mtxdf1 = df.groupby('pre_post')['cnt'].sum() #agg(['sum','count','mean'])\n #mtxdf1['abs'] = mtxdf1['sum']/mtxdf1['count']\n# mtxdf= pd.DataFrame({'pre_post':mtxdf1.index, 'cnt': list(mtxdf1)})\n \n for s in mtxdf1.index:\n a = s.split(\"@@\")\n if len(a) != 2:\n print(a[0], a[1])\n else:\n dff[a[0]][a[1]] = mtxdf1[s]\n\n return dff\n \n#\n#activities = log_attributes_filter.get_attribute_values(log, \"concept:name\")\n#actid = []\n#cnt = []\n#for act0 in activities.items():\n# actid.append(act0[0])\n# cnt.append(act0[1])\n#\n#act_df = pd.DataFrame({'id':actid, 'cnt':cnt})\n#\n#n_activities = len(act_df)\n#\n#from pm4py.statistics.traces.log import case_statistics\n#traces = case_statistics.get_variant_statistics(log)#, parameters={\"max_variants_to_return\": 5})\n#\n##acts = []\n##cnt = []\n##tid = []\n##idx = 0\n##for trace in traces:\n## tid.append(idx)\n## acts.append(trace['variant'])\n## cnt.append(trace['count'])\n## idx = idx + 1\n## \n##trace_df = pd.DataFrame({'id': tid, 'acts': acts, 'cnt':cnt})\n##n_traces = len(trace_df)\n#\n#tid = []\n#actid = []\n#actseq = []\n#cnt = []\n#n_traces = 0\n#for trace in traces:\n# actidx = 0\n# acts = trace['variant']\n# for s in acts.split(','):\n# tid.append(n_traces)\n# actid.append(s)\n# actseq.append(actidx)\n# cnt.append(trace['count'])\n# actidx = actidx+1\n# n_traces = n_traces + 1\n# \n#trace_df = pd.DataFrame({'id': tid, 'actid': actid, 'actseq':actseq, 'cnt':cnt})\n#trace_df['preid'] = trace_df['actid'].shift(1)\n#trace_df['preid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)\n##trace_df['postid'] = trace_df['actid'].shift(1)\n##trace_df['postid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)\n#\n#trace_df['pre_post'] = trace_df.apply(lambda row: row['preid']+\"-\"+row['actid'], axis = 1)\n#\n#def actid2num(sactid, df):\n# nactid = -1\n# for i in range(0, len(df)):\n# if df['id'][i] == sactid:\n# nactid = i/len(df)\n# return nactid\n#\n##actid2num(\"Confirmation of receipt\", act_df)\n#\n#trace_df['nactid'] = trace_df['actid'].apply(lambda i:actid2num(i, act_df))\n#\n## matrix\n#df['pre_post'] = df.apply(lambda row: row['preid']+\"-\"+row['actid'], axis = 1)\n##mtxdf1 = pd.DataFrame({'ant':df['preid'],'con':df})\n#mtxdf1 = df[df['preid']!='START'].groupby('pre_post')['caseid'].count() #agg(['sum','count','mean'])\n##mtxdf1['abs'] = mtxdf1['sum']/mtxdf1['count']\n#mtxdf= pd.DataFrame({'pre_post':mtxdf1.index, 'cnt': list(mtxdf1)})\n#\n##roles Detection: related to resource vs activity?\n##from pm4py.algo.enhancement.roles 
import factory as roles_factory\n##roles = roles_factory.apply(log)\n#aaa\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
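Both `filtered_log_df` and `traces_df` in `pm4pyPlus.py` derive a predecessor-activity column by shifting `actid` globally and then patching the first event of each case to `'START'` with a row-wise `apply`. A grouped shift expresses the same thing without the `apply`, since the shift then never crosses a case boundary:

```python
import pandas as pd

# Toy event log: two cases, events already in order.
df = pd.DataFrame({
    "caseid": [0, 0, 0, 1, 1],
    "actid":  ["A", "B", "C", "A", "C"],
})

# Shift within each case; the first event of a case gets 'START'.
df["preid"] = df.groupby("caseid")["actid"].shift(1).fillna("START")
print(df)
#    caseid actid  preid
# 0       0     A  START
# 1       0     B      A
# 2       0     C      B
# 3       1     A  START
# 4       1     C      A
```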
guptarohit994/ECE143_group25_project | [
"e31d0425b2a6114eed6c55bdb0491c2c996b94be"
] | [
"statistical_analysis/gpa_scatter.py"
] | [
"\nimport helper\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd \n\ndef plot_gpa_scatter():\n \"\"\"Plotting scatterplot of grades expected and grade received, using the general department list\n \"\"\"\n # obtaining data\n department_df = helper.generate_depts_df(helper.general_dept_list)\n comp_criteria = [\"AvgGradeExpected\",\"AvgGradeReceived\"]\n\n # generating scatterplot graph\n lower_bound = 1.5\n upper_bound = 4.02\n ax = department_df.plot.scatter(x=comp_criteria[0], y=comp_criteria[1], c= \"grey\",ylim=(lower_bound,upper_bound),xlim=(lower_bound,upper_bound), figsize=(10,10), fontsize=20, alpha = 0.3)\n ax.set_xlabel(\"Average Grade Expected\", fontsize = 20)\n ax.set_ylabel(\"Average Grade Received\", fontsize = 20)\n\n # computing least squares best fit line and adding it onto graph\n y = department_df[\"AvgGradeReceived\"]\n x = department_df[\"AvgGradeExpected\"]\n A = np.vstack([x, np.ones(len(x))]).T\n m, c = np.linalg.lstsq(A, y, rcond=None)[0]\n print(\"m:{}, c:{}\".format(m,c))\n ax.plot(np.linspace(lower_bound,4,10),np.linspace(lower_bound,4,10),c=\"red\")\n ax.plot(np.linspace(lower_bound,4,10),(np.linspace(lower_bound,4,10)*m) + c,c=\"blue\")"
] | [
[
"numpy.linalg.lstsq",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
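`plot_gpa_scatter` fits its trend line with numpy's least-squares solver. A standalone sketch of that fit on synthetic data, since `helper.generate_depts_df` is not available outside the repo:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(2.0, 4.0, 200)                 # stand-in "AvgGradeExpected"
y = 0.9 * x + 0.3 + rng.normal(0, 0.1, 200)    # stand-in "AvgGradeReceived"

# Design matrix [x | 1] so lstsq returns slope m and intercept c.
A = np.vstack([x, np.ones(len(x))]).T
m, c = np.linalg.lstsq(A, y, rcond=None)[0]
print("m:{}, c:{}".format(m, c))
```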
AK391/stylegan_xl | [
"9854d3d0e96eccaad10cab22379c018e1e031cf0"
] | [
"viz/renderer.py"
] | [
"# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\nimport sys\nimport copy\nimport traceback\nimport numpy as np\nimport torch\nimport torch.fft\nimport torch.nn\nimport matplotlib.cm\nimport dnnlib\nfrom torch_utils.ops import upfirdn2d\nimport legacy # pylint: disable=import-error\n\n#----------------------------------------------------------------------------\n\nclass CapturedException(Exception):\n def __init__(self, msg=None):\n if msg is None:\n _type, value, _traceback = sys.exc_info()\n assert value is not None\n if isinstance(value, CapturedException):\n msg = str(value)\n else:\n msg = traceback.format_exc()\n assert isinstance(msg, str)\n super().__init__(msg)\n\n#----------------------------------------------------------------------------\n\nclass CaptureSuccess(Exception):\n def __init__(self, out):\n super().__init__()\n self.out = out\n\n#----------------------------------------------------------------------------\n\ndef _sinc(x):\n y = (x * np.pi).abs()\n z = torch.sin(y) / y.clamp(1e-30, float('inf'))\n return torch.where(y < 1e-30, torch.ones_like(x), z)\n\ndef _lanczos_window(x, a):\n x = x.abs() / a\n return torch.where(x < 1, _sinc(x), torch.zeros_like(x))\n\n#----------------------------------------------------------------------------\n\ndef _construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1):\n assert a <= amax < aflt\n mat = torch.as_tensor(mat).to(torch.float32)\n\n # Construct 2D filter taps in input & output coordinate spaces.\n taps = ((torch.arange(aflt * up * 2 - 1, device=mat.device) + 1) / up - aflt).roll(1 - aflt * up)\n yi, xi = torch.meshgrid(taps, taps)\n xo, yo = (torch.stack([xi, yi], dim=2) @ mat[:2, :2].t()).unbind(2)\n\n # Convolution of two oriented 2D sinc filters.\n fi = _sinc(xi * cutoff_in) * _sinc(yi * cutoff_in)\n fo = _sinc(xo * cutoff_out) * _sinc(yo * cutoff_out)\n f = torch.fft.ifftn(torch.fft.fftn(fi) * torch.fft.fftn(fo)).real\n\n # Convolution of two oriented 2D Lanczos windows.\n wi = _lanczos_window(xi, a) * _lanczos_window(yi, a)\n wo = _lanczos_window(xo, a) * _lanczos_window(yo, a)\n w = torch.fft.ifftn(torch.fft.fftn(wi) * torch.fft.fftn(wo)).real\n\n # Construct windowed FIR filter.\n f = f * w\n\n # Finalize.\n c = (aflt - amax) * up\n f = f.roll([aflt * up - 1] * 2, dims=[0,1])[c:-c, c:-c]\n f = torch.nn.functional.pad(f, [0, 1, 0, 1]).reshape(amax * 2, up, amax * 2, up)\n f = f / f.sum([0,2], keepdim=True) / (up ** 2)\n f = f.reshape(amax * 2 * up, amax * 2 * up)[:-1, :-1]\n return f\n\n#----------------------------------------------------------------------------\n\ndef _apply_affine_transformation(x, mat, up=4, **filter_kwargs):\n _N, _C, H, W = x.shape\n mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device)\n\n # Construct filter.\n f = _construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs)\n assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1\n p = f.shape[0] // 2\n\n # Construct sampling grid.\n theta = mat.inverse()\n theta[:2, 2] *= 2\n theta[0, 2] += 1 / up / W\n theta[1, 2] += 1 / up / H\n theta[0, :] *= W / (W + p / up * 2)\n theta[1, :] *= H 
/ (H + p / up * 2)\n theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1])\n g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False)\n\n # Resample image.\n y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p)\n z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False)\n\n # Form mask.\n m = torch.zeros_like(y)\n c = p * 2 + 1\n m[:, :, c:-c, c:-c] = 1\n m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False)\n return z, m\n\n#----------------------------------------------------------------------------\n\nclass Renderer:\n def __init__(self):\n self._device = torch.device('cuda')\n self._pkl_data = dict() # {pkl: dict | CapturedException, ...}\n self._networks = dict() # {cache_key: torch.nn.Module, ...}\n self._pinned_bufs = dict() # {(shape, dtype): torch.Tensor, ...}\n self._cmaps = dict() # {name: torch.Tensor, ...}\n self._is_timing = False\n self._start_event = torch.cuda.Event(enable_timing=True)\n self._end_event = torch.cuda.Event(enable_timing=True)\n self._net_layers = dict() # {cache_key: [dnnlib.EasyDict, ...], ...}\n\n def render(self, **args):\n self._is_timing = True\n self._start_event.record(torch.cuda.current_stream(self._device))\n res = dnnlib.EasyDict()\n try:\n self._render_impl(res, **args)\n except:\n res.error = CapturedException()\n self._end_event.record(torch.cuda.current_stream(self._device))\n if 'image' in res:\n res.image = self.to_cpu(res.image).numpy()\n if 'stats' in res:\n res.stats = self.to_cpu(res.stats).numpy()\n if 'error' in res:\n res.error = str(res.error)\n if self._is_timing:\n self._end_event.synchronize()\n res.render_time = self._start_event.elapsed_time(self._end_event) * 1e-3\n self._is_timing = False\n return res\n\n def get_network(self, pkl, key, **tweak_kwargs):\n data = self._pkl_data.get(pkl, None)\n if data is None:\n print(f'Loading \"{pkl}\"... 
', end='', flush=True)\n try:\n with dnnlib.util.open_url(pkl, verbose=False) as f:\n data = legacy.load_network_pkl(f)\n print('Done.')\n except:\n data = CapturedException()\n print('Failed!')\n self._pkl_data[pkl] = data\n self._ignore_timing()\n if isinstance(data, CapturedException):\n raise data\n\n orig_net = data[key]\n cache_key = (orig_net, self._device, tuple(sorted(tweak_kwargs.items())))\n net = self._networks.get(cache_key, None)\n if net is None:\n try:\n net = copy.deepcopy(orig_net)\n net = self._tweak_network(net, **tweak_kwargs)\n net.to(self._device)\n except:\n net = CapturedException()\n self._networks[cache_key] = net\n self._ignore_timing()\n if isinstance(net, CapturedException):\n raise net\n return net\n\n def _tweak_network(self, net):\n # Print diagnostics.\n #for name, value in misc.named_params_and_buffers(net):\n # if name.endswith('.magnitude_ema'):\n # value = value.rsqrt().numpy()\n # print(f'{name:<50s}{np.min(value):<16g}{np.max(value):g}')\n # if name.endswith('.weight') and value.ndim == 4:\n # value = value.square().mean([1,2,3]).sqrt().numpy()\n # print(f'{name:<50s}{np.min(value):<16g}{np.max(value):g}')\n return net\n\n def _get_pinned_buf(self, ref):\n key = (tuple(ref.shape), ref.dtype)\n buf = self._pinned_bufs.get(key, None)\n if buf is None:\n buf = torch.empty(ref.shape, dtype=ref.dtype).pin_memory()\n self._pinned_bufs[key] = buf\n return buf\n\n def to_device(self, buf):\n return self._get_pinned_buf(buf).copy_(buf).to(self._device)\n\n def to_cpu(self, buf):\n return self._get_pinned_buf(buf).copy_(buf).clone()\n\n def _ignore_timing(self):\n self._is_timing = False\n\n def _apply_cmap(self, x, name='viridis'):\n cmap = self._cmaps.get(name, None)\n if cmap is None:\n cmap = matplotlib.cm.get_cmap(name)\n cmap = cmap(np.linspace(0, 1, num=1024), bytes=True)[:, :3]\n cmap = self.to_device(torch.from_numpy(cmap))\n self._cmaps[name] = cmap\n hi = cmap.shape[0] - 1\n x = (x * hi + 0.5).clamp(0, hi).to(torch.int64)\n x = torch.nn.functional.embedding(x, cmap)\n return x\n\n def _render_impl(self, res,\n pkl = None,\n w0_seeds = [[0, 1]],\n stylemix_idx = [],\n stylemix_seed = 0,\n trunc_psi = 1,\n trunc_cutoff = 0,\n random_seed = 0,\n noise_mode = 'const',\n force_fp32 = False,\n layer_name = None,\n sel_channels = 3,\n base_channel = 0,\n img_scale_db = 0,\n img_normalize = False,\n fft_show = False,\n fft_all = True,\n fft_range_db = 50,\n fft_beta = 8,\n input_transform = None,\n untransform = False,\n ):\n # Dig up network details.\n G = self.get_network(pkl, 'G_ema')\n res.img_resolution = G.img_resolution\n res.num_ws = G.num_ws\n res.has_noise = any('noise_const' in name for name, _buf in G.synthesis.named_buffers())\n res.has_input_transform = (hasattr(G.synthesis, 'input') and hasattr(G.synthesis.input, 'transform'))\n\n # Set input transform.\n if res.has_input_transform:\n m = np.eye(3)\n try:\n if input_transform is not None:\n m = np.linalg.inv(np.asarray(input_transform))\n except np.linalg.LinAlgError:\n res.error = CapturedException()\n G.synthesis.input.transform.copy_(torch.from_numpy(m))\n\n # Generate random latents.\n all_seeds = [seed for seed, _weight in w0_seeds] + [stylemix_seed]\n all_seeds = list(set(all_seeds))\n all_zs = np.zeros([len(all_seeds), G.z_dim], dtype=np.float32)\n all_cs = np.zeros([len(all_seeds), G.c_dim], dtype=np.float32)\n for idx, seed in enumerate(all_seeds):\n rnd = np.random.RandomState(seed)\n all_zs[idx] = rnd.randn(G.z_dim)\n cls = rnd.randint(G.c_dim)\n if G.c_dim > 0:\n all_cs[idx, cls] 
= 1\n\n # Run mapping network.\n w_avg = G.mapping.w_avg[cls]\n all_zs = self.to_device(torch.from_numpy(all_zs))\n all_cs = self.to_device(torch.from_numpy(all_cs))\n all_ws = G.mapping(z=all_zs, c=all_cs, truncation_psi=trunc_psi, truncation_cutoff=trunc_cutoff) - w_avg\n all_ws = dict(zip(all_seeds, all_ws))\n\n # Calculate final W.\n w = torch.stack([all_ws[seed] * weight for seed, weight in w0_seeds]).sum(dim=0, keepdim=True)\n stylemix_idx = [idx for idx in stylemix_idx if 0 <= idx < G.num_ws]\n if len(stylemix_idx) > 0:\n w[:, stylemix_idx] = all_ws[stylemix_seed][np.newaxis, stylemix_idx]\n w += w_avg\n\n # Run synthesis network.\n synthesis_kwargs = dnnlib.EasyDict(noise_mode=noise_mode, force_fp32=force_fp32)\n torch.manual_seed(random_seed)\n out, layers = self.run_synthesis_net(G.synthesis, w, capture_layer=layer_name, **synthesis_kwargs)\n\n # Update layer list.\n cache_key = (G.synthesis, tuple(sorted(synthesis_kwargs.items())))\n if cache_key not in self._net_layers:\n if layer_name is not None:\n torch.manual_seed(random_seed)\n _out, layers = self.run_synthesis_net(G.synthesis, w, **synthesis_kwargs)\n self._net_layers[cache_key] = layers\n res.layers = self._net_layers[cache_key]\n\n # Untransform.\n if untransform and res.has_input_transform:\n out, _mask = _apply_affine_transformation(out.to(torch.float32), G.synthesis.input.transform, amax=6) # Override amax to hit the fast path in upfirdn2d.\n\n # Select channels and compute statistics.\n out = out[0].to(torch.float32)\n if sel_channels > out.shape[0]:\n sel_channels = 1\n base_channel = max(min(base_channel, out.shape[0] - sel_channels), 0)\n sel = out[base_channel : base_channel + sel_channels]\n res.stats = torch.stack([\n out.mean(), sel.mean(),\n out.std(), sel.std(),\n out.norm(float('inf')), sel.norm(float('inf')),\n ])\n\n # Scale and convert to uint8.\n img = sel\n if img_normalize:\n img = img / img.norm(float('inf'), dim=[1,2], keepdim=True).clip(1e-8, 1e8)\n img = img * (10 ** (img_scale_db / 20))\n img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8).permute(1, 2, 0)\n res.image = img\n\n # FFT.\n if fft_show:\n sig = out if fft_all else sel\n sig = sig.to(torch.float32)\n sig = sig - sig.mean(dim=[1,2], keepdim=True)\n sig = sig * torch.kaiser_window(sig.shape[1], periodic=False, beta=fft_beta, device=self._device)[None, :, None]\n sig = sig * torch.kaiser_window(sig.shape[2], periodic=False, beta=fft_beta, device=self._device)[None, None, :]\n fft = torch.fft.fftn(sig, dim=[1,2]).abs().square().sum(dim=0)\n fft = fft.roll(shifts=[fft.shape[0] // 2, fft.shape[1] // 2], dims=[0,1])\n fft = (fft / fft.mean()).log10() * 10 # dB\n fft = self._apply_cmap((fft / fft_range_db + 1) / 2)\n res.image = torch.cat([img.expand_as(fft), fft], dim=1)\n\n @staticmethod\n def run_synthesis_net(net, *args, capture_layer=None, **kwargs): # => out, layers\n submodule_names = {mod: name for name, mod in net.named_modules()}\n unique_names = set()\n layers = []\n\n def module_hook(module, _inputs, outputs):\n outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]\n outputs = [out for out in outputs if isinstance(out, torch.Tensor) and out.ndim in [4, 5]]\n for idx, out in enumerate(outputs):\n if out.ndim == 5: # G-CNN => remove group dimension.\n out = out.mean(2)\n name = submodule_names[module]\n if name == '':\n name = 'output'\n if len(outputs) > 1:\n name += f':{idx}'\n if name in unique_names:\n suffix = 2\n while f'{name}_{suffix}' in unique_names:\n suffix += 1\n name += f'_{suffix}'\n 
unique_names.add(name)\n shape = [int(x) for x in out.shape]\n dtype = str(out.dtype).split('.')[-1]\n layers.append(dnnlib.EasyDict(name=name, shape=shape, dtype=dtype))\n if name == capture_layer:\n raise CaptureSuccess(out)\n\n hooks = [module.register_forward_hook(module_hook) for module in net.modules()]\n try:\n out = net(*args, **kwargs)\n except CaptureSuccess as e:\n out = e.out\n for hook in hooks:\n hook.remove()\n return out, layers\n\n#----------------------------------------------------------------------------\n"
] | [
[
"numpy.linspace",
"torch.sin",
"numpy.asarray",
"torch.kaiser_window",
"torch.device",
"torch.nn.functional.affine_grid",
"numpy.eye",
"torch.from_numpy",
"torch.arange",
"torch.ones_like",
"torch.nn.functional.pad",
"torch.empty",
"torch.cuda.current_stream",
"torch.cuda.Event",
"torch.zeros_like",
"torch.stack",
"numpy.random.RandomState",
"torch.as_tensor",
"torch.nn.functional.embedding",
"torch.manual_seed",
"torch.fft.fftn",
"torch.nn.functional.grid_sample",
"torch.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
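`Renderer.render` in the record above brackets each frame with CUDA events and converts the elapsed milliseconds to seconds. The pattern in isolation, with a placeholder matmul standing in for the rendering work (requires a CUDA device):

```python
import torch

device = torch.device("cuda")
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)

start.record(torch.cuda.current_stream(device))
a = torch.randn(1024, 1024, device=device)
y = a @ a                               # placeholder workload
end.record(torch.cuda.current_stream(device))

end.synchronize()                       # block until the GPU passes `end`
print(f"render_time: {start.elapsed_time(end) * 1e-3:.4f} s")
```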
jggatter/cumulus | [
"1dfd9dfce5a44ff867859db6f24a356f72c6ccdd"
] | [
"docker/demultiplexing/demuxlet/generate_zarr.py"
] | [
"import argparse\n\nimport pegasusio as pio\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description='Merge demuxlet result with gene-count matrix.')\nparser.add_argument('demux_res', metavar = 'demux_result.best', help = 'Demuxlet demultiplexing results.')\nparser.add_argument('raw_mat', metavar = 'raw_feature_bc_matrix.h5', help = 'Raw gene count matrix in 10x format.')\nparser.add_argument('out_file', metavar = 'output_result.zarr', help = 'Output zarr file.')\nargs = parser.parse_args()\n\ndemux_type_dict = {'SNG': 'singlet', 'DBL': 'doublet', 'AMB': 'unknown'}\n\ndef write_output(assignment_file: str, input_mat_file: str, output_zarr_file: str) -> None:\n df = pd.read_csv(assignment_file, sep = '\\t', header = 0, index_col = 'BARCODE')\n df.index = pd.Index([x[:-2] for x in df.index])\n df['demux_type'] = df['DROPLET.TYPE'].apply(lambda s: demux_type_dict[s])\n df['assignment'] = ''\n df.loc[df['demux_type'] == 'singlet', 'assignment'] = df.loc[df['demux_type'] == 'singlet', 'SNG.BEST.GUESS']\n df.loc[df['demux_type'] == 'doublet', 'assignment'] = df.loc[df['demux_type'] == 'doublet', 'DBL.BEST.GUESS'].apply(lambda s: ','.join(s.split(',')[:-1]))\n\n data = pio.read_input(input_mat_file)\n data.obs['demux_type'] = ''\n data.obs['assignment'] = ''\n\n idx = data.obs_names.isin(df.index)\n barcodes = data.obs_names[idx]\n df_valid = df.loc[barcodes, ['demux_type', 'assignment']]\n data.obs.loc[idx, 'demux_type'] = df_valid['demux_type'].values\n data.obs.loc[idx, 'assignment'] = df_valid['assignment'].values\n\n pio.write_output(data, output_zarr_file, zarr_zipstore = True)\n\n\nif __name__ == '__main__':\n write_output(args.demux_res, args.raw_mat, args.out_file)"
] | [
[
"pandas.read_csv",
"pandas.Index"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
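`write_output` strips the `-1` suffix demuxlet appends to barcodes, then copies the calls onto matching rows of the matrix's `obs` table, leaving uncalled cells empty. That alignment step in plain pandas, with toy barcodes in place of real ones:

```python
import pandas as pd

# Demuxlet rows; barcodes carry the "-1" suffix the script strips ([:-2]).
df = pd.DataFrame({"demux_type": ["singlet", "doublet"],
                   "assignment": ["S1", "S1,S2"]},
                  index=pd.Index(["AAAC-1", "GGGT-1"], name="BARCODE"))
df.index = pd.Index([x[:-2] for x in df.index])

# Matrix obs table; "TTTC" has no demuxlet call and stays empty.
obs = pd.DataFrame({"demux_type": "", "assignment": ""},
                   index=pd.Index(["AAAC", "GGGT", "TTTC"]))

idx = obs.index.isin(df.index)
obs.loc[idx, ["demux_type", "assignment"]] = df.loc[obs.index[idx]].values
print(obs)
```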
HuangHaoyu1997/gym-miniworld | [
"77dc24bf1b1ca8c2cfefadfe3e35a0deb2d08a1a",
"77dc24bf1b1ca8c2cfefadfe3e35a0deb2d08a1a"
] | [
"gym_miniworld/miniworld.py",
"gym_miniworld/entity.py"
] | [
"import math\nfrom enum import IntEnum\nimport numpy as np\nimport gym\nfrom gym import spaces\nfrom .random import *\nfrom .opengl import *\nfrom .objmesh import *\nfrom .entity import *\nfrom .math import *\nfrom .params import *\n\n# Default wall height for room\nDEFAULT_WALL_HEIGHT=2.74\n\n# Texture size/density in texels/meter\nTEX_DENSITY = 512\n\ndef gen_texcs_wall(\n tex,\n min_x,\n min_y,\n width,\n height\n):\n \"\"\"\n Generate texture coordinates for a wall quad\n \"\"\"\n\n xc = (TEX_DENSITY / tex.width)\n yc = (TEX_DENSITY / tex.height)\n\n min_u = (min_x) * xc\n max_u = (min_x + width) * xc\n min_v = (min_y) * yc\n max_v = (min_y + height) * yc\n\n return np.array(\n [\n [min_u, min_v],\n [min_u, max_v],\n [max_u, max_v],\n [max_u, min_v],\n ],\n dtype=np.float32\n )\n\ndef gen_texcs_floor(\n tex,\n poss\n):\n \"\"\"\n Generate texture coordinates for the floor or ceiling\n This is done by mapping x,z positions directly to texture\n coordinates\n \"\"\"\n\n texc_mul = np.array(\n [\n TEX_DENSITY / tex.width,\n TEX_DENSITY / tex.height\n ],\n dtype=float\n )\n\n coords = np.stack([poss[:,0], poss[:,2]], axis=1) * texc_mul\n\n return coords\n\nclass Room:\n \"\"\"\n Represent an individual room and its contents\n \"\"\"\n\n def __init__(\n self,\n outline,\n wall_height=DEFAULT_WALL_HEIGHT,\n floor_tex='floor_tiles_bw',\n wall_tex='concrete',\n ceil_tex='concrete_tiles',\n no_ceiling=False\n ):\n # The outlien should have shape Nx2\n assert len(outline.shape) == 2\n assert outline.shape[1] == 2\n assert outline.shape[0] >= 3\n\n # Add a Y coordinate to the outline points\n outline = np.insert(outline, 1, 0, axis=1)\n\n # Number of outline vertices / walls\n self.num_walls = outline.shape[0]\n\n # List of 2D points forming the outline of the room\n # Shape is Nx3\n self.outline = outline\n\n # Compute the min and max x, z extents\n self.min_x = self.outline[:, 0].min()\n self.max_x = self.outline[:, 0].max()\n self.min_z = self.outline[:, 2].min()\n self.max_z = self.outline[:, 2].max()\n\n # Compute midpoint coordinates\n self.mid_x = (self.max_x + self.min_x) / 2\n self.mid_z = (self.max_z + self.min_z) / 2\n\n # Compute approximate surface area\n self.area = (self.max_x - self.min_x) * (self.max_z - self.min_z)\n\n # Compute room edge directions and normals\n # Compute edge vectors (p1 - p0)\n # For the first point, p0 is the last\n # For the last point, p0 is p_n-1\n next_pts = np.concatenate([self.outline[1:], np.expand_dims(self.outline[0], axis=0)], axis=0)\n self.edge_dirs = next_pts - self.outline\n self.edge_dirs = (self.edge_dirs.T / np.linalg.norm(self.edge_dirs, axis=1)).T\n self.edge_norms = -np.cross(self.edge_dirs, Y_VEC)\n self.edge_norms = (self.edge_norms.T / np.linalg.norm(self.edge_norms, axis=1)).T\n\n # Height of the room walls\n self.wall_height = wall_height\n\n # No ceiling flag\n self.no_ceiling = no_ceiling\n\n # Texture names\n self.wall_tex_name = wall_tex\n self.floor_tex_name = floor_tex\n self.ceil_tex_name = ceil_tex\n\n # Lists of portals, indexed by wall/edge index\n self.portals = [[] for i in range(self.num_walls)]\n\n # List of neighbor rooms\n # Same length as list of portals\n self.neighbors = []\n\n def add_portal(\n self,\n edge,\n start_pos=None,\n end_pos=None,\n min_x=None,\n max_x=None,\n min_z=None,\n max_z=None,\n min_y=0,\n max_y=None\n ):\n \"\"\"\n Create a new portal/opening in a wall of this room\n \"\"\"\n\n if max_y == None:\n max_y = self.wall_height\n\n assert edge <= self.num_walls\n assert max_y > min_y\n\n # Get the 
edge points, compute the direction vector\n e_p0 = self.outline[edge]\n e_p1 = self.outline[(edge+1) % self.num_walls]\n e_len = np.linalg.norm(e_p1 - e_p0)\n e_dir = (e_p1 - e_p0) / e_len\n x0, _, z0 = e_p0\n x1, _, z1 = e_p1\n dx, _, dz = e_dir\n\n # If the portal extents are specified by x coordinates\n if min_x != None:\n assert min_z == None and max_z == None\n assert start_pos == None and end_pos == None\n assert x0 != x1\n\n m0 = (min_x - x0) / dx\n m1 = (max_x - x0) / dx\n\n if m1 < m0:\n m0, m1 = m1, m0\n\n start_pos, end_pos = m0, m1\n\n # If the portal extents are specified by z coordinates\n elif min_z != None:\n assert min_x == None and max_x == None\n assert start_pos == None and end_pos == None\n assert z0 != z1\n\n m0 = (min_z - z0) / dz\n m1 = (max_z - z0) / dz\n\n if m1 < m0:\n m0, m1 = m1, m0\n\n start_pos, end_pos = m0, m1\n\n else:\n assert min_x == None and max_x == None\n assert min_z == None and max_z == None\n\n assert end_pos > start_pos\n assert start_pos >= 0, \"portal outside of wall extents\"\n assert end_pos <= e_len, \"portal outside of wall extents\"\n\n self.portals[edge].append({\n 'start_pos': start_pos,\n 'end_pos': end_pos,\n 'min_y': min_y,\n 'max_y': max_y\n })\n\n # Sort the portals by start position\n self.portals[edge].sort(key=lambda e: e['start_pos'])\n\n return start_pos, end_pos\n\n def point_inside(self, p):\n \"\"\"\n Test if a point is inside the room\n \"\"\"\n\n # Vector from edge start to test point\n ap = p - self.outline\n\n # Compute the dot products of normals to AP vectors\n dotNAP = np.sum(self.edge_norms * ap, axis=1)\n\n # The point is inside if all the dot products are greater than zero\n return np.all(np.greater(dotNAP, 0))\n\n def _gen_static_data(self, params, rng):\n \"\"\"\n Generate polygons and static data for this room\n Needed for rendering and collision detection\n Note: the wall polygons are quads, but the floor and\n ceiling can be arbitrary n-gons\n \"\"\"\n\n # Load the textures and do texture randomization\n self.wall_tex = Texture.get(self.wall_tex_name, rng)\n self.floor_tex = Texture.get(self.floor_tex_name, rng)\n self.ceil_tex = Texture.get(self.ceil_tex_name, rng)\n\n # Generate the floor vertices\n self.floor_verts = self.outline\n self.floor_texcs = gen_texcs_floor(\n self.floor_tex,\n self.floor_verts\n )\n\n # Generate the ceiling vertices\n # Flip the ceiling vertex order because of backface culling\n self.ceil_verts = np.flip(self.outline, axis=0) + self.wall_height * Y_VEC\n self.ceil_texcs = gen_texcs_floor(\n self.ceil_tex,\n self.ceil_verts\n )\n\n self.wall_verts = []\n self.wall_norms = []\n self.wall_texcs = []\n self.wall_segs = []\n\n def gen_seg_poly(\n edge_p0,\n side_vec,\n seg_start,\n seg_end,\n min_y,\n max_y\n ):\n if seg_end == seg_start:\n return\n\n if min_y == max_y:\n return\n\n s_p0 = edge_p0 + seg_start * side_vec\n s_p1 = edge_p0 + seg_end * side_vec\n\n # If this polygon starts at ground level, add a collidable segment\n if min_y == 0:\n self.wall_segs.append(np.array([s_p1, s_p0]))\n\n # Generate the vertices\n # Vertices are listed in counter-clockwise order\n self.wall_verts.append(s_p0 + min_y * Y_VEC)\n self.wall_verts.append(s_p0 + max_y * Y_VEC)\n self.wall_verts.append(s_p1 + max_y * Y_VEC)\n self.wall_verts.append(s_p1 + min_y * Y_VEC)\n\n # Compute the normal for the polygon\n normal = np.cross(s_p1 - s_p0, Y_VEC)\n normal = -normal / np.linalg.norm(normal)\n for i in range(4):\n self.wall_norms.append(normal)\n\n # Generate the texture coordinates\n texcs = 
gen_texcs_wall(\n self.wall_tex,\n seg_start,\n min_y,\n seg_end - seg_start,\n max_y - min_y\n )\n self.wall_texcs.append(texcs)\n\n # For each wall\n for wall_idx in range(self.num_walls):\n edge_p0 = self.outline[wall_idx, :]\n edge_p1 = self.outline[(wall_idx+1) % self.num_walls, :]\n wall_width = np.linalg.norm(edge_p1 - edge_p0)\n side_vec = (edge_p1 - edge_p0) / wall_width\n\n if len(self.portals[wall_idx]) > 0:\n seg_end = self.portals[wall_idx][0]['start_pos']\n else:\n seg_end = wall_width\n\n # Generate the first polygon (going up to the first portal)\n gen_seg_poly(\n edge_p0,\n side_vec,\n 0,\n seg_end,\n 0,\n self.wall_height\n )\n\n # For each portal in this wall\n for portal_idx, portal in enumerate(self.portals[wall_idx]):\n portal = self.portals[wall_idx][portal_idx]\n start_pos = portal['start_pos']\n end_pos = portal['end_pos']\n min_y = portal['min_y']\n max_y = portal['max_y']\n\n # Generate the bottom polygon\n gen_seg_poly(\n edge_p0,\n side_vec,\n start_pos,\n end_pos,\n 0,\n min_y\n )\n\n # Generate the top polygon\n gen_seg_poly(\n edge_p0,\n side_vec,\n start_pos,\n end_pos,\n max_y,\n self.wall_height\n )\n\n if portal_idx < len(self.portals[wall_idx]) - 1:\n next_portal = self.portals[wall_idx][portal_idx+1]\n next_portal_start = next_portal['start_pos']\n else:\n next_portal_start = wall_width\n\n # Generate the polygon going up to the next portal\n gen_seg_poly(\n edge_p0,\n side_vec,\n end_pos,\n next_portal_start,\n 0,\n self.wall_height\n )\n\n self.wall_verts = np.array(self.wall_verts)\n self.wall_norms = np.array(self.wall_norms)\n\n if len(self.wall_segs) > 0:\n self.wall_segs = np.array(self.wall_segs)\n else:\n self.wall_segs = np.array([]).reshape(0, 2, 3)\n\n if len(self.wall_texcs) > 0:\n self.wall_texcs = np.concatenate(self.wall_texcs)\n else:\n self.wall_texcs = np.array([]).reshape(0, 2)\n\n def _render(self):\n \"\"\"\n Render the static elements of the room\n \"\"\"\n\n glColor3f(1, 1, 1)\n\n # Draw the floor\n self.floor_tex.bind()\n glBegin(GL_POLYGON)\n glNormal3f(0, 1, 0)\n for i in range(self.floor_verts.shape[0]):\n glTexCoord2f(*self.floor_texcs[i, :])\n glVertex3f(*self.floor_verts[i, :])\n glEnd()\n\n # Draw the ceiling\n if not self.no_ceiling:\n self.ceil_tex.bind()\n glBegin(GL_POLYGON)\n glNormal3f(0, -1, 0)\n for i in range(self.ceil_verts.shape[0]):\n glTexCoord2f(*self.ceil_texcs[i, :])\n glVertex3f(*self.ceil_verts[i, :])\n glEnd()\n\n # Draw the walls\n self.wall_tex.bind()\n glBegin(GL_QUADS)\n for i in range(self.wall_verts.shape[0]):\n glNormal3f(*self.wall_norms[i, :])\n glTexCoord2f(*self.wall_texcs[i, :])\n glVertex3f(*self.wall_verts[i, :])\n glEnd()\n\nclass MiniWorldEnv(gym.Env):\n \"\"\"\n Base class for MiniWorld environments. 
Implements the procedural\n world generation and simulation logic.\n \"\"\"\n\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 30\n }\n\n # Enumeration of possible actions\n class Actions(IntEnum):\n # Turn left or right by a small amount\n turn_left = 0\n turn_right = 1\n\n # Move forward or back by a small amount\n move_forward = 2\n move_back = 3\n\n # Pick up or drop an object being carried\n pickup = 4\n drop = 5\n\n # Toggle/activate an object\n toggle = 6\n\n # Done completing task\n done = 7\n\n def __init__(\n self,\n max_episode_steps=1500,\n obs_width=80,\n obs_height=60,\n window_width=800,\n window_height=600,\n params=DEFAULT_PARAMS,\n domain_rand=False\n ):\n # Action enumeration for this environment\n self.actions = MiniWorldEnv.Actions\n\n # Actions are discrete integer values\n self.action_space = spaces.Discrete(len(self.actions))\n\n # Observations are RGB images with pixels in [0, 255]\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(obs_height, obs_width, 3),\n dtype=np.uint8\n )\n\n self.reward_range = (-math.inf, math.inf)\n\n # Maximum number of steps per episode\n self.max_episode_steps = max_episode_steps\n\n # Simulation parameters, used for domain randomization\n self.params = params\n\n # Domain randomization enable/disable flag\n self.domain_rand = domain_rand\n\n # Window for displaying the environment to humans\n self.window = None\n\n # Invisible window to render into (shadow OpenGL context)\n self.shadow_window = pyglet.window.Window(width=1, height=1, visible=False)\n\n # Enable depth testing and backface culling\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_CULL_FACE)\n\n # Frame buffer used to render observations\n self.obs_fb = FrameBuffer(obs_width, obs_height, 8)\n\n # Frame buffer used for human visualization\n self.vis_fb = FrameBuffer(window_width, window_height, 16)\n\n # Compute the observation display size\n self.obs_disp_width = 256\n self.obs_disp_height = obs_height * (self.obs_disp_width / obs_width)\n\n # For displaying text\n self.text_label = pyglet.text.Label(\n font_name=\"Arial\",\n font_size=14,\n multiline=True,\n width=400,\n x = window_width + 5,\n y = window_height - (self.obs_disp_height + 19)\n )\n\n # Initialize the state\n self.seed()\n self.reset()\n\n def close(self):\n pass\n\n def seed(self, seed=None):\n self.rand = RandGen(seed)\n return [seed]\n\n def reset(self):\n \"\"\"\n Reset the simulation at the start of a new episode\n This also randomizes many environment parameters (domain randomization)\n \"\"\"\n\n # Step count since episode start\n self.step_count = 0\n\n # Create the agent\n self.agent = Agent()\n\n # List of entities contained\n self.entities = []\n\n # List of rooms in the world\n self.rooms = []\n\n # Wall segments for collision detection\n # Shape is (N, 2, 3)\n self.wall_segs = []\n\n # Generate the world\n self._gen_world()\n\n # Check if domain randomization is enabled or not\n rand = self.rand if self.domain_rand else None\n\n # Randomize elements of the world (domain randomization)\n self.params.sample_many(rand, self, [\n 'sky_color',\n 'light_pos',\n 'light_color',\n 'light_ambient'\n ])\n\n # Get the max forward step distance\n self.max_forward_step = self.params.get_max('forward_step')\n\n # Randomize parameters of the entities\n for ent in self.entities:\n ent.randomize(self.params, rand)\n\n # Compute the min and max x, z extents of the whole floorplan\n self.min_x = min([r.min_x for r in self.rooms])\n self.max_x = max([r.max_x for r in 
self.rooms])\n self.min_z = min([r.min_z for r in self.rooms])\n self.max_z = max([r.max_z for r in self.rooms])\n\n # Generate static data\n if len(self.wall_segs) == 0:\n self._gen_static_data()\n\n # Pre-compile static parts of the environment into a display list\n self._render_static()\n\n # Generate the first camera image\n obs = self.render_obs()\n\n # Return first observation\n return obs\n\n def _get_carry_pos(self, agent_pos, ent):\n \"\"\"\n Compute the position at which to place an object being carried\n \"\"\"\n\n dist = self.agent.radius + ent.radius + self.max_forward_step\n pos = agent_pos + self.agent.dir_vec * 1.05 * dist\n\n # Adjust the Y-position so the object is visible while being carried\n y_pos = max(self.agent.cam_height - ent.height - 0.3, 0)\n pos = pos + Y_VEC * y_pos\n\n return pos\n\n def move_agent(self, fwd_dist, fwd_drift):\n \"\"\"\n Move the agent forward\n \"\"\"\n\n next_pos = (\n self.agent.pos +\n self.agent.dir_vec * fwd_dist +\n self.agent.right_vec * fwd_drift\n )\n\n if self.intersect(self.agent, next_pos, self.agent.radius):\n return False\n\n carrying = self.agent.carrying\n if carrying:\n next_carrying_pos = self._get_carry_pos(next_pos, carrying)\n\n if self.intersect(carrying, next_carrying_pos, carrying.radius):\n return False\n\n carrying.pos = next_carrying_pos\n\n self.agent.pos = next_pos\n\n return True\n\n def turn_agent(self, turn_angle):\n \"\"\"\n Turn the agent left or right\n \"\"\"\n\n turn_angle *= (math.pi / 180)\n orig_dir = self.agent.dir\n\n self.agent.dir += turn_angle\n\n carrying = self.agent.carrying\n if carrying:\n pos = self._get_carry_pos(self.agent.pos, carrying)\n\n if self.intersect(carrying, pos, carrying.radius):\n self.agent.dir = orig_dir\n return False\n\n carrying.pos = pos\n carrying.dir = self.agent.dir\n\n return True\n\n def step(self, action):\n \"\"\"\n Perform one action and update the simulation\n \"\"\"\n\n self.step_count += 1\n\n rand = self.rand if self.domain_rand else None\n fwd_step = self.params.sample(rand, 'forward_step')\n fwd_drift = self.params.sample(rand, 'forward_drift')\n turn_step = self.params.sample(rand, 'turn_step')\n\n if action == self.actions.move_forward:\n self.move_agent(fwd_step, fwd_drift)\n\n elif action == self.actions.move_back:\n self.move_agent(-fwd_step, fwd_drift)\n\n elif action == self.actions.turn_left:\n self.turn_agent(turn_step)\n\n elif action == self.actions.turn_right:\n self.turn_agent(-turn_step)\n\n # Pick up an object\n elif action == self.actions.pickup:\n # Position at which we will test for an intersection\n test_pos = self.agent.pos + self.agent.dir_vec * 1.5 * self.agent.radius\n ent = self.intersect(self.agent, test_pos, 1.2 * self.agent.radius)\n if not self.agent.carrying:\n if isinstance(ent, Entity):\n if not ent.is_static:\n self.agent.carrying = ent\n\n # Drop an object being carried\n elif action == self.actions.drop:\n if self.agent.carrying:\n self.agent.carrying.pos[1] = 0\n self.agent.carrying = None\n\n # If we are carrying an object, update its position as we move\n if self.agent.carrying:\n ent_pos = self._get_carry_pos(self.agent.pos, self.agent.carrying)\n self.agent.carrying.pos = ent_pos\n self.agent.carrying.dir = self.agent.dir\n\n # Generate the current camera image\n obs = self.render_obs()\n\n # If the maximum time step count is reached\n if self.step_count >= self.max_episode_steps:\n done = True\n reward = 0\n return obs, reward, done, {}\n\n reward = 0\n done = False\n\n return obs, reward, done, {}\n\n def 
add_rect_room(\n self,\n min_x,\n max_x,\n min_z,\n max_z,\n **kwargs\n ):\n \"\"\"\n Create a rectangular room\n \"\"\"\n\n # 2D outline coordinates of the room,\n # listed in counter-clockwise order when viewed from the top\n outline = np.array([\n # East wall\n [max_x, max_z],\n # North wall\n [max_x, min_z],\n # West wall\n [min_x, min_z],\n # South wall\n [min_x, max_z],\n ])\n\n return self.add_room(outline=outline, **kwargs)\n\n def add_room(self, **kwargs):\n \"\"\"\n Create a new room\n \"\"\"\n\n assert len(self.wall_segs) == 0, \"cannot add rooms after static data is generated\"\n\n room = Room(**kwargs)\n self.rooms.append(room)\n\n return room\n\n def connect_rooms(\n self,\n room_a,\n room_b,\n min_x=None,\n max_x=None,\n min_z=None,\n max_z=None,\n max_y=None\n ):\n \"\"\"\n Connect two rooms along facing edges\n \"\"\"\n\n def find_facing_edges():\n for idx_a in range(room_a.num_walls):\n norm_a = room_a.edge_norms[idx_a]\n\n for idx_b in range(room_b.num_walls):\n norm_b = room_b.edge_norms[idx_b]\n\n # Reject edges that are not facing each other\n if np.dot(norm_a, norm_b) > -0.9:\n continue\n\n dir = room_b.outline[idx_b] - room_a.outline[idx_a]\n\n # Reject edges that are not touching\n if np.dot(norm_a, dir) > 0.05:\n continue\n\n return idx_a, idx_b\n\n return None, None\n\n idx_a, idx_b = find_facing_edges()\n assert idx_a != None, \"matching edges not found in connect_rooms\"\n\n start_a, end_a = room_a.add_portal(\n edge=idx_a,\n min_x=min_x,\n max_x=max_x,\n min_z=min_z,\n max_z=max_z,\n max_y=max_y\n )\n\n start_b, end_b = room_b.add_portal(\n edge=idx_b,\n min_x=min_x,\n max_x=max_x,\n min_z=min_z,\n max_z=max_z,\n max_y=max_y\n )\n\n a = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * start_a\n b = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * end_a\n c = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * start_b\n d = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * end_b\n\n # If the portals are directly connected, stop\n if np.linalg.norm(a - d) < 0.001:\n return\n\n len_a = np.linalg.norm(b - a)\n len_b = np.linalg.norm(d - c)\n\n # Room outline points must be specified in counter-clockwise order\n outline = np.stack([c, b, a, d])\n outline = np.stack([outline[:, 0], outline[:, 2]], axis=1)\n\n max_y = max_y if max_y != None else room_a.wall_height\n\n room = Room(\n outline,\n wall_height=max_y,\n wall_tex=room_a.wall_tex_name,\n floor_tex=room_a.floor_tex_name,\n ceil_tex=room_a.ceil_tex_name,\n no_ceiling=room_a.no_ceiling,\n )\n\n self.rooms.append(room)\n\n room.add_portal(1, start_pos=0, end_pos=len_a)\n room.add_portal(3, start_pos=0, end_pos=len_b)\n\n def place_entity(\n self,\n ent,\n room=None,\n pos=None,\n dir=None,\n min_x=None,\n max_x=None,\n min_z=None,\n max_z=None\n ):\n \"\"\"\n Place an entity/object in the world.\n Find a position that doesn't intersect with any other object.\n \"\"\"\n\n assert len(self.rooms) > 0, \"create rooms before calling place_entity\"\n assert ent.radius != None, \"entity must have physical size defined\"\n\n # Generate collision detection data\n if len(self.wall_segs) == 0:\n self._gen_static_data()\n\n # If an exact position if specified\n if pos is not None:\n ent.dir = dir if dir != None else self.rand.float(-math.pi, math.pi)\n ent.pos = pos\n self.entities.append(ent)\n return ent\n\n # Keep retrying until we find a suitable position\n while True:\n # Pick a room, sample rooms proportionally to floor surface area\n r = room if room else self.rand.choice(self.rooms, probs=self.room_probs)\n\n # 
Choose a random point within the square bounding box of the room\n lx = r.min_x if min_x == None else min_x\n hx = r.max_x if max_x == None else max_x\n lz = r.min_z if min_z == None else min_z\n hz = r.max_z if max_z == None else max_z\n pos = self.rand.float(\n low =[lx + ent.radius, 0, lz + ent.radius],\n high=[hx - ent.radius, 0, hz - ent.radius]\n )\n\n # Make sure the position is within the room's outline\n if not r.point_inside(pos):\n continue\n\n # Make sure the position doesn't intersect with any walls\n if self.intersect(ent, pos, ent.radius):\n continue\n\n # Pick a direction\n d = dir if dir != None else self.rand.float(-math.pi, math.pi)\n\n ent.pos = pos\n ent.dir = d\n break\n\n self.entities.append(ent)\n\n return ent\n\n def place_agent(\n self,\n room=None,\n dir=None,\n min_x=None,\n max_x=None,\n min_z=None,\n max_z=None\n ):\n \"\"\"\n Place the agent in the environment at a random position\n and orientation\n \"\"\"\n\n return self.place_entity(\n self.agent,\n room=room,\n dir=dir,\n min_x=min_x,\n max_x=max_x,\n min_z=min_z,\n max_z=max_z\n )\n\n def intersect(self, ent, pos, radius):\n \"\"\"\n Check if an entity intersects with the world\n \"\"\"\n\n # Ignore the Y position\n px, _, pz = pos\n pos = np.array([px, 0, pz])\n\n # Check for intersection with walls\n if intersect_circle_segs(pos, radius, self.wall_segs):\n return True\n\n # Check for entity intersection\n for ent2 in self.entities:\n # Entities can't intersect with themselves\n if ent2 is ent:\n continue\n\n px, _, pz = ent2.pos\n pos2 = np.array([px, 0, pz])\n\n d = np.linalg.norm(pos2 - pos)\n if d < radius + ent2.radius:\n return ent2\n\n return None\n\n def near(self, ent0, ent1=None):\n \"\"\"\n Test if the two entities are near each other.\n Used for \"go to\" or \"put next\" type tasks\n \"\"\"\n\n if ent1 == None:\n ent1 = self.agent\n\n dist = np.linalg.norm(ent0.pos - ent1.pos)\n return dist < ent0.radius + ent1.radius + 1.1 * self.max_forward_step\n\n def _load_tex(self, tex_name):\n \"\"\"\n Load a texture, with or without domain randomization\n \"\"\"\n\n rand = self.rand if self.params.sample(self.rand, 'tex_rand') else None\n return Texture.get(tex_name, rand)\n\n def _gen_static_data(self):\n \"\"\"\n Generate static data needed for rendering and collision detection\n \"\"\"\n\n # Generate the static data for each room\n for room in self.rooms:\n room._gen_static_data(\n self.params,\n self.rand if self.domain_rand else None\n )\n\n # Concatenate the wall segments\n self.wall_segs = np.concatenate([r.wall_segs for r in self.rooms])\n\n # Room selection probabilities\n self.room_probs = np.array([r.area for r in self.rooms], dtype=float)\n self.room_probs /= np.sum(self.room_probs)\n\n def _gen_world(self):\n \"\"\"\n Generate the world. 
Derived classes must implement this method.\n \"\"\"\n\n raise NotImplementedError\n\n def _reward(self):\n \"\"\"\n Default sparse reward computation\n \"\"\"\n\n return 1.0 - 0.2 * (self.step_count / self.max_episode_steps)\n\n def _render_static(self):\n \"\"\"\n Render the static elements of the scene into a display list.\n Called once at the beginning of each episode.\n \"\"\"\n\n # TODO: manage this automatically\n # glIsList\n glDeleteLists(1, 1);\n glNewList(1, GL_COMPILE);\n\n # Light position\n glLightfv(GL_LIGHT0, GL_POSITION, (GLfloat*4)(*self.light_pos + [1]))\n\n # Background/minimum light level\n glLightfv(GL_LIGHT0, GL_AMBIENT, (GLfloat*4)(*self.light_ambient))\n\n # Diffuse light color\n glLightfv(GL_LIGHT0, GL_DIFFUSE, (GLfloat*4)(*self.light_color))\n\n #glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 180)\n #glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0)\n #glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0)\n #glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0)\n #glLightf(GL_LIGHT0, GL_QUADRATIC_ATTENUATION, 0)\n\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n\n glShadeModel(GL_SMOOTH)\n glEnable(GL_COLOR_MATERIAL)\n glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)\n\n # Render the rooms\n glEnable(GL_TEXTURE_2D)\n for room in self.rooms:\n room._render()\n\n # Render the static entities\n for ent in self.entities:\n if ent.is_static:\n ent.render()\n\n glEndList()\n\n def _render_world(\n self,\n frame_buffer,\n render_agent\n ):\n \"\"\"\n Render the world from a given camera position into a frame buffer,\n and produce a numpy image array as output.\n \"\"\"\n\n # Call the display list for the static parts of the environment\n glCallList(1)\n\n # TODO: keep the non-static entities in a different list for efficiency?\n # Render the non-static entities\n for ent in self.entities:\n if not ent.is_static and ent is not self.agent:\n ent.render()\n #ent.draw_bound()\n\n if render_agent:\n self.agent.render()\n\n # Resolve the rendered image into a numpy array\n img = frame_buffer.resolve()\n\n return img\n\n def render_top_view(self, frame_buffer=None):\n \"\"\"\n Render a top view of the whole map (from above)\n \"\"\"\n\n if frame_buffer == None:\n frame_buffer = self.obs_fb\n\n # Switch to the default OpenGL context\n # This is necessary on Linux Nvidia drivers\n self.shadow_window.switch_to()\n\n # Bind the frame buffer before rendering into it\n frame_buffer.bind()\n\n # Clear the color and depth buffers\n glClearColor(*self.sky_color, 1.0)\n glClearDepth(1.0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Scene extents to render\n min_x = self.min_x - 1\n max_x = self.max_x + 1\n min_z = self.min_z - 1\n max_z = self.max_z + 1\n\n width = max_x - min_x\n height = max_z - min_z\n aspect = width / height\n fb_aspect = frame_buffer.width / frame_buffer.height\n\n # Adjust the aspect extents to match the frame buffer aspect\n if aspect > fb_aspect:\n # Want to add to denom, add to height\n new_h = width / fb_aspect\n h_diff = new_h - height\n min_z -= h_diff / 2\n max_z += h_diff / 2\n elif aspect < fb_aspect:\n # Want to add to num, add to width\n new_w = height * fb_aspect\n w_diff = new_w - width\n min_x -= w_diff / 2\n max_x += w_diff / 2\n\n # Set the projection matrix\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(\n min_x,\n max_x,\n -max_z,\n -min_z,\n -100, 100.0\n )\n\n # Setup the camera\n # Y maps to +Z, Z maps to +Y\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n m = [\n 1, 0, 0, 0,\n 0, 0, 1, 0,\n 0, -1, 0, 0,\n 0, 0, 0, 1,\n ]\n 
glLoadMatrixf((GLfloat * len(m))(*m))\n\n        return self._render_world(\n            frame_buffer,\n            render_agent=True\n        )\n\n    def render_obs(self, frame_buffer=None):\n        \"\"\"\n        Render an observation from the point of view of the agent\n        \"\"\"\n\n        if frame_buffer == None:\n            frame_buffer = self.obs_fb\n\n        # Switch to the default OpenGL context\n        # This is necessary on Linux Nvidia drivers\n        self.shadow_window.switch_to()\n\n        # Bind the frame buffer before rendering into it\n        frame_buffer.bind()\n\n        # Clear the color and depth buffers\n        glClearColor(*self.sky_color, 1.0)\n        glClearDepth(1.0)\n        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n        # Set the projection matrix\n        glMatrixMode(GL_PROJECTION)\n        glLoadIdentity()\n        gluPerspective(\n            self.agent.cam_fov_y,\n            frame_buffer.width / float(frame_buffer.height),\n            0.04,\n            100.0\n        )\n\n        # Setup the camera\n        glMatrixMode(GL_MODELVIEW)\n        glLoadIdentity()\n        gluLookAt(\n            # Eye position\n            *self.agent.cam_pos,\n            # Target\n            *(self.agent.cam_pos + self.agent.cam_dir),\n            # Up vector\n            0, 1.0, 0.0\n        )\n\n        return self._render_world(\n            frame_buffer,\n            render_agent=False\n        )\n\n    def render_depth(self, frame_buffer=None):\n        \"\"\"\n        Produce a depth map\n        Values are floating-point, map shape is (H,W,1)\n        Distances are in meters from the observer\n        \"\"\"\n\n        if frame_buffer == None:\n            frame_buffer = self.obs_fb\n\n        # Render the world\n        self.render_obs(frame_buffer)\n\n        return frame_buffer.get_depth_map(0.04, 100.0)\n\n    def get_visible_ents(self):\n        \"\"\"\n        Get a list of visible entities.\n        Uses OpenGL occlusion queries to approximate visibility.\n        :return: set of objects visible to the agent\n        \"\"\"\n\n        # Allocate the occlusion query ids\n        num_ents = len(self.entities)\n        query_ids = (GLuint * num_ents)()\n        glGenQueries(num_ents, query_ids)\n\n        # Switch to the default OpenGL context\n        # This is necessary on Linux Nvidia drivers\n        self.shadow_window.switch_to()\n\n        # Use the small observation frame buffer\n        frame_buffer = self.obs_fb\n\n        # Bind the frame buffer before rendering into it\n        frame_buffer.bind()\n\n        # Clear the color and depth buffers\n        glClearColor(*self.sky_color, 1.0)\n        glClearDepth(1.0)\n        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n        # Set the projection matrix\n        glMatrixMode(GL_PROJECTION)\n        glLoadIdentity()\n        gluPerspective(\n            self.agent.cam_fov_y,\n            frame_buffer.width / float(frame_buffer.height),\n            0.04,\n            100.0\n        )\n\n        # Setup the camera\n        glMatrixMode(GL_MODELVIEW)\n        glLoadIdentity()\n        gluLookAt(\n            # Eye position\n            *self.agent.cam_pos,\n            # Target\n            *(self.agent.cam_pos + self.agent.cam_dir),\n            # Up vector\n            0, 1.0, 0.0\n        )\n\n        # Render the rooms, without texturing\n        glDisable(GL_TEXTURE_2D)\n        for room in self.rooms:\n            room._render()\n\n        # For each entity, draw a small proxy box inside an occlusion query\n        for ent_idx, ent in enumerate(self.entities):\n            if ent is self.agent:\n                continue\n\n            glBeginQuery(GL_ANY_SAMPLES_PASSED, query_ids[ent_idx])\n            pos = ent.pos\n\n            #glColor3f(1, 0, 0)\n            drawBox(\n                x_min=pos[0] - 0.1,\n                x_max=pos[0] + 0.1,\n                y_min=pos[1],\n                y_max=pos[1] + 0.2,\n                z_min=pos[2] - 0.1,\n                z_max=pos[2] + 0.1\n            )\n\n            glEndQuery(GL_ANY_SAMPLES_PASSED)\n\n        vis_objs = set()\n\n        # Get query results\n        for ent_idx, ent in enumerate(self.entities):\n            if ent is self.agent:\n                continue\n\n            visible = (GLuint*1)(1)\n            glGetQueryObjectuiv(query_ids[ent_idx], GL_QUERY_RESULT, visible)\n\n            if visible[0] != 0:\n                vis_objs.add(ent)\n\n        # Free all of the allocated occlusion query ids\n        glDeleteQueries(num_ents, query_ids)\n\n        #img = frame_buffer.resolve()\n        #return img\n\n        return vis_objs\n\n    def render(self, mode='human', close=False, view='agent'):\n        \"\"\"\n        Render the environment for human viewing\n        \"\"\"\n\n        if close:\n            if self.window:\n                self.window.close()\n            return\n\n        # Render the human-view image\n        assert view in ['agent', 'top']\n        if view == 'agent':\n            img = self.render_obs(self.vis_fb)\n        else:\n            img = self.render_top_view(self.vis_fb)\n        img_width = img.shape[1]\n        img_height = img.shape[0]\n\n        if mode == 'rgb_array':\n            return img\n\n        # Render the agent's view\n        obs = self.render_obs()\n        obs_width = obs.shape[1]\n        obs_height = obs.shape[0]\n\n        window_width = img_width + self.obs_disp_width\n        window_height = img_height\n\n        if self.window is None:\n            config = pyglet.gl.Config(double_buffer=True)\n            self.window = pyglet.window.Window(\n                width=window_width,\n                height=window_height,\n                resizable=False,\n                config=config\n            )\n\n        self.window.clear()\n        self.window.switch_to()\n\n        # Bind the default frame buffer\n        glBindFramebuffer(GL_FRAMEBUFFER, 0)\n\n        # Clear the color and depth buffers\n        glClearColor(0, 0, 0, 1.0)\n        glClearDepth(1.0)\n        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n        # Setup orthogonal projection\n        glMatrixMode(GL_PROJECTION)\n        glLoadIdentity()\n        glMatrixMode(GL_MODELVIEW)\n        glLoadIdentity()\n        glOrtho(0, window_width, 0, window_height, 0, 10)\n\n        # Draw the human render to the rendering window\n        img_flip = np.ascontiguousarray(np.flip(img, axis=0))\n        img_data = pyglet.image.ImageData(\n            img_width,\n            img_height,\n            'RGB',\n            img_flip.ctypes.data_as(POINTER(GLubyte)),\n            pitch=img_width * 3,\n        )\n        img_data.blit(\n            0,\n            0,\n            0,\n            width=img_width,\n            height=img_height\n        )\n\n        # Draw the observation\n        obs = np.ascontiguousarray(np.flip(obs, axis=0))\n        obs_data = pyglet.image.ImageData(\n            obs_width,\n            obs_height,\n            'RGB',\n            obs.ctypes.data_as(POINTER(GLubyte)),\n            pitch=obs_width * 3,\n        )\n        obs_data.blit(\n            img_width,\n            img_height - self.obs_disp_height,\n            0,\n            width=self.obs_disp_width,\n            height=self.obs_disp_height\n        )\n\n        # Draw the text label in the window\n        self.text_label.text = \"pos: (%.2f, %.2f, %.2f)\\nangle: %d\\nsteps: %d\" % (\n            *self.agent.pos,\n            int(self.agent.dir * 180 / math.pi) % 360,\n            self.step_count\n        )\n        self.text_label.draw()\n\n        # Force execution of queued commands\n        glFlush()\n\n        # If we are not running the Pyglet event loop,\n        # we have to manually flip the buffers and dispatch events\n        if mode == 'human':\n            self.window.flip()\n            self.window.dispatch_events()\n\n        return img\n",
"import math\nimport numpy as np\nfrom .math import *\nfrom .opengl import *\nfrom .objmesh import ObjMesh\n\n# Map of color names to RGB values\nCOLORS = {\n 'red' : np.array([1.0, 0.0, 0.0]),\n 'green' : np.array([0.0, 1.0, 0.0]),\n 'blue' : np.array([0.0, 0.0, 1.0]),\n 'purple': np.array([0.44, 0.15, 0.76]),\n 'yellow': np.array([1.00, 1.00, 0.00]),\n 'grey' : np.array([0.39, 0.39, 0.39]),\n 'snow'\t: np.array([255., 250., 250.])/255.,\n 'grey'\t: np.array([190., 190., 190.])/255.,\n}\n\n# List of color names, sorted alphabetically\nCOLOR_NAMES = sorted(list(COLORS.keys()))\n\nclass Entity:\n def __init__(self):\n # World position\n # Note: for most entities, the position is at floor level\n self.pos = None\n\n # Direction/orientation angle in radians\n self.dir = None\n\n # Radius for bounding circle/cylinder\n self.radius = 0\n\n # Height of bounding cylinder\n self.height = 0\n\n def randomize(self, params, rng):\n \"\"\"\n Set the domain randomization parameters\n \"\"\"\n pass\n\n def render(self):\n \"\"\"\n Draw the object\n \"\"\"\n raise NotImplementedError\n\n def step(self, delta_time):\n \"\"\"\n Update the state of the object\n \"\"\"\n pass\n\n def draw_bound(self):\n \"\"\"\n Draw the bounding circle\n Used for debugging purposes\n \"\"\"\n\n x, _, z = self.pos\n\n glColor3f(1, 0, 0)\n glBegin(GL_LINES)\n\n for i in range(60):\n a = i * 2 * math.pi / 60\n cx = x + self.radius * math.cos(a)\n cz = z + self.radius * math.sin(a)\n glVertex3f(cx, 0.01, cz)\n\n glEnd()\n\n @property\n def dir_vec(self):\n \"\"\"\n Vector pointing in the direction of forward movement\n \"\"\"\n\n x = math.cos(self.dir)\n z = -math.sin(self.dir)\n return np.array([x, 0, z])\n\n @property\n def right_vec(self):\n \"\"\"\n Vector pointing to the right of the agent\n \"\"\"\n\n x = math.sin(self.dir)\n z = math.cos(self.dir)\n return np.array([x, 0, z])\n\n @property\n def is_static(self):\n \"\"\"\n True for objects that cannot move or animate\n (can be rendered statically)\n \"\"\"\n return False\n\nclass MeshEnt(Entity):\n \"\"\"\n Entity whose appearance is defined by a mesh file\n\n height -- scale the model to this height\n static -- flag indicating this object cannot move\n \"\"\"\n\n def __init__(\n self,\n mesh_name,\n height,\n static=True\n ):\n super().__init__()\n\n self.static = static\n\n # Load the mesh\n self.mesh = ObjMesh.get(mesh_name)\n\n # Get the mesh extents\n sx, sy, sz = self.mesh.max_coords\n\n # Compute the mesh scaling factor\n self.scale = height / sy\n\n # Compute the radius and height\n self.radius = math.sqrt(sx*sx + sz*sz) * self.scale\n self.height = height\n\n def render(self):\n \"\"\"\n Draw the object\n \"\"\"\n\n glPushMatrix()\n glTranslatef(*self.pos)\n glScalef(self.scale, self.scale, self.scale)\n glRotatef(self.dir * 180 / math.pi, 0, 1, 0)\n glColor3f(1, 1, 1)\n self.mesh.render()\n glPopMatrix()\n\n @property\n def is_static(self):\n return self.static\n\nclass ImageFrame(Entity):\n \"\"\"\n Frame to display an image on a wall\n Note: the position is in the middle of the frame, on the wall\n \"\"\"\n\n def __init__(self, pos, dir, tex_name, width, depth=0.05):\n super().__init__()\n\n self.pos = pos\n self.dir = dir\n\n # Load the image to be displayed\n self.tex = Texture.get(tex_name)\n\n self.width = width\n self.depth = depth\n self.height = (float(self.tex.height) / self.tex.width) * self.width\n\n @property\n def is_static(self):\n return True\n\n def render(self):\n \"\"\"\n Draw the object\n \"\"\"\n\n x, y, z = self.pos\n\n # sx is depth\n # 
Frame points towards +sx\n sx = self.depth\n hz = self.width / 2\n hy = self.height / 2\n\n glPushMatrix()\n glTranslatef(*self.pos)\n glRotatef(self.dir * (180/math.pi), 0, 1, 0)\n\n # Bind texture for front\n glColor3f(1, 1, 1)\n glEnable(GL_TEXTURE_2D)\n self.tex.bind()\n\n # Front face, showing image\n glBegin(GL_QUADS)\n glNormal3f(1, 0, 0)\n glTexCoord2f(1, 1)\n glVertex3f(sx, +hy, -hz)\n glTexCoord2f(0, 1)\n glVertex3f(sx, +hy, +hz)\n glTexCoord2f(0, 0)\n glVertex3f(sx, -hy, +hz)\n glTexCoord2f(1, 0)\n glVertex3f(sx, -hy, -hz)\n glEnd()\n\n # Black frame/border\n glDisable(GL_TEXTURE_2D)\n glColor3f(0, 0, 0)\n\n glBegin(GL_QUADS)\n\n # Left\n glNormal3f(0, 0, -1)\n glVertex3f(0 , +hy, -hz)\n glVertex3f(+sx, +hy, -hz)\n glVertex3f(+sx, -hy, -hz)\n glVertex3f(0 , -hy, -hz)\n\n # Right\n glNormal3f(0, 0, 1)\n glVertex3f(+sx, +hy, +hz)\n glVertex3f(0 , +hy, +hz)\n glVertex3f(0 , -hy, +hz)\n glVertex3f(+sx, -hy, +hz)\n\n # Top\n glNormal3f(0, 1, 0)\n glVertex3f(+sx, +hy, +hz)\n glVertex3f(+sx, +hy, -hz)\n glVertex3f(0 , +hy, -hz)\n glVertex3f(0 , +hy, +hz)\n\n # Bottom\n glNormal3f(0, -1, 0)\n glVertex3f(+sx, -hy, -hz)\n glVertex3f(+sx, -hy, +hz)\n glVertex3f(0 , -hy, +hz)\n glVertex3f(0 , -hy, -hz)\n\n glEnd()\n\n glPopMatrix()\n\nclass TextFrame(Entity):\n \"\"\"\n Frame to display text or numbers on a wall\n Note: the position is in the middle of the frame, on the wall\n \"\"\"\n\n def __init__(self, pos, dir, str, height=0.15, depth=0.05):\n super().__init__()\n\n self.pos = pos\n self.dir = dir\n\n self.str = str\n\n self.depth = depth\n self.height = height\n self.width = len(str) * height\n\n @property\n def is_static(self):\n return True\n\n def randomize(self, params, rng):\n self.texs = []\n for ch in self.str:\n try:\n if ch == ' ':\n self.texs.append(None)\n else:\n tex_name = f'chars/ch_0x{ord(ch)}'\n self.texs.append(Texture.get(tex_name, rng))\n except:\n raise 'only alphanumerical characters supported in TextFrame'\n\n def render(self):\n \"\"\"\n Draw the object\n \"\"\"\n\n x, y, z = self.pos\n\n # sx is depth\n # Frame points towards +sx\n sx = 0.05\n hz = self.width / 2\n hy = self.height / 2\n\n glPushMatrix()\n glTranslatef(*self.pos)\n glRotatef(self.dir * (180/math.pi), 0, 1, 0)\n\n # Bind texture for front\n glColor3f(1, 1, 1)\n\n # For each character\n for idx, ch in enumerate(self.str):\n tex = self.texs[idx]\n if tex:\n glEnable(GL_TEXTURE_2D)\n self.texs[idx].bind()\n else:\n glDisable(GL_TEXTURE_2D)\n\n char_width = self.height\n z_0 = hz - char_width * (idx+1)\n z_1 = z_0 + char_width\n\n # Front face, showing image\n glBegin(GL_QUADS)\n glNormal3f(1, 0, 0)\n glTexCoord2f(1, 1)\n glVertex3f(sx, +hy, z_0)\n glTexCoord2f(0, 1)\n glVertex3f(sx, +hy, z_1)\n glTexCoord2f(0, 0)\n glVertex3f(sx, -hy, z_1)\n glTexCoord2f(1, 0)\n glVertex3f(sx, -hy, z_0)\n glEnd()\n\n # Black frame/border\n glDisable(GL_TEXTURE_2D)\n glColor3f(0, 0, 0)\n\n glBegin(GL_QUADS)\n\n # Left\n glNormal3f(0, 0, -1)\n glVertex3f(0 , +hy, -hz)\n glVertex3f(+sx, +hy, -hz)\n glVertex3f(+sx, -hy, -hz)\n glVertex3f(0 , -hy, -hz)\n\n # Right\n glNormal3f(0, 0, 1)\n glVertex3f(+sx, +hy, +hz)\n glVertex3f(0 , +hy, +hz)\n glVertex3f(0 , -hy, +hz)\n glVertex3f(+sx, -hy, +hz)\n\n # Top\n glNormal3f(0, 1, 0)\n glVertex3f(+sx, +hy, +hz)\n glVertex3f(+sx, +hy, -hz)\n glVertex3f(0 , +hy, -hz)\n glVertex3f(0 , +hy, +hz)\n\n # Bottom\n glNormal3f(0, -1, 0)\n glVertex3f(+sx, -hy, -hz)\n glVertex3f(+sx, -hy, +hz)\n glVertex3f(0 , -hy, +hz)\n glVertex3f(0 , -hy, -hz)\n\n glEnd()\n\n glPopMatrix()\n\nclass 
Box(Entity):\n \"\"\"\n Colored box object\n \"\"\"\n\n def __init__(self, color, size=1.0):\n super().__init__()\n\n if type(size) is int or type(size) is float:\n size = np.array([size, size, size])\n size = np.array(size)\n sx, sy, sz = size\n\n self.color = color\n self.size = size\n\n self.radius = math.sqrt(sx*sx + sz*sz)/2\n self.height = sy\n\n def randomize(self, params, rng):\n self.color_vec = COLORS[self.color] + params.sample(rng, 'obj_color_bias')\n self.color_vec = np.clip(self.color_vec, 0, 1)\n\n def render(self):\n \"\"\"\n Draw the object\n \"\"\"\n\n sx, sy, sz = self.size\n\n glDisable(GL_TEXTURE_2D)\n glColor3f(*self.color_vec)\n\n glPushMatrix()\n glTranslatef(*self.pos)\n glRotatef(self.dir * (180/math.pi), 0, 1, 0)\n\n drawBox(\n x_min=-sx/2,\n x_max=+sx/2,\n y_min=0,\n y_max=sy,\n z_min=-sz/2,\n z_max=+sz/2\n )\n\n glPopMatrix()\n\nclass Key(MeshEnt):\n \"\"\"\n Key the agent can pick up, carry, and use to open doors\n \"\"\"\n\n def __init__(self, color):\n assert color in COLOR_NAMES\n super().__init__(\n mesh_name='key_{}'.format(color),\n height=0.35,\n static=False\n )\n\nclass Ball(MeshEnt):\n \"\"\"\n Ball (sphere) the agent can pick up and carry\n \"\"\"\n\n def __init__(self, color, size=1.0):\n assert color in COLOR_NAMES\n super().__init__(\n mesh_name='ball_{}'.format(color),\n height=size,\n static=False\n )\n\nclass Office_desk(MeshEnt):\n \"\"\"\n Office Desk\n \"\"\"\n\n def __init__(self, size=1.0):\n super().__init__(\n mesh_name='office_desk',\n height=size,\n static=False\n )\n\nclass Office_chair(MeshEnt):\n \"\"\"\n Office Chair\n \"\"\"\n\n def __init__(self, size=1.0):\n super().__init__(\n mesh_name='office_chair',\n height=size,\n static=False\n )\n\nclass Potion(MeshEnt):\n \"\"\"\n Potion饮料\n \"\"\"\n\n def __init__(self, size=0.6):\n super().__init__(\n mesh_name='potion',\n height=size,\n static=False\n )\n\nclass Barrier(MeshEnt):\n \"\"\"\n Barrier\n \"\"\"\n\n def __init__(self, size=1.0):\n super().__init__(\n mesh_name='barrier',\n height=size,\n static=False\n )\n\nclass Agent(Entity):\n def __init__(self):\n super().__init__()\n\n # Distance between the camera and the floor\n self.cam_height = 1.5\n\n # Camera up/down angles in degrees\n # Positive angles tilt the camera upwards\n self.cam_pitch = 0\n\n # Vertical field of view in degrees\n self.cam_fov_y = 60\n\n # Bounding cylinder size for the agent\n self.radius = 0.4\n self.height = 1.6\n\n # Object currently being carried by the agent\n self.carrying = None\n\n @property\n def cam_pos(self):\n \"\"\"\n Camera position in 3D space\n \"\"\"\n\n rot_y = gen_rot_matrix(Y_VEC, self.dir)\n cam_disp = np.array([self.cam_fwd_disp, self.cam_height, 0])\n cam_disp = np.dot(cam_disp, rot_y)\n\n return self.pos + cam_disp\n\n @property\n def cam_dir(self):\n \"\"\"\n Camera direction (lookat) vector\n\n Note: this is useful even if just for slight domain\n randomization of camera angle\n \"\"\"\n\n rot_z = gen_rot_matrix(Z_VEC, self.cam_pitch * math.pi/180)\n rot_y = gen_rot_matrix(Y_VEC, self.dir)\n\n dir = np.dot(X_VEC, rot_z)\n dir = np.dot(dir, rot_y)\n\n return dir\n\n def randomize(self, params, rng):\n params.sample_many(rng, self, [\n 'cam_height',\n 'cam_fwd_disp',\n 'cam_pitch',\n 'cam_fov_y',\n ])\n # self.radius = params.sample(rng, 'bot_radius')\n\n def render(self):\n \"\"\"\n Draw the agent\n \"\"\"\n\n # Note: this is currently only used in the top view\n # Eventually, we will want a proper 3D model\n\n p = self.pos + Y_VEC * self.height\n dv = self.dir_vec * 
self.radius\n rv = self.right_vec * self.radius\n\n p0 = p + dv\n p1 = p + 0.75 * (rv - dv)\n p2 = p + 0.75 * (-rv - dv)\n\n glColor3f(1, 0, 0)\n glBegin(GL_TRIANGLES)\n glVertex3f(*p0)\n glVertex3f(*p2)\n glVertex3f(*p1)\n glEnd()\n\n \"\"\"\n glBegin(GL_LINE_STRIP)\n for i in range(20):\n a = (2 * math.pi * i) / 20\n pc = p + dv * math.cos(a) + rv * math.sin(a)\n glVertex3f(*pc)\n glEnd()\n \"\"\"\n\n def step(self, delta_time):\n pass\n"
] | [
[
"numpy.dot",
"numpy.expand_dims",
"numpy.greater",
"numpy.linalg.norm",
"numpy.stack",
"numpy.concatenate",
"numpy.insert",
"numpy.cross",
"numpy.array",
"numpy.flip",
"numpy.sum"
],
[
"numpy.dot",
"numpy.array",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
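Note on the environment code above: place_entity is a rejection-sampling loop. It draws a candidate position inside the room's bounding box, shrunk on all sides by the entity's radius, then keeps it only if the point lies inside the room outline and clears every wall segment and every other entity's bounding circle. A minimal, self-contained sketch of the same pattern follows; the names place_with_rejection and blockers are illustrative, not this repo's API.

    import math
    import random

    def place_with_rejection(radius, lo, hi, blockers, max_tries=100):
        """Draw candidate (x, z) positions until one is collision-free."""
        for _ in range(max_tries):
            # shrink the box by the entity's own radius, as place_entity does
            x = random.uniform(lo + radius, hi - radius)
            z = random.uniform(lo + radius, hi - radius)
            # circle-vs-circle clearance test, like the entity loop in intersect()
            if all(math.hypot(x - bx, z - bz) >= radius + br
                   for bx, bz, br in blockers):
                return x, z
        raise RuntimeError('no free position found')

    # one blocker of radius 1.0 at the centre of a 10x10 floor
    print(place_with_rejection(0.4, 0.0, 10.0, [(5.0, 5.0, 1.0)]))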
Nitin-Mane/dense-ulearn-vos | [
"9e39d359a53a2343522ce5820fdf27223a4ffcb4"
] | [
"datasets/dataloader_infer.py"
] | [
"\"\"\"\nCopyright (c) 2021 TU Darmstadt\nAuthor: Nikita Araslanov <[email protected]>\nLicense: Apache License 2.0\n\"\"\"\n\nimport os\nimport torch\n\nfrom PIL import Image\n\nimport numpy as np\nimport torchvision.transforms as tf\n\nfrom .dataloader_base import DLBase\n\n\nclass DataSeg(DLBase):\n\n def __init__(self, cfg, split, ignore_labels=[], \\\n root=os.path.expanduser('./data'), renorm=False):\n\n super(DataSeg, self).__init__()\n\n self.cfg = cfg\n self.root = root\n self.split = split\n self.ignore_labels = ignore_labels\n self._init_palette(self.cfg.DATASET.NUM_CLASSES)\n\n # train/val/test splits are pre-cut\n split_fn = os.path.join(self.root, self.split + \".txt\")\n assert os.path.isfile(split_fn)\n\n self.sequence_ids = []\n self.sequence_names = []\n def add_sequence(name):\n vlen = len(self.images)\n assert vlen >= cfg.DATASET.VIDEO_LEN, \\\n \"Detected video shorter [{}] than training length [{}]\".format(vlen, \\\n cfg.DATASET.VIDEO_LEN)\n self.sequence_ids.append(vlen)\n self.sequence_names.append(name)\n return vlen\n\n self.images = []\n self.masks = []\n self.flags = []\n\n token = None\n with open(split_fn, \"r\") as lines:\n for line in lines:\n _flag, _image, _mask = line.strip(\"\\n\").split(' ')\n\n # save every frame\n #_flag = 1\n self.flags.append(int(_flag))\n\n _image = os.path.join(cfg.DATASET.ROOT, _image.lstrip('/'))\n assert os.path.isfile(_image), '%s not found' % _image\n\n # each sequence may have a different length\n # do some book-keeping e.g. to ensure we have\n # sequences long enough for subsequent sampling\n _token = _image.split(\"/\")[-2] # parent directory\n \n # sequence ID is in the filename\n #_token = os.path.basename(_image).split(\"_\")[0] \n if token != _token:\n if not token is None:\n add_sequence(token)\n token = _token\n\n self.images.append(_image)\n\n if _mask is None:\n self.masks.append(None)\n else:\n _mask = os.path.join(cfg.DATASET.ROOT, _mask.lstrip('/'))\n #assert os.path.isfile(_mask), '%s not found' % _mask\n self.masks.append(_mask)\n\n # update the last sequence\n # returns the total amount of frames\n add_sequence(token)\n print(\"Loaded {} sequences\".format(len(self.sequence_ids)))\n\n # definint data augmentation:\n print(\"Dataloader: {}\".format(split), \" #\", len(self.images))\n print(\"\\t {}: no augmentation\".format(split))\n\n self.tf = tf.Compose([tf.ToTensor(), tf.Normalize(mean=self.MEAN, std=self.STD)])\n self._num_samples = len(self.images)\n\n def __len__(self):\n return len(self.sequence_ids)\n\n \n def _mask2tensor(self, mask, num_classes=6):\n h,w = mask.shape\n ones = torch.ones(1,h,w)\n zeros = torch.zeros(num_classes,h,w)\n \n max_idx = mask.max()\n assert max_idx < num_classes, \"{} >= {}\".format(max_idx, num_classes)\n return zeros.scatter(0, mask[None, ...], ones)\n \n def denorm(self, image):\n\n if image.dim() == 3:\n assert image.dim() == 3, \"Expected image [CxHxW]\"\n assert image.size(0) == 3, \"Expected RGB image [3xHxW]\"\n\n for t, m, s in zip(image, self.MEAN, self.STD):\n t.mul_(s).add_(m)\n elif image.dim() == 4:\n # batch mode\n assert image.size(1) == 3, \"Expected RGB image [3xHxW]\"\n\n for t, m, s in zip((0,1,2), self.MEAN, self.STD):\n image[:, t, :, :].mul_(s).add_(m)\n\n return image\n\n\n def __getitem__(self, index):\n \n seq_to = self.sequence_ids[index]\n seq_from = 0 if index == 0 else self.sequence_ids[index - 1]\n\n image0 = Image.open(self.images[seq_from])\n w,h = image0.size\n\n images, masks, fns, flags = [], [], [], []\n tracks = 
torch.LongTensor(self.cfg.DATASET.NUM_CLASSES).fill_(-1)\n masks = torch.LongTensor(self.cfg.DATASET.NUM_CLASSES, h, w).zero_()\n known_ids = set()\n\n for t in range(seq_from, seq_to):\n\n t0 = t - seq_from\n image = Image.open(self.images[t]).convert('RGB')\n\n fns.append(os.path.basename(self.images[t].replace(\".jpg\", \"\")))\n flags.append(self.flags[t])\n\n if os.path.isfile(self.masks[t]):\n mask = Image.open(self.masks[t])\n mask = torch.from_numpy(np.array(mask, np.long, copy=False))\n\n unique_ids = np.unique(mask)\n for oid in unique_ids:\n if not oid in known_ids:\n tracks[oid] = t0\n known_ids.add(oid)\n masks[oid] = (mask == oid).long()\n else:\n mask = Image.new('L', image.size)\n\n image = self.tf(image)\n images.append(image)\n\n images = torch.stack(images, 0)\n seq_name = self.sequence_names[index]\n flags = torch.LongTensor(flags)\n\n return images, images, masks, tracks, len(known_ids), fns, flags, seq_name\n"
] | [
[
"torch.LongTensor",
"torch.ones",
"numpy.unique",
"torch.zeros",
"torch.stack",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
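Note on the dataloader above: _mask2tensor one-hot encodes an integer label mask by scattering ones along the channel dimension. The same trick in isolation (mask_to_onehot is a hypothetical name; the scatter call mirrors the one in _mask2tensor):

    import torch

    def mask_to_onehot(mask, num_classes):
        """mask: LongTensor [H, W] of class ids -> [num_classes, H, W] one-hot."""
        h, w = mask.shape
        ones = torch.ones(1, h, w)
        zeros = torch.zeros(num_classes, h, w)
        # for every pixel, write a 1 into the channel given by its class id
        return zeros.scatter(0, mask[None, ...], ones)

    m = torch.tensor([[0, 1], [2, 1]])
    print(mask_to_onehot(m, 3))  # channel c is 1 exactly where mask == c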
Carlosbogo/etna | [
"b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94",
"b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94",
"b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94",
"b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94"
] | [
"etna/analysis/eda_utils.py",
"tests/test_transforms/test_feature_importance_transform.py",
"tests/test_pipeline/test_autoregressive_pipeline.py",
"tests/test_transforms/test_gale_shapley.py"
] | [
"import math\nimport warnings\nfrom itertools import combinations\nfrom typing import TYPE_CHECKING\nfrom typing import Optional\nfrom typing import Sequence\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport statsmodels.api as sm\nfrom matplotlib.ticker import MaxNLocator\nfrom statsmodels.graphics import utils\n\nif TYPE_CHECKING:\n from etna.datasets import TSDataset\n\nplot_acf = sm.graphics.tsa.plot_acf\nplot_pacf = sm.graphics.tsa.plot_pacf\n\n\ndef cross_corr_plot(ts: \"TSDataset\", n_segments: int = 10, maxlags: int = 21, segments: Optional[Sequence] = None):\n \"\"\"\n Cross-correlation plot between multiple timeseries.\n\n Parameters\n ----------\n ts:\n TSDataset with timeseries data\n n_segments:\n number of random segments to plot\n maxlags:\n number of timeseries shifts for cross-correlation\n segments:\n segments to plot\n \"\"\"\n if not segments:\n segments = list(ts.segments)\n segments = np.random.choice(segments, size=min(len(segments), n_segments), replace=False)\n segment_pairs = list(combinations(segments, r=2))\n if len(segment_pairs) == 0:\n raise ValueError(\"There are no pairs to plot! Try set n_segments > 1.\")\n columns_num = min(2, len(segment_pairs))\n rows_num = math.ceil(len(segment_pairs) / columns_num)\n fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)\n ax = ax.ravel()\n fig.suptitle(\"Cross-correlation\", fontsize=16)\n for i, (segment_1, segment_2) in enumerate(segment_pairs):\n df_segment_1 = ts[:, segment_1, :][segment_1]\n df_segment_2 = ts[:, segment_2, :][segment_2]\n fig, axx = utils.create_mpl_ax(ax[i])\n target_1 = df_segment_1.target\n target_2 = df_segment_2.target\n if target_1.dtype == int or target_2.dtype == int:\n warnings.warn(\n \"At least one target column has integer dtype, \"\n \"it is converted to float in order to calculate correlation.\"\n )\n target_1 = target_1.astype(float)\n target_2 = target_2.astype(float)\n lags, level, _, _ = axx.xcorr(x=target_1, y=target_2, maxlags=maxlags)\n ax[i].plot(lags, level, \"o\", markersize=5)\n ax[i].set_title(f\"{segment_1} vs {segment_2}\")\n ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.show()\n\n\ndef sample_acf_plot(ts: \"TSDataset\", n_segments: int = 10, lags: int = 21, segments: Sequence = None):\n \"\"\"\n Autocorrelation plot for multiple timeseries.\n\n Parameters\n ----------\n ts:\n TSDataset with timeseries data\n n_segments:\n number of random segments to plot\n lags:\n number of timeseries shifts for cross-correlation\n segments:\n segments to plot\n\n Notes\n -----\n https://en.wikipedia.org/wiki/Autocorrelation\n \"\"\"\n if not segments:\n segments = sorted(ts.segments)\n\n k = min(n_segments, len(segments))\n columns_num = min(2, k)\n rows_num = math.ceil(k / columns_num)\n fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)\n ax = ax.ravel()\n fig.suptitle(\"Partial Autocorrelation\", fontsize=16)\n for i, name in enumerate(sorted(np.random.choice(segments, size=k, replace=False))):\n df_slice = ts[:, name, :][name]\n plot_acf(x=df_slice[\"target\"].values, ax=ax[i], lags=lags)\n ax[i].set_title(name)\n plt.show()\n\n\ndef sample_pacf_plot(ts: \"TSDataset\", n_segments: int = 10, lags: int = 21, segments: Sequence = None):\n \"\"\"\n Partial autocorrelation plot for multiple timeseries.\n\n Parameters\n ----------\n ts:\n TSDataset with timeseries data\n n_segments:\n number of random segments to 
plot\n lags:\n number of timeseries shifts for cross-correlation\n segments:\n segments to plot\n\n Notes\n -----\n https://en.wikipedia.org/wiki/Partial_autocorrelation_function\n \"\"\"\n if not segments:\n segments = sorted(ts.segments)\n\n k = min(n_segments, len(segments))\n columns_num = min(2, k)\n rows_num = math.ceil(k / columns_num)\n fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)\n ax = ax.ravel()\n fig.suptitle(\"Partial Autocorrelation\", fontsize=16)\n for i, name in enumerate(sorted(np.random.choice(segments, size=k, replace=False))):\n df_slice = ts[:, name, :][name]\n plot_pacf(x=df_slice[\"target\"].values, ax=ax[i], lags=lags)\n ax[i].set_title(name)\n plt.show()\n\n\ndef distribution_plot(\n ts: \"TSDataset\",\n n_segments: int = 10,\n segments: Sequence = None,\n shift: int = 30,\n window: int = 30,\n freq: str = \"1M\",\n n_rows: int = 10,\n):\n \"\"\"Distribution of z-values grouped by segments and time frequency.\n\n ... math:\n mean_{i} = \\\\sum_{j=i-\\\\text{shift}}^{i-\\\\text{shift}+\\\\text{window}} \\\\frac{x_{j}}{\\\\text{window}}\n\n Parameters\n ----------\n ts:\n dataset with timeseries data\n n_segments:\n number of random segments to plot\n segments:\n segments to plot\n shift:\n number of timeseries shifts for statistics calc\n window:\n number of points for statistics calc\n freq:\n group for z_{i}\n n_rows:\n maximum number of rows to plot\n \"\"\"\n df_pd = ts.to_pandas(flatten=True)\n\n if not segments:\n segments = df_pd.segment.unique()\n segments = np.random.choice(segments, size=min(len(segments), n_segments), replace=False)\n df_full = df_pd[df_pd.segment.isin(segments)]\n df_full.loc[:, \"mean\"] = (\n df_full.groupby(\"segment\").target.shift(shift).transform(lambda s: s.rolling(window).mean())\n )\n df_full.loc[:, \"std\"] = df_full.groupby(\"segment\").target.shift(shift).transform(lambda s: s.rolling(window).std())\n df_full = df_full.dropna()\n df_full.loc[:, \"z\"] = (df_full[\"target\"] - df_full[\"mean\"]) / df_full[\"std\"]\n\n grouped_data = df_full.groupby([df_full.timestamp.dt.to_period(freq)])\n columns_num = min(2, len(grouped_data))\n rows_num = min(n_rows, math.ceil(len(grouped_data) / columns_num))\n groups = set(list(grouped_data.groups.keys())[-rows_num * columns_num :])\n fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 7.5 * rows_num), constrained_layout=True, squeeze=False)\n fig.suptitle(f\"Z statistic shift: {shift} window: {window}\", fontsize=16)\n ax = ax.ravel()\n i = 0\n for period, df_slice in grouped_data:\n if period not in groups:\n continue\n sns.boxplot(data=df_slice.sort_values(by=\"segment\"), y=\"z\", x=\"segment\", ax=ax[i], fliersize=False)\n ax[i].set_title(f\"{period}\")\n i += 1\n",
"import pandas as pd\nimport pytest\nfrom catboost import CatBoostRegressor\nfrom numpy.random import RandomState\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import r2_score\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.tree import ExtraTreeRegressor\n\nfrom etna.datasets import TSDataset\nfrom etna.datasets import generate_ar_df\nfrom etna.models import LinearPerSegmentModel\nfrom etna.pipeline import Pipeline\nfrom etna.transforms import SegmentEncoderTransform\nfrom etna.transforms.feature_importance import TreeFeatureSelectionTransform\n\n\[email protected]\ndef ts_with_regressors():\n num_segments = 3\n df = generate_ar_df(\n start_time=\"2020-01-01\", periods=300, ar_coef=[1], sigma=1, n_segments=num_segments, random_seed=0, freq=\"D\"\n )\n\n example_segment = df[\"segment\"].unique()[0]\n timestamp = df[df[\"segment\"] == example_segment][\"timestamp\"]\n df_exog = pd.DataFrame({\"timestamp\": timestamp})\n\n # useless regressors\n num_useless = 12\n df_regressors_useless = generate_ar_df(\n start_time=\"2020-01-01\", periods=300, ar_coef=[1], sigma=1, n_segments=num_useless, random_seed=1, freq=\"D\"\n )\n for i, segment in enumerate(df_regressors_useless[\"segment\"].unique()):\n regressor = df_regressors_useless[df_regressors_useless[\"segment\"] == segment][\"target\"].values\n df_exog[f\"regressor_useless_{i}\"] = regressor\n\n # useful regressors: the same as target but with little noise\n df_regressors_useful = df.copy()\n sampler = RandomState(seed=2).normal\n for i, segment in enumerate(df_regressors_useful[\"segment\"].unique()):\n regressor = df_regressors_useful[df_regressors_useful[\"segment\"] == segment][\"target\"].values\n noise = sampler(scale=0.05, size=regressor.shape)\n df_exog[f\"regressor_useful_{i}\"] = regressor + noise\n\n # construct exog\n classic_exog_list = []\n for segment in df[\"segment\"].unique():\n tmp = df_exog.copy(deep=True)\n tmp[\"segment\"] = segment\n classic_exog_list.append(tmp)\n df_exog_all_segments = pd.concat(classic_exog_list)\n\n # construct TSDataset\n df = df[df[\"timestamp\"] <= timestamp[200]]\n return TSDataset(df=TSDataset.to_dataset(df), df_exog=TSDataset.to_dataset(df_exog_all_segments), freq=\"D\")\n\n\[email protected](\n \"model\",\n [\n DecisionTreeRegressor(random_state=42),\n ExtraTreeRegressor(random_state=42),\n RandomForestRegressor(n_estimators=10, random_state=42),\n ExtraTreesRegressor(n_estimators=10, random_state=42),\n GradientBoostingRegressor(n_estimators=10, random_state=42),\n CatBoostRegressor(iterations=10, random_state=42, silent=True, cat_features=[\"regressor_segment_code\"]),\n ],\n)\[email protected](\"top_k\", [0, 1, 5, 15, 50])\ndef test_selected_top_k_regressors(model, top_k, ts_with_regressors):\n \"\"\"Check that transform selects exactly top_k regressors if where are this much.\"\"\"\n df = ts_with_regressors.to_pandas()\n le_encoder = SegmentEncoderTransform()\n df_encoded = le_encoder.fit_transform(df)\n selector = TreeFeatureSelectionTransform(model=model, top_k=top_k)\n df_selected = selector.fit_transform(df_encoded)\n\n all_regressors = ts_with_regressors.regressors\n all_regressors.append(\"regressor_segment_code\")\n selected_regressors = set()\n for column in df_selected.columns.get_level_values(\"feature\"):\n if column.startswith(\"regressor\"):\n selected_regressors.add(column)\n\n assert len(selected_regressors) == 
min(len(all_regressors), top_k)\n\n\[email protected](\n \"model\",\n [\n DecisionTreeRegressor(random_state=42),\n ExtraTreeRegressor(random_state=42),\n RandomForestRegressor(n_estimators=10, random_state=42),\n ExtraTreesRegressor(n_estimators=10, random_state=42),\n GradientBoostingRegressor(n_estimators=10, random_state=42),\n CatBoostRegressor(iterations=10, random_state=42, silent=True, cat_features=[\"regressor_segment_code\"]),\n ],\n)\[email protected](\"top_k\", [0, 1, 5, 15, 50])\ndef test_retain_values(model, top_k, ts_with_regressors):\n \"\"\"Check that transform doesn't change values of columns.\"\"\"\n df = ts_with_regressors.to_pandas()\n le_encoder = SegmentEncoderTransform()\n df_encoded = le_encoder.fit_transform(df)\n selector = TreeFeatureSelectionTransform(model=model, top_k=top_k)\n df_selected = selector.fit_transform(df_encoded)\n\n for segment in ts_with_regressors.segments:\n for column in df_selected.columns.get_level_values(\"feature\").unique():\n assert (\n df_selected.loc[:, pd.IndexSlice[segment, column]] == df_encoded.loc[:, pd.IndexSlice[segment, column]]\n ).all()\n\n\[email protected](\n \"model\",\n [\n DecisionTreeRegressor(random_state=42),\n ExtraTreeRegressor(random_state=42),\n RandomForestRegressor(n_estimators=10, random_state=42),\n ExtraTreesRegressor(n_estimators=10, random_state=42),\n GradientBoostingRegressor(n_estimators=10, random_state=42),\n CatBoostRegressor(iterations=10, random_state=42, silent=True, cat_features=[\"regressor_segment_code\"]),\n ],\n)\ndef test_fails_negative_top_k(model, ts_with_regressors):\n \"\"\"Check that transform doesn't allow you to set top_k to negative values.\"\"\"\n with pytest.raises(ValueError, match=\"positive integer\"):\n TreeFeatureSelectionTransform(model=model, top_k=-1)\n\n\[email protected](\n \"model\",\n [\n DecisionTreeRegressor(random_state=42),\n ExtraTreeRegressor(random_state=42),\n RandomForestRegressor(n_estimators=10, random_state=42),\n ExtraTreesRegressor(n_estimators=10, random_state=42),\n GradientBoostingRegressor(n_estimators=10, random_state=42),\n CatBoostRegressor(iterations=10, random_state=42, silent=True),\n ],\n)\ndef test_warns_no_regressors(model, example_tsds):\n \"\"\"Check that transform allows you to fit on dataset with no regressors but warns about it.\"\"\"\n df = example_tsds.to_pandas()\n selector = TreeFeatureSelectionTransform(model=model, top_k=3)\n with pytest.warns(UserWarning, match=\"not possible to select regressors\"):\n df_selected = selector.fit_transform(df)\n assert (df == df_selected).all().all()\n\n\[email protected](\n \"model\",\n [\n DecisionTreeRegressor(random_state=42),\n ExtraTreeRegressor(random_state=42),\n RandomForestRegressor(n_estimators=10, random_state=42),\n ExtraTreesRegressor(n_estimators=10, random_state=42),\n GradientBoostingRegressor(n_estimators=10, random_state=42),\n CatBoostRegressor(iterations=700, random_state=42, silent=True, cat_features=[\"regressor_segment_code\"]),\n ],\n)\ndef test_sanity_selected(model, ts_with_regressors):\n \"\"\"Check that transform correctly finds meaningful regressors.\"\"\"\n df = ts_with_regressors.to_pandas()\n le_encoder = SegmentEncoderTransform()\n df_encoded = le_encoder.fit_transform(df)\n selector = TreeFeatureSelectionTransform(model=model, top_k=8)\n df_selected = selector.fit_transform(df_encoded)\n features_columns = df_selected.columns.get_level_values(\"feature\").unique()\n selected_regressors = [column for column in features_columns if column.startswith(\"regressor_\")]\n 
useful_regressors = [column for column in selected_regressors if \"useful\" in column]\n assert len(useful_regressors) == 3\n\n\[email protected](\n \"model\",\n [\n DecisionTreeRegressor(random_state=42),\n ExtraTreeRegressor(random_state=42),\n RandomForestRegressor(n_estimators=10, random_state=42),\n ExtraTreesRegressor(n_estimators=10, random_state=42),\n GradientBoostingRegressor(n_estimators=10, random_state=42),\n CatBoostRegressor(iterations=500, silent=True, random_state=42, cat_features=[\"regressor_segment_code\"]),\n ],\n)\ndef test_sanity_model(model, ts_with_regressors):\n \"\"\"Check that training with this transform can utilize selected regressors.\"\"\"\n ts_train, ts_test = ts_with_regressors.train_test_split(test_size=30)\n le_encoder = SegmentEncoderTransform()\n selector = TreeFeatureSelectionTransform(model=model, top_k=8)\n\n model = LinearPerSegmentModel()\n pipeline = Pipeline(model=model, transforms=[le_encoder, selector], horizon=30)\n pipeline.fit(ts=ts_train)\n ts_forecast = pipeline.forecast()\n\n for segment in ts_forecast.segments:\n test_target = ts_test[:, segment, \"target\"]\n forecasted_target = ts_forecast[:, segment, \"target\"]\n r2 = r2_score(forecasted_target, test_target)\n assert r2 > 0.99\n",
"from copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom etna.datasets import TSDataset\nfrom etna.models import LinearPerSegmentModel\nfrom etna.pipeline import AutoRegressivePipeline\nfrom etna.transforms import DateFlagsTransform\nfrom etna.transforms import LagTransform\nfrom etna.transforms import LinearTrendTransform\n\n\ndef test_fit(example_tsds):\n \"\"\"Test that AutoRegressivePipeline pipeline makes fit without failing.\"\"\"\n model = LinearPerSegmentModel()\n transforms = [LagTransform(in_column=\"target\", lags=[1]), DateFlagsTransform()]\n pipeline = AutoRegressivePipeline(model=model, transforms=transforms, horizon=5, step=1)\n pipeline.fit(example_tsds)\n\n\ndef test_forecast_columns(example_tsds):\n \"\"\"Test that AutoRegressivePipeline generates all the columns.\"\"\"\n original_ts = deepcopy(example_tsds)\n horizon = 5\n\n # make predictions in AutoRegressivePipeline\n model = LinearPerSegmentModel()\n transforms = [LagTransform(in_column=\"target\", lags=[1]), DateFlagsTransform(is_weekend=True)]\n pipeline = AutoRegressivePipeline(model=model, transforms=transforms, horizon=horizon, step=1)\n pipeline.fit(example_tsds)\n forecast_pipeline = pipeline.forecast()\n\n # generate all columns\n original_ts.fit_transform(transforms)\n\n assert set(forecast_pipeline.columns) == set(original_ts.columns)\n\n\ndef test_forecast_one_step(example_tsds):\n \"\"\"Test that AutoRegressivePipeline gets predictions one by one if step is equal to 1.\"\"\"\n original_ts = deepcopy(example_tsds)\n horizon = 5\n\n # make predictions in AutoRegressivePipeline\n model = LinearPerSegmentModel()\n transforms = [LagTransform(in_column=\"target\", lags=[1])]\n pipeline = AutoRegressivePipeline(model=model, transforms=transforms, horizon=horizon, step=1)\n pipeline.fit(example_tsds)\n forecast_pipeline = pipeline.forecast()\n\n # make predictions manually\n df = original_ts.to_pandas()\n original_ts.fit_transform(transforms)\n model = LinearPerSegmentModel()\n model.fit(original_ts)\n for i in range(horizon):\n cur_ts = TSDataset(df, freq=original_ts.freq)\n # these transform don't fit and we can fit_transform them at each step\n cur_ts.transform(transforms)\n cur_forecast_ts = cur_ts.make_future(1)\n cur_future_ts = model.forecast(cur_forecast_ts)\n to_add_df = cur_future_ts.to_pandas()\n df = pd.concat([df, to_add_df[df.columns]])\n\n forecast_manual = TSDataset(df.tail(horizon), freq=original_ts.freq)\n assert np.all(forecast_pipeline[:, :, \"target\"] == forecast_manual[:, :, \"target\"])\n\n\[email protected](\"horizon, step\", ((1, 1), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (20, 1), (20, 2), (20, 3)))\ndef test_forecast_multi_step(example_tsds, horizon, step):\n \"\"\"Test that AutoRegressivePipeline gets correct number of predictions if step is more than 1.\"\"\"\n model = LinearPerSegmentModel()\n transforms = [LagTransform(in_column=\"target\", lags=[step])]\n pipeline = AutoRegressivePipeline(model=model, transforms=transforms, horizon=horizon, step=step)\n pipeline.fit(example_tsds)\n forecast_pipeline = pipeline.forecast()\n\n assert forecast_pipeline.df.shape[0] == horizon\n\n\ndef test_forecast_warning_prediction_intervals(example_tsds):\n \"\"\"Test that AutoRegressivePipeline warns when called with prediction intervals.\"\"\"\n horizon = 5\n step = 1\n model = LinearPerSegmentModel()\n transforms = [LagTransform(in_column=\"target\", lags=[step])]\n pipeline = AutoRegressivePipeline(model=model, transforms=transforms, horizon=horizon, 
step=step)\n pipeline.fit(example_tsds)\n with pytest.warns(UserWarning, match=\"doesn't support prediction intervals\"):\n _ = pipeline.forecast(prediction_interval=True)\n\n\ndef test_forecast_with_fit_transforms(example_tsds):\n \"\"\"Test that AutoRegressivePipeline can work with transforms that need fitting.\"\"\"\n horizon = 5\n\n model = LinearPerSegmentModel()\n transforms = [LagTransform(in_column=\"target\", lags=[1]), LinearTrendTransform(in_column=\"target\")]\n pipeline = AutoRegressivePipeline(model=model, transforms=transforms, horizon=horizon, step=1)\n pipeline.fit(example_tsds)\n pipeline.forecast()\n",
"from typing import Dict\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom etna.analysis.feature_relevance import StatisticsRelevanceTable\nfrom etna.datasets import TSDataset\nfrom etna.datasets import generate_ar_df\nfrom etna.datasets import generate_periodic_df\nfrom etna.transforms.gale_shapley import BaseGaleShapley\nfrom etna.transforms.gale_shapley import GaleShapleyFeatureSelectionTransform\nfrom etna.transforms.gale_shapley import GaleShapleyMatcher\nfrom etna.transforms.gale_shapley import RegressorGaleShapley\nfrom etna.transforms.gale_shapley import SegmentGaleShapley\n\n\[email protected]\ndef ts_with_large_regressors_number(random_seed) -> TSDataset:\n df = generate_periodic_df(periods=100, start_time=\"2020-01-01\", n_segments=3, period=7, scale=10)\n\n exog_df = generate_periodic_df(periods=150, start_time=\"2020-01-01\", n_segments=3, period=7).rename(\n {\"target\": \"regressor_1\"}, axis=1\n )\n for i in range(1, 4):\n tmp = generate_periodic_df(periods=150, start_time=\"2020-01-01\", n_segments=3, period=7)\n tmp[\"target\"] += np.random.uniform(low=-i / 5, high=i / 5, size=(450,))\n exog_df = exog_df.merge(tmp.rename({\"target\": f\"regressor_{i + 1}\"}, axis=1), on=[\"timestamp\", \"segment\"])\n for i in range(4, 8):\n tmp = generate_ar_df(periods=150, start_time=\"2020-01-01\", n_segments=3, ar_coef=[1], random_seed=i)\n exog_df = exog_df.merge(tmp.rename({\"target\": f\"regressor_{i + 1}\"}, axis=1), on=[\"timestamp\", \"segment\"])\n\n ts = TSDataset(df=TSDataset.to_dataset(df), freq=\"D\", df_exog=TSDataset.to_dataset(exog_df))\n return ts\n\n\[email protected]\ndef relevance_matrix() -> pd.DataFrame:\n table = pd.DataFrame({\"regressor_1\": [1, 2, 3, 4], \"regressor_2\": [4, 1, 5, 2], \"regressor_3\": [2, 4, 1, 3]})\n table.index = [\"segment_1\", \"segment_2\", \"segment_3\", \"segment_4\"]\n return table\n\n\[email protected]\ndef base_gale_shapley_player() -> BaseGaleShapley:\n base = BaseGaleShapley(name=\"regressor_1\", ranked_candidates=[\"segment_1\", \"segment_3\", \"segment_2\", \"segment_4\"])\n return base\n\n\[email protected]\ndef regressor() -> RegressorGaleShapley:\n reg = RegressorGaleShapley(\n name=\"regressor_1\", ranked_candidates=[\"segment_1\", \"segment_3\", \"segment_2\", \"segment_4\"]\n )\n return reg\n\n\[email protected]\ndef segment() -> SegmentGaleShapley:\n segment = SegmentGaleShapley(\n name=\"segment_1\", ranked_candidates=[\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"]\n )\n return segment\n\n\[email protected]\ndef matcher() -> GaleShapleyMatcher:\n segments = [\n SegmentGaleShapley(\n name=\"segment_1\",\n ranked_candidates=[\"regressor_1\", \"regressor_2\", \"regressor_3\"],\n ),\n SegmentGaleShapley(\n name=\"segment_2\",\n ranked_candidates=[\"regressor_1\", \"regressor_3\", \"regressor_2\"],\n ),\n SegmentGaleShapley(\n name=\"segment_3\",\n ranked_candidates=[\"regressor_2\", \"regressor_3\", \"regressor_1\"],\n ),\n ]\n regressors = [\n RegressorGaleShapley(\n name=\"regressor_1\",\n ranked_candidates=[\"segment_3\", \"segment_1\", \"segment_2\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_2\",\n ranked_candidates=[\"segment_2\", \"segment_3\", \"segment_1\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_3\",\n ranked_candidates=[\"segment_1\", \"segment_2\", \"segment_3\"],\n ),\n ]\n gsh = GaleShapleyMatcher(segments=segments, regressors=regressors)\n return gsh\n\n\[email protected]\ndef relevance_matrix_big() -> pd.DataFrame:\n matrix = 
np.array([[1, 2, 3, 4, 5, 6, 7], [6, 1, 3, 4, 7, 5, 2], [1, 5, 4, 3, 2, 7, 6]])\n table = pd.DataFrame(\n matrix,\n index=[\"segment_1\", \"segment_2\", \"segment_3\"],\n columns=[\n \"regressor_1\",\n \"regressor_2\",\n \"regressor_3\",\n \"regressor_4\",\n \"regressor_5\",\n \"regressor_6\",\n \"regressor_7\",\n ],\n )\n return table\n\n\[email protected](\n \"ascending,expected\",\n (\n (\n True,\n {\n \"segment_1\": [\"regressor_1\", \"regressor_3\", \"regressor_2\"],\n \"segment_2\": [\"regressor_2\", \"regressor_1\", \"regressor_3\"],\n \"segment_3\": [\"regressor_3\", \"regressor_1\", \"regressor_2\"],\n \"segment_4\": [\"regressor_2\", \"regressor_3\", \"regressor_1\"],\n },\n ),\n (\n False,\n {\n \"segment_1\": [\"regressor_2\", \"regressor_3\", \"regressor_1\"],\n \"segment_2\": [\"regressor_3\", \"regressor_1\", \"regressor_2\"],\n \"segment_3\": [\"regressor_2\", \"regressor_1\", \"regressor_3\"],\n \"segment_4\": [\"regressor_1\", \"regressor_3\", \"regressor_2\"],\n },\n ),\n ),\n)\ndef test_get_ranked_list(relevance_matrix: pd.DataFrame, ascending: bool, expected: Dict[str, List[str]]):\n result = GaleShapleyFeatureSelectionTransform._get_ranked_list(table=relevance_matrix, ascending=ascending)\n for key in expected.keys():\n assert key in result\n assert result[key] == expected[key]\n\n\[email protected](\n \"ascending,expected\",\n (\n (\n True,\n {\n \"regressor_1\": [\"segment_1\", \"segment_2\", \"segment_3\", \"segment_4\"],\n \"regressor_2\": [\"segment_2\", \"segment_4\", \"segment_1\", \"segment_3\"],\n \"regressor_3\": [\"segment_3\", \"segment_1\", \"segment_4\", \"segment_2\"],\n },\n ),\n (\n False,\n {\n \"regressor_1\": [\"segment_4\", \"segment_3\", \"segment_2\", \"segment_1\"],\n \"regressor_2\": [\"segment_3\", \"segment_1\", \"segment_4\", \"segment_2\"],\n \"regressor_3\": [\"segment_2\", \"segment_4\", \"segment_1\", \"segment_3\"],\n },\n ),\n ),\n)\ndef test_get_ranked_list_regressors(relevance_matrix: pd.DataFrame, ascending: bool, expected: Dict[str, List[str]]):\n result = GaleShapleyFeatureSelectionTransform._get_ranked_list(table=relevance_matrix.T, ascending=ascending)\n for key in expected.keys():\n assert key in result\n assert result[key] == expected[key]\n\n\[email protected](\n \"top_k,n_segments,n_regressors,expected\",\n (\n (20, 10, 50, 2),\n (27, 10, 40, 3),\n (15, 4, 16, 4),\n (7, 10, 50, 1),\n (30, 5, 20, 1),\n ),\n)\ndef test_compute_gale_shapley_steps_number(top_k: int, n_segments: int, n_regressors: int, expected: int):\n result = GaleShapleyFeatureSelectionTransform._compute_gale_shapley_steps_number(\n top_k=top_k, n_segments=n_segments, n_regressors=n_regressors\n )\n assert result == expected\n\n\[email protected](\n \"ranked_regressors,regressors_to_drop,expected\",\n (\n (\n {\n \"segment_1\": [\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n \"segment_2\": [\"regressor_3\", \"regressor_2\", \"regressor_1\", \"regressor_4\"],\n \"segment_3\": [\"regressor_4\", \"regressor_3\", \"regressor_1\", \"regressor_2\"],\n },\n [\"regressor_2\", \"regressor_3\"],\n {\n \"segment_1\": [\"regressor_1\", \"regressor_4\"],\n \"segment_2\": [\"regressor_1\", \"regressor_4\"],\n \"segment_3\": [\"regressor_4\", \"regressor_1\"],\n },\n ),\n (\n {\n \"segment_1\": [\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n \"segment_2\": [\"regressor_3\", \"regressor_2\", \"regressor_1\", \"regressor_4\"],\n \"segment_3\": [\"regressor_4\", \"regressor_3\", \"regressor_1\", \"regressor_2\"],\n },\n 
[\"regressor_2\", \"regressor_3\", \"regressor_1\", \"regressor_4\"],\n {\n \"segment_1\": [],\n \"segment_2\": [],\n \"segment_3\": [],\n },\n ),\n ),\n)\ndef test_gale_shapley_transform_update_ranking_list(\n ranked_regressors: Dict[str, List[str]], regressors_to_drop: List[str], expected: Dict[str, List[str]]\n):\n result = GaleShapleyFeatureSelectionTransform._update_ranking_list(\n segment_regressors_ranking=ranked_regressors, regressors_to_drop=regressors_to_drop\n )\n for key in result:\n assert result[key] == expected[key]\n\n\ndef test_base_update_segment(base_gale_shapley_player: BaseGaleShapley):\n base_gale_shapley_player.update_tmp_match(\"segment_2\")\n assert base_gale_shapley_player.tmp_match == \"segment_2\"\n assert base_gale_shapley_player.tmp_match_rank == 2\n\n\ndef test_regressor_check_segment(regressor: RegressorGaleShapley):\n assert regressor.check_segment(\"segment_4\")\n regressor.update_tmp_match(\"segment_2\")\n assert not regressor.check_segment(\"segment_4\")\n assert regressor.check_segment(\"segment_1\")\n\n\ndef test_segment_get_next_candidate(segment: SegmentGaleShapley):\n assert segment.get_next_candidate() == \"regressor_1\"\n segment.update_tmp_match(\"regressor_1\")\n assert segment.get_next_candidate() == \"regressor_2\"\n\n\ndef test_gale_shapley_matcher_match(matcher: GaleShapleyMatcher):\n segment = matcher.segments[0]\n regressor = matcher.regressors[0]\n assert segment.tmp_match is None\n assert segment.is_available\n assert regressor.tmp_match is None\n assert regressor.is_available\n matcher.match(segment=segment, regressor=regressor)\n assert segment.tmp_match == regressor.name\n assert segment.tmp_match_rank == 0\n assert not segment.is_available\n assert regressor.tmp_match == segment.name\n assert regressor.tmp_match_rank == 1\n assert not regressor.is_available\n\n\ndef test_gale_shapley_matcher_break_match(matcher: GaleShapleyMatcher):\n segment = matcher.segments[0]\n regressor = matcher.regressors[0]\n assert segment.tmp_match is None\n assert segment.is_available\n assert regressor.tmp_match is None\n assert regressor.is_available\n matcher.match(segment=segment, regressor=regressor)\n matcher.break_match(segment=segment, regressor=regressor)\n assert segment.tmp_match is None\n assert segment.is_available\n assert regressor.tmp_match is None\n assert regressor.is_available\n\n\[email protected](\n \"segments,regressors,expected\",\n (\n (\n [\n SegmentGaleShapley(\n name=\"segment_1\",\n ranked_candidates=[\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n ),\n SegmentGaleShapley(\n name=\"segment_2\",\n ranked_candidates=[\"regressor_1\", \"regressor_3\", \"regressor_2\", \"regressor_4\"],\n ),\n SegmentGaleShapley(\n name=\"segment_3\",\n ranked_candidates=[\"regressor_2\", \"regressor_4\", \"regressor_1\", \"regressor_3\"],\n ),\n SegmentGaleShapley(\n name=\"segment_4\",\n ranked_candidates=[\"regressor_3\", \"regressor_1\", \"regressor_4\", \"regressor_2\"],\n ),\n ],\n [\n RegressorGaleShapley(\n name=\"regressor_1\",\n ranked_candidates=[\"segment_2\", \"segment_1\", \"segment_3\", \"segment_4\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_2\",\n ranked_candidates=[\"segment_1\", \"segment_2\", \"segment_3\", \"segment_4\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_3\",\n ranked_candidates=[\"segment_3\", \"segment_2\", \"segment_4\", \"segment_1\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_4\",\n ranked_candidates=[\"segment_3\", \"segment_1\", \"segment_4\", \"segment_2\"],\n ),\n ],\n 
{\n \"segment_1\": \"regressor_2\",\n \"segment_2\": \"regressor_1\",\n \"segment_3\": \"regressor_4\",\n \"segment_4\": \"regressor_3\",\n },\n ),\n (\n [\n SegmentGaleShapley(\n name=\"segment_1\",\n ranked_candidates=[\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n ),\n SegmentGaleShapley(\n name=\"segment_2\",\n ranked_candidates=[\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n ),\n SegmentGaleShapley(\n name=\"segment_3\",\n ranked_candidates=[\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n ),\n SegmentGaleShapley(\n name=\"segment_4\",\n ranked_candidates=[\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n ),\n ],\n [\n RegressorGaleShapley(\n name=\"regressor_1\",\n ranked_candidates=[\"segment_2\", \"segment_1\", \"segment_3\", \"segment_4\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_2\",\n ranked_candidates=[\"segment_1\", \"segment_2\", \"segment_3\", \"segment_4\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_3\",\n ranked_candidates=[\"segment_3\", \"segment_2\", \"segment_4\", \"segment_1\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_4\",\n ranked_candidates=[\"segment_3\", \"segment_1\", \"segment_4\", \"segment_2\"],\n ),\n ],\n {\n \"segment_1\": \"regressor_2\",\n \"segment_2\": \"regressor_1\",\n \"segment_3\": \"regressor_3\",\n \"segment_4\": \"regressor_4\",\n },\n ),\n (\n [\n SegmentGaleShapley(\n name=\"segment_1\",\n ranked_candidates=[\"regressor_1\", \"regressor_5\", \"regressor_2\", \"regressor_4\", \"regressor_3\"],\n ),\n SegmentGaleShapley(\n name=\"segment_2\",\n ranked_candidates=[\"regressor_5\", \"regressor_2\", \"regressor_3\", \"regressor_4\", \"regressor_1\"],\n ),\n SegmentGaleShapley(\n name=\"segment_3\",\n ranked_candidates=[\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\", \"regressor_5\"],\n ),\n ],\n [\n RegressorGaleShapley(\n name=\"regressor_1\",\n ranked_candidates=[\"segment_3\", \"segment_1\", \"segment_2\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_2\",\n ranked_candidates=[\"segment_3\", \"segment_2\", \"segment_1\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_3\",\n ranked_candidates=[\"segment_3\", \"segment_1\", \"segment_2\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_4\",\n ranked_candidates=[\"segment_1\", \"segment_2\", \"segment_3\"],\n ),\n RegressorGaleShapley(\n name=\"regressor_5\",\n ranked_candidates=[\"segment_1\", \"segment_3\", \"segment_2\"],\n ),\n ],\n {\n \"segment_1\": \"regressor_5\",\n \"segment_2\": \"regressor_2\",\n \"segment_3\": \"regressor_1\",\n },\n ),\n ),\n)\ndef test_gale_shapley_result(\n segments: List[SegmentGaleShapley],\n regressors: List[RegressorGaleShapley],\n expected: Dict[str, str],\n):\n matcher = GaleShapleyMatcher(segments=segments, regressors=regressors)\n matches = matcher()\n for k, v in expected.items():\n assert k in matches\n assert matches[k] == v\n\n\[email protected](\n \"segment_regressor_ranking,regressor_segments_ranking,expected\",\n (\n (\n {\n \"segment_1\": [\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n \"segment_2\": [\"regressor_1\", \"regressor_3\", \"regressor_2\", \"regressor_4\"],\n \"segment_3\": [\"regressor_2\", \"regressor_4\", \"regressor_1\", \"regressor_3\"],\n \"segment_4\": [\"regressor_3\", \"regressor_1\", \"regressor_4\", \"regressor_2\"],\n },\n {\n \"regressor_1\": [\"segment_2\", \"segment_1\", \"segment_3\", \"segment_4\"],\n \"regressor_2\": [\"segment_1\", \"segment_2\", \"segment_3\", 
\"segment_4\"],\n \"regressor_3\": [\"segment_3\", \"segment_2\", \"segment_4\", \"segment_1\"],\n \"regressor_4\": [\"segment_3\", \"segment_1\", \"segment_4\", \"segment_2\"],\n },\n {\n \"segment_1\": \"regressor_2\",\n \"segment_2\": \"regressor_1\",\n \"segment_3\": \"regressor_4\",\n \"segment_4\": \"regressor_3\",\n },\n ),\n (\n {\n \"segment_1\": [\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n \"segment_2\": [\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n \"segment_3\": [\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n \"segment_4\": [\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\"],\n },\n {\n \"regressor_1\": [\"segment_2\", \"segment_1\", \"segment_3\", \"segment_4\"],\n \"regressor_2\": [\"segment_1\", \"segment_2\", \"segment_3\", \"segment_4\"],\n \"regressor_3\": [\"segment_3\", \"segment_2\", \"segment_4\", \"segment_1\"],\n \"regressor_4\": [\"segment_3\", \"segment_1\", \"segment_4\", \"segment_2\"],\n },\n {\n \"segment_1\": \"regressor_2\",\n \"segment_2\": \"regressor_1\",\n \"segment_3\": \"regressor_3\",\n \"segment_4\": \"regressor_4\",\n },\n ),\n (\n {\n \"segment_1\": [\"regressor_1\", \"regressor_5\", \"regressor_2\", \"regressor_4\", \"regressor_3\"],\n \"segment_2\": [\"regressor_5\", \"regressor_2\", \"regressor_3\", \"regressor_4\", \"regressor_1\"],\n \"segment_3\": [\"regressor_1\", \"regressor_2\", \"regressor_3\", \"regressor_4\", \"regressor_5\"],\n },\n {\n \"regressor_1\": [\"segment_3\", \"segment_1\", \"segment_2\"],\n \"regressor_2\": [\"segment_3\", \"segment_2\", \"segment_1\"],\n \"regressor_3\": [\"segment_3\", \"segment_1\", \"segment_2\"],\n \"regressor_4\": [\"segment_1\", \"segment_2\", \"segment_3\"],\n \"regressor_5\": [\"segment_1\", \"segment_3\", \"segment_2\"],\n },\n {\n \"segment_1\": \"regressor_5\",\n \"segment_2\": \"regressor_2\",\n \"segment_3\": \"regressor_1\",\n },\n ),\n ),\n)\ndef test_gale_shapley_transform_gale_shapley_iteration(\n segment_regressor_ranking: Dict[str, List[str]],\n regressor_segments_ranking: Dict[str, List[str]],\n expected: Dict[str, str],\n):\n GaleShapleyFeatureSelectionTransform._gale_shapley_iteration(\n segment_regressors_ranking=segment_regressor_ranking, regressor_segments_ranking=regressor_segments_ranking\n )\n\n\[email protected](\n \"matches,n,greater_is_better,expected\",\n (\n (\n {\n \"segment_1\": \"regressor_4\",\n \"segment_2\": \"regressor_7\",\n \"segment_3\": \"regressor_5\",\n },\n 2,\n False,\n [\"regressor_5\", \"regressor_7\"],\n ),\n (\n {\n \"segment_1\": \"regressor_4\",\n \"segment_2\": \"regressor_7\",\n \"segment_3\": \"regressor_5\",\n },\n 1,\n True,\n [\"regressor_4\"],\n ),\n (\n {\n \"segment_1\": \"regressor_3\",\n \"segment_2\": \"regressor_2\",\n \"segment_3\": \"regressor_1\",\n },\n 2,\n False,\n [\"regressor_1\", \"regressor_2\"],\n ),\n (\n {\n \"segment_1\": \"regressor_3\",\n \"segment_2\": \"regressor_2\",\n \"segment_3\": \"regressor_1\",\n },\n 3,\n False,\n [\"regressor_1\", \"regressor_2\", \"regressor_3\"],\n ),\n ),\n)\ndef test_gale_shapley_transform_process_last_step(\n matches: Dict[str, str], n: int, greater_is_better: bool, expected: List[str], relevance_matrix_big: pd.DataFrame\n):\n result = GaleShapleyFeatureSelectionTransform._process_last_step(\n matches=matches, relevance_table=relevance_matrix_big, n=n, greater_is_better=greater_is_better\n )\n assert sorted(result) == sorted(expected)\n\n\[email protected](\"use_rank\", (True, False))\[email 
protected](\"top_k\", (2, 3, 5, 6, 7))\ndef test_gale_shapley_transform_fit(ts_with_large_regressors_number: TSDataset, top_k: int, use_rank: bool):\n df = ts_with_large_regressors_number.df\n transform = GaleShapleyFeatureSelectionTransform(\n relevance_table=StatisticsRelevanceTable(), top_k=top_k, use_rank=use_rank\n )\n transform.fit(df=df)\n\n\ndef test_gale_shapley_transform_fit_transform(ts_with_large_regressors_number: TSDataset):\n df = ts_with_large_regressors_number.df\n transform = GaleShapleyFeatureSelectionTransform(\n relevance_table=StatisticsRelevanceTable(), top_k=5, use_rank=False\n )\n transformed = transform.fit_transform(df=df)\n assert set(transformed.columns.get_level_values(\"feature\")) == {\n \"target\",\n \"regressor_1\",\n \"regressor_2\",\n \"regressor_3\",\n \"regressor_4\",\n \"regressor_5\",\n }\n"
] | [
[
"matplotlib.ticker.MaxNLocator",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.random.choice"
],
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.tree.ExtraTreeRegressor",
"pandas.concat",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.metrics.r2_score",
"pandas.DataFrame",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.ensemble.ExtraTreesRegressor",
"numpy.random.RandomState"
],
[
"numpy.all",
"pandas.concat"
],
[
"numpy.random.uniform",
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
TimoleonLatinopoulos/MortalKombatOpenAI | [
"59dc89d1f50dd74690859e5e1fa18701a5246382"
] | [
"DDQN.py"
] | [
"import tensorflow as tf\nfrom keras.activations import relu\nfrom keras.initializers import VarianceScaling\nfrom keras.layers import Dense, Conv2D, Flatten\nfrom keras.losses import logcosh\n\n\nclass DDQN:\n \"\"\" Implements a Dueling Dual Deep Q-Network based on the frames of the Retro Environment \"\"\"\n\n def __init__(self, n_actions, frame_height=63, frame_width=113, stacked_frames=4, learning_rate=0.00001):\n self.n_actions = n_actions\n self.frame_height = frame_height\n self.frame_width = frame_width\n self.stacked_frames = stacked_frames\n self.learning_rate = learning_rate\n\n self.input = tf.placeholder(shape=[None, self.frame_height, self.frame_width, self.stacked_frames],\n dtype=tf.float32)\n self.input = self.input / 255\n\n # Convolutional layers\n self.conv1 = self.conv_layer(self.input, 32, [8, 8], 4, 'conv1')\n self.conv2 = self.conv_layer(self.conv1, 64, [4, 4], 2, 'conv2')\n self.conv3 = self.conv_layer(self.conv2, 64, [3, 3], 1, 'conv3')\n self.flat = Flatten()(self.conv3)\n self.dense1 = self.dense_layer(self.flat, 512, 'dense1', relu)\n\n # Splitting into value and advantage streams\n self.v_stream, self.a_stream = tf.split(self.dense1, 2, 1)\n self.value = self.dense_layer(self.v_stream, 1, 'value')\n self.advantage = self.dense_layer(self.a_stream, self.n_actions, 'advantage')\n\n # Getting Q-values from value and advantage streams\n self.q_values = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))\n self.prediction = tf.argmax(self.q_values, 1)\n\n # targetQ according to Bellman equation\n self.target_q = tf.placeholder(shape=[None], dtype=tf.float32)\n self.action = tf.placeholder(shape=[None], dtype=tf.uint8)\n self.action_one_hot = tf.one_hot(self.action, self.n_actions, dtype=tf.float32)\n self.Q = tf.reduce_sum(tf.multiply(self.q_values, self.action_one_hot), axis=1)\n\n # Parameter updates\n self.error = logcosh(self.target_q, self.Q)\n self.loss = tf.reduce_mean(self.error)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n self.update = self.optimizer.minimize(self.loss)\n\n @staticmethod\n def conv_layer(_inputs, _filters, _kernel_size, _strides, _name):\n return Conv2D(filters=_filters, kernel_size=_kernel_size, strides=_strides,\n kernel_initializer=VarianceScaling(scale=2.0), padding=\"valid\",\n activation=relu, use_bias=False, name=_name)(_inputs)\n\n @staticmethod\n def dense_layer(_inputs, _units, _name, _activation=None):\n return Dense(activation=_activation, units=_units,\n kernel_initializer=VarianceScaling(scale=2.0), name=_name)(_inputs)\n\n\nclass TargetNetworkUpdater:\n \"\"\" Updates the variables and the weights of the target network based on the main network \"\"\"\n\n def __init__(self, main_vars, target_vars):\n self.main_vars = main_vars\n self.target_vars = target_vars\n\n def update_target_vars(self):\n update_ops = []\n for i, var in enumerate(self.main_vars):\n copy_op = self.target_vars[i].assign(var.value())\n update_ops.append(copy_op)\n return update_ops\n\n def update_networks(self, sess):\n update_ops = self.update_target_vars()\n for copy_op in update_ops:\n sess.run(copy_op)\n"
] | [
[
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.argmax",
"tensorflow.placeholder",
"tensorflow.one_hot",
"tensorflow.train.AdamOptimizer",
"tensorflow.split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
meokz/d3rlpy | [
"40504e2d8b424547558ab82786c523e8f4626a82",
"40504e2d8b424547558ab82786c523e8f4626a82"
] | [
"d3rlpy/models/torch/encoders.py",
"d3rlpy/metrics/comparer.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef _create_activation(activation_type):\n if activation_type == 'relu':\n return torch.relu\n elif activation_type == 'swish':\n return lambda x: x * torch.sigmoid(x)\n raise ValueError('invalid activation_type.')\n\n\ndef create_encoder(observation_shape,\n action_size=None,\n use_batch_norm=False,\n discrete_action=False,\n activation_type='relu',\n **kwargs):\n\n activation = _create_activation(activation_type)\n\n if len(observation_shape) == 3:\n # pixel input\n if action_size is not None:\n return PixelEncoderWithAction(observation_shape,\n action_size,\n use_batch_norm=use_batch_norm,\n discrete_action=discrete_action,\n activation=activation,\n **kwargs)\n return PixelEncoder(observation_shape,\n use_batch_norm=use_batch_norm,\n activation=activation,\n **kwargs)\n elif len(observation_shape) == 1:\n # vector input\n if action_size is not None:\n return VectorEncoderWithAction(observation_shape,\n action_size,\n use_batch_norm=use_batch_norm,\n discrete_action=discrete_action,\n activation=activation,\n **kwargs)\n return VectorEncoder(observation_shape,\n use_batch_norm=use_batch_norm,\n activation=activation,\n **kwargs)\n else:\n raise ValueError('observation_shape must be 1d or 3d.')\n\n\nclass PixelEncoder(nn.Module):\n def __init__(self,\n observation_shape,\n filters=None,\n feature_size=None,\n use_batch_norm=False,\n activation=torch.relu):\n super().__init__()\n\n # default architecture is based on Nature DQN paper.\n if filters is None:\n filters = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]\n if feature_size is None:\n feature_size = 512\n\n self.observation_shape = observation_shape\n self.use_batch_norm = use_batch_norm\n self.activation = activation\n self.feature_size = feature_size\n\n # convolutional layers\n in_channels = [observation_shape[0]] + [f[0] for f in filters[:-1]]\n self.convs = nn.ModuleList()\n self.conv_bns = nn.ModuleList()\n for in_channel, f in zip(in_channels, filters):\n out_channel, kernel_size, stride = f\n conv = nn.Conv2d(in_channel,\n out_channel,\n kernel_size=kernel_size,\n stride=stride)\n self.convs.append(conv)\n\n if use_batch_norm:\n self.conv_bns.append(nn.BatchNorm2d(out_channel))\n\n # last dense layer\n self.fc = nn.Linear(self._get_linear_input_size(), feature_size)\n if use_batch_norm:\n self.fc_bn = nn.BatchNorm1d(feature_size)\n\n def _get_linear_input_size(self):\n x = torch.rand((1, ) + self.observation_shape)\n with torch.no_grad():\n return self._conv_encode(x).view(1, -1).shape[1]\n\n def _conv_encode(self, x):\n h = x\n for i in range(len(self.convs)):\n h = self.activation(self.convs[i](h))\n if self.use_batch_norm:\n h = self.conv_bns[i](h)\n return h\n\n def forward(self, x):\n h = self._conv_encode(x)\n\n h = self.activation(self.fc(h.view(h.shape[0], -1)))\n if self.use_batch_norm:\n h = self.fc_bn(h)\n\n return h\n\n\nclass PixelEncoderWithAction(PixelEncoder):\n def __init__(self,\n observation_shape,\n action_size,\n filters=None,\n feature_size=None,\n use_batch_norm=False,\n discrete_action=False,\n activation=torch.relu):\n self.action_size = action_size\n self.discrete_action = discrete_action\n super().__init__(observation_shape, filters, feature_size,\n use_batch_norm, activation)\n\n def _get_linear_input_size(self):\n size = super()._get_linear_input_size()\n return size + self.action_size\n\n def forward(self, x, action):\n h = self._conv_encode(x)\n\n if self.discrete_action:\n action = F.one_hot(action.view(-1).long(),\n 
num_classes=self.action_size).float()\n\n        # concat feature and action\n        h = torch.cat([h.view(h.shape[0], -1), action], dim=1)\n        h = self.activation(self.fc(h))\n        if self.use_batch_norm:\n            h = self.fc_bn(h)\n\n        return h\n\n\nclass VectorEncoder(nn.Module):\n    def __init__(self,\n                 observation_shape,\n                 hidden_units=None,\n                 use_batch_norm=False,\n                 activation=torch.relu):\n        super().__init__()\n        self.observation_shape = observation_shape\n\n        if hidden_units is None:\n            hidden_units = [256, 256]\n\n        self.use_batch_norm = use_batch_norm\n        self.feature_size = hidden_units[-1]\n        self.activation = activation\n\n        in_units = [observation_shape[0]] + hidden_units[:-1]\n        self.fcs = nn.ModuleList()\n        self.bns = nn.ModuleList()\n        for in_unit, out_unit in zip(in_units, hidden_units):\n            self.fcs.append(nn.Linear(in_unit, out_unit))\n            if use_batch_norm:\n                self.bns.append(nn.BatchNorm1d(out_unit))\n\n    def forward(self, x):\n        h = x\n        for i in range(len(self.fcs)):\n            h = self.activation(self.fcs[i](h))\n            if self.use_batch_norm:\n                h = self.bns[i](h)\n        return h\n\n\nclass VectorEncoderWithAction(VectorEncoder):\n    def __init__(self,\n                 observation_shape,\n                 action_size,\n                 hidden_units=None,\n                 use_batch_norm=False,\n                 discrete_action=False,\n                 activation=torch.relu):\n        self.action_size = action_size\n        self.discrete_action = discrete_action\n        concat_shape = (observation_shape[0] + action_size, )\n        super().__init__(concat_shape, hidden_units, use_batch_norm,\n                         activation)\n        self.observation_shape = observation_shape\n\n    def forward(self, x, action):\n        if self.discrete_action:\n            action = F.one_hot(action.view(-1).long(),\n                               num_classes=self.action_size).float()\n\n        x = torch.cat([x, action], dim=1)\n        return super().forward(x)\n",
"import numpy as np\n\nfrom .scorer import _make_batches\n\n\ndef compare_continuous_action_diff(base_algo, window_size=1024):\n \"\"\" Returns scorer function of action difference between algorithms. \n\n This metrics suggests how different the two algorithms are in continuous\n action-space.\n If the algorithm to compare with is near-optimal, the small action\n difference would be better.\n\n .. math::\n\n \\\\mathbb{E}_{s_t \\\\sim D}\n [(\\\\pi_{\\\\phi_1}(s_t) - \\\\pi_{\\\\phi_2}(s_t))^2]\n\n .. code-block:: python\n\n from d3rlpy.algos import CQL\n from d3rlpy.metrics.comparer import compare_continuous_action_diff\n\n cql1 = CQL()\n cql2 = CQL()\n\n scorer = compare_continuous_action_diff(cql1)\n\n squared_action_diff = scorer(cql2, ...)\n\n Args:\n base_algo (d3rlpy.algos.base.AlgoBase): algorithm to comapre with.\n window_size (int): mini-batch size to compute.\n\n Returns:\n callable: scorer function.\n\n \"\"\"\n def scorer(algo, episodes):\n total_diffs = []\n for episode in episodes:\n # TODO: handle different n_frames\n for batch in _make_batches(episode, window_size, algo.n_frames):\n base_actions = base_algo.predict(batch.observations)\n actions = algo.predict(batch.observations)\n diff = ((actions - base_actions)**2).sum(axis=1).tolist()\n total_diffs += diff\n # smaller is better, sometimes?\n return -np.mean(total_diffs)\n\n return scorer\n\n\ndef compare_discrete_action_match(base_algo, window_size=1024):\n \"\"\" Returns scorer function of action matches between algorithms.\n\n This metrics suggests how different the two algorithms are in discrete\n action-space.\n If the algorithm to compare with is near-optimal, the small action\n difference would be better.\n\n .. math::\n\n \\\\mathbb{E}_{s_t \\\\sim D} [\\\\parallel\n \\\\{\\\\text{argmax}_a Q_{\\\\theta_1}(s_t, a)\n = \\\\text{argmax}_a Q_{\\\\theta_2}(s_t, a)\\\\}]\n\n .. code-block:: python\n\n from d3rlpy.algos import DQN\n from d3rlpy.metrics.comparer import compare_continuous_action_diff\n\n dqn1 = DQN()\n dqn2 = DQN()\n\n scorer = compare_continuous_action_diff(dqn1)\n\n percentage_of_identical_actions = scorer(dqn2, ...)\n\n Args:\n base_algo (d3rlpy.algos.base.AlgoBase): algorithm to comapre with.\n window_size (int): mini-batch size to compute.\n\n Returns:\n callable: scorer function.\n\n \"\"\"\n def scorer(algo, episodes):\n total_matches = []\n for episode in episodes:\n # TODO: handle different n_frames\n for batch in _make_batches(episode, window_size, algo.n_frames):\n base_actions = base_algo.predict(batch.observations)\n actions = algo.predict(batch.observations)\n match = (base_actions == actions).tolist()\n total_matches += match\n return np.mean(total_matches)\n\n return scorer\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.sigmoid",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.no_grad",
"torch.rand",
"torch.nn.BatchNorm2d"
],
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NLP-Discourse-SoochowU/TDDiscourseParser | [
"2f9c7cef85c564c47b368ee4935caf1fad7c598d",
"2f9c7cef85c564c47b368ee4935caf1fad7c598d"
] | [
"treebuilder/partptr/train.py",
"segmenter/rnn/train.py"
] | [
"# coding: UTF-8\r\nimport argparse\r\nimport logging\r\nimport random\r\nimport torch\r\nimport copy\r\nimport numpy as np\r\nfrom dataset import CDTB\r\nfrom collections import Counter\r\nfrom itertools import chain\r\nfrom structure.vocab import Vocab, Label\r\nfrom structure.nodes import node_type_filter, EDU, Relation, Sentence, TEXT\r\nfrom treebuilder.partptr.model import PartitionPtr\r\nfrom treebuilder.partptr.parser import PartitionPtrParser\r\nimport torch.optim as optim\r\nfrom util.eval import parse_eval, gen_parse_report\r\nfrom tensorboardX import SummaryWriter\r\n\r\n\r\ndef build_vocab(dataset):\r\n word_freq = Counter()\r\n pos_freq = Counter()\r\n nuc_freq = Counter()\r\n rel_freq = Counter()\r\n for paragraph in chain(*dataset):\r\n for node in paragraph.iterfind(filter=node_type_filter([EDU, Relation])):\r\n if isinstance(node, EDU):\r\n word_freq.update(node.words)\r\n pos_freq.update(node.tags)\r\n elif isinstance(node, Relation):\r\n nuc_freq[node.nuclear] += 1\r\n rel_freq[node.ftype] += 1\r\n\r\n word_vocab = Vocab(\"word\", word_freq)\r\n pos_vocab = Vocab(\"part of speech\", pos_freq)\r\n nuc_label = Label(\"nuclear\", nuc_freq)\r\n rel_label = Label(\"relation\", rel_freq)\r\n return word_vocab, pos_vocab, nuc_label, rel_label\r\n\r\n\r\ndef gen_decoder_data(root, edu2ids):\r\n # splits s0 s1 s2 s3 s4 s5 s6\r\n # edus s/ e0 e1 e2 e3 e4 e5 /s\r\n splits = [] # [(0, 3, 6, NS), (0, 2, 3, SN), ...]\r\n child_edus = [] # [edus]\r\n\r\n if isinstance(root, EDU):\r\n child_edus.append(root)\r\n elif isinstance(root, Sentence):\r\n for child in root:\r\n _child_edus, _splits = gen_decoder_data(child, edu2ids)\r\n child_edus.extend(_child_edus)\r\n splits.extend(_splits)\r\n elif isinstance(root, Relation):\r\n children = [gen_decoder_data(child, edu2ids) for child in root]\r\n if len(children) < 2:\r\n raise ValueError(\"relation node should have at least 2 children\")\r\n\r\n while children:\r\n left_child_edus, left_child_splits = children.pop(0)\r\n if children:\r\n last_child_edus, _ = children[-1]\r\n start = edu2ids[left_child_edus[0]]\r\n split = edu2ids[left_child_edus[-1]] + 1\r\n end = edu2ids[last_child_edus[-1]] + 1\r\n nuc = root.nuclear\r\n rel = root.ftype\r\n splits.append((start, split, end, nuc, rel))\r\n child_edus.extend(left_child_edus)\r\n splits.extend(left_child_splits)\r\n return child_edus, splits\r\n\r\n\r\ndef numericalize(dataset, word_vocab, pos_vocab, nuc_label, rel_label):\r\n instances = []\r\n for paragraph in filter(lambda d: d.root_relation(), chain(*dataset)):\r\n encoder_inputs = []\r\n decoder_inputs = []\r\n pred_splits = []\r\n pred_nucs = []\r\n pred_rels = []\r\n edus = list(paragraph.edus())\r\n for edu in edus:\r\n edu_word_ids = [word_vocab[word] for word in edu.words]\r\n edu_pos_ids = [pos_vocab[pos] for pos in edu.tags]\r\n encoder_inputs.append((edu_word_ids, edu_pos_ids))\r\n edu2ids = {edu: i for i, edu in enumerate(edus)}\r\n _, splits = gen_decoder_data(paragraph.root_relation(), edu2ids)\r\n for start, split, end, nuc, rel in splits:\r\n decoder_inputs.append((start, end))\r\n pred_splits.append(split)\r\n pred_nucs.append(nuc_label[nuc])\r\n pred_rels.append(rel_label[rel])\r\n instances.append((encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels))\r\n return instances\r\n\r\n\r\ndef gen_batch_iter(instances, batch_size, use_gpu=False):\r\n random_instances = np.random.permutation(instances)\r\n num_instances = len(instances)\r\n offset = 0\r\n while offset < num_instances:\r\n batch = 
random_instances[offset: min(num_instances, offset+batch_size)]\r\n\r\n # find out max seqlen of edus and words of edus\r\n num_batch = batch.shape[0]\r\n max_edu_seqlen = 0\r\n max_word_seqlen = 0\r\n for encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels in batch:\r\n max_edu_seqlen = max_edu_seqlen if max_edu_seqlen >= len(encoder_inputs) else len(encoder_inputs)\r\n for edu_word_ids, edu_pos_ids in encoder_inputs:\r\n max_word_seqlen = max_word_seqlen if max_word_seqlen >= len(edu_word_ids) else len(edu_word_ids)\r\n\r\n # batch to numpy\r\n e_input_words = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)\r\n e_input_poses = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)\r\n e_masks = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.uint8)\r\n\r\n d_inputs = np.zeros([num_batch, max_edu_seqlen-1, 2], dtype=np.long)\r\n d_outputs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)\r\n d_output_nucs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)\r\n d_output_rels = np.zeros([num_batch, max_edu_seqlen - 1], dtype=np.long)\r\n d_masks = np.zeros([num_batch, max_edu_seqlen-1, max_edu_seqlen+1], dtype=np.uint8)\r\n\r\n for batchi, (encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels) in enumerate(batch):\r\n for edui, (edu_word_ids, edu_pos_ids) in enumerate(encoder_inputs):\r\n word_seqlen = len(edu_word_ids)\r\n e_input_words[batchi][edui][:word_seqlen] = edu_word_ids\r\n e_input_poses[batchi][edui][:word_seqlen] = edu_pos_ids\r\n e_masks[batchi][edui][:word_seqlen] = 1\r\n\r\n for di, decoder_input in enumerate(decoder_inputs):\r\n d_inputs[batchi][di] = decoder_input\r\n d_masks[batchi][di][decoder_input[0]+1: decoder_input[1]] = 1\r\n d_outputs[batchi][:len(pred_splits)] = pred_splits\r\n d_output_nucs[batchi][:len(pred_nucs)] = pred_nucs\r\n d_output_rels[batchi][:len(pred_rels)] = pred_rels\r\n\r\n # numpy to torch\r\n e_input_words = torch.from_numpy(e_input_words).long()\r\n e_input_poses = torch.from_numpy(e_input_poses).long()\r\n e_masks = torch.from_numpy(e_masks).byte()\r\n d_inputs = torch.from_numpy(d_inputs).long()\r\n d_outputs = torch.from_numpy(d_outputs).long()\r\n d_output_nucs = torch.from_numpy(d_output_nucs).long()\r\n d_output_rels = torch.from_numpy(d_output_rels).long()\r\n d_masks = torch.from_numpy(d_masks).byte()\r\n\r\n if use_gpu:\r\n e_input_words = e_input_words.cuda()\r\n e_input_poses = e_input_poses.cuda()\r\n e_masks = e_masks.cuda()\r\n d_inputs = d_inputs.cuda()\r\n d_outputs = d_outputs.cuda()\r\n d_output_nucs = d_output_nucs.cuda()\r\n d_output_rels = d_output_rels.cuda()\r\n d_masks = d_masks.cuda()\r\n\r\n yield (e_input_words, e_input_poses, e_masks), (d_inputs, d_masks), (d_outputs, d_output_nucs, d_output_rels)\r\n offset = offset + batch_size\r\n\r\n\r\ndef parse_and_eval(dataset, model):\r\n model.eval()\r\n parser = PartitionPtrParser(model)\r\n golds = list(filter(lambda d: d.root_relation(), chain(*dataset)))\r\n num_instances = len(golds)\r\n strips = []\r\n for paragraph in golds:\r\n edus = []\r\n for edu in paragraph.edus():\r\n edu_copy = EDU([TEXT(edu.text)])\r\n setattr(edu_copy, \"words\", edu.words)\r\n setattr(edu_copy, \"tags\", edu.tags)\r\n edus.append(edu_copy)\r\n strips.append(edus)\r\n parses = []\r\n for edus in strips:\r\n parse = parser.parse(edus)\r\n parses.append(parse)\r\n return num_instances, parse_eval(parses, golds)\r\n\r\n\r\ndef model_score(scores):\r\n eval_score = sum(score[2] for score in scores)\r\n 
return eval_score\r\n\r\n\r\ndef main(args):\r\n # set seed for reproducibility\r\n random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n np.random.seed(args.seed)\r\n\r\n # load dataset\r\n cdtb = CDTB(args.data, \"TRAIN\", \"VALIDATE\", \"TEST\", ctb_dir=args.ctb_dir, preprocess=True, cache_dir=args.cache_dir)\r\n # build vocabulary\r\n word_vocab, pos_vocab, nuc_label, rel_label = build_vocab(cdtb.train)\r\n\r\n trainset = numericalize(cdtb.train, word_vocab, pos_vocab, nuc_label, rel_label)\r\n logging.info(\"num of instances trainset: %d\" % len(trainset))\r\n logging.info(\"args: %s\" % str(args))\r\n # build model\r\n model = PartitionPtr(hidden_size=args.hidden_size, dropout=args.dropout,\r\n word_vocab=word_vocab, pos_vocab=pos_vocab, nuc_label=nuc_label, rel_label=rel_label,\r\n pretrained=args.pretrained, w2v_size=args.w2v_size, w2v_freeze=args.w2v_freeze,\r\n pos_size=args.pos_size,\r\n split_mlp_size=args.split_mlp_size, nuc_mlp_size=args.nuc_mlp_size,\r\n rel_mlp_size=args.rel_mlp_size,\r\n use_gpu=args.use_gpu)\r\n if args.use_gpu:\r\n model.cuda()\r\n logging.info(\"model:\\n%s\" % str(model))\r\n\r\n # train and evaluate\r\n niter = 0\r\n log_splits_loss = 0.\r\n log_nucs_loss = 0.\r\n log_rels_loss = 0.\r\n log_loss = 0.\r\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)\r\n writer = SummaryWriter(args.log_dir)\r\n logging.info(\"hint: run 'tensorboard --logdir %s' to observe training status\" % args.log_dir)\r\n best_model = None\r\n best_model_score = 0.\r\n for nepoch in range(1, args.epoch + 1):\r\n batch_iter = gen_batch_iter(trainset, args.batch_size, args.use_gpu)\r\n for nbatch, (e_inputs, d_inputs, grounds) in enumerate(batch_iter, start=1):\r\n niter += 1\r\n model.train()\r\n optimizer.zero_grad()\r\n splits_loss, nucs_loss, rels_loss = model.loss(e_inputs, d_inputs, grounds)\r\n loss = args.a_split_loss * splits_loss + args.a_nuclear_loss * nucs_loss + args.a_relation_loss * rels_loss\r\n loss.backward()\r\n optimizer.step()\r\n log_splits_loss += splits_loss.item()\r\n log_nucs_loss += nucs_loss.item()\r\n log_rels_loss += rels_loss.item()\r\n log_loss += loss.item()\r\n if niter % args.log_every == 0:\r\n logging.info(\"[iter %-6d]epoch: %-3d, batch %-5d,\"\r\n \"train splits loss:%.5f, nuclear loss %.5f, relation loss %.5f, loss %.5f\" %\r\n (niter, nepoch, nbatch, log_splits_loss, log_nucs_loss, log_rels_loss, log_loss))\r\n writer.add_scalar(\"train/split_loss\", log_splits_loss, niter)\r\n writer.add_scalar(\"train/nuclear_loss\", log_nucs_loss, niter)\r\n writer.add_scalar(\"train/relation_loss\", log_rels_loss, niter)\r\n writer.add_scalar(\"train/loss\", log_loss, niter)\r\n log_splits_loss = 0.\r\n log_nucs_loss = 0.\r\n log_rels_loss = 0.\r\n log_loss = 0.\r\n if niter % args.validate_every == 0:\r\n num_instances, validate_scores = parse_and_eval(cdtb.validate, model)\r\n logging.info(\"validation on %d instances\" % num_instances)\r\n logging.info(gen_parse_report(*validate_scores))\r\n writer.add_scalar(\"validate/span_f1\", validate_scores[0][2], niter)\r\n writer.add_scalar(\"validate/nuclear_f1\", validate_scores[1][2], niter)\r\n writer.add_scalar(\"validate/coarse_relation_f1\", validate_scores[2][2], niter)\r\n writer.add_scalar(\"validate/fine_relation_f1\", validate_scores[3][2], niter)\r\n new_model_score = model_score(validate_scores)\r\n if new_model_score > best_model_score:\r\n # test on testset with new best model\r\n best_model_score = new_model_score\r\n best_model = copy.deepcopy(model)\r\n 
logging.info(\"test on new best model\")\r\n num_instances, test_scores = parse_and_eval(cdtb.test, best_model)\r\n logging.info(\"test on %d instances\" % num_instances)\r\n logging.info(gen_parse_report(*test_scores))\r\n writer.add_scalar(\"test/span_f1\", test_scores[0][2], niter)\r\n writer.add_scalar(\"test/nuclear_f1\", test_scores[1][2], niter)\r\n writer.add_scalar(\"test/coarse_relation_f1\", test_scores[2][2], niter)\r\n writer.add_scalar(\"test/fine_relation_f1\", test_scores[3][2], niter)\r\n if best_model:\r\n # evaluation and save best model\r\n logging.info(\"final test result\")\r\n num_instances, test_scores = parse_and_eval(cdtb.test, best_model)\r\n logging.info(\"test on %d instances\" % num_instances)\r\n logging.info(gen_parse_report(*test_scores))\r\n logging.info(\"save best model to %s\" % args.model_save)\r\n with open(args.model_save, \"wb+\") as model_fd:\r\n torch.save(best_model, model_fd)\r\n writer.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig(level=logging.INFO)\r\n arg_parser = argparse.ArgumentParser()\r\n\r\n # dataset parameters\r\n arg_parser.add_argument(\"--data\", default=\"data/CDTB\")\r\n arg_parser.add_argument(\"--ctb_dir\", default=\"data/CTB\")\r\n arg_parser.add_argument(\"--cache_dir\", default=\"data/cache\")\r\n\r\n # model parameters\r\n arg_parser.add_argument(\"-hidden_size\", default=512, type=int)\r\n arg_parser.add_argument(\"-dropout\", default=0.33, type=float)\r\n # w2v_group = arg_parser.add_mutually_exclusive_group(required=True)\r\n arg_parser.add_argument(\"-pretrained\", default=\"data/pretrained/sgns.renmin.word\")\r\n arg_parser.add_argument(\"-w2v_size\", type=int)\r\n arg_parser.add_argument(\"-pos_size\", default=30, type=int)\r\n arg_parser.add_argument(\"-split_mlp_size\", default=64, type=int)\r\n arg_parser.add_argument(\"-nuc_mlp_size\", default=32, type=int)\r\n arg_parser.add_argument(\"-rel_mlp_size\", default=128, type=int)\r\n arg_parser.add_argument(\"--w2v_freeze\", dest=\"w2v_freeze\", action=\"store_true\")\r\n arg_parser.set_defaults(w2v_freeze=True)\r\n\r\n # train parameters\r\n arg_parser.add_argument(\"-epoch\", default=20, type=int)\r\n arg_parser.add_argument(\"-batch_size\", default=64, type=int)\r\n arg_parser.add_argument(\"-lr\", default=0.001, type=float)\r\n arg_parser.add_argument(\"-l2\", default=0.0, type=float)\r\n arg_parser.add_argument(\"-log_every\", default=10, type=int)\r\n arg_parser.add_argument(\"-validate_every\", default=10, type=int)\r\n arg_parser.add_argument(\"-a_split_loss\", default=0.3, type=float)\r\n arg_parser.add_argument(\"-a_nuclear_loss\", default=1.0, type=float)\r\n arg_parser.add_argument(\"-a_relation_loss\", default=1.0, type=float)\r\n arg_parser.add_argument(\"-log_dir\", default=\"data/log\")\r\n arg_parser.add_argument(\"-model_save\", default=\"data/models/treebuilder.partptr.model\")\r\n arg_parser.add_argument(\"--seed\", default=21, type=int)\r\n arg_parser.add_argument(\"--use_gpu\", dest=\"use_gpu\", action=\"store_true\")\r\n arg_parser.set_defaults(use_gpu=True)\r\n\r\n main(arg_parser.parse_args())\r\n",
"# coding: UTF-8\r\nimport random\r\nfrom collections import Counter\r\nfrom structure.vocab import Vocab, Label\r\nfrom structure.nodes import node_type_filter, EDU, Sentence, Paragraph\r\nfrom itertools import chain\r\nimport numpy as np\r\nfrom dataset import CDTB\r\nimport logging\r\nimport argparse\r\nimport torch\r\nimport torch.optim as optim\r\nfrom segmenter.rnn.model import RNNSegmenterModel\r\nfrom segmenter.rnn import RNNSegmenter\r\nfrom util.eval import edu_eval, gen_edu_report\r\n\r\nlogger = logging.getLogger(\"train rnn segmenter\")\r\n\r\n\r\ndef build_vocab(dataset):\r\n word_freq = Counter()\r\n pos_freq = Counter()\r\n for paragraph in chain(*dataset):\r\n for edu in paragraph.edus():\r\n word_freq.update(edu.words)\r\n pos_freq.update(edu.tags)\r\n word_vocab = Vocab(\"word\", word_freq)\r\n pos_vocab = Vocab(\"part of speech\", pos_freq)\r\n return word_vocab, pos_vocab\r\n\r\n\r\ndef gen_train_instances(dataset):\r\n instances = []\r\n tags = []\r\n for paragraph in chain(*dataset):\r\n for sentence in paragraph.sentences():\r\n edus = list(sentence.iterfind(node_type_filter(EDU)))\r\n if edus:\r\n sent_words = []\r\n sent_poses = []\r\n sent_tags = []\r\n for i, edu in enumerate(edus):\r\n words = edu.words\r\n poses = edu.tags\r\n label = ['O'] * (len(words) - 1)\r\n label += ['B'] if i < len(edus) - 1 else ['O']\r\n sent_words.extend(words)\r\n sent_poses.extend(poses)\r\n sent_tags.extend(label)\r\n instances.append((sent_words, sent_poses))\r\n tags.append(sent_tags)\r\n return instances, tags\r\n\r\n\r\ndef numericalize(instances, tags, word_vocab, pos_vocab, tag_label):\r\n trainset = []\r\n for (words, poses), tags in zip(instances, tags):\r\n word_ids = [word_vocab[word] for word in words]\r\n pos_ids = [pos_vocab[pos] for pos in poses]\r\n tag_ids = [tag_label[tag] for tag in tags]\r\n trainset.append((word_ids, pos_ids, tag_ids))\r\n return trainset\r\n\r\n\r\ndef gen_batch_iter(trainset, batch_size, use_gpu=False):\r\n random_instances = np.random.permutation(trainset)\r\n num_instances = len(trainset)\r\n offset = 0\r\n while offset < num_instances:\r\n batch = random_instances[offset: min(num_instances, offset + batch_size)]\r\n num_batch = batch.shape[0]\r\n lengths = np.zeros(num_batch, dtype=np.int)\r\n for i, (word_ids, pos_ids, tag_ids) in enumerate(batch):\r\n lengths[i] = len(word_ids)\r\n sort_indices = np.argsort(-lengths)\r\n lengths = lengths[sort_indices]\r\n batch = batch[sort_indices]\r\n max_seqlen = lengths.max()\r\n word_inputs = np.zeros([num_batch, max_seqlen], dtype=np.long)\r\n pos_inputs = np.zeros([num_batch, max_seqlen], dtype=np.long)\r\n tag_outputs = np.zeros([num_batch, max_seqlen], dtype=np.long)\r\n masks = np.zeros([num_batch, max_seqlen], dtype=np.uint8)\r\n for i, (word_ids, pos_ids, tag_ids) in enumerate(batch):\r\n seqlen = len(word_ids)\r\n word_inputs[i][:seqlen] = word_ids\r\n pos_inputs[i][:seqlen] = pos_ids\r\n tag_outputs[i][:seqlen] = tag_ids\r\n masks[i][:seqlen] = 1\r\n offset = offset + batch_size\r\n\r\n word_inputs = torch.from_numpy(word_inputs).long()\r\n pos_inputs = torch.from_numpy(pos_inputs).long()\r\n tag_outputs = torch.from_numpy(tag_outputs).long()\r\n masks = torch.from_numpy(masks).byte()\r\n\r\n if use_gpu:\r\n word_inputs = word_inputs.cuda()\r\n pos_inputs = pos_inputs.cuda()\r\n tag_outputs = tag_outputs.cuda()\r\n masks = masks.cuda()\r\n yield (word_inputs, pos_inputs, masks), tag_outputs\r\n\r\n\r\ndef evaluate(dataset, model):\r\n model.eval()\r\n segmenter = RNNSegmenter(model)\r\n 
golds = []\r\n segs = []\r\n for paragraph in chain(*dataset):\r\n seged_sents = []\r\n for sentence in paragraph.sentences():\r\n # make sure sentence has edus\r\n if list(sentence.iterfind(node_type_filter(EDU))):\r\n seged_sents.append(Sentence(segmenter.cut_edu(sentence)))\r\n if seged_sents:\r\n segs.append(Paragraph(seged_sents))\r\n golds.append(paragraph)\r\n return edu_eval(segs, golds)\r\n\r\n\r\ndef get_lr(optimizer):\r\n for param_group in optimizer.param_groups:\r\n return param_group['lr']\r\n\r\n\r\ndef main(args):\r\n random.seed(args.seed)\r\n torch.random.manual_seed(args.seed)\r\n np.random.seed(args.seed)\r\n\r\n logger.info(\"args:\" + str(args))\r\n # load dataset\r\n cdtb = CDTB(args.data, \"TRAIN\", \"VALIDATE\", \"TEST\", ctb_dir=args.ctb_dir, preprocess=True, cache_dir=args.cache_dir)\r\n word_vocab, pos_vocab = build_vocab(cdtb.train)\r\n instances, tags = gen_train_instances(cdtb.train)\r\n tag_label = Label(\"tag\", Counter(chain(*tags)))\r\n trainset = numericalize(instances, tags, word_vocab, pos_vocab, tag_label)\r\n\r\n # build model\r\n model = RNNSegmenterModel(hidden_size=args.hidden_size, dropout=args.dropout, rnn_layers=args.rnn_layers,\r\n word_vocab=word_vocab, pos_vocab=pos_vocab, tag_label=tag_label,\r\n pos_size=args.pos_size, pretrained=args.pretrained, w2v_freeze=args.w2v_freeze,\r\n use_gpu=args.use_gpu)\r\n if args.use_gpu:\r\n model.cuda()\r\n logger.info(model)\r\n\r\n # train\r\n step = 0\r\n best_model_f1 = 0\r\n wait_count = 0\r\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)\r\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=3)\r\n for nepoch in range(1, args.epoch+1):\r\n batch_iter = gen_batch_iter(trainset, args.batch_size, use_gpu=args.use_gpu)\r\n for nbatch, (inputs, target) in enumerate(batch_iter, start=1):\r\n step += 1\r\n model.train()\r\n optimizer.zero_grad()\r\n loss = model.loss(inputs, target)\r\n loss.backward()\r\n optimizer.step()\r\n if nbatch > 0 and nbatch % args.log_every == 0:\r\n logger.info(\"step %d, patient %d, lr %f, epoch %d, batch %d, train loss %.4f\" %\r\n (step, wait_count, get_lr(optimizer), nepoch, nbatch, loss.item()))\r\n # model selection\r\n score = evaluate(cdtb.validate, model)\r\n f1 = score[-1]\r\n scheduler.step(f1, nepoch)\r\n logger.info(\"evaluation score:\")\r\n logger.info(\"\\n\" + gen_edu_report(score))\r\n if f1 > best_model_f1:\r\n wait_count = 0\r\n best_model_f1 = f1\r\n logger.info(\"save new best model to %s\" % args.model_save)\r\n with open(args.model_save, \"wb+\") as model_fd:\r\n torch.save(model, model_fd)\r\n logger.info(\"test on new best model...\")\r\n test_score = evaluate(cdtb.test, model)\r\n logger.info(\"test score:\")\r\n logger.info(\"\\n\" + gen_edu_report(test_score))\r\n else:\r\n wait_count += 1\r\n if wait_count > args.patient:\r\n logger.info(\"early stopping...\")\r\n break\r\n\r\n with open(args.model_save, \"rb\") as model_fd:\r\n best_model = torch.load(model_fd)\r\n test_score = evaluate(cdtb.test, best_model)\r\n logger.info(\"test score on final best model:\")\r\n logger.info(\"\\n\" + gen_edu_report(test_score))\r\n\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig(level=logging.INFO)\r\n arg_parser = argparse.ArgumentParser()\r\n # dataset parameters\r\n arg_parser.add_argument(\"data\")\r\n arg_parser.add_argument(\"--ctb_dir\")\r\n arg_parser.add_argument(\"--cache_dir\")\r\n arg_parser.add_argument(\"--seed\", default=21, type=int)\r\n 
arg_parser.add_argument(\"-model_save\", required=True)\r\n\r\n # model parameter\r\n arg_parser.add_argument(\"-hidden_size\", default=256, type=int)\r\n arg_parser.add_argument(\"-rnn_layers\", default=3, type=int)\r\n arg_parser.add_argument(\"-dropout\", default=0.33, type=float)\r\n w2v_group = arg_parser.add_mutually_exclusive_group(required=True)\r\n w2v_group.add_argument(\"-pretrained\")\r\n w2v_group.add_argument(\"-w2v_size\", type=int)\r\n arg_parser.add_argument(\"-pos_size\", default=30, type=int)\r\n arg_parser.add_argument(\"--w2v_freeze\", dest=\"w2v_freeze\", action=\"store_true\")\r\n arg_parser.set_defaults(w2v_freeze=False)\r\n\r\n # train parameter\r\n arg_parser.add_argument(\"-epoch\", default=20, type=int)\r\n arg_parser.add_argument(\"-lr\", default=0.001, type=float)\r\n arg_parser.add_argument(\"-l2\", default=1e-6, type=float)\r\n arg_parser.add_argument(\"-patient\", default=4, type=int)\r\n arg_parser.add_argument(\"-log_every\", default=5, type=int)\r\n arg_parser.add_argument(\"-batch_size\", default=64, type=int)\r\n arg_parser.add_argument(\"--use_gpu\", dest=\"use_gpu\", action=\"store_true\")\r\n arg_parser.set_defaults(use_gpu=False)\r\n main(arg_parser.parse_args())\r\n"
] | [
[
"numpy.random.seed",
"torch.manual_seed",
"torch.from_numpy",
"numpy.random.permutation",
"numpy.zeros",
"torch.save"
],
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.random.seed",
"torch.load",
"torch.random.manual_seed",
"torch.from_numpy",
"numpy.random.permutation",
"numpy.argsort",
"numpy.zeros",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LaudateCorpus1/coremltools | [
"777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc",
"777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc",
"777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc",
"5ece9069a1487d5083f00f56afe07832d88e3dfa",
"777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc",
"777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc"
] | [
"coremltools/converters/mil/mil/passes/conv_scale_fusion.py",
"coremltools/test/sklearn_tests/test_SVR.py",
"coremltools/converters/mil/experimental/passes/generic_linear_bias_fusion.py",
"coremltools/test/sklearn_tests/test_imputer.py",
"coremltools/converters/mil/backend/mil/load.py",
"coremltools/test/sklearn_tests/test_glm_classifier.py"
] | [
"# Copyright (c) 2021, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport numpy as np\n\nfrom coremltools.converters.mil.mil.passes.pass_registry import register_pass\nfrom coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass\nfrom coremltools.converters.mil.mil import Builder as mb\n\n\ndef _try_to_transform(conv_op, scale_op, block):\n\n # get the scale\n if scale_op.x.val is None and scale_op.y.val is None:\n return False\n scale_var = scale_op.x if scale_op.x.val is not None else scale_op.y\n scale = scale_var.val\n\n # for the scalar case, the scalar can be either\n # 1. a python int/float\n # 2. a 0d numpy array\n # 3. a 1d numpy array with shape (1,)\n\n is_scalar = True\n if isinstance(scale, np.ndarray):\n if scale.shape == ():\n scale = scale.tolist()\n elif scale.shape == (1) or scale.shape == (1,):\n scale = scale[0]\n else:\n is_scalar = False\n\n # get weight and bias and groups from conv layer\n if conv_op.weight.val is None:\n return False\n conv_weight = conv_op.weight.val\n conv_bias = conv_op.bias\n groups = conv_op.groups.val\n\n # get type of the conv layer\n is_deconv = conv_op.op_type == 'conv_transpose'\n is_conv_1d = len(conv_weight.shape) == 3\n\n # D_in denotes the spatial dimensions for conv kernel weight\n # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]\n # for conv, conv_weight has shape [Cout, Cin / groups, *D_in]\n if is_deconv:\n Cout = conv_weight.shape[1] * groups\n Cin = conv_weight.shape[0]\n else:\n Cout = conv_weight.shape[0]\n Cin = conv_weight.shape[1] * groups\n\n # for the vector scale case, check if the shape is broacastable\n if not is_scalar:\n if not np.product(scale.shape) == Cout:\n return False\n if len(scale.shape) == len(conv_weight.shape):\n if not scale.shape[1] == Cout:\n return False\n elif len(scale.shape) == len(conv_weight.shape) - 1:\n if not scale.shape[0] == Cout:\n return False\n else:\n return False\n\n # transform the scale to 1./scale for the real_div case\n if scale_op.op_type == \"real_div\":\n scale = 1./scale\n\n # get the type of the conv weight\n conv_weight_type = conv_weight.dtype\n\n # create bias for conv if not exist\n if conv_bias is None:\n conv_bias = np.zeros(Cout)\n else:\n conv_bias = conv_bias.val\n conv_bias = conv_bias.astype(conv_weight_type)\n\n # get the original shape of weight and bias\n origin_weight_shape = conv_weight.shape\n origin_bias_shape = conv_bias.shape\n\n # update the weight/bias for conv layer\n if is_scalar:\n new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)\n new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)\n\n else:\n scale = np.reshape(scale, (Cout))\n new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)\n new_conv_weight = []\n if is_deconv:\n conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])\n conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]))\n\n for i in range(Cout):\n _conv_weight = conv_weight[i] * scale[i]\n new_conv_weight.append(_conv_weight)\n new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)\n\n if is_deconv:\n new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]))\n new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])\n\n # make sure the 
updated weight and bias have the same shape as the original ones\n assert new_conv_weight.shape == origin_weight_shape, \"conv weight should have the same shape before and after the fuse_conv_scale pass.\"\n assert new_conv_bias.shape == origin_bias_shape, \"conv bias should have the same shape before and after the fuse_conv_scale pass.\"\n\n # create a new conv op with the new weight, bias value, copying rest of the attributes\n out_name = scale_op.outputs[0].name\n conv_kargs = {\"weight\": new_conv_weight, \"bias\": new_conv_bias, \"name\": out_name, \"before_op\": conv_op}\n\n for k, v in conv_op.inputs.items():\n if k in [\"weight\", \"bias\"]:\n continue\n conv_kargs[k] = v\n\n if is_deconv:\n x = mb.conv_transpose(**conv_kargs)\n else:\n x = mb.conv(**conv_kargs)\n\n scale_op.enclosing_block.replace_uses_of_var_after_op(\n anchor_op=scale_op, old_var=scale_op.outputs[0], new_var=x\n )\n # Remove all the ops at once\n block.remove_ops([conv_op, scale_op])\n return True\n\n@register_pass(namespace=\"common\")\nclass fuse_conv_scale(AbstractGraphPass):\n \"\"\"\n Fold mul/div into conv/conv_transpose by updating the weight/bias of the convolution layers.\n\n The scale const can be a single number (scalar) or a vector with a broacasable shape,\n for instance, if the output of the conv/deconv layer is (B, Cout, H, W),\n const of shape (Cout, 1, 1) and (1, Cout, 1, 1) are allowed.\n\n Given:\n %2 = conv(%1)\n ...\n %3 = mul(%2, constant) # where constant is the scale constant\n ...\n\n Result:\n %3 = conv(%1)\n ...\n\n \"\"\"\n def __init__(self):\n self.ops_to_skip = set()\n\n def set_ops_to_skip(self, prog):\n pass\n\n def _fuse_conv_scale_block(self, block):\n\n def _match_pattern(op):\n if op.op_type == \"conv\" or op.op_type == \"conv_transpose\":\n # abort fusion if op output is also a block output\n if op.outputs[0] in op.enclosing_block.outputs:\n return None\n # find batch_norm op\n child_ops = op.outputs[0].child_ops\n if len(child_ops) == 1:\n scale_op_candidate = list(child_ops)[0]\n if scale_op_candidate.op_type in [\"mul\", \"real_div\"]:\n return scale_op_candidate\n return None\n\n fusion_occurred = False\n for op in list(block.operations):\n for b in op.blocks:\n block_changed = True\n while block_changed:\n block_changed = self._fuse_conv_scale_block(b)\n if len(op.blocks) > 0:\n # This op can't be conv or conv_transpose\n continue\n\n scale_op = _match_pattern(op)\n\n if op in self.ops_to_skip or scale_op in self.ops_to_skip:\n continue\n\n if scale_op is not None:\n with block:\n fusion_occurred = _try_to_transform(op, scale_op, block)\n # has to break as the downstream iterator is affected.\n if fusion_occurred:\n return fusion_occurred\n return fusion_occurred\n\n def apply(self, prog):\n self.set_ops_to_skip(prog)\n for f in prog.functions.values():\n block_changed = True\n while block_changed:\n block_changed = self._fuse_conv_scale_block(f)\n",
"# Copyright (c) 2017, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport pandas as pd\nimport numpy as np\nimport random\nimport tempfile\nimport unittest\nimport pytest\n\nfrom coremltools._deps import (\n _HAS_LIBSVM,\n MSG_LIBSVM_NOT_FOUND,\n _HAS_SKLEARN,\n MSG_SKLEARN_NOT_FOUND,\n)\nfrom coremltools.models.utils import evaluate_regressor, _macos_version, _is_macos\n\nif _HAS_LIBSVM:\n import svmutil\n import svm\n from coremltools.converters import libsvm\n\nif _HAS_SKLEARN:\n from sklearn.svm import SVR\n from sklearn.datasets import load_boston\n from coremltools.converters import sklearn as sklearn_converter\n from sklearn.preprocessing import OneHotEncoder\n\n\[email protected](not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)\nclass SvrScikitTest(unittest.TestCase):\n \"\"\"\n Unit test class for testing scikit-learn sklearn_converter.\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n Set up the unit test by loading the dataset and training a model.\n \"\"\"\n if not _HAS_SKLEARN:\n return\n\n scikit_data = load_boston()\n scikit_model = SVR(kernel=\"linear\")\n scikit_model.fit(scikit_data[\"data\"], scikit_data[\"target\"])\n\n # Save the data and the model\n self.scikit_data = scikit_data\n self.scikit_model = scikit_model\n\n def test_conversion_bad_inputs(self):\n # Error on converting an untrained model\n with self.assertRaises(TypeError):\n model = SVR()\n spec = sklearn_converter.convert(model, \"data\", \"out\")\n\n # Check the expected class during covnersion.\n with self.assertRaises(TypeError):\n model = OneHotEncoder()\n spec = sklearn_converter.convert(model, \"data\", \"out\")\n\n @pytest.mark.slow\n def test_evaluation_stress_test(self):\n self._test_evaluation(allow_slow=True)\n\n def test_evaluation(self):\n self._test_evaluation(allow_slow=False)\n\n def _test_evaluation(self, allow_slow):\n \"\"\"\n Test that the same predictions are made\n \"\"\"\n\n # Generate some smallish (some kernels take too long on anything else) random data\n x, y = [], []\n for _ in range(50):\n cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)\n x.append([cur_x1, cur_x2])\n y.append(1 + 2 * cur_x1 + 3 * cur_x2)\n\n input_names = [\"x1\", \"x2\"]\n df = pd.DataFrame(x, columns=input_names)\n\n # Parameters to test\n kernel_parameters = [\n {},\n {\"kernel\": \"rbf\", \"gamma\": 1.2},\n {\"kernel\": \"linear\"},\n {\"kernel\": \"poly\"},\n {\"kernel\": \"poly\", \"degree\": 2},\n {\"kernel\": \"poly\", \"gamma\": 0.75},\n {\"kernel\": \"poly\", \"degree\": 0, \"gamma\": 0.9, \"coef0\": 2},\n {\"kernel\": \"sigmoid\"},\n {\"kernel\": \"sigmoid\", \"gamma\": 1.3},\n {\"kernel\": \"sigmoid\", \"coef0\": 0.8},\n {\"kernel\": \"sigmoid\", \"coef0\": 0.8, \"gamma\": 0.5},\n ]\n non_kernel_parameters = [\n {},\n {\"C\": 1},\n {\"C\": 1.5, \"epsilon\": 0.5, \"shrinking\": True},\n {\"C\": 0.5, \"epsilon\": 1.5, \"shrinking\": False},\n ]\n\n # Test\n for param1 in non_kernel_parameters:\n for param2 in kernel_parameters:\n cur_params = param1.copy()\n cur_params.update(param2)\n print(\"cur_params=\" + str(cur_params))\n\n cur_model = SVR(**cur_params)\n cur_model.fit(x, y)\n df[\"prediction\"] = cur_model.predict(x)\n\n spec = sklearn_converter.convert(cur_model, input_names, \"target\")\n\n if _is_macos() and _macos_version() >= (10, 13):\n metrics = evaluate_regressor(spec, df)\n self.assertAlmostEqual(metrics[\"max_error\"], 
0)\n\n if not allow_slow:\n break\n\n if not allow_slow:\n break\n\n\[email protected](not _HAS_LIBSVM, MSG_LIBSVM_NOT_FOUND)\[email protected](not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)\nclass EpsilonSVRLibSVMTest(unittest.TestCase):\n \"\"\"\n Unit test class for testing the libsvm sklearn converter.\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n Set up the unit test by loading the dataset and training a model.\n \"\"\"\n if not _HAS_SKLEARN:\n return\n if not _HAS_LIBSVM:\n return\n\n scikit_data = load_boston()\n prob = svmutil.svm_problem(scikit_data[\"target\"], scikit_data[\"data\"].tolist())\n param = svmutil.svm_parameter()\n param.svm_type = svmutil.EPSILON_SVR\n param.kernel_type = svmutil.LINEAR\n param.eps = 1\n\n self.libsvm_model = svmutil.svm_train(prob, param)\n\n def test_input_names(self):\n data = load_boston()\n df = pd.DataFrame({\"input\": data[\"data\"].tolist()})\n df[\"input\"] = df[\"input\"].apply(np.array)\n\n # Default values\n spec = libsvm.convert(self.libsvm_model)\n if _is_macos() and _macos_version() >= (10, 13):\n (df[\"prediction\"], _, _) = svmutil.svm_predict(\n data[\"target\"], data[\"data\"].tolist(), self.libsvm_model\n )\n metrics = evaluate_regressor(spec, df)\n self.assertAlmostEqual(metrics[\"max_error\"], 0)\n\n # One extra parameters. This is legal/possible.\n num_inputs = len(data[\"data\"][0])\n spec = libsvm.convert(self.libsvm_model, input_length=num_inputs + 1)\n\n # Not enought input names.\n input_names = [\"this\", \"is\", \"not\", \"enought\", \"names\"]\n with self.assertRaises(ValueError):\n libsvm.convert(self.libsvm_model, input_names=input_names)\n with self.assertRaises(ValueError):\n libsvm.convert(self.libsvm_model, input_length=num_inputs - 1)\n\n def test_conversion_from_filesystem(self):\n libsvm_model_path = tempfile.mktemp(suffix=\"model.libsvm\")\n svmutil.svm_save_model(libsvm_model_path, self.libsvm_model)\n spec = libsvm.convert(\n libsvm_model_path, input_names=\"data\", target_name=\"target\"\n )\n\n def test_conversion_bad_inputs(self):\n # Check the expected class during covnersion.\n with self.assertRaises(TypeError):\n model = OneHotEncoder()\n spec = libsvm.convert(model, \"data\", \"out\")\n\n @pytest.mark.slow\n def test_evaluation_stress_test(self):\n self._test_evaluation(allow_slow=True)\n\n def test_evaluation(self):\n self._test_evaluation(allow_slow=False)\n\n def _test_evaluation(self, allow_slow):\n \"\"\"\n Test that the same predictions are made\n \"\"\"\n from svm import svm_parameter, svm_problem\n from svmutil import svm_train, svm_predict\n\n # Generate some smallish (poly kernels take too long on anything else) random data\n x, y = [], []\n for _ in range(50):\n cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)\n x.append([cur_x1, cur_x2])\n y.append(1 + 2 * cur_x1 + 3 * cur_x2)\n\n input_names = [\"x1\", \"x2\"]\n df = pd.DataFrame(x, columns=input_names)\n prob = svm_problem(y, x)\n\n # Parameters\n base_param = \"-s 3\" # model type is epsilon SVR\n non_kernel_parameters = [\"\", \"-c 1.5 -p 0.5 -h 1\", \"-c 0.5 -p 0.5 -h 0\"]\n kernel_parameters = [\n \"\",\n \"-t 2 -g 1.2\", # rbf kernel\n \"-t 0\", # linear kernel\n \"-t 1\",\n \"-t 1 -d 2\",\n \"-t 1 -g 0.75\",\n \"-t 1 -d 0 -g 0.9 -r 2\", # poly kernel\n \"-t 3\",\n \"-t 3 -g 1.3\",\n \"-t 3 -r 0.8\",\n \"-t 3 -r 0.8 -g 0.5\", # sigmoid kernel\n ]\n\n for param1 in non_kernel_parameters:\n for param2 in kernel_parameters:\n param_str = \" \".join([base_param, param1, param2])\n print(param_str)\n param = 
svm_parameter(param_str)\n\n model = svm_train(prob, param)\n (df[\"prediction\"], _, _) = svm_predict(y, x, model)\n\n spec = libsvm.convert(\n model, input_names=input_names, target_name=\"target\"\n )\n\n if _is_macos() and _macos_version() >= (10, 13):\n metrics = evaluate_regressor(spec, df)\n self.assertAlmostEqual(metrics[\"max_error\"], 0)\n\n if not allow_slow:\n break\n\n if not allow_slow:\n break\n",
"# Copyright (c) 2021, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport os\nimport numpy as np\n\nfrom coremltools.converters.mil import Builder as mb\nfrom coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import register_generic_pass\nfrom coremltools.converters.mil.mil import get_new_symbol\n\nif os.getenv(\"ENABLE_EXPERIMENTAL_PASSES\") == \"1\":\n arbitrary_shape = (get_new_symbol(), get_new_symbol())\n np.random.seed()\n arbitrary_weight = np.random.rand(4, 3)\n arbitrary_bias = np.random.rand(4)\n\nif os.getenv(\"ENABLE_EXPERIMENTAL_PASSES\") == \"1\":\n @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])\n def pattern_add(x):\n \"\"\"\n Original:\n % 4 = linear(x= % 1, weight = % 2, bias = % 3) # %2 is a rank-2 const tensor (weight)\n # %3 is a rank-1 const tensor (bias)\n ...\n % 6 = add(x= % 4, y = % 5) # %5 is a const tensor with same shape as %3\n\n Result:\n % 8 = linear(x= % 1, weight = % 2, bias = % 7) # where %7 is a new const tensor with value\n # %7 = %3 + %6\n \"\"\"\n linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name=\"linear\")\n add_or_sub = mb.add(x=linear, y=arbitrary_bias, name=\"add_or_sub\")\n return add_or_sub\n\nif os.getenv(\"ENABLE_EXPERIMENTAL_PASSES\") == \"1\":\n @mb.program(input_specs=[mb.TensorSpec(shape=arbitrary_shape)])\n def pattern_sub(x):\n \"\"\"\n Original:\n %4 = linear(x=%1, weight=%2, bias=%3) # %2 is a rank-2 const tensor (weight)\n # %3 is a rank-1 const tensor (bias)\n ...\n %6 = sub(x=%5, y=%4) # %5 is a const tensor with a broacasable shape with %3.\n i.e. if %3 has shape (Dout), %5 could be (1, Dout).\n\n Result:\n %9 = linear(x=%1, weight=%7, bias=%8) # where %7 is a new const tensor with value %7 = -%2\n # %8 = %5 - %3\n \"\"\"\n linear = mb.linear(x=x, weight=arbitrary_weight, bias=arbitrary_bias, name=\"linear\")\n add_or_sub = mb.sub(x=linear, y=arbitrary_bias, name=\"add_or_sub\")\n return add_or_sub\n\n\ndef var_constraints(pattern):\n passed = True\n passed = passed and pattern.add_or_sub.x.val is not None or pattern.add_or_sub.y.val is not None\n\n is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)\n linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)\n\n # check if the shape is broadcasable\n passed = passed and np.prod(linear_bias.shape) == np.prod(bias.shape)\n passed = passed and bias.shape[-1] == Dout\n return passed\n\n\ndef _get_is_sub_and_is_first_input(pattern):\n is_sub = pattern.add_or_sub.op_type == \"sub\"\n is_first_input = pattern.add_or_sub.x == pattern.linear.outputs[0]\n return is_sub, is_first_input\n\n\ndef _get_linear_bias_bias_Dout(pattern, is_first_input):\n linear_bias = pattern.linear.bias.val\n bias = pattern.add_or_sub.y.val if is_first_input else pattern.add_or_sub.x.val\n Dout = linear_bias.shape[0]\n return linear_bias, bias, Dout\n\n\ndef transform_pattern(pattern):\n is_sub, is_first_input = _get_is_sub_and_is_first_input(pattern)\n linear_bias, bias, Dout = _get_linear_bias_bias_Dout(pattern, is_first_input)\n bias = np.reshape(bias, (Dout,))\n\n if is_sub and is_first_input:\n bias = -bias\n if is_sub and not is_first_input:\n linear_bias = -linear_bias\n\n new_bias = linear_bias + bias\n\n # compute the new weight\n if is_sub and not is_first_input:\n new_weight = -pattern.linear.weight.val\n else:\n new_weight = 
pattern.linear.weight.val\n\n # create a new linear op with the new weight, bias value, copying rest of the attributes\n out_name = pattern.add_or_sub.outputs[0].name\n linear_kargs = {\"weight\": new_weight, \"bias\": new_bias, \"name\": out_name, \"before_op\": pattern.linear}\n\n linear_kargs.update({k: v for k, v in pattern.linear.inputs.items() if k not in [\"weight\", \"bias\"]})\n\n x = mb.linear(**linear_kargs)\n\n pattern.add_or_sub.enclosing_block.replace_uses_of_var_after_op(\n anchor_op=pattern.add_or_sub, old_var=pattern.add_or_sub.outputs[0], new_var=x\n )\n # Remove all the ops at once\n pattern.block.remove_ops(pattern.op_list())\n\n\nif os.getenv('ENABLE_EXPERIMENTAL_PASSES') == '1':\n register_generic_pass(\n ops_arrangement=pattern_add,\n var_constraints=var_constraints,\n transform_pattern=transform_pattern,\n pass_name=\"fuse_linear_bias\",\n namespace=\"common\",\n )\n\n register_generic_pass(\n ops_arrangement=pattern_sub,\n var_constraints=var_constraints,\n transform_pattern=transform_pattern,\n pass_name=\"fuse_linear_bias\",\n namespace=\"common\",\n )\n",
"# Copyright (c) 2017, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport unittest\nfrom coremltools._deps import _HAS_SKLEARN\nimport numpy.random as rn\nimport numpy as np\nfrom coremltools.models.utils import evaluate_transformer, _macos_version, _is_macos\n\nif _HAS_SKLEARN:\n from sklearn.preprocessing import Imputer\n from coremltools.converters import sklearn as converter\n\n\[email protected](\n _is_macos() and _macos_version() >= (10, 13), \"Only supported on macOS 10.13+\"\n)\[email protected](not _HAS_SKLEARN, \"Missing sklearn. Skipping tests.\")\nclass NumericalImputerTestCase(unittest.TestCase):\n \"\"\"\n Unit test class for testing scikit-learn converter.\n \"\"\"\n\n def test_conversion_boston(self):\n\n from sklearn.datasets import load_boston\n\n scikit_data = load_boston()\n\n sh = scikit_data.data.shape\n\n rn.seed(0)\n missing_value_indices = [\n (rn.randint(sh[0]), rn.randint(sh[1])) for k in range(sh[0])\n ]\n\n for strategy in [\"mean\", \"median\", \"most_frequent\"]:\n for missing_value in [0, \"NaN\", -999]:\n\n X = np.array(scikit_data.data).copy()\n\n for i, j in missing_value_indices:\n X[i, j] = missing_value\n\n model = Imputer(missing_values=missing_value, strategy=strategy)\n model = model.fit(X)\n\n tr_X = model.transform(X.copy())\n\n spec = converter.convert(model, scikit_data.feature_names, \"out\")\n\n input_data = [dict(zip(scikit_data.feature_names, row)) for row in X]\n\n output_data = [{\"out\": row} for row in tr_X]\n\n result = evaluate_transformer(spec, input_data, output_data)\n\n assert result[\"num_errors\"] == 0\n",
"# Copyright (c) 2021, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport logging\nimport numpy as np\nimport os\n\nfrom .passes import mil_passes\nfrom coremltools import _SPECIFICATION_VERSION_IOS_15\nfrom coremltools.converters.mil.backend.mil.helper import (\n cast_to_framework_io_dtype,\n create_file_value,\n create_immediate_value,\n create_list_scalarvalue,\n create_scalar_value,\n types_to_proto\n)\nfrom coremltools.converters.mil.backend.backend_helper import _get_probability_var_for_classifier\nfrom coremltools.converters.mil.mil import (\n Builder as mb,\n Function,\n mil_list,\n types\n)\nfrom coremltools.converters.mil.backend.nn.load import _set_optional_inputs\nfrom coremltools.converters.mil.input_types import ImageType, TensorType, EnumeratedShapes, RangeDim\nfrom coremltools.converters.mil.mil.ops.registry import SSAOpRegistry\nfrom coremltools.converters.mil.mil.types.symbolic import (\n any_symbolic,\n any_variadic,\n is_symbolic,\n)\nfrom coremltools.libmilstoragepython import _BlobStorageWriter as BlobWriter\nfrom coremltools.models.utils import _WEIGHTS_FILE_NAME\nfrom coremltools.models.neural_network.flexible_shape_utils import (\n add_enumerated_image_sizes,\n add_multiarray_ndshape_enumeration,\n NeuralNetworkImageSize,\n NeuralNetworkImageSizeRange,\n set_multiarray_ndshape_range,\n update_image_size_range\n)\nfrom coremltools.proto import (\n FeatureTypes_pb2 as ft,\n MIL_pb2 as pm,\n Model_pb2 as ml\n)\n\n\ndef should_use_weight_file(val):\n return (\n val is not None\n and isinstance(val, (np.ndarray, np.generic))\n and val.size >= 10\n and val.dtype in ['float16', 'float32']\n )\n\ndef translate_const(op, blob_writer):\n output_var = op.outputs[0]\n\n if should_use_weight_file(output_var.val):\n value = create_file_value(output_var, blob_writer)\n else:\n value = create_immediate_value(output_var)\n\n return pm.Operation(\n type=\"const\",\n attributes={\"name\": create_scalar_value(op.name), \"val\": value},\n outputs=[\n pm.NamedValueType(\n name=output_var.name, type=types_to_proto(output_var.sym_type)\n )\n ],\n )\n\n\ndef translate_generic_op(op, parameters, blob_writer, literal_params=[]):\n inputs = {}\n for param_name, vars in op.inputs.items():\n if param_name.startswith(\"_\"):\n continue\n if not isinstance(vars, (list, tuple)):\n vars = [vars]\n\n arguments = []\n for _var in vars:\n binding = pm.Argument.Binding()\n # use const value literals if requested\n if param_name in literal_params:\n binding.value.CopyFrom(create_immediate_value(_var))\n else:\n binding.name = _var.name\n arguments.append(binding)\n\n args = pm.Argument()\n args.arguments.extend(arguments)\n inputs[param_name] = args\n\n outputs = [\n pm.NamedValueType(name=v.name, type=types_to_proto(v.sym_type))\n for v in op.outputs\n ]\n blocks = None\n if len(op.blocks) > 0:\n blocks = [create_block(b, parameters, blob_writer) for b in op.blocks]\n\n op_type = op.op_type\n attr_dict = {}\n if op.op_type in SSAOpRegistry.custom_ops:\n op_type = \"custom_layer\"\n class_name = op.bindings.get(\"class_name\", op.name)\n input_order = op.bindings.get(\"input_order\", [])\n parameters = op.bindings.get(\"parameters\", [])\n weights = op.bindings.get(\"weights\", [])\n description = op.bindings.get(\"description\", \"\")\n\n attr_dict[\"name\"] = create_scalar_value(op.name)\n attr_dict[\"class_name\"] = 
create_scalar_value(class_name)\n attr_dict[\"input_order\"] = create_list_scalarvalue(input_order, np.str)\n attr_dict[\"parameters\"] = create_list_scalarvalue(parameters, np.str)\n attr_dict[\"weights\"] = create_list_scalarvalue(weights, np.str)\n attr_dict[\"description\"] = create_scalar_value(description)\n\n return pm.Operation(\n type=op_type,\n blocks=blocks,\n inputs=inputs,\n attributes=attr_dict,\n outputs=outputs,\n )\n\n\ndef create_block(block, parameters, blob_writer):\n proto_ops = []\n\n # Find the const op that generates classify's \"label\" / \"class\" string vec.\n classify_const_classes_op = None\n if len(block.operations) > 0:\n # Classify is always the last operation in the block.\n op = block.operations[-1]\n op_cls_name = type(op).__name__\n if (op_cls_name == \"classify\"):\n classes_var = op.inputs[\"classes\"]\n classify_const_classes_op = classes_var.op\n if (len(classes_var.child_ops) != 1):\n raise ValueError(\"Classify's labels/classes should be input to only 1 op (classify).\")\n\n for op in block.operations:\n op_cls_name = type(op).__name__\n if op_cls_name == \"const\":\n # Do not serialize the const op that creates the var bound to the classifier's \"classes\" param.\n # The variable's value will be bound directly to classify's \"classes\" param instead.\n if op != classify_const_classes_op:\n proto_ops.append(translate_const(op, blob_writer))\n elif op_cls_name == \"classify\":\n # Classify's \"classes\" param should be serialized as a value literal bound\n # directly to the param, rather than as a const-generated variable.\n proto_ops.append(translate_generic_op(op, parameters, blob_writer, [\"classes\"]))\n else:\n proto_ops.append(translate_generic_op(op, parameters, blob_writer))\n\n inputs = []\n if not isinstance(block, Function):\n # Function is subclass of Block, but function's block has no input,\n # and hence skipping reading the block inputs.\n for var in block.inputs:\n proto_type = types_to_proto(var.sym_type)\n inputs.append(pm.NamedValueType(name=var.name, type=proto_type))\n output_names = [v.name for v in block.outputs]\n return pm.Block(inputs=inputs, outputs=output_names, operations=proto_ops)\n\n\ndef convert_function(function, parameters, blob_writer):\n block = create_block(function, parameters, blob_writer)\n\n inputs = []\n for name, var in function.inputs.items():\n proto_type = types_to_proto(var.sym_type)\n inputs.append(pm.NamedValueType(name=name, type=proto_type))\n\n return pm.Function(inputs=inputs, opset=\"CoreML5\", block_specializations={\"CoreML5\": block})\n\n\n# Add a classify op to the output.\n# Replaces the original probabilites output (in the containing MIL block)\n# with the outputs of the classifier op. 
Returns the name of the original\n# probabilities output variable.\ndef _add_classify_op(prog, classifier_config):\n '''\n Add a \"classify\" op to the program, at the end of the main block\n '''\n block = prog.functions[\"main\"]\n\n message = \"Class labels must be a list of integers / strings or a file path\"\n classes_in = classifier_config.class_labels\n if isinstance(classes_in, str):\n import os\n\n if not os.path.isfile(classes_in):\n raise ValueError(\"Path to class labels (%s) does not exist.\" % classes_in)\n with open(classes_in, \"r\") as f:\n classes = f.read()\n classes = classes.splitlines()\n elif isinstance(classes_in, list): # list[int or str]\n classes = classes_in\n assert all([isinstance(x, (int, str)) for x in classes]), message\n else:\n raise ValueError(message)\n\n probability_var = _get_probability_var_for_classifier(prog, classifier_config)\n\n # add the classify op now\n with block:\n # cast the int label to np.int64\n if isinstance(classes[0], int):\n classes = [np.int64(x) for x in classes]\n classes_var = mb.const(val=mil_list(classes))\n out = mb.classify(probabilities=probability_var, classes=classes_var)\n\n predicted_feature_name = \"classLabel\" if classifier_config.predicted_feature_name is None \\\n else classifier_config.predicted_feature_name\n out[0].name = predicted_feature_name\n out[1].name = predicted_feature_name + \"_probs\"\n\n # Remove probabilities from block outputs, replace with classify's outputs\n for i in range(0, len(block.outputs)):\n if block.outputs[i] is probability_var:\n block.outputs.pop(i)\n break\n block.outputs[:0] = out\n return out[0].name, out[1].name\n\ndef load(prog, weights_dir, resume_on_errors=False, **kwargs):\n if \"main\" not in prog.functions:\n raise ValueError(\"main function not found in program\")\n\n mil_passes.mil_backend_passes(prog)\n\n # if user has specified \"ClassifierConfig\", then add the \"classify\" op to the prog\n classifier_config = kwargs.get(\"classifier_config\", None)\n predicted_feature_name = None\n predicted_probabilities_name = None\n if classifier_config is not None:\n predicted_feature_name, predicted_probabilities_name = _add_classify_op(prog, classifier_config)\n\n input_types = prog.main_input_types\n weight_path = os.path.join(weights_dir, _WEIGHTS_FILE_NAME)\n blob_writer = BlobWriter(weight_path)\n\n function_protos = {}\n for func_name, func in prog.functions.items():\n function_protos[func_name] = convert_function(func, prog.parameters, blob_writer)\n\n proto = pm.Program(\n version=1,\n functions=function_protos,\n )\n\n input_features = []\n output_features = []\n symbolic_inputs = []\n image_input_names = {} # these are the model inputs marked as image by the user\n input_shape_map = {}\n\n for input_type in input_types:\n if isinstance(input_type, ImageType):\n image_input_names[input_type.name] = input_type\n # error checking for input(s) marked as images\n if input_type.name not in list(prog.functions[\"main\"].inputs.keys()):\n msg = \"Provided image input '{}' is not one of the inputs of the MIL program\"\n raise ValueError(msg.format(input_type.name))\n input_shape_map[input_type.name] = input_type\n\n for name, var in prog.functions[\"main\"].inputs.items():\n input_feature_type = ft.FeatureType()\n\n # error checking for input(s) marked as images\n # an image input must be of type tensor in program proto\n # (since an image type does not exist in MIL program)\n if name in image_input_names and \\\n not types.is_tensor(var.sym_type):\n raise ValueError(\"For the 
image input, '{}', its type in the MIL program must be tensor. \"\n \"Instead it is {}.\".format(name, var.sym_type.__type_info__()))\n\n if types.is_tensor(var.sym_type):\n shape = var.sym_type.get_shape()\n if any_variadic(shape):\n raise ValueError(\"Variable rank model inputs are not supported!\")\n if any_symbolic(shape):\n symbolic_inputs.append(name)\n # We extract the default input shape given by user first\n if name in input_shape_map:\n shape = input_shape_map[name].shape.default\n else:\n logging.warning(\"Input shape not fully specified by enumerated shapes or range dim! 1 will be used for dimension not specified instead.\")\n # If no input shape is provided (ex. auto conversion of -1 in Tensorflow)\n shape = [1 if is_symbolic(d) else d for d in shape]\n\n if name not in image_input_names:\n # make a feature type of Type \"multiArrayType\"\n array_type = ft.ArrayFeatureType(shape=shape, dataType=cast_to_framework_io_dtype(var, False))\n input_feature_type.multiArrayType.CopyFrom(array_type)\n else:\n if len(shape) < 3:\n raise ValueError(\"Image input, '{}', must have rank at least 3. Instead it has rank {}\".\n format(name, len(shape)))\n # make a feature type of Type \"imageType\"\n input_type = image_input_names[name]\n if not input_type.channel_first:\n raise ValueError(\"Image input, '{}', must be in the channel_first format\".\n format(name))\n\n if input_type.color_layout == \"G\":\n clr_space = ft.ImageFeatureType.ColorSpace.GRAYSCALE\n elif input_type.color_layout == \"BGR\":\n clr_space = ft.ImageFeatureType.ColorSpace.BGR\n else:\n clr_space = ft.ImageFeatureType.ColorSpace.RGB\n\n image_type = ft.ImageFeatureType(width=shape[-1],\n height=shape[-2],\n colorSpace=clr_space)\n input_feature_type.imageType.CopyFrom(image_type)\n\n input_features.append(\n ml.FeatureDescription(name=name, type=input_feature_type)\n )\n elif types.is_scalar(var.sym_type):\n array_type = ft.ArrayFeatureType(shape=[1], dataType=cast_to_framework_io_dtype(var, False))\n input_feature_type.multiArrayType.CopyFrom(array_type)\n input_features.append(ml.FeatureDescription(name=var.name, type=input_feature_type))\n else:\n raise NotImplementedError()\n\n for var in prog.functions[\"main\"].outputs:\n output_feature_type = ft.FeatureType()\n if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):\n dataType = None\n if classifier_config is None or var.name != predicted_feature_name:\n # Not a classifier output, make sure model output type matches with ML Program type.\n dataType = cast_to_framework_io_dtype(var, True)\n else:\n # Classifier outputs are set up separately, so default to fp32 for now.\n dataType = ft.ArrayFeatureType.ArrayDataType.FLOAT32\n\n array_type = ft.ArrayFeatureType(shape=None, dataType=dataType)\n output_feature_type.multiArrayType.CopyFrom(array_type)\n output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type))\n elif (types.is_dict(var.sym_type)):\n output_feature_type.dictionaryType.MergeFromString(b\"\")\n keytype, valtype = var.sym_type.T\n if types.is_str(keytype):\n output_feature_type.dictionaryType.stringKeyType.MergeFromString(b\"\")\n elif (keytype == types.int64):\n output_feature_type.dictionaryType.int64KeyType.MergeFromString(b\"\")\n else:\n raise ValueError(\"Dictionary key type not supported.\")\n output_features.append(ml.FeatureDescription(name=var.name, type=output_feature_type))\n else:\n raise NotImplementedError()\n\n # Model description\n desc = ml.ModelDescription(input=input_features, 
output=output_features)\n if classifier_config is not None:\n desc.predictedFeatureName = predicted_feature_name\n desc.predictedProbabilitiesName = predicted_probabilities_name\n\n # Manually edit output type of predictedFeatureName.\n # It doesn't use MLMultiArray and really uses a \"primitive\" type.\n for output in desc.output:\n if output.name == predicted_feature_name:\n if type(classifier_config.class_labels[0]) == int:\n output.type.int64Type.MergeFromString(b\"\")\n else:\n output.type.stringType.MergeFromString(b\"\")\n break\n\n # Create ML Model\n model = ml.Model(description=desc, specificationVersion=_SPECIFICATION_VERSION_IOS_15)\n model.mlProgram.CopyFrom(proto)\n\n # Set symbolic shapes\n for input_name in symbolic_inputs:\n input_type = input_shape_map.get(input_name, None)\n\n if isinstance(input_type, ImageType):\n if isinstance(input_type.shape, EnumeratedShapes):\n enumerated_shapes = []\n for s in input_type.shape.shapes:\n enumerated_shapes.append(\n NeuralNetworkImageSize(\n height=s.shape[-2], width=s.shape[-1]\n )\n )\n add_enumerated_image_sizes(\n model, input_name, sizes=enumerated_shapes\n )\n else:\n img_range = NeuralNetworkImageSizeRange()\n H = input_type.shape.shape[-2]\n W = input_type.shape.shape[-1]\n\n if isinstance(H, RangeDim):\n img_range.add_height_range((H.lower_bound, H.upper_bound))\n elif is_symbolic(H):\n img_range.add_height_range((1, -1))\n else:\n img_range.add_height_range((H, H))\n if isinstance(W, RangeDim):\n img_range.add_width_range((W.lower_bound, W.upper_bound))\n elif is_symbolic(W):\n img_range.add_width_range((1, -1))\n else:\n img_range.add_width_range((W, W))\n\n update_image_size_range(\n model, input_name, img_range\n )\n elif isinstance(input_type, TensorType):\n if isinstance(input_type.shape, EnumeratedShapes):\n add_multiarray_ndshape_enumeration(\n model, input_name, [tuple(s.shape) for s in input_type.shape.shapes]\n )\n else:\n lb = []\n ub = []\n for s in input_type.shape.shape:\n if isinstance(s, RangeDim):\n lb.append(s.lower_bound)\n ub.append(s.upper_bound)\n elif is_symbolic(s):\n lb.append(1)\n ub.append(-1)\n else:\n lb.append(s)\n ub.append(s)\n set_multiarray_ndshape_range(\n model, input_name, lower_bounds=lb, upper_bounds=ub\n )\n elif input_type is None:\n sym_type = prog.functions[\"main\"].inputs[input_name].sym_type\n lb = []\n ub = []\n for s in sym_type.get_shape():\n if is_symbolic(s):\n lb.append(1)\n ub.append(-1)\n else:\n lb.append(s)\n ub.append(s)\n set_multiarray_ndshape_range(\n model, input_name, lower_bounds=lb, upper_bounds=ub\n )\n\n # Set optional inputs\n _set_optional_inputs(model, input_types)\n\n return model\n",
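should_use_weight_file above is the gate deciding which const tensors are written to the external weight blob versus serialized as inline protobuf values: only float16/float32 arrays with at least 10 elements are externalized. A quick standalone check of that rule (the function body is copied verbatim from the source above; the test inputs are arbitrary):

import numpy as np

def should_use_weight_file(val):
    return (
        val is not None
        and isinstance(val, (np.ndarray, np.generic))
        and val.size >= 10
        and val.dtype in ['float16', 'float32']
    )

assert should_use_weight_file(np.zeros(16, dtype=np.float32))     # large fp32 -> weight file
assert not should_use_weight_file(np.zeros(4, dtype=np.float32))  # too small -> immediate value
assert not should_use_weight_file(np.zeros(16, dtype=np.int32))   # non-float dtype -> immediate value
assert not should_use_weight_file(3.0)                            # plain Python scalar -> immediate value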
"# Copyright (c) 2017, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport itertools\nimport pandas as pd\nimport os\nimport unittest\n\nfrom coremltools._deps import _HAS_SKLEARN\nfrom coremltools.converters.sklearn import convert\nfrom coremltools.models.utils import (\n evaluate_classifier,\n evaluate_classifier_with_probabilities,\n _macos_version,\n _is_macos,\n)\n\nif _HAS_SKLEARN:\n from sklearn.linear_model import LogisticRegression\n from sklearn.svm import LinearSVC\n\n\[email protected](not _HAS_SKLEARN, \"Missing sklearn. Skipping tests.\")\nclass GlmCassifierTest(unittest.TestCase):\n def test_logistic_regression_binary_classification_with_string_labels(self):\n self._conversion_and_evaluation_helper_for_logistic_regression([\"Foo\", \"Bar\"])\n\n def test_logistic_regression_multiclass_classification_with_int_labels(self):\n self._conversion_and_evaluation_helper_for_logistic_regression([1, 2, 3, 4])\n\n @staticmethod\n def _generate_random_data(labels):\n import random\n\n random.seed(42)\n\n # Generate some random data\n x, y = [], []\n for _ in range(100):\n x.append([random.gauss(2, 3), random.gauss(-1, 2)])\n y.append(random.choice(labels))\n return x, y\n\n def _conversion_and_evaluation_helper_for_logistic_regression(self, class_labels):\n options = {\n \"C\": (0.1, 1.0, 2.0),\n \"fit_intercept\": (True, False),\n \"class_weight\": (\"balanced\", None),\n \"solver\": (\"newton-cg\", \"lbfgs\", \"liblinear\", \"sag\"),\n }\n\n # Generate a list of all combinations of options and the default parameters\n product = itertools.product(*options.values())\n args = [{}] + [dict(zip(options.keys(), p)) for p in product]\n\n x, y = GlmCassifierTest._generate_random_data(class_labels)\n column_names = [\"x1\", \"x2\"]\n df = pd.DataFrame(x, columns=column_names)\n\n for cur_args in args:\n print(class_labels, cur_args)\n cur_model = LogisticRegression(**cur_args)\n cur_model.fit(x, y)\n\n spec = convert(\n cur_model, input_features=column_names, output_feature_names=\"target\"\n )\n\n if _is_macos() and _macos_version() >= (10, 13):\n probability_lists = cur_model.predict_proba(x)\n df[\"classProbability\"] = [\n dict(zip(cur_model.classes_, cur_vals))\n for cur_vals in probability_lists\n ]\n\n metrics = evaluate_classifier_with_probabilities(\n spec, df, probabilities=\"classProbability\", verbose=False\n )\n self.assertEqual(metrics[\"num_key_mismatch\"], 0)\n self.assertLess(metrics[\"max_probability_error\"], 0.00001)\n\n def test_linear_svc_binary_classification_with_string_labels(self):\n self._conversion_and_evaluation_helper_for_linear_svc([\"Foo\", \"Bar\"])\n\n def test_linear_svc_multiclass_classification_with_int_labels(self):\n self._conversion_and_evaluation_helper_for_linear_svc([1, 2, 3, 4])\n\n def _conversion_and_evaluation_helper_for_linear_svc(self, class_labels):\n ARGS = [\n {},\n {\"C\": 0.75, \"loss\": \"hinge\"},\n {\"penalty\": \"l1\", \"dual\": False},\n {\"tol\": 0.001, \"fit_intercept\": False},\n {\"intercept_scaling\": 1.5},\n ]\n\n x, y = GlmCassifierTest._generate_random_data(class_labels)\n column_names = [\"x1\", \"x2\"]\n df = pd.DataFrame(x, columns=column_names)\n\n for cur_args in ARGS:\n print(class_labels, cur_args)\n cur_model = LinearSVC(**cur_args)\n cur_model.fit(x, y)\n\n spec = convert(\n cur_model, input_features=column_names, output_feature_names=\"target\"\n )\n\n if _is_macos() 
and _macos_version() >= (10, 13):\n                df[\"prediction\"] = cur_model.predict(x)\n\n                cur_eval_metrics = evaluate_classifier(spec, df, verbose=False)\n                self.assertEqual(cur_eval_metrics[\"num_errors\"], 0)\n"
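Both helpers in the test above expand an options dict into every hyperparameter combination via itertools.product plus dict(zip(...)). The same pattern in isolation, with made-up option values:

import itertools

options = {"C": (0.1, 1.0), "fit_intercept": (True, False)}
product = itertools.product(*options.values())
args = [{}] + [dict(zip(options.keys(), p)) for p in product]
# args == [{}, {'C': 0.1, 'fit_intercept': True}, {'C': 0.1, 'fit_intercept': False},
#          {'C': 1.0, 'fit_intercept': True}, {'C': 1.0, 'fit_intercept': False}]
print(args)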
] | [
[
"numpy.product",
"numpy.reshape",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
],
[
"sklearn.svm.SVR",
"sklearn.preprocessing.OneHotEncoder",
"pandas.DataFrame",
"sklearn.datasets.load_boston"
],
[
"numpy.reshape",
"numpy.prod",
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.random.seed",
"sklearn.preprocessing.Imputer",
"sklearn.datasets.load_boston",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.int64"
],
[
"sklearn.linear_model.LogisticRegression",
"pandas.DataFrame",
"sklearn.svm.LinearSVC"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
zeou1/maggot_models | [
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4",
"4e1b518c2981ab1ca9607099c3813e8429d94ca4"
] | [
"notebooks/39.1-BDP-unbiased-clustering.py",
"notebooks/103.0-BDP-cascade-invert.py",
"notebooks/71.0-BDP-pdiff.py",
"notebooks/120.4-BDP-silly-models.py",
"notebooks/64.2-BDP-threshold-investigations.py",
"notebooks/127.0-BDP-more-silly-model.py",
"data/process_scripts/process_maggot_brain_connectome_2020-04-23.py",
"src/visualization/stack_seaborn.py",
"models/runs/drosophila-6-rdpg-sbm/_sources/drosophila-6-rdpg-sbm_05c3491ee6aa302c433b6a825e3d3302.py",
"models/runs/fit_dcsbm/_sources/fit_dcsbm_359460be48d1b9a76bfb12aeb326e155.py",
"src/cluster/divisive.py",
"notebooks/134.0-BDP-embed-revamp.py",
"data/process_scripts/process_maggot_brain_connectome_2020-01-29.py"
] | [
"# %% [markdown]\n# # Imports\nimport json\nimport os\nimport warnings\nfrom operator import itemgetter\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom joblib import Parallel, delayed\nfrom joblib.parallel import Parallel, delayed\nfrom sklearn.metrics import adjusted_rand_score\nimport networkx as nx\n\nfrom graspy.cluster import GaussianCluster, AutoGMMCluster\nfrom graspy.embed import AdjacencySpectralEmbed, OmnibusEmbed\nfrom graspy.models import DCSBMEstimator, SBMEstimator\nfrom graspy.plot import heatmap, pairplot\nfrom graspy.utils import binarize, cartprod, get_lcc, pass_to_ranks\nfrom src.data import load_everything\nfrom src.utils import export_skeleton_json, savefig\nfrom src.visualization import clustergram, palplot, sankey\nfrom src.hierarchy import signal_flow\n\nwarnings.simplefilter(\"ignore\", category=FutureWarning)\n\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\n\n# %% [markdown]\n# # Parameters\nBRAIN_VERSION = \"2019-12-09\"\nGRAPH_TYPES = [\"Gad\", \"Gaa\", \"Gdd\", \"Gda\"]\nGRAPH_TYPE_LABELS = [r\"A $\\to$ D\", r\"A $\\to$ A\", r\"D $\\to$ D\", r\"D $\\to$ A\"]\nN_GRAPH_TYPES = len(GRAPH_TYPES)\n\nSAVEFIGS = True\nDEFAULT_FMT = \"png\"\nDEFUALT_DPI = 150\n\nSAVESKELS = False\n\nMIN_CLUSTERS = 8\nMAX_CLUSTERS = 8\nN_INIT = 50\nPTR = True\nONLY_RIGHT = True\n\nembed = \"LSE\"\ncluster = \"GMM\"\nn_components = 4\nif cluster == \"GMM\":\n gmm_params = {\"n_init\": N_INIT, \"covariance_type\": \"all\"}\nelif cluster == \"AutoGMM\":\n gmm_params = {\"max_agglom_size\": None}\n\nnp.random.seed(23409857)\n\n\ndef stashfig(name, **kws):\n if SAVEFIGS:\n savefig(name, foldername=FNAME, fmt=DEFAULT_FMT, dpi=DEFUALT_DPI, **kws)\n\n\ndef stashskel(name, ids, colors, palette=None, **kws):\n if SAVESKELS:\n return export_skeleton_json(\n name, ids, colors, palette=palette, foldername=FNAME, **kws\n )\n\n\ndef ase(adj, n_components):\n if PTR:\n adj = pass_to_ranks(adj)\n ase = AdjacencySpectralEmbed(n_components=n_components)\n latent = ase.fit_transform(adj)\n latent = np.concatenate(latent, axis=-1)\n return latent\n\n\ndef to_laplace(graph, form=\"DAD\", regularizer=None):\n r\"\"\"\n A function to convert graph adjacency matrix to graph laplacian. \n Currently supports I-DAD, DAD, and R-DAD laplacians, where D is the diagonal\n matrix of degrees of each node raised to the -1/2 power, I is the \n identity matrix, and A is the adjacency matrix.\n \n R-DAD is regularized laplacian: where :math:`D_t = D + regularizer*I`.\n Parameters\n ----------\n graph: object\n Either array-like, (n_vertices, n_vertices) numpy array,\n or an object of type networkx.Graph.\n form: {'I-DAD' (default), 'DAD', 'R-DAD'}, string, optional\n \n - 'I-DAD'\n Computes :math:`L = I - D*A*D`\n - 'DAD'\n Computes :math:`L = D*A*D`\n - 'R-DAD'\n Computes :math:`L = D_t*A*D_t` where :math:`D_t = D + regularizer*I`\n regularizer: int, float or None, optional (default=None)\n Constant to be added to the diagonal of degree matrix. If None, average \n node degree is added. If int or float, must be >= 0. Only used when \n ``form`` == 'R-DAD'.\n Returns\n -------\n L: numpy.ndarray\n 2D (n_vertices, n_vertices) array representing graph \n laplacian of specified form\n References\n ----------\n .. [1] Qin, Tai, and Karl Rohe. \"Regularized spectral clustering\n under the degree-corrected stochastic blockmodel.\" In Advances\n in Neural Information Processing Systems, pp. 3120-3128. 
2013\n \"\"\"\n valid_inputs = [\"I-DAD\", \"DAD\", \"R-DAD\"]\n if form not in valid_inputs:\n raise TypeError(\"Unsuported Laplacian normalization\")\n\n A = graph\n\n in_degree = np.sum(A, axis=0)\n out_degree = np.sum(A, axis=1)\n\n # regularize laplacian with parameter\n # set to average degree\n if form == \"R-DAD\":\n if regularizer is None:\n regularizer = 1\n elif not isinstance(regularizer, (int, float)):\n raise TypeError(\n \"Regularizer must be a int or float, not {}\".format(type(regularizer))\n )\n elif regularizer < 0:\n raise ValueError(\"Regularizer must be greater than or equal to 0\")\n regularizer = regularizer * np.mean(out_degree)\n\n in_degree += regularizer\n out_degree += regularizer\n\n with np.errstate(divide=\"ignore\"):\n in_root = 1 / np.sqrt(in_degree) # this is 10x faster than ** -0.5\n out_root = 1 / np.sqrt(out_degree)\n\n in_root[np.isinf(in_root)] = 0\n out_root[np.isinf(out_root)] = 0\n\n in_root = np.diag(in_root) # just change to sparse diag for sparse support\n out_root = np.diag(out_root)\n\n if form == \"I-DAD\":\n L = np.diag(in_degree) - A\n L = in_root @ L @ in_root\n elif form == \"DAD\" or form == \"R-DAD\":\n L = out_root @ A @ in_root\n # return symmetrize(L, method=\"avg\") # sometimes machine prec. makes this necessary\n return L\n\n\ndef lse(adj, n_components, regularizer=None):\n if PTR:\n adj = pass_to_ranks(adj)\n lap = to_laplace(adj, form=\"R-DAD\")\n ase = AdjacencySpectralEmbed(n_components=n_components)\n latent = ase.fit_transform(lap)\n latent = np.concatenate(latent, axis=-1)\n return latent\n\n\ndef omni(adjs, n_components):\n if PTR:\n adjs = [pass_to_ranks(a) for a in adjs]\n omni = OmnibusEmbed(n_components=n_components // len(adjs))\n latent = omni.fit_transform(adjs)\n latent = np.concatenate(latent, axis=-1) # first is for in/out\n latent = np.concatenate(latent, axis=-1) # second is for concat. 
each graph\n return latent\n\n\ndef ase_concatenate(adjs, n_components):\n if PTR:\n adjs = [pass_to_ranks(a) for a in adjs]\n ase = AdjacencySpectralEmbed(n_components=n_components // len(adjs))\n graph_latents = []\n for a in adjs:\n latent = ase.fit_transform(a)\n latent = np.concatenate(latent, axis=-1)\n graph_latents.append(latent)\n latent = np.concatenate(graph_latents, axis=-1)\n return latent\n\n\ndef sub_ari(known_inds, true_labels, pred_labels):\n true_known_labels = true_labels[known_inds]\n pred_known_labels = pred_labels[known_inds]\n ari = adjusted_rand_score(true_known_labels, pred_known_labels)\n return ari\n\n\n# Set up plotting constants\nplt.style.use(\"seaborn-white\")\nsns.set_palette(\"deep\")\nsns.set_context(\"talk\", font_scale=1)\n\n\n# %% [markdown]\n# # Load the data\n\n\nadj, class_labels, side_labels, skeleton_labels = load_everything(\n \"Gad\",\n version=BRAIN_VERSION,\n return_keys=[\"Merge Class\", \"Hemisphere\"],\n return_ids=True,\n)\n\n\n# select the right hemisphere\nif ONLY_RIGHT:\n side = \"right hemisphere\"\n right_inds = np.where(side_labels == \"R\")[0]\n adj = adj[np.ix_(right_inds, right_inds)]\n class_labels = class_labels[right_inds]\n skeleton_labels = skeleton_labels[right_inds]\nelse:\n side = \"full brain\"\n\n# sort by number of synapses\ndegrees = adj.sum(axis=0) + adj.sum(axis=1)\nsort_inds = np.argsort(degrees)[::-1]\nadj = adj[np.ix_(sort_inds, sort_inds)]\nclass_labels = class_labels[sort_inds]\nskeleton_labels = skeleton_labels[sort_inds]\n\n# remove disconnected nodes\nadj, lcc_inds = get_lcc(adj, return_inds=True)\nclass_labels = class_labels[lcc_inds]\nskeleton_labels = skeleton_labels[lcc_inds]\n\n# remove pendants\ndegrees = np.count_nonzero(adj, axis=0) + np.count_nonzero(adj, axis=1)\nnot_pendant_mask = degrees != 1\nnot_pendant_inds = np.array(range(len(degrees)))[not_pendant_mask]\nadj = adj[np.ix_(not_pendant_inds, not_pendant_inds)]\nclass_labels = class_labels[not_pendant_inds]\nskeleton_labels = skeleton_labels[not_pendant_inds]\n\n# plot degree sequence\nd_sort = np.argsort(degrees)[::-1]\ndegrees = degrees[d_sort]\nplt.figure(figsize=(10, 5))\nsns.scatterplot(x=range(len(degrees)), y=degrees, s=30, linewidth=0)\n\nknown_inds = np.where(class_labels != \"Unk\")[0]\n\n\n# %% [markdown]\n# # Run clustering using LSE on the sum graph\n\nn_verts = adj.shape[0]\n\n\nlatent = lse(adj, n_components, regularizer=None)\npairplot(latent, labels=class_labels, title=embed)\n\nk_list = list(range(MIN_CLUSTERS, MAX_CLUSTERS + 1))\nn_runs = len(k_list)\nout_dicts = []\n\nbin_adj = binarize(adj)\n\nlast_pred_labels = np.zeros(n_verts)\n\nif cluster == \"GMM\":\n ClusterModel = GaussianCluster\nelif cluster == \"AutoGMM\":\n ClusterModel = AutoGMMCluster\n\nfor k in k_list:\n run_name = f\"k = {k}, {cluster}, {embed}, {side} (A to D), PTR, raw\"\n print(run_name)\n print()\n\n # Do clustering\n # TODO: make this autogmm instead\n gmm = ClusterModel(min_components=k, max_components=k, **gmm_params)\n gmm.fit(latent)\n pred_labels = gmm.predict(latent)\n\n # Score unsupervised metrics\n base_dict = {\n \"K\": k,\n \"Cluster\": cluster,\n \"Embed\": embed,\n \"Method\": f\"{cluster} o {embed}\",\n }\n\n # GMM likelihood\n score = gmm.model_.score(latent)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"GMM likelihood\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n\n # GMM BIC\n score = gmm.model_.bic(latent)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"GMM BIC\"\n temp_dict[\"Score\"] = score\n 
out_dicts.append(temp_dict)\n\n # SBM likelihood\n sbm = SBMEstimator(directed=True, loops=False)\n sbm.fit(bin_adj, y=pred_labels)\n score = sbm.score(bin_adj)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"SBM likelihood\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n\n # DCSBM likelihood\n dcsbm = DCSBMEstimator(directed=True, loops=False)\n dcsbm.fit(bin_adj, y=pred_labels)\n score = dcsbm.score(bin_adj)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"DCSBM likelihood\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n\n # ARI of the subset with labels\n score = sub_ari(known_inds, class_labels, pred_labels)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"Simple ARI\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n\n # ARI vs K - 1\n score = adjusted_rand_score(last_pred_labels, pred_labels)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"K-1 ARI\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n last_pred_labels = pred_labels\n\n save_name = f\"k{k}-{cluster}-{embed}-right-ad-PTR-raw\"\n\n # Plot embedding\n # pairplot(latent, labels=pred_labels, title=run_name)\n # stashfig(\"latent-\" + save_name)\n\n # Plot everything else\n clustergram(adj, class_labels, pred_labels)\n stashfig(\"clustergram-\" + save_name)\n\n # New plot\n # - Compute signal flow\n # - Get the centroid of each cluster and project to 1d\n # - Alternatively, just take the first dimension\n # - For each cluster plot as a node\n\n # output skeletons\n if SAVESKELS:\n _, colormap, pal = stashskel(\n save_name, skeleton_labels, pred_labels, palette=\"viridis\", multiout=True\n )\n\n palplot(k, cmap=\"viridis\")\n stashfig(\"palplot-\" + save_name)\n\n # save dict colormapping\n filename = (\n Path(\"./maggot_models/notebooks/outs\")\n / Path(FNAME)\n / str(\"colormap-\" + save_name + \".json\")\n )\n with open(filename, \"w\") as fout:\n json.dump(colormap, fout)\n\n stashskel(\n save_name, skeleton_labels, pred_labels, palette=\"viridis\", multiout=False\n )\n\n# %% [markdown]\n# # Plot results of unsupervised metrics\n\nresult_df = pd.DataFrame(out_dicts)\nfg = sns.FacetGrid(result_df, col=\"Metric\", col_wrap=3, sharey=False, height=4)\nfg.map(sns.lineplot, \"K\", \"Score\")\nstashfig(f\"metrics-{cluster}-{embed}-right-ad-PTR-raw\")\n\n\n# Modifications i need to make to the above\n# - Increase the height of the sankey diagram overall\n# - Look into color maps that could be better\n# - Color the cluster labels by what gets written to the JSON\n# - Plot the clusters as nodes in a small network\n\n# %% [markdown]\n# # try graph flow\n\n\nnode_signal_flow = signal_flow(adj)\nmean_sf = np.zeros(k)\nfor i in np.unique(pred_labels):\n inds = np.where(pred_labels == i)[0]\n mean_sf[i] = np.mean(node_signal_flow[inds])\n\ncluster_mean_latent = gmm.model_.means_[:, 0]\nblock_probs = SBMEstimator().fit(bin_adj, y=pred_labels).block_p_\nblock_prob_df = pd.DataFrame(data=block_probs, index=range(k), columns=range(k))\nblock_g = nx.from_pandas_adjacency(block_prob_df, create_using=nx.DiGraph)\nplt.figure(figsize=(10, 10))\n# don't ever let em tell you you're too pythonic\npos = dict(zip(range(k), zip(cluster_mean_latent, mean_sf)))\n# nx.draw_networkx_nodes(block_g, pos=pos)\nlabels = nx.get_edge_attributes(block_g, \"weight\")\n# nx.draw_networkx_edge_labels(block_g, pos, edge_labels=labels)\nfrom matplotlib.cm import ScalarMappable\nimport matplotlib as mpl\n\nnorm = mpl.colors.LogNorm(vmin=0.01, vmax=0.1)\n\nsm = 
ScalarMappable(cmap=\"Reds\", norm=norm)\ncmap = sm.to_rgba(np.array(list(labels.values())) + 0.01)\nnx.draw_networkx(\n block_g,\n pos,\n edge_cmap=\"Reds\",\n edge_color=cmap,\n connectionstyle=\"arc3,rad=0.2\",\n width=1.5,\n)\n\n# %% [markdown]\n# # signal flow marginals\n\nsignal_flow_marginal(adj, pred_labels)\n\n# %% [markdown]\n# #\n\n\ndef signal_flow_marginal(adj, labels, col_wrap=5, palette=\"tab20\"):\n sf = signal_flow(adj)\n uni_labels = np.unique(labels)\n medians = []\n for i in uni_labels:\n inds = np.where(labels == i)[0]\n medians.append(np.median(sf[inds]))\n sort_inds = np.argsort(medians)[::-1]\n col_order = uni_labels[sort_inds]\n plot_df = pd.DataFrame()\n plot_df[\"Signal flow\"] = sf\n plot_df[\"Class\"] = labels\n fg = sns.FacetGrid(\n plot_df,\n col=\"Class\",\n aspect=1.5,\n palette=palette,\n col_order=col_order,\n sharey=False,\n col_wrap=col_wrap,\n xlim=(-3, 3),\n )\n fg = fg.map(sns.distplot, \"Signal flow\") # bins=np.linspace(-2.2, 2.2))\n fg.set(yticks=[], yticklabels=[])\n plt.tight_layout()\n return fg\n\n\nsignal_flow_marginal(adj, class_labels)\nstashfig(\"known-class-sf-marginal\")\n\n# tomorrow\n# DEFINITELY\n# run with unsupervised metrics from k=2-50\n\n# IF TIME\n# run hgmm\n",
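The to_laplace helper in the notebook above computes the DAD form L = D_out^{-1/2} A D_in^{-1/2} (optionally adding a regularizer to the degrees for R-DAD). A toy sanity check on a small symmetric graph, where DAD reduces to the familiar symmetric-normalized adjacency (the graph here is made up):

import numpy as np

A = np.array([[0, 1, 1],
              [1, 0, 0],
              [1, 0, 0]], dtype=float)

deg = A.sum(axis=0)                 # in-degree == out-degree for a symmetric graph
D_root = np.diag(1 / np.sqrt(deg))
L = D_root @ A @ D_root             # DAD form, as in to_laplace(form="DAD")

expected = A / np.sqrt(np.outer(deg, deg))
assert np.allclose(L, expected)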
"# %% [markdown]\n# #\nimport itertools\nimport os\nimport time\nfrom itertools import chain\n\nimport colorcet as cc\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom anytree import LevelOrderGroupIter, Node, RenderTree\nfrom joblib import Parallel, delayed\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom sklearn.decomposition import PCA\n\nfrom graspy.plot import heatmap, pairplot\nfrom src.data import load_metagraph\nfrom src.graph import MetaGraph, preprocess\nfrom src.io import savecsv, savefig, saveskels\nfrom src.traverse import (\n cascades_from_node,\n generate_cascade_tree,\n generate_random_walks,\n path_to_visits,\n to_markov_matrix,\n to_path_graph,\n)\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n barplot_text,\n draw_networkx_nice,\n draw_separators,\n matrixplot,\n remove_shared_ax,\n remove_spines,\n screeplot,\n sort_meta,\n stacked_barplot,\n)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name, foldername=FNAME, save_on=True, **kws)\n\n\n#%% Load and preprocess the data\n\nVERSION = \"2020-03-09\"\nprint(f\"Using version {VERSION}\")\n\nplot_examples = False\nplot_embed = False\nplot_full_mat = False\ngraph_type = \"Gad\"\nthreshold = 0\nweight = \"weight\"\nmg = load_metagraph(graph_type, VERSION)\nmg = preprocess(\n mg,\n threshold=threshold,\n sym_threshold=False,\n remove_pdiff=True,\n binarize=False,\n weight=weight,\n)\nprint(f\"Preprocessed graph {graph_type} with threshold={threshold}, weight={weight}\")\n\n# TODO update this with the mixed groups\n# TODO make these functional for selecting proper paths\n\nout_classes = [\n \"O_dSEZ\",\n \"O_dSEZ;CN\",\n \"O_dSEZ;LHN\",\n \"O_dVNC\",\n \"O_dVNC;O_RG\",\n \"O_dVNC;CN\",\n \"O_RG\",\n \"O_dUnk\",\n \"O_RG-IPC\",\n \"O_RG-ITP\",\n \"O_RG-CA-LP\",\n]\nfrom_groups = [\n (\"sens-ORN\",),\n (\"sens-photoRh5\", \"sens-photoRh6\"),\n (\"sens-MN\",),\n (\"sens-thermo\",),\n (\"sens-vtd\",),\n (\"sens-AN\",),\n]\nfrom_group_names = [\"Odor\", \"Photo\", \"MN\", \"Temp\", \"VTD\", \"AN\"]\n\nout_groups = [\n (\"motor-mAN\", \"motormVAN\", \"motor-mPaN\"),\n (\"O_dSEZ\", \"O_dVNC;O_dSEZ\", \"O_dSEZ;CN\", \"LHN;O_dSEZ\"),\n (\"O_dVNC\", \"O_dVNC;CN\", \"O_RG;O_dVNC\", \"O_dVNC;O_dSEZ\"),\n (\"O_RG\", \"O_RG-IPC\", \"O_RG-ITP\", \"O_RG-CA-LP\", \"O_RG;O_dVNC\"),\n (\"O_dUnk\",),\n]\nout_group_names = [\"Motor\", \"SEZ\", \"VNC\", \"RG\", \"dUnk\"]\n\n\nfrom_classes = list(chain.from_iterable(from_groups)) # make this a flat list\nout_classes = list(chain.from_iterable(out_groups))\n\nclass_key = \"Merge Class\"\n\nadj = nx.to_numpy_array(mg.g, weight=weight, nodelist=mg.meta.index.values)\nn_verts = len(adj)\nmeta = mg.meta.copy()\ng = mg.g.copy()\nmeta[\"idx\"] = range(len(meta))\n\nfrom_inds = meta[meta[class_key].isin(from_classes)][\"idx\"].values\nout_inds = meta[meta[class_key].isin(out_classes)][\"idx\"].values\nind_map = dict(zip(meta.index, meta[\"idx\"]))\ng = nx.relabel_nodes(g, ind_map, copy=True)\nout_ind_map = dict(zip(out_inds, range(len(out_inds))))\n\n# %% [markdown]\n# # Use a method to generate visits\n\npath_type = \"cascade\"\nif path_type == \"cascade\":\n p = 0.01\n not_probs = (\n 1 - p\n ) ** adj # probability of none of the synapses causing postsynaptic\n probs = 1 - not_probs # probability of ANY of the synapses firing onto next\nelif 
path_type == \"fancy-cascade\":\n alpha = 0.5\n flat = np.full(adj.shape, alpha)\n deg = meta[\"dendrite_input\"].values\n deg[deg == 0] = 1\n flat = flat / deg[None, :]\n not_probs = np.power((1 - flat), adj)\n probs = 1 - not_probs\n\n#%%\nseed = 8888\nmax_depth = 10\nn_bins = 10\nn_sims = 100\nmethod = \"tree\"\nnormalize_n_source = False\n\n\nbasename = f\"-{graph_type}-t{threshold}-pt{path_type}-b{n_bins}-n{n_sims}-m{method}\"\nbasename += f\"-norm{normalize_n_source}\"\nbasename += f\"-plus-inverted\"\n\n\nnp.random.seed(seed)\nif method == \"tree\":\n seeds = np.random.choice(int(1e8), size=len(from_inds), replace=False)\n outs = Parallel(n_jobs=1, verbose=10)(\n delayed(cascades_from_node)(\n fi, probs, out_inds, max_depth, n_sims, seed, n_bins, method\n )\n for fi, seed in zip(from_inds, seeds)\n )\nelif method == \"path\":\n outs = []\n for start_ind in from_inds:\n temp_hist = cascades_from_node(\n start_ind, probs, out_inds, max_depth, n_sims, seed, n_bins, method\n )\n outs.append(temp_hist)\nfrom_hist_mat = np.concatenate(outs, axis=-1)\n\n###\n# invert\nif method == \"tree\":\n seeds = np.random.choice(int(1e8), size=len(out_inds), replace=False)\n outs = Parallel(n_jobs=1, verbose=10)(\n delayed(cascades_from_node)(\n fi, probs.T, from_inds, max_depth, n_sims, seed, n_bins, method\n )\n for fi, seed in zip(out_inds, seeds)\n )\nelif method == \"path\":\n outs = []\n for start_ind in from_inds:\n temp_hist = cascades_from_node(\n start_ind, probs.T, out_inds, max_depth, n_sims, seed, n_bins, method\n )\n outs.append(temp_hist)\nout_hist_mat = np.concatenate(outs, axis=-1)\n\n\n# generate_cascade_paths(start_ind, probs, 1, stop_inds=out_inds, max_depth=10)\n# %% [markdown]\n# # Sort metadata\nfull_hist_mat = np.concatenate((from_hist_mat, out_hist_mat), axis=1)\nhist_mat = full_hist_mat\n# row metadata\nids = pd.Series(index=meta[\"idx\"], data=meta.index, name=\"id\")\nto_class = ids.map(meta[\"Merge Class\"])\nto_class.name = \"to_class\"\nrow_df = pd.concat([ids, to_class], axis=1)\n\n# col metadata\norders = pd.Series(data=len(from_inds) * list(range(n_bins)), name=\"order\")\nfrom_idx = pd.Series(data=np.repeat(from_inds, n_bins), name=\"idx\")\nfrom_ids = from_idx.map(ids)\nfrom_ids.name = \"id\"\nfrom_class = from_ids.map(meta[\"Merge Class\"])\nfrom_class.name = \"class\"\nfrom_col_df = pd.concat([orders, from_idx, from_ids, from_class], axis=1)\n\norders = pd.Series(data=len(out_inds) * list(range(n_bins)), name=\"order\")\nout_idx = pd.Series(data=np.repeat(out_inds, n_bins), name=\"idx\")\nout_ids = out_idx.map(ids)\nout_ids.name = \"id\"\nout_class = out_ids.map(meta[\"Merge Class\"])\nout_class.name = \"class\"\nout_col_df = pd.concat([orders, out_idx, out_ids, out_class], axis=1)\ncol_df = pd.concat([from_col_df, out_col_df], axis=0, ignore_index=True)\n# %% [markdown]\n# #\nlog_mat = np.log10(hist_mat + 1)\nif plot_full_mat:\n shape = log_mat.shape\n figsize = (10, 20)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n matrixplot(\n log_mat,\n ax=ax,\n col_meta=col_df,\n col_sort_class=[\"from_class\"],\n row_meta=row_df,\n row_sort_class=[\"to_class\"],\n plot_type=\"scattermap\",\n sizes=(0.5, 0.5),\n tick_rot=45,\n )\n stashfig(\"log-full-scatter\" + basename)\n\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n matrixplot(\n log_mat,\n ax=ax,\n col_meta=col_df,\n col_sort_class=[\"from_class\"],\n row_colors=CLASS_COLOR_DICT,\n row_meta=row_df,\n row_sort_class=[\"to_class\"],\n plot_type=\"heatmap\",\n sizes=(0.5, 0.5),\n tick_rot=45,\n )\n 
stashfig(\"log-full-heat\" + basename)\n\n# %% [markdown]\n# # Screeplots\n\nif plot_embed:\n screeplot(hist_mat.astype(float), title=\"Raw hist mat (full)\")\n stashfig(\"scree-raw-mat\" + basename)\n screeplot(log_mat, title=\"Log hist mat (full)\")\n stashfig(\"scree-log-mat\" + basename)\n\n# %% [markdown]\n# # Pairplots\nif plot_embed:\n pca = PCA(n_components=6)\n embed = pca.fit_transform(log_mat)\n loadings = pca.components_.T\n pg = pairplot(\n embed,\n labels=to_class.values,\n palette=CLASS_COLOR_DICT,\n height=5,\n title=\"Node response embedding (log)\",\n )\n pg._legend.remove()\n stashfig(\"node-pca-log\" + basename)\n pg = pairplot(\n loadings,\n labels=from_class.values,\n height=5,\n title=\"Source class embedding (log)\",\n )\n stashfig(\"source-pca-log\" + basename)\n\n pca = PCA(n_components=6)\n embed = pca.fit_transform(hist_mat.astype(float))\n loadings = pca.components_.T\n pg = pairplot(\n embed,\n labels=to_class.values,\n palette=CLASS_COLOR_DICT,\n height=5,\n title=\"Node response embedding (raw)\",\n )\n pg._legend.remove()\n stashfig(\"node-pca-log\" + basename)\n pg = pairplot(\n loadings,\n labels=from_class.values,\n height=5,\n title=\"Source class embedding (raw)\",\n )\n stashfig(\"source-pca-log\" + basename)\n\n# %% [markdown]\n# # Collapse that matrix\nhist_mat = full_hist_mat\ncollapsed_hist = []\ncollapsed_col_df = []\ngroups = from_groups + out_groups\nnames = from_group_names + out_group_names\nfor fg, fg_name in zip(groups, names):\n from_df = col_df[col_df[\"class\"].isin(fg)]\n n_in_group = len(from_df)\n for order in from_df[\"order\"].unique():\n inds = from_df[from_df[\"order\"] == order].index\n col = hist_mat[:, inds].sum(axis=1)\n if normalize_n_source:\n col = col.astype(float)\n col /= n_in_group\n collapsed_hist.append(col)\n row = {\"order\": order, \"class\": fg_name}\n collapsed_col_df.append(row)\n\n\ncollapsed_col_df = pd.DataFrame(collapsed_col_df)\ncollapsed_hist = np.array(collapsed_hist).T\nlog_collapsed_hist = np.log10(collapsed_hist + 1)\n\n# %% [markdown]\n# #\nif plot_embed:\n pca = PCA(n_components=6)\n embed = pca.fit_transform(log_collapsed_hist)\n loadings = pca.components_.T\n pg = pairplot(\n embed,\n labels=to_class.values,\n palette=CLASS_COLOR_DICT,\n height=5,\n title=\"Collapsed node response embedding (log)\",\n )\n pg._legend.remove()\n stashfig(\"coll-node-pca-log\" + basename)\n pg = pairplot(\n loadings,\n labels=collapsed_col_df[\"from_class\"].values,\n height=5,\n title=\"Collapsed source class embedding (log)\",\n )\n stashfig(\"coll-source-pca-log\" + basename)\n\n pca = PCA(n_components=6)\n embed = pca.fit_transform(collapsed_hist.astype(float))\n loadings = pca.components_.T\n pg = pairplot(\n embed,\n labels=to_class.values,\n palette=CLASS_COLOR_DICT,\n height=5,\n title=\"Collapsed node response embedding (raw)\",\n )\n pg._legend.remove()\n stashfig(\"coll-node-pca-log\" + basename)\n pg = pairplot(\n loadings,\n labels=collapsed_col_df[\"from_class\"].values,\n height=5,\n title=\"Collapsed source class embedding (raw)\",\n )\n stashfig(\"coll-source-pca-log\" + basename)\n\n# %% [markdown]\n# # Compute mean visit over all sources, for plotting\ndef mean_visit(row):\n n_groups = len(row) // n_bins\n s = 0\n for i in range(n_groups):\n group = row[i * n_bins : (i + 1) * n_bins]\n for j, val in enumerate(group):\n s += j * val\n s /= row.sum()\n return s\n\n\nvisits = []\nfor r in collapsed_hist:\n mv = mean_visit(r)\n visits.append(mv)\nvisits = np.array(visits)\nvisits[np.isnan(visits)] = 
n_bins + 1\nrow_df[\"visit_order\"] = visits\nmean_visit_order = row_df.groupby([\"to_class\"])[\"visit_order\"].mean()\nrow_df[\"group_visit_order\"] = row_df[\"to_class\"].map(mean_visit_order)\nrow_df[\"n_visit\"] = collapsed_hist.sum(axis=1)\n# %% [markdown]\n# #\nfig, ax = plt.subplots(1, 1, figsize=(15, 15))\nsns.set_context(\"talk\", font_scale=0.8)\ngridline_kws = dict(color=\"grey\", linestyle=\"--\", alpha=0.7, linewidth=0.3)\nmatrixplot(\n log_collapsed_hist,\n ax=ax,\n col_meta=collapsed_col_df,\n col_sort_class=[\"class\"],\n row_meta=row_df,\n row_sort_class=[\"to_class\"],\n row_colors=CLASS_COLOR_DICT,\n row_class_order=\"group_visit_order\",\n row_item_order=[\"visit_order\"],\n plot_type=\"heatmap\",\n tick_rot=0,\n row_ticks=False,\n gridline_kws=gridline_kws,\n)\nstashfig(\"collapsed-log-heat\" + basename)\n\n# %% [markdown]\n# #\nsns.set_context(\"talk\", font_scale=1)\ngridline_kws = dict(color=\"grey\", linestyle=\"--\", alpha=0.7, linewidth=0.3)\n\nfig, ax = plt.subplots(1, 1, figsize=(25, 15))\nax, divider, top_cax, left_cax = matrixplot(\n log_collapsed_hist.T,\n ax=ax,\n row_meta=collapsed_col_df,\n row_sort_class=[\"class\"],\n col_meta=row_df,\n col_sort_class=[\"to_class\"],\n col_colors=CLASS_COLOR_DICT,\n col_class_order=\"group_visit_order\",\n col_item_order=[\"visit_order\"],\n plot_type=\"heatmap\",\n tick_rot=45,\n col_ticks=False,\n gridline_kws=gridline_kws,\n)\ncax = divider.append_axes(\"right\", size=\"1%\", pad=0.02, sharey=ax)\nremove_shared_ax(cax)\nsns.heatmap(\n collapsed_col_df[\"order\"][:, None], ax=cax, cbar=False, cmap=\"RdBu\", center=0\n)\ncax.set_xticks([])\ncax.set_yticks([])\ncax.set_ylabel(r\"Hops $\\to$\", rotation=-90, ha=\"center\", va=\"center\", labelpad=20)\ncax.yaxis.set_label_position(\"right\")\ntop_cax.set_yticks([0.5])\ntop_cax.set_yticklabels([\"Class\"], va=\"center\")\nax.set_xlabel(\"Neuron\")\nax.set_ylabel(\"Source class\")\nstashfig(\"collapsed-log-heat-transpose\" + basename, dpi=200)\n\nfig, ax = plt.subplots(1, 1, figsize=(25, 15))\nax, divider, top_cax, left_cax = matrixplot(\n log_collapsed_hist.T,\n ax=ax,\n row_meta=collapsed_col_df,\n row_sort_class=[\"class\"],\n col_meta=row_df,\n col_sort_class=[\"to_class\"],\n col_colors=CLASS_COLOR_DICT,\n col_class_order=\"group_visit_order\",\n col_item_order=[\"visit_order\"],\n plot_type=\"heatmap\",\n tick_rot=45,\n col_ticks=True,\n gridline_kws=gridline_kws,\n)\ncax = divider.append_axes(\"right\", size=\"1%\", pad=0.02, sharey=ax)\nremove_shared_ax(cax)\nsns.heatmap(\n collapsed_col_df[\"order\"][:, None], ax=cax, cbar=False, cmap=\"RdBu\", center=0\n)\ncax.set_xticks([])\ncax.set_yticks([])\ncax.set_ylabel(r\"Hops $\\to$\", rotation=-90, ha=\"center\", va=\"center\", labelpad=20)\ncax.yaxis.set_label_position(\"right\")\ntop_cax.set_yticks([0.5])\ntop_cax.set_yticklabels([\"Class\"], va=\"center\")\nax.set_xlabel(\"Neuron\")\nax.set_ylabel(\"Source class\")\nstashfig(\"collapsed-log-heat-transpose-labeled\" + basename, dpi=200)\n\n# %% [markdown]\n# # clustermap the matrix\n\n\nsns.set_context(\"talk\", font_scale=1)\nlinkage = \"average\"\nmetric = \"euclidean\"\ncolors = np.vectorize(CLASS_COLOR_DICT.get)(row_df[\"to_class\"])\n\nperm_inds, sort_collapsed_col_df = sort_meta(\n collapsed_col_df, sort_class=[\"from_class\"]\n)\nsort_log_collapsed_hist = log_collapsed_hist[:, perm_inds]\n\n\ncg = sns.clustermap(\n data=sort_log_collapsed_hist.T,\n col_cluster=True,\n row_cluster=False,\n col_colors=colors,\n cmap=\"RdBu_r\",\n center=0,\n cbar_pos=None,\n 
method=linkage,\n metric=metric,\n)\nax = cg.ax_heatmap\ndraw_separators(\n ax,\n ax_type=\"y\",\n sort_meta=sort_collapsed_col_df,\n sort_class=[\"from_class\"],\n tick_rot=0,\n)\nax.xaxis.set_ticks([])\n# ax.set_ylabel(r\"Visits over time $\\to$\")\nax.set_xlabel(\"Neuron\")\nax.yaxis.tick_left()\n# ax.set_yticklabels(ax.get_yticklabels(), ha=\"left\")\nstashfig(\"collapsed-log-clustermap\" + basename)\n# stashfig(\"collapsed-log-clustermap\" + basename, fmt=\"pdf\")\n\n\n# %% [markdown]\n# # Do some plotting for illustration only\n\n\nif plot_examples:\n sns.set_context(\"talk\")\n sns.set_palette(\"Set1\")\n examples = [742, 605, 743, 2282, 596, 2367, 1690, 2313]\n for target_ind in examples:\n row = collapsed_hist[target_ind, :]\n perm_inds, sort_col_df = sort_meta(collapsed_col_df, sort_class=[\"from_class\"])\n sort_row = row[perm_inds]\n\n fig, ax = plt.subplots(1, 1)\n xs = np.arange(len(sort_row)) + 0.5\n divider = make_axes_locatable(ax)\n bot_cax = divider.append_axes(\"bottom\", size=\"3%\", pad=0.02, sharex=ax)\n remove_shared_ax(bot_cax)\n\n ax.bar(x=xs, height=sort_row, width=0.8)\n draw_separators(\n ax, sort_meta=sort_col_df, sort_class=[\"from_class\"], tick_rot=0\n )\n ax.set_xlim(0, len(xs))\n ax.set_ylabel(\"# hits @ time\")\n\n sns.heatmap(\n collapsed_col_df[\"order\"][None, :],\n ax=bot_cax,\n cbar=False,\n cmap=\"RdBu\",\n center=0,\n )\n bot_cax.set_xticks([])\n bot_cax.set_yticks([])\n bot_cax.set_xlabel(r\"Hops $\\to$\", x=0.1, ha=\"left\", labelpad=-22)\n bot_cax.set_xticks([20.5, 24.5, 28.5])\n bot_cax.set_xticklabels([1, 5, 9], rotation=0)\n\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n target_skid = meta.iloc[target_ind, :].name\n ax.set_title(\n f\"Response for cell {target_skid} ({meta[meta['idx'] == target_ind]['Merge Class'].values[0]})\"\n )\n\n stashfig(f\"{target_skid}-response-hist\" + basename)\n\n",
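The "cascade" transition above uses probs = 1 - (1 - p) ** adj, i.e. node i activates node j if at least one of adj[i, j] independent synapses (each firing with probability p) transmits. A one-step toy simulation of that rule (synapse counts and seed are made up; the real code above also bins hits by hop depth):

import numpy as np

rng = np.random.default_rng(8888)
p = 0.01
adj = np.array([[0, 3, 0],
                [0, 0, 10],
                [0, 0, 0]])          # synapse counts between 3 toy nodes

probs = 1 - (1 - p) ** adj           # P(edge i -> j transmits)

active = np.array([True, False, False])   # cascade starts at node 0
transmit = rng.random(adj.shape) < probs  # sample every edge once
next_active = (transmit & active[:, None]).any(axis=0)  # active if any active parent fired
print(probs)
print(next_active)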
"#%%\nfrom src.data import load_metagraph\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nmg = load_metagraph(\"G\", \"2020-01-21\")\n\nis_pdiff = np.where(mg[\"is_pdiff\"])[0]\nmg = mg.reindex(is_pdiff)\ndegree_df = mg.calculate_degrees()\nplt.figure()\nmelt_degree = pd.melt(\n degree_df.reset_index(),\n id_vars=[\"ID\"],\n value_vars=[\"In degree\", \"Out degree\", \"Total degree\"],\n value_name=\"Degree\",\n)\nsns.stripplot(y=\"Degree\", data=melt_degree, x=\"variable\", jitter=0.45)\n\nplt.figure()\nmelt_syns = pd.melt(\n degree_df.reset_index(),\n id_vars=[\"ID\"],\n value_vars=[\"In edgesum\", \"Out edgesum\", \"Total edgesum\"],\n value_name=\"Synapses\",\n)\nsns.stripplot(y=\"Synapses\", data=melt_syns, x=\"variable\", jitter=0.45)\n",
"# %% [markdown]\n# # THE MIND OF A MAGGOT\n\n# %% [markdown]\n# ## Imports\nimport os\nimport time\n\nimport colorcet as cc\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom scipy.linalg import orthogonal_procrustes\nfrom scipy.optimize import linear_sum_assignment\nfrom sklearn.metrics import adjusted_rand_score\nfrom tqdm import tqdm\n\nfrom graspy.cluster import GaussianCluster\nfrom graspy.embed import AdjacencySpectralEmbed\nfrom graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator\nfrom graspy.plot import heatmap, pairplot\nfrom graspy.simulations import rdpg\nfrom graspy.utils import binarize, pass_to_ranks\nfrom src.data import load_metagraph\nfrom src.graph import preprocess\nfrom src.hierarchy import signal_flow\nfrom src.io import savefig\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n barplot_text,\n gridmap,\n matrixplot,\n stacked_barplot,\n adjplot,\n)\nfrom sklearn.utils.testing import ignore_warnings\nfrom sklearn.exceptions import ConvergenceWarning\nimport warnings\n\nwarnings.filterwarnings(action=\"ignore\", category=ConvergenceWarning)\n\nCLUSTER_SPLIT = \"best\"\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\nrc_dict = {\n \"axes.spines.right\": False,\n \"axes.spines.top\": False,\n \"axes.formatter.limits\": (-3, 3),\n \"figure.figsize\": (6, 3),\n \"figure.dpi\": 100,\n}\nfor key, val in rc_dict.items():\n mpl.rcParams[key] = val\ncontext = sns.plotting_context(context=\"talk\", font_scale=1, rc=rc_dict)\nsns.set_context(context)\n\nnp.random.seed(8888)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, **kws)\n\n\ndef get_paired_inds(meta):\n pair_meta = meta[meta[\"Pair\"].isin(meta.index)]\n pair_group_size = pair_meta.groupby(\"Pair ID\").size()\n remove_pairs = pair_group_size[pair_group_size == 1].index\n pair_meta = pair_meta[~pair_meta[\"Pair ID\"].isin(remove_pairs)]\n assert pair_meta.groupby(\"Pair ID\").size().min() == 2\n pair_meta.sort_values([\"Pair ID\", \"hemisphere\"], inplace=True)\n lp_inds = pair_meta[pair_meta[\"hemisphere\"] == \"L\"][\"inds\"]\n rp_inds = pair_meta[pair_meta[\"hemisphere\"] == \"R\"][\"inds\"]\n assert (\n meta.iloc[lp_inds][\"Pair ID\"].values == meta.iloc[rp_inds][\"Pair ID\"].values\n ).all()\n return lp_inds, rp_inds\n\n\n# TODO broken in some cases, switched to `compute_pairedness_bipartite`\ndef compute_pairedness(partition, left_pair_inds, right_pair_inds, plot=False):\n uni_labels, inv = np.unique(partition, return_inverse=True)\n train_int_mat = np.zeros((len(uni_labels), len(uni_labels)))\n for i, ul in enumerate(uni_labels):\n c1_mask = inv == i\n for j, ul in enumerate(uni_labels):\n c2_mask = inv == j\n # number of times a thing in cluster 1 has a pair also in cluster 2\n pairs_in_other = np.logical_and(\n c1_mask[left_pair_inds], c2_mask[right_pair_inds]\n ).sum()\n train_int_mat[i, j] = pairs_in_other\n\n row_ind, col_ind = linear_sum_assignment(train_int_mat, maximize=True)\n train_pairedness = np.trace(train_int_mat[np.ix_(row_ind, col_ind)]) / np.sum(\n train_int_mat\n ) # TODO double check that this is right\n\n if plot:\n fig, axs = plt.subplots(1, 2, figsize=(20, 10))\n sns.heatmap(\n train_int_mat, square=True, ax=axs[0], cbar=False, cmap=\"RdBu_r\", center=0\n )\n int_df = pd.DataFrame(data=train_int_mat, index=uni_labels, columns=uni_labels)\n int_df = int_df.reindex(index=uni_labels[row_ind])\n int_df = 
int_df.reindex(columns=uni_labels[col_ind])\n sns.heatmap(int_df, square=True, ax=axs[1], cbar=False, cmap=\"RdBu_r\", center=0)\n\n return train_pairedness, row_ind, col_ind\n\n\ndef compute_pairedness_bipartite(left_labels, right_labels):\n left_uni_labels, left_inv = np.unique(left_labels, return_inverse=True)\n right_uni_labels, right_inv = np.unique(right_labels, return_inverse=True)\n train_int_mat = np.zeros((len(left_uni_labels), len(right_uni_labels)))\n for i, ul in enumerate(left_uni_labels):\n c1_mask = left_inv == i\n for j, ul in enumerate(right_uni_labels):\n c2_mask = right_inv == j\n # number of times a thing in cluster 1 has a pair also in cluster 2\n pairs_in_other = np.logical_and(c1_mask, c2_mask).sum()\n train_int_mat[i, j] = pairs_in_other\n\n row_ind, col_ind = linear_sum_assignment(train_int_mat, maximize=True)\n train_pairedness = np.trace(train_int_mat[np.ix_(row_ind, col_ind)]) / np.sum(\n train_int_mat\n ) # TODO double check that this is right\n return train_pairedness, row_ind, col_ind\n\n\ndef fit_and_score(X_train, X_test, k, **kws):\n gc = GaussianCluster(min_components=k, max_components=k, **kws)\n gc.fit(X_train)\n model = gc.model_\n train_bic = model.bic(X_train)\n train_lik = model.score(X_train)\n test_bic = model.bic(X_test)\n test_lik = model.score(X_test)\n bic = model.bic(np.concatenate((X_train, X_test), axis=0))\n res = {\n \"train_bic\": -train_bic,\n \"train_lik\": train_lik,\n \"test_bic\": -test_bic,\n \"test_lik\": test_lik,\n \"bic\": -bic,\n \"lik\": train_lik + test_lik,\n \"k\": k,\n \"model\": gc.model_,\n }\n return res, model\n\n\ndef crossval_cluster(\n embed,\n left_inds,\n right_inds,\n min_clusters=2,\n max_clusters=15,\n n_init=25,\n left_pair_inds=None,\n right_pair_inds=None,\n):\n left_embed = embed[left_inds]\n right_embed = embed[right_inds]\n print(\"Running left/right clustering with cross-validation\\n\")\n currtime = time.time()\n rows = []\n for k in tqdm(range(min_clusters, max_clusters)):\n # TODO add option for AutoGMM as well, might as well check\n for i in range(n_init):\n left_row, left_gc = fit_and_score(left_embed, right_embed, k)\n left_row[\"train\"] = \"left\"\n right_row, right_gc = fit_and_score(right_embed, left_embed, k)\n right_row[\"train\"] = \"right\"\n\n # pairedness computation, if available\n if left_pair_inds is not None and right_pair_inds is not None:\n # TODO double check this is right\n pred_left = left_gc.predict(embed[left_pair_inds])\n pred_right = right_gc.predict(embed[right_pair_inds])\n pness, _, _ = compute_pairedness_bipartite(pred_left, pred_right)\n left_row[\"pairedness\"] = pness\n right_row[\"pairedness\"] = pness\n\n ari = adjusted_rand_score(pred_left, pred_right)\n left_row[\"ARI\"] = ari\n right_row[\"ARI\"] = ari\n\n rows.append(left_row)\n rows.append(right_row)\n\n results = pd.DataFrame(rows)\n print(f\"{time.time() - currtime} elapsed\")\n return results\n\n\ndef plot_crossval_cluster(results):\n fig, axs = plt.subplots(3, 1, figsize=(10, 10), sharex=True)\n\n ax = axs[0]\n sns.lineplot(data=results, x=\"k\", y=\"test_lik\", hue=\"train\", ax=ax, legend=False)\n ax.lines[0].set_linestyle(\"--\")\n ax.lines[1].set_linestyle(\"--\")\n sns.lineplot(data=results, x=\"k\", y=\"train_lik\", hue=\"train\", ax=ax, legend=False)\n ax.set_ylabel(\"Log likelihood\")\n ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins=3, min_n_ticks=3))\n\n ax = axs[1]\n sns.lineplot(data=results, x=\"k\", y=\"test_bic\", hue=\"train\", ax=ax, legend=\"full\")\n 
ax.lines[0].set_linestyle(\"--\")\n ax.lines[1].set_linestyle(\"--\")\n sns.lineplot(data=results, x=\"k\", y=\"train_bic\", hue=\"train\", ax=ax, legend=\"full\")\n ax.set_ylabel(\"-BIC\")\n ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins=3, min_n_ticks=3))\n\n leg = ax.legend()\n leg.set_title(\"Train side\")\n leg.texts[0].set_text(\"Test contra\")\n leg.set_bbox_to_anchor((1, 1.8))\n lines = leg.get_lines()\n lines[0].set_linestyle(\"--\")\n lines[1].set_linestyle(\"--\")\n lines[2].set_linestyle(\"--\")\n leg.texts[3].set_text(\"Test ipsi\")\n\n ax = axs[2]\n sns.lineplot(\n data=results,\n x=\"k\",\n y=\"pairedness\",\n ax=ax,\n legend=\"full\",\n color=\"purple\",\n label=\"Pairedness\",\n )\n sns.lineplot(\n data=results, x=\"k\", y=\"ARI\", ax=ax, legend=\"full\", color=\"green\", label=\"ARI\"\n )\n ax.set_ylabel(\"Pair score\")\n leg = ax.legend().remove()\n ax.legend(bbox_to_anchor=(1, 1), loc=\"upper left\")\n # leg.loc = 2\n # leg.set_bbox_to_anchor((1, 1))\n\n # ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(nbins=3, min_n_ticks=3))\n # trans = transforms.blended_transform_factory(ax.transAxes, ax.transAxes)\n # ax.text(0.8, 0.8, \"Pairedness\", color=\"purple\", transform=trans)\n # ax.text(0.8, 0.6, \"ARI\", color=\"green\", transform=trans)\n return fig, axs\n\n\ndef make_ellipses(gmm, ax, i, j, colors, alpha=0.5, equal=False, **kws):\n inds = [j, i]\n for n, color in enumerate(colors):\n if gmm.covariance_type == \"full\":\n covariances = gmm.covariances_[n][np.ix_(inds, inds)]\n elif gmm.covariance_type == \"tied\":\n covariances = gmm.covariances_[np.ix_(inds, inds)]\n elif gmm.covariance_type == \"diag\":\n covariances = np.diag(gmm.covariances_[n][inds])\n elif gmm.covariance_type == \"spherical\":\n covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]\n v, w = np.linalg.eigh(covariances)\n u = w[0] / np.linalg.norm(w[0])\n angle = np.arctan2(u[1], u[0])\n angle = 180 * angle / np.pi # convert to degrees\n v = 2.0 * np.sqrt(2.0) * np.sqrt(v)\n ell = mpl.patches.Ellipse(\n gmm.means_[n, inds], v[0], v[1], 180 + angle, color=color, **kws\n )\n ell.set_clip_box(ax.bbox)\n ell.set_alpha(alpha)\n ax.add_artist(ell)\n if equal:\n ax.set_aspect(\"equal\", \"datalim\")\n\n\ndef plot_cluster_pairs(\n X, left_inds, right_inds, left_model, right_model, labels, colors=None, equal=True\n):\n k = left_model.n_components\n n_dims = X.shape[1]\n\n if colors is None:\n colors = sns.color_palette(\"tab10\", n_colors=k, desat=0.7)\n\n fig, axs = plt.subplots(\n n_dims, n_dims, sharex=False, sharey=False, figsize=(20, 20)\n )\n data = pd.DataFrame(data=X)\n data[\"label\"] = labels #\n pred = composite_predict(\n X, left_inds, right_inds, left_model, right_model, relabel=False\n )\n data[\"pred\"] = pred\n\n for i in range(n_dims):\n for j in range(n_dims):\n ax = axs[i, j]\n ax.axis(\"off\")\n if i < j:\n sns.scatterplot(\n data=data,\n x=j,\n y=i,\n ax=ax,\n alpha=0.5,\n linewidth=0,\n s=5,\n legend=False,\n hue=\"label\",\n palette=CLASS_COLOR_DICT,\n )\n make_ellipses(left_model, ax, i, j, colors, fill=False, equal=equal)\n if i > j:\n sns.scatterplot(\n data=data,\n x=j,\n y=i,\n ax=ax,\n alpha=0.7,\n linewidth=0,\n s=5,\n legend=False,\n hue=\"pred\",\n palette=colors,\n )\n make_ellipses(left_model, ax, i, j, colors, fill=True, equal=equal)\n\n plt.tight_layout()\n return fig, axs\n\n\ndef composite_predict(X, left_inds, right_inds, left_model, right_model, relabel=False):\n # TODO add option to boost the right numbers\n X_left = X[left_inds]\n X_right = 
X[right_inds]\n pred_left = left_model.predict(X_left)\n pred_right = right_model.predict(X_right)\n if relabel:\n leftify = np.vectorize(lambda x: str(x) + \"L\")\n rightify = np.vectorize(lambda x: str(x) + \"R\")\n pred_left = leftify(pred_left)\n pred_right = rightify(pred_right)\n dtype = pred_left.dtype\n pred = np.empty(len(X), dtype=dtype)\n pred[left_inds] = pred_left\n pred[right_inds] = pred_right\n return pred\n\n\ndef reindex_model(gmm, perm_inds):\n gmm.weights_ = gmm.weights_[perm_inds]\n gmm.means_ = gmm.means_[perm_inds]\n if gmm.covariance_type != \"tied\":\n gmm.covariances_ = gmm.covariances_[perm_inds]\n gmm.precisions_ = gmm.precisions_[perm_inds]\n gmm.precisions_cholesky_ = gmm.precisions_cholesky_[perm_inds]\n return gmm\n\n\ndef plot_metrics(results, plot_all=True):\n plot_results = results.copy()\n plot_results[\"k\"] += np.random.normal(size=len(plot_results), scale=0.1)\n\n fig, axs = plt.subplots(3, 3, figsize=(20, 10), sharex=True)\n\n def miniplotter(var, ax):\n if plot_all:\n sns.scatterplot(\n data=plot_results,\n x=\"k\",\n y=var,\n hue=\"train\",\n ax=ax,\n s=8,\n linewidth=0,\n alpha=0.5,\n )\n best_inds = results.groupby([\"k\"])[var].idxmax()\n best_results = results.loc[best_inds].copy()\n sns.lineplot(\n data=best_results, x=\"k\", y=var, ax=ax, color=\"purple\", label=\"max\"\n )\n mean_results = results.groupby([\"k\"]).mean()\n mean_results.reset_index(inplace=True)\n sns.lineplot(\n data=mean_results, x=\"k\", y=var, ax=ax, color=\"green\", label=\"mean\"\n )\n ax.get_legend().remove()\n\n plot_vars = [\n \"train_lik\",\n \"test_lik\",\n \"lik\",\n \"train_bic\",\n \"test_bic\",\n \"bic\",\n \"ARI\",\n \"pairedness\",\n ]\n axs = axs.T.ravel()\n\n for pv, ax in zip(plot_vars, axs):\n miniplotter(pv, ax)\n\n axs[2].xaxis.set_major_locator(mpl.ticker.MultipleLocator(2))\n axs[-2].tick_params(labelbottom=True)\n axs[-2].set_xlabel(\"k\")\n\n handles, labels = axs[-2].get_legend_handles_labels()\n axs[-1].legend(handles, labels, loc=\"upper left\")\n axs[-1].axis(\"off\")\n\n return fig, axs\n\n\n# %% [markdown]\n# ## Load data\n# In this case we are working with `G`, the directed graph formed by summing the edge\n# weights of the 4 different graph types. Preprocessing here includes removing\n# partially differentiated cells, and cutting out the lowest 5th percentile of nodes in\n# terms of their number of incident synapses. 5th percentile ~= 12 synapses. 
After this,\n# the largest connected component is used.\n\nmg = load_metagraph(\"G\", version=\"2020-04-01\")\nmg = preprocess(\n mg,\n threshold=0,\n sym_threshold=False,\n remove_pdiff=True,\n binarize=False,\n weight=\"weight\",\n)\nmeta = mg.meta\n\n# plot where we are cutting out nodes based on degree\ndegrees = mg.calculate_degrees()\nfig, ax = plt.subplots(1, 1, figsize=(5, 2.5))\nsns.distplot(np.log10(degrees[\"Total edgesum\"]), ax=ax)\nq = np.quantile(degrees[\"Total edgesum\"], 0.05)\nax.axvline(np.log10(q), linestyle=\"--\", color=\"r\")\nax.set_xlabel(\"log10(total synapses)\")\n\n# remove low degree neurons\nidx = meta[degrees[\"Total edgesum\"] > q].index\nmg = mg.reindex(idx, use_ids=True)\n\n# remove center neurons # FIXME\nidx = mg.meta[mg.meta[\"hemisphere\"].isin([\"L\", \"R\"])].index\nmg = mg.reindex(idx, use_ids=True)\n\nmg = mg.make_lcc()\nmg.calculate_degrees(inplace=True)\nmeta = mg.meta\n\nadj = mg.adj\nadj = pass_to_ranks(adj)\nmeta[\"inds\"] = range(len(meta))\n\nleft_inds = meta[meta[\"left\"]][\"inds\"]\nright_inds = meta[meta[\"right\"]][\"inds\"]\nlp_inds, rp_inds = get_paired_inds(meta)\n\n\n# %% [markdown]\n# ## Embed\n# Here the embedding is ASE, with PTR and DiagAug, the number of embedding dimensions\n# is for now set to ZG2 (4 + 4). Using the known pairs as \"seeds\", the left embedding\n# is matched to the right using procrustes.\nase = AdjacencySpectralEmbed(n_components=None, n_elbows=2)\nembed = ase.fit_transform(adj)\nn_components = embed[0].shape[1] # use all of ZG2\nX = np.concatenate((embed[0][:, :n_components], embed[1][:, :n_components]), axis=-1)\nR, _ = orthogonal_procrustes(X[lp_inds], X[rp_inds])\n\nif CLUSTER_SPLIT == \"best\":\n X[left_inds] = X[left_inds] @ R\n\n# %% [markdown]\n# ## Clustering\n# Clustering is performed using Gaussian mixture modeling. At each candidate value of k,\n# 50 models are trained on the left embedding, 50 models are trained on the right\n# embedding (choosing the best covariance structure based on BIC on the train set).\nresults = crossval_cluster(\n X,\n left_inds,\n right_inds,\n left_pair_inds=lp_inds,\n right_pair_inds=rp_inds,\n max_clusters=15,\n n_init=50,\n)\n# best_inds = results.groupby([\"k\", \"train\"])[\"test_bic\"].idxmax()\n# best_results = results.loc[best_inds].copy()\n# plot_crossval_cluster(best_results)\n# stashfig(f\"cross-val-n_components={n_components}\")\n\n# %% [markdown]\n# ## Evaluating Clustering\n# Of the 100 models we fit as described above, we now evaluate them on a variety of\n# metrics:\n# - likelihood of the data the model was trained on (\"train_lik\")\n# - likelihood of the held out (other hemisphere) data (\"test_lik\")\n# - likelihood of all of the data (\"lik\", = \"train_lik\" + \"test_lik\")\n# - BIC using the data the model was trained on (\"train_bic\")\n# - BIC using the held out (other hemisphere) data (\"test_bic\")\n# - BIC using all of the data (\"bic\")\n# - ARI for pairs. Given the prediction of the model on the left data and the right\n# data, using known pairs to define a correspondence between (some) nodes, what is\n# the ARI(left_prediction, right_prediciton) for the given model\n# - Pairedness, like the above but simply the raw fraction of pairs that end up in\n# corresponding L/R clusters. Very related to ARI but not normalized.\n\nplot_metrics(results)\nstashfig(f\"cluster-metrics-n_components={n_components}\")\n\n\n# %% [markdown]\n# ## Choose a model\n# A few things are clear from the above. 
One is that the likelihood on the train set\n# continues to go up as `k` increases, but plateaus and then drops on the test set around\n# k = 6 - 8. This is even slightly clearer when looking at the BIC plots, where the\n# only difference is the added penalty for complexity. Based on this, I would say that\n# the best k at this scale is around 6-8; however, we still need to pick a single metric\n# to give us the *best* model to proceed. I'm not sure whether it makes more sense to use\n# likelihood or BIC here, or to use performance on the test set or performance on all\n# of the data. Here we will proceed with k=6, and choose the model with the best BIC on\n# all of the data.\n\nk = 6\nmetric = \"bic\"\nbasename = f\"-metric={metric}-k={k}-n_components={n_components}\"\nbasetitle = f\"Metric={metric}, k={k}, n_components={n_components}\"\n\nind = results[results[\"k\"] == k][metric].idxmax()\n\nprint(f\"Choosing model at k={k} based on best {metric}.\\n\")\nprint(f\"ARI: {results.loc[ind, 'ARI']}\")\nprint(f\"Pairedness: {results.loc[ind, 'pairedness']}\\n\")\n\nmodel = results.loc[ind, \"model\"]\nleft_model = model\nright_model = model\n\npred = composite_predict(\n    X, left_inds, right_inds, left_model, right_model, relabel=False\n)\npred_side = composite_predict(\n    X, left_inds, right_inds, left_model, right_model, relabel=True\n)\n\nax = stacked_barplot(\n    pred_side, meta[\"merge_class\"].values, color_dict=CLASS_COLOR_DICT, legend_ncol=6\n)\nax.set_title(basetitle)\nstashfig(\"barplot\" + basename)\n\n\nfig, ax = plot_cluster_pairs(\n    X, left_inds, right_inds, left_model, right_model, meta[\"merge_class\"].values\n)\nfig.suptitle(basetitle, y=1)\n\nstashfig(\"pairs\" + basename)\n\n\nsf = signal_flow(adj)\nmeta[\"signal_flow\"] = -sf\nmeta[\"pred\"] = pred\nmeta[\"pred_side\"] = pred_side\nmeta[\"group_signal_flow\"] = meta[\"pred\"].map(meta.groupby(\"pred\")[\"signal_flow\"].mean())\nfig, ax = plt.subplots(1, 1, figsize=(20, 20))\nadjplot(\n    adj,\n    ax=ax,\n    meta=meta,\n    sort_class=\"pred_side\",\n    class_order=\"group_signal_flow\",\n    colors=\"merge_class\",\n    palette=CLASS_COLOR_DICT,\n    item_order=[\"merge_class\", \"signal_flow\"],\n    plot_type=\"scattermap\",\n    sizes=(0.5, 1),\n)\nfig.suptitle(basetitle, y=0.94)\nstashfig(\"adj-sf\" + basename)\n\nmeta[\"te\"] = -meta[\"Total edgesum\"]\nfig, ax = plt.subplots(1, 1, figsize=(20, 20))\nadjplot(\n    adj,\n    ax=ax,\n    meta=meta,\n    sort_class=\"pred_side\",\n    class_order=\"group_signal_flow\",\n    colors=\"merge_class\",\n    palette=CLASS_COLOR_DICT,\n    item_order=[\"merge_class\", \"te\"],\n    plot_type=\"scattermap\",\n    sizes=(0.5, 1),\n)\nfig.suptitle(basetitle, y=0.94)\nstashfig(\"adj-te\" + basename)\n\nmeta[\"rand\"] = np.random.uniform(size=len(meta))\nfig, ax = plt.subplots(1, 1, figsize=(20, 20))\nadjplot(\n    adj,\n    ax=ax,\n    meta=meta,\n    sort_class=\"pred_side\",\n    class_order=\"group_signal_flow\",\n    colors=\"merge_class\",\n    palette=CLASS_COLOR_DICT,\n    item_order=\"rand\",\n    plot_type=\"scattermap\",\n    sizes=(0.5, 1),\n)\nfig.suptitle(basetitle, y=0.94)\nstashfig(\"adj-rand\" + basename)\n\n# %% [markdown]\n# ## SUBCLUSTER\n\nnp.random.seed(8888)\n\nuni_labels, inv = np.unique(pred, return_inverse=True)\nall_sub_results = []\nsub_data = []\n\nreembed = False\n\nfor label in uni_labels:\n    print(label)\n    print()\n    label_mask = pred == label\n    sub_meta = meta[label_mask].copy()\n    sub_meta[\"inds\"] = range(len(sub_meta))\n    sub_left_inds = sub_meta[sub_meta[\"left\"]][\"inds\"].values\n    sub_right_inds = 
sub_meta[sub_meta[\"right\"]][\"inds\"].values\n sub_lp_inds, sub_rp_inds = get_paired_inds(sub_meta)\n sub_adj = adj[np.ix_(label_mask, label_mask)]\n\n if reembed:\n ase = AdjacencySpectralEmbed()\n # TODO look into PTR at this level as well\n sub_embed = ase.fit_transform(sub_adj)\n sub_X = np.concatenate(sub_embed, axis=1)\n sub_R, _ = orthogonal_procrustes(sub_X[sub_lp_inds], sub_X[sub_rp_inds])\n sub_X[sub_left_inds] = sub_X[sub_left_inds] @ sub_R\n else:\n sub_X = X[label_mask].copy()\n sub_R = R\n\n var_dict = {\n \"meta\": sub_meta,\n \"left_inds\": sub_left_inds,\n \"right_inds\": sub_right_inds,\n \"left_pair_inds\": sub_lp_inds,\n \"right_pair_inds\": sub_rp_inds,\n \"X\": sub_X,\n \"adj\": sub_adj,\n }\n\n sub_data.append(var_dict)\n\n sub_results = crossval_cluster(\n sub_X,\n sub_left_inds,\n sub_right_inds,\n left_pair_inds=sub_lp_inds,\n right_pair_inds=sub_rp_inds,\n max_clusters=10,\n min_clusters=1,\n n_init=50,\n )\n\n fig, axs = plot_metrics(sub_results, plot_all=False)\n fig.suptitle(f\"Subclustering for cluster {label}, reembed={reembed}\")\n stashfig(f\"sub-cluster-profile-label={label}-reembed={reembed}\")\n plt.close()\n all_sub_results.append(sub_results)\n\n# %% [markdown]\n# ##\n# sub_ks = [(2, 4), (0,), (3, 4), (3,), (2, 3), (0,), (4,)]\n# sub_kws = [(4,), (0,), (4,), (3, 4), (2, 3), (3,), (3, 4, 5)]\nif not reembed:\n sub_ks = [(4,), (4,), (3,), (2, 3, 4), (0,), (3,)]\nelse:\n pass\n\n\nfor i, label in enumerate(uni_labels):\n ks = sub_ks[i]\n sub_results = all_sub_results[i]\n sub_X = sub_data[i][\"X\"]\n sub_left_inds = sub_data[i][\"left_inds\"]\n sub_right_inds = sub_data[i][\"right_inds\"]\n sub_lp_inds = sub_data[i][\"left_pair_inds\"]\n sub_rp_inds = sub_data[i][\"right_pair_inds\"]\n sub_meta = sub_data[i][\"meta\"]\n\n fig, axs = plot_metrics(sub_results)\n fig.suptitle(f\"Subclustering for cluster {label}, reembed={reembed}\")\n for ax in axs[:-1]:\n for k in ks:\n ax.axvline(k, linestyle=\"--\", color=\"red\", linewidth=2)\n stashfig(f\"sub-cluster-metrics-label={label}-reembed={reembed}\" + basename)\n plt.close()\n\n for k in ks:\n if k != 0:\n sub_basename = f\"-label={label}-subk={k}-reembed={reembed}\" + basename\n sub_basetitle = f\"Subcluster for {label}, subk={k}, reembed={reembed},\"\n sub_basetitle += f\" metric={metric}, k={k}, n_components={n_components}\"\n\n ind = sub_results[sub_results[\"k\"] == k][metric].idxmax()\n sub_model = sub_results.loc[ind, \"model\"]\n sub_left_model = sub_model\n sub_right_model = sub_model\n\n sub_pred_side = composite_predict(\n sub_X,\n sub_left_inds,\n sub_right_inds,\n sub_left_model,\n sub_right_model,\n relabel=True,\n )\n\n ax = stacked_barplot(\n sub_pred_side,\n sub_meta[\"merge_class\"].values,\n color_dict=CLASS_COLOR_DICT,\n legend_ncol=6,\n )\n ax.set_title(sub_basetitle)\n stashfig(f\"barplot\" + sub_basename)\n plt.close()\n\n fig, ax = plot_cluster_pairs(\n sub_X,\n sub_left_inds,\n sub_right_inds,\n sub_left_model,\n sub_right_model,\n sub_meta[\"merge_class\"].values,\n )\n fig.suptitle(sub_basetitle, y=1)\n stashfig(f\"pairs\" + sub_basename)\n plt.close()\n\n sub_adj = sub_data[i][\"adj\"]\n sub_meta[\"sub_pred_side\"] = sub_pred_side\n\n sub_pred_var = f\"c{label}_sub_pred_side\"\n meta[sub_pred_var] = \"\"\n meta.loc[\n pred == label, sub_pred_var\n ] = sub_pred_side # TODO indexing is dangerous here\n meta[f\"c{label}_sub_pred\"] = \"\"\n meta.loc[pred == label, f\"c{label}_sub_pred\"] = composite_predict(\n sub_X,\n sub_left_inds,\n sub_right_inds,\n sub_left_model,\n 
sub_right_model,\n relabel=False,\n )\n meta[f\"is_c{label}\"] = pred == label\n fig, ax = plt.subplots(1, 1, figsize=(20, 20))\n adjplot(\n adj,\n ax=ax,\n meta=meta,\n sort_class=[\"pred_side\", sub_pred_var],\n class_order=\"group_signal_flow\",\n colors=\"merge_class\",\n palette=CLASS_COLOR_DICT,\n item_order=[\"merge_class\", \"signal_flow\"],\n highlight=f\"is_c{label}\",\n highlight_kws=dict(color=\"red\", linestyle=\"-\", linewidth=1),\n plot_type=\"scattermap\",\n sizes=(0.5, 1),\n )\n fig.suptitle(sub_basetitle, y=0.94)\n stashfig(\"full-adj\" + sub_basename)\n plt.close()\n# %% [markdown]\n# ##\n\ncols = meta.columns\nsub_pred_side_cols = []\nsub_pred_cols = []\nfor c in cols:\n if \"_sub_pred\" in c:\n if \"_side\" in c:\n sub_pred_side_cols.append(c)\n else:\n sub_pred_cols.append(c)\n\nmeta[\"total_pred\"] = \"\"\nmeta[\"total_pred\"] = meta[\"pred\"].astype(str) + \"-\"\nmeta[\"total_pred_side\"] = \"\"\nmeta[\"total_pred_side\"] = meta[\"pred_side\"].astype(str) + \"-\"\nmeta[\"sub_pred\"] = \"\"\nmeta[\"sub_pred_side\"] = \"\"\n\nfor c in sub_pred_cols:\n meta[\"total_pred\"] += meta[c].astype(str)\n meta[\"sub_pred\"] += meta[c].astype(str)\n\nfor c in sub_pred_side_cols:\n meta[\"sub_pred_side\"] += meta[c].astype(str)\n meta[\"total_pred_side\"] += meta[c].astype(str)\n\n# %% [markdown]\n# ##\nmeta[\"lvl2_signal_flow\"] = meta[\"total_pred\"].map(\n meta.groupby(\"total_pred\")[\"signal_flow\"].mean()\n)\n\nfig, ax = plt.subplots(1, 1, figsize=(20, 20))\nadjplot(\n adj,\n ax=ax,\n meta=meta,\n sort_class=[\"hemisphere\", \"pred\", \"sub_pred\"],\n class_order=\"lvl2_signal_flow\",\n colors=\"merge_class\",\n palette=CLASS_COLOR_DICT,\n item_order=[\"merge_class\", \"signal_flow\"],\n plot_type=\"scattermap\",\n sizes=(0.5, 1),\n)\nfig.suptitle(f\"2-level hierarchy clustering, reembed={reembed}\" + basetitle, y=0.94)\nstashfig(\"lvl2-full-adj\" + sub_basename)\n\nfig, ax = plt.subplots(1, 1, figsize=(20, 20))\nadjplot(\n adj,\n ax=ax,\n meta=meta,\n sort_class=[\"hemisphere\", \"pred\", \"sub_pred\"],\n class_order=\"lvl2_signal_flow\",\n colors=\"merge_class\",\n palette=CLASS_COLOR_DICT,\n item_order=[\"rand\"],\n plot_type=\"scattermap\",\n sizes=(0.5, 1),\n)\nfig.suptitle(f\"2-level hierarchy clustering, reembed={reembed}\" + basetitle, y=0.94)\nstashfig(\"lvl2-full-adj-rand\" + sub_basename)\n\n# %% [markdown]\n# ##\nfig, ax = plt.subplots(1, 1, figsize=(15, 20))\nax = stacked_barplot(\n meta[\"total_pred_side\"].values,\n meta[\"merge_class\"].values,\n color_dict=CLASS_COLOR_DICT,\n legend_ncol=6,\n ax=ax,\n norm_bar_width=False,\n)\n\nstashfig(\"lvl2-barplot\" + sub_basename)\n\n# %% [markdown]\n# ##\nimport pymaid\nfrom src.pymaid import start_instance\n\n\nstart_instance()\n\nfor tp in meta[\"total_pred\"].unique()[:10]:\n ids = list(meta[meta[\"total_pred\"] == tp].index.values)\n ids = [int(i) for i in ids]\n fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n skeleton_color_dict = dict(\n zip(meta.index, np.vectorize(CLASS_COLOR_DICT.get)(meta[\"merge_class\"]))\n )\n pymaid.plot2d(ids, color=skeleton_color_dict, ax=ax)\n ax.axis(\"equal\")\n stashfig(f\"test-plot2d-{tp}\")\n\n# %% [markdown]\n# ##\n\n\n# %%\n",
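"# %% [markdown]\n# ## PAIREDNESS, ON A TOY EXAMPLE\n# Editor's note: this cell is an added illustration, not part of the original\n# pipeline. It mirrors the logic of `compute_pairedness_bipartite` above on\n# hand-made left/right cluster labels, where the i-th left label is paired with\n# the i-th right label. All numbers here are fabricated.\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\n\nleft_labels = np.array([0, 0, 1, 1, 2, 2])\nright_labels = np.array([1, 1, 0, 0, 2, 2])  # clusters 0/1 swapped across sides\n\nleft_uni, left_inv = np.unique(left_labels, return_inverse=True)\nright_uni, right_inv = np.unique(right_labels, return_inverse=True)\ncontingency = np.zeros((len(left_uni), len(right_uni)))\nfor i in range(len(left_uni)):\n    for j in range(len(right_uni)):\n        # count pairs whose left member is in cluster i and right member in j\n        contingency[i, j] = np.logical_and(left_inv == i, right_inv == j).sum()\n\n# find the cluster relabeling that matches the most pairs\nrow_ind, col_ind = linear_sum_assignment(contingency, maximize=True)\npairedness = np.trace(contingency[np.ix_(row_ind, col_ind)]) / np.sum(contingency)\nprint(pairedness)  # 1.0 here: every pair lands in corresponding clusters\n",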
"# %% [markdown]\n# # Imports\nimport os\nimport random\nfrom operator import itemgetter\nfrom pathlib import Path\n\nimport colorcet as cc\nimport matplotlib.colors as mplc\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom graspy.cluster import AutoGMMCluster, GaussianCluster\nfrom graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed\nfrom graspy.plot import gridplot, heatmap, pairplot\nfrom graspy.utils import symmetrize\nfrom src.data import load_metagraph\nfrom src.embed import ase, lse, preprocess_graph\nfrom src.graph import MetaGraph\nfrom src.io import savefig, saveobj, saveskels\nfrom src.visualization import (\n bartreeplot,\n get_color_dict,\n get_colors,\n remove_spines,\n sankey,\n screeplot,\n)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\nSAVESKELS = True\nSAVEFIGS = True\nBRAIN_VERSION = \"2020-01-21\"\n\nsns.set_context(\"talk\")\n\nbase_path = Path(\"maggot_models/data/raw/Maggot-Brain-Connectome/\")\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=SAVEFIGS, **kws)\n\n\ndef stashskel(name, ids, labels, colors=None, palette=None, **kws):\n saveskels(\n name,\n ids,\n labels,\n colors=colors,\n palette=None,\n foldername=FNAME,\n save_on=SAVESKELS,\n **kws,\n )\n\n\ndef threshold_sweep(edgelist_df, max_pair_edgelist_df, start=0, stop=0.3, steps=20):\n threshs = np.linspace(start, stop, steps)\n rows = []\n for threshold in threshs:\n thresh_df = max_pair_edge_df[max_pair_edge_df[\"weight\"] > threshold]\n p_sym = len(thresh_df[thresh_df[\"edge pair counts\"] == 2]) / len(thresh_df)\n p_edges_left = (\n thresh_df[\"edge pair counts\"].sum()\n / max_pair_edge_df[\"edge pair counts\"].sum()\n )\n temp_df = edgelist_df[edgelist_df[\"max_weight\"] > threshold]\n p_syns_left = temp_df[\"weight\"].sum() / edgelist_df[\"weight\"].sum()\n row = {\n \"threshold\": threshold,\n \"Prop. paired edges symmetric\": p_sym,\n \"Prop. edges left\": p_edges_left,\n \"Prop. 
synapses left\": p_syns_left,\n }\n rows.append(row)\n return pd.DataFrame(rows)\n\n\n# %% [markdown]\n# # throw out all edges to or from any cell with...\n# # threshold curves for cells w > 100 dendritic inputs\n# # threshold curves for cells w > 50 dendritic inputs\n# # do the thresholding based on percent input\n\nbase_path = Path(\n \"maggot_models/data/raw/Maggot-Brain-Connectome/4-color-matrices_Brain/\"\n)\nsub_path = Path(\"2020-01-21/input_counts.csv\")\ninput_path = base_path / sub_path\ninput_df = pd.read_csv(input_path)\ninput_df = input_df.set_index(\"skeleton_id\")\ninput_thresh = 100\nremove_inds = input_df[input_df[\" dendrite_inputs\"] < input_thresh].index\n\ngraph_type = \"Gadn\"\nmg = load_metagraph(graph_type, version=BRAIN_VERSION)\ng = mg.g\nmeta = mg.meta\nremove_pdiff = True\nif remove_pdiff:\n keep_inds = np.where(~mg[\"is_pdiff\"])[0]\n mg = mg.reindex(keep_inds)\nedgelist_df = mg.to_edgelist(remove_unpaired=True)\nedgelist_df[\"source\"] = edgelist_df[\"source\"].astype(\"int64\")\nedgelist_df[\"target\"] = edgelist_df[\"target\"].astype(\"int64\")\n\nn_paired_edges = len(edgelist_df)\n# get rid of edges where the target is a low dendritic input node\nedgelist_df = edgelist_df[~edgelist_df[\"target\"].isin(remove_inds)]\nn_left_edges = len(edgelist_df)\n\nmax_pair_edge_df = edgelist_df.groupby(\"edge pair ID\", sort=False).max()\nedge_max_weight_map = dict(zip(max_pair_edge_df.index.values, max_pair_edge_df.values))\nedgelist_df[\"max_weight\"] = itemgetter(*edgelist_df[\"edge pair ID\"])(\n edge_max_weight_map\n)\n\nthresh_result_df = threshold_sweep(edgelist_df, max_pair_edge_df)\n\nfig, ax = plt.subplots(1, 1, figsize=(10, 6))\nsns.lineplot(\n data=thresh_result_df, x=\"threshold\", y=\"Prop. paired edges symmetric\", ax=ax\n)\nremove_spines(ax)\nax_right = plt.twinx(ax)\nsns.lineplot(\n data=thresh_result_df,\n x=\"threshold\",\n y=\"Prop. edges left\",\n ax=ax_right,\n color=\"orange\",\n label=\"Edges\",\n)\nremove_spines(ax_right)\nsns.lineplot(\n data=thresh_result_df,\n x=\"threshold\",\n y=\"Prop. synapses left\",\n ax=ax_right,\n color=\"green\",\n label=\"Synapses\",\n)\nax_right.set_ylabel(\"Prop. 
left\")\nax.set_title(\n f\"Min dendridic input = {input_thresh} (removed {n_paired_edges - n_left_edges} edges)\"\n)\npad = 0.02\nax.set_ylim((0 - pad, 1 + pad))\nax_right.set_ylim((0 - pad, 1 + pad))\nplt.legend(bbox_to_anchor=(1.08, 1), loc=2, borderaxespad=0.0)\nstashfig(f\"min-dend-{input_thresh}-threshold-sweep-{graph_type}\")\n# %% [markdown]\n# # get number of inputs to kenyon cells\n# # just list the number of connections onto each kenyon cell, by claw number\nplt.style.use(\"seaborn-whitegrid\")\nsns.set_context(\"talk\")\ngraph_type = \"Gad\"\nmg = load_metagraph(graph_type, version=BRAIN_VERSION)\nedgelist_df = mg.to_edgelist()\nadj = mg.adj\nclass_labels = mg.meta[\"Class 1\"].fillna(\"\")\nsubclass_labels = mg.meta[\"Class 2\"].fillna(\"\")\nkc_inds = np.where(class_labels == \"KC\")[0]\nfor i in range(1, 7):\n name = f\"{i}claw\"\n sub_edgelist_df = edgelist_df[edgelist_df[\"target Class 2\"] == name]\n ids = sub_edgelist_df[\"target\"].unique()\n # fig, ax = plt.subplots(1, 1, figsize=(15, 5))\n fig = plt.figure(figsize=(20, 7))\n ax = plt.subplot2grid((1, 5), (0, 0), colspan=4)\n ax2 = plt.subplot2grid((1, 5), (0, 4), colspan=1)\n sns.stripplot(data=sub_edgelist_df, y=\"weight\", x=\"target\", ax=ax, order=ids)\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_title(name + \" input weights\")\n # remove_spines(ax, keep_corner=True)\n mins = []\n ticks = ax.get_xticks()\n color = sns.color_palette(\"deep\", desat=1, n_colors=2)[1]\n for j, cell_id in enumerate(ids):\n cell_df = sub_edgelist_df[sub_edgelist_df[\"target\"] == cell_id]\n cell_df = cell_df.sort_values(\"weight\", ascending=False)\n cell_df = cell_df.iloc[:i, :]\n min_max_weight = cell_df[\"weight\"].min()\n ax.text(\n j,\n min_max_weight,\n min_max_weight,\n fontsize=\"small\",\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\",\n )\n mins.append(min_max_weight)\n sns.violinplot(\n mins, ax=ax2, orient=\"v\", inner=\"quart\", color=color, alpha=0.8, saturation=1\n )\n median = np.median(mins)\n ax2.text(\n 0,\n median,\n f\"{median:.0f}\",\n horizontalalignment=\"center\",\n verticalalignment=\"center\",\n backgroundcolor=color,\n alpha=0.8,\n )\n # ax2.yaxis.set_major_locator(plt.NullLocator())\n ax2.set_ylim(ax.get_ylim())\n ax2.yaxis.set_ticks([])\n ax2.set_title(\"\")\n stashfig(name + \"-input-weights\")\n\nname = \"all KC\"\nkc_edgelist_df = edgelist_df[edgelist_df[\"target Class 1\"] == \"KC\"]\nfig, ax = plt.subplots(1, 1, figsize=(15, 5))\nsns.stripplot(\n data=kc_edgelist_df,\n y=\"weight\",\n x=\"target Class 2\",\n ax=ax,\n order=[f\"{i}claw\" for i in range(1, 7)],\n jitter=0.45,\n)\nfor tick in ax.get_xticklabels():\n tick.set_rotation(90)\nax.set_title(name + \" input weights\")\n# remove_spines(ax, keep_corner=True)\nstashfig(\"all-kc-input-weights\")\n\n# %% [markdown]\n# # plot the distribution of # of dendritic / axonic inputs\n\ngraph_type = \"Gad\"\nmg = load_metagraph(graph_type, version=BRAIN_VERSION)\nmeta = mg.meta\nmeta.loc[input_df.index, \"dendrite_input\"] = input_df[\" dendrite_inputs\"]\nmeta.loc[input_df.index, \"axon_input\"] = input_df[\" axon_inputs\"]\n\n\ndef filter(string):\n string = string.replace(\"akira\", \"\")\n string = string.replace(\"Lineage\", \"\")\n string = string.replace(\"*\", \"\")\n string = string.strip(\"_\")\n return string\n\n\nlineages = meta[\"lineage\"]\nlineages = np.vectorize(filter)(lineages)\nmeta[\"lineage\"] = lineages\n\n\nn_rows = 6\nfig, axs = plt.subplots(n_rows, 1, figsize=(15, 30), 
sharey=True)\nuni_lineages = np.unique(meta[\"lineage\"])\nn_lineages = len(uni_lineages)\nn_per_row = n_lineages // n_rows\nfor i in range(n_rows):\n ax = axs[i]\n temp_lineages = uni_lineages[i * n_per_row : (i + 1) * n_per_row]\n temp_df = meta[meta[\"lineage\"].isin(temp_lineages)]\n sns.stripplot(\n data=temp_df, x=\"lineage\", y=\"dendrite_input\", ax=ax, palette=\"deep\", jitter=0.4\n )\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_xlabel(\"\")\n remove_spines(ax)\n ax.yaxis.set_major_locator(plt.MaxNLocator(3))\n ax.xaxis.set_tick_params(length=0)\nplt.tight_layout()\nstashfig(\"all-lineage-dendrite-input\")\n\n# %% [markdown]\n# # Plot this but by cell class\nn_rows = 3\nuni_lineages = np.unique(meta[\"Merge Class\"])\nn_lineages = len(uni_lineages)\nn_per_row = n_lineages // n_rows\nfig, axs = plt.subplots(n_rows + 1, 1, figsize=(15, 30), sharey=True)\nfor i in range(n_rows):\n ax = axs[i]\n temp_lineages = uni_lineages[i * n_per_row : (i + 1) * n_per_row]\n temp_df = meta[meta[\"Merge Class\"].isin(temp_lineages)]\n sns.stripplot(\n data=temp_df,\n x=\"Merge Class\",\n y=\"dendrite_input\",\n ax=ax,\n palette=\"deep\",\n jitter=0.4,\n )\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_xlabel(\"\")\n remove_spines(ax)\n ax.yaxis.set_major_locator(plt.MaxNLocator(3))\n ax.xaxis.set_tick_params(length=0)\nxlim = ax.get_xlim()\nax = axs[-1]\ntemp_lineages = uni_lineages[(i + 1) * n_per_row :]\ntemp_df = meta[meta[\"Merge Class\"].isin(temp_lineages)]\nsns.stripplot(\n data=temp_df, x=\"Merge Class\", y=\"dendrite_input\", ax=ax, palette=\"deep\", jitter=0.4\n)\nfor tick in ax.get_xticklabels():\n tick.set_rotation(90)\nax.set_xlabel(\"\")\nremove_spines(ax)\nax.yaxis.set_major_locator(plt.MaxNLocator(3))\nax.xaxis.set_tick_params(length=0)\nax.set_xlim(xlim)\nplt.tight_layout()\nstashfig(\"all-merge-class-dendrite-input\")\n\n# %% [markdown]\n# # plot some kind of asymmetry score by lineage\n# # - proportion of edges onto a lineage which are asymmetric after thresholding\n# # - IOU score?\n# # - something else?\n\ngraph_type = \"Gadn\"\nmg = load_metagraph(graph_type, version=BRAIN_VERSION)\ng = mg.g\nmeta = mg.meta\n\nlineages = meta[\"lineage\"]\nlineages = np.vectorize(filter)(lineages)\nmeta[\"lineage\"] = lineages\n\nedgelist_df = mg.to_edgelist(remove_unpaired=True)\nedgelist_df[\"source\"] = edgelist_df[\"source\"].astype(\"int64\")\nedgelist_df[\"target\"] = edgelist_df[\"target\"].astype(\"int64\")\n\nn_paired_edges = len(edgelist_df)\n# get rid of edges where the target is a low dendritic input node\nedgelist_df = edgelist_df[~edgelist_df[\"target\"].isin(remove_inds)]\nn_left_edges = len(edgelist_df)\n\nmax_pair_edge_df = edgelist_df.groupby(\"edge pair ID\").max()\nedge_max_weight_map = dict(\n zip(max_pair_edge_df.index.values, max_pair_edge_df[\"weight\"])\n)\nedgelist_df[\"max_weight\"] = itemgetter(*edgelist_df[\"edge pair ID\"])(\n edge_max_weight_map\n)\n\nthreshold = 0.0\nthresh_df = max_pair_edge_df[max_pair_edge_df[\"weight\"] > threshold]\n\nsource_pair_ids = np.unique(max_pair_edge_df[\"source Pair ID\"])\ntarget_pair_ids = np.unique(max_pair_edge_df[\"target Pair ID\"])\npair_ids = np.union1d(source_pair_ids, target_pair_ids)\n\nrows = []\nfor pid in pair_ids:\n temp_df = thresh_df[\n (thresh_df[\"source Pair ID\"] == pid) | (thresh_df[\"target Pair ID\"] == pid)\n ]\n\n if len(temp_df) > 0:\n iou = len(temp_df[temp_df[\"edge pair counts\"] == 2]) / len(temp_df)\n else:\n iou = 0\n\n temp_meta = meta[meta[\"Pair 
ID\"] == pid]\n lineage = temp_meta[\"lineage\"].values[0]\n row = {\"IOU\": iou, \"lineage\": lineage}\n rows.append(row)\n\nlineage_iou_df = pd.DataFrame(rows)\n\nn_rows = 6\nfig, axs = plt.subplots(n_rows, 1, figsize=(15, 30), sharey=True)\nuni_lineages = np.unique(lineage_iou_df[\"lineage\"])\nn_lineages = len(uni_lineages)\nn_per_row = n_lineages // n_rows\nfor i in range(n_rows):\n ax = axs[i]\n temp_lineages = uni_lineages[i * n_per_row : (i + 1) * n_per_row]\n temp_df = lineage_iou_df[lineage_iou_df[\"lineage\"].isin(temp_lineages)]\n sns.stripplot(data=temp_df, x=\"lineage\", y=\"IOU\", ax=ax, palette=\"deep\", jitter=0.4)\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_xlabel(\"\")\n remove_spines(ax)\n ax.yaxis.set_major_locator(plt.MaxNLocator(3))\n ax.xaxis.set_tick_params(length=0)\nplt.suptitle(f\"IOU after threshold = {threshold}\", y=1.02)\nplt.tight_layout()\nstashfig(f\"all-lineage-iou-{threshold}\")\n\n# %% [markdown]\n# # Do the same by cell class\n\nrows = []\nfor pid in pair_ids:\n temp_df = thresh_df[\n (thresh_df[\"source Pair ID\"] == pid) | (thresh_df[\"target Pair ID\"] == pid)\n ]\n\n if len(temp_df) > 0:\n iou = len(temp_df[temp_df[\"edge pair counts\"] == 2]) / len(temp_df)\n else:\n iou = 0\n\n temp_meta = meta[meta[\"Pair ID\"] == pid]\n lineage = temp_meta[\"Merge Class\"].values[0]\n row = {\"IOU\": iou, \"Merge Class\": lineage}\n rows.append(row)\n\nlineage_iou_df = pd.DataFrame(rows)\n\nn_rows = 3\nfig, axs = plt.subplots(n_rows, 1, figsize=(15, 30), sharey=True)\nuni_lineages = np.unique(lineage_iou_df[\"Merge Class\"])\nn_lineages = len(uni_lineages)\nn_per_row = n_lineages // n_rows\nfor i in range(n_rows):\n ax = axs[i]\n temp_lineages = uni_lineages[i * n_per_row : (i + 1) * n_per_row]\n temp_df = lineage_iou_df[lineage_iou_df[\"Merge Class\"].isin(temp_lineages)]\n sns.stripplot(\n data=temp_df, x=\"Merge Class\", y=\"IOU\", ax=ax, palette=\"deep\", jitter=0.4\n )\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n ax.set_xlabel(\"\")\n remove_spines(ax)\n ax.yaxis.set_major_locator(plt.MaxNLocator(3))\n ax.xaxis.set_tick_params(length=0)\nplt.suptitle(f\"IOU after threshold = {threshold}\", y=1.02)\nplt.tight_layout()\nstashfig(f\"all-class-iou-{threshold}\")\n",
"# %% [markdown]\n# # THE MIND OF A MAGGOT\n\n# %% [markdown]\n# ## Imports\nimport os\nimport time\nimport warnings\n\nimport colorcet as cc\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom anytree import LevelOrderGroupIter, NodeMixin\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.linalg import orthogonal_procrustes\nfrom scipy.optimize import linear_sum_assignment\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.utils.testing import ignore_warnings\nfrom tqdm import tqdm\n\nimport pymaid\nfrom graspy.cluster import GaussianCluster\nfrom graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, selectSVD\nfrom graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator\nfrom graspy.plot import heatmap, pairplot\nfrom graspy.simulations import rdpg\nfrom graspy.utils import augment_diagonal, binarize, pass_to_ranks\nfrom src.cluster import (\n MaggotCluster,\n add_connections,\n compute_pairedness_bipartite,\n crossval_cluster,\n fit_and_score,\n get_paired_inds,\n make_ellipses,\n plot_cluster_pairs,\n plot_metrics,\n predict,\n)\nfrom src.data import load_metagraph\nfrom src.graph import MetaGraph, preprocess\nfrom src.hierarchy import signal_flow\nfrom src.io import savecsv, savefig\nfrom src.pymaid import start_instance\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n adjplot,\n barplot_text,\n gridmap,\n matrixplot,\n set_axes_equal,\n stacked_barplot,\n)\n\nwarnings.filterwarnings(action=\"ignore\", category=ConvergenceWarning)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\nrc_dict = {\n \"axes.spines.right\": False,\n \"axes.spines.top\": False,\n \"axes.formatter.limits\": (-3, 3),\n \"figure.figsize\": (6, 3),\n \"figure.dpi\": 100,\n}\nfor key, val in rc_dict.items():\n mpl.rcParams[key] = val\ncontext = sns.plotting_context(context=\"talk\", font_scale=1, rc=rc_dict)\nsns.set_context(context)\n\nPLOT_MODELS = True\n\nnp.random.seed(8888)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name)\n\n\n# %% [markdown]\n# ## Load data\n# In this case we are working with `G`, the directed graph formed by summing the edge\n# weights of the 4 different graph types. Preprocessing here includes removing\n# partially differentiated cells, and cutting out the lowest 5th percentile of nodes in\n# terms of their number of incident synapses. 5th percentile ~= 12 synapses. 
After this,\n# the largest connected component is used.\n\nmg = load_metagraph(\"G\", version=\"2020-04-01\")\nmg = preprocess(\n mg,\n threshold=0,\n sym_threshold=False,\n remove_pdiff=True,\n binarize=False,\n weight=\"weight\",\n)\nmeta = mg.meta\n\n# plot where we are cutting out nodes based on degree\ndegrees = mg.calculate_degrees()\nfig, ax = plt.subplots(1, 1, figsize=(5, 2.5))\nsns.distplot(np.log10(degrees[\"Total edgesum\"]), ax=ax)\nq = np.quantile(degrees[\"Total edgesum\"], 0.05)\nax.axvline(np.log10(q), linestyle=\"--\", color=\"r\")\nax.set_xlabel(\"log10(total synapses)\")\n\n# remove low degree neurons\nidx = meta[degrees[\"Total edgesum\"] > q].index\nmg = mg.reindex(idx, use_ids=True)\n\n# remove center neurons # FIXME\nidx = mg.meta[mg.meta[\"hemisphere\"].isin([\"L\", \"R\"])].index\nmg = mg.reindex(idx, use_ids=True)\n\nmg = mg.make_lcc()\nmg.calculate_degrees(inplace=True)\nmeta = mg.meta\n\nadj = mg.adj\nmeta[\"inds\"] = range(len(meta))\n\n# %% [markdown]\n# ## Normalize\nnp.random.seed(8888)\nmc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=50,\n stashfig=stashfig,\n max_clusters=4,\n n_components=None,\n embed=\"unscaled_ase\",\n # reembed=True,\n realign=True,\n normalize=True,\n)\nmc.fit_candidates()\n\n# %% [markdown]\n# ##\nmc.plot_model(4)\n# %% [markdown]\n# ##\n\nmc.plot_model(6)\n\n# %% [markdown]\n# ##\nimport n_sphere\n\nnorm_embed = mc.X_\nlabels = meta[\"merge_class\"].values\nnorm_embed_spherical = n_sphere.convert_spherical(norm_embed)\nnorm_embed_spherical = norm_embed_spherical[:, 1:] # chop off R dimension\npg = pairplot(norm_embed_spherical, labels=labels, palette=CLASS_COLOR_DICT)\npg._legend.remove()\n\n\n# %% [markdown]\n# ##\n\nfrom sklearn.cluster import AgglomerativeClustering\n\nfor k in range(2, 10):\n ag = AgglomerativeClustering(n_clusters=k, affinity=\"cosine\", linkage=\"average\")\n pred_labels = ag.fit_predict(norm_embed)\n\n ax = stacked_barplot(pred_labels, labels, color_dict=CLASS_COLOR_DICT)\n ax.set_title(f\"k={k}\")\n# %% [markdown]\n# ## new\nnp.random.seed(8888)\nmc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=50,\n stashfig=stashfig,\n max_clusters=8,\n n_components=None,\n embed=\"unscaled_ase\",\n reembed=True,\n realign=True,\n)\nmc.fit_candidates()\nmc.plot_model(6)\nmc.plot_model(7)\nmc.select_model(6)\n\nnp.random.seed(9999)\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n node.fit_candidates()\n\nsub_ks = [(2, 3, 4, 5), (2, 3, 4), (2, 4, 5, 6), (2, 3, 4), (2, 3, 4), (2, 3, 4, 5)]\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n for k in sub_ks[i]:\n node.plot_model(k)\n\n# %% [markdown]\n# ## new\n\nnp.random.seed(8888)\nmc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=50,\n stashfig=stashfig,\n max_clusters=8,\n n_components=None,\n embed=\"unscaled_ase\",\n reembed=False,\n realign=True,\n)\nmc.fit_candidates()\nmc.plot_model(6)\nmc.plot_model(7)\nmc.select_model(6)\n\nnp.random.seed(9999)\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n node.fit_candidates()\n\nsub_ks = [(2, 3, 4, 5), (2, 3, 4), (2, 4, 5, 6), (2, 3, 4), (2, 3, 4), (2, 3, 4, 5)]\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n for k in sub_ks[i]:\n node.plot_model(k)\n\n# %% [markdown]\n# ## new\n\nnp.random.seed(8888)\nmc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=50,\n stashfig=stashfig,\n max_clusters=8,\n n_components=None,\n embed=\"unscaled_ase\",\n 
reembed=\"masked\",\n realign=True,\n)\nmc.fit_candidates()\nmc.plot_model(6)\nmc.plot_model(7)\nmc.select_model(6)\n\nnp.random.seed(9999)\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n node.fit_candidates()\n\nsub_ks = [(2, 3, 4, 5), (2, 3, 4), (2, 4, 5, 6), (2, 3, 4), (2, 3, 4), (2, 3, 4, 5)]\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n for k in sub_ks[i]:\n node.plot_model(k)\n\n\n# %% [markdown]\n# ##\nnp.random.seed(8888)\nmc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=50,\n stashfig=stashfig,\n max_clusters=8,\n n_components=4,\n embed=\"ase\",\n realign=True,\n)\nmc.fit_candidates()\nmc.plot_model(6)\nmc.plot_model(7)\nmc.select_model(6)\n\n#%%\nnp.random.seed(9999)\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n node.fit_candidates()\n\nsub_ks = [(2, 3, 4, 5), (2, 3, 4), (2, 4, 5, 6), (2, 3, 4), (2, 3, 4), (2, 3, 4, 5)]\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n for k in sub_ks[i]:\n node.plot_model(k)\n\n# %% [markdown]\n# ##\nnp.random.seed(8888)\n\nmc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=50,\n stashfig=stashfig,\n max_clusters=8,\n n_components=4,\n embed=\"ase\",\n realign=False,\n)\nmc.fit_candidates()\nmc.plot_model(6)\nmc.select_model(6)\n\nnp.random.seed(9999)\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n node.fit_candidates()\n\nsub_ks = [(2, 3, 4, 5), (2, 3, 4), (2, 4, 5, 6), (2, 3, 4), (2, 3, 4), (2, 3, 4, 5)]\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n for k in sub_ks[i]:\n node.plot_model(k)\n\n# %% [markdown]\n# ##\nnp.random.seed(8888)\n\nmc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=50,\n stashfig=stashfig,\n max_clusters=8,\n n_components=4,\n embed=\"unscaled_ase\",\n realign=False,\n)\nmc.fit_candidates()\nmc.plot_model(6)\nmc.select_model(6)\n\nnp.random.seed(9999)\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n node.fit_candidates()\n\nsub_ks = [(2, 3, 4, 5), (2, 3, 4), (2, 4, 5, 6), (2, 3, 4), (2, 3, 4), (2, 3, 4, 5)]\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n for k in sub_ks[i]:\n node.plot_model(k)\n# %% [markdown]\n# ##\nnp.random.seed(8888)\n\nmc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=50,\n stashfig=stashfig,\n max_clusters=8,\n n_components=4,\n embed=\"ase\",\n reembed=True,\n)\nmc.fit_candidates()\nmc.plot_model(6)\nmc.select_model(6)\n\nnp.random.seed(9999)\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n node.fit_candidates()\n\nsub_ks = [(2, 3, 4, 5), (2, 3, 4), (2, 4, 5, 6), (2, 3, 4), (2, 3, 4), (2, 3, 4, 5)]\nfor i, node in enumerate(mc.get_lowest_level()):\n print(node.name)\n print()\n for k in sub_ks[i]:\n node.plot_model(k)\n\n# # %% [markdown]\n# # ##\n# np.random.seed(8888)\n\n# mc = MaggotCluster(\n# \"0\",\n# adj=adj,\n# meta=meta,\n# n_init=50,\n# stashfig=stashfig,\n# max_clusters=8,\n# n_components=4,\n# embed=\"ase\",\n# reembed=True,\n# )\n# mc.fit_candidates()\n# mc.select_model(6)\n\n# np.random.seed(9999)\n# for i, node in enumerate(mc.get_lowest_level()):\n# print(node.name)\n# print()\n# node.fit_candidates()\n\n# sub_ks = [(2, 3, 4, 5), (2, 3, 4), (2, 4, 5, 6), (2, 3, 4), (2, 3, 4), (2, 3, 4, 5)]\n# for i, node in enumerate(mc.get_lowest_level()):\n# print(node.name)\n# print()\n# for k in sub_ks[i]:\n# node.plot_model(k)\n\n\n# %%\n\n# %% 
[markdown]\n# ##\n\nnp.random.seed(8888)\n\nmc = MaggotCluster(\n    \"0\",\n    adj=adj,\n    meta=meta,\n    n_init=50,\n    stashfig=stashfig,\n    max_clusters=8,\n    n_components=None,\n    embed=\"unscaled_ase\",\n    realign=False,\n    reembed=True,\n)\nmc.fit_candidates()\nmc.plot_model(6)\nmc.select_model(6)\n\n# %% [markdown]\n# ##\nnp.random.seed(9999)\nfor i, node in enumerate(mc.get_lowest_level()):\n    print(node.name)\n    print()\n    node.fit_candidates()\n\nsub_ks = [(2, 3, 4, 5), (2, 3, 4), (2, 4, 5, 6), (2, 3, 4), (2, 3, 4), (2, 3, 4, 5)]\nfor i, node in enumerate(mc.get_lowest_level()):\n    print(node.name)\n    print()\n    for k in sub_ks[i]:\n        node.plot_model(k)\n\n\n# %% focus on the antennal lobe cluster\nat_node = mc.get_lowest_level()[2]\nat_node.select_model(2)  # or 6\n\nfor i, node in enumerate(at_node.get_lowest_level()):\n    print(node.name)\n    print()\n    node.fit_candidates()\n\n",
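"# %% [markdown]\n# ## seeded Procrustes alignment, in isolation\n# Editor's note: an added sketch of the alignment step the experiments above\n# toggle via `realign`. Known left/right pairs act as seeds; an orthogonal\n# matrix R mapping the left seed embeddings onto the right ones is estimated,\n# then applied to all left rows. The embeddings here are simulated, not from\n# the connectome.\nimport numpy as np\nfrom scipy.linalg import orthogonal_procrustes\n\nrng = np.random.default_rng(8888)\nright_seeds = rng.normal(size=(20, 4))\nQ, _ = np.linalg.qr(rng.normal(size=(4, 4)))  # a random orthogonal matrix\nleft_seeds = right_seeds @ Q.T  # left embedding = rotated copy of the right\n\nR, _ = orthogonal_procrustes(left_seeds, right_seeds)\naligned_left = left_seeds @ R\nprint(np.allclose(aligned_left, right_seeds))  # True: the rotation is recovered\n",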
"#%% Imports and file loading\nimport glob\nimport json\nimport pprint\nimport sys\nfrom operator import itemgetter\nfrom os import listdir\nfrom pathlib import Path\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\n\nimport pymaid\nfrom graspy.plot import gridplot\nfrom src.data import load_networkx\nfrom src.pymaid import start_instance\n\n# File locations\nbase_path = Path(\"./maggot_models/data/raw/Maggot-Brain-Connectome/\")\n\ndata_path = base_path / \"4-color-matrices_Brain\"\n\ndata_date_graphs = \"2020-04-23\"\n\ngraph_types = [\"axon-axon\", \"axon-dendrite\", \"dendrite-axon\", \"dendrite-dendrite\"]\n\ninput_counts_file = \"input_counts\"\n\npair_file = base_path / \"pairs/pairs-2020-02-27.csv\"\n\noutput_name = \"2020-04-23\"\noutput_path = Path(f\"maggot_models/data/processed/{output_name}\")\n\n# sys.stdout = open(f\"maggot_models/data/logs/{output_name}.txt\", \"w\")\n\n\nprint(output_path)\n\n\nlineage_file = data_path / Path(data_date_graphs) / \"skeleton_id_vs_lineage.csv\"\n\n\ndef df_to_nx(df, meta_data_dict):\n c = df.columns.values\n c = c.astype(int)\n r = df.index.values\n df.columns = c\n if not (c == r).all():\n raise ValueError(\"Mismatching df indexing\")\n graph = nx.from_pandas_adjacency(df, create_using=nx.DiGraph)\n nx.set_node_attributes(graph, meta_data_dict)\n return graph\n\n\npriority_map = {\n \"MBON\": 1,\n \"MBIN\": 1,\n \"KC\": 1,\n \"uPN\": 1,\n \"tPN\": 1,\n \"vPN\": 1,\n \"mPN\": 1,\n \"sens\": 1,\n \"APL\": 1,\n \"LHN\": 2,\n \"CN\": 2,\n \"dVNC\": 2,\n \"dSEZ\": 2,\n \"RG\": 2,\n \"dUnk\": 2,\n \"FBN\": 3,\n \"FAN\": 3,\n \"LHN2\": 5, # used to be 4\n \"CN2\": 6, # used to be 5\n \"FB2N\": 3,\n \"FFN\": 4, # used to be 4\n \"MN2\": 3,\n \"AN2\": 3,\n \"vtd2\": 3,\n}\n\n\ndef priority(name):\n if name in priority_map:\n return priority_map[name]\n else:\n return 1000\n\n\ncheck_priority = np.vectorize(priority)\n\n\ndef get_single_class(classes):\n single_class = classes[0]\n for c in classes[1:]:\n single_class += \";\" + c\n return str(single_class)\n\n\ndef get_classes(meta, class_cols, fill_unk=False):\n all_class = []\n single_class = []\n n_class = []\n for index, row in meta.iterrows():\n classes = class_cols[row[class_cols].astype(bool)]\n all_class.append(str(classes))\n n_class.append(int(len(classes)))\n if len(classes) > 0:\n priorities = check_priority(classes)\n inds = np.where(priorities == priorities.min())[0]\n sc = get_single_class(classes[inds])\n else:\n if fill_unk:\n sc = \"unk\"\n else:\n sc = \"\"\n single_class.append(sc)\n return single_class, all_class, n_class\n\n\n# %% [markdown]\n# ##\nprint(\"Loading annotations:\\n\")\n\nstart_instance()\nannot_df = pymaid.get_annotated(\"mw neuron groups\")\n\nseries_ids = []\nfor annot_name in annot_df[\"name\"]:\n print(annot_name)\n ids = pymaid.get_skids_by_annotation(annot_name)\n name = annot_name.replace(\"mw \", \"\")\n name = name.replace(\" \", \"_\")\n indicator = pd.Series(\n index=ids, data=np.ones(len(ids), dtype=bool), name=name, dtype=bool\n )\n series_ids.append(indicator)\n print()\n\n\n# %% [markdown]\n# ##\nmeta = pd.concat(series_ids, axis=1, ignore_index=False)\nmeta.fillna(False, inplace=True)\n\nclass1_name_map = {\n \"APL\": \"APL\",\n \"dSEZ\": \"dSEZ\",\n \"dVNC\": \"dVNC\",\n \"RG\": \"RG\",\n \"picky_LN\": \"pLN\",\n \"choosy_LN\": \"cLN\",\n \"broad_LN\": \"bLN\",\n \"CN\": \"CN\",\n \"CN2\": \"CN2\",\n \"CX\": \"CX\",\n \"FAN\": \"FAN\",\n \"FB2N\": \"FB2N\",\n \"FBN\": \"FBN\",\n \"KC\": \"KC\",\n \"keystone\": \"keystone\",\n 
\"LHN\": \"LHN\",\n \"LHN2\": \"LHN2\",\n \"LON\": \"LON\",\n \"MBIN\": \"MBIN\",\n \"MBON\": \"MBON\",\n \"motor\": \"motor\",\n \"mPN\": \"mPN\",\n \"dUnk\": \"dUnk\",\n \"sens\": \"sens\",\n \"tPN\": \"tPN\",\n \"uPN\": \"uPN\",\n \"vPN\": \"vPN\",\n \"vtd_2ndOrder\": \"vtd2\",\n \"AN_2nd_order\": \"AN2\",\n \"MN_2nd_order\": \"MN2\",\n}\n\n\nmeta.rename(class1_name_map, axis=1, inplace=True)\n\n\n# %% [markdown]\n# ##\nclass1_cols = np.array(list(class1_name_map.values()))\n\n\nsingle_class1, all_class1, n_class1 = get_classes(meta, class1_cols, fill_unk=True)\n\nmeta[\"class1\"] = single_class1\nmeta[\"all_class1\"] = all_class1\nmeta[\"n_class1\"] = n_class1\n\n\n# %% [markdown]\n# ##\nclass2_cols = []\nfor c in meta.columns.values:\n if \"subclass\" in c:\n class2_cols.append(c)\nclass2_cols = np.array(class2_cols)\n\n\nsingle_class2, all_class2, n_class2 = get_classes(meta, class2_cols)\n\n\ndef remove_subclass(string):\n ind = string.find(\"subclass_\")\n return string[ind + len(\"subclass_\") :]\n\n\nclass2_name_map = {\n \"appetitive\": \"app\",\n \"aversive\": \"av\",\n \"neither\": \"neith\",\n \"olfactory\": \"olfac\",\n}\n\n\ndef name_mapper(string, name_map):\n if string in name_map:\n return name_map[string]\n else:\n return string\n\n\nsingle_class2 = np.vectorize(remove_subclass)(single_class2)\nsingle_class2 = np.vectorize(lambda x: name_mapper(x, class2_name_map))(single_class2)\n\nmeta[\"class2\"] = single_class2\nmeta[\"all_class2\"] = all_class2\nmeta[\"n_class2\"] = n_class2\n\n# %% [markdown]\n# ##\nprint()\nprint(\"Class 1 unique values:\")\npprint.pprint(dict(zip(*np.unique(all_class1, return_counts=True))))\nprint()\nprint(\"Class 2 unique values:\")\npprint.pprint(dict(zip(*np.unique(all_class2, return_counts=True))))\nprint()\n\n# %% [markdown]\n# ## Hemisphere\nmeta[\"hemisphere\"] = \"C\" # default is center\nleft_meta = meta[meta[\"left\"]]\nmeta.loc[left_meta.index, \"hemisphere\"] = \"L\"\nright_meta = meta[meta[\"right\"]]\nmeta.loc[right_meta.index, \"hemisphere\"] = \"R\"\n\n# %% [markdown]\n# # Pairs\n\n# Pairs (NOTE this file has some issues where some ids are repeated in multiple pairs)\npair_df = pd.read_csv(pair_file, usecols=range(2))\npair_df[\"pair_id\"] = range(len(pair_df))\n\nuni_left, left_counts = np.unique(pair_df[\"leftid\"], return_counts=True)\nuni_right, right_counts = np.unique(pair_df[\"rightid\"], return_counts=True)\n\ndup_left_inds = np.where(left_counts != 1)[0]\ndup_right_inds = np.where(right_counts != 1)[0]\ndup_left_ids = uni_left[dup_left_inds]\ndup_right_ids = uni_right[dup_right_inds]\n\nprint(\"\\n\\n\")\nif len(dup_left_inds) > 0:\n print(\"Duplicate pairs left:\")\n print(dup_left_ids)\nif len(dup_right_inds) > 0:\n print(\"Duplicate pairs right:\")\n print(dup_right_ids)\nprint(\"\\n\\n\")\n\ndrop_df = pair_df[\n pair_df[\"leftid\"].isin(dup_left_ids) | pair_df[\"rightid\"].isin(dup_right_ids)\n]\nprint(\"\\n\\n\")\nprint(\"Dropping pairs:\")\nprint(drop_df)\nprint(\"\\n\\n\")\n\npair_df.drop(drop_df.index, axis=0, inplace=True)\n\npair_ids = np.concatenate((pair_df[\"leftid\"].values, pair_df[\"rightid\"].values))\nmeta_ids = meta.index.values\nin_meta_ids = np.isin(pair_ids, meta_ids)\ndrop_ids = pair_ids[~in_meta_ids]\npair_df = pair_df[~pair_df[\"leftid\"].isin(drop_ids)]\npair_df = pair_df[~pair_df[\"rightid\"].isin(drop_ids)]\n\nleft_to_right_df = pair_df.set_index(\"leftid\")\nright_to_left_df = pair_df.set_index(\"rightid\")\nright_to_left_df.head()\n\nmeta[\"Pair\"] = -1\nmeta[\"Pair ID\"] = 
-1\nmeta.loc[left_to_right_df.index, \"Pair\"] = left_to_right_df[\"rightid\"]\nmeta.loc[right_to_left_df.index, \"Pair\"] = right_to_left_df[\"leftid\"]\n\nmeta.loc[left_to_right_df.index, \"Pair ID\"] = left_to_right_df[\"pair_id\"]\nmeta.loc[right_to_left_df.index, \"Pair ID\"] = right_to_left_df[\"pair_id\"]\n\n#%% Fix places where L/R labels are not the same\nprint(\"\\n\\nFinding asymmetric L/R labels\")\nfor i in range(len(meta)):\n my_id = meta.index[i]\n my_class = meta.loc[my_id, \"class1\"]\n partner_id = meta.loc[my_id, \"Pair\"]\n if partner_id != -1:\n partner_class = meta.loc[partner_id, \"class1\"]\n if partner_class != \"unk\" and my_class == \"unk\":\n print(f\"{my_id} had asymmetric class label {partner_class}, fixed\")\n meta.loc[my_id, \"class1\"] = partner_class\n elif (partner_class != my_class) and (partner_class != \"unk\"):\n msg = (\n f\"{meta.index[i]} and partner {partner_id} have different labels\"\n + f\", labels are {my_class}, {partner_class}\"\n )\n print(msg)\nprint()\n\n# %% [markdown]\n# #\n\n# Merge class (put class 1 and class 2 together as a column)\nmeta[\"merge_class\"] = \"\"\nfor i in meta.index.values:\n merge_class = meta.loc[i, \"class1\"]\n if meta.loc[i, \"class2\"] != \"\":\n merge_class += \"-\" + meta.loc[i, \"class2\"]\n meta.loc[i, \"merge_class\"] = merge_class\n\nprint()\nprint(\"Merge class unique values:\")\npprint.pprint(dict(zip(*np.unique(meta[\"merge_class\"], return_counts=True))))\nprint()\n#%% lineages\n\n\ndef filt(string):\n string = string.replace(\"akira\", \"\")\n string = string.replace(\"Lineage\", \"\")\n string = string.replace(\"lineage\", \"\")\n string = string.replace(\"*\", \"\")\n string = string.strip(\"_\")\n string = string.strip(\" \")\n string = string.replace(\"_r\", \"\")\n string = string.replace(\"_l\", \"\")\n string = string.replace(\"right\", \"\")\n string = string.replace(\"left\", \"\")\n string = string.replace(\"unknown\", \"unk\")\n return string\n\n\nlineage_df = []\n\nannot_df = pymaid.get_annotated(\"Volker\")\nfor annot_name in annot_df[\"name\"]:\n print(annot_name)\n ids = pymaid.get_skids_by_annotation(annot_name.replace(\"*\", \"\\*\"))\n name = filt(annot_name)\n print(name)\n print()\n indicator = pd.Series(\n index=ids, data=np.ones(len(ids), dtype=bool), name=name, dtype=bool\n )\n lineage_df.append(indicator)\n\nlineage_df = pd.concat(lineage_df, axis=1, ignore_index=False)\n#%%\nlineage_df = lineage_df.fillna(False)\ndata = lineage_df.values\nrow_sums = data.sum(axis=1)\nlineage_df.loc[row_sums > 1, :] = False\ncheck_row_sums = lineage_df.values.sum(axis=1)\nassert check_row_sums.max() == 1\n\ncolumns = lineage_df.columns\nlineages = []\nfor index, row in lineage_df.iterrows():\n lineage = columns[row].values\n if len(lineage) < 1:\n lineage = \"unk\"\n else:\n lineage = lineage[0]\n lineages.append(lineage)\nlineage_series = pd.Series(index=lineage_df.index, data=lineages)\nlineage_series = lineage_series[lineage_series.index.isin(meta.index)]\nmeta[\"lineage\"] = \"unk\"\nmeta.loc[lineage_series.index, \"lineage\"] = lineage_series.values\n\n# %% [markdown]\n# ##\n\npair_meta = meta[meta[\"Pair\"] != -1]\npair_meta = pair_meta.sort_values([\"Pair ID\", \"hemisphere\"])\n\npair_unk = 0\nunk = []\npair_mismatch = 0\nmismatch = []\nfor p in pair_meta[\"Pair ID\"].unique():\n pm = pair_meta[pair_meta[\"Pair ID\"] == p]\n uni_lin = pm[\"lineage\"].unique()\n if (\"unk\" in uni_lin) and len(uni_lin) > 1:\n print(str(uni_lin) + \" unk\")\n pair_unk += 1\n unk.append(pm.index.values)\n 
elif len(uni_lin) > 1:\n print(str(uni_lin) + \" mismatch\")\n pair_mismatch += 1\n mismatch.append(pm.index.values)\n\nfrom src.io import savecsv\n\nmismatch = pd.DataFrame(mismatch)\nsavecsv(mismatch, \"mismatch\")\nunk = pd.DataFrame(unk)\nsavecsv(unk, \"unk\")\n\n#%%\ninput_counts_path = data_path / data_date_graphs / (input_counts_file + \".csv\")\ninput_counts_df = pd.read_csv(input_counts_path, index_col=0)\ncols = input_counts_df.columns.values\ncols = [str(c).strip(\" \") for c in cols]\ninput_counts_df.columns = cols\n\nmeta.loc[input_counts_df.index, \"dendrite_input\"] = input_counts_df[\"dendrite_inputs\"]\nmeta.loc[input_counts_df.index, \"axon_input\"] = input_counts_df[\"axon_inputs\"]\n\n\n#%% Import the raw graphs\nprint(\"Importing raw adjacency matrices:\\n\")\nnx_graphs_raw = {}\ndf_graphs_raw = {}\nfor graph_type in graph_types:\n print(graph_type)\n edgelist_path = data_path / data_date_graphs / (graph_type + \".csv\")\n adj = pd.read_csv(edgelist_path, index_col=0)\n meta = meta.reindex(adj.index)\n meta_data_dict = meta.to_dict(orient=\"index\")\n graph = df_to_nx(adj, meta_data_dict)\n nx_graphs_raw[graph_type] = graph\n df_graphs_raw[graph_type] = adj\n print()\n\n\n#%% Normalize weights for the raw graphs\ndf_graphs_norm = {}\nnx_graphs_norm = {}\nprint(\"Checking normalized weights\")\ninput_counts = input_counts_df[\"axon_inputs\"].values\n\ninput_counts[input_counts == 0] = 1\nfor graph_type in [\"axon-axon\", \"dendrite-axon\"]:\n print(graph_type)\n df_adj_raw = df_graphs_raw[graph_type]\n if (input_counts_df.index.values == adj.index.values).all():\n print(\"Same indexing!\")\n else:\n raise ValueError(\"Indexing of input counts file not the same!\")\n adj_raw = df_adj_raw.values\n adj_norm = adj_raw / input_counts[np.newaxis, :]\n print(adj_norm.sum(axis=0).max())\n df_adj_norm = pd.DataFrame(\n index=df_adj_raw.index, columns=df_adj_raw.columns, data=adj_norm\n )\n df_graphs_norm[graph_type] = df_adj_norm\n graph = df_to_nx(df_adj_norm, meta_data_dict)\n nx_graphs_norm[graph_type] = graph\n print()\n\ninput_counts = input_counts_df[\"dendrite_inputs\"].values\ninput_counts[input_counts == 0] = 1\nfor graph_type in [\"axon-dendrite\", \"dendrite-dendrite\"]:\n print(graph_type)\n df_adj_raw = df_graphs_raw[graph_type]\n if (input_counts_df.index.values == adj.index.values).all():\n print(\"Same indexing!\")\n adj_raw = df_adj_raw.values\n adj_norm = adj_raw / input_counts[np.newaxis, :]\n print(adj_norm.sum(axis=0).max())\n df_adj_norm = pd.DataFrame(\n index=df_adj_raw.index, columns=df_adj_raw.columns, data=adj_norm\n )\n df_graphs_norm[graph_type] = df_adj_norm\n graph = df_to_nx(df_adj_norm, meta_data_dict)\n nx_graphs_norm[graph_type] = graph\n print()\n\n#%%\n\nprint(\"\\n\\nChecking for rows with Nan values\")\nmissing_na = []\nnan_df = meta[meta.isna().any(axis=1)]\nfor row in nan_df.index:\n na_ind = nan_df.loc[row].isna()\n print(nan_df.loc[row][na_ind])\n missing_na.append(row)\nprint()\nprint(\"These skeletons have missing values in the metadata\")\nprint(missing_na)\nprint(\"\\n\\n\")\n\n\n#%% All-all graph\ntotal_input = (\n input_counts_df[\"dendrite_inputs\"].values + input_counts_df[\"axon_inputs\"].values\n)\ntotal_input[total_input == 0] = 1\n\nall_adj_raw = np.zeros_like(adj_norm)\nfor graph_type in graph_types:\n all_adj_raw += df_graphs_raw[graph_type].values\n\ndf_all_raw = pd.DataFrame(\n index=df_adj_raw.index, columns=df_adj_raw.columns, data=all_adj_raw\n)\n\nnx_all_raw = df_to_nx(df_all_raw, meta_data_dict)\n\nall_adj_norm = 
all_adj_raw / total_input[np.newaxis, :]\ndf_all_norm = pd.DataFrame(\n index=df_adj_raw.index, columns=df_adj_raw.columns, data=all_adj_norm\n)\n\nnx_all_norm = df_to_nx(df_all_norm, meta_data_dict)\n\n#%% Save\n\nprint(\"Saving graphs:\\n\")\nout_graphs = []\n[out_graphs.append(i) for i in nx_graphs_raw.values()]\n[print(i) for i in nx_graphs_raw.keys()]\nsave_names = [\"Gaa\", \"Gad\", \"Gda\", \"Gdd\"]\n[out_graphs.append(i) for i in nx_graphs_norm.values()]\n[print(i) for i in nx_graphs_norm.keys()]\nsave_names += [\"Gaan\", \"Gdan\", \"Gadn\", \"Gddn\"]\nout_graphs.append(nx_all_raw)\nsave_names.append(\"G\")\nout_graphs.append(nx_all_norm)\nsave_names.append(\"Gn\")\n\nfor name, graph in zip(save_names, out_graphs):\n nx.write_graphml(graph, output_path / (name + \".graphml\"))\n\nmeta.to_csv(output_path / \"meta_data.csv\")\n\n#%% verify things are right\nprint(\"\\n\\nChecking graphs are the same when saved\")\nprint(output_path)\nfor name, graph_wrote in zip(save_names, out_graphs):\n print(name)\n graph_read = nx.read_graphml(output_path / (name + \".graphml\"))\n adj_read = nx.to_numpy_array(graph_read)\n adj_wrote = nx.to_numpy_array(graph_wrote)\n print(np.array_equal(adj_read, adj_wrote))\n graph_loader = load_networkx(name, version=output_name)\n adj_loader = nx.to_numpy_array(graph_loader)\n print(np.array_equal(adj_wrote, adj_loader))\n print()\n\nprint(\"Done!\")\nsys.stdout.close()\n",
"from __future__ import division\nfrom textwrap import dedent\nimport colorsys\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nimport matplotlib as mpl\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.patches as Patches\nimport matplotlib.pyplot as plt\nimport warnings\nfrom six import string_types\nfrom six.moves import range\n\nfrom seaborn import utils\nfrom seaborn.axisgrid import FacetGrid\nfrom seaborn.categorical import _BarPlotter, _CategoricalPlotter\nfrom seaborn.categorical import factorplot as _factorplot\n\n\n__all__ = [\"countplot\", \"factorplot\", \"freqplot\"]\n\n\nclass _StackBarPlotter(_BarPlotter):\n \"\"\" Stacked Bar Plotter\n \n A modification of the :mod:`seaborn._BarPlotter` object with the added ability of\n stacking bars either verticaly or horizontally. It takes the same arguments\n as :mod:`seaborn._BarPlotter` plus the following:\n \n Arguments\n ---------\n stack : bool\n Stack bars if true, otherwise returns equivalent barplot as\n :mod:`seaborn._BarPlotter`.\n \"\"\"\n\n def draw_bars(self, ax, kws):\n \"\"\"Draw the bars onto `ax`.\"\"\"\n # Get the right matplotlib function depending on the orientation\n barfunc = ax.bar if self.orient == \"v\" else ax.barh\n barpos = np.arange(len(self.statistic))\n\n if self.plot_hues is None:\n\n # Draw the bars\n barfunc(\n barpos,\n self.statistic,\n self.width,\n color=self.colors,\n align=\"center\",\n **kws,\n )\n\n # Draw the confidence intervals\n errcolors = [self.errcolor] * len(barpos)\n self.draw_confints(\n ax, barpos, self.confint, errcolors, self.errwidth, self.capsize\n )\n else:\n # Stack by hue\n for j, hue_level in enumerate(self.hue_names):\n\n barpos_prior = None if j == 0 else np.sum(self.statistic[:, :j], axis=1)\n\n # Draw the bars\n if self.orient == \"v\":\n barfunc(\n barpos,\n self.statistic[:, j],\n self.nested_width,\n bottom=barpos_prior,\n color=self.colors[j],\n align=\"center\",\n label=hue_level,\n **kws,\n )\n elif self.orient == \"h\":\n barfunc(\n barpos,\n self.statistic[:, j],\n self.nested_width,\n left=barpos_prior,\n color=self.colors[j],\n align=\"center\",\n label=hue_level,\n **kws,\n )\n\n # Draw the confidence intervals\n if self.confint.size:\n confint = (\n self.confint[:, j]\n if j == 0\n else np.sum(self.confint[:, :j], axis=1)\n )\n errcolors = [self.errcolor] * len(barpos)\n self.draw_confints(\n ax, barpos, confint, errcolors, self.errwidth, self.capsize\n )\n\n\ndef countplot(\n x=None,\n y=None,\n hue=None,\n data=None,\n order=None,\n hue_order=None,\n orient=None,\n color=None,\n palette=None,\n saturation=0.75,\n dodge=True,\n stack=False,\n ax=None,\n **kwargs,\n):\n \"\"\" Show the count of observations in each categorical bin using bars.\n \n The count plot is a normalization of a histogram across categories, as opposed\n to quantitative variables. The basic API and options are identical to those for\n :func:`barplot`, so you can compare counts across nested variables.\n \n Parameters\n ----------\n x, y, hue : str or array-like, optional\n Inputs for plotting long-form data.\n data : DataFrame, array, or list of arrays, optional\n Dataset for plotting. If `x` and `y` are absent, this is interpreted as wide-form.\n Otherwise, data is expected to be long-form.\n order, hue_order : list of str, optional\n Order to plot the categorical levels, otherwise the levels are inferred from the\n data object.\n orient : {\"v\", \"h\"}, optional\n Whether to plot bars vertically (\"v\") or horizontally (\"h\"). 
This can also be\n inferred from the dtype of the input variables, but can be used to specify when the\n \"categorical\" variable is a numeric or when plotting wide-form data.\n color : matplotlib color, optional\n Color for all of the elements, or seed for a gradient palette.\n palette : palette name, list, or dict, optional\n Colors to use for the different levels of the `hue` variable. Should be something that\n can be interpreted by `color_palette()` or a dictionary mapping hue levels to\n matplotlib colors.\n saturation : float, optional\n Proportion of the original saturation to draw colors. Large patches often look better\n with slightly desaturated colors, but set this to `1` if you want the plot colors to\n perfectly match the input color spec.\n dodge : bool, optional\n When hue nesting is used, whether elements should be shifted along the categorical axis.\n stack : bool, optional\n When hue nesting is used, whether elements should be stacked on top of each other. Note,\n dodge is set to False when stack is True.\n ax : matplotlib.axes, optional\n Axes object to draw the plot onto, otherwise uses the current axes.\n **kwargs : Other keyword arguments are passed through to `plt.bar` at draw time\n \n Examples\n --------\n .. plot::\n :context: close-figs\n \n >>> import schmeaborn as sns\n >>> titanic = sns.load_dataset(\"titanic\")\n >>> ax = sns.countplot(x=\"class\", data=titanic)\n \n Show counts for two categorical variables:\n \n .. plot::\n :context: close-figs\n \n >>> ax = sns.countplot(x=\"class\", hue=\"who\", data=titanic)\n \n Plot the bars horizontally:\n \n .. plot::\n :context: close-figs\n \n >>> ax = sns.countplot(y=\"class\", hue=\"who\", data=titanic)\n \n Plot categories stacked:\n \n .. plot::\n :context: close-figs\n \n >>> ax = sns.countplot(x=\"class\", hue=\"who\", stack=True, data=titanic)\n \"\"\"\n\n # Define parameters for barplot\n if stack:\n dodge = False\n estimator = len\n ci = None\n n_boot = 0\n units = None\n errcolor = None\n errwidth = None\n capsize = None\n\n # Check orientation by input\n if x is None and y is not None:\n orient = \"h\"\n x = y\n elif y is None and x is not None:\n orient = \"v\"\n y = x\n elif x is not None and y is not None:\n raise TypeError(\"Cannot pass values for both `x` and `y`\")\n else:\n raise TypeError(\"Must pass values for either `x` or `y`\")\n\n bar_plot_func = _StackBarPlotter if stack else _BarPlotter\n plotter = bar_plot_func(\n x,\n y,\n hue,\n data,\n order,\n hue_order,\n estimator,\n ci,\n n_boot,\n units,\n orient,\n color,\n palette,\n saturation,\n errcolor,\n errwidth,\n capsize,\n dodge,\n )\n\n plotter.value_label = \"count\"\n\n if ax is None:\n ax = plt.gca()\n\n plotter.plot(ax, kwargs)\n return ax\n\n\ndef freqplot(\n x=None,\n y=None,\n hue=None,\n data=None,\n order=None,\n hue_order=None,\n orient=None,\n color=None,\n palette=None,\n saturation=0.75,\n dodge=True,\n stack=False,\n ax=None,\n **kwargs,\n):\n \"\"\" Show the frequency of observations in each categorical bin using bars.\n \n The frequency plot is a normalization of a histogram across categories, as opposed\n to quantitative variables. The basic API and options are identical to those for\n :func:`barplot`, so you can compare counts across nested variables.\n \n Parameters\n ----------\n x, y, hue : str or array-like, optional\n Inputs for plotting long-form data.\n data : DataFrame, array, or list of arrays, optional\n Dataset for plotting. 
If `x` and `y` are absent, this is interpreted as wide-form.\n Otherwise, data is expected to be long-form.\n order, hue_order : list of str, optional\n Order to plot the categorical levels, otherwise the levels are inferred from the\n data object.\n orient : {\"v\", \"h\"}, optional\n Whether to plot bars vertically (\"v\") or horizontally (\"h\"). This can also be\n inferred from the dtype of the input variables, but can be used to specify when the\n \"categorical\" variable is a numeric or when plotting wide-form data.\n color : matplotlib color, optional\n Color for all of the elements, or seed for a gradient palette.\n palette : palette name, list, or dict, optional\n Colors to use for the different levels of the `hue` variable. Should be something that\n can be interpreted by `color_palette()` or a dictionary mapping hue levels to\n matplotlib colors.\n saturation : float, optional\n Proportion of the original saturation to draw colors. Large patches often look better\n with slightly desaturated colors, but set this to `1` if you want the plot colors to\n perfectly match the input color spec.\n dodge : bool, optional\n When hue nesting is used, whether elements should be shifted along the categorical axis.\n stack : bool, optional\n When hue nesting is used, whether elements should be stacked on top of each other. Note,\n dodge is set to False when stack is True.\n ax : matplotlib.axes, optional\n Axes object to draw the plot onto, otherwise uses the current axes.\n **kwargs : Other keyword arguments are passed through to `plt.bar` at draw time\n \n Examples\n --------\n .. plot::\n :context: close-figs\n \n >>> import schmeaborn as sns\n >>> titanic = sns.load_dataset(\"titanic\")\n >>> ax = sns.freqplot(x=\"class\", data=titanic)\n \n Show frequencies for two categorical variables:\n \n .. plot::\n :context: close-figs\n \n >>> ax = sns.freqplot(x=\"class\", hue=\"who\", data=titanic)\n \n Plot the bars horizontally:\n \n .. plot::\n :context: close-figs\n \n >>> ax = sns.freqplot(y=\"class\", hue=\"who\", data=titanic)\n \n Plot categories stacked:\n \n .. plot::\n :context: close-figs\n \n >>> ax = sns.freqplot(x=\"class\", hue=\"who\", stack=True, data=titanic)\n \"\"\"\n\n # Define parameters for barplot\n if stack:\n dodge = False\n estimator = len\n ci = None\n n_boot = 0\n units = None\n errcolor = None\n errwidth = None\n capsize = None\n\n # Check orientation by input\n if x is None and y is not None:\n orient = \"h\"\n x = y\n elif y is None and x is not None:\n orient = \"v\"\n y = x\n elif x is not None and y is not None:\n raise TypeError(\"Cannot pass values for both `x` and `y`\")\n else:\n raise TypeError(\"Must pass values for either `x` or `y`\")\n\n bar_plot_func = _StackBarPlotter if stack else _BarPlotter\n plotter = bar_plot_func(\n x,\n y,\n hue,\n data,\n order,\n hue_order,\n estimator,\n ci,\n n_boot,\n units,\n orient,\n color,\n palette,\n saturation,\n errcolor,\n errwidth,\n capsize,\n dodge,\n )\n\n # Safely calculate frequencies: NaN counts replaced by 0\n plotter.statistic = np.nan_to_num(plotter.statistic)\n\n if plotter.statistic.ndim == 1:\n # Normalize statistic\n plotter.statistic = plotter.statistic / np.nansum(plotter.statistic)\n\n # Safety Check for proper normalization\n err = f\"Frequencies not properly normalized. 
\\n {plotter.statistic} \\n\"\n assert np.allclose(np.nansum(plotter.statistic), 1, rtol=1e-6), err\n elif plotter.statistic.ndim > 1:\n # Normalize row-stochastic\n plotter.statistic = (\n plotter.statistic / np.nansum(plotter.statistic, axis=1)[:, None]\n )\n\n # Safely check for proper normalization (ignore where full row is null)\n sum_stats = np.nansum(plotter.statistic, axis=1)\n\n # Safety Check for proper normalization\n err = f\"Frequencies not properly normalized. \\n {plotter.statistic} \\n\"\n assert np.allclose(sum_stats, 1, rtol=1e-6), err\n else:\n raise ValueError(\"Unable to count the combination of x and hue.\")\n\n plotter.value_label = \"frequency\"\n\n if ax is None:\n ax = plt.gca()\n\n plotter.plot(ax, kwargs)\n return ax\n",
"from os.path import basename\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom sacred import Experiment\nfrom sacred.observers import FileStorageObserver, SlackObserver\nfrom sklearn.model_selection import ParameterGrid\n\nfrom graspy.datasets import load_drosophila_left\nfrom graspy.utils import binarize, symmetrize\nfrom src.models import select_rdpg, select_sbm, select_dcsbm\nfrom src.utils import save_obj\n\nex = Experiment(\"Drosophila model selection 6 - new, DCSBM\")\n\ncurrent_file = basename(__file__)[:-3]\n\nsacred_file_path = Path(f\"./maggot_models/models/runs/{current_file}\")\n\nslack_obs = SlackObserver.from_config(\"slack.json\")\n\nfile_obs = FileStorageObserver.create(sacred_file_path)\n\nex.observers.append(slack_obs)\nex.observers.append(file_obs)\n\n\[email protected]\ndef config():\n # Variables defined in config get automatically passed to main\n\n n_block_try_range = list(range(1, 11)) # noqa: F841\n n_components_try_range = list(range(1, 13)) # noqa: F841\n n_components_try_rdpg = list(range(1, 13)) # noqa: F841\n reg_try_range = np.linspace(0, 10, 10)\n\n embed_kws_try_range = [{\"regularizer\": i} for i in reg_try_range] # noqa: F841\n n_init = 50 # 50 # noqa: F841\n n_jobs = -1 # noqa: F841\n directed = True # noqa: F841\n\n\ndef run_fit(\n seed,\n n_components_try_range,\n n_components_try_rdpg,\n n_block_try_range,\n directed,\n n_init,\n embed_kws_try_range,\n n_jobs,\n):\n graph = load_drosophila_left()\n if not directed:\n graph = symmetrize(graph, method=\"avg\")\n graph = binarize(graph)\n\n np.random.seed(seed)\n\n param_grid = {\n \"n_components\": n_components_try_range,\n \"n_blocks\": n_block_try_range,\n \"embed_kws\": embed_kws_try_range,\n }\n out_df = select_dcsbm(\n graph,\n param_grid,\n directed=directed,\n degree_directed=False,\n n_jobs=n_jobs,\n n_init=n_init,\n )\n\n print(out_df.head())\n\n save_obj(out_df, file_obs, \"grid_search_out\")\n return 0\n\n\[email protected]\ndef main(\n n_components_try_range,\n n_components_try_rdpg,\n n_block_try_range,\n directed,\n n_init,\n embed_kws_try_range,\n n_jobs,\n):\n seed = 8888\n out = run_fit(\n seed,\n n_components_try_range,\n n_components_try_rdpg,\n n_block_try_range,\n directed,\n n_init,\n embed_kws_try_range,\n n_jobs,\n )\n return out\n",
"from os.path import basename\nfrom pathlib import Path\n\nimport numpy as np\nfrom sacred import Experiment\nfrom sacred.observers import FileStorageObserver, SlackObserver\n\nfrom graspy.datasets import load_drosophila_left, load_drosophila_right\nfrom graspy.utils import binarize, symmetrize\nfrom src.models import select_dcsbm\nfrom src.utils import save_obj\n\nex = Experiment(\"Fit DSCSBM\")\n\ncurrent_file = basename(__file__)[:-3]\n\nsacred_file_path = Path(f\"./maggot_models/models/runs/{current_file}\")\n\nslack_obs = SlackObserver.from_config(\"slack.json\")\n\nfile_obs = FileStorageObserver.create(sacred_file_path)\n\nex.observers.append(slack_obs)\nex.observers.append(file_obs)\n\n\[email protected]\ndef config():\n # Variables defined in config get automatically passed to main\n\n # Parameter range for the models\n n_block_try_range = list(range(1, 21))\n n_components_try_range = list(range(1, 21))\n reg_try_range = np.linspace(0, 20, 20)\n embed_kws_try_range = [{\"regularizer\": i} for i in reg_try_range]\n\n param_grid = { # noqa: F841\n \"n_components\": n_components_try_range,\n \"n_blocks\": n_block_try_range,\n \"embed_kws\": embed_kws_try_range,\n }\n\n # Parameters for the experiment\n n_init = 100 # 50 # noqa: F841\n n_jobs = -2 # noqa: F841\n directed = True # noqa: F841\n\n\ndef run_fit(seed, param_grid, directed, n_init, n_jobs):\n # run left\n graph = load_drosophila_left()\n if not directed:\n graph = symmetrize(graph, method=\"avg\")\n graph = binarize(graph)\n ddcsbm_left_df = select_dcsbm(\n graph,\n param_grid,\n directed=directed,\n degree_directed=False,\n n_jobs=n_jobs,\n n_init=n_init,\n )\n save_obj(ddcsbm_left_df, file_obs, \"ddcsbm_left_df\")\n\n # run right\n graph = load_drosophila_right()\n if not directed:\n graph = symmetrize(graph, method=\"avg\")\n graph = binarize(graph)\n ddcsbm_right_df = select_dcsbm(\n graph,\n param_grid,\n directed=directed,\n degree_directed=False,\n n_jobs=n_jobs,\n n_init=n_init,\n )\n save_obj(ddcsbm_right_df, file_obs, \"ddcsbm_right_df\")\n\n return 0\n\n\[email protected]\ndef main(seed, param_grid, directed, n_init, n_jobs):\n seed = 8888\n out = run_fit(seed, param_grid, directed, n_init, n_jobs)\n return out\n",
"import numpy as np\nfrom anytree import LevelOrderGroupIter, NodeMixin, PostOrderIter, RenderTree\nfrom anytree.util import leftsibling\n\nfrom graspy.cluster import GaussianCluster, AutoGMMCluster\n\nvalid_methods = [\"graspy-gmm\", \"auto-gmm\"]\n\n\nclass DivisiveCluster(NodeMixin):\n def __init__(\n self,\n name=\"\",\n min_split_samples=5,\n parent=None,\n children=None,\n n_init=50,\n cluster_method=\"graspy-gmm\",\n ):\n self.name = name\n self.parent = parent\n if children:\n self.children = children\n self.min_split_samples = min_split_samples\n self.samples_ = None\n self.y_ = None\n self.n_init = n_init\n self.cluster_method = cluster_method\n\n def fit(self, X, y=None):\n n_samples = X.shape[0]\n self.n_samples_ = n_samples\n self.cum_dist_ = 0\n if n_samples > self.min_split_samples:\n if self.cluster_method == \"graspy-gmm\":\n cluster = GaussianCluster(\n min_components=1,\n max_components=2,\n n_init=self.n_init,\n covariance_type=\"all\",\n )\n elif self.cluster_method == \"auto-gmm\":\n cluster = AutoGMMCluster(\n min_components=1, max_components=2, max_agglom_size=None\n )\n elif self.cluster_method == \"vmm\":\n # cluster = VonMisesFisherMixture(n)\n pass\n else:\n raise ValueError(f\"`cluster_method` must be one of {valid_methods}\")\n cluster.fit(X)\n pred_labels = cluster.predict(X)\n self.pred_labels_ = pred_labels\n self.model_ = cluster\n if hasattr(cluster, \"bic_\"):\n bics = cluster.bic_\n self.bics_ = bics\n bic_ratio = bics.loc[2].min() / bics.loc[1].min()\n self.bic_ratio_ = bic_ratio\n if cluster.n_components_ != 1: # recurse\n indicator = pred_labels == 0\n self.X_children_ = (X[indicator, :], X[~indicator, :])\n children = []\n for i, X_child in enumerate(self.X_children_):\n child = DivisiveCluster(\n name=self.name + str(i),\n parent=self,\n min_split_samples=self.min_split_samples,\n n_init=self.n_init,\n cluster_method=self.cluster_method,\n )\n child = child.fit(X_child)\n children.append(child)\n self.children = children\n return self\n\n def predict_sample(self, sample, label):\n \"\"\"depricated\n \n Parameters\n ----------\n sample : [type]\n [description]\n label : [type]\n [description]\n \n Returns\n -------\n [type]\n [description]\n \"\"\"\n if not self.children:\n if not self.samples_:\n self.samples_ = []\n self.samples_.append(sample)\n if not self.y_:\n self.y_ = []\n self.y_.append(label)\n return self\n else:\n pred = self.model_.predict([sample])[0]\n if pred == 0:\n return self.children[0].predict_sample(sample, label)\n else:\n return self.children[1].predict_sample(sample, label)\n\n def predict(self, X, y=None):\n if not self.children:\n prediction = np.array(X.shape[0] * [self.name])\n return prediction\n else:\n node_preds = self.model_.predict(X, y=None)\n indicator = node_preds == 0\n left_preds = self.children[0].predict(X[indicator, :])\n right_preds = self.children[1].predict(X[~indicator, :])\n # this is a hacky way of making sure arrays have sufficiently large string\n # datatype to not lose information, without making any assumptions about\n # number of splits ahead of time. 
Sure there is a better way.\n if np.can_cast(left_preds.dtype, right_preds.dtype):\n # everything in left can be safely cast to right\n preds = np.zeros(X.shape[0], dtype=right_preds.dtype)\n elif np.can_cast(right_preds.dtype, left_preds.dtype):\n preds = np.zeros(X.shape[0], dtype=left_preds.dtype)\n else:\n print(left_preds.dtype, right_preds.dtype)\n raise ValueError(\"Cannot cast strings to proper size\")\n preds[indicator] = left_preds\n preds[~indicator] = right_preds\n return preds\n\n def print_tree(self, print_val=\"n_samples\"):\n for pre, _, node in RenderTree(self):\n if print_val == \"n_samples\":\n to_print = node.n_samples_\n elif print_val == \"bic_ratio\":\n if hasattr(node, \"bic_ratio_\"):\n to_print = node.bic_ratio_\n else:\n to_print = None\n treestr = \"%s%s (%s)\" % (pre, node.name, to_print)\n print(treestr.ljust(8))\n\n def build_linkage(self, bic_distance=False):\n # get a tuple of node at each level\n levels = []\n for group in LevelOrderGroupIter(self):\n levels.append(group)\n\n # just find how many nodes are leaves\n # this is necessary only because we need to add n to non-leaf clusters\n num_leaves = 0\n for node in PostOrderIter(self):\n if not node.children:\n num_leaves += 1\n\n link_count = 0\n node_index = 0\n linkages = []\n labels = []\n\n for g, group in enumerate(levels[::-1][:-1]): # reversed and skip the last\n for i in range(len(group) // 2):\n # get partner nodes\n left_node = group[2 * i]\n right_node = group[2 * i + 1]\n # just double check that these are always partners\n assert leftsibling(right_node) == left_node\n\n # check if leaves, need to add some new fields to track for linkage\n if not left_node.children:\n left_node._ind = node_index\n left_node._n_clusters = 1\n node_index += 1\n labels.append(left_node.name)\n\n if not right_node.children:\n right_node._ind = node_index\n right_node._n_clusters = 1\n node_index += 1\n labels.append(right_node.name)\n\n # find the parent, count samples\n parent_node = left_node.parent\n n_clusters = left_node._n_clusters + right_node._n_clusters\n parent_node._n_clusters = n_clusters\n\n # assign an ind to this cluster for the dendrogram\n parent_node._ind = link_count + num_leaves\n link_count += 1\n\n if not bic_distance:\n distance = g + 1 # equal height for all links\n else:\n raise NotImplementedError()\n # tried to use BIC as linkage distance, but not monotonic.\n # would need to sort somehow by BIC ratios, but this may not be\n # possible while preserving splitting nature of the tree\n\n # self.cum_dist_ += (left_node.cum_dist_ + right_node.cum_dist_) / 2\n # self.cum_dist_ += parent_node.bic_ratio_ - 1\n # distance = self.cum_dist_\n distance = parent_node.bic_ratio_ - 1\n\n # add a row to the linkage matrix\n linkages.append([left_node._ind, right_node._ind, distance, n_clusters])\n\n labels = np.array(labels)\n linkages = np.array(linkages, dtype=np.double) # needs to be a double for scipy\n return (linkages, labels)\n\n",
"# %% [markdown]\n# ##\nimport os\nimport time\nimport warnings\nfrom itertools import chain\n\nimport colorcet as cc\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom anytree import LevelOrderGroupIter, NodeMixin\nfrom joblib import Parallel, delayed\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nfrom scipy.linalg import orthogonal_procrustes\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy.spatial.distance import squareform\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.manifold import MDS, TSNE, Isomap\nfrom sklearn.metrics import adjusted_rand_score, pairwise_distances\nfrom sklearn.utils.testing import ignore_warnings\nfrom tqdm.autonotebook import tqdm\n\nfrom graspy.cluster import AutoGMMCluster, GaussianCluster\nfrom graspy.embed import (\n AdjacencySpectralEmbed,\n ClassicalMDS,\n LaplacianSpectralEmbed,\n select_dimension,\n selectSVD,\n)\nfrom graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator\nfrom graspy.plot import heatmap, pairplot\nfrom graspy.simulations import rdpg\nfrom graspy.utils import augment_diagonal, binarize, pass_to_ranks\nfrom src.cluster import get_paired_inds\nfrom src.data import load_metagraph\nfrom src.graph import preprocess\nfrom src.hierarchy import signal_flow\nfrom src.io import savecsv, savefig\nfrom src.traverse import (\n Cascade,\n RandomWalk,\n TraverseDispatcher,\n to_markov_matrix,\n to_path_graph,\n to_transmission_matrix,\n)\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n adjplot,\n barplot_text,\n draw_networkx_nice,\n gridmap,\n matrixplot,\n palplot,\n screeplot,\n set_axes_equal,\n stacked_barplot,\n)\n\n\nwarnings.filterwarnings(action=\"ignore\", category=ConvergenceWarning)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\nrc_dict = {\n \"axes.spines.right\": False,\n \"axes.spines.top\": False,\n \"axes.formatter.limits\": (-3, 3),\n \"figure.figsize\": (6, 3),\n \"figure.dpi\": 100,\n}\nfor key, val in rc_dict.items():\n mpl.rcParams[key] = val\ncontext = sns.plotting_context(context=\"talk\", font_scale=1, rc=rc_dict)\nsns.set_context(context)\n\nnp.random.seed(8888)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name)\n\n\ngraph_type = \"G\"\nmg = load_metagraph(graph_type, version=\"2020-04-01\")\nmg = preprocess(\n mg,\n threshold=0,\n sym_threshold=False,\n remove_pdiff=True,\n binarize=False,\n weight=\"weight\",\n)\nmeta = mg.meta\n\n# plot where we are cutting out nodes based on degree\ndegrees = mg.calculate_degrees()\nfig, ax = plt.subplots(1, 1, figsize=(5, 2.5))\nsns.distplot(np.log10(degrees[\"Total edgesum\"]), ax=ax)\nq = np.quantile(degrees[\"Total edgesum\"], 0.05)\nax.axvline(np.log10(q), linestyle=\"--\", color=\"r\")\nax.set_xlabel(\"log10(total synapses)\")\n\n# remove low degree neurons\nidx = meta[degrees[\"Total edgesum\"] > q].index\nmg = mg.reindex(idx, use_ids=True)\n\n# remove center neurons # FIXME\nidx = mg.meta[mg.meta[\"hemisphere\"].isin([\"L\", \"R\"])].index\nmg = mg.reindex(idx, use_ids=True)\n\nmg = mg.make_lcc()\nmg.calculate_degrees(inplace=True)\nmeta = mg.meta\nmeta[\"inds\"] = range(len(meta))\nadj = mg.adj.copy()\n\n# %% [markdown]\n# ##\nadj = pass_to_ranks(adj)\n\nleft_inds = 
meta[meta[\"left\"]][\"inds\"].values\nright_inds = meta[meta[\"right\"]][\"inds\"].values\nlp_inds, rp_inds = get_paired_inds(meta)\n\nleft_left_adj = adj[np.ix_(left_inds, left_inds)]\nright_right_adj = adj[np.ix_(right_inds, right_inds)]\nright_left_adj = adj[np.ix_(right_inds, left_inds)]\nleft_right_adj = adj[np.ix_(left_inds, right_inds)]\n\n# %% [markdown]\n# ##\n\n\ndef add_connections(x1, x2, y1, y2, color=\"black\", alpha=0.2, linewidth=0.2, ax=None):\n x1 = np.array(x1)\n x2 = np.array(x2)\n y1 = np.array(y1)\n y2 = np.array(y2)\n if ax is None:\n ax = plt.gca()\n for i in range(len(x1)):\n ax.plot(\n [x1[i], x2[i]],\n [y1[i], y2[i]],\n color=color,\n alpha=alpha,\n linewidth=linewidth,\n )\n\n\ndef plot_pairs(\n X,\n # left_inds,\n # right_inds,\n labels,\n model=None,\n left_pair_inds=None,\n right_pair_inds=None,\n equal=False,\n):\n\n n_dims = X.shape[1]\n\n # if colors is None:\n # colors = sns.color_palette(\"tab10\", n_colors=k, desat=0.7)\n\n fig, axs = plt.subplots(\n n_dims, n_dims, sharex=False, sharey=False, figsize=(20, 20)\n )\n data = pd.DataFrame(data=X)\n data[\"label\"] = labels\n\n for i in range(n_dims):\n for j in range(n_dims):\n ax = axs[i, j]\n ax.axis(\"off\")\n if i < j:\n sns.scatterplot(\n data=data,\n x=j,\n y=i,\n ax=ax,\n alpha=0.7,\n linewidth=0,\n s=8,\n legend=False,\n hue=\"label\",\n palette=CLASS_COLOR_DICT,\n )\n if left_pair_inds is not None and right_pair_inds is not None:\n add_connections(\n data.iloc[left_pair_inds.values, j],\n data.iloc[right_pair_inds.values, j],\n data.iloc[left_pair_inds.values, i],\n data.iloc[right_pair_inds.values, i],\n ax=ax,\n )\n\n plt.tight_layout()\n return fig, axs\n\n\n# %% [markdown]\n# ##\n\nase = AdjacencySpectralEmbed(n_components=None, n_elbows=True)\n\nleft_left_embed = ase.fit_transform(left_left_adj)\nright_right_embed = ase.fit_transform(right_right_adj)\n\n# left_right_embed = ase.fit_transform(left_right_adj)\n# right_left_embed = ase.fit_transform(right_left_adj)\n\n\n#%%\n\n\nclass Procrustes:\n def __init__(self, method=\"ortho\"):\n self.method = method\n\n def fit(self, X, Y=None, x_seeds=None, y_seeds=None):\n if Y is None and (x_seeds is not None and y_seeds is not None):\n Y = X[y_seeds]\n X = X[x_seeds]\n elif Y is not None and (x_seeds is not None or y_seeds is not None):\n raise ValueError(\"May only use one of {Y, {x_seeds, y_seeds}}\")\n\n X = X.copy()\n Y = Y.copy()\n\n if self.method == \"ortho\":\n R = orthogonal_procrustes(X, Y)[0]\n elif self.method == \"diag-ortho\":\n norm_X = np.linalg.norm(X, axis=1)\n norm_Y = np.linalg.norm(Y, axis=1)\n norm_X[norm_X <= 1e-15] = 1\n norm_Y[norm_Y <= 1e-15] = 1\n X = X / norm_X[:, None]\n Y = Y / norm_Y[:, None]\n R = orthogonal_procrustes(X, Y)[0]\n else:\n raise ValueError(\"Invalid `method` parameter\")\n\n self.R_ = R\n return self\n\n def transform(self, X, map_inds=None):\n if map_inds is not None:\n X_transform = X.copy()\n X_transform[map_inds] = X_transform[map_inds] @ self.R_\n else:\n X_transform = X @ self.R_\n return X_transform\n\n\n# %% [markdown]\n# ##\n\ngraph_types = [\"Gad\", \"Gaa\", \"Gdd\", \"Gda\"]\nadjs = []\nfor g in graph_types:\n temp_mg = load_metagraph(g, version=\"2020-04-01\")\n temp_mg.reindex(mg.meta.index, use_ids=True)\n temp_adj = temp_mg.adj\n adjs.append(temp_adj)\n\nembed_adjs = [pass_to_ranks(a) for a in adjs]\n# embed_adjs = [a + 1 / a.size for a in embed_adjs]\n# embed_adjs = [augment_diagonal(a) for a in embed_adjs]\n\n# %% [markdown]\n# ##\n\n\ndef bilateral_ase(adj):\n ase = 
AdjacencySpectralEmbed(n_components=None, n_elbows=2, check_lcc=False)\n ipsi_adj = adj.copy()\n ipsi_adj[np.ix_(left_inds, right_inds)] = 0\n ipsi_adj[np.ix_(right_inds, left_inds)] = 0\n ipsi_embed = ase.fit_transform(ipsi_adj)\n\n procrust = Procrustes()\n align_ipsi_embed = []\n for e in ipsi_embed:\n procrust.fit(e, x_seeds=lp_inds, y_seeds=rp_inds)\n align_e = procrust.transform(e, map_inds=left_inds)\n align_ipsi_embed.append(align_e)\n align_ipsi_embed = np.concatenate(align_ipsi_embed, axis=1)\n\n contra_adj = adj.copy()\n contra_adj[np.ix_(left_inds, left_inds)] = 0\n contra_adj[np.ix_(right_inds, right_inds)] = 0\n contra_embed = ase.fit_transform(contra_adj)\n\n procrust = Procrustes()\n align_contra_embed = []\n for e in contra_embed:\n procrust.fit(e, x_seeds=lp_inds, y_seeds=rp_inds)\n align_e = procrust.transform(e, map_inds=left_inds)\n align_contra_embed.append(align_e)\n align_contra_embed = np.concatenate(align_contra_embed, axis=1)\n return align_ipsi_embed, align_contra_embed\n\n\nall_embeds = []\nfor a in embed_adjs:\n embed = bilateral_ase(a)\n all_embeds.append(embed[0])\n all_embeds.append(embed[1])\n # U, _, _ = selectSVD(embed, n_elbows=2)\n # plot_pairs(\n # U,\n # labels=meta[\"merge_class\"].values,\n # left_pair_inds=lp_inds,\n # right_pair_inds=rp_inds,\n # )\ncat_all_embeds = np.concatenate(all_embeds, axis=1)\n# %% [markdown]\n# ##\n# align_joint_embed = np.concatenate((align_ipsi_embed, align_contra_embed), axis=1)\n# U, S, V = selectSVD(align_joint_embed)\nU, S, V = selectSVD(cat_all_embeds, n_elbows=4)\nprint(U.shape)\nplt.plot(S)\n# %% [markdown]\n# ##\nplot_pairs(\n U,\n labels=meta[\"merge_class\"].values,\n left_pair_inds=lp_inds,\n right_pair_inds=rp_inds,\n)\n\n# %% [markdown]\n# ##\nfrom graspy.utils import symmetrize\n\n# manifold = TSNE(metric=\"cosine\")\n# tsne_embed = tsne.fit_transform(U)\nmanifold = ClassicalMDS(n_components=U.shape[1] - 1, dissimilarity=\"precomputed\")\n# manifold = MDS(n_components=2, dissimilarity=\"precomputed\")\n# manifold = Isomap(n_components=2, metric=\"precomputed\")\npdist = symmetrize(pairwise_distances(U, metric=\"cosine\"))\nmanifold_embed = manifold.fit_transform(pdist)\n\nplot_pairs(\n manifold_embed,\n labels=meta[\"merge_class\"].values,\n left_pair_inds=lp_inds,\n right_pair_inds=rp_inds,\n)\n\n# %% [markdown]\n# ##\n\nfig, ax = plt.subplots(1, 1, figsize=(10, 10))\nplot_df = pd.DataFrame(data=manifold_embed)\nplot_df[\"merge_class\"] = meta[\"merge_class\"].values\nsns.scatterplot(\n data=plot_df,\n x=0,\n y=1,\n hue=\"merge_class\",\n palette=CLASS_COLOR_DICT,\n legend=False,\n ax=ax,\n s=20,\n linewidth=0.5,\n alpha=0.7,\n)\n# remove_axis(ax)\nax.axis(\"off\")\nadd_connections(\n plot_df.iloc[lp_inds, 0],\n plot_df.iloc[rp_inds, 0],\n plot_df.iloc[lp_inds, 1],\n plot_df.iloc[rp_inds, 1],\n ax=ax,\n)\n\n# %% [markdown]\n# ## Notes\n# is it worth trying mvmds here\n# is it worth doing an ASE/LSE combo\n# way I am doing the procrustes now is also weird.\n# maybe try making a similarity matrix for classes\n\nmetric = \"euclidean\"\npdists = []\nfor embed in all_embeds:\n pdist = pairwise_distances(embed, metric=metric)\n pdists.append(pdist)\n\n\n# %% [markdown]\n# ##\nfrom mvlearn.embed import MVMDS\n\n# %% [markdown]\n# ##\n\nmvmds = MVMDS(n_components=6)\n\nmvmds_embed = mvmds.fit_transform(all_embeds)\n\n# %% [markdown]\n# ##\nplot_pairs(\n mvmds_embed,\n labels=meta[\"merge_class\"].values,\n left_pair_inds=lp_inds,\n right_pair_inds=rp_inds,\n)\n# dont like mvmds on cosine distances\n\n\n# 
%%\nfrom src.cluster import crossval_cluster, plot_metrics, predict, plot_cluster_pairs\n\nresults = crossval_cluster(\n mvmds_embed,\n left_inds,\n right_inds,\n min_clusters=2,\n max_clusters=20,\n n_init=25,\n left_pair_inds=lp_inds,\n right_pair_inds=rp_inds,\n)\n\nplot_metrics(results)\n\n# %% [markdown]\n# ##\nmetric = \"bic\"\nk = 2\nind = results[results[\"k\"] == k][metric].idxmax()\nmodel = results.loc[ind, \"model\"]\npred = predict(mvmds_embed, left_inds, right_inds, model, relabel=False)\nplot_cluster_pairs(\n mvmds_embed,\n left_inds,\n right_inds,\n model,\n meta[\"merge_class\"].values,\n lp_inds,\n rp_inds,\n)\npred_side = predict(mvmds_embed, left_inds, right_inds, model, relabel=True)\n\nstacked_barplot(pred_side, meta[\"merge_class\"].values, color_dict=CLASS_COLOR_DICT)\n\n# %% [markdown]\n# ##\n\nfrom src.cluster import MaggotCluster\n\nfrom sklearn.model_selection import ParameterGrid\n\nbasename = \"mvmds\"\n# params = list(ParameterGrid(param_grid))\n# n_levels = 7\n\n# mcs = []\n# for p in params:\n# metric = p[\"metric\"]\n# embed = p[\"embed\"]\n# realign = p[\"realign\"]\n# reembed = p[\"reembed\"]\n# basename = f\"-{p}\".replace(\" \", \"\")\n# basename = basename.replace(\":\", \"=\")\n# basename = basename.replace(\",\", \"-\")\n# basename = basename.replace(\"'\", \"\")\n# print(basename)\n\n# np.random.seed(8888)\nn_levels = 8\nmc = MaggotCluster(\n \"0\",\n adj=adj,\n meta=meta,\n n_init=25,\n stashfig=stashfig,\n min_clusters=1,\n max_clusters=3,\n X=mvmds_embed,\n)\n\nfor i in range(n_levels):\n for j, node in enumerate(mc.get_lowest_level()):\n node.fit_candidates()\n for j, node in enumerate(mc.get_lowest_level()):\n node.select_model(2, metric=metric)\n mc.collect_labels()\n\nfig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 30))\nfor i in range(n_levels):\n ax = axs[i]\n stacked_barplot(\n mc.meta[f\"lvl{i}_labels_side\"],\n mc.meta[\"merge_class\"],\n category_order=np.unique(mc.meta[f\"lvl{i}_labels_side\"].values),\n color_dict=CLASS_COLOR_DICT,\n norm_bar_width=False,\n ax=ax,\n )\n ax.set_yticks([])\n ax.get_legend().remove()\n\nstashfig(f\"count-barplot-lvl{i}\" + basename)\nplt.close()\n\nfig, axs = plt.subplots(1, n_levels, figsize=(10 * n_levels, 30))\nfor i in range(n_levels):\n ax = axs[i]\n stacked_barplot(\n mc.meta[f\"lvl{i}_labels_side\"],\n mc.meta[\"merge_class\"],\n category_order=np.unique(mc.meta[f\"lvl{i}_labels_side\"].values),\n color_dict=CLASS_COLOR_DICT,\n norm_bar_width=True,\n ax=ax,\n )\n ax.set_yticks([])\n ax.get_legend().remove()\n\nstashfig(f\"prop-barplot-lvl{i}\" + basename)\nplt.close()\n\nfor i in range(n_levels):\n fig, ax = plt.subplots(1, 1, figsize=(20, 20))\n adjplot(\n adj,\n meta=mc.meta,\n sort_class=f\"lvl{i}_labels_side\",\n item_order=\"merge_class\",\n plot_type=\"scattermap\",\n sizes=(0.5, 1),\n ticks=False,\n colors=\"merge_class\",\n ax=ax,\n palette=CLASS_COLOR_DICT,\n gridline_kws=dict(linewidth=0.2, color=\"grey\", linestyle=\"--\"),\n )\n stashfig(f\"adj-lvl{i}\" + basename)\n\n",
"#%% Imports and file loading\nfrom pathlib import Path\nimport glob\nimport json\nfrom os import listdir\nfrom operator import itemgetter\n\nimport pandas as pd\nimport networkx as nx\nimport numpy as np\nfrom graspy.plot import gridplot\nfrom src.data import load_networkx\n\n# File locations\nbase_path = Path(\"./maggot_models/data/raw/Maggot-Brain-Connectome/\")\n\ndata_path = base_path / \"4-color-matrices_Brain\"\n\ndata_date_graphs = \"2020-01-29\" # this is for the graph, not the annotations\n\ngraph_types = [\"axon-axon\", \"axon-dendrite\", \"dendrite-axon\", \"dendrite-dendrite\"]\n\ndata_date_groups = \"2020-01-29\" # this is for the annotations\n\nclass_data_folder = base_path / f\"neuron-groups/{data_date_groups}\"\n\nall_neuron_file = \"all-neurons-with-sensories-2020-01-14.json\"\nleft_file = \"hemisphere-L-2020-1-14.json\"\nright_file = \"hemisphere-R-2020-1-14.json\"\n\ninput_counts_file = \"input_counts\"\n\npair_file = base_path / \"pairs/bp-pairs-2020-01-28.csv\"\n\noutput_path = Path(f\"maggot_models/data/processed/{data_date_graphs}\")\n\nskeleton_data_file = (\n data_path / Path(data_date_graphs) / \"skeleton_id_vs_neuron_name.csv\"\n)\n\nlineage_file = data_path / Path(data_date_graphs) / \"skeleton_id_vs_lineage.csv\"\n\n\ndef df_to_nx(df, meta_data_dict):\n c = df.columns.values\n c = c.astype(int)\n r = df.index.values\n df.columns = c\n if not (c == r).all():\n raise ValueError(\"Mismatching df indexing\")\n graph = nx.from_pandas_adjacency(df, create_using=nx.DiGraph)\n nx.set_node_attributes(graph, meta_data_dict)\n return graph\n\n\ndef extract_ids(lod):\n out_list = []\n for d in lod:\n skel_id = d[\"skeleton_id\"]\n out_list.append(skel_id)\n return out_list\n\n\ndef remove_date(string):\n datestrings = [\"-2019\", \"-2020\"]\n for d in datestrings:\n ind = string.find(d)\n if ind != -1:\n return string[:ind]\n print(f\"Could not remove date from string {string}\")\n return -1\n\n\ndef append_class(df, id, col, name):\n try:\n if df.loc[i, col] == \"\":\n df.loc[i, col] += name\n elif df.loc[i, col] == \"unk\": # always replace \"unk\"\n df.loc[i, col] = name\n elif not df.loc[i, col]:\n df.loc[i, col] = name\n else:\n df.loc[i, col] += \";\" + name\n return 0\n except KeyError:\n print(f\"Skeleton ID {id} not in graph\")\n print(f\"Skeleton class was {name}\")\n return 1\n\n\n# # Begin main script\n\nmeta_data_df = pd.read_csv(skeleton_data_file, delimiter=\",\", usecols=range(2))\nmeta_data_df.rename(lambda x: x.strip(\" \"), axis=1, inplace=True)\nmeta_data_df.head()\n\nmeta_data_df.set_index(\"skeleton_id\", inplace=True)\nskeleton_ids = meta_data_df.index.values\nprint(f\"There are {len(skeleton_ids)} possible nodes in the graph\")\n\n# %% [markdown]\n# # Load initial files\n\n# append new cell type classes\ngroup_files = listdir(class_data_folder)\n\nremove_files = [all_neuron_file, left_file, right_file]\n[group_files.remove(rf) for rf in remove_files]\n\nnew_group_files = []\nfor f in group_files:\n if f.endswith(\".json\"):\n new_group_files.append(f)\ngroup_files = new_group_files\n\n# %% [markdown]\n# # Iterate over all class and subclasses, put into dicts\nnames = []\ngroup_map = {}\nsubgroup_map = {}\nfor f in group_files:\n if \"CAT\" not in f: # skip categorical ones here\n name = remove_date(f)\n print(name)\n with open(class_data_folder / f, \"r\") as json_file:\n temp_dict = json.load(json_file)\n temp_ids = extract_ids(temp_dict)\n if \"subclass_\" in name:\n ind = name.find(\"subclass_\")\n temp_name = name[ind + len(\"subclass_\") :] # 
only keep things after\n subgroup_map[temp_name] = temp_ids\n else:\n group_map[name] = temp_ids\n\n# %% [markdown]\n# #\nmeta_data_df[\"Class 1\"] = \"unk\"\nnum_missing = 0\nfor name, ids in group_map.items():\n for i in ids:\n num_missing += append_class(meta_data_df, i, \"Class 1\", name)\n\nprint()\nprint(f\"{num_missing} skeleton IDs missing from graph\")\nprint()\n\nmeta_data_df[\"Class 2\"] = \"\"\nfor name, ids in subgroup_map.items():\n for i in ids:\n num_missing += append_class(meta_data_df, i, \"Class 2\", name)\n\nprint(np.unique(meta_data_df[\"Class 1\"]))\n\nprint(np.unique(meta_data_df[\"Class 2\"]))\n\n\n# Merge class (put class 1 and class 2 together as a column)\nmeta_data_df[\"Merge Class\"] = \"\"\nfor i in meta_data_df.index.values:\n merge_class = meta_data_df.loc[i, \"Class 1\"]\n if meta_data_df.loc[i, \"Class 2\"] != \"\":\n merge_class += \"-\" + meta_data_df.loc[i, \"Class 2\"]\n meta_data_df.loc[i, \"Merge Class\"] = merge_class\n\n#%% manage the \"Categorical\" (true/false) labels\nfor f in group_files:\n if \"CAT\" in f:\n name = remove_date(f)\n print(name)\n name = name.replace(\"CAT-\", \"\")\n meta_data_df[name] = False\n with open(class_data_folder / f, \"r\") as json_file:\n temp_dict = json.load(json_file)\n temp_ids = extract_ids(temp_dict)\n for i in temp_ids:\n append_class(meta_data_df, i, name, True)\n\n# %% [markdown]\n# # Add hemisphere labels\nmeta_data_df[\"Hemisphere\"] = None\nfor f in [left_file, right_file]:\n name = remove_date(f)\n name = name.replace(\"hemisphere-\", \"\")\n print(name)\n with open(class_data_folder / f, \"r\") as json_file:\n temp_dict = json.load(json_file)\n temp_ids = extract_ids(temp_dict)\n for i in temp_ids:\n append_class(meta_data_df, i, \"Hemisphere\", name)\n\nprint(\"Missing Hemisphere annotation for\")\nprint(meta_data_df[meta_data_df[\"Hemisphere\"].isnull()])\n\n# %% [markdown]\n# # Pairs\n\n# Pairs (NOTE this file has some issues where some ids are repeated in multiple pairs)\npair_df = pd.read_csv(pair_file, usecols=range(3))\npair_df.head()\n\nuni_left, left_counts = np.unique(pair_df[\"leftid\"], return_counts=True)\nuni_right, right_counts = np.unique(pair_df[\"rightid\"], return_counts=True)\n\ndup_left_inds = np.where(left_counts != 1)[0]\ndup_right_inds = np.where(right_counts != 1)[0]\ndup_left_ids = uni_left[dup_left_inds]\ndup_right_ids = uni_right[dup_right_inds]\nif len(dup_left_inds) > 0:\n print(\"Duplicate pairs left:\")\n print(dup_left_ids)\nif len(dup_right_inds) > 0:\n print(\"Duplicate pairs right:\")\n print(dup_right_ids)\n\ndrop_df = pair_df[\n pair_df[\"leftid\"].isin(dup_left_ids) | pair_df[\"rightid\"].isin(dup_right_ids)\n]\nprint(\"Dropping pairs:\")\nprint(drop_df)\npair_df.drop(drop_df.index, axis=0, inplace=True)\n\npair_ids = np.concatenate((pair_df[\"leftid\"].values, pair_df[\"rightid\"].values))\nmeta_ids = meta_data_df.index.values\nin_meta_ids = np.isin(pair_ids, meta_ids)\ndrop_ids = pair_ids[~in_meta_ids]\npair_df = pair_df[~pair_df[\"leftid\"].isin(drop_ids)]\npair_df = pair_df[~pair_df[\"rightid\"].isin(drop_ids)]\n\nleft_to_right_df = pair_df.set_index(\"leftid\")\nright_to_left_df = pair_df.set_index(\"rightid\")\nright_to_left_df.head()\n\nmeta_data_df[\"Pair\"] = -1\nmeta_data_df[\"Pair ID\"] = -1\nmeta_data_df.loc[left_to_right_df.index, \"Pair\"] = left_to_right_df[\"rightid\"]\nmeta_data_df.loc[right_to_left_df.index, \"Pair\"] = right_to_left_df[\"leftid\"]\n\nmeta_data_df.loc[left_to_right_df.index, \"Pair ID\"] = 
left_to_right_df[\"pair_id\"]\nmeta_data_df.loc[right_to_left_df.index, \"Pair ID\"] = right_to_left_df[\"pair_id\"]\n\n#%% Fix places where L/R labels are not the same\nprint()\nfor i in range(len(meta_data_df)):\n my_id = meta_data_df.index[i]\n my_class = meta_data_df.loc[my_id, \"Class 1\"]\n partner_id = meta_data_df.loc[my_id, \"Pair\"]\n if partner_id != -1:\n partner_class = meta_data_df.loc[partner_id, \"Class 1\"]\n if partner_class != \"unk\" and my_class == \"unk\":\n print(f\"{my_id} had asymmetric class label, fixed\")\n meta_data_df.loc[my_id, \"Class 1\"] = partner_class\n elif (partner_class != my_class) and (partner_class != \"unk\"):\n print(\n f\"{meta_data_df.index[i]} and partner {partner_id} have different labels\"\n )\nprint()\n\n#%% lineages\nlineage_df = pd.read_csv(lineage_file)\nlineage_df = lineage_df.set_index(\"skeleton_id\")\nlineage_df = lineage_df.fillna(\"unk\")\n# ignore lineages for nonexistent skeletons\nlineage_df = lineage_df[lineage_df.index.isin(meta_data_df.index)]\nprint(f\"Missing lineage info for {len(meta_data_df) - len(lineage_df)} skeletons\")\nprint(meta_data_df[~meta_data_df.index.isin(lineage_df.index)])\n\n\ndef filter_lineage(string): # renamed to avoid shadowing the builtin `filter`\n string = string.replace(\"akira\", \"\")\n string = string.replace(\"Lineage\", \"\")\n string = string.replace(\"*\", \"\")\n string = string.strip(\"_\")\n string = string.strip(\" \")\n string = string.replace(\"_r\", \"\")\n string = string.replace(\"_l\", \"\")\n return string\n\n\nlineages = lineage_df[\"lineage\"]\nlineages = np.vectorize(filter_lineage)(lineages)\nmeta_data_df[\"lineage\"] = \"unk\"\nmeta_data_df.loc[lineage_df.index, \"lineage\"] = lineages\nnulls = meta_data_df[meta_data_df.isnull().any(axis=1)]\n\ninput_counts_path = data_path / data_date_graphs / (input_counts_file + \".csv\")\ninput_counts_df = pd.read_csv(input_counts_path, index_col=0)\ncols = input_counts_df.columns.values\ncols = [str(c).strip(\" \") for c in cols]\ninput_counts_df.columns = cols\nprint(input_counts_df.head())\n\nmeta_data_df.loc[input_counts_df.index, \"dendrite_input\"] = input_counts_df[\n \"dendrite_inputs\"\n]\nmeta_data_df.loc[input_counts_df.index, \"axon_input\"] = input_counts_df[\"axon_inputs\"]\n\nprint(meta_data_df.head())\n\nmeta_data_dict = meta_data_df.to_dict(orient=\"index\")\n\n\n#%% Import the raw graphs\nnx_graphs_raw = {}\ndf_graphs_raw = {}\nfor graph_type in graph_types:\n print(graph_type)\n edgelist_path = data_path / data_date_graphs / (graph_type + \".csv\")\n adj = pd.read_csv(edgelist_path, index_col=0)\n graph = df_to_nx(adj, meta_data_dict)\n nx_graphs_raw[graph_type] = graph\n df_graphs_raw[graph_type] = adj\n print()\n\n\n#%% Normalize weights for the raw graphs\ndf_graphs_norm = {}\nnx_graphs_norm = {}\n\ninput_counts = input_counts_df[\"axon_inputs\"].values\n\ninput_counts[input_counts == 0] = 1\nfor graph_type in [\"axon-axon\", \"dendrite-axon\"]:\n print(graph_type)\n df_adj_raw = df_graphs_raw[graph_type]\n if (input_counts_df.index.values == adj.index.values).all():\n print(\"Same indexing!\")\n else:\n raise ValueError(\"Indexing of input counts file not the same!\")\n adj_raw = df_adj_raw.values\n adj_norm = adj_raw / input_counts[np.newaxis, :]\n print(adj_norm.sum(axis=0).max())\n df_adj_norm = pd.DataFrame(\n index=df_adj_raw.index, columns=df_adj_raw.columns, data=adj_norm\n )\n df_graphs_norm[graph_type] = df_adj_norm\n graph = df_to_nx(df_adj_norm, meta_data_dict)\n nx_graphs_norm[graph_type] = graph\n print()\n\ninput_counts = 
input_counts_df[\"dendrite_inputs\"].values\ninput_counts[input_counts == 0] = 1\nfor graph_type in [\"axon-dendrite\", \"dendrite-dendrite\"]:\n print(graph_type)\n df_adj_raw = df_graphs_raw[graph_type]\n if (input_counts_df.index.values == adj.index.values).all():\n print(\"Same indexing!\")\n adj_raw = df_adj_raw.values\n adj_norm = adj_raw / input_counts[np.newaxis, :]\n print(adj_norm.sum(axis=0).max())\n df_adj_norm = pd.DataFrame(\n index=df_adj_raw.index, columns=df_adj_raw.columns, data=adj_norm\n )\n df_graphs_norm[graph_type] = df_adj_norm\n graph = df_to_nx(df_adj_norm, meta_data_dict)\n nx_graphs_norm[graph_type] = graph\n print()\n\n\n#%% All-all graph\ntotal_input = (\n input_counts_df[\"dendrite_inputs\"].values + input_counts_df[\"axon_inputs\"].values\n)\ntotal_input[total_input == 0] = 1\n\nall_adj_raw = np.zeros_like(adj_norm)\nfor graph_type in graph_types:\n all_adj_raw += df_graphs_raw[graph_type].values\n\ndf_all_raw = pd.DataFrame(\n index=df_adj_raw.index, columns=df_adj_raw.columns, data=all_adj_raw\n)\n\nnx_all_raw = df_to_nx(df_all_raw, meta_data_dict)\n\nall_adj_norm = all_adj_raw / total_input[np.newaxis, :]\ndf_all_norm = pd.DataFrame(\n index=df_adj_raw.index, columns=df_adj_raw.columns, data=all_adj_norm\n)\n\nnx_all_norm = df_to_nx(df_all_norm, meta_data_dict)\n\n#%% Save\n\nout_graphs = []\n[out_graphs.append(i) for i in nx_graphs_raw.values()]\n[print(i) for i in nx_graphs_raw.keys()]\nsave_names = [\"Gaa\", \"Gad\", \"Gda\", \"Gdd\"]\n[out_graphs.append(i) for i in nx_graphs_norm.values()]\n[print(i) for i in nx_graphs_norm.keys()]\nsave_names += [\"Gaan\", \"Gdan\", \"Gadn\", \"Gddn\"]\nout_graphs.append(nx_all_raw)\nsave_names.append(\"G\")\nout_graphs.append(nx_all_norm)\nsave_names.append(\"Gn\")\n\nfor name, graph in zip(save_names, out_graphs):\n nx.write_graphml(graph, output_path / (name + \".graphml\"))\n\nmeta_data_df.to_csv(output_path / \"meta_data.csv\")\n\n#%% verify things are right\nfor name, graph_wrote in zip(save_names, out_graphs):\n print(name)\n graph_read = nx.read_graphml(output_path / (name + \".graphml\"))\n adj_read = nx.to_numpy_array(graph_read)\n adj_wrote = nx.to_numpy_array(graph_wrote)\n print(np.array_equal(adj_read, adj_wrote))\n graph_loader = load_networkx(name, version=data_date_graphs)\n adj_loader = nx.to_numpy_array(graph_loader)\n print(np.array_equal(adj_wrote, adj_loader))\n print()\n\n"
] | [
[
"numpy.diag",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.mean",
"sklearn.metrics.adjusted_rand_score",
"numpy.where",
"numpy.ix_",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"matplotlib.cm.ScalarMappable",
"numpy.count_nonzero",
"numpy.zeros",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.median",
"numpy.errstate",
"numpy.argsort",
"numpy.sum",
"matplotlib.colors.LogNorm",
"numpy.random.seed",
"numpy.isinf"
],
[
"pandas.concat",
"pandas.Series",
"numpy.random.seed",
"numpy.power",
"numpy.isnan",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.full",
"numpy.concatenate",
"numpy.log10",
"numpy.vectorize",
"numpy.repeat",
"numpy.array",
"sklearn.decomposition.PCA"
],
[
"numpy.where",
"matplotlib.pyplot.figure"
],
[
"numpy.diag",
"matplotlib.ticker.MultipleLocator",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.arctan2",
"sklearn.metrics.adjusted_rand_score",
"scipy.optimize.linear_sum_assignment",
"numpy.ix_",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"numpy.eye",
"matplotlib.pyplot.close",
"numpy.quantile",
"numpy.log10",
"numpy.linalg.eigh",
"scipy.linalg.orthogonal_procrustes",
"numpy.logical_and",
"numpy.sum",
"matplotlib.patches.Ellipse",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.linalg.norm",
"numpy.vectorize",
"matplotlib.ticker.MaxNLocator"
],
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.twinx",
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"numpy.unique",
"matplotlib.pyplot.MaxNLocator",
"numpy.median",
"numpy.union1d",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.subplot2grid",
"numpy.vectorize",
"matplotlib.pyplot.suptitle",
"numpy.where",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
],
[
"numpy.random.seed",
"numpy.quantile",
"matplotlib.pyplot.subplots",
"numpy.log10",
"sklearn.cluster.AgglomerativeClustering"
],
[
"pandas.concat",
"pandas.read_csv",
"pandas.Series",
"numpy.array_equal",
"numpy.unique",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.vectorize",
"numpy.zeros_like",
"numpy.array",
"numpy.where",
"numpy.isin"
],
[
"matplotlib.pyplot.gca",
"numpy.allclose",
"numpy.nan_to_num",
"numpy.nansum",
"numpy.sum"
],
[
"numpy.random.seed",
"numpy.linspace"
],
[
"numpy.linspace"
],
[
"numpy.can_cast",
"numpy.array",
"numpy.zeros"
],
[
"sklearn.metrics.pairwise_distances",
"numpy.ix_",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.gca",
"numpy.random.seed",
"numpy.unique",
"scipy.linalg.orthogonal_procrustes",
"numpy.quantile",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.linalg.norm",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.log10",
"matplotlib.pyplot.close",
"numpy.array"
],
[
"pandas.read_csv",
"numpy.array_equal",
"numpy.unique",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.vectorize",
"numpy.zeros_like",
"numpy.where",
"numpy.isin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
theoptips/PySyft | [
"4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc",
"4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc"
] | [
"examples/tutorials/advanced/websockets-example-MNIST-parallel/run_websocket_client.py",
"syft/federated/federated_client.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms, datasets\n\nimport logging\nimport argparse\nimport sys\nimport asyncio\nimport numpy as np\n\nimport syft as sy\nfrom syft import workers\nfrom syft.frameworks.torch.federated import utils\n\nlogger = logging.getLogger(__name__)\n\nLOG_INTERVAL = 25\n\n\n# Loss function\[email protected]\ndef loss_fn(pred, target):\n return F.nll_loss(input=pred, target=target)\n\n\n# Model\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4 * 4 * 50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4 * 4 * 50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef define_and_get_arguments(args=sys.argv[1:]):\n parser = argparse.ArgumentParser(\n description=\"Run federated learning using websocket client workers.\"\n )\n parser.add_argument(\"--batch_size\", type=int, default=32, help=\"batch size of the training\")\n parser.add_argument(\n \"--test_batch_size\", type=int, default=128, help=\"batch size used for the test data\"\n )\n parser.add_argument(\n \"--training_rounds\", type=int, default=40, help=\"number of federated learning rounds\"\n )\n parser.add_argument(\n \"--federate_after_n_batches\",\n type=int,\n default=10,\n help=\"number of training steps performed on each remote worker before averaging\",\n )\n parser.add_argument(\"--lr\", type=float, default=0.1, help=\"learning rate\")\n parser.add_argument(\"--cuda\", action=\"store_true\", help=\"use cuda\")\n parser.add_argument(\"--seed\", type=int, default=1, help=\"seed used for randomization\")\n parser.add_argument(\"--save_model\", action=\"store_true\", help=\"if set, model will be saved\")\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"store_true\",\n help=\"if set, websocket client workers will be started in verbose mode\",\n )\n\n args = parser.parse_args(args=args)\n return args\n\n\nasync def fit_model_on_worker(\n worker: workers.WebsocketClientWorker,\n traced_model: torch.jit.ScriptModule,\n batch_size: int,\n curr_round: int,\n max_nr_batches: int,\n lr: float,\n):\n \"\"\"Send the model to the worker and fit the model on the worker's training data.\n\n Args:\n worker: Remote location, where the model shall be trained.\n traced_model: Model which shall be trained.\n batch_size: Batch size of each training step.\n curr_round: Index of the current training round (for logging purposes).\n max_nr_batches: If > 0, training on worker will stop at min(max_nr_batches, nr_available_batches).\n lr: Learning rate of each training step.\n\n Returns:\n A tuple containing:\n * worker_id: Union[int, str], id of the worker.\n * improved model: torch.jit.ScriptModule, model after training at the worker.\n * loss: Loss on last training batch, torch.tensor.\n \"\"\"\n train_config = sy.TrainConfig(\n model=traced_model,\n loss_fn=loss_fn,\n batch_size=batch_size,\n shuffle=True,\n max_nr_batches=max_nr_batches,\n epochs=1,\n lr=lr,\n )\n train_config.send(worker)\n logger.info(\n \"Training round %s, calling fit on worker: %s, lr = %s\",\n curr_round,\n worker.id,\n \"{:.3f}\".format(train_config.lr),\n )\n loss = await worker.async_fit(dataset_key=\"mnist\", return_ids=[0])\n logger.info(\"Training round: %s, worker: %s, 
avg_loss: %s\", curr_round, worker.id, loss.mean())\n model = train_config.model_ptr.get().obj\n return worker.id, model, loss\n\n\ndef evaluate_models_on_test_data(test_loader, results):\n np.set_printoptions(formatter={\"float\": \"{: .0f}\".format})\n for worker_id, worker_model, _ in results:\n evaluate_model(worker_id, worker_model, \"cpu\", test_loader, print_target_hist=False)\n\n\ndef evaluate_model(worker_id, model, device, test_loader, print_target_hist=False):\n model.eval()\n test_loss = 0.0\n correct = 0\n hist_target = np.zeros(10)\n hist_pred = np.zeros(10)\n\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n hist, _ = np.histogram(target, bins=10, range=(0, 10))\n hist_target += hist\n output = model(data)\n test_loss += loss_fn(output, target).item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n hist, _ = np.histogram(pred, bins=10, range=(0, 10))\n hist_pred += hist\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n if print_target_hist:\n logger.info(\"Target histogram: %s\", hist_target)\n logger.info(\"Prediction hist.: %s\", hist_pred)\n\n logger.info(\n \"%s: Test set: Average loss: %s, Accuracy: %s/%s (%s)\",\n worker_id,\n \"{:.4f}\".format(test_loss),\n correct,\n len(test_loader.dataset),\n \"{:.2f}\".format(100.0 * correct / len(test_loader.dataset)),\n )\n\n\nasync def main():\n args = define_and_get_arguments()\n\n hook = sy.TorchHook(torch)\n\n kwargs_websocket = {\"host\": \"localhost\", \"hook\": hook, \"verbose\": args.verbose}\n alice = workers.WebsocketClientWorker(id=\"alice\", port=8777, **kwargs_websocket)\n bob = workers.WebsocketClientWorker(id=\"bob\", port=8778, **kwargs_websocket)\n charlie = workers.WebsocketClientWorker(id=\"charlie\", port=8779, **kwargs_websocket)\n\n worker_instances = [alice, bob, charlie]\n\n use_cuda = args.cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\n\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"../data\",\n train=False,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n ),\n ),\n batch_size=args.test_batch_size,\n shuffle=False,\n drop_last=False,\n **kwargs,\n )\n\n model = Net().to(device)\n\n (data, target) = test_loader.__iter__().next()\n traced_model = torch.jit.trace(model, data)\n learning_rate = args.lr\n\n for curr_round in range(1, args.training_rounds + 1):\n logger.info(\"Starting training round %s/%s\", curr_round, args.training_rounds)\n\n results = await asyncio.gather(\n *[\n fit_model_on_worker(\n worker=worker,\n traced_model=traced_model,\n batch_size=args.batch_size,\n curr_round=curr_round,\n max_nr_batches=args.federate_after_n_batches,\n lr=learning_rate,\n )\n for worker in worker_instances\n ]\n )\n models = {}\n loss_values = {}\n\n test_models = curr_round % 10 == 1 or curr_round == args.training_rounds\n if test_models:\n evaluate_models_on_test_data(test_loader, results)\n\n for worker_id, worker_model, worker_loss in results:\n if worker_model is not None:\n models[worker_id] = worker_model\n loss_values[worker_id] = worker_loss\n\n traced_model = utils.federated_avg(models)\n if test_models:\n evaluate_model(\n \"Federated model\", traced_model, \"cpu\", test_loader, 
print_target_hist=True\n )\n\n # decay learning rate\n learning_rate = max(0.98 * learning_rate, args.lr * 0.01)\n\n if args.save_model:\n # save the final federated model rather than the untrained initial one\n torch.save(traced_model.state_dict(), \"mnist_cnn.pt\")\n\n\nif __name__ == \"__main__\":\n # Logging setup\n logger = logging.getLogger(\"run_websocket_client\")\n FORMAT = \"%(asctime)s %(levelname)s %(filename)s(l:%(lineno)d, p:%(process)d) - %(message)s\"\n logging.basicConfig(format=FORMAT)\n logger.setLevel(level=logging.DEBUG)\n\n # Websockets setup\n websockets_logger = logging.getLogger(\"websockets\")\n websockets_logger.setLevel(logging.INFO)\n websockets_logger.addHandler(logging.StreamHandler())\n\n # Run main\n asyncio.get_event_loop().run_until_complete(main())\n",
"import torch as th\nfrom torch.utils.data import BatchSampler, RandomSampler, SequentialSampler\n\nfrom syft.generic import ObjectStorage\nfrom syft.federated.train_config import TrainConfig\n\n\nclass FederatedClient(ObjectStorage):\n \"\"\"A Client able to execute federated learning in local datasets.\"\"\"\n\n def __init__(self, datasets=None):\n super().__init__()\n self.datasets = datasets if datasets is not None else dict()\n self.optimizer = None\n self.train_config = None\n\n def add_dataset(self, dataset, key: str):\n self.datasets[key] = dataset\n\n def remove_dataset(self, key: str):\n if key in self.datasets:\n del self.datasets[key]\n\n def set_obj(self, obj: object):\n \"\"\"Registers objects checking if which objects it should cache.\n\n Args:\n obj: An object to be registered.\n \"\"\"\n if isinstance(obj, TrainConfig):\n self.train_config = obj\n self.optimizer = None\n else:\n super().set_obj(obj)\n\n def _build_optimizer(\n self, optimizer_name: str, model, optimizer_args: dict\n ) -> th.optim.Optimizer:\n \"\"\"Build an optimizer if needed.\n\n Args:\n optimizer_name: A string indicating the optimizer name.\n optimizer_args: A dict containing the args used to initialize the optimizer.\n Returns:\n A Torch Optimizer.\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer\n\n if optimizer_name in dir(th.optim):\n optimizer = getattr(th.optim, optimizer_name)\n self.optimizer = optimizer(model.parameters(), **optimizer_args)\n else:\n raise ValueError(\"Unknown optimizer: {}\".format(optimizer_name))\n return self.optimizer\n\n def fit(self, dataset_key: str, **kwargs):\n \"\"\"Fits a model on the local dataset as specified in the local TrainConfig object.\n\n Args:\n dataset_key: Identifier of the local dataset that shall be used for training.\n **kwargs: Unused.\n\n Returns:\n loss: Training loss on the last batch of training data.\n \"\"\"\n if self.train_config is None:\n raise ValueError(\"TrainConfig not defined.\")\n\n if dataset_key not in self.datasets:\n raise ValueError(\"Dataset {} unknown.\".format(dataset_key))\n\n model = self.get_obj(self.train_config._model_id).obj\n loss_fn = self.get_obj(self.train_config._loss_fn_id).obj\n\n self._build_optimizer(\n self.train_config.optimizer, model, optimizer_args=self.train_config.optimizer_args\n )\n\n return self._fit(model=model, dataset_key=dataset_key, loss_fn=loss_fn)\n\n def _create_data_loader(self, dataset_key: str, shuffle: bool = False):\n data_range = range(len(self.datasets[dataset_key]))\n if shuffle:\n sampler = RandomSampler(data_range)\n else:\n sampler = SequentialSampler(data_range)\n data_loader = th.utils.data.DataLoader(\n self.datasets[dataset_key],\n batch_size=self.train_config.batch_size,\n sampler=sampler,\n num_workers=0,\n )\n return data_loader\n\n def _fit(self, model, dataset_key, loss_fn):\n model.train()\n data_loader = self._create_data_loader(\n dataset_key=dataset_key, shuffle=self.train_config.shuffle\n )\n\n loss = None\n iteration_count = 0\n\n for _ in range(self.train_config.epochs):\n for (data, target) in data_loader:\n # Set gradients to zero\n self.optimizer.zero_grad()\n\n # Update model\n output = model(data)\n loss = loss_fn(target=target, pred=output)\n loss.backward()\n self.optimizer.step()\n\n # Update and check interation count\n iteration_count += 1\n if iteration_count >= self.train_config.max_nr_batches >= 0:\n break\n\n return loss\n"
] | [
[
"torch.jit.trace",
"numpy.histogram",
"torch.nn.functional.nll_loss",
"torch.nn.functional.log_softmax",
"torch.manual_seed",
"numpy.set_printoptions",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.nn.functional.max_pool2d",
"numpy.zeros"
],
[
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
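Note: the PySyft row above hands model aggregation to utils.federated_avg(models). As a rough, minimal sketch of what such parameter averaging amounts to, assuming plain PyTorch modules with identical architectures (average_state_dicts is an illustrative name, not a PySyft API):

import torch

def average_state_dicts(models):
    """Average parameters across models that share one architecture."""
    state_dicts = [m.state_dict() for m in models.values()]
    avg = {}
    for key in state_dicts[0]:
        # stack each worker's copy of this tensor and take the elementwise mean
        avg[key] = torch.stack([sd[key].float() for sd in state_dicts]).mean(dim=0)
    return avg

# usage sketch: load the averaged weights back into one model
# model.load_state_dict(average_state_dicts({"alice": model_a, "bob": model_b}))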
Ali-ry/azureml-examples | [
"817ae89d2766dcafd70937a22cb3a80f100a2906"
] | [
"python-sdk/tutorials/automl-with-azureml/forecasting-recipes-univariate/forecasting_script.py"
] | [
"\"\"\"\r\nThis is the script that is executed on the compute instance. It relies\r\non the model.pkl file which is uploaded along with this script to the\r\ncompute instance.\r\n\"\"\"\r\n\r\nimport argparse\r\nfrom azureml.core import Dataset, Run\r\nfrom azureml.automl.core.shared.constants import TimeSeriesInternal\r\nfrom sklearn.externals import joblib\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\r\n \"--target_column_name\",\r\n type=str,\r\n dest=\"target_column_name\",\r\n help=\"Target Column Name\",\r\n)\r\nparser.add_argument(\r\n \"--test_dataset\", type=str, dest=\"test_dataset\", help=\"Test Dataset\"\r\n)\r\n\r\nargs = parser.parse_args()\r\ntarget_column_name = args.target_column_name\r\ntest_dataset_id = args.test_dataset\r\n\r\nrun = Run.get_context()\r\nws = run.experiment.workspace\r\n\r\n# get the input dataset by id\r\ntest_dataset = Dataset.get_by_id(ws, id=test_dataset_id)\r\n\r\nX_test = (\r\n test_dataset.drop_columns(columns=[target_column_name])\r\n .to_pandas_dataframe()\r\n .reset_index(drop=True)\r\n)\r\ny_test_df = (\r\n test_dataset.with_timestamp_columns(None)\r\n .keep_columns(columns=[target_column_name])\r\n .to_pandas_dataframe()\r\n)\r\n\r\n# generate forecast\r\nfitted_model = joblib.load(\"model.pkl\")\r\n# We have default quantiles values set as below(95th percentile)\r\nquantiles = [0.025, 0.5, 0.975]\r\npredicted_column_name = \"predicted\"\r\nPI = \"prediction_interval\"\r\nfitted_model.quantiles = quantiles\r\npred_quantiles = fitted_model.forecast_quantiles(X_test)\r\npred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(\r\n lambda x: \"[{}, {}]\".format(x[0], x[1]), axis=1\r\n)\r\nX_test[target_column_name] = y_test_df[target_column_name]\r\nX_test[PI] = pred_quantiles[PI]\r\nX_test[predicted_column_name] = pred_quantiles[0.5]\r\n# drop rows where prediction or actuals are nan\r\n# happens because of missing actuals\r\n# or at edges of time due to lags/rolling windows\r\nclean = X_test[\r\n X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)\r\n]\r\nclean.rename(columns={target_column_name: \"actual\"}, inplace=True)\r\n\r\nfile_name = \"outputs/predictions.csv\"\r\nexport_csv = clean.to_csv(file_name, header=True, index=False) # added Index\r\n\r\n# Upload the predictions into artifacts\r\nrun.upload_file(name=file_name, path_or_stream=file_name)\r\n"
] | [
[
"sklearn.externals.joblib.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
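Aside: the forecasting row above formats the 2.5th and 97.5th forecast quantiles into a printable interval column. A self-contained pandas sketch of just that step, where the DataFrame is toy data standing in for fitted_model.forecast_quantiles(X_test) and .iloc is used for unambiguous positional access inside apply:

import pandas as pd

quantiles = [0.025, 0.5, 0.975]

# toy stand-in for the quantile forecasts returned by the fitted model
pred_quantiles = pd.DataFrame({0.025: [9.1, 10.4], 0.5: [10.0, 11.2], 0.975: [10.9, 12.1]})

# render the outer quantiles as a "[lo, hi]" prediction-interval string per row
pred_quantiles["prediction_interval"] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(
    lambda x: "[{}, {}]".format(x.iloc[0], x.iloc[1]), axis=1
)

print(pred_quantiles[[0.5, "prediction_interval"]])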
nielsbril/best | [
"8a902293605f1bee1abf3ca66ae3708706658772"
] | [
"matching/matching.py"
] | [
"import pandas as pd\nimport argparse\nimport logging\nimport sys\nimport json\n\n\ndef get_best_logger(log_file, verbose):\n # Setup logger - (Python logger breaks PEP8 by default)\n logger = logging.getLogger(__name__)\n if verbose:\n logger.setLevel('DEBUG')\n # file_handler logs to file, stream_handler to console\n file_handler = logging.FileHandler(log_file)\n stream_handler = logging.StreamHandler()\n # formatter sets log format\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s : %(levelname)s - %(message)s')\n # add formatter to both handlers\n file_handler.setFormatter(formatter)\n stream_handler.setFormatter(formatter)\n # add both handlers to logger\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n return logger\n\n\ndef compare_addresses(args):\n \"\"\"Compare the addresses of two files\n \"\"\"\n logger.info('Started reading BOSA address file')\n try:\n bosa = pd.read_csv(args.input_file_1)\n logger.info('Read the BOSA address file')\n except IOError as io:\n logger.fatal(io)\n sys.exit(1)\n\n logger.info('Started reading comparison file')\n try:\n comparison = pd.read_csv(args.input_file_2)\n logger.info('Read the comparison file')\n except IOError as io:\n logger.fatal(io)\n sys.exit(1)\n\n comp_keys = []\n bosa_ids = []\n for comp_key, bosa_key in args.mapping.items():\n try:\n comp_keys.append(comp_key)\n bosa_ids.append(bosa.columns.get_loc(bosa_key))\n except KeyError as ke:\n logger.error(\n 'Column %s of column mapping (%s -> %s) not found in BOSA file', ke, comp_key, bosa_key)\n sys.exit(1)\n\n address_dict = {}\n logger.info('Building data structure to perform matching')\n for i, row in enumerate(bosa.values):\n if i % 50_000 == 0:\n logger.info('Processed %i / %i addresses', i, len(bosa))\n address_dict[tuple(el.lower() if type(\n el) == str else el for el in row[bosa_ids])] = row\n\n extended = perform_exact_matching(\n bosa, comparison, address_dict, comp_keys)\n\n try:\n extended.to_csv(args.output_file, index=False)\n except IOError as io:\n logger.fatal(io)\n sys.exit(1)\n\n\ndef perform_exact_matching(bosa, comparison, address_dict, comp_keys):\n \"\"\"Match the addresses in the comparison file and add address_id and coordinates when matched\n \"\"\"\n addr_id = bosa.columns.get_loc('address_id')\n lon_id = bosa.columns.get_loc('EPSG:4326_lon')\n lat_id = bosa.columns.get_loc('EPSG:4326_lat')\n\n extended = []\n logger.info('Performing matching')\n for i, row in comparison.iterrows():\n if i % 50_000 == 0:\n logger.info('Matched %i / %i addresses', i, len(comparison))\n try:\n key = tuple(el.lower() if type(el) ==\n str else el for el in row[comp_keys])\n except KeyError as ke:\n logger.error('Column %s not found in the comparison file', ke)\n sys.exit(1)\n if key in address_dict:\n # If the address is matched add address_id and coordinates to it\n data = address_dict[key]\n row['address_id'] = data[addr_id]\n row['EPSG:4326_lon'] = data[lon_id]\n row['EPSG:4326_lat'] = data[lat_id]\n extended.append(row)\n extended = pd.DataFrame(extended)\n # Convert column to int type that can handle NaN\n extended['address_id'] = extended['address_id'].astype('Int64')\n\n return extended\n\n\nif __name__ == \"__main__\":\n # Setup argument parser\n parser = argparse.ArgumentParser(\n description='Compare addresses between two csv files.')\n parser.add_argument(\n 'input_file_1', help='BOSA address file, in csv format')\n parser.add_argument(\n 'input_file_2', help='Address file to compare to BOSA address file, in csv format')\n 
parser.add_argument('output_file', help='Name of file to write output to')\n parser.add_argument('--mode', default='exact',\n choices=['exact'], help='How to compare the addresses.')\n parser.add_argument(\n '--mapping', default={}, type=json.loads, help='Column names to consider in the comparison and how they map to the \\\n column names of the BOSA address file. (as a json dict of {comparison_key: bosa_key})')\n parser.add_argument('--log_name', default=\"compare.log\",\n help='name of the log file')\n parser.add_argument('--verbose', action=\"store_true\",\n help=\"toggle verbose output\", default=False)\n\n args = parser.parse_args()\n\n logger = get_best_logger(args.log_name, args.verbose)\n\n compare_addresses(args)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
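Aside: the exact mode of matching.py above reduces to a dictionary keyed on normalized address tuples, probed once per comparison row. A minimal sketch of that lookup structure; the column names below are invented for illustration and do not follow the BOSA schema:

import pandas as pd

bosa = pd.DataFrame({
    "street": ["Main Street", "High Road"],
    "number": [1, 7],
    "address_id": [101, 102],
})

def normalize(values):
    # lower-case strings so lookups are case-insensitive, as in compare_addresses()
    return tuple(v.lower() if isinstance(v, str) else v for v in values)

# index every reference address by its normalized (street, number) key
address_dict = {normalize(row[["street", "number"]]): row for _, row in bosa.iterrows()}

match = address_dict.get(normalize(("MAIN STREET", 1)))
print(match["address_id"] if match is not None else "no match")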
spyke/spyke | [
"20934521de9c557924911cf6190690ac1c6f8e80",
"20934521de9c557924911cf6190690ac1c6f8e80",
"20934521de9c557924911cf6190690ac1c6f8e80",
"20934521de9c557924911cf6190690ac1c6f8e80"
] | [
"spyke/sort.py",
"spyke/core.py",
"spyke/nsx.py",
"demo/threadpooltest2.py"
] | [
"\"\"\"Spike sorting classes and window\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\n__authors__ = ['Martin Spacek', 'Reza Lotun']\n\nimport os\nimport sys\nimport time\nimport datetime\nfrom copy import copy\nimport operator\nimport random\nimport shutil\nimport hashlib\nimport multiprocessing as mp\n\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import QAction, QIcon, QApplication\n\nimport numpy as np\nimport scipy\nimport scipy.signal\n#from scipy.cluster.hierarchy import fclusterdata\n\nimport pylab as pl\n\nimport pyximport\npyximport.install(build_in_temp=False, inplace=True)\nfrom . import util # .pyx file\n\nfrom . import core\nfrom .core import (WaveForm, Gaussian, MAXLONGLONG, R, toiter, intround, printflush, lstrip,\n rstrip, lrstrip, pad, td2days, SpykeToolWindow, NList, NSList, dist,\n USList, ClusterChange, SpikeSelectionSlider, lrrep2Darrstripis, rollwin2D)\nfrom .detect import DEBUG\nfrom .surf import EPOCH\nfrom .plot import SpikeSortPanel, CLUSTERCOLOURDICT, WHITE\nfrom .__version__ import __version__\n\n#MAXCHANTOLERANCE = 100 # um\n\nNSLISTWIDTH = 70 # minimize nslist width, enough for 7 digit spike IDs\nPANELWIDTHPERCOLUMN = 120 # sort panel width per column of channels\nPANELHEIGHTPERROW = 50 # sort panel height per row of channels\nVSCROLLBARWIDTH = 14 # hack\nSORTWINDOWHEIGHT = 1035 # TODO: this should be set programmatically\nMINSORTWINDOWWIDTH = 566\n\nMEANWAVEMAXSAMPLES = 2000\nNPCSPERCHAN = 7\n\nPCALIB = 'mdp'\nICALIB = 'sklearn'\n\nDEFMINISI = 50 # default minimum ISI to check for on export, us\n\nMAXGROUPISI = 100000 # us (100 ms)\nMAXGROUPDT = 100000000 # us (100 s)\n\n\nclass Sort(object):\n \"\"\"A spike sorting session, in which you can detect spikes and sort them into Neurons.\n A .sort file is a single Python2-pickled Sort object. 
A .json file is a\n jsonpickle-pickled Sort object\"\"\"\n def __init__(self, detector=None, stream=None, tw=None):\n self.__version__ = __version__\n self.fname = ''\n self.user = ''\n self.notes = ''\n self.detector = detector # this Sort's current Detector object\n self.tw = tw # time window (us) relative to spike time\n self.stream = stream\n self.probe = stream.probe # only one probe design per sort allowed\n self.converter = stream.converter\n self.neurons = {}\n self.clusters = {} # neurons with multidm params scaled for plotting\n self.norder = [] # stores order of neuron ids display in nlist\n self.npcsperchan = NPCSPERCHAN\n\n def get_nextnid(self):\n \"\"\"nextnid is used to retrieve the next unique single unit ID\"\"\"\n nids = list(self.neurons)\n if len(nids) == 0:\n return 1 # single unit nids start at 1\n else:\n return max(max(nids) + 1, 1) # at least 1\n\n nextnid = property(get_nextnid)\n\n def get_nextmuid(self):\n \"\"\"nextmuid is used to retrieve the next unique multiunit ID\"\"\"\n nids = list(self.neurons)\n if len(nids) == 0:\n return -1 # multiunit ids start at -1\n else:\n return min(min(nids) - 1, -1) # at most -1\n\n nextmuid = property(get_nextmuid)\n\n def get_good(self):\n \"\"\"Return array of nids marked by user as 'good'\"\"\"\n good = []\n for neuron in self.neurons.values():\n try:\n if neuron.good:\n good.append(neuron.id)\n except AttributeError: # neuron is from older sort, no .good attrib\n neuron.good = False\n return np.asarray(good)\n\n def set_good(self, good):\n \"\"\"Set good flag to True for nids in good, False otherwise\"\"\"\n nids = list(self.neurons)\n assert np.all([ nid in nids for nid in good ]) # make sure all nids in good exist\n notgood = np.setdiff1d(nids, good)\n for nid in notgood:\n neuron = self.neurons[nid]\n neuron.good = False\n for nid in good:\n neuron = self.neurons[nid]\n neuron.good = True\n\n good = property(get_good, set_good)\n\n def get_stream(self):\n try:\n return self._stream\n except AttributeError:\n # this is likely a brand new sort, has yet to be assigned a Stream\n return None\n\n def set_stream(self, stream=None):\n \"\"\"Check stream type and name and probe type, and restore filtmeth, car, sampfreq and\n shcorrect to stream when binding/modifying stream to self\"\"\"\n oldstream = self.stream\n if stream != None and oldstream != None:\n # do stream types match?\n if type(stream) != type(oldstream):\n raise ValueError(\"Stream types don't match: %s, %s\"\n % (type(oldstream), type(stream)))\n # do stream probe types match?\n if type(stream.probe) != type(oldstream.probe):\n raise ValueError(\"Stream probe types don't match: %s, %s\"\n % (type(oldstream.probe), type(stream.probe)))\n # is one stream fname a superset of the other?\n if (stream.fname not in oldstream.fname) and (oldstream.fname not in stream.fname):\n raise ValueError(\"Stream file names are not supersets of each other: %s, %s\"\n % (oldstream.fname, stream.fname))\n else:\n print('Stream file names are similar enough to proceed: %s, %s'\n % (stream.fname, oldstream.fname))\n try:\n stream.filtmeth = self.filtmeth\n stream.car = self.car\n stream.sampfreq = self.sampfreq\n stream.shcorrect = self.shcorrect\n except AttributeError:\n pass # one of the above aren't bound\n self._stream = stream # set it\n print('Bound stream %r to sort %r' % (stream.fname, self.fname))\n # now that tres is known, calculate window timepoints wrt spike time:\n self.calc_twts_twi()\n\n stream = property(get_stream, set_stream)\n\n def calc_twts_twi(self):\n 
\"\"\"Calculate temporal window timepoints wrt spike time, and the indices of these\n timepoints wrt spike time\"\"\"\n tres = self.tres\n tw = self.tw\n twts = np.arange(tw[0], tw[1], tres)\n twts += twts[0] % tres # get rid of mod, so twts go through zero\n self.twts = twts\n self.twi = intround(twts[0] / tres), intround(twts[-1] / tres)\n #info('twi = %s' % (self.twi,))\n\n def update_tw(self, tw):\n \"\"\"Update tw and everything that depends on it. Note that this shouldn't\n be called directly by the user. Call SpykeWindow.update_spiketw() instead\"\"\"\n oldtw = self.tw\n self.tw = tw\n self.calc_twts_twi()\n dtw = np.asarray(tw) - np.asarray(oldtw) # new minus old\n self.spikes['t0'] += dtw[0]\n self.spikes['t1'] += dtw[1]\n self.spikes['tis'] = self.spikes['tis'] - intround(dtw[0] / self.tres)\n # recalculate any existing templates:\n for neuron in self.neurons.values():\n if neuron.wave.data != None:\n neuron.update_wave()\n print('WARNING: all spike waveforms need to be reloaded!')\n\n def get_tres(self):\n return self.stream.tres\n\n tres = property(get_tres)\n\n def __getstate__(self):\n \"\"\"Get object state for pickling\"\"\"\n # copy it cuz we'll be making changes, this is fast because it's just a shallow copy\n d = self.__dict__.copy()\n # Spikes and wavedata arrays are (potentially) saved separately.\n # usids and PCs/ICs can be regenerated from the spikes array.\n for attr in ['spikes', 'wavedata', 'usids', 'X', 'Xhash']:\n # keep _stream during normal pickling for multiprocessing, but remove it\n # manually when pickling to sort file\n try: del d[attr]\n except KeyError: pass\n return d\n\n def get_nspikes(self):\n try: return len(self.spikes)\n except AttributeError: return 0\n\n nspikes = property(get_nspikes)\n\n def update_usids(self):\n \"\"\"Update usids, which is an array of indices of unsorted spikes\"\"\"\n nids = self.spikes['nid']\n self.usids, = np.where(nids == 0) # 0 means unclustered\n\n def get_spikes_sortedby(self, attr='id'):\n \"\"\"Return array of all spikes, sorted by attribute 'attr'\"\"\"\n vals = self.spikes[attr]\n spikes = self.spikes[vals.argsort()]\n return spikes\n\n def get_wave(self, sid):\n \"\"\"Return WaveForm corresponding to spike sid\"\"\"\n spikes = self.spikes\n nchans = spikes['nchans'][sid]\n chans = spikes['chans'][sid, :nchans]\n t0 = spikes['t0'][sid]\n t1 = spikes['t1'][sid]\n wavedata = self.wavedata[sid, 0:nchans]\n ts = np.arange(t0, t1, self.tres) # build them up\n return WaveForm(data=wavedata, ts=ts, chans=chans, tres=self.tres)\n\n def get_maxchan_wavedata(self, sid=None, nid=None):\n \"\"\"Return wavedata of maxchan of spike sid or neuron nid\"\"\"\n if sid != None:\n assert nid == None\n chani = self.spikes['chani'][sid]\n return self.wavedata[sid, chani]\n elif nid != None:\n assert sid == None\n neuron = self.neurons[nid]\n chani, = np.where(neuron.chans == neuron.chan)\n assert len(chani) == 1\n chani = chani[0] # pull out of length 1 array\n return neuron.wave.data[chani]\n\n def get_mean_wave(self, sids, nid=None):\n \"\"\"Return the mean and std waveform of spike waveforms in sids\"\"\"\n spikes = self.spikes\n nsids = len(sids)\n if nsids > MEANWAVEMAXSAMPLES:\n step = nsids // MEANWAVEMAXSAMPLES + 1 \n s = (\"get_mean_wave() sampling every %d spikes instead of all %d\"\n % (step, nsids))\n if nid != None:\n s = \"neuron %d: \" % nid + s\n print(s)\n sids = sids[::step]\n nsids = len(sids) # update\n \n chanss = spikes['chans'][sids]\n nchanss = spikes['nchans'][sids]\n chanslist = [ chans[:nchans] for chans, 
nchans in zip(chanss, nchanss) ] # list of arrays\n chanpopulation = np.concatenate(chanslist)\n groupchans = np.unique(chanpopulation) # comes out sorted\n \n wavedata = self.wavedata[sids]\n if wavedata.ndim == 2: # should be 3, get only 2 if nsids == 1\n wavedata.shape = 1, wavedata.shape[0], wavedata.shape[1] # give it a singleton 3rd dim\n nt = wavedata.shape[-1]\n maxnchans = len(groupchans)\n data = np.zeros((maxnchans, nt))\n # all spikes have same nt, but not necessarily same nchans, keep track of\n # how many spikes contributed to each of the group's chans\n nspikes = np.zeros((maxnchans, 1), dtype=int)\n for chans, wd in zip(chanslist, wavedata):\n chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans\n data[chanis] += wd[:len(chans)] # accumulate\n nspikes[chanis] += 1 # inc spike count for this spike's chans\n #t0 = time.time()\n data /= nspikes # normalize all data points appropriately, this is now the mean\n var = np.zeros((maxnchans, nt))\n for chans, wd in zip(chanslist, wavedata):\n chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans\n var[chanis] += (wd[:len(chans)] - data[chanis]) ** 2 # accumulate 2nd moment\n var /= nspikes # normalize all data points appropriately, this is now the variance\n std = np.sqrt(var)\n # keep only those chans that at least 1/2 the spikes contributed to\n bins = list(groupchans) + [np.inf] # concatenate rightmost bin edge\n hist, bins = np.histogram(chanpopulation, bins=bins)\n chans = groupchans[hist >= nsids/2]\n chanis = groupchans.searchsorted(chans)\n data = data[chanis]\n std = std[chanis]\n return WaveForm(data=data, std=std, chans=chans)\n\n def check_ISIs(self, nids='good'):\n \"\"\"Check that interspike intervals of spikes in each nid never fall below DEFMINISI\"\"\"\n print('Checking inter-spike intervals')\n if nids == 'good':\n nids = self.good\n elif nids == 'all':\n nids = sorted(self.neurons)\n for nid in nids:\n neuron = self.neurons[nid]\n spikets = self.spikes['t'][neuron.sids] # should be a sorted copy\n assert spikets.flags['OWNDATA'] # safe to modify in place\n spikets.sort() # just in case it isn't perfectly sorted\n ndupl = (np.diff(spikets) < DEFMINISI).sum()\n if ndupl > 0:\n msg = ('n%d has %d duplicate spikes (given DEFMINISI=%d us).\\n'\n 'Remove duplicate spikes with the ISI tool in the Verify tab'\n % (nid, ndupl, DEFMINISI))\n raise RuntimeError(msg)\n\n def check_wavealign(self, nids='good', maxdti=1):\n \"\"\"Check that each neurons's primary peak on the max chan is no more than +/- maxdti\n timepoints away from the t=0 alignment timepoint\"\"\"\n print('Checking neuron mean waveform alignment')\n if nids == 'good':\n nids = self.good\n elif nids == 'all':\n nids = sorted(self.neurons)\n nt = self.twi[1] - self.twi[0] + 1 # expected number of points of each chan's wavedata\n for nid in nids:\n neuron = self.neurons[nid]\n wd = self.get_maxchan_wavedata(nid=nid)\n assert len(wd) == nt\n # find biggest positive and negative peaks, check which comes first, ensure\n # the primary peak is within maxdti of t=0 alignment timepoint:\n ppeakis, _ = scipy.signal.find_peaks(wd) # positive peak indices\n npeakis, _ = scipy.signal.find_peaks(-wd) # negative peak indices\n pmaxi = ppeakis[wd[ppeakis].argmax()] # max positive peak index\n nmaxi = npeakis[wd[npeakis].argmin()] # max negative peak index\n if nmaxi < pmaxi: # usual case: -ve then +ve peak\n peak1i = nmaxi\n else: # less common: +ve then -ve peak, make sure +ve peak is worthy of alignment\n 
pmax, nmax = wd[pmaxi], wd[nmaxi]\n if pmax > abs(nmax): # +ve peak is bigger than -ve peak, align to +ve peak\n peak1i = pmaxi\n else:\n peak1i = nmaxi # default to -ve peak\n alignti = 0 - self.twi[0] # +ve\n dti = peak1i - alignti\n #print(\"n%d: dti=%d\" % (nid, dti))\n if abs(dti) > maxdti:\n peak1uV = self.converter.AD2uV(wd[peak1i])\n peak1us = intround(self.tres*(peak1i-alignti))\n msg = ('Primary peak (%+d uV @ t=%d us) of n%d is %+d timepoints away from '\n 'the t=0 us alignment point. Shift it closer and try again'\n % (peak1uV, peak1us, nid, dti))\n raise RuntimeError(msg)\n\n def check_wavepadding(self, nids='good', npad=2):\n \"\"\"Check if any spikes are edge padded, presumably due to being shifted but not\n reloaded. For robustness, check for consistent signs of padding across all channels.\n An edge is considered padded if it does not change over npad datapoints\"\"\"\n print('Checking spike waveform padding')\n assert npad >= 2 # need at least 2 points to do a diff\n if nids == 'good':\n nids = self.good\n elif nids == 'all':\n nids = sorted(self.neurons)\n for nid in nids:\n neuron = self.neurons[nid]\n for sid in neuron.sids:\n wd = self.wavedata[sid] # multichannel waveform data\n # are left and right edges of wavedata identical for npad number of points?\n l, r = wd[:, :npad], wd[:, -npad:] # shape (nchans, npad)\n leftpadded = (np.diff(l, axis=1) == 0).all()\n rightpadded = (np.diff(r, axis=1) == 0).all()\n # handle case where spike is right after or right before a 0-padded\n # region of data due to gaps between experiments:\n if leftpadded:\n if (wd[:, 0] == 0).all():\n leftpadded = False\n if rightpadded:\n if (wd[:, -1] == 0).all():\n rightpadded = False\n if leftpadded or rightpadded:\n msg = ('n%d has s%d that looks like it has been padded.\\n'\n 'leftpadded, rightpadded = %r, %r\\n'\n 'Reload s%d or n%d or all spikes and try again'\n % (nid, sid, leftpadded, rightpadded, sid, nid))\n raise RuntimeError(msg)\n\n def check_contiguous_nids(self):\n \"\"\"Check that neuron IDs are contiguous (no gaps)\"\"\"\n print('Checking that neuron IDs are contiguous')\n nids = np.array(list(self.neurons))\n nids = nids[nids > 0] # only consider +ve nids\n nids.sort()\n if (np.diff(nids) != 1).any():\n raise RuntimeError('Neuron IDs are not contiguous, renumber all and try again')\n\n def exportptcsfiles(self, basepath, sortpath, user='', notes=''):\n \"\"\"Export spike data to binary .ptcs files under basepath, one file per recording\"\"\"\n # First check to make sure various things are OK before exporting:\n self.check_ISIs()\n self.check_wavealign()\n self.check_wavepadding()\n self.check_contiguous_nids()\n spikes = self.spikes\n exportdt = str(datetime.datetime.now()) # get an export datetime stamp\n exportdt = exportdt.split('.')[0] # ditch the us\n if self.stream.is_multi(): # self.stream is a MultiStream\n streams = self.stream.streams\n else: # self.stream is a single Stream\n streams = [self.stream]\n print('Exporting \"good\" clusters to:')\n # do a separate export for each recording:\n # absolute start and stop times of all streams, rounded to nearest raw timepoint:\n tranges = self.stream.tranges\n t0 = tranges[0, 0] # absolute start time of first stream\n for stream, trange in zip(streams, tranges):\n abst0 = trange[0] # absolute start time of this stream relative to t0\n # time delta between this stream and first stream, to nearest raw timepoint, us:\n dt = abst0 - t0\n dt = intround(dt) # to nearest int us\n self.exportptcsfile(stream, basepath, dt, exportdt, 
sortpath,\n user=user, notes=notes)\n\n def exportptcsfile(self, stream, basepath, dt, exportdt, sortpath, user='', notes=''):\n \"\"\"Export spike data of all \"good\" spikes to binary .ptcs file in basepath.\n Constrain to spikes in stream, and undo any time delta in spike times.\n dt is the integer time difference between start of stream and start of first stream in\n the track, rounded to the nearest us (spike times are stored as int64 us in .ptcs)\"\"\"\n\n # build up list of PTCSNeuronRecords that have spikes in this stream,\n # and tally their spikes\n nsamplebytes = 4 # float32\n nrecs = []\n nspikes = 0\n # only export neurons marked as \"good\", could be single or multi unit:\n for nid in sorted(self.good):\n neuron = self.neurons[nid]\n spikets = self.spikes['t'][neuron.sids] # should be a sorted copy\n assert spikets.flags['OWNDATA'] # safe to modify in place\n spikets.sort() # just in case it isn't perfectly sorted\n spikets -= dt # export spike times relative to t=0 of this recording\n # only include spikes that occurred during this recording\n lo, hi = spikets.searchsorted([stream.t0, stream.t1])\n spikets = spikets[lo:hi]\n if len(spikets) == 0:\n continue # don't save empty neurons\n nrec = PTCSNeuronRecord(neuron, spikets, nsamplebytes, descr='')\n nrecs.append(nrec)\n nspikes += len(spikets)\n nneurons = len(nrecs)\n\n # create the header and write everything to file:\n path = os.path.join(basepath, stream.srcfnameroot)\n try: os.mkdir(path)\n except OSError: pass # path already exists?\n fname = stream.srcfnameroot + '.ptcs'\n fullfname = os.path.join(path, fname)\n header = PTCSHeader(self, sortpath, stream, nneurons, nspikes, nsamplebytes,\n fullfname, exportdt, user=user, notes=notes)\n \n with open(fullfname, 'wb') as f:\n header.write(f)\n for nrec in nrecs:\n nrec.write(f)\n print(fullfname)\n\n def exportcsv(self, fname):\n \"\"\"Export all \"good\" spikes to a .csv file with time (s), nid, and maxchan as the\n columns\"\"\"\n sids = []\n #chans = []\n for nid in sorted(self.good):\n neuron = self.neurons[nid]\n sids.append(neuron.sids)\n # the alternative is to export each spike's unit's channel:\n #chans.append(np.tile(neuron.chan, neuron.nspikes))\n sids = np.hstack(sids)\n spikes = self.spikes[sids]\n tsecs = spikes['t'] / 1e6 # convert from us to s\n nids = spikes['nid']\n chans = spikes['chan']\n #chans = np.hstack(chans)\n data = np.column_stack([tsecs, nids, chans])\n print('Exporting (tsec, nid, chan) of all spikes marked as \"good\" to %s' % fname)\n np.savetxt(fname, data, fmt='%.6f, %d, %d')\n\n def exporttschid(self, basepath):\n \"\"\"Export int64 (timestamp, channel, neuron id) 3 tuples to binary file\"\"\"\n raise NotImplementedError('Needs to be redone to work with multiple streams')\n spikes = self.spikes[self.spikes['nid'] > 0] # don't export unsorted/multiunit spikes\n dt = str(datetime.datetime.now()) # get an export timestamp\n dt = dt.split('.')[0] # ditch the us\n dt = dt.replace(' ', '_')\n dt = dt.replace(':', '.')\n srffnameroot = srffnameroot.replace(' ', '_')\n tschidfname = dt + '_' + srffnameroot + '.tschid'\n tschid = np.empty((len(spikes), 3), dtype=np.int64)\n tschid[:, 0] = spikes['t']\n tschid[:, 1] = spikes['chan']\n tschid[:, 2] = spikes['nid']\n tschid.tofile(os.path.join(path, tschidfname)) # save it\n print(tschidfname)\n\n def exportdin(self, basepath):\n \"\"\"Export stimulus din(s) to binary .din file(s) in basepath\"\"\"\n if self.stream.is_multi(): # self.stream is a MultiStream\n streams = self.stream.streams\n else: 
# self.stream is a single Stream\n streams = [self.stream]\n dinfiledtype=[('TimeStamp', '<i8'), ('SVal', '<i8')] # pairs of int64s\n print('Exporting DIN(s) to:')\n for stream in streams:\n try: # neither of these attribs should exist for recordings with no stimuli:\n svrecs = stream.srff.digitalsvalrecords\n dsprecs = stream.srff.displayrecords\n except AttributeError:\n continue # no din to export for this stream\n if len(svrecs) == 0 or stream.srff.ndigitalsvalrecords == 0:\n raise ValueError(\"digitalsvalrecords are empty for stream %r. Attribute \"\n \"shouldn't exist\" % stream.fname)\n path = os.path.join(basepath, stream.srcfnameroot)\n try: os.mkdir(path)\n except OSError: pass # path already exists?\n # upcast SVal field from uint16 to int64, creates a copy,\n # but it's not too expensive:\n svrecs = svrecs.astype(dinfiledtype)\n # convert to normal n x 2 int64 array\n svrecs = svrecs.view(np.int64).reshape(-1, 2)\n # Some old recordings (<= ptc15) contain multiple experiments.\n # To deal with this, iterate over stream.srff.displayrecords, export one .din\n # per displayrecord. Append experiment ID to each .din filename, if necessary.\n svrects = svrecs[:, 0]\n dsprects = [ dsprec.TimeStamp for dsprec in dsprecs ]\n svalrecis = svrects.searchsorted(dsprects)\n assert svalrecis[0] == 0\n svalrecis = svalrecis[1:] # exclude the trivial 0 index\n # split sval records according to displayrecord timestamps:\n dins = np.split(svrecs, svalrecis)\n assert len(dins) == len(dsprecs)\n for eid, din in enumerate(dins):\n if eid == 0 and len(dins) == 1:\n eidstr = ''\n elif len(dins) < 10:\n eidstr = '.%d' % eid\n else: # include leading zero to maintain alphabetical fname order\n eidstr = '.%02d' % eid\n dinfname = stream.srcfnameroot + eidstr + '.din'\n fullfname = os.path.join(path, dinfname)\n din.tofile(fullfname) # save it\n print(fullfname)\n\n def exporttextheader(self, basepath):\n \"\"\"Export stimulus text header(s) to .textheader file(s) in basepath\"\"\"\n if self.stream.is_multi(): # self.stream is a MultiStream\n streams = self.stream.streams\n else: # self.stream is a single Stream\n streams = [self.stream]\n print('Exporting text header(s) to:')\n for stream in streams:\n try:\n dsprecs = stream.srff.displayrecords\n except AttributeError: # no textheader to export for this stream\n continue\n if len(dsprecs) == 0:\n raise ValueError(\"displayrecords are empty for stream %r. Attribute \"\n \"shouldn't exist\" % stream.fname)\n path = os.path.join(basepath, stream.srcfnameroot)\n try: os.mkdir(path)\n except OSError: pass # path already exists?\n # Some old recordings (<= ptc15) contain multiple experiments.\n # To deal with this, iterate over stream.srff.displayrecords, export one\n # .textheader per displayrecord. 
Append experiment ID to each .textheader\n # filename, if necessary.\n for eid, dsprec in enumerate(dsprecs):\n textheader = dsprec.Header.python_tbl\n if eid == 0 and len(dsprecs) == 1:\n eidstr = ''\n elif len(dsprecs) < 10:\n eidstr = '.%d' % eid\n else: # include leading zero to maintain alphabetical fname order\n eidstr = '.%02d' % eid\n textheaderfname = stream.srcfnameroot + eidstr + '.textheader'\n fullfname = os.path.join(path, textheaderfname)\n with open(fullfname, 'w') as f:\n f.write(textheader) # save it\n print(fullfname)\n\n def exportall(self, basepath, sortpath):\n \"\"\"Export spike data, stimulus din and textheader to basepath\"\"\"\n self.exportptcsfiles(basepath, sortpath)\n self.exportdin(basepath)\n self.exporttextheader(basepath)\n\n def exportspikewaves(self, sids, selchans, tis, fname, format):\n \"\"\"Export spike waveform data of selected sids, selchans and tis to binary\n .spikes.zip file or text .spikes.csv file\"\"\"\n nspikes = len(sids)\n chans, chanslist = self.get_common_chans(sids, selchans)\n nchans = len(chans)\n ti0, ti1 = tis\n nt = ti1 - ti0\n # fill in 3D data array:\n dtype = self.wavedata.dtype\n data = np.zeros((nspikes, nchans, nt), dtype=dtype)\n for sii, sid in enumerate(sids):\n spikechans = chanslist[sii]\n spikechanis = spikechans.searchsorted(chans)\n data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]\n if format == 'text': # flatten timepoints of all chans into columns\n data.shape = nspikes, nchans*nt\n stream = self.stream\n assert stream.kind == 'highpass' # should be the only type ever saved to self\n if format == 'binary':\n nids = self.spikes['nid'][sids]\n spiketimes = self.spikes['t'][sids]\n chanpos = stream.probe.siteloc_arr()\n uVperAD = stream.converter.AD2uV(1) # convert 1 AD unit to uV\n with open(fname, 'wb') as f:\n np.savez_compressed(f, data=data, sids=sids, nids=nids,\n spiketimes=spiketimes, chans=chans, tis=tis,\n chanpos=chanpos, uVperAD=uVperAD)\n elif format == 'text':\n np.savetxt(fname, data, fmt='%d', delimiter=',') # data should be int\n else:\n raise ValueError('Unknown format: %r' % format)\n print('Exported %d spikes on chans=%r and tis=%r to %s'\n % (nspikes, list(chans), list(tis), fname))\n \n def get_param_matrix(self, kind=None, sids=None, tis=None, selchans=None, norm=False,\n dims=None, scale=True):\n \"\"\"Organize dims parameters from sids into a data matrix, each column\n corresponding to a dim. 
To do PCA/ICA clustering on all spikes, one maxchan at\n a time, caller needs to call this multiple times, one for each set of\n maxchan unique spikes,\"\"\"\n spikes = self.spikes\n dtypefields = list(spikes.dtype.fields)\n if sids is None:\n sids = spikes['id'] # default to all spikes\n comps = [ dim for dim in dims if dim.startswith('c') and dim[-1].isdigit() ]\n rmserror = np.any([ dim == 'RMSerror' for dim in dims ])\n ncomp = len(comps)\n hascomps = ncomp > 0\n if hascomps:\n X = self.get_component_matrix(kind, sids, tis=tis, chans=selchans,\n minncomp=ncomp, norm=norm)\n if rmserror:\n rms = self.get_rms_error(sids, tis=tis, chans=selchans)\n\n data = []\n for dim in dims:\n if dim in dtypefields:\n data.append( np.float32(spikes[dim][sids]) )\n elif dim.startswith('c') and dim[-1].isdigit():\n compid = int(lstrip(dim, 'c'))\n data.append( np.float32(X[:, compid]) )\n elif dim == 'RMSerror':\n data.append( np.float32(rms) )\n else:\n raise RuntimeError('Unknown dim %r' % dim)\n # np.column_stack returns a copy, not modifying the original array\n data = np.column_stack(data)\n if scale:\n # ensure 0 mean, and unit variance/stdev\n for dim, d in zip(dims, data.T): # d iterates over columns\n d -= d.mean()\n if dim in ['x0', 'y0'] and self.probe.ncols > 1:\n try: x0std # normalize spatial params by x0 std\n except NameError: x0std = spikes['x0'].std()\n if x0std != 0.0:\n d /= x0std\n #elif dim == 't': # the longer the recording in hours, the greater the\n # # scaling in time\n # trange = d.max() - d.min()\n # tscale = trange / (60*60*1e6)\n # d *= tscale / d.std()\n else: # normalize all other dims by their std\n dstd = d.std()\n if dstd != 0.0:\n d /= dstd\n return data\n\n def get_component_matrix(self, kind, sids, tis=None, chans=None, minncomp=None,\n norm=False):\n \"\"\"Find set of chans common to all sids, and do PCA/ICA on those waveforms. Or,\n if chans are specified, limit PCA/ICA to them. 
Return component matrix with at\n least minncomp dimensions\"\"\"\n spikes = self.spikes\n nt = self.wavedata.shape[2]\n if tis is None: # use full waveform\n tis = np.asarray([0, nt])\n #print('tis: %r' % (tis,))\n ti0, ti1 = tis\n assert ti0 < ti1 <= nt\n nt = ti1 - ti0\n chans, chanslist = self.get_common_chans(sids, chans)\n nchans = len(chans)\n nspikes = len(sids)\n if nspikes < 2:\n raise RuntimeError(\"Need at least 2 spikes for %s\" % kind)\n if nchans == 0:\n raise RuntimeError(\"Spikes have no common chans for %s\" % kind)\n\n # check if desired components have already been calculated (cache hit):\n Xhash = self.get_Xhash(kind, sids, tis, chans, self.npcsperchan, norm)\n self.Xhash = Xhash # save as key to most recent component matrix in self.X\n try: self.X\n except AttributeError: self.X = {} # init the dimension reduction cache attrib\n if Xhash in self.X:\n print('Cache hit, using cached %ss from tis=%r, chans=%r of %d spikes' %\n (kind[:-1], list(tis), list(chans), nspikes))\n return self.X[Xhash] # no need to recalculate\n\n print('Cache miss, (re)calculating %ss' % kind[:-1])\n\n # collect data between tis from chans from all spikes:\n print('Doing %s on tis=%r, chans=%r of %d spikes' %\n (kind, list(tis), list(chans), nspikes))\n # MDP complains of roundoff errors with float32 for large covariance matrices\n data = np.zeros((nspikes, nchans, nt), dtype=np.float64)\n for sii, sid in enumerate(sids):\n spikechans = chanslist[sii]\n spikechanis = spikechans.searchsorted(chans)\n spikedata = self.wavedata[sid][spikechanis, ti0:ti1]\n if norm:\n # normalize by Vpp of chan with max Vpp:\n maxptp = spikedata.ptp(axis=1).max()\n if maxptp != 0: # prevent div by 0\n spikedata = spikedata / maxptp\n data[sii] = spikedata\n print('Input shape for %s: %r' % (kind, data.shape))\n t0 = time.time()\n data.shape = nspikes, nchans*nt # flatten timepoints of all chans into columns\n print('Reshaped input for %s: %r' % (kind, data.shape))\n if kind == 'PCA': # principal components analysis\n if PCALIB == 'mdp':\n import mdp # delay as late as possible\n X = mdp.pca(data, output_dim=5, svd=False) # svd=False is default\n elif PCALIB == 'sklearn':\n # sklearn's PCA is about 8x slower than mdp.pca, I think because it\n # doesn't tap into scipy.linalg.eig compiled code. 
RandomizedPCA is faster\n # than PCA, but isn't deterministic, and is still 2-3x slower than mdp.pca\n from sklearn.decomposition import PCA\n pca = PCA(n_components=5)\n X = pca.fit_transform(data) # do both the fit and the transform\n else:\n raise ValueError('Invalid PCALIB %r' % PCALIB)\n if X.shape[1] < minncomp:\n raise RuntimeError(\"Can't satisfy minncomp=%d request\" % minncomp)\n elif kind == 'sPCA': # sparse principal components analysis\n from sklearn.decomposition import SparsePCA\n n_components = 5\n alpha = 1 # sparseness parameter\n n_jobs = mp.cpu_count()\n spca = SparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)\n X = spca.fit_transform(data) # do both the fit and the transform\n elif kind == 'mbsPCA': # mini batch sparse principal components analysis\n from sklearn.decomposition import MiniBatchSparsePCA\n n_components = 5\n alpha = 1 # sparseness parameter\n n_jobs = mp.cpu_count()\n mbspca = MiniBatchSparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)\n X = mbspca.fit_transform(data) # do both the fit and the transform\n elif kind == 'NMF': # non-negative matrix factorization\n from sklearn.decomposition import NMF\n n_components = 5\n init = None # 'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'\n nmf = NMF(n_components=n_components, init=init)\n X = nmf.fit_transform(data) # do both the fit and the transform\n elif kind == 'tSNE': # t-distributed stochastic neighbor embedding\n # limit number of PCs to feed into ICA, keep up to npcsperchan components per\n # chan on average:\n ncomp = min((self.npcsperchan*nchans, data.shape[1]))\n print('ncomp: %d' % ncomp)\n import mdp # delay as late as possible\n # do PCA first, to reduce dimensionality and speed up ICA:\n data = mdp.pca(data, output_dim=ncomp)\n from sklearn.manifold import TSNE\n n_components = 3 # not suited for any more than 3, according to the paper\n #init = 'random', 'pca'\n tsne = TSNE(n_components=n_components)\n X = tsne.fit_transform(data) # do both the fit and the transform\n elif kind == 'ICA': # independent components analysis\n # ensure nspikes >= ndims**2 for good ICA convergence\n maxncomp = intround(np.sqrt(nspikes))\n if maxncomp < minncomp:\n raise RuntimeError(\"Can't satisfy minncomp=%d request\" % minncomp)\n if data.shape[0] <= data.shape[1]:\n raise RuntimeError('Need more observations than dimensions for ICA')\n # limit number of PCs to feed into ICA, keep up to npcsperchan components per\n # chan on average:\n ncomp = min((self.npcsperchan*nchans, maxncomp, data.shape[1]))\n if ICALIB == 'mdp':\n import mdp # delay as late as possible\n # do PCA first, to reduce dimensionality and speed up ICA:\n print('ncomp: %d' % ncomp)\n data = mdp.pca(data, output_dim=ncomp)\n # nonlinearity g='pow3', ie x**3. tanh seems to separate better,\n # but is a bit slower. 
gaus seems to be slower still, and no better\n # than tanh, but these are just vague impressions.\n # defaults to whitened=False, ie assumes data isn't whitened\n node = mdp.nodes.FastICANode(g='pow3')\n X = node(data)\n pm = node.get_projmatrix()\n X = X[:, np.any(pm, axis=0)] # keep only the non zero columns\n elif ICALIB == 'sklearn':\n from sklearn.decomposition import FastICA\n # when whiten=True (default), FastICA preprocesses the data using PCA, and\n # n_components is the number of PCs that are kept before doing ICA.\n alg = 'parallel' # parallel or deflation, default is parallel\n fun = 'logcosh' # logcosh, exp, or cube, default is logcosh\n maxiter = 100 # default is 200\n tol = 0.5 # default is 0.0001, seems need >~ 0.1 to exit faster\n ## TODO: make FastICA algorithm (parallel, deflation), nonlinearity (logcosh,\n ## exp, cube) and IC sort method (abs(kurtosis) vs. negentropy) GUI options\n print('ncomp=%d, alg=%r, fun=%r, maxiter=%d, tol=%g'\n % (ncomp, alg, fun, maxiter, tol))\n fastica = FastICA(n_components=ncomp, algorithm=alg,\n whiten=True, fun=fun, fun_args=None,\n max_iter=maxiter, tol=tol, w_init=None,\n random_state=None)\n X = fastica.fit_transform(data) # do both the fit and the transform\n #pm = fastica.components_\n print('fastica niters: %d' % (fastica.n_iter_))\n else:\n raise ValueError('Invalid ICALIB %r' % ICALIB)\n if X.shape[1] < 3:\n raise RuntimeError('Need at least 3 columns')\n\n # Sort ICs by decreasing kurtosis or negentropy. For kurtosis, see Scholz2004 (or\n # rather, opposite to their approach, which picked ICs with most negative\n # kurtosis). For methods of estimating negentropy, see Hyvarinen1997.\n\n '''\n # sort by abs(kurtosis) of each IC (column)\n k = scipy.stats.kurtosis(X, axis=0)\n ki = abs(k).argsort()[::-1] # decreasing order of abs(kurtosis)\n print('Sort by abs(kurtosis):')\n print(k[ki])\n X = X[:, ki] # sort the ICs\n '''\n # sort by negentropy of each IC (column), this seems to work better than kurtosis\n # at separating clusters of similar size:\n ne = core.negentropy(X, axis=0)\n assert (ne > 0).all()\n nei = ne.argsort()[::-1] # decreasing order of negentropy\n print('Sort by negentropy:')\n print(ne[nei])\n X = X[:, nei] # sort the ICs\n '''\n import pylab as pl\n pl.figure()\n pl.imshow(pm)\n pl.colorbar()\n pl.title('original projmatrix')\n pl.figure()\n pl.imshow(pm[:, ki])\n pl.colorbar()\n pl.title('decreasing abs(kurtosis) projmatrix')\n pl.figure()\n pl.imshow(pm[:, nei])\n pl.colorbar()\n pl.title('decreasing negentropy projmatrix')\n '''\n else:\n raise ValueError('Unknown kind %r' % kind)\n print('Output shape for %s: %r' % (kind, X.shape))\n self.X[Xhash] = X # cache for fast future retrieval\n print('%s took %.3f sec' % (kind, time.time()-t0))\n unids = list(np.unique(spikes['nid'][sids])) # set of all nids that sids span\n for nid in unids:\n # don't update pos of junk cluster, if any, since it might not have any chans\n # common to all its spikes, and therefore can't have PCA/ICA done on it\n if nid != 0:\n self.clusters[nid].update_comppos(X, sids)\n return X\n\n def get_rms_error(self, sids, tis=None, chans=None):\n \"\"\"Calculate RMS error of spike waveforms (all from the same cluster) relative to\n their cluster's mean waveform. 
Consider only selected tis and chans\"\"\"\n spikes = self.spikes\n nids = np.unique(spikes['nid'][sids])\n nid = nids[0]\n if len(nids) > 1 or nid == 0:\n raise RuntimeError(\"Spikes must all belong to the same (non-junk) cluster for \"\n \"RMS error calculation\")\n nt = self.wavedata.shape[2]\n if tis is None: # use full waveform\n tis = np.asarray([0, nt])\n #print('tis: %r' % (tis,))\n ti0, ti1 = tis\n assert ti0 < ti1 <= nt\n nt = ti1 - ti0\n chans, chanslist = self.get_common_chans(sids, chans)\n nchans = len(chans)\n nspikes = len(sids)\n if nchans == 0:\n raise RuntimeError(\"Spikes have no common chans for RMS error\")\n\n # collect data between tis from chans from all spikes:\n print('Getting RMS error on tis=%r, chans=%r of %d spikes' %\n (list(tis), list(chans), nspikes))\n data = np.zeros((nspikes, nchans, nt), dtype=np.float64)\n for sii, sid in enumerate(sids):\n spikechans = chanslist[sii]\n spikechanis = spikechans.searchsorted(chans)\n data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]\n\n # get cluster mean waveform between tis on chans:\n wave = self.neurons[nid].get_wave()\n chanis = wave.chans.searchsorted(chans)\n meandata = np.float64(wave.data[chanis, ti0:ti1])\n\n # calculate RMS error between each spike and the cluster mean waveform:\n se = (data - meandata) ** 2 # squared error\n # take mean across timepoints and chans, but not across spikes:\n mse = se.mean(axis=2).mean(axis=1) # mean squared error\n return np.sqrt(mse)\n\n def get_common_chans(self, sids, chans=None):\n \"\"\"Find channels common to all sids, and optionally to chans as well. Also,\n return chanslist, ie list of arrays of chans of sids\"\"\"\n spikes = self.spikes\n chanss = spikes['chans'][sids]\n nchanss = spikes['nchans'][sids]\n #t0 = time.time()\n chanslist = [ cs[:ncs] for cs, ncs in zip(chanss, nchanss) ] # list of arrays\n #print('Building chanslist took %.3f sec' % (time.time()-t0))\n commonchans = util.intersect1d_uint8(chanslist) # find intersection\n if chans is not None and len(chans) > 0:\n # values in chans but not in commonchans:\n diffchans = np.setdiff1d(chans, commonchans)\n commonchans = np.intersect1d(chans, commonchans) # values in both\n if len(diffchans) > 0:\n print('WARNING: ignored chans %r not common to all spikes' % list(diffchans))\n return commonchans, chanslist\n\n def get_Xhash(self, kind, sids, tis, chans, npcsperchan, norm):\n \"\"\"Return MD5 hex digest of args, for uniquely identifying the matrix resulting\n from dimension reduction of spike data\"\"\"\n h = hashlib.md5()\n h.update(kind.encode())\n h.update(sids)\n h.update(tis)\n h.update(chans)\n if kind == 'ICA': # consider npcsperchan only if doing ICA\n h.update(str(npcsperchan).encode())\n h.update(str(norm).encode())\n return h.hexdigest()\n\n def create_neuron(self, id=None, inserti=None):\n \"\"\"Create and return a new Neuron with a unique ID\"\"\"\n if id == None:\n id = self.nextnid\n if id in self.neurons:\n raise RuntimeError('Neuron %d already exists' % id)\n id = int(id) # get rid of numpy ints\n neuron = Neuron(self, id)\n # add neuron to self\n self.neurons[neuron.id] = neuron\n if inserti == None:\n self.norder.append(neuron.id)\n else:\n self.norder.insert(inserti, neuron.id)\n return neuron\n\n def remove_neuron(self, id):\n try:\n del self.neurons[id] # may already be removed due to recursive call\n del self.clusters[id]\n self.norder.remove(id)\n except (KeyError, ValueError):\n pass\n\n def shift(self, sids, nt):\n \"\"\"Shift sid waveforms by nt timepoints: -ve shifts waveforms 
left, +ve shifts right.\n For speed, pad waveforms with edge values at the appropriate end\"\"\"\n spikes = self.spikes\n wd = self.wavedata\n for sid in sids: # maybe there's a more efficient way than iterating over sids\n core.shiftpad(wd[sid], nt) # modifies wd in-place\n # update spike parameters:\n dt = intround(nt * self.tres) # amount of time to shift by, signed, in us\n # so we can later reload the wavedata accurately, shifting the waveform right and\n # padding it on its left requires decrementing the associated timepoints\n # (and vice versa)\n spikes['t'][sids] -= dt\n spikes['t0'][sids] -= dt\n spikes['t1'][sids] -= dt\n # might result in some out of bounds tis because the original peaks\n # have shifted off the ends. Opposite sign wrt timepoints above, referencing within\n # wavedata:\n spikes['tis'][sids] = spikes['tis'][sids] + nt\n # this in-place operation raises a TypeError in numpy 1.11.2, something related to\n # subtracting an int from an unsigned int:\n #spikes['tis'][sid] += nt\n # caller should treat all sids as dirty\n '''\n # replaced by util.alignbest_cy():\n def alignbest(self, sids, tis, chans):\n \"\"\"Align all sids between tis on chans by best fit according to mean squared error.\n chans are assumed to be a subset of channels of sids. Return sids\n that were actually moved and therefore need to be marked as dirty\"\"\"\n spikes = self.spikes\n nspikes = len(sids)\n nchans = len(chans)\n wd = self.wavedata\n nt = wd.shape[2] # num timepoints in each waveform\n ti0, ti1 = tis\n subnt = ti1 - ti0 # num timepoints to slice from each waveform\n # TODO: make maxshift a f'n of interpolation factor\n maxshift = 2 # shift +/- this many timepoints\n subntdiv2 = subnt // 2\n #print('subntdiv2 on either side of t=0: %d' % subntdiv2)\n if subntdiv2 < maxshift:\n raise ValueError(\"Selected waveform duration too short\")\n #maxshiftus = maxshift * self.stream.tres\n # NOTE: in this case, it may be faster to keep shifts and sti0s and sti1s as lists\n # of ints instead of np int arrays, maybe because their values are faster to iterate\n # over or index with in python loops and lists:\n shifts = range(-maxshift, maxshift+1) # from -maxshift to maxshift, inclusive\n nshifts = len(shifts)\n sti0s = [ ti0+shifti for shifti in range(nshifts) ] # shifted ti0 values\n sti1s = [ ti1+shifti for shifti in range(nshifts) ] # shifted ti1 values\n sti0ssti1s = zip(sti0s, sti1s)\n print(\"Padding waveforms with up to +/- %d points of fake data\" % maxshift)\n\n # not worth subsampling here while calculating meandata, since all this\n # stuff in this loop is needed in the shift loop below\n subsd = np.zeros((nspikes, nchans, subnt), dtype=wd.dtype) # subset of spike data\n spikechanis = np.zeros((nspikes, nchans), dtype=np.int64)\n t0 = time.time()\n for sidi, sid in enumerate(sids):\n spike = spikes[sid]\n nspikechans = spike['nchans']\n spikechans = spike['chans'][:nspikechans]\n spikechanis[sidi] = spikechans.searchsorted(chans)\n subsd[sidi] = wd[sid, spikechanis[sidi], ti0:ti1]\n print('Mean prep loop for best shift took %.3f sec' % (time.time()-t0))\n t0 = time.time()\n meandata = subsd.mean(axis=0) # float64\n print('Mean for best shift took %.3f sec' % (time.time()-t0))\n\n # choose best shifted waveform for each spike\n # widesd holds current spike data plus padding on either side\n # to allow for full width slicing for all time shifts:\n maxnchans = spikes['nchans'].max() # of all spikes in sort\n widesd = np.zeros((maxnchans, maxshift+nt+maxshift), dtype=wd.dtype) \n 
shiftedsubsd = subsd.copy() # init\n tempsubshifts = np.zeros((nshifts, nchans, subnt), dtype=wd.dtype)\n dirtysids = []\n t0 = time.time()\n for sidi, sid in enumerate(sids):\n # for speed, instead of adding real data, pad start and end with fake values\n chanis = spikechanis[sidi]\n sd = wd[sid] # sid's spike data\n widesd[:, maxshift:-maxshift] = sd # 2D\n widesd[:, :maxshift] = sd[:, 0, None] # pad start with first point per chan\n widesd[:, -maxshift:] = sd[:, -1, None] # pad end with last point per chan\n wideshortsd = widesd[chanis] # sid's padded spike data on chanis, 2D\n\n # keep this inner loop as fast as possible:\n for shifti, (sti0, sti1) in enumerate(sti0ssti1s):\n tempsubshifts[shifti] = wideshortsd[:, sti0:sti1] # len: subnt\n \n errors = tempsubshifts - meandata # (nshifts, nchans, subnt) - (nchans, subnt)\n # get sum squared errors by taking sum across highest two dims - for purpose\n # of error comparison, don't need to take mean or square root. Also, order\n # of summation along axes doesn't matter, as long as it's done on the highest two:\n sserrors = (errors**2).sum(axis=2).sum(axis=1) # nshifts long\n bestshifti = sserrors.argmin()\n bestshift = shifts[bestshifti]\n if bestshift != 0: # no need to update sort.wavedata[sid] if there's no shift\n # update time values:\n dt = bestshift * self.tres # time to shift by, signed, in us\n spikes['t'][sid] += dt # should remain halfway between t0 and t1\n spikes['t0'][sid] += dt\n spikes['t1'][sid] += dt\n # might result in some out of bounds tis because the original peaks\n # have shifted off the ends. Opposite sign, referencing within wavedata:\n spikes['tis'][sid] -= bestshift\n # update sort.wavedata\n wd[sid] = widesd[:, bestshifti:bestshifti+nt]\n shiftedsubsd[sidi] = tempsubshifts[bestshifti]\n dirtysids.append(sid) # mark sid as dirty\n print('Shifting loop took %.3f sec' % (time.time()-t0))\n AD2uV = self.converter.AD2uV\n stdevbefore = AD2uV(subsd.std(axis=0).mean())\n stdevafter = AD2uV(shiftedsubsd.std(axis=0).mean())\n print('stdev went from %.3f to %.3f uV' % (stdevbefore, stdevafter))\n return dirtysids\n '''\n def alignminmax(self, sids, to):\n \"\"\"Align sids by their min or max. 
Return those that were actually moved
        and therefore need to be marked as dirty"""
        if not self.stream.is_open():
            raise RuntimeError("No open stream to reload spikes from")
        spikes = self.spikes
        V0s = spikes['V0'][sids]
        V1s = spikes['V1'][sids]
        Vss = np.column_stack((V0s, V1s))
        alignis = spikes['aligni'][sids]
        b = np.column_stack((alignis==0, alignis==1)) # 2D boolean array
        if to == 'min':
            i = Vss[b] > 0 # indices into sids of spikes aligned to the max peak
        elif to == 'max':
            i = Vss[b] < 0 # indices into sids of spikes aligned to the min peak
        else:
            raise ValueError('Unknown to %r' % to)
        sids = sids[i] # sids that need realigning
        nspikes = len(sids)
        print("Realigning %d spikes" % nspikes)
        if nspikes == 0: # nothing to do
            return [] # no sids to mark as dirty

        multichantis = spikes['tis'][sids] # nspikes x nchans x 2 arr
        chanis = spikes['chani'][sids] # nspikes arr of max chanis
        # peak tis on max chan of each spike, convert from uint8 to int32 for safe math
        tis = np.int32(multichantis[np.arange(nspikes), chanis]) # nspikes x 2 arr
        # NOTE: tis aren't always in temporal order!
        dpeaktis = tis[:, 1] - tis[:, 0] # could be +ve or -ve
        dpeaks = spikes['dt'][sids] # stored as +ve

        # for each spike, decide whether to add or subtract dpeak to/from its temporal values
        ordered = dpeaktis > 0 # in temporal order (shadows the builtin, locally)
        reversed = dpeaktis < 0 # in reversed temporal order
        alignis = spikes['aligni'][sids]
        alignis0 = alignis == 0
        alignis1 = alignis == 1
        dpeaki = np.zeros(nspikes, dtype=int)
        # add dpeak to temporal values to align to later peak
        dpeaki[ordered & alignis0 | reversed & alignis1] = 1
        # subtract dpeak from temporal values to align to earlier peak
        dpeaki[ordered & alignis1 | reversed & alignis0] = -1

        # upcast aligni from 1 byte to an int before doing arithmetic on it:
        #dalignis = -np.int32(alignis)*2 + 1
        dts = dpeaki * dpeaks
        dtis = -dpeaki * abs(dpeaktis)
        # shift values
        spikes['t'][sids] += dts
        spikes['t0'][sids] += dts
        spikes['t1'][sids] += dts
        spikes['tis'][sids] = spikes['tis'][sids] + dtis[:, None, None] # update wrt new t0i
        spikes['aligni'][sids[alignis0]] = 1
        spikes['aligni'][sids[alignis1]] = 0

        # update wavedata for each shifted spike
        self.reload_spikes(sids)
        return sids # mark all sids as dirty

    def choose_new_meanchans(self, sids):
        """Get mean waveform of all sids, then find the mean's chan with max Vpp, then
        choose det.maxnchansperspike channels around that maxchan.
        Return meanchans, furthestchan, and furthestchani"""
        print('Choosing new channel set for all selected spikes')
        det = self.detector
        meanwave = self.get_mean_wave(sids)
        # mean chan with max Vpp:
        maxchan = meanwave.chans[meanwave.data.ptp(axis=1).argmax()]
        maxchani = det.chans.searchsorted(maxchan)
        distances = det.dm.data[maxchani]
        # keep the maxnchansperspike closest chans to maxchan, including maxchan:
        chanis = distances.argsort()[:det.maxnchansperspike]
        meanchans = det.chans[chanis]
        meanchans.sort() # keep them sorted
        print('meanchans: %r' % list(meanchans))
        furthestchan = det.chans[chanis[-1]]
        print('furthestchan: %d' % furthestchan)
        furthestchani = meanchans.searchsorted(furthestchan)
        # sanity checks:
        assert len(meanchans) == det.maxnchansperspike
        assert maxchan in meanchans
        return meanchans, furthestchan, furthestchani

    def reload_spikes(self, sids, usemeanchans=False):
        """Update wavedata of designated spikes from stream. 
Optionally fix incorrect\n time values from .sort 0.3 files. Optionally choose new set of channels for all\n sids based on the chans closest to the mean of the sids. It's the caller's\n responsibility to mark sids as dirty and trigger resaving of .wave file\"\"\"\n\n ## TODO: add findmaxchan=False and recenteronmaxchan=False kwargs\n\n nsids = len(sids)\n print('(Re)loading %d spikes' % nsids)\n stream = self.stream\n if not stream.is_open():\n raise RuntimeError(\"No open stream to reload spikes from\")\n spikes = self.spikes\n det = self.detector\n ver_lte_03 = float(self.__version__) <= 0.3\n if ver_lte_03:\n print('Fixing potentially incorrect time values during spike reloading')\n nfixed = 0\n treload = time.time()\n if usemeanchans:\n if ver_lte_03:\n raise RuntimeError(\"Best not to choose new chans from mean until after \"\n \"converting to .sort >= 0.4\")\n meanchans, furthestchan, furthestchani = self.choose_new_meanchans(sids)\n nmeanchans = len(meanchans)\n\n # split up sids into groups efficient for loading from stream:\n ts = spikes[sids]['t'] # noncontig, not a copy\n # ensure they're in temporal order:\n if not (np.diff(ts) >= 0).all():\n print(\"Selected sids aren't in temporal order, sorting by time...\")\n tsis = ts.argsort()\n sids = sids[tsis]\n print(\"Done sorting sids by time\")\n # break up spikes by ISIs >= MAXGROUPISI:\n splitis = np.where(np.diff(ts) >= MAXGROUPISI)[0] + 1\n groups = np.split(sids, splitis)\n # limit each group of sids to no more than MAXGROUPDT:\n groupi = 0\n while groupi < len(groups):\n group = groups[groupi] # group of sids all with ISIs < MAXGROUPISI\n ## TODO: not a copy: is this the optimal way to get the times in this case?\n relts = spikes[group]['t'] - spikes[group[0]]['t']\n splitis = np.where(np.diff(relts // MAXGROUPDT) > 0)[0] + 1\n nsubgroups = len(splitis) + 1\n if nsubgroups > 1:\n # del original group, replace with subgroups\n del groups[groupi]\n subgroups = np.split(group, splitis)\n groups[groupi:groupi] = subgroups\n groupi += len(subgroups)\n else:\n groupi += 1\n print('ngroups: %d' % len(groups))\n\n # process each group:\n sidi = 0 # init sid index across all groups, used as status counter\n for groupi, group in enumerate(groups):\n printflush('<%d>' % groupi, end='')\n assert len(group) > 0 # otherwise something went wrong above\n t0 = spikes[group[0]]['t0']\n t1 = spikes[group[-1]]['t1']\n if ver_lte_03:\n # load a little extra, in case we need to reload misaligned first and/or\n # last spike in this group\n t0 -= 5000 # -5 ms\n t1 += 5000 # +5 ms\n \"\"\"\n Find union of chans of sids in this group, ask Stream for only those such that no\n unnecessary resampling takes place on unneeded chans. Note that this doesn't make\n a difference when CAR is enabled in the stream, because the full set of enabled\n chans have to be maintained in Stream.__call__ until the very end. Don't bother\n cutting out the correct nchans for each sid. 
At worst, chan 0 (the "empty" chans
            array value) will be unnecessarily added to unionchans, and we'll retrieve one
            extra chan when creating tempwave, which will then later be discarded:
            """
            unionchans = np.unique(spikes['chans'][group])
            if usemeanchans:
                # now that we have the original unionchans of this group,
                # update this group's spikes array entries with meanchans:
                spikes['nchans'][group] = nmeanchans
                # we're using the max num chans, so assign the full array:
                spikes['chans'][group] = meanchans
                # now update unionchans as well:
                unionchans = np.unique(np.hstack((unionchans, meanchans)))
            if 0 not in stream.chans: # if chan 0 is disabled in stream
                # remove 0 from unionchans, otherwise an error would be raised when
                # calling stream()
                unionchans = unionchans[unionchans != 0]
            # load and resample only what's needed for this group:
            tempwave = stream(t0, t1, unionchans)
            # slice out each spike's reloaded data from tempwave:
            for sid in group:
                # print status:
                if sidi % 10000 == 0:
                    printflush(sidi, end='')
                elif sidi % 1000 == 0:
                    printflush('.', end='')
                if usemeanchans: # already checked above that ver_lte_03 == False
                    # this spike's chans have been set to meanchans, now
                    # check that each spike's maxchan is in meanchans:
                    chan = spikes[sid]['chan']
                    if chan not in meanchans:
                        # replace furthest chan with spike's maxchan:
                        print("spike %d: replacing furthestchan %d with spike's maxchan %d"
                              % (sid, furthestchan, chan))
                        nchans = spikes[sid]['nchans']
                        chans = spikes[sid]['chans'][:nchans]
                        # replace furthest chan with max chan, modifies spikes array in-place:
                        chans[furthestchani] = chan
                        # make sure chans remain sorted:
                        chans.sort()
                        # this isn't necessary, because all the above was in-place:
                        #spikes['chans'][sid][:nchans] = chans
                spike = spikes[sid]
                nchans = spike['nchans']
                chans = spike['chans'][:nchans]
                rd = tempwave[spike['t0']:spike['t1']][chans].data # reloaded data
                if ver_lte_03: # fix potentially incorrect spike tis
                    result = self.reload_spike_ver_lte_03(sid, nchans, tempwave, rd)
                    if result is None:
                        sidi += 1 # inc status counter
                        continue # rollwin2D won't work, skip to next sid
                    else:
                        rd, fixed = result
                        if fixed:
                            nfixed += 1
                nt = rd.shape[1]
                self.wavedata[sid, :nchans, :nt] = rd # update wavedata
                sidi += 1 # inc status counter
        print()

        if ver_lte_03:
            print('Fixed time values of %d spikes' % nfixed)
        print('(Re)loaded %d spikes, took %.3f sec' % (len(sids), time.time()-treload))
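
    # A minimal standalone sketch of the grouping strategy reload_spikes() uses
    # above, assuming a sorted array of spike times in us. `ts` is a hypothetical
    # example array; MAXGROUPISI and MAXGROUPDT are the module constants used above:
    #
    #   ts = np.array([0, 100, 250, 900000, 900120])  # example spike times (us)
    #   sids = np.arange(len(ts))
    #   # start a new group wherever consecutive spikes are >= MAXGROUPISI apart:
    #   splitis = np.where(np.diff(ts) >= MAXGROUPISI)[0] + 1
    #   groups = np.split(sids, splitis)
    #   # each group can then be loaded from the stream in one contiguous read, and
    #   # is further subdivided above so no group spans more than MAXGROUPDT of time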

    def reload_spike_ver_lte_03(self, sid, nchans, tempwave, rd):
        """In sort.__version__ <= 0.3, t, t0, t1, and tis were not updated
        during alignbest() calls. To fix this, load new data with old potentially
        incorrect t0 and t1 values, and compare this new data to existing old data
        in wavedata array. Find where the non-repeating parts of the old data fit
        into the new, and calculate the correction needed to fix the time values.
        Finally, reload new data according to these corrected time values."""
        #print('Reloading sid from ver_lte_03: %d' % sid)
        spikes = self.spikes
        od = self.wavedata[sid, :nchans] # old data
        # indices that strip const values from left and right ends:
        lefti, righti = lrrep2Darrstripis(od)
        od = od[:, lefti:righti] # stripped old data
        # reloaded data rd uses old incorrect t0 and t1, but they should be
        # wide enough to encompass the non-repeating parts of the old data
        width = od.shape[1] # rolling window width
        if not width <= rd.shape[1]:
            print('') # newline
            print("WARNING: od.shape[1]=%d > rd.shape[1]=%d for sid %d" %
                  (od.shape[1], rd.shape[1], sid))
            #import pdb; pdb.set_trace()
            return
        odinndis = np.where((rollwin2D(rd, width) == od).all(axis=1).all(axis=1))[0]
        if len(odinndis) == 0: # no hits of old data in new
            dnt = 0 # reload data based on current timepoints
        elif len(odinndis) == 1: # exactly 1 hit of old data in new
            odinndi = odinndis[0] # pull it out
            dnt = odinndi - lefti # num timepoints to correct by, signed
        else:
            raise RuntimeError("Multiple hits of old data in new, don't know "
                               "how to reload spike %d" % sid)
        newrd, fixed = rd, False
        if dnt != 0:
            dt = intround(dnt * self.tres) # time to correct by, signed, in us
            spikes['t'][sid] += dt # should remain halfway between t0 and t1
            spikes['t0'][sid] += dt
            spikes['t1'][sid] += dt
            # might result in some out of bounds tis because the original peaks
            # have shifted off the ends. Use opposite sign because we're
            # referencing within wavedata:
            # in versions <= 0.3, 'tis' were named 'phasetis':
            spikes['phasetis'][sid] = spikes['phasetis'][sid] - dnt
            spike = spikes[sid]
            chans = spike['chans'][:nchans] # this spike's enabled chans
            # reslice tempwave again now that t0 and t1 have changed
            newrd = tempwave[spike['t0']:spike['t1']][chans].data
            fixed = True
            #printflush('F', end='')
        return newrd, fixed

    def reload_spikes_and_templates(self, sids, usemeanchans=False):
        self.reload_spikes(sids, usemeanchans=usemeanchans)
        # update neuron templates:
        unids = np.unique(self.spikes['nid'][sids])
        unids = unids[unids != 0] # exclude junk cluster, which doesn't have a neuron
        neurons = [ self.neurons[nid] for nid in unids ]
        for neuron in neurons:
            neuron.update_wave() # update affected mean waveforms
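
    # A hedged sketch of how init_spike_alignment() below derives per-neuron
    # alignment points from a mean waveform; `nwavedata` is a hypothetical
    # (nchans, nt) array standing in for neuron.wave.data:
    #
    #   mintis = nwavedata.argmin(axis=1)        # per-chan timepoint of the min peak
    #   maxtis = nwavedata.argmax(axis=1)        # per-chan timepoint of the max peak
    #   tis = np.column_stack([mintis, maxtis])  # (nchans, 2) candidate align points
    #   # align to whichever peak jitters least across chans:
    #   aligni = np.argmin([mintis.std(), maxtis.std()])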

    def init_spike_alignment(self):
        """Set initial spike alignment points according to alignment points of each
        spike's neuron"""
        print('Setting initial spike alignment points')
        ntis, nalignis = {}, {} # tis and aligni derived from each neuron's mean waveform
        for neuron in self.neurons.values():
            nwave = neuron.get_wave() # update and return mean waveform
            mintis = nwave.data.argmin(axis=1)
            maxtis = nwave.data.argmax(axis=1)
            ntis[neuron.id] = np.column_stack([mintis, maxtis])
            # choose aligni with least variance:
            nalignis[neuron.id] = np.argmin([mintis.std(), maxtis.std()])
        AD2uV = self.converter.AD2uV
        for s, wd in zip(self.spikes, self.wavedata):
            sid = s['id']
            # print out progress on a regular basis:
            if sid % 100000 == 0:
                printflush(sid, end='')
            elif sid % 10000 == 0:
                printflush('.', end='')
            nid = s['nid']
            #chan = s['chan']
            nchans = s['nchans']
            chans = s['chans'][:nchans]
            neuronchans = self.neurons[nid].wave.chans
            assert (chans == neuronchans).all()
            s['tis'][:nchans] = ntis[nid] # set according to its neuron, wrt t0i=0
            s['aligni'] = nalignis[nid] # set according to its neuron
            maxchani = s['chani']
            t0i, t1i = int(s['tis'][maxchani, 0]), int(s['tis'][maxchani, 1])
            s['dt'] = abs(t1i - t0i) / self.sampfreq * 1e6 # us
            # note that V0 and V1 might not be of opposite sign, because tis are derived
            # from mean neuron waveform, not from each individual spike:
            s['V0'], s['V1'] = AD2uV(wd[maxchani, t0i]), AD2uV(wd[maxchani, t1i]) # uV
            s['Vpp'] = abs(s['V1'] - s['V0']) # uV
        print()

    def spatially_localize_spikes(self, sortwin, method='fit'):
        """Assuming that wavedata have been extracted and neuron mean waveforms calculated,
        find tis and perform spatial localization of every spike in self"""
        det = self.detector
        weights2f = self.extractor.weights2spatial
        weights2spatialmean = self.extractor.weights2spatialmean
        f = self.extractor.f
        nreject = 0 # number spikes rejected during spatial localization
        print('Running spatial localization on all %d spikes' % self.nspikes)
        tstart = time.clock()

        ## TODO: can this be multithreaded/processed?

        for s, wd in zip(self.spikes, self.wavedata):
            # Get Vpp at each inclchan's tis, use as spatial weights:
            # see core.rowtake() or util.rowtake_cy() for indexing explanation:
            sid = s['id']
            # print out progress on a regular basis:
            if sid % 10000 == 0:
                printflush(sid, end='')
            elif sid % 1000 == 0:
                printflush('.', end='')
            chan = s['chan']
            nchans = s['nchans']
            chans = s['chans'][:nchans]
            maxchani = s['chani']
            chanis = det.chans.searchsorted(chans)
            w = np.float32(wd[np.arange(s['nchans'])[:, None], s['tis'][:nchans]]) # nchans x 2
            w = abs(w).sum(axis=1) # Vpp for each chan, measured at t0i and t1i
            x = det.siteloc[chanis, 0] # 1D array (row)
            y = det.siteloc[chanis, 1]
            if method == 'fit':
                # localize by fitting extractor.f function to wavedata
                params = weights2f(f, w, x, y, maxchani)
            elif method == 'mean':
                # set localization to Vpp-weighted spatial mean and 0 sigma:
                x0, y0 = weights2spatialmean(w, x, y)
                # a very ad-hoc guess for spatial sigma:
                sx = 2 * dist((x0, y0), self.probe.SiteLoc[chan])
                params = x0, y0, sx, sx
            else:
                raise ValueError('Unknown method %r' % method)
            if params is None: # presumably a non-localizable many-channel noise event
                #printflush('X', end='') # to indicate a rejected spike
                if DEBUG:
                    spiket = intround(s['t']) # nearest us
                    det.log("Reject spike %d at t=%d based on fit params" % (sid, spiket))
                neuron = self.neurons[s['nid']]
                # remove from its neuron, add to unsorted list of spikes:
                sortwin.MoveSpikes2List(neuron, [sid], update=False)
                # manually set localization params to Vpp-weighted spatial mean and 0 sigma:
                x0, y0 = weights2spatialmean(w, x, y)
                # set sigma to 0 um, and then later round lockr up to 1 um so that only one
                # raster tick shows up for each rejected spike, reducing clutter
                params = x0, y0, 0, 0
                nreject += 1
            # Save spatial fit params, and "lockout" only the channels within lockrx*sx
            # of the fit spatial location of the spike, up to a max of inclr. 
\"Lockout\"\n # in this case only refers to which channels are highlighted with a raster tick\n # for each spike:\n s['x0'], s['y0'], s['sx'], s['sy'] = params\n x0, y0 = s['x0'], s['y0']\n # lockout radius for this spike:\n lockr = min(det.lockrx*s['sx'], det.inclr) # in um\n lockr = max(lockr, 1) # at least 1 um, so at least the maxchan gets a tick\n # test y coords of chans in y array, ylockchaniis can be used to index\n # into x, y and chans:\n ylockchaniis, = np.where(np.abs(y - y0) <= lockr) # convert bool arr to int\n # test Euclid distance from x0, y0 for each ylockchani:\n lockchaniis = ylockchaniis.copy()\n for ylockchanii in ylockchaniis:\n if dist((x[ylockchanii], y[ylockchanii]), (x0, y0)) > lockr:\n # Euclidean distance is too great, remove ylockchanii from lockchaniis:\n lockchaniis = lockchaniis[lockchaniis != ylockchanii]\n lockchans = chans[lockchaniis]\n nlockchans = len(lockchans)\n s['lockchans'][:nlockchans], s['nlockchans'] = lockchans, nlockchans\n print('Spatial localization of spikes took %.3f s' % (time.clock() - tstart))\n\n return nreject\n\n '''\n def get_component_matrix(self, dims=None, weighting=None):\n \"\"\"Convert spike param matrix into pca/ica data for clustering\"\"\"\n\n import mdp # can't delay this any longer\n X = self.get_param_matrix(dims=dims)\n if weighting == None:\n return X\n if weighting.lower() == 'ica':\n node = mdp.nodes.FastICANode()\n elif weighting.lower() == 'pca':\n node = mdp.nodes.PCANode()\n else:\n raise ValueError, 'unknown weighting %r' % weighting\n node.train(X)\n features = node.execute(X) # returns all available components\n #self.node = node\n #self.weighting = weighting\n #self.features = features\n return features\n\n def get_ids(self, cids, spikes):\n \"\"\"Convert a list of cluster ids into 2 dicts: n2sids maps neuron IDs to\n spike IDs; s2nids maps spike IDs to neuron IDs\"\"\"\n cids = np.asarray(cids)\n cids = cids - cids.min() # make sure cluster IDs are 0-based\n uniquecids = set(cids)\n nclusters = len(uniquecids)\n # neuron ID to spike IDs (plural) mapping\n n2sids = dict(zip(uniquecids, [ [] for i in range(nclusters) ]))\n s2nids = {} # spike ID to neuron ID mapping\n for spike, nid in zip(spikes, cids):\n s2nids[spike['id']] = nid\n n2sids[nid].append(spike['id'])\n return n2sids, s2nids\n\n def write_spc_input(self):\n \"\"\"Generate input data file to SPC\"\"\"\n X = self.get_component_matrix()\n # write to space-delimited .dat file. Each row is a spike, each column a param\n spykedir = os.path.dirname(__file__)\n dt = str(datetime.datetime.now())\n dt = dt.split('.')[0] # ditch the us\n dt = dt.replace(' ', '_')\n dt = dt.replace(':', '.')\n self.spcdatfname = os.path.join(spykedir, 'spc', dt+'.dat')\n # not sure why spc adds the dg_01 part:\n self.spclabfname = os.path.join(spykedir, 'spc', dt+'.dg_01.lab')\n f = open(self.spcdatfname, 'w')\n for params in X: # write text data to file, one row at a time\n params.tofile(f, sep=' ', format='%.6f')\n f.write('\\n')\n f.close()\n\n def parse_spc_lab_file(self, fname=None):\n \"\"\"Parse output .lab file from SPC. Each row in the file is the assignment of each\n spin (datapoint) to a cluster, one row per temperature datapoint. First column is\n temperature run number (0-based). 2nd column is the temperature. 
All remaining\n columns correspond to the datapoints in the order presented in the input .dat file.\n Returns (Ts, cids)\"\"\"\n #spikes = self.get_spikes_sortedby('id')\n if fname == None:\n defaultDir = r\"C:\\Documents and Settings\\Administrator\\Desktop\\Charlie\\From\"\n dlg = wx.FileDialog(None, message=\"Open SPC .lab file\",\n defaultDir=defaultDir, defaultFile='',\n wildcard=\"All files (*.*)|*.*|.lab files (*.lab)|*.lab|\",\n style=wx.OPEN)\n if dlg.ShowModal() == wx.ID_OK:\n fname = dlg.GetPath()\n dlg.Destroy()\n data = np.loadtxt(fname, dtype=np.float32)\n Ts = data[:, 1] # 2nd column\n cids = np.int32(data[:, 2:]) # 3rd column on\n print('Parsed %r' % fname)\n return Ts, cids\n\n def parse_charlies_output(self, fname=None):\n if fname == None:\n fname = (r'C:\\Documents and Settings\\Administrator\\Desktop\\Charlie\\'\n 'From\\2009-07-20\\clustered_events_coiflet_T0.125.txt')\n nids = np.loadtxt(fname, dtype=int) # one neuron id per spike\n return nids\n\n def write_spc_app_input(self):\n \"\"\"Generate input data file to spc_app\"\"\"\n spikes = self.get_spikes_sortedby('id')\n X = self.get_component_matrix()\n # write to tab-delimited data file. Each row is a param, each column a spike\n # (this is the transpose of X)\n # first row has labels \"AFFX\", \"NAME\", and then spike ids\n # first col has labels \"AFFX\", and then param names\n f = open(r'C:\\home\\mspacek\\Desktop\\Work\\SPC\\Weizmann\\spc_app\\spc_app_input.txt', 'w')\n f.write('AFFX\\tNAME\\t')\n for spike in spikes:\n f.write('s%d\\t' % spike['id'])\n f.write('\\n')\n for parami, param in enumerate(['Vpp', 'dt', 'x0', 'y0', 'sx', 'sy', 'theta']):\n f.write(param+'\\t'+param+'\\t')\n for val in X[:, parami]:\n f.write('%f\\t' % val)\n f.write('\\n')\n f.close()\n\n def hcluster(self, t=1.0):\n \"\"\"Hierarchically cluster self.spikes\n\n TODO: consider doing multiple cluster runs. First, cluster by spatial location (x0,\n y0). Then split those clusters up by Vpp. Then those by spatial distrib (sy/sx,\n theta), then by temporal distrib (dt, s1, s2). This will ensure that the lousier\n params will only be considered after the best ones already have, and therefore that\n you start off with pretty good clusters that are then only slightly refined using\n the lousy params\n \"\"\"\n spikes = self.get_spikes_sortedby('id')\n X = self.get_component_matrix()\n print(X)\n # try 'weighted' or 'average' with 'mahalanobis'\n cids = fclusterdata(X, t=t, method='single', metric='euclidean')\n n2sids, s2nids = self.get_ids(cids, spikes)\n return n2sids\n\n def export2Charlie(self, fname='spike_data', onlymaxchan=False, nchans=3, npoints=32):\n \"\"\"Export spike data to a text file, one spike per row.\n Columns are x0, y0, followed by most prominent npoints datapoints\n (1/4, 3/4 wrt spike time) of each nearest nchans. 
This is to\n give to Charlie to do WPD and SPC on\"\"\"\n if onlymaxchan:\n nchans = 1\n assert np.log2(npoints) % 1 == 0, 'npoints is not a power of 2'\n # get ti - time index each spike is assumed to be centered on\n self.spikes[0].update_wave(self.stream) # make sure it has a wave\n ti = intround(self.spikes[0].wave.data.shape[-1] / 4) # 13 for 50 kHz, 6 for 25 kHz\n dims = self.nspikes, 2+nchans*npoints\n output = np.empty(dims, dtype=np.float32)\n dm = self.detector.dm\n chanis = np.arange(len(dm.data))\n coords = np.asarray(dm.coords)\n xcoords = coords[:, 0]\n ycoords = coords[:, 1]\n sids = list(self.spikes) # self.spikes is a dict!\n sids.sort()\n for sid in sids:\n spike = self.spikes[sid]\n chani = spike.chani # max chani\n x0, y0 = spike.x0, spike.y0\n if onlymaxchan:\n nearestchanis = np.asarray([chani])\n else:\n # find closest chans to x0, y0\n d2s = (xcoords - x0)**2 + (ycoords - y0)**2 # squared distances\n sortis = d2s.argsort()\n nearestchanis = chanis[sortis][0:nchans] # pick the first nchan nearest chans\n if chani not in nearestchanis:\n print(\"WARNING: max chani %d is not among the %d chanis nearest \"\n \"(x0, y0) = (%.1f, %.1f) for spike %d at t=%d\"\n % (chani, nchans, x0, y0, sid, spike.t))\n if spike.wave.data is None:\n spike.update_wave(self.stream)\n row = [x0, y0]\n for chani in nearestchanis:\n chan = dm.chans[chani] # dereference\n try:\n data = spike.wave[chan].data[0] # pull out singleton dimension\n except IndexError: # empty array\n data = np.zeros(data.shape[-1], data.dtype)\n row.extend(data[ti-npoints/4:ti+npoints*3/4])\n output[sid] = row\n dt = str(datetime.datetime.now())\n dt = dt.split('.')[0] # ditch the us\n dt = dt.replace(' ', '_')\n dt = dt.replace(':', '.')\n fname += '.' + dt + '.txt'\n np.savetxt(fname, output, fmt='%.1f', delimiter=' ')\n\n def match(self, templates=None, weighting='signal', sort=True):\n \"\"\"Match templates to all .spikes with nearby maxchans,\n save error values to respective templates.\n\n Note: slowest step by far is loading in the wave data from disk.\n (First match is slow, subsequent ones are ~ 15X faster.)\n Unless something's done about that in advance, don't bother optimizing here much.\n Right now, once waves are loaded, performance is roughly 20000 matches/sec\n\n TODO: Nick's alternative to gaussian distance weighting: have two templates: a mean\n template, and an stdev template, and weight the error between each matched\n spike and the mean on each chan at each timepoint by the corresponding stdev value\n (divide the error by the stdev, so that timepoints with low stdev are more sensitive\n to error)\n\n TODO: looks like I still need to make things more nonlinear - errors at high signal\n values aren't penalized enough, while errors at small signal values are penalized\n too much. Try cubing both signals, then taking sum(err**2)\n\n DONE: maybe even better, instead of doing an elaborate cubing of signal, followed by\n a rather elaborate gaussian spatiotemporal weighting of errors, just take difference\n of signals, and weight the error according to the abs(template_signal) at each point\n in time and across chans. 
That way, error in parts of the signal far from zero are\n considered more important than deviance of perhaps similar absolute value for signal\n close to zero\n\n \"\"\"\n # None defaults to matching all templates:\n templates = templates or self.templates.values()\n sys.stdout.write('matching')\n t0 = time.time()\n nspikes = len(self.spikes)\n dm = self.detector.dm\n for template in templates:\n template.err = [] # overwrite any existing .err attrib\n tw = template.tw\n templatewave = template.wave[template.chans] # pull out template's enabled chans\n #stdev = template.get_stdev()[template.chans] # pull out template's enabled chans\n # replace any 0s with 1s - TODO: what's best way to avoid singularities?:\n #stdev[stdev == 0] = 1\n # Gaussian weighting in space and/or time:\n weights = template.get_weights(weighting=weighting, sstdev=self.detector.slock/2,\n tstdev=self.detector.tlock/2)\n for spike in self.spikes.values():\n # check if spike.maxchan is outside some minimum distance from template.maxchan\n if dm[template.maxchan, spike.maxchan] > MAXCHANTOLERANCE: # um\n continue # don't even bother\n if spike.wave.data is None or template.tw != TW: # make sure their data line up\n spike.update_wave(tw) # this slows things down a lot, but is necessary\n # slice template's enabled chans out of spike, calculate sum of\n # squared weighted error\n # first impression is that dividing by stdev makes separation worse, not better\n # low stdev means more sensitive to error:\n #err = (templatewave.data - spike.wave[template.chans].data) / stdev * weights\n # pull out template's enabled chans from spike:\n spikewave = spike.wave[template.chans]\n if weighting == 'signal':\n tsdata = np.asarray([templatewave.data, spikewave.data])\n # take elementwise max of abs of template and spike data:\n weights = np.abs(tsdata).max(axis=0)\n err = (templatewave.data - spikewave.data) * weights # weighted error\n err = (err**2).sum(axis=None) # sum of squared weighted error\n template.err.append((spike.id, intround(err)))\n template.err = np.asarray(template.err, dtype=np.int64)\n if sort and len(template.err) != 0:\n i = template.err[:, 1].argsort() # row indices that sort by error\n template.err = template.err[i]\n sys.stdout.write('.')\n print('\\nmatch took %.3f sec' % (time.time()-t0))\n '''\n\nclass Neuron(object):\n \"\"\"A collection of spikes that have been deemed somehow, whether manually\n or automatically, to have come from the same cell. 
A Neuron's waveform
    is the mean of its member spikes"""
    def __init__(self, sort, id=None):
        self.sort = sort
        self.id = id # neuron id
        self.wave = WaveForm() # init to empty waveform
        self.sids = np.array([], dtype=int) # indices of spikes that make up this neuron
        # relative reference timestamp, here for symmetry with fellow spike rec
        # (obj.t comes up sometimes):
        self.t = 0
        self.plt = None # Plot currently holding self
        self.cluster = None
        self.good = False # user can mark this neuron as "good" if so desired
        #self.fname # not here, let's allow neurons to have spikes from different files?

    def get_chans(self):
        if self.wave.data is None:
            self.update_wave()
        return self.wave.chans # self.chans just refers to self.wave.chans

    chans = property(get_chans)

    def get_chan(self):
        if self.wave.data is None:
            self.update_wave()
        return self.wave.chans[self.wave.data.ptp(axis=1).argmax()] # chan with max Vpp

    chan = property(get_chan)

    def get_nspikes(self):
        return len(self.sids)

    nspikes = property(get_nspikes)

    def __getstate__(self):
        """Get object state for pickling"""
        d = self.__dict__.copy()
        # don't save any calculated PCs/ICs:
        #d.pop('X', None)
        #d.pop('Xhash', None)
        # don't save plot self is assigned to, since that'll change anyway on unpickle
        d['plt'] = None
        return d

    def get_wave(self):
        """Check for valid mean and std waveform before returning it"""
        # many neuron waveforms saved in old .sort files won't have a wave.std field:
        try:
            self.wave.std
        except AttributeError:
            return self.update_wave()
        if self.wave is None or self.wave.data is None or self.wave.std is None:
            return self.update_wave()
        else:
            return self.wave # return existing waveform

    def update_wave(self):
        """Update mean and std of self's waveform"""
        sort = self.sort
        spikes = sort.spikes
        if len(self.sids) == 0: # no member spikes, perhaps I should be deleted?
            raise RuntimeError("n%d has no spikes and its waveform can't be updated" % self.id)
        meanwave = sort.get_mean_wave(self.sids, nid=self.id)

        # update self's Waveform object
        self.wave.data = meanwave.data
        self.wave.std = meanwave.std
        self.wave.ts = sort.twts.copy() # meanwave has no .ts, copy for clean jsonpickle
        self.wave.chans = meanwave.chans
        self.wave.tres = sort.tres # meanwave has no .tres
        return self.wave

    def __sub__(self, other):
        """Return difference array between self and other neurons' waveforms
        on common channels"""
        selfwavedata, otherwavedata = self.getCommonWaveData(other.chan, other.chans,
                                                             other.wave.data)
        return selfwavedata - otherwavedata

    def getCommonWaveData(self, otherchan, otherchans, otherwavedata):
        """Return waveform data common to self's chans and otherchans, while
        requiring that both include the other's maxchan"""
        chans = np.intersect1d(self.chans, otherchans, assume_unique=True)
        if len(chans) == 0:
            raise ValueError('No common chans')
        if self.chan not in chans or otherchan not in chans:
            raise ValueError("maxchans aren't part of common chans")
        selfchanis = self.chans.searchsorted(chans)
        otherchanis = otherchans.searchsorted(chans)
        return self.wave.data[selfchanis], otherwavedata[otherchanis]
    '''
    def get_stdev(self):
        """Return 2D array of stddev of each timepoint of each chan of member spikes.
        Assumes self.update_wave has already been called"""
        data = []
        # TODO: speed this up by pre-allocating memory and then filling in the array
        for spike in 
self.spikes:\n data.append(spike.wave.data) # collect spike's data\n stdev = np.asarray(data).std(axis=0)\n return stdev\n\n def get_weights(self, weighting=None, sstdev=None, tstdev=None):\n \"\"\"Returns unity, spatial, temporal, or spatiotemporal Gaussian weights\n for self's enabled chans in self.wave.data, given spatial and temporal\n stdevs\"\"\"\n nchans = len(self.wave.chans)\n nt = len(self.wave.data[0]) # assume all chans have the same number of timepoints\n if weighting == None:\n weights = 1\n elif weighting == 'spatial':\n weights = self.get_gaussian_spatial_weights(sstdev) # vector\n elif weighting == 'temporal':\n weights = self.get_gaussian_temporal_weights(tstdev) # vector\n elif weighting == 'spatiotemporal':\n sweights = self.get_gaussian_spatial_weights(sstdev)\n tweights = self.get_gaussian_temporal_weights(tstdev)\n weights = np.outer(sweights, tweights) # matrix, outer product of the two\n elif weighting == 'signal':\n weights = None # this is handled by caller\n #print('\\nweights:\\n%r' % weights)\n return weights\n\n def get_gaussian_spatial_weights(self, stdev):\n \"\"\"Return a vector that weights self.chans according to a 2D gaussian\n centered on self.maxchan with standard deviation stdev in um\"\"\"\n g = Gaussian(mean=0, stdev=stdev)\n # distances between maxchan and all enabled chans:\n d = self.sort.detector.dm[self.maxchan, self.chans]\n weights = g[d]\n weights.shape = (-1, 1) # vertical vector with nchans rows, 1 column\n return weights\n\n def get_gaussian_temporal_weights(self, stdev):\n \"\"\"Return a vector that weights timepoints in self's mean waveform\n by a gaussian centered on t=0, with standard deviation stdev in us\"\"\"\n g = Gaussian(mean=0, stdev=stdev)\n ts = self.wave.ts # template mean timepoints relative to t=0 spike time\n weights = g[ts] # horizontal vector with 1 row, nt timepoints\n return weights\n '''\n\nclass PTCSHeader(object):\n \"\"\"\n Polytrode clustered spikes file header:\n\n formatversion: int64 (currently version 3)\n ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n descr: ndescrbytes of ASCII text\n (padded with null bytes if needed for 8 byte alignment)\n\n nneurons: uint64 (number of neurons)\n nspikes: uint64 (total number of spikes)\n nsamplebytes: uint64 (number of bytes per template waveform sample)\n samplerate: uint64 (Hz)\n\n npttypebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n pttype: npttypebytes of ASCII text\n (padded with null bytes if needed for 8 byte alignment)\n nptchans: uint64 (total num chans in polytrode)\n chanpos: nptchans * 2 * float64\n (array of (x, y) positions, in um, relative to top of polytrode,\n indexed by 0-based channel IDs)\n nsrcfnamebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n srcfname: nsrcfnamebytes of ASCII text\n (source file name, probably .srf, padded with null bytes if needed for\n 8 byte alignment)\n datetime: float64\n (absolute datetime corresponding to t=0 us timestamp, stored as days since\n epoch: December 30, 1899 at 00:00)\n ndatetimestrbytes: uint64 \n datetimestr: ndatetimestrbytes of ASCII text\n (human readable string representation of datetime, preferrably ISO 8601,\n padded with null bytes if needed for 8 byte alignment)\n \"\"\"\n FORMATVERSION = 3 # overall .ptcs file format version, not header format version\n def __init__(self, sort, sortpath, stream, nneurons, nspikes, nsamplebytes,\n fullfname, exportdt, user='', notes=''):\n self.sort = sort\n self.stream = stream\n self.nneurons = 
nneurons\n self.nspikes = nspikes\n self.nsamplebytes = nsamplebytes\n homelessfullfname = lstrip(fullfname, os.path.expanduser('~'))\n sortfname = sort.fname\n sortfullfname = os.path.join(sortpath, sortfname)\n sortfmoddt = str(datetime.datetime.fromtimestamp(os.path.getmtime(sortfullfname)))\n sortfmoddt = sortfmoddt.split('.')[0] # ditch the us\n sortfsize = os.path.getsize(sortfullfname) # in bytes\n d = {'file_type': '.ptcs (polytrode clustered spikes) file',\n 'original_fname': homelessfullfname, 'export_time': exportdt,\n 'sort': {'fname': sortfname, 'path': sortpath,\n 'fmtime': sortfmoddt, 'fsize': sortfsize},\n 'user': user, 'notes': notes}\n descr = str(d)\n self.descr = pad(descr, align=8)\n self.srcfname = pad(lstrip(stream.fname, '../'), align=8)\n self.pttype = pad(stream.probe.name, align=8)\n self.dt = stream.datetime\n self.dtstr = pad(self.dt.isoformat(), align=8)\n\n def write(self, f):\n s = self.sort\n np.int64(self.FORMATVERSION).tofile(f) # formatversion\n np.uint64(len(self.descr)).tofile(f) # ndescrbytes\n f.write(self.descr) # descr\n \n np.uint64(self.nneurons).tofile(f) # nneurons\n np.uint64(self.nspikes).tofile(f) # nspikes\n np.uint64(self.nsamplebytes).tofile(f) # nsamplebytes\n np.uint64(s.sampfreq).tofile(f) # samplerate\n\n np.uint64(len(self.pttype)).tofile(f) # npttypebytes\n f.write(self.pttype) # pttype\n np.uint64(s.stream.probe.nchans).tofile(f) # nptchans\n np.float64(s.stream.probe.siteloc_arr()).tofile(f) # chanpos\n np.uint64(len(self.srcfname)).tofile(f) # nsrcfnamebytes\n f.write(self.srcfname) # srcfname\n np.float64(td2days(self.dt - EPOCH)).tofile(f) # datetime (in days)\n np.uint64(len(self.dtstr)).tofile(f) # ndatetimestrbytes\n f.write(self.dtstr)\n\n\nclass PTCSNeuronRecord(object):\n \"\"\"\n Polytrode clustered spikes file neuron record:\n \n nid: int64 (signed neuron id, could be -ve, could be non-contiguous with previous)\n ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment, defaults to 0)\n descr: ndescrbytes of ASCII text\n (padded with null bytes if needed for 8 byte alignment)\n clusterscore: float64\n xpos: float64 (um)\n ypos: float64 (um)\n sigma: float64 (um) (Gaussian spatial sigma)\n nchans: uint64 (num chans in template waveforms)\n chanids: nchans * uint64 (0 based IDs of channels in template waveforms)\n maxchanid: uint64 (0 based ID of max channel in template waveforms)\n nt: uint64 (num timepoints per template waveform channel)\n nwavedatabytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n wavedata: nwavedatabytes of nsamplebytes sized floats\n (template waveform data, laid out as nchans * nt, in uV,\n padded with null bytes if needed for 8 byte alignment)\n nwavestdbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n wavestd: nwavestdbytes of nsamplebytes sized floats\n (template waveform standard deviation, laid out as nchans * nt, in uV,\n padded with null bytes if needed for 8 byte alignment)\n nspikes: uint64 (number of spikes in this neuron)\n spike timestamps: nspikes * uint64 (us, should be sorted)\n \"\"\"\n def __init__(self, neuron, spikets=None, nsamplebytes=None, descr=''):\n n = neuron\n AD2uV = n.sort.converter.AD2uV\n self.neuron = neuron\n self.spikets = spikets # constrained to stream range, may be < neuron.sids\n self.wavedtype = {2: np.float16, 4: np.float32, 8: np.float64}[nsamplebytes]\n if n.wave.data is None or n.wave.std is None: # some may have never been displayed\n n.update_wave()\n # wavedata and wavestd are nchans * nt * nsamplebytes 
long:\n self.wavedata = pad(self.wavedtype(AD2uV(n.wave.data)), align=8)\n self.wavestd = pad(self.wavedtype(AD2uV(n.wave.std)), align=8)\n self.descr = pad(descr, align=8)\n \n def write(self, f):\n n = self.neuron\n np.int64(n.id).tofile(f) # nid\n np.uint64(len(self.descr)).tofile(f) # ndescrbytes\n f.write(self.descr) # descr, bytes\n np.float64(np.nan).tofile(f) # clusterscore\n np.float64(n.cluster.pos['x0']).tofile(f) # xpos (um)\n np.float64(n.cluster.pos['y0']).tofile(f) # ypos (um)\n np.float64(n.cluster.pos['sx']).tofile(f) # sigma (um)\n np.uint64(len(n.wave.chans)).tofile(f) # nchans\n np.uint64(n.wave.chans).tofile(f) # chanids\n np.uint64(n.chan).tofile(f) # maxchanid\n np.uint64(len(n.wave.ts)).tofile(f) # nt\n np.uint64(self.wavedata.nbytes).tofile(f) # nwavedatabytes\n self.wavedata.tofile(f) # wavedata \n np.uint64(self.wavestd.nbytes).tofile(f) # nwavestdbytes\n self.wavestd.tofile(f) # wavestd \n np.uint64(len(self.spikets)).tofile(f) # nspikes\n np.uint64(self.spikets).tofile(f) # spike timestamps (us)\n\n\nclass PanelScrollArea(QtGui.QScrollArea):\n \"\"\"A scroll area for the spikesortpanel\"\"\"\n def keyPressEvent(self, event):\n key = event.key()\n # seems the ENTER key needs be handled to directly call plot, unlike in sortwin\n # where the event is passed on to be handled by the list widgets\n if key in [Qt.Key_Enter, Qt.Key_Return]:\n sortwin = self.topLevelWidget()\n sortwin.parent().ui.plotButton.click()\n else:\n QtGui.QScrollArea.keyPressEvent(self, event) # pass it on\n\n\nclass SortWindow(SpykeToolWindow):\n \"\"\"Sort window\"\"\"\n def __init__(self, parent, pos=None):\n SpykeToolWindow.__init__(self, parent, flags=QtCore.Qt.Tool)\n self.spykewindow = parent\n ncols = self.sort.probe.ncols\n nrows = self.sort.probe.nrows\n # try and allow the same amount of horizontal space per column for 2 and 3 col probes:\n if ncols <= 2:\n self.MAINSPLITTERPOS = 300\n else:\n self.MAINSPLITTERPOS = 265 # move it more to the left\n # make horizontal sort slider use as little vertical space as possible\n self.VSPLITTERPOS = 1\n panelwidth = PANELWIDTHPERCOLUMN * ncols\n panelheight = PANELHEIGHTPERROW * nrows\n width = max(self.MAINSPLITTERPOS + panelwidth + VSCROLLBARWIDTH, MINSORTWINDOWWIDTH)\n size = (width, SORTWINDOWHEIGHT)\n self.setWindowTitle('Sort Window')\n self.move(*pos)\n self.resize(*size)\n\n self._source = None # source cluster for comparison\n self.slider = SpikeSelectionSlider(Qt.Horizontal, self)\n self.slider.setInvertedControls(True)\n self.slider.setToolTip('Position of sliding spike selection time window')\n self.connect(self.slider, QtCore.SIGNAL('valueChanged(int)'),\n self.on_slider_valueChanged)\n self.connect(self.slider, QtCore.SIGNAL('sliderPressed()'),\n self.on_slider_sliderPressed)\n\n self.nlist = NList(self)\n self.nlist.setToolTip('Neuron list')\n self.nslist = NSList(self)\n self.nslist.setToolTip('Sorted spike list')\n self.uslist = USList(self) # should really be multicolumn tableview\n self.uslist.setToolTip('Unsorted spike list')\n tw = self.spykewindow.sort.tw\n\n self.panel = SpikeSortPanel(self, tw=tw)\n self.panel.setMinimumSize(QtCore.QSize(panelwidth, panelheight))\n\n self.panelscrollarea = PanelScrollArea(self)\n self.panelscrollarea.setWidget(self.panel)\n self.panelscrollarea.setMinimumWidth(panelwidth + VSCROLLBARWIDTH)\n self.panelscrollarea.setWidgetResizable(True) # allows panel to size bigger than min\n\n self.vsplitter = QtGui.QSplitter(Qt.Vertical)\n self.vsplitter.addWidget(self.slider)\n 
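        # below the slider, stack the neuron list, the sorted spike list and the
        # unsorted spike list in the same vertical splitter: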
self.vsplitter.addWidget(self.nlist)\n self.vsplitter.addWidget(self.nslist)\n self.vsplitter.addWidget(self.uslist)\n\n self.mainsplitter = QtGui.QSplitter(Qt.Horizontal)\n self.mainsplitter.addWidget(self.vsplitter)\n self.mainsplitter.addWidget(self.panelscrollarea)\n\n self.layout = QtGui.QVBoxLayout()\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.layout.addWidget(self.mainsplitter)\n\n mainwidget = QtGui.QWidget(self)\n mainwidget.setLayout(self.layout)\n self.setCentralWidget(mainwidget)\n\n self.toolbar = self.setupToolbar()\n self.addToolBar(self.toolbar)\n\n def setupToolbar(self):\n toolbar = QtGui.QToolBar(self)\n toolbar.setObjectName('toolbar')\n toolbar.setFloatable(True)\n toolbar.setIconSize(QtCore.QSize(16, 16)) # like in main spyke window\n\n actionDelete = QAction(QIcon('res/edit-delete.svg'), 'Del', self)\n tt = ('<nobr><b>Del</b> Delete selected spikes or clusters</nobr>\\n'\n '<nobr><b>CTRL+Del</b> Delete selected spikes</nobr>')\n actionDelete.setToolTip(tt)\n self.connect(actionDelete, QtCore.SIGNAL('triggered()'),\n self.on_actionDelete_triggered)\n toolbar.addAction(actionDelete)\n\n actionMergeClusters = QAction('M', self)\n tt = '<nobr><b>M</b> Merge clusters</nobr>'\n actionMergeClusters.setToolTip(tt)\n self.connect(actionMergeClusters, QtCore.SIGNAL('triggered()'),\n self.on_actionMergeClusters_triggered)\n toolbar.addAction(actionMergeClusters)\n\n #actionToggleClustersGood = QAction(QIcon('res/dialog-apply.svg'), 'G', self)\n actionToggleClustersGood = QAction('G', self)\n tt = '<nobr><b>G</b> Toggle clusters as \"good\"</nobr>'\n actionToggleClustersGood.setToolTip(tt)\n self.connect(actionToggleClustersGood, QtCore.SIGNAL('triggered()'),\n self.on_actionToggleClustersGood_triggered)\n toolbar.addAction(actionToggleClustersGood)\n\n actionSplit = QAction('+', self)\n tt = '<nobr><b>+</b> Split off selected spikes</nobr>'\n actionSplit.setToolTip(tt)\n self.connect(actionSplit, QtCore.SIGNAL('triggered()'),\n self.on_actionSplit_triggered)\n toolbar.addAction(actionSplit)\n\n actionLabelMultiunit = QAction('-', self)\n tt = '<nobr><b>-</b> Label clusters as multiunit</nobr>'\n actionLabelMultiunit.setToolTip(tt)\n self.connect(actionLabelMultiunit, QtCore.SIGNAL('triggered()'),\n self.on_actionLabelMultiunit_triggered)\n toolbar.addAction(actionLabelMultiunit)\n\n actionChanSplitClusters = QAction('/', self)\n tt = '<nobr><b>/</b> Split clusters by channels</nobr>'\n actionChanSplitClusters.setToolTip(tt)\n self.connect(actionChanSplitClusters, QtCore.SIGNAL('triggered()'),\n self.on_actionChanSplitClusters_triggered)\n toolbar.addAction(actionChanSplitClusters)\n\n actionDensitySplit = QAction('P', self)\n tt = ('<nobr><b>P</b> Split cluster pair by density along line between '\n 'their centers</nobr>')\n actionDensitySplit.setToolTip(tt)\n self.connect(actionDensitySplit, QtCore.SIGNAL('triggered()'),\n self.on_actionDensitySplit_triggered)\n toolbar.addAction(actionDensitySplit)\n\n actionRandomSplit = QAction('\\\\', self)\n tt = ('<nobr><b>\\\\</b> Randomly split each selected cluster in half</nobr>')\n actionRandomSplit.setToolTip(tt)\n self.connect(actionRandomSplit, QtCore.SIGNAL('triggered()'),\n self.on_actionRandomSplit_triggered)\n toolbar.addAction(actionRandomSplit)\n\n #actionRenumber = QAction(QIcon('res/gtk-edit.svg'), '#', self)\n actionRenumber = QAction('#', self)\n tt = ('<nobr><b>#</b> Renumber all clusters in vertical spatial order</nobr>\\n'\n '<nobr><b>CTRL+#</b> Renumber selected cluster</nobr>')\n 
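        # renumbering reorders cluster IDs by vertical spatial position on the
        # probe, as described in the tooltip text above: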
actionRenumber.setToolTip(tt)\n self.connect(actionRenumber, QtCore.SIGNAL('triggered()'),\n self.on_actionRenumber_triggered)\n toolbar.addAction(actionRenumber)\n\n actionFind = QAction(QIcon('res/edit-find.svg'), 'Find', self)\n tt = ('<nobr><b>CTRL+F</b> Find spike in cluster plot</nobr>')\n actionFind.setToolTip(tt)\n self.connect(actionFind, QtCore.SIGNAL('triggered()'),\n self.on_actionFind_triggered)\n toolbar.addAction(actionFind)\n\n actionSelectRandomSpikes = QAction('R', self)\n tt = '<nobr><b>R</b> Select random sample of spikes of current clusters</nobr>'\n actionSelectRandomSpikes.setToolTip(tt)\n self.connect(actionSelectRandomSpikes, QtCore.SIGNAL('triggered()'),\n self.on_actionSelectRandomSpikes_triggered)\n toolbar.addAction(actionSelectRandomSpikes)\n\n actionToggleErrors = QAction('E', self)\n actionToggleErrors.setCheckable(True)\n actionToggleErrors.setChecked(self.panel.enable_fills)\n tt = '<nobr><b>CTRL+E</b> Toggle visibility of template error limits</nobr>'\n actionToggleErrors.setToolTip(tt)\n self.connect(actionToggleErrors, QtCore.SIGNAL('toggled(bool)'),\n self.on_actionToggleErrors_toggled)\n toolbar.addAction(actionToggleErrors)\n self.actionToggleErrors = actionToggleErrors\n\n nsamplesComboBox = QtGui.QComboBox(self)\n nsamplesComboBox.setToolTip('Number of spikes per cluster to randomly select')\n nsamplesComboBox.setFocusPolicy(Qt.NoFocus)\n nsamplesComboBox.addItems(['100', '50', '20', '10', '5', '1'])\n nsamplesComboBox.setCurrentIndex(2)\n toolbar.addWidget(nsamplesComboBox)\n self.connect(nsamplesComboBox, QtCore.SIGNAL('activated(int)'),\n self.on_actionSelectRandomSpikes_triggered)\n self.nsamplesComboBox = nsamplesComboBox\n\n gainComboBox = QtGui.QComboBox(self)\n gainComboBox.setToolTip('Waveform gain (default: 1.5)')\n gainComboBox.setFocusPolicy(Qt.NoFocus)\n gainComboBox.addItems(['4', '3.75', '3.5', '3.25', '3', '2.75', '2.5', '2.25', '2',\n '1.75', '1.5', '1.25', '1', '0.75', '0.5', '0.25'])\n gainComboBox.setCurrentIndex(3)\n toolbar.addWidget(gainComboBox)\n self.connect(gainComboBox, QtCore.SIGNAL('activated(int)'),\n self.on_gainComboBox_triggered)\n self.gainComboBox = gainComboBox\n\n #actionAlignMin = QAction(QIcon('res/go-bottom.svg'), 'Min', self)\n actionAlignMin = QAction('Min', self)\n actionAlignMin.setToolTip('Align selected spikes to min')\n self.connect(actionAlignMin, QtCore.SIGNAL('triggered()'),\n self.on_actionAlignMin_triggered)\n toolbar.addAction(actionAlignMin)\n\n #actionAlignMax = QAction(QIcon('res/go-top.svg'), 'Max', self)\n actionAlignMax = QAction('Max', self)\n actionAlignMax.setToolTip('Align selected spikes to max')\n self.connect(actionAlignMax, QtCore.SIGNAL('triggered()'),\n self.on_actionAlignMax_triggered)\n toolbar.addAction(actionAlignMax)\n\n #actionAlignBest = QAction(QIcon('res/emblem-OK.png'), 'Best', self)\n actionAlignBest = QAction('B', self)\n tt = '<nobr><b>B</b> Align selected spikes by best fit</nobr>'\n actionAlignBest.setToolTip(tt)\n self.connect(actionAlignBest, QtCore.SIGNAL('triggered()'),\n self.on_actionAlignBest_triggered)\n toolbar.addAction(actionAlignBest)\n\n actionShiftLeft = QAction('[', self)\n tt = ('<nobr><b>[</b> Shift selected spikes 2 points left</nobr>\\n'\n '<nobr><b>CTRL+[</b> Shift selected spikes 1 point left</nobr>')\n actionShiftLeft.setToolTip(tt)\n self.connect(actionShiftLeft, QtCore.SIGNAL('triggered()'),\n self.on_actionShiftLeft_triggered)\n toolbar.addAction(actionShiftLeft)\n\n actionShiftRight = QAction(']', self)\n tt = ('<nobr><b>]</b> Shift 
selected spikes 2 points right</nobr>\\n'\n '<nobr><b>CTRL+]</b> Shift selected spikes 1 point right</nobr>')\n actionShiftRight.setToolTip(tt)\n self.connect(actionShiftRight, QtCore.SIGNAL('triggered()'),\n self.on_actionShiftRight_triggered)\n toolbar.addAction(actionShiftRight)\n\n incltComboBox = QtGui.QComboBox(self)\n incltComboBox.setToolTip(\"Waveform duration (us) to include for component \"\n \"analysis,\\nasymmetric around spike time\")\n incltComboBox.setFocusPolicy(Qt.NoFocus)\n dtw = self.sort.tw[1] - self.sort.tw[0] # spike time window width\n incltstep = intround(dtw / 10) # evenly spaced inclt values\n incltvals = np.arange(dtw, 0, -incltstep)\n incltComboBox.addItems([ str(incltval) for incltval in incltvals ])\n incltComboBox.setCurrentIndex(0)\n toolbar.addWidget(incltComboBox)\n self.connect(incltComboBox, QtCore.SIGNAL('activated(int)'),\n self.on_incltComboBox_triggered)\n self.incltComboBox = incltComboBox\n #incltunitsLabel = QtGui.QLabel('us', self)\n #toolbar.addWidget(incltunitsLabel)\n\n nPCsPerChanSpinBox = QtGui.QSpinBox(self)\n nPCsPerChanSpinBox.setToolTip(\"Number of PCs to use per channel to feed into ICA\")\n nPCsPerChanSpinBox.setFocusPolicy(Qt.NoFocus)\n toolbar.addWidget(nPCsPerChanSpinBox)\n nPCsPerChanSpinBox.setMinimum(1)\n self.connect(nPCsPerChanSpinBox, QtCore.SIGNAL('valueChanged(int)'),\n self.on_nPCsPerChanSpinBox_valueChanged)\n nPCsPerChanSpinBox.setValue(self.sort.npcsperchan)\n self.nPCsPerChanSpinBox = nPCsPerChanSpinBox\n\n #actionFindPrevMostSimilar = QAction(QIcon('res/go-previous.svg'), '<', self)\n actionFindPrevMostSimilar = QAction('<', self)\n tt = '<nobr><b><</b> Find previous most similar cluster</nobr>'\n actionFindPrevMostSimilar.setToolTip(tt)\n self.connect(actionFindPrevMostSimilar, QtCore.SIGNAL('triggered()'),\n self.on_actionFindPrevMostSimilar_triggered)\n toolbar.addAction(actionFindPrevMostSimilar)\n\n #actionFindNextMostSimilar = QAction(QIcon('res/go-next.svg'), '>', self)\n actionFindNextMostSimilar = QAction('>', self)\n tt = '<nobr><b>></b> Find next most similar cluster</nobr>'\n actionFindNextMostSimilar.setToolTip(tt)\n self.connect(actionFindNextMostSimilar, QtCore.SIGNAL('triggered()'),\n self.on_actionFindNextMostSimilar_triggered)\n toolbar.addAction(actionFindNextMostSimilar)\n\n actionReloadSpikes = QAction(QIcon('res/view-refresh.svg'), 'Reload', self)\n tt = ('<nobr><b>F5</b> Reload waveforms of selected spikes. '\n 'If none selected, reload all</nobr>\\n'\n '<nobr><b>CTRL+F5</b> Use mean waveform to choose chans to reload</nobr>')\n actionReloadSpikes.setToolTip(tt)\n self.connect(actionReloadSpikes, QtCore.SIGNAL('triggered()'),\n self.on_actionReloadSpikes_triggered)\n toolbar.addAction(actionReloadSpikes)\n\n actionSave = QAction(QIcon('res/document-save.svg'), '&Save', self)\n actionSave.setToolTip('Save sort panel to file')\n self.connect(actionSave, QtCore.SIGNAL('triggered()'),\n self.on_actionSave_triggered)\n toolbar.addAction(actionSave)\n\n return toolbar\n\n def get_sort(self):\n return self.spykewindow.sort\n\n sort = property(get_sort) # make this a property for proper behaviour after unpickling\n\n def closeEvent(self, event):\n self.spykewindow.HideWindow('Sort')\n\n def mousePressEvent(self, event):\n \"\"\"These are mostly passed on up from spyke list views and sort panel. 
Left\n clicks are (or should be) filtered out\"\"\"\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #self.on_actionSelectRandomSpikes_triggered()\n self.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist\n elif buttons == QtCore.Qt.RightButton:\n self.clear()\n\n def keyPressEvent(self, event):\n \"\"\"Alpha character keypresses are by default caught by the child lists for quickly\n scrolling down to and selecting list items. However, the appropriate alpha\n keypresses have been set in the child lists to be ignored, so they propagate\n up to here\"\"\"\n key = event.key()\n modifiers = event.modifiers()\n ctrl = modifiers & Qt.ControlModifier # ctrl is down\n spw = self.spykewindow\n if key == Qt.Key_A: # ignored in SpykeListViews\n spw.ui.plotButton.click() # same as hitting ENTER in nslist\n elif key == Qt.Key_X: # ignored in SpykeListViews\n spw.ui.plotXcorrsButton.click()\n elif key == Qt.Key_N: # ignored in SpykeListViews\n spw.ui.normButton.click()\n elif key == Qt.Key_Escape: # deselect all spikes and all clusters\n self.clear()\n elif key == Qt.Key_Delete:\n self.on_actionDelete_triggered()\n elif key == Qt.Key_M: # ignored in SpykeListViews\n self.on_actionMergeClusters_triggered()\n elif key == Qt.Key_G: # ignored in SpykeListViews\n self.on_actionToggleClustersGood_triggered()\n elif key == Qt.Key_Equal: # ignored in SpykeListViews\n self.on_actionSplit_triggered()\n elif key == Qt.Key_Minus: # ignored in SpykeListViews\n self.on_actionLabelMultiunit_triggered()\n elif key == Qt.Key_Slash: # ignored in SpykeListViews\n self.on_actionChanSplitClusters_triggered()\n elif key == Qt.Key_P: # ignored in SpykeListViews\n self.on_actionDensitySplit_triggered()\n elif key == Qt.Key_Backslash: # ignored in SpykeListViews\n self.on_actionRandomSplit_triggered()\n elif key == Qt.Key_NumberSign: # ignored in SpykeListViews\n self.on_actionRenumber_triggered()\n elif key == Qt.Key_F: # ignored in SpykeListViews\n if ctrl:\n self.FindSpike()\n else:\n self.FindCluster()\n elif key == Qt.Key_R: # ignored in SpykeListViews\n self.on_actionSelectRandomSpikes_triggered()\n elif key == Qt.Key_Space: # ignored in SpykeListViews\n if ctrl:\n SpykeToolWindow.keyPressEvent(self, event) # pass it on\n else:\n spw.on_clusterButton_clicked()\n elif key == Qt.Key_B: # ignored in SpykeListViews\n self.on_actionAlignBest_triggered()\n elif key == Qt.Key_BracketLeft: # ignored in SpykeListViews\n self.on_actionShiftLeft_triggered()\n elif key == Qt.Key_BracketRight: # ignored in SpykeListViews\n self.on_actionShiftRight_triggered()\n elif key == Qt.Key_Comma: # ignored in SpykeListViews\n self.on_actionFindPrevMostSimilar_triggered()\n elif key == Qt.Key_Period: # ignored in SpykeListViews\n self.on_actionFindNextMostSimilar_triggered()\n elif key == Qt.Key_F5: # ignored in SpykeListViews\n self.on_actionReloadSpikes_triggered()\n elif key == Qt.Key_E: # ignored in SpykeListViews\n if ctrl:\n self.actionToggleErrors.toggle()\n else:\n self.clear() # E is synonymous with ESC\n elif key == Qt.Key_C: # toggle between PCA and ICA, ignored in SpykeListViews\n c = str(spw.ui.componentAnalysisComboBox.currentText())\n if c == 'PCA':\n index = spw.ui.componentAnalysisComboBox.findText('ICA')\n spw.ui.componentAnalysisComboBox.setCurrentIndex(index)\n elif c == 'ICA':\n index = spw.ui.componentAnalysisComboBox.findText('PCA')\n spw.ui.componentAnalysisComboBox.setCurrentIndex(index)\n spw.on_plotButton_clicked()\n elif key == Qt.Key_T: # toggle plotting against time, ignored in 
SpykeListViews\n z = str(spw.ui.zDimComboBox.currentText())\n if z == 't':\n spw.on_c0c1c2Button_clicked() # plot in pure component analysis space\n else:\n spw.on_c0c1tButton_clicked() # plot against time\n elif key == Qt.Key_W: # toggle plotting against RMSError, ignored in SpykeListViews\n z = str(spw.ui.zDimComboBox.currentText())\n if z == 'RMSerror':\n spw.on_c0c1c2Button_clicked() # plot in pure component analysis space\n else:\n spw.ui.zDimComboBox.setCurrentIndex(3)\n spw.on_plotButton_clicked() # plot against RMSError\n elif key in [Qt.Key_Enter, Qt.Key_Return]:\n # this is handled at a lower level by on_actionItem_triggered\n # in the various listview controls\n pass\n else:\n SpykeToolWindow.keyPressEvent(self, event) # pass it on\n\n def clear(self):\n \"\"\"Clear selections in this order: unsorted spikes, sorted spikes,\n cluster automatically selected for comparison, cluster 0, clusters\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n if len(self.uslist.selectedIndexes()) > 0:\n self.uslist.clearSelection()\n elif self.nslist.nrowsSelected > 0:\n self.nslist.clearSelection()\n elif len(clusters) == 2 and self._source in clusters:\n clusters.remove(self._source)\n spw.SelectClusters(clusters, on=False)\n elif 0 in spw.GetClusterIDs():\n for cluster in spw.GetClusters():\n if cluster.id == 0:\n spw.SelectClusters([cluster], on=False)\n break\n else:\n self.nlist.clearSelection()\n # reset colours in cluster plot:\n gw = spw.windows['Cluster'].glWidget\n gw.colour()\n gw.updateGL()\n\n def on_actionDelete_triggered(self):\n \"\"\"Delete explicitly selected spikes, or clusters\"\"\"\n selsids = self.spykewindow.GetSpikes() # IDs of explicitly selected spikes\n nselsids = len(selsids)\n if (QApplication.instance().keyboardModifiers() & Qt.ControlModifier\n or nselsids > 0):\n self.delete_spikes()\n else:\n self.delete_clusters()\n\n def delete_clusters(self):\n \"\"\"Del button press/click\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n s = self.sort\n spikes = s.spikes\n sids = []\n for cluster in clusters:\n sids.append(cluster.neuron.sids)\n sids = np.concatenate(sids)\n\n # save some undo/redo stuff\n message = 'delete clusters %r' % [ c.id for c in clusters ]\n cc = ClusterChange(sids, spikes, message)\n cc.save_old(clusters, s.norder, s.good)\n\n # deselect and delete clusters\n spw.DelClusters(clusters)\n if len(s.clusters) > 0:\n # select cluster that replaces the first of the deleted clusters in norder\n selrows = [ cc.oldnorder.index(oldunid) for oldunid in cc.oldunids ]\n if len(selrows) > 0:\n selrow = selrows[0]\n nlist = spw.windows['Sort'].nlist\n nlist.selectRows(selrow) # TODO: this sets selection, but not focus\n #else: # first of deleted clusters was last in norder, don't select anything\n\n # save more undo/redo stuff\n newclusters = []\n cc.save_new(newclusters, s.norder, s.good)\n spw.AddClusterChangeToStack(cc)\n print(cc.message)\n\n def delete_spikes(self):\n \"\"\"CTRL+Del button press/click\"\"\"\n self.spykewindow.SplitSpikes(delete=True)\n\n def on_actionSplit_triggered(self):\n \"\"\"+ button click. Split off selected clusters into their own cluster\"\"\"\n self.spykewindow.SplitSpikes(delete=False)\n\n def on_actionMergeClusters_triggered(self):\n \"\"\"Merge button (M) click. Merge selected clusters. 
Easier to use than\n running gac() on selected clusters using a really big sigma to force\n them to all merge\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n s = self.sort\n spikes = s.spikes\n sids = [] # spikes to merge\n for cluster in clusters:\n sids.append(cluster.neuron.sids)\n # merge any selected usids as well\n sids.append(spw.GetUnsortedSpikes())\n sids = np.concatenate(sids)\n if len(sids) == 0:\n return\n\n # save some undo/redo stuff\n message = 'merge clusters %r' % [ c.id for c in clusters ]\n cc = ClusterChange(sids, spikes, message)\n cc.save_old(clusters, s.norder, s.good)\n\n # decide on newnid and where to insert it into norder\n newnid = None # merge by default into a new highest numbered nid\n inserti = None # order new cluster by default to end of nlist\n if len(clusters) == 1:\n # keep same position of this one nid in norder, regardless of whether it's\n # single-unit, multiunit, or junk\n inserti = s.norder.index(clusters[0].id)\n elif len(clusters) > 1:\n oldunids = np.asarray(cc.oldunids)\n suids = oldunids[oldunids > 0] # selected single unit nids\n if len(suids) > 0: # merge into largest selected single unit nid:\n spikecounts = np.asarray([ s.neurons[suid].nspikes for suid in suids ])\n newnid = suids[spikecounts.argmax()]\n inserti = s.norder.index(newnid)\n # correct for shift due to deletion of oldunids that precede newnid in norder:\n inserti -= sum([ s.norder.index(oldunid) < inserti for oldunid in oldunids])\n\n # delete selected clusters and deselect selected usids\n spw.DelClusters(clusters, update=False)\n self.uslist.clearSelection()\n\n # create new cluster\n #t0 = time.time()\n newcluster = spw.CreateCluster(update=False, id=newnid, inserti=inserti)\n neuron = newcluster.neuron\n self.MoveSpikes2Neuron(sids, neuron, update=False)\n plotdims = spw.GetClusterPlotDims()\n newcluster.update_pos()\n\n # save more undo/redo stuff\n cc.save_new([newcluster], s.norder, s.good)\n spw.AddClusterChangeToStack(cc)\n\n # now do some final updates\n spw.UpdateClustersGUI()\n spw.ColourPoints(newcluster)\n #print('applying clusters to plot took %.3f sec' % (time.time()-t0))\n # select newly created cluster\n spw.SelectClusters(newcluster)\n cc.message += ' into cluster %d' % newcluster.id\n print(cc.message)\n\n def on_actionToggleClustersGood_triggered(self):\n \"\"\"'Good' button (G) click. Toggle 'good' flag of all selected clusters\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n cids = []\n for cluster in clusters:\n cluster.neuron.good = not cluster.neuron.good\n cids.append(cluster.id)\n self.nlist.updateAll() # nlist item colouring will change as a result\n print(\"Toggled 'good' flag of clusters %r\" % cids)\n\n def on_actionLabelMultiunit_triggered(self):\n \"\"\"- button click. 
Label all selected clusters as multiunit by deleting them\n and creating new ones with -ve IDs\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n s = self.sort\n spikes = s.spikes\n # only relabel single unit clusters:\n clusters = [ cluster for cluster in clusters if cluster.id > 0 ]\n if len(clusters) == 0:\n return\n sids = []\n for cluster in clusters:\n sids.append(cluster.neuron.sids)\n sids = np.concatenate(sids)\n\n # save some undo/redo stuff\n message = 'label as multiunit clusters %r' % [ c.id for c in clusters ]\n cc = ClusterChange(sids, spikes, message)\n cc.save_old(clusters, s.norder, s.good)\n\n # delete old clusters\n inserti = s.norder.index(clusters[0].id)\n # collect cluster sids before cluster deletion\n sidss = [ cluster.neuron.sids for cluster in clusters ]\n spw.DelClusters(clusters, update=False)\n\n # create new multiunit clusters\n newclusters = []\n for sids in sidss:\n muid = s.get_nextmuid()\n newcluster = spw.CreateCluster(update=False, id=muid, inserti=inserti)\n neuron = newcluster.neuron\n self.MoveSpikes2Neuron(sids, neuron, update=False)\n newcluster.update_pos()\n newclusters.append(newcluster)\n inserti += 1\n\n # select newly labelled multiunit clusters\n spw.SelectClusters(newclusters)\n\n # save more undo/redo stuff\n cc.save_new(newclusters, s.norder, s.good)\n spw.AddClusterChangeToStack(cc)\n print(cc.message)\n\n def on_actionChanSplitClusters_triggered(self):\n \"\"\"Split by channels button (/) click\"\"\"\n ## TODO: make sure this works on .srf files! Why was chancombosplit being used?\n self.spykewindow.maxchansplit()\n #self.spykewindow.chancombosplit()\n\n def on_actionDensitySplit_triggered(self):\n \"\"\"Split cluster pair by density along line between their centers\"\"\"\n self.spykewindow.densitysplit()\n\n def on_actionRandomSplit_triggered(self):\n \"\"\"Randomly split each selected cluster in half\"\"\"\n self.spykewindow.randomsplit()\n\n def on_actionRenumber_triggered(self):\n if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:\n self.renumber_selected_cluster()\n else:\n self.renumber_all_clusters()\n\n def renumber_selected_cluster(self):\n \"\"\"Renumber a single selected cluster to whatever free ID the user wants, for\n colouring purposes\"\"\"\n spw = self.spykewindow\n s = self.sort\n spikes = s.spikes\n\n cluster = spw.GetCluster() # exactly one selected cluster\n oldid = cluster.id\n newid = max(s.norder) + 1\n newid, ok = QtGui.QInputDialog.getInt(self, \"Renumber cluster\",\n \"This will clear the undo/redo stack, and is not undoable.\\n\"\n \"Enter new ID:\", value=newid)\n if not ok:\n return\n if newid in s.norder:\n print(\"Choose a non-existing nid to renumber to\")\n return\n # deselect cluster\n spw.SelectClusters(cluster, on=False)\n\n # rename to newid\n cluster.id = newid # this indirectly updates neuron.id\n # update cluster and neuron dicts, and spikes array\n s.clusters[newid] = cluster\n s.neurons[newid] = cluster.neuron\n sids = cluster.neuron.sids\n spikes['nid'][sids] = newid\n # remove duplicate oldid dict entries\n del s.clusters[oldid]\n del s.neurons[oldid]\n # replace oldid with newid in norder\n s.norder[s.norder.index(oldid)] = newid\n # update colour of any relevant points in cluster plot\n spw.ColourPoints(cluster)\n # reselect cluster\n spw.SelectClusters(cluster)\n # some cluster changes in stack may no longer be applicable, reset cchanges\n del spw.cchanges[:]\n spw.cci = -1\n print('Renumbered neuron %d to %d' % (oldid, newid))\n\n def 
renumber_all_clusters(self):\n \"\"\"Renumber single unit clusters consecutively from 1, ordered by y position. Do the\n same for multiunit (-ve number) clusters, starting from -1. Sorting by y position\n makes user inspection of clusters more orderly, makes the presence of duplicate\n clusters more obvious, and allows for maximal spatial separation between clusters of\n the same colour, reducing colour conflicts\"\"\"\n val = QtGui.QMessageBox.question(self.panel, \"Renumber all clusters\",\n \"Are you sure? This will clear the undo/redo stack, and is not undoable.\",\n QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)\n if val == QtGui.QMessageBox.No:\n return\n\n spw = self.spykewindow\n s = self.sort\n spikes = s.spikes\n\n # get spatially and numerically ordered lists of new ids\n oldids = np.asarray(s.norder)\n oldsuids = oldids[oldids > 0]\n oldmuids = oldids[oldids < 0]\n # this is a bit confusing: find indices that would sort old ids by y pos, but then\n # what you really want is to find the y pos *rank* of each old id, so you need to\n # take argsort again:\n newsuids = np.asarray([ s.clusters[cid].pos['y0']\n for cid in oldsuids ]).argsort().argsort() + 1\n newmuids = np.asarray([ s.clusters[cid].pos['y0']\n for cid in oldmuids ]).argsort().argsort() + 1\n newmuids = -newmuids\n # multiunit, followed by single unit, no 0 junk cluster. Can't seem to do it the other\n # way around as of Qt 4.7.2 - it seems QListViews don't like having a -ve value in\n # the last entry. Doing so causes all 2 digit values in the list to become blank,\n # suggests a spacing calculation bug. Reproduce by making last entry multiunit,\n # undoing then redoing. Actually, maybe the bug is it doesn't like having a number\n # in the last entry with fewer digits than the preceding entry. 
Only seems to be a\n # problem when setting self.setUniformItemSizes(True).\n newids = np.concatenate([newmuids, newsuids])\n\n # test\n if np.all(oldids == newids):\n print('Nothing to renumber: cluster IDs already ordered in y0 and contiguous')\n return\n # update for replacing oldids with newids\n oldids = np.concatenate([oldmuids, oldsuids])\n\n # deselect current selections\n selclusters = spw.GetClusters()\n oldselids = [ cluster.id for cluster in selclusters ]\n spw.SelectClusters(selclusters, on=False)\n\n # delete junk cluster, if it exists\n if 0 in s.clusters:\n s.remove_neuron(0)\n print('Deleted junk cluster 0')\n if 0 in oldselids:\n oldselids.remove(0)\n\n # replace old ids with new ids\n cw = spw.windows['Cluster']\n oldclusters = s.clusters.copy() # no need to deepcopy, just copy refs, not clusters\n dims = spw.GetClusterPlotDims()\n for oldid, newid in zip(oldids, newids):\n newid = int(newid) # keep as Python int, not numpy int\n if oldid == newid:\n continue # no need to waste time removing and recreating this cluster\n # change all occurrences of oldid to newid\n cluster = oldclusters[oldid]\n cluster.id = newid # this indirectly updates neuron.id\n # update cluster and neuron dicts\n s.clusters[newid] = cluster\n s.neurons[newid] = cluster.neuron\n sids = cluster.neuron.sids\n spikes['nid'][sids] = newid\n\n # remove any orphaned cluster ids\n for oldid in oldids:\n if oldid not in newids:\n del s.clusters[oldid]\n del s.neurons[oldid]\n\n # reset norder\n s.norder = []\n s.norder.extend(sorted([ int(newid) for newid in newmuids ])[::-1])\n s.norder.extend(sorted([ int(newid) for newid in newsuids ]))\n\n # now do some final updates\n spw.UpdateClustersGUI()\n spw.ColourPoints(s.clusters.values())\n # reselect the previously selected (but now renumbered) clusters,\n # helps user keep track\n oldiis = [ list(oldids).index(oldselid) for oldselid in oldselids ]\n newselids = newids[oldiis]\n spw.SelectClusters([s.clusters[cid] for cid in newselids])\n # all cluster changes in stack are no longer applicable, reset cchanges\n del spw.cchanges[:]\n spw.cci = -1\n print('Renumbering complete')\n\n def on_actionFind_triggered(self):\n \"\"\"Find current cluster or spike\"\"\"\n ctrl = QApplication.instance().keyboardModifiers() & Qt.ControlModifier\n if ctrl:\n self.FindSpike()\n else:\n self.FindCluster()\n\n def FindCluster(self):\n \"\"\"Move focus to location of currently selected (single) cluster\"\"\"\n spw = self.spykewindow\n try:\n cluster = spw.GetCluster()\n except RuntimeError as err:\n print(err)\n return\n gw = spw.windows['Cluster'].glWidget\n dims = spw.GetClusterPlotDims()\n gw.focus = np.float32([ cluster.normpos[dim] for dim in dims ])\n gw.panTo() # pan to new focus\n gw.updateGL()\n\n def FindSpike(self):\n \"\"\"Move focus to location of currently selected (single) spike\"\"\"\n spw = self.spykewindow\n try:\n sid = spw.GetSpike()\n except RuntimeError as err:\n print(err)\n return\n gw = spw.windows['Cluster'].glWidget\n pointis = gw.sids.searchsorted(sid)\n gw.focus = gw.points[pointis]\n gw.panTo() # pan to new focus\n gw.updateGL()\n\n def on_actionSelectRandomSpikes_triggered(self):\n \"\"\"Select random sample of spikes in current cluster(s), or random sample\n of unsorted spikes if no cluster(s) selected\"\"\"\n nsamples = int(self.nsamplesComboBox.currentText())\n if len(self.nslist.neurons) > 0:\n slist = self.nslist\n else:\n slist = self.uslist\n slist.clearSelection() # emits selectionChanged signal, .reset() doesn't\n 
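# added comment: slist is nslist if any neurons are selected, else uslist;\n # NSList.selectRandom() samples up to nsamples rows per selected neuron,\n # USList.selectRandom() samples from all unsorted spikes (see the\n # NSList/USList classes below)\n 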
slist.selectRandom(nsamples)\n\n def on_gainComboBox_triggered(self):\n \"\"\"Set gain of panel based on gainComboBox selection\"\"\"\n panel = self.panel\n panel.gain = float(self.gainComboBox.currentText())\n panel.do_layout() # resets axes lims and recalcs panel.pos\n panel._update_scale()\n panel.draw_refs()\n panel.updateAllItems()\n\n def on_actionAlignMin_triggered(self):\n self.Align('min')\n\n def on_actionAlignMax_triggered(self):\n self.Align('max')\n\n def on_actionAlignBest_triggered(self):\n self.Align('best')\n\n def on_actionShiftLeft_triggered(self):\n if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:\n nt = -1\n else:\n nt = -2\n self.Shift(nt)\n \n def on_actionShiftRight_triggered(self): \n if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:\n nt = 1\n else:\n nt = 2\n self.Shift(nt)\n\n def on_incltComboBox_triggered(self):\n \"\"\"Change length of chan selection lines, optionally trigger cluster replot\"\"\"\n self.panel.update_selvrefs()\n self.panel.draw_refs()\n #self.spykewindow.ui.plotButton.click()\n\n def get_inclt(self):\n \"\"\"Return inclt value in incltComboBox\"\"\"\n return float(self.incltComboBox.currentText()) # us\n\n inclt = property(get_inclt)\n\n def get_tis(self):\n \"\"\"Return tis (start and end timepoint indices) of duration inclt, asymmetric around\n t=0 spike time. Note that any changes to the code here should also be made in the\n timepoint selection display code in SortPanel.update_selvrefs()\"\"\"\n s = self.sort\n inclt = self.inclt # duration to include, asymmetric around t=0 spike time (us)\n tw = self.panel.tw\n dtw = tw[1] - tw[0] # spike time window width\n left = intround(abs(tw[0]) / dtw * inclt) # left fraction wrt t=0 spike time\n right = inclt - left # right fraction wrt t=0 spike time\n tis = s.twts.searchsorted([-left, right])\n return tis\n\n tis = property(get_tis)\n\n def on_nPCsPerChanSpinBox_valueChanged(self, val):\n self.sort.npcsperchan = val\n\n def on_actionReloadSpikes_triggered(self):\n spw = self.spykewindow\n sids = spw.GetAllSpikes()\n sort = self.sort\n if len(sids) == 0:\n # if no spikes specified, reload all spikes\n sids = sort.spikes['id']\n usemeanchans = False\n if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:\n usemeanchans = True\n sort.reload_spikes_and_templates(sids, usemeanchans=usemeanchans)\n # add sids to the set of dirtysids to be resaved to .wave file:\n spw.update_dirtysids(sids)\n # auto-refresh all plots:\n self.panel.updateAllItems()\n\n def on_actionFindPrevMostSimilar_triggered(self):\n self.findMostSimilarCluster('previous')\n\n def on_actionFindNextMostSimilar_triggered(self):\n self.findMostSimilarCluster('next')\n\n def on_actionToggleErrors_toggled(self, checked):\n self.panel.showFills(checked)\n\n def on_slider_valueChanged(self, slideri):\n self.nslist.clearSelection() # emits selectionChanged signal, .reset() doesn't\n if self.nslist.model().sliding == False:\n self.nslist.model().sids.sort() # change from nid order to sid order\n self.nslist.updateAll() # update to reflect new ordering\n self.nslist.model().sliding = True\n nsamples = int(self.nsamplesComboBox.currentText())\n rows = np.arange(slideri, slideri+nsamples)\n self.nslist.selectRows(rows)\n\n def on_slider_sliderPressed(self):\n \"\"\"Make slider click (without movement) highlight the first nsamples\n or fewer spikes when slider is at 0 position\"\"\"\n slideri = self.slider.value()\n if slideri == 0:\n nsamples = int(self.nsamplesComboBox.currentText())\n 
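# added comment: clamp the sample size to the number of spikes currently\n # held by the nslist model\n 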
nsamples = min(nsamples, self.nslist.model().nspikes)\n rows = np.arange(nsamples)\n self.nslist.selectRows(rows)\n\n def update_slider(self):\n \"\"\"Update slider limits and step sizes\"\"\"\n nsamples = int(self.nsamplesComboBox.currentText())\n nsids = len(self.nslist.sids)\n ulim = max(nsids-nsamples, 1) # upper limit\n self.slider.setRange(0, ulim)\n self.slider.setSingleStep(1)\n self.slider.setPageStep(nsamples)\n\n def findMostSimilarCluster(self, which='next'):\n \"\"\"If no chans selected, compare source to next or previous most similar cluster\n based on chans the two have in common, while requiring the two have each others'\n max chans in common. If chans have been selected, use them as a starting set of\n chans to compare on. Also, use only the timepoint range selected in incltComboBox\"\"\"\n try:\n source = self.getClusterComparisonSource()\n except RuntimeError as err:\n print(err)\n return\n destinations = list(self.sort.clusters.values())\n destinations.remove(source)\n selchans = np.sort(self.panel.chans_selected)\n if len(selchans) > 0:\n srcchans = np.intersect1d(source.neuron.wave.chans, selchans)\n if len(srcchans) == 0:\n print(\"Source cluster doesn't overlap with selected chans\")\n return\n else:\n srcchans = source.neuron.wave.chans\n\n if self.spykewindow.ui.normButton.isChecked():\n print(\"NOTE: findMostSimilarCluster() doesn't currently take spike amplitude \"\n \"normalization into account. To see the true amplitudes used to compare \"\n \"neuron pairs, turn off normalization\")\n\n errors = []\n dests = []\n t0i, t1i = self.tis # timepoint range selected in incltComboBox\n # try and compare source neuron waveform to all destination neuron waveforms\n for dest in destinations:\n if dest.neuron.wave.data is None: # hasn't been calculated yet\n dest.neuron.update_wave()\n dstchans = dest.neuron.wave.chans\n if len(selchans) > 0:\n if not set(selchans).issubset(dstchans):\n continue\n dstchans = selchans\n cmpchans = np.intersect1d(srcchans, dstchans)\n if len(cmpchans) == 0: # not comparable\n continue\n # ensure maxchan of both source and dest neuron are both in cmpchans\n if source.neuron.chan not in cmpchans or dest.neuron.chan not in cmpchans:\n continue\n srcwavedata = source.neuron.wave[cmpchans].data[:, t0i:t1i]\n dstwavedata = dest.neuron.wave[cmpchans].data[:, t0i:t1i]\n error = core.rms(srcwavedata - dstwavedata)\n errors.append(error)\n dests.append(dest)\n if len(errors) == 0:\n print(\"No sufficiently overlapping clusters on selected chans to compare to\")\n return\n errors = np.asarray(errors)\n dests = np.asarray(dests)\n desterrsortis = errors.argsort()\n\n if which == 'next':\n self._cmpid += 1\n elif which == 'previous':\n self._cmpid -= 1\n else: raise ValueError('Unknown which: %r' % which)\n self._cmpid = max(self._cmpid, 0)\n self._cmpid = min(self._cmpid, len(dests)-1)\n\n dest = dests[desterrsortis][self._cmpid]\n self.spykewindow.SelectClusters(dest)\n desterr = errors[desterrsortis][self._cmpid]\n print('n%d to n%d rmserror: %.2f uV' %\n (source.id, dest.id, self.sort.converter.AD2uV(desterr)))\n\n def getClusterComparisonSource(self):\n selclusters = self.spykewindow.GetClusters()\n errmsg = 'unclear which cluster to use as source for comparison'\n if len(selclusters) == 1:\n source = selclusters[0]\n self._source = source\n self._cmpid = -1 # init/reset\n elif len(selclusters) == 2:\n source = self._source\n if source not in selclusters:\n raise RuntimeError(errmsg)\n # deselect old destination cluster:\n 
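# (added) i.e. the non-source cluster that findMostSimilarCluster()\n # auto-selected as the previous comparison destination\n 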
selclusters.remove(source)\n self.spykewindow.SelectClusters(selclusters, on=False)\n else:\n self._source = None # reset for tidiness\n raise RuntimeError(errmsg)\n return source\n\n def Shift(self, nt):\n \"\"\"Shift selected sids by nt timepoints\"\"\"\n s = self.sort\n spikes = s.spikes\n spw = self.spykewindow\n sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))\n self.sort.shift(sids, nt)\n print('Shifted %d spikes by %d timepoints' % (len(sids), nt))\n unids = np.unique(spikes['nid'][sids])\n neurons = [ s.neurons[nid] for nid in unids ]\n for neuron in neurons:\n neuron.update_wave() # update affected mean waveforms\n # add dirtysids to the set to be resaved to .wave file:\n spw.update_dirtysids(sids)\n # auto-refresh all plots\n self.panel.updateAllItems()\n\n def Align(self, to):\n \"\"\"Align all implicitly selected spikes to min or max, or best fit\n on selected chans\"\"\" \n s = self.sort\n spikes = s.spikes\n spw = self.spykewindow\n sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))\n if to == 'best':\n tis = self.tis\n # find which chans are common to all sids:\n commonchans = s.get_common_chans(sids)[0]\n # check selected chans\n selchans = spw.get_selchans(sids)\n for selchan in selchans:\n if selchan not in commonchans:\n print(\"Chan %d not common to all spikes, pick from %r\"\n % (selchan, list(commonchans)))\n return\n print('Best fit aligning %d spikes between tis=%r on chans=%r' %\n (len(sids), list(tis), selchans))\n # numpy implementation:\n #dirtysids = s.alignbest(sids, tis, selchans)\n # cython implementation:\n dirtysids = util.alignbest_cy(s, sids, tis, np.int64(selchans))\n else: # to in ['min', 'max']\n print('Aligning %d spikes to %s' % (len(sids), to))\n dirtysids = s.alignminmax(sids, to)\n paligned = len(dirtysids) / len(sids) * 100\n print('Aligned %d/%d (%.1f%%) spikes' % (len(dirtysids), len(sids), paligned))\n unids = np.unique(spikes['nid'][dirtysids])\n neurons = [ s.neurons[nid] for nid in unids ]\n for neuron in neurons:\n neuron.update_wave() # update affected mean waveforms\n # add dirtysids to the set to be resaved to .wave file:\n spw.update_dirtysids(dirtysids)\n # auto-refresh all plots:\n self.panel.updateAllItems()\n\n def RemoveNeuron(self, neuron, update=True):\n \"\"\"Remove neuron and all its spikes from the GUI and the Sort\"\"\"\n self.MoveSpikes2List(neuron, neuron.sids, update=update)\n self.sort.remove_neuron(neuron.id)\n if update:\n self.nlist.updateAll()\n\n def MoveSpikes2Neuron(self, sids, neuron=None, update=True):\n \"\"\"Assign spikes from sort.spikes to a neuron, and trigger eventual update of\n mean wave. 
If neuron is None, create a new one\"\"\"\n sids = toiter(sids)\n spikes = self.sort.spikes\n if neuron == None:\n neuron = self.sort.create_neuron()\n neuron.sids = np.union1d(neuron.sids, sids) # update\n spikes['nid'][sids] = neuron.id\n if update:\n self.sort.update_usids()\n self.uslist.updateAll()\n if neuron in self.nslist.neurons:\n self.nslist.neurons = self.nslist.neurons # trigger nslist refresh\n # TODO: selection doesn't seem to be working, always jumps to top of list\n #self.uslist.Select(row) # automatically select the new item at that position\n neuron.wave.data = None # trigger template mean update\n return neuron\n\n def MoveSpikes2List(self, neuron, sids, update=True):\n \"\"\"Move spikes from a neuron back to the unsorted spike list control\"\"\"\n sids = toiter(sids)\n if len(sids) == 0:\n return # nothing to do\n spikes = self.sort.spikes\n neuron.sids = np.setdiff1d(neuron.sids, sids) # return what's in 1st arr and not in 2nd\n spikes['nid'][sids] = 0 # unbind neuron id of sids in spikes struct array\n if update:\n self.sort.update_usids()\n self.uslist.updateAll()\n # this only makes sense if the neuron is currently selected in the nlist:\n if neuron in self.nslist.neurons:\n self.nslist.neurons = self.nslist.neurons # this triggers a refresh\n neuron.wave.data = None # triggers an update when it's actually needed\n\n def PlotClusterHistogram(self, X, nids):\n \"\"\"Plot histogram of given clusters along a single dimension. If two clusters are\n given, project them onto axis connecting their centers, and calculate separation\n indices between them. Otherwise, plot the distribution of all given clusters\n (up to a limit) along the first dimension in X.\"\"\"\n spw = self.spykewindow\n mplw = spw.OpenWindow('MPL')\n unids = np.unique(nids) # each unid corresponds to a cluster, except possibly unid 0\n nclusters = len(unids)\n if nclusters == 0:\n mplw.ax.clear()\n mplw.figurecanvas.draw()\n print(\"No spikes selected\")\n return\n elif nclusters > 5: # to prevent slowdowns, don't plot too many\n mplw.ax.clear()\n mplw.figurecanvas.draw()\n print(\"Too many clusters selected for cluster histogram\")\n return\n elif nclusters == 2:\n calc_measures = True\n else:\n calc_measures = False\n projdimi = 0\n\n ndims = X.shape[1]\n points = [] # list of projection of each cluster's points onto dimi\n for unid in unids:\n sidis, = np.where(nids == unid)\n # don't seem to need contig points for NDsepmetric, no need for copy:\n points.append(X[sidis])\n #points.append(np.ascontiguousarray(X[sidis]))\n if calc_measures:\n t0 = time.time()\n NDsep = util.NDsepmetric(*points, Nmax=20000)\n print('NDsep calc took %.3f sec' % (time.time()-t0))\n # centers of both clusters, use median:\n c0 = np.median(points[0], axis=0) # ndims vector\n c1 = np.median(points[1], axis=0)\n # line connecting the centers of the two clusters, wrt c0\n line = c1-c0\n line /= np.linalg.norm(line) # make it unit length\n #print('c0=%r, c1=%r, line=%r' % (c0, c1, line))\n else:\n line = np.zeros(ndims)\n line[projdimi] = 1.0 # pick out just the one component\n c0 = 0.0 # set origin at 0\n # calculate projection of each cluster's points onto line\n projs = []\n for cpoints in points:\n projs.append(np.dot(cpoints-c0, line))\n if calc_measures:\n d = np.median(projs[1]) - np.median(projs[0])\n # measure whether centers are at least 3 of the bigger stdevs away from\n # each other:\n maxstd = max(projs[0].std(), projs[1].std())\n if maxstd == 0:\n oneDsep = 0 # not sure if this is ideal\n else:\n oneDsep = d / (3 
* maxstd)\n #print('std0=%f, std1=%f, d=%f' % (projs[0].std(), projs[1].std(), d))\n proj = np.concatenate(projs)\n nbins = max(intround(np.sqrt(len(proj))), 2) # seems like a good heuristic\n #print('nbins = %d' % nbins)\n edges = np.histogram(proj, bins=nbins)[1]\n hists = []\n for i in range(nclusters):\n hists.append(np.histogram(projs[i], bins=edges)[0])\n hist = np.concatenate([hists]) # one cluster hist per row\n masses = np.asarray([ h.sum() for h in hist ])\n sortedmassis = masses.argsort()\n # Take the fraction of area that the two distribs overlap.\n # At each bin, take min value of the two distribs. Add up all those min values,\n # and divide by the mass of the smaller distrib.\n if calc_measures:\n overlaparearatio = hist.min(axis=0).sum() / masses[sortedmassis[0]]\n djs = core.DJS(hists[0], hists[1])\n # plotting:\n ledges = edges[:-1] # keep just the left edges, discard the last right edge\n assert len(ledges) == nbins\n binwidth = ledges[1] - ledges[0]\n # plot:\n a = mplw.ax\n a.clear()\n windowtitle = \"clusters %r\" % list(unids)\n print(windowtitle)\n mplw.setWindowTitle(windowtitle)\n if calc_measures:\n #title = (\"sep index=%.3f, overlap area ratio=%.3f, DJS=%.3f, sqrt(DJS)=%.3f\"\n # % (oneDsep, overlaparearatio, djs, np.sqrt(djs)))\n title = (\"%dDsep=%.3f, 1Dsep=%.3f, OAR=%.3f, DJS=%.3f\"\n % (ndims, NDsep, oneDsep, overlaparearatio, djs))\n print(title)\n a.set_title(title)\n cs = [ CLUSTERCOLOURDICT[unid] for unid in unids ]\n for i, c in enumerate(cs):\n # due to white background, replace white clusters with black:\n if c == WHITE:\n cs[i] = 'black'\n # plot the smaller cluster last, to maximize visibility:\n for i in sortedmassis[::-1]:\n a.bar(ledges, hist[i], width=binwidth, color=cs[i], edgecolor=cs[i])\n ## TODO: tight_layout call needs updating for MPL 2.2:\n #mplw.f.tight_layout(pad=0.3) # crop figure to contents\n mplw.figurecanvas.draw()\n",
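"# Added example (editorial, hedged; not part of spyke): a minimal standalone\n# sketch of the 'overlap area ratio' (OAR) separation measure computed in\n# PlotClusterHistogram() above. Assumptions: the two 1D cluster projections are\n# given as plain float arrays, and the common bin edges are derived from their\n# union, mirroring the np.histogram() usage in that method; overlap_area_ratio\n# is an illustrative name, not spyke API.\nimport numpy as np\n\ndef overlap_area_ratio(proj0, proj1):\n \"\"\"At each bin take the min of the two histograms, sum those mins, and\n divide by the mass of the smaller distribution\"\"\"\n proj = np.concatenate([proj0, proj1])\n nbins = max(int(round(np.sqrt(len(proj)))), 2) # same nbins heuristic as above\n edges = np.histogram(proj, bins=nbins)[1] # common bin edges\n hist = np.vstack([ np.histogram(p, bins=edges)[0] for p in (proj0, proj1) ])\n return hist.min(axis=0).sum() / hist.sum(axis=1).min()\n\n# demo: well-separated vs. heavily overlapping gaussian projections:\nrng = np.random.RandomState(0)\na = rng.normal(0, 1, 1000)\nb = rng.normal(4, 1, 1000)\nc = rng.normal(0.5, 1, 1000)\nprint('separated OAR: %.3f' % overlap_area_ratio(a, b)) # near 0\nprint('overlapping OAR: %.3f' % overlap_area_ratio(a, c)) # near 1\n",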
"\"\"\"Core classes and functions used throughout spyke\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import with_statement\n\n__authors__ = ['Martin Spacek', 'Reza Lotun']\n\nimport sys\nimport os\nimport hashlib\nimport time\nimport datetime\nfrom collections import OrderedDict as odict\n\nimport random\nimport string\nfrom copy import copy\nimport json\nimport pickle\n\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.QtCore import Qt\ngetSaveFileName = QtGui.QFileDialog.getSaveFileName\n\nimport numpy as np\nfrom numpy import pi, cos, sin\nimport scipy.signal, scipy.io\n\nimport matplotlib as mpl\n\n# set some numpy print and error options - these hold for all modules in spyke:\nnp.set_printoptions(linewidth=100,\n formatter={'float_kind':'{:f}'.format}) # disable scientific notation\n#np.set_printoptions(precision=6, floatmode='maxprec')\n#np.set_printoptions(precision=3, threshold=1000, edgeitems=5, linewidth=150, suppress=True)\n# make overflow, div by zero, and invalid raise errors, and underflow just raise a warning,\n# so that program doesn't halt on underflow during gaussian fit in extract module:\nnp.seterr(over='raise', divide='raise', invalid='raise', under='warn')\n\nUNIXEPOCH = datetime.datetime(1970, 1, 1, 0, 0, 0) # UNIX epoch: Jan 1, 1970\n\nNULL = b'\\x00'\n\n# For .dat and .nsx files, only allow start and stop time requests that deviate up to\n# DATSAMPLEERRPCT from the nearest (re)sample timepoint. This avoids roundoff errors for\n# time requests that fall exactly in between (50%) (re)sample timepoints:\nDATSAMPLEERRPCT = 49.9 # percent\n\nDEFDATFILTMETH = 'BW' # default .dat filter method: None, 'BW', 'WMLDR'\nDEFNSXFILTMETH = 'BW' # default .nsx filter method: None, 'BW', 'WMLDR'\nBWHPF0 = 300 # Butterworth high-pass filter low-frequency cutoff, Hz\nBWLPF1 = 300 # Butterworth low-pass filter high-frequency cutoff, Hz\nBWHPORDER = 4 # Butterworth high-pass filter order\nBWLPORDER = 4 # Butterworth low-pass filter order\n# low-pass filter raw data to get low-pass stream?\n# otherwise just decimate, which is simpler and faster, but aliases:\nLOWPASSFILTERLPSTREAM = True\n\nDEFCAR = 'Median' # default common average reference method: None, 'Median', 'Mean';\n # 'Median' works best because it's least affected by spikes\n\nDEFHPRESAMPLEX = 2 # default highpass resampling factor for all stream types\nDEFLPSAMPLFREQ = 1000 # default lowpass sampling rate for wide-band stream types, Hz\nDEFHPDATSHCORRECT = False ## TODO: this may not hold for open-ephys and Intan chips!\nDEFHPNSXSHCORRECT = False # no need for .nsx files, s+h delay is only 1 ns between chans\nDEFHPSRFSHCORRECT = True\n\nSRFNCHANSPERBOARD = 32 # used for s+h delay correction in .srf files\n\n# Apparently KERNELSIZE == number of kernel zero crossings, but that seems to depend on\n# the phase of the kernel, some have one less, depending on the channel (when doing sample\n# and hold correction). Anyway, total number of points in the kernel is KERNELSIZE plus 1\n# (for the middle point) - see Blanche2006.\n# Should kernel size depend on the sampling rate? 
No, but perhaps the minimum kernel\n# size depends on the Nyquist rate.\nKERNELSIZE = 12\n# kernel size needs to be even, otherwise there's a slight but undesireable time shift,\n# perhaps because sampfreq always needs to be an integer multiple of rawsampfreq:\nassert KERNELSIZE % 2 == 0\n# number of excess raw datapoints to include on either side of each wideband Stream\n# (such as a DATStream or NSXStream) during a slice call. Due to the lack of analog filtering,\n# a greater excess is needed than e.g. SurfStream because it's already analog filtered\nXSWIDEBANDPOINTS = 200\n\nMAXLONGLONG = 2**63-1\nMAXNBYTESTOFILE = 2**31 # max array size safe to call .tofile() on in Numpy 1.5.0 on Windows\n\nMAXNSPIKEPLOTS = 50\nMAXNROWSLISTSELECTION = 10000\n\nCHANFIELDLEN = 256 # channel string field length at start of .resample file\n\nINVPI = 1 / pi\n\n\nclass EmptyClass(object):\n pass\n\n\nclass Converter(object):\n \"\"\"Store intgain and extgain values and provide methods to convert between AD and uV\n values for .srf files, even when a Stream (where intgain and extgain are stored) isn't\n available\"\"\"\n def __init__(self, intgain, extgain):\n self.intgain = intgain\n self.extgain = extgain\n\n def AD2uV(self, AD):\n \"\"\"Convert rescaled AD values to float32 uV\n Biggest +ve voltage is 10 million uV, biggest +ve rescaled signed int16 AD val\n is half of 16 bits, then divide by internal and external gains\n\n TODO: unsure: does the DT3010 acquire from -10 to 10 V at intgain == 1 and encode\n that from 0 to 4095?\n \"\"\"\n return np.float32(AD) * 10000000 / (2**15 * self.intgain * self.extgain)\n\n def uV2AD(self, uV, dtype=np.int16):\n \"\"\"Convert uV to signed rescaled AD values of type dtype\"\"\"\n return dtype(np.round(uV * (2**15 * self.intgain * self.extgain) / 10000000))\n\n\nclass Converter_TSF_1002(object):\n \"\"\"Store intgain and extgain values and provide methods to convert between AD and uV\n values, even when a Stream (where intgain and extgain are stored) isn't available. Meant\n specifically for .tsf version 1002 files, which have no specific AD voltage limits, and\n already come as signed values centered around 0 V\"\"\"\n def __init__(self, intgain, extgain):\n self.intgain = intgain # uV per AD value\n self.extgain = extgain\n\n def AD2uV(self, AD):\n \"\"\"Convert signed int16 AD values to float32 uV\"\"\"\n return np.float32(AD) * self.intgain * self.extgain\n\n def uV2AD(self, uV, dtype=np.int16):\n \"\"\"Convert float32 uV to signed AD values of type dtype\"\"\"\n return dtype(np.round(uV / (self.intgain * self.extgain)))\n\n\nclass SimpleConverter(object):\n \"\"\"Store conversion factors between AD values and uV values, and provide\n methods to convert between them, even when a stream isn't available. Note that\n conceptually, AD2uVx is identical to uVperAD\"\"\"\n def __init__(self, AD2uVx):\n self.AD2uVx = AD2uVx\n self.uV2ADx = 1 / AD2uVx\n\n def AD2uV(self, AD):\n return self.AD2uVx * np.float32(AD)\n \n def uV2AD(self, uV, dtype=np.int16):\n return dtype(np.round(self.uV2ADx * uV))\n\n\nclass DatConverter(SimpleConverter):\n pass\n\n\nclass NSXConverter(SimpleConverter):\n pass\n\n\nclass WaveForm(object):\n \"\"\"Just a container for data, std of data, timestamps, channels, and saturation indices.\n Sliceable in time, and indexable in channel space. Only really used for\n convenient plotting. 
Everything else uses the sort.wavedata array, and\n related sort.spikes fields\"\"\"\n def __init__(self, data=None, std=None, ts=None, chans=None, tres=None, satis=None):\n self.data = data # in AD, potentially multichannel, depending on shape\n self.std = std # standard deviation, same shape as data\n self.ts = ts # timestamps array in us, one for each sample (column) in data\n self.chans = chans # channel ids corresponding to rows in .data\n self.tres = tres # timestamp resolution, in us\n self.satis = satis # boolean array denoting saturation, same shape as data\n\n def __getitem__(self, key):\n \"\"\"Make waveform data sliceable in time, and directly indexable by channel id(s).\n Return a new WaveForm\"\"\"\n \n # check for std field, won't exist for old saved Waveforms in .sort files:\n try: self.std\n except AttributeError: self.std = None\n \n if type(key) == slice: # slice self in time\n if self.ts is None:\n return WaveForm() # empty WaveForm\n # use timestamp indices instead of timestamps, to avoid searchsorted\n # problems with potential float roundoff errors:\n try:\n self.tis\n except AttributeError:\n self.tis = intround(self.ts / self.tres) # calc timestamp indices on first use\n t0i, t1i = intround(key.start / self.tres), intround(key.stop / self.tres)\n lo, hi = self.tis.searchsorted([t0i, t1i])\n # direct method can cause occasional float roundoff error if self.ts are float:\n #lo, hi = self.ts.searchsorted([key.start, key.stop])\n data = self.data[:, lo:hi]\n if self.std is None:\n std = None\n else:\n std = self.std[:, lo:hi]\n ts = self.ts[lo:hi]\n # return a new WaveForm:\n return WaveForm(data=data, std=std, ts=ts, chans=self.chans, tres=self.tres)\n else: # index into self by channel id(s)\n keys = toiter(key)\n # don't assume self.chans are sorted:\n try:\n chanis = argmatch(self.chans, keys)\n except ValueError:\n raise IndexError('Invalid index %r' % key)\n data = self.data[chanis] # grab the appropriate rows of data\n if self.std is None:\n std = None\n else:\n std = self.std[chanis]\n # return a new WaveForm:\n return WaveForm(data=data, std=std, ts=self.ts, chans=keys, tres=self.tres)\n\n def __getstate__(self):\n \"\"\"Don't pickle self.tis, it can always be regenerated\"\"\"\n d = self.__dict__.copy() # copy it cuz we'll be making changes\n try: del d['tis']\n except KeyError: pass\n return d\n\n def __len__(self):\n \"\"\"Number of data points in time\"\"\"\n nt = len(self.ts)\n assert nt == self.data.shape[1] # obsessive\n return nt\n\n def _check_add_sub(self, other):\n \"\"\"Check a few things before adding or subtracting waveforms\"\"\"\n if self.data.shape != other.data.shape:\n raise ValueError(\"Waveform shapes %r and %r don't match\" %\n (self.data.shape, other.data.shape))\n if self.chans != other.chans:\n raise ValueError(\"Waveform channel ids %r and %r don't match\" %\n (self.chans, other.chans))\n\n def __add__(self, other):\n \"\"\"Return new waveform which is self+other. Keep self's timestamps\"\"\"\n self._check_add_sub(other)\n return WaveForm(data=self.data+other.data,\n ts=self.ts, chans=self.chans, tres=self.tres)\n\n def __sub__(self, other):\n \"\"\"Return new waveform which is self-other. 
Keep self's timestamps\"\"\"\n self._check_add_sub(other)\n return WaveForm(data=self.data-other.data,\n ts=self.ts, chans=self.chans, tres=self.tres)\n\n \nclass SpykeToolWindow(QtGui.QMainWindow):\n \"\"\"Base class for all of spyke's tool windows\"\"\"\n def __init__(self, parent, flags=Qt.Tool):\n QtGui.QMainWindow.__init__(self, parent, flags)\n self.maximized = False\n\n def keyPressEvent(self, event):\n key = event.key()\n modifiers = event.modifiers()\n shift = modifiers == Qt.ShiftModifier # only modifier is shift\n if key == Qt.Key_F11:\n self.toggleMaximized()\n elif key == Qt.Key_S and shift:\n self.on_actionSave_triggered()\n else:\n QtGui.QMainWindow.keyPressEvent(self, event) # pass it on\n\n def mouseDoubleClickEvent(self, event):\n \"\"\"Doesn't catch window titlebar doubleclicks for some reason (window manager\n catches them?). Have to doubleclick on a part of the window with no widgets in it\"\"\"\n self.toggleMaximized()\n\n def closeEvent(self, event):\n # remove 'Window' from class name\n windowtype = type(self).__name__.replace('Window', '')\n self.parent().HideWindow(windowtype)\n\n def toggleMaximized(self):\n if not self.maximized:\n self.normalPos, self.normalSize = self.pos(), self.size()\n dw = QtGui.QDesktopWidget()\n rect = dw.availableGeometry(self)\n self.setGeometry(rect)\n self.maximized = True\n else: # restore\n self.resize(self.normalSize)\n self.move(self.normalPos)\n self.maximized = False\n\n def on_actionSave_triggered(self):\n \"\"\"Save panel to file\"\"\"\n f = self.panel.figure\n\n # copied and adapted from mpl.backend_qt4.NavigationToolbar2QT.save_figure():\n filetypes = f.canvas.get_supported_filetypes_grouped()\n sorted_filetypes = filetypes.items()\n sorted_filetypes.sort()\n default_filetype = f.canvas.get_default_filetype()\n\n startpath = mpl.rcParams.get('savefig.directory', '')\n startpath = os.path.expanduser(startpath)\n start = os.path.join(startpath, f.canvas.get_default_filename())\n filters = []\n selectedFilter = None\n for name, exts in sorted_filetypes:\n exts_list = \" \".join(['*.%s' % ext for ext in exts])\n filter = '%s (%s)' % (name, exts_list)\n if default_filetype in exts:\n selectedFilter = filter\n filters.append(filter)\n filters = ';;'.join(filters)\n fname = getSaveFileName(self.panel, \"Save panel to\",\n start, filters, selectedFilter)\n if fname:\n fname = str(fname) # convert from QString\n if startpath == '':\n # explicitly missing key or empty str signals to use cwd\n mpl.rcParams['savefig.directory'] = startpath\n else:\n # save dir for next time\n mpl.rcParams['savefig.directory'] = os.path.dirname(str(fname))\n try:\n f.canvas.print_figure(fname, facecolor=None, edgecolor=None)\n except Exception as e:\n QtGui.QMessageBox.critical(\n self.panel, \"Error saving file\", str(e),\n QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)\n print('Panel saved to %r' % fname)\n\n\nclass SpykeListView(QtGui.QListView):\n def __init__(self, parent):\n QtGui.QListView.__init__(self, parent)\n self.sortwin = parent\n #self.setSelectionBehavior(QTableWidget.SelectRows)\n self.setSelectionMode(QtGui.QListView.ExtendedSelection)\n self.setLayoutMode(QtGui.QListView.Batched) # prevents lockup during huge layout ops\n # Setting resize mode to \"adjust\" sometimes results in a bug where Qt seems to\n # be reflowing the contents many times over before it finally stops, resulting in\n # very slow operations when changing list contents (like adding/removing neurons).\n # But, with this disabled, the contents no longer reflow, and 
you're forced to use\n # scrollbars unnecessarily to see all the list contents. This might also be\n # interacting with the setWrapping and/or setBatchSize features:\n #self.setResizeMode(QtGui.QListView.Adjust) # recalculates layout on resize\n self.setUniformItemSizes(True) # speeds up listview\n self.setFlow(QtGui.QListView.LeftToRight) # default is TopToBottom\n self.setWrapping(True)\n self.setBatchSize(300)\n #self.setViewMode(QtGui.QListView.IconMode)\n\n def mousePressEvent(self, event):\n sw = self.sortwin\n buttons = event.buttons()\n if buttons == QtCore.Qt.LeftButton:\n QtGui.QListView.mousePressEvent(self, event) # handle as usual\n else:\n self.sortwin.mousePressEvent(event) # pass on up to Sort window\n\n def keyPressEvent(self, event):\n key = event.key()\n modifiers = event.modifiers()\n ctrldown = bool(modifiers & Qt.ControlModifier)\n ctrlup = not ctrldown\n if (key in [Qt.Key_A, Qt.Key_X, Qt.Key_N, Qt.Key_M, Qt.Key_G,\n Qt.Key_Equal, Qt.Key_Minus, Qt.Key_Slash,\n Qt.Key_P, Qt.Key_Backslash, Qt.Key_NumberSign, Qt.Key_F, Qt.Key_R,\n Qt.Key_E, Qt.Key_B, Qt.Key_BracketLeft, Qt.Key_BracketRight,\n Qt.Key_Comma, Qt.Key_Period, Qt.Key_F5, Qt.Key_C, Qt.Key_T, Qt.Key_W]\n or ctrlup and key == Qt.Key_Space):\n event.ignore() # pass it on up to the parent\n else:\n QtGui.QListView.keyPressEvent(self, event) # handle it as usual\n\n def selectionChanged(self, selected, deselected, prefix=None):\n \"\"\"Plot neurons or spikes on list item selection\"\"\"\n # For short lists, display the actual selection in the list, otherwise, if there are\n # too many entries in the list, selection gets unbearably slow, especially as you\n # select items further down the list. So for very long lists, don't actually show the\n # selection. The selection events all still seem to work though, and for some reason\n # sometimes the selections themselves are displayed, even when selected\n # programmatically:\n if self.nrows < MAXNROWSLISTSELECTION:\n QtGui.QListView.selectionChanged(self, selected, deselected)\n panel = self.sortwin.panel\n addis = [ qvar2int(i.data()) for i in selected.indexes() ]\n remis = [ qvar2int(i.data()) for i in deselected.indexes() ]\n panel.removeItems([ prefix+str(i) for i in remis ])\n # for speed, don't allow more than MAXNSPIKEPLOTS spikes to be plotted in sort panel:\n if prefix == 's':\n '''\n # note that self.nrowsSelected seems to report nrows selected *including* those\n # added and removed by the current selection event\n net = len(addis) - len(remis)\n print('nselected: %d' % self.nrowsSelected)\n print('Net change: %d' % net)\n nwereselected = self.nrowsSelected - net\n print('num were selected is %d' % nwereselected)\n maxnadd = max(MAXNSPIKEPLOTS - nwereselected + len(remis), 0)\n print('maxnadd is %d' % maxnadd)\n addis = addis[:maxnadd]\n '''\n nadd = len(addis)\n maxnadd = max(MAXNSPIKEPLOTS - self.nrowsSelected + nadd, 0)\n if maxnadd == 0:\n return\n if nadd > maxnadd:\n # if we can't add all the requested spikes to the sort panel without\n # exceeding MAXNSPIKEPLOTS, then randomly sample however many we can still\n # add (maxnadd), and add them to the sort panel\n #print('Adding %d randomly sampled plots of %d selected spikes'\n # % (maxnadd, self.nrowsSelected))\n addis = random.sample(addis, maxnadd)\n #t0 = time.time()\n panel.addItems([ prefix+str(i) for i in addis ])\n #print('addItems took %.3f sec' % (time.time()-t0))\n #print(\"Done selchanged, %r, addis=%r, remis=%r\" % (prefix, addis, remis))\n\n def updateAll(self):\n 
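\"\"\"Refresh all rows via the model's updateAll(), which emits dataChanged\n over the full row range (see SpykeAbstractListModel below)\"\"\"\n 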
self.model().updateAll()\n\n def get_nrows(self):\n return self.model().rowCount()\n\n nrows = property(get_nrows)\n\n def selectRows(self, rows, on=True):\n \"\"\"Row selection in listview is complex. This makes it simpler\"\"\"\n ## TODO: There's a bug here, where if you select the last two neurons in nlist,\n ## (perhaps these last two need to be near a list edge), merge them, and then\n ## undo, then merge again (instead of just redoing), then undo again, they're\n ## both selected, but only the first is replotted because the selchanged event\n ## is only passed the first of the two as being newly selected. If however\n ## before remerging, you clear the selection or select something else, and then\n ## go back and select those same two neurons and merge, and undo, it works fine,\n ## and the selchanged event gets both items as newly selected. Seems like a Qt\n ## bug, or at least some very subtle timing problem of some kind. This might have\n ## something to do with reflow when changing list contents, but even resetting\n ## listview behaviour to default doesn't make this go away. Also, seems to happen\n ## for selection of one index at a time, and for doing it all in one go with a\n ## QItemSelection.\n \n rows = toiter(rows)\n m = self.model()\n sm = self.selectionModel()\n if on:\n flag = sm.Select\n else:\n flag = sm.Deselect\n #print('start select=%r loop for rows %r' % (on, rows))\n '''\n # unnecessarily emits nrows selectionChanged signals, causes slow\n # plotting in mpl commit 50fc548465b1525255bc2d9f66a6c7c95fd38a75 (pre\n # 1.0) and later:\n [ sm.select(m.index(row), flag) for row in rows ]\n '''\n # emits single selectionChanged signal, more efficient, but causes a bit of\n # flickering, or at least used to in Qt 4.7.0:\n sel = QtGui.QItemSelection()\n for row in rows:\n index = m.index(row)\n #print('row: %r, index: %r' % (row, index))\n sel.select(index, index) # topleft to bottomright\n #print('sel has indexes, rows, cols, data:')\n #for index in sel.indexes():\n # print(index, index.row(), index.column(), index.data())\n sm.select(sel, flag)\n #print('end select loop')\n '''\n # constantly scrolling to selection slows everything quite noticeably, especially\n # when using the spike selection sortwin.slider\n if scrollTo and on and len(rows) > 0: # scroll to last row that was just selected\n self.scrollTo(m.index(rows[-1]))\n '''\n def selectedRows(self):\n \"\"\"Return list of selected rows\"\"\"\n return [ i.row() for i in self.selectedIndexes() ]\n\n def rowSelected(self, row):\n \"\"\"Simple way to check if a row is selected\"\"\"\n return self.model().index(row) in self.selectedIndexes()\n\n def get_nrowsSelected(self):\n return len(self.selectedIndexes())\n\n nrowsSelected = property(get_nrowsSelected)\n\n def selectRandom(self, start, stop, nsamples):\n \"\"\"Select random sample of rows\"\"\"\n start = max(0, start)\n if stop == -1:\n stop = self.nrows\n stop = min(self.nrows, stop)\n nrows = stop - start\n nsamples = min(nsamples, nrows)\n rows = random.sample(range(start, stop), nsamples)\n self.selectRows(rows)\n\n\nclass NList(SpykeListView):\n \"\"\"Neuron list view\"\"\"\n def __init__(self, parent):\n SpykeListView.__init__(self, parent)\n self.setModel(NListModel(parent))\n self.setItemDelegate(NListDelegate(parent))\n #self.connect(self, QtCore.SIGNAL(\"activated(QModelIndex)\"),\n # self.on_actionItem_triggered)\n # alternate style of connecting signals, seems \"activated\" is needed now with\n # new style signals and slots, instead of \"triggered\", 
even though \"activated\"\n # is supposed to be deprecated:\n self.activated.connect(self.on_actionItem_triggered)\n\n def selectionChanged(self, selected, deselected):\n SpykeListView.selectionChanged(self, selected, deselected, prefix='n')\n selnids = [ qvar2int(i.data()) for i in self.selectedIndexes() ]\n #if 1 <= len(selnids) <= 3: # populate nslist if exactly 1, 2 or 3 neurons selected\n self.sortwin.nslist.neurons = [ self.sortwin.sort.neurons[nid] for nid in selnids ]\n #else:\n # self.sortwin.nslist.neurons = []\n\n def on_actionItem_triggered(self, index):\n sw = self.sortwin\n sw.parent().ui.plotButton.click()\n\n\nclass NSList(SpykeListView):\n \"\"\"Neuron-Spike list view. Displays spikes of currently selected neuron(s).\n For high performance when quickly selecting large numbers of points via 'painting'\n in the cluster window, a 'fake' selection is maintained separately from the actual\n selection in the list view\"\"\"\n def __init__(self, parent):\n SpykeListView.__init__(self, parent)\n self.setModel(NSListModel(parent))\n #self.connect(self, QtCore.SIGNAL(\"activated(QModelIndex)\"),\n # self.on_actionItem_triggered)\n self.activated.connect(self.on_actionItem_triggered)\n self.clear_fake_selection()\n\n def selectRows(self, rows, on=True):\n \"\"\"Normal (manual) row selection\"\"\"\n self.clear_fake_selection()\n SpykeListView.selectRows(self, rows, on=on)\n\n def fake_selectRows(self, rows, on=True, plot=True):\n \"\"\"Fake row selection, same interface as normal selectRows()\"\"\"\n panel = self.sortwin.panel\n SpykeListView.clearSelection(self) # clear any manually selected rows\n if on: # fake select\n self.fake_selected_rows = np.union1d(self.fake_selected_rows, rows)\n if plot:\n # add at most MAXNSPIKEPLOTS to sort panel:\n nfreeplots = MAXNSPIKEPLOTS - len(panel.used_plots)\n if nfreeplots < len(rows):\n panel.removeAllSpikes()\n nfreeplots = MAXNSPIKEPLOTS\n plot_sids = self.sids[rows[:nfreeplots]]\n panel.addItems([ 's'+str(sid) for sid in plot_sids ])\n else: # fake deselect\n self.fake_selected_rows = np.setxor1d(self.fake_selected_rows, rows)\n # Remove at most all currently plotted spikes in sort panel.\n # Not all fake selected spikes are necessarily plotted, and therefore\n # deselecting fake selected spikes in cluster window won't necessarily\n # have corresponding spikes to remove from the sort panel:\n panel.removeItems([ 's'+str(i) for i in self.sids[rows] ])\n #panel.removeAllSpikes() # more drastic, but less interactive\n\n def selectedRows(self):\n \"\"\"Return selected rows\"\"\"\n real = np.int64([ i.row() for i in self.selectedIndexes() ])\n fake = self.fake_selected_rows\n return np.concatenate([real, fake])\n\n def rowSelected(self, row):\n \"\"\"Simple way to check if a row is selected\"\"\"\n return (self.model().index(row) in self.selectedIndexes() or\n row in self.fake_selected_rows)\n\n def get_nrowsSelected(self):\n return len(self.selectedIndexes()) + len(self.fake_selected_rows)\n\n nrowsSelected = property(get_nrowsSelected)\n\n def clearSelection(self):\n SpykeListView.clearSelection(self)\n self.clear_fake_selection()\n self.sortwin.panel.removeAllSpikes()\n\n def clear_fake_selection(self):\n self.fake_selected_rows = np.array([], dtype=int)\n\n def selectionChanged(self, selected, deselected):\n SpykeListView.selectionChanged(self, selected, deselected, prefix='s')\n\n def on_actionItem_triggered(self, index):\n sw = self.sortwin\n if sw.sort.stream.is_open():\n sid = self.sids[index.row()]\n spike = sw.sort.spikes[sid]\n 
sw.parent().seek(spike['t'])\n else:\n sw.parent().ui.plotButton.click()\n\n def get_neurons(self):\n return self.model().neurons\n\n def set_neurons(self, neurons):\n \"\"\"Every time neurons are set, clear any existing selection and update data model\"\"\"\n self.clearSelection() # remove any plotted sids, at least for now\n self.model().neurons = neurons\n\n neurons = property(get_neurons, set_neurons)\n\n def get_nids(self):\n return np.asarray([ neuron.id for neuron in self.model().neurons ])\n\n nids = property(get_nids)\n\n def get_sids(self):\n return self.model().sids\n\n sids = property(get_sids)\n\n def keyPressEvent(self, event):\n sw = self.sortwin\n key = event.key()\n # passing horizontal keys to nlist assumes nslist is a single column\n # and are therefore not needed:\n if key in [Qt.Key_Enter, Qt.Key_Return]:\n sw.nlist.keyPressEvent(event) # pass on to nlist\n else:\n SpykeListView.keyPressEvent(self, event) # handle it as usual\n\n def selectRandom(self, nsamples):\n \"\"\"Select up to nsamples random rows per neuron\"\"\"\n if self.model().sliding == True:\n self.neurons = self.neurons # trigger NSListModel.set_neurons() call\n self.model().sliding = False\n for neuron in self.neurons:\n allrows = self.sids.searchsorted(neuron.sids)\n nsamples = min(nsamples, len(allrows))\n rows = np.random.choice(allrows, nsamples, replace=False)\n self.selectRows(rows)\n\n\nclass USList(SpykeListView):\n \"\"\"Unsorted spike list view\"\"\"\n def __init__(self, parent):\n SpykeListView.__init__(self, parent)\n self.setModel(USListModel(parent))\n #self.connect(self, QtCore.SIGNAL(\"activated(QModelIndex)\"),\n # self.on_actionItem_triggered)\n self.activated.connect(self.on_actionItem_triggered)\n\n def keyPressEvent(self, event):\n sw = self.sortwin\n key = event.key()\n if key in [Qt.Key_Enter, Qt.Key_Return]:\n sw.nlist.keyPressEvent(event) # pass on to nlist\n else:\n SpykeListView.keyPressEvent(self, event) # handle it as usual\n\n def selectionChanged(self, selected, deselected):\n SpykeListView.selectionChanged(self, selected, deselected, prefix='s')\n\n def on_actionItem_triggered(self, index):\n sw = self.sortwin\n if sw.sort.stream.is_open():\n sid = sw.sort.usids[index.row()]\n spike = sw.sort.spikes[sid]\n sw.parent().seek(spike['t'])\n else:\n sw.parent().ui.plotButton.click()\n\n def selectRandom(self, nsamples):\n \"\"\"Select up to nsamples random rows\"\"\"\n SpykeListView.selectRandom(self, 0, -1, nsamples)\n\n\nclass SpykeAbstractListModel(QtCore.QAbstractListModel):\n def __init__(self, parent):\n QtCore.QAbstractListModel.__init__(self, parent)\n self.sortwin = parent\n\n def updateAll(self):\n \"\"\"Emit dataChanged signal so that view updates itself immediately.\n Hard to believe this doesn't already exist in some form\"\"\"\n i0 = self.createIndex(0, 0) # row, col\n i1 = self.createIndex(self.rowCount()-1, 0) # seems this isn't necessary\n # seems to refresh all, though should only refresh 1st row:\n #self.dataChanged.emit(i0, i0)\n self.dataChanged.emit(i0, i1) # refresh all\n\n\nclass NListModel(SpykeAbstractListModel):\n \"\"\"Model for neuron list view\"\"\"\n def rowCount(self, parent=None):\n try:\n # update nlist tooltip before returning, only +ve nids count as neurons:\n sort = self.sortwin.sort\n neurons = sort.neurons\n nneurons = (np.asarray(sort.norder) > 0).sum()\n goodnids = sort.get_good()\n ngood = len(goodnids)\n ngoodspikes = sum(neurons[nid].nspikes for nid in goodnids)\n self.sortwin.nlist.setToolTip(\"Neuron list\\n\"\n \"%d neurons\\n\"\n 
\"%d good with %d spikes\"\n % (nneurons, ngood, ngoodspikes))\n return len(sort.norder)\n except AttributeError: # sort doesn't exist\n self.sortwin.nlist.setToolTip(\"Neuron list\")\n return 0\n\n def data(self, index, role=Qt.DisplayRole):\n if index.isValid():\n neurons = self.sortwin.sort.neurons\n norder = self.sortwin.sort.norder\n try:\n nid = norder[index.row()]\n except IndexError:\n print('WARNING: tried to index non-existent row %d' % index.row())\n #print('.data(): row=%d, val=%d' % (index.row(), nid))\n if role == Qt.DisplayRole:\n return nid # no need to use QVariant() apparently\n elif role == Qt.ToolTipRole:\n neuron = neurons[nid]\n try:\n chan = neuron.chan\n except ValueError: # probably not enough overlapping chans for a template\n chan = None\n pos = neuron.cluster.pos\n return ('nid: %d\\n' % nid +\n '%d spikes\\n' % neuron.nspikes +\n 'chan: %r\\n' % chan +\n 't: %d us\\n' % pos['t'] +\n 'dt: %.4g us\\n' % pos['dt'] +\n 'x0: %.4g um\\n' % pos['x0'] +\n 'y0: %.4g um\\n' % pos['y0'] +\n 'Vpp: %.4g uV\\n' % pos['Vpp'] +\n 'sx: %.4g um' % pos['sx'])\n # this stuff is handled in NListDelegate:\n '''\n elif role == Qt.ForegroundRole:\n if nid in self.sortwin.sort.get_good():\n return QtGui.QBrush(QtGui.QColor(255, 255, 255))\n elif role == Qt.BackgroundRole:\n if nid in self.sortwin.sort.get_good():\n return QtGui.QBrush(QtGui.QColor(0, 128, 0))\n '''\nclass SListModel(SpykeAbstractListModel):\n \"\"\"Base model for spike list models\"\"\"\n def spiketooltip(self, spike):\n return ('sid: %d\\n' % spike['id'] +\n 'nid: %d\\n' % spike['nid'] +\n 'chan: %d\\n' % spike['chan'] +\n 't: %d us\\n' % spike['t'] +\n 'dt: %.4g us\\n' % spike['dt'] +\n 'x0: %.4g um\\n' % spike['x0'] +\n 'y0: %.4g um\\n' % spike['y0'] +\n 'Vpp: %.4g uV\\n' % spike['Vpp'] +\n 'sx: %.4g um' % spike['sx'])\n\n\nclass NSListModel(SListModel):\n \"\"\"Model for neuron spikes list view\"\"\"\n def __init__(self, parent):\n SpykeAbstractListModel.__init__(self, parent)\n self._neurons = []\n self.nspikes = 0\n self.sids = np.empty(0, dtype=np.int32)\n\n def get_neurons(self):\n return self._neurons\n\n def set_neurons(self, neurons):\n self._neurons = neurons\n if neurons:\n self.sids = np.concatenate([ neuron.sids for neuron in neurons ])\n self.sids.sort() # keep them sorted\n self.sortwin.slider.setEnabled(True)\n else:\n self.sids = np.empty(0, dtype=np.int32)\n self.sortwin.slider.setEnabled(False)\n self.nspikes = len(self.sids)\n # triggers new calls to rowCount() and data(), and critically, clears selection\n # before moving slider to pos 0, which triggers slider.valueChanged:\n self.reset()\n self.sortwin.slider.setValue(0) # reset position to 0\n self.sortwin.update_slider() # update limits and step sizes\n self.sliding = False\n\n neurons = property(get_neurons, set_neurons)\n\n def rowCount(self, parent=None):\n # update nslist tooltip before returning:\n self.sortwin.nslist.setToolTip(\"Sorted spike list\\n%d spikes\" % self.nspikes)\n return self.nspikes\n\n def data(self, index, role=Qt.DisplayRole):\n if index.isValid() and role in [Qt.DisplayRole, Qt.ToolTipRole]:\n sid = int(self.sids[index.row()])\n if role == Qt.DisplayRole:\n return sid\n elif role == Qt.ToolTipRole:\n spike = self.sortwin.sort.spikes[sid]\n return self.spiketooltip(spike)\n\n\nclass USListModel(SListModel):\n \"\"\"Model for unsorted spike list view\"\"\"\n def rowCount(self, parent=None):\n try:\n nspikes = len(self.sortwin.sort.usids)\n # update uslist tooltip before returning:\n 
self.sortwin.uslist.setToolTip(\"Unsorted spike list\\n%d spikes\" % nspikes)\n return nspikes\n except AttributeError: # sort doesn't exist\n self.sortwin.uslist.setToolTip(\"Unsorted spike list\")\n return 0\n\n def data(self, index, role=Qt.DisplayRole):\n if index.isValid() and role in [Qt.DisplayRole, Qt.ToolTipRole]:\n sid = int(self.sortwin.sort.usids[index.row()])\n if role == Qt.DisplayRole:\n return sid\n elif role == Qt.ToolTipRole:\n spike = self.sortwin.sort.spikes[sid]\n return self.spiketooltip(spike)\n\n\nclass NListDelegate(QtGui.QStyledItemDelegate):\n \"\"\"Delegate for neuron list view, modifies appearance of items\"\"\"\n def __init__(self, parent):\n QtGui.QStyledItemDelegate.__init__(self, parent)\n self.sortwin = parent\n palette = QtGui.QApplication.palette()\n self.selectedgoodbrush = QtGui.QBrush(QtGui.QColor(0, 0, 255)) # blue\n self.unselectedgoodbrush = QtGui.QBrush(QtGui.QColor(0, 128, 0)) # mid green\n self.selectedbrush = palette.highlight()\n self.unselectedbrush = palette.base()\n self.selectedgoodpen = QtGui.QPen(Qt.white)\n self.unselectedgoodpen = QtGui.QPen(Qt.white)\n self.selectedpen = QtGui.QPen(palette.highlightedText().color())\n self.unselectedpen = QtGui.QPen(palette.text().color())\n self.focusedpen = QtGui.QPen(Qt.gray, 0, Qt.DashLine)\n self.focusedpen.setDashPattern([1, 1])\n self.focusedpen.setCapStyle(Qt.FlatCap)\n\n def paint(self, painter, option, index):\n \"\"\"Change background colour for nids designated as \"good\"\"\"\n model = index.model()\n nid = model.data(index) # should come out as an int\n good = nid in self.sortwin.sort.get_good()\n # don't care whether self is active or inactive, only care about\n # selection, \"good\", and focused states\n selected = option.state & QtGui.QStyle.State_Selected\n focused = option.state & QtGui.QStyle.State_HasFocus\n painter.save()\n # paint background:\n painter.setPen(QtGui.QPen(Qt.NoPen))\n if selected:\n if good:\n painter.setBrush(self.selectedgoodbrush)\n else: # use default selection brush\n painter.setBrush(self.selectedbrush)\n else: # unselected\n if good:\n painter.setBrush(self.unselectedgoodbrush)\n else: # use default background brush\n painter.setBrush(self.unselectedbrush)\n painter.drawRect(option.rect)\n # paint focus rect:\n if focused:\n rect = copy(option.rect)\n painter.setBrush(Qt.NoBrush) # no need to draw bg again\n painter.setPen(self.focusedpen)\n rect.adjust(0, 0, -1, -1) # make space for outline\n painter.drawRect(rect)\n # paint foreground:\n value = index.data(Qt.DisplayRole)\n if selected:\n if good:\n painter.setPen(self.selectedgoodpen)\n else: # use default selection pen\n painter.setPen(self.selectedpen)\n else: # unselected\n if good:\n painter.setPen(self.unselectedgoodpen)\n else: # use default background pen\n painter.setPen(self.unselectedpen)\n text = qvar2str(value)\n painter.drawText(option.rect, Qt.AlignCenter, text)\n painter.restore()\n\n\nclass ClusterTabSpinBox(QtGui.QSpinBox):\n \"\"\"Intercept CTRL+Z key event for cluster undo instead of spinbox edit undo\"\"\"\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Z and event.modifiers() == Qt.ControlModifier:\n self.topLevelWidget().on_actionUndo_triggered()\n else:\n QtGui.QSpinBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass ClusterTabDoubleSpinBox(QtGui.QDoubleSpinBox):\n \"\"\"Intercept CTRL+Z key event for cluster undo instead of spinbox edit undo\"\"\"\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Z and event.modifiers() == Qt.ControlModifier:\n 
self.topLevelWidget().on_actionUndo_triggered()\n else:\n QtGui.QDoubleSpinBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass ClusteringGroupBox(QtGui.QGroupBox):\n \"\"\"Make ENTER key event activate the cluster button\"\"\"\n def keyPressEvent(self, event):\n if event.key() in [Qt.Key_Enter, Qt.Key_Return]:\n self.topLevelWidget().ui.clusterButton.click()\n else:\n QtGui.QGroupBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass PlottingGroupBox(QtGui.QGroupBox):\n \"\"\"Make ENTER key event activate the plot button\"\"\"\n def keyPressEvent(self, event):\n if event.key() in [Qt.Key_Enter, Qt.Key_Return]:\n self.topLevelWidget().ui.plotButton.click()\n else:\n QtGui.QGroupBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass XCorrsGroupBox(QtGui.QGroupBox):\n \"\"\"Make ENTER key event activate the correlograms plot button\"\"\"\n def keyPressEvent(self, event):\n if event.key() in [Qt.Key_Enter, Qt.Key_Return]:\n self.topLevelWidget().ui.plotXcorrsButton.click()\n else:\n QtGui.QGroupBox.keyPressEvent(self, event) # handle it as usual\n\n\nclass SpikeSelectionSlider(QtGui.QSlider):\n \"\"\"Make ENTER key event activate the plot button\"\"\"\n def keyPressEvent(self, event):\n if event.key() in [Qt.Key_Enter, Qt.Key_Return]:\n self.topLevelWidget().spykewindow.ui.plotButton.click()\n else:\n QtGui.QSlider.keyPressEvent(self, event) # handle it as usual\n\n\nclass Stack(list):\n \"\"\"A list that doesn't allow -ve indices\"\"\"\n def __getitem__(self, key):\n if key < 0:\n raise IndexError('Stack index %d out of range' % key)\n return list.__getitem__(self, key)\n\n\nclass ClusterChange(object):\n \"\"\"Stores info for undoing/redoing a change to any set of clusters\"\"\"\n def __init__(self, sids, spikes, message):\n self.sids = sids\n self.spikes = spikes\n self.message = message\n\n def __repr__(self):\n return self.message\n\n def save_old(self, oldclusters, oldnorder, oldgood):\n self.oldnids = self.spikes['nid'][self.sids] # this seems to create a copy\n self.oldunids = [ c.id for c in oldclusters ]\n self.oldposs = [ c.pos.copy() for c in oldclusters ]\n self.oldnormposs = [ c.normpos.copy() for c in oldclusters ]\n self.oldnorder = copy(oldnorder)\n self.oldgood = copy(oldgood)\n\n def save_new(self, newclusters, newnorder, newgood):\n self.newnids = self.spikes['nid'][self.sids] # this seems to create a copy\n self.newunids = [ c.id for c in newclusters ]\n self.newposs = [ c.pos.copy() for c in newclusters ]\n self.newnormposs = [ c.normpos.copy() for c in newclusters ]\n self.newnorder = copy(newnorder)\n self.newgood = copy(newgood)\n\n\ndef get_sha1(fname, blocksize=2**20):\n \"\"\"Gets the sha1 hash of file designated by fname (with full path)\"\"\"\n m = hashlib.sha1()\n with open(fname, 'rb') as f:\n # continually update hash until EOF\n while True:\n block = f.read(blocksize)\n if not block:\n break\n m.update(block)\n return m.hexdigest()\n\ndef intround(n):\n \"\"\"Round to the nearest integer, return an integer. Works on arrays.\n Saves on parentheses, nothing more\"\"\"\n if iterable(n): # it's a sequence, return as an int64 array\n return np.int64(np.round(n))\n else: # it's a scalar, return as normal Python int\n return int(round(n))\n\ndef intfloor(n):\n \"\"\"Round down to the nearest integer, return an integer. 
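E.g. intfloor(-1.2) == -2, whereas int(-1.2) == -1 truncates toward zero. 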
Works on arrays.\n Saves on parentheses, nothing more\"\"\"\n if iterable(n): # it's a sequence, return as an int64 array\n return np.int64(np.floor(n))\n else: # it's a scalar, return as normal Python int\n return int(np.floor(n))\n\ndef intceil(n):\n \"\"\"Round up to the nearest integer, return an integer. Works on arrays.\n Saves on parentheses, nothing more\"\"\"\n if iterable(n): # it's a sequence, return as an int64 array\n return np.int64(np.ceil(n))\n else: # it's a scalar, return as normal Python int\n return int(np.ceil(n))\n\ndef iterable(x):\n \"\"\"Check if the input is iterable, stolen from numpy.iterable()\"\"\"\n try:\n iter(x)\n return True\n except TypeError:\n return False\n\ndef toiter(x):\n \"\"\"Convert to iterable. If input is iterable, returns it. Otherwise returns it in a list.\n Useful when you want to iterate over something (like in a for loop),\n and you don't want to have to do type checking or handle exceptions\n when it isn't a sequence\"\"\"\n if iterable(x):\n return x\n else:\n return [x]\n\ndef tolist(x):\n \"\"\"Convert to list if not already a list\"\"\"\n try:\n return list(x)\n except TypeError: # x is not iterable\n return [x]\n\ndef tocontig(x):\n \"\"\"Return C contiguous copy of array x if it isn't C contiguous already\"\"\"\n if not x.flags.c_contiguous:\n x = x.copy()\n return x\n'''\n# use np.vstack instead:\ndef cvec(x):\n \"\"\"Return x as a column vector. x must be a scalar or a vector\"\"\"\n x = np.asarray(x)\n assert x.squeeze().ndim in [0, 1]\n try:\n nrows = len(x)\n except TypeError: # x is scalar?\n nrows = 1\n x.shape = (nrows, 1)\n return x\n'''\ndef is_empty(x):\n \"\"\"Check if sequence is empty. There really should be a np.is_empty function\"\"\"\n print(\"WARNING: not thoroughly tested!!!\")\n x = np.asarray(x)\n if np.prod(x.shape) == 0:\n return True\n else:\n return False\n\ndef cut(ts, trange):\n \"\"\"Returns timestamps, where tstart <= timestamps <= tend\n Copied and modified from neuropy rev 149\"\"\"\n lo, hi = argcut(ts, trange)\n return ts[lo:hi] # slice it\n\ndef argcut(ts, trange):\n \"\"\"Returns timestamp slice indices, where tstart <= timestamps <= tend\n Copied and modified from neuropy rev 149\"\"\"\n tstart, tend = trange[0], trange[1]\n '''\n # this is what we're trying to do:\n return ts[ (ts >= tstart) & (ts <= tend) ]\n ts.searchsorted([tstart, tend]) method does it faster, because it assumes ts are ordered.\n It returns an index where the values would fit in ts. The index is such that\n ts[index-1] < value <= ts[index]. 
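For example, ts = [0, 10, 20, 30] with trange = (10, 25) gives lo, hi = 1, 3, so ts[lo:hi] == [10, 20]. 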
In this formula ts[ts.size]=inf and ts[-1]= -inf\n '''\n lo, hi = ts.searchsorted([tstart, tend]) # indices where tstart and tend would fit in ts\n # can probably avoid all this end inclusion code by using the 'side' kwarg,\n # not sure if I want end inclusion anyway:\n '''\n if tend == ts[min(hi, len(ts)-1)]:\n # if tend matches a timestamp (protect from going out of index bounds when checking)\n hi += 1 # inc to include a timestamp if it happens to exactly equal tend.\n # This gives us end inclusion\n hi = min(hi, len(ts)) # limit hi to max slice index (==max value index + 1)\n '''\n return lo, hi\n\ndef argmatch(a, v):\n \"\"\"\n Find indices into `a` where elements in `v` match those in `a`.\n\n Find the indices into an array `a` whose values match those queried in `v`.\n Both arrays are first flattened to 1D, and the values in `a` must be unique.\n This is an accelerated equivalent of:\n\n `np.array([ int(np.where(a == val)[0]) for val in v ])`\n\n Parameters\n ----------\n a : 1-D array_like\n Input array.\n v : array_like\n Values to match against `a`.\n\n Returns\n -------\n indices : array of ints\n Array of indices into `a` with the same shape as `v`.\n\n Adapted from http://stackoverflow.com/a/8251668\n See numpy GH PR: https://github.com/numpy/numpy/pull/9055\n \"\"\"\n a, v = np.ravel(a), np.ravel(v)\n if len(a) != len(np.unique(a)):\n raise ValueError(\"Values in `a` must be unique for unambiguous results\")\n if not np.in1d(v, a).all():\n raise ValueError(\"Values array %s is not a subset of input array %s\" % (v, a))\n asortis = a.argsort()\n return asortis[a.searchsorted(v, sorter=asortis)]\n\ndef dist(a, b):\n \"\"\"Return the Euclidean distance between two N-dimensional coordinates\"\"\"\n a = np.asarray(a)\n b = np.asarray(b)\n return np.sqrt(((a-b)**2).sum())\n\ndef eucd(coords):\n \"\"\"Generates Euclidean distance matrix from a\n sequence of n m-dimensional coordinates. Nice and fast.\n Written by Willi Richert\n Taken from:\n http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/498246\n on 2006/11/11\n \"\"\"\n coords = np.array(coords)\n n, m = coords.shape\n delta = np.zeros((n, n), dtype=np.float64)\n for d in range(m):\n data = coords[:, d]\n delta += (data - data[:, np.newaxis]) ** 2\n return np.sqrt(delta)\n\ndef revcmp(x, y):\n \"\"\"Does the reverse of cmp():\n Return negative if y<x, zero if y==x, positive if y>x\"\"\"\n return cmp(y, x)\n\n\nclass Gaussian(object):\n \"\"\"Gaussian function, works with ndarray inputs\"\"\"\n def __init__(self, mu, sigma):\n self.mu = mu\n self.sigma = sigma\n\n def __call__(self, x):\n \"\"\"Called when self is called as a f'n.\n Don't bother normalizing by 1/(sigma*np.sqrt(2*pi)),\n don't care about normalizing the integral,\n just want to make sure that f(0) == 1\"\"\"\n return np.exp( -(x-self.mu)**2 / (2*self.sigma**2) )\n\n def __getitem__(self, x):\n \"\"\"Called when self is indexed into\"\"\"\n return self(x)\n\n\ndef g(x0, sx, x):\n \"\"\"1-D Gaussian\"\"\"\n return np.exp( -(x-x0)**2 / (2*sx**2) )\n\ndef g2(x0, y0, sx, sy, x, y):\n \"\"\"2-D Gaussian\"\"\"\n arg = -(x-x0)**2 / (2*sx**2) - (y-y0)**2 / (2*sy**2)\n return np.exp(arg)\n\ndef g2s(x0, y0, s, x, y):\n \"\"\"Symmetric 2-D Gaussian\"\"\"\n arg = -(x-x0)**2 / (2*s**2) - (y-y0)**2 / (2*s**2)\n return np.exp(arg)\n\ndef g3(x0, y0, z0, sx, sy, sz, x, y, z):\n \"\"\"3-D Gaussian\"\"\"\n return np.exp( -(x-x0)**2 / (2*sx**2) - (y-y0)**2 / (2*sy**2) - (z-z0)**2 / (2*sz**2) )\n\ndef cauchy(x0, gx, x):\n \"\"\"1-D Cauchy. 
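Peak-normalized so that f(x0) == 1, e.g.:\n\n >>> cauchy(0.0, 2.0, 0.0)\n 1.0\n\n 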
See http://en.wikipedia.org/wiki/Cauchy_distribution\"\"\"\n #return INVPI * gx/((x-x0)**2+gx**2)\n gx2 = gx * gx\n return gx2 / ((x-x0)**2 + gx2)\n\ndef cauchy2(x0, y0, gx, gy, x, y):\n \"\"\"2-D Cauchy\"\"\"\n #return INVPI * gx/((x-x0)**2+gx**2) * gy/((y-y0)**2+gy**2)\n return (gx*gy)**2 / ((x-x0)**2 + gx**2) / ((y-y0)**2 + gy**2)\n\ndef Vf(Im, x0, y0, z0, sx, sy, sz, x, y, z):\n \"\"\"1/r voltage decay function in 2D space\n What to do with the singularity so that the leastsq gets a smooth differentiable f'n?\"\"\"\n #if np.any(x == x0) and np.any(y == y0) and np.any(z == z0):\n # raise ValueError, 'V undefined at singularity'\n return Im / (4*pi) / np.sqrt( sx**2 * (x-x0)**2 + sy**2 * (y-y0)**2 + sz**2 * (z-z0)**2)\n\ndef dgdmu(mu, sigma, x):\n \"\"\"Partial of g wrt mu\"\"\"\n return (x - mu) / sigma**2 * g(mu, sigma, x)\n\ndef dgdsigma(mu, sigma, x):\n \"\"\"Partial of g wrt sigma\"\"\"\n return (x**2 - 2*x*mu + mu**2) / sigma**3 * g(mu, sigma, x)\n\ndef dg2dx0(x0, y0, sx, sy, x, y):\n \"\"\"Partial of g2 wrt x0\"\"\"\n return g(y0, sy, y) * dgdmu(x0, sx, x)\n\ndef dg2dy0(x0, y0, sx, sy, x, y):\n \"\"\"Partial of g2 wrt y0\"\"\"\n return g(x0, sx, x) * dgdmu(y0, sy, y)\n\ndef dg2dsx(x0, y0, sx, sy, x, y):\n \"\"\"Partial of g2 wrt sx\"\"\"\n return g(y0, sy, y) * dgdsigma(x0, sx, x)\n\ndef dg2dsy(x0, y0, sx, sy, x, y):\n \"\"\"Partial of g2 wrt sy\"\"\"\n return g(x0, sx, x) * dgdsigma(y0, sy, y)\n\ndef dg2sdx0(x0, y0, s, x, y):\n \"\"\"Partial of g2s wrt x0\"\"\"\n return g(y0, s, y) * dgdmu(x0, s, x)\n\ndef dg2sdy0(x0, y0, s, x, y):\n \"\"\"Partial of g2s wrt y0\"\"\"\n return g(x0, s, x) * dgdmu(y0, s, y)\n\ndef dg2sds(x0, y0, s, x, y):\n \"\"\"Partial of g2s wrt s\"\"\"\n # product rule: d/ds [g(x0,s,x) * g(y0,s,y)]:\n return g(y0, s, y) * dgdsigma(x0, s, x) + g(x0, s, x) * dgdsigma(y0, s, y)\n\ndef RM(theta):\n \"\"\"Return 2D (2x2) rotation matrix, with theta counterclockwise rotation in radians\"\"\"\n return np.array([[cos(theta), -sin(theta)],\n [sin(theta), cos(theta)]])\n\n\nclass Poo(object):\n \"\"\"Poo function, works with ndarray inputs\"\"\"\n def __init__(self, a, b, c):\n self.a = a\n self.b = b\n self.c = c\n\n def __call__(self, x):\n \"\"\"Called when self is called as a f'n\"\"\"\n return (1+self.a*x) / (self.b+self.c*x**2)\n\n def __getitem__(self, x):\n \"\"\"Called when self is indexed into\"\"\"\n return self(x)\n\n\ndef hamming(t, N):\n \"\"\"Return y values of Hamming window at sample points t\"\"\"\n #if N == None:\n # N = (len(t) - 1) / 2\n return 0.54 - 0.46 * cos(pi * (2*t + N)/N)\n\ndef hex2rgb(hexcolours):\n \"\"\"Convert colours RGB hex string list into an RGB int array\"\"\"\n hexcolours = toiter(hexcolours)\n rgb = []\n for s in hexcolours:\n s = s[len(s)-6:len(s)] # get last 6 characters\n r, g, b = s[0:2], s[2:4], s[4:6]\n r, g, b = int(r, base=16), int(g, base=16), int(b, base=16)\n rgb.append((r, g, b))\n return np.uint8(rgb)\n\ndef hex2rgba(hexcolours, alpha=255):\n \"\"\"Convert colours RGB hex string list into an RGBA int array\"\"\"\n assert type(alpha) == int and 0 <= alpha <= 255\n rgb = hex2rgb(hexcolours)\n alphas = np.repeat(alpha, len(rgb))\n alphas.shape = -1, 1 # make it 2D column vector\n return np.concatenate([rgb, alphas], axis=1)\n\ndef hex2floatrgba(hexcolours, alpha=255):\n \"\"\"Convert colours RGB hex string list into an RGBA float array\"\"\"\n assert type(alpha) == int and 0 <= alpha <= 255\n rgba = hex2rgba(hexcolours, alpha)\n return np.float64(rgba) / 255.\n\ndef rgb2hex(rgbcolours):\n \"\"\"Convert RGB int array into a hex string list\"\"\"\n rgbcolours = 
toiter(rgbcolours)\n hx = []\n for rgb in rgbcolours:\n r, g, b = rgb\n h = hex(r*2**16 + g*2**8 + b)\n h = lrstrip(h, '0x', 'L')\n pad = (6 - len(h)) * '0'\n h = '#' + pad + h\n hx.append(h)\n return hx\n\ndef Rx(t):\n \"\"\"Rotation matrix around x axis, theta in radians\"\"\"\n return np.matrix([[1, 0, 0],\n [0, cos(t), -sin(t)],\n [0, sin(t), cos(t)]])\n\ndef Ry(t):\n \"\"\"Rotation matrix around y axis, theta in radians\"\"\"\n return np.matrix([[ cos(t), 0, sin(t)],\n [ 0, 1, 0],\n [-sin(t), 0, cos(t)]])\n\ndef Rz(t):\n \"\"\"Rotation matrix around z axis, theta in radians\"\"\"\n return np.matrix([[cos(t), -sin(t), 0],\n [sin(t), cos(t), 0],\n [ 0, 0, 1]])\n\ndef R(tx, ty, tz):\n \"\"\"Return full 3D rotation matrix, given thetas in degress.\n Mayavi (tvtk actually) rotates axes in Z, X, Y order, for\n some unknown reason. So, we have to do the same. See:\n tvtk_classes.zip/actor.py:32\n tvtk_classes.zip/prop3d.py:67\n \"\"\"\n # convert to radians, then take matrix product\n return Rz(tz*pi/180)*Rx(tx*pi/180)*Ry(ty*pi/180)\n'''\ndef normdeg(angle):\n return angle % 360\n\ndef win2posixpath(path):\n path = path.replace('\\\\', '/')\n path = os.path.splitdrive(path)[-1] # remove drive name from start\n return path\n\ndef oneD2D(a):\n \"\"\"Convert 1D array to 2D array. Can do this just as easily using a[None, :]\"\"\"\n a = a.squeeze()\n assert a.ndim == 1, \"array has more than one non-singleton dimension\"\n a.shape = 1, len(a) # make it 2D\n return a\n\ndef twoD1D(a):\n \"\"\"Convert trivially 2D array to 1D array. Seems unnecessary. Just call squeeze()\"\"\"\n a = a.squeeze()\n assert a.ndim == 1, \"array has more than one non-singleton dimension\"\n return a\n'''\ndef is_unique(a):\n \"\"\"Check whether a has purely unique values in it\"\"\"\n u = np.unique(a)\n if len(a) != len(u):\n return False\n else:\n return True\n\ndef intersect1d(arrays, assume_unique=False):\n \"\"\"Find the intersection of any number of 1D arrays.\n Return the sorted, unique values that are in all of the input arrays.\n Adapted from numpy.lib.arraysetops.intersect1d\"\"\"\n N = len(arrays)\n if N == 0:\n return np.asarray(arrays)\n arrays = list(arrays) # allow assignment\n if not assume_unique:\n for i, arr in enumerate(arrays):\n arrays[i] = np.unique(arr)\n aux = np.concatenate(arrays) # one long 1D array\n aux.sort() # sorted\n if N == 1:\n return aux\n shift = N-1\n return aux[aux[shift:] == aux[:-shift]]\n\ndef rowtake(a, i):\n \"\"\"For each row in a, return values according to column indices in the\n corresponding row in i. 
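E.g.:\n\n >>> rowtake(np.array([[10, 20], [30, 40]]), np.array([1, 0]))\n array([20, 30])\n\n 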
Returned shape == i.shape\"\"\"\n assert a.ndim == 2\n assert i.ndim <= 2\n '''\n if i.ndim == 1:\n j = np.arange(a.shape[0])\n else: # i.ndim == 2\n j = np.repeat(np.arange(a.shape[0]), i.shape[1])\n j.shape = i.shape\n j *= a.shape[1]\n j += i\n return a.flat[j]\n '''\n # this is about 3X faster:\n if i.ndim == 1:\n return a[np.arange(a.shape[0]), i]\n else: # i.ndim == 2\n return a[np.arange(a.shape[0])[:, None], i]\n\ndef td2usec(td):\n \"\"\"Convert datetime.timedelta to int microseconds\"\"\"\n sec = td.total_seconds() # float\n usec = intround(sec * 1000000) # round to nearest us\n return usec\n\ndef td2fusec(td):\n \"\"\"Convert datetime.timedelta to float microseconds\"\"\"\n sec = td.total_seconds() # float\n usec = sec * 1000000\n return usec\n\ndef td2days(td):\n \"\"\"Convert datetime.timedelta to days\"\"\"\n sec = td.total_seconds() # float\n days = sec / 3600 / 24\n return days\n\ndef unsortedis(x):\n \"\"\"Return indices of entries in x that are out of order\"\"\"\n x = np.asarray(x)\n try:\n if x.dtype.kind == 'u':\n # x is unsigned int array, risk of int underflow in np.diff\n x = np.int64(x)\n except AttributeError:\n pass # no dtype, not an array\n return np.where(np.diff(x) < 0)[0] # where is the diff between consecutive entries < 0?\n\ndef issorted(x):\n \"\"\"Check if x is sorted\"\"\"\n return len(unsortedis(x)) == 0\n # or, you could compare the array to an explicitly sorted version of itself,\n # and see if they're identical\n\ndef concatenate_destroy(arrs):\n \"\"\"Concatenate list of arrays along 0th axis, destroying them in the process.\n Doesn't duplicate everything in arrays, as does numpy.concatenate. Only\n temporarily duplicates one array at a time, saving memory\"\"\"\n if type(arrs) != list:\n raise TypeError('Arrays must be in a list')\n #arrs = list(arrs) # don't do this! this prevents destruction of the original arrs\n nrows = 0\n subshape = arrs[0].shape[1::] # dims excluding concatenation dim\n dtype = arrs[0].dtype\n # ensure all arrays in arrs are compatible:\n for i, a in enumerate(arrs):\n nrows += len(a)\n if a.shape[1::] != subshape:\n raise TypeError(\"Array %d has subshape %r instead of %r\" %\n (i, a.shape[1::], subshape))\n if a.dtype != dtype:\n raise TypeError(\"Array %d has dtype %r instead of %r\" % (i, a.dtype, dtype))\n subshape = list(subshape)\n shape = [nrows] + subshape\n\n # unlike np.zeros, it seems np.empty doesn't allocate real memory, but does temporarily\n # allocate virtual memory, which is then converted to real memory as 'a' is filled:\n try:\n a = np.empty(shape, dtype=dtype) # empty only allocates virtual memory\n except MemoryError:\n raise MemoryError(\"concatenate_destroy: not enough virtual memory to allocate \"\n \"destination array. 
Create/grow your swap file?\")\n \n rowi = 0\n for i in range(len(arrs)):\n arr = arrs.pop(0)\n nrows = len(arr)\n a[rowi:rowi+nrows] = arr # concatenate along 0th axis\n rowi += nrows\n return a\n\ndef merged_interval_gen(intervals):\n \"\"\"Generator that merges overlapping (start, stop) intervals.\n Adapted from https://codereview.stackexchange.com/a/108651\"\"\"\n lo, hi = intervals[0] # bounds of the current run of merges\n for interval in intervals[1:]:\n if interval[0] <= hi: # new interval overlaps current run\n hi = max(hi, interval[1]) # merge with the current run\n else: # current run is over\n yield lo, hi # yield accumulated interval\n lo, hi = interval # start new run\n yield lo, hi # end the final run\n\ndef merge_intervals(intervals):\n return list(merged_interval_gen(intervals))\n\ndef nullwavesat(wave, ntwin):\n \"\"\"Null out saturated regions of WaveForm data in-place by replacing them with a linear\n ramp on each channel to minimize sharp edges. Return nx2 2D array of nulled tranges\"\"\"\n nt = wave.data.shape[1] # num timepoints in wave.data\n sattis = wave.satis.any(axis=0) # time only, collapse across all chans\n edges = np.diff(sattis.astype(int)) # find +ve and -ve edges\n onis = np.where(edges > 0)[0] + 1\n offis = np.where(edges < 0)[0] + 1\n if len(onis) - len(offis) == 1:\n offis = np.append(offis, nt) # last off is end of block\n elif len(offis) - len(onis) == 1:\n onis = np.append(onis, 0) # first on is start of block\n # convert to nx2 array, expand window for zeroing around on\n # and off index of each saturation:\n nulltrangeis = np.stack([onis-ntwin, offis+ntwin], axis=1)\n nulltrangeis = np.asarray(merge_intervals(nulltrangeis)) # remove overlap\n nulltrangeis = nulltrangeis.clip(min=0, max=nt-1) # limit to valid index values\n # replace data on each channel with linear ramp from start to end of each\n # time nulltrange:\n for chan in wave.data:\n for nulltrangei in nulltrangeis:\n i, j = nulltrangei\n chan[i:j] = np.linspace(chan[i], chan[j], j-i, dtype=int)\n nulltranges = wave.ts[nulltrangeis] # dereference\n print('Nulled time ranges:')\n print(intround(nulltranges)) # convert to int for better display\n return nulltranges\n\ndef lst2shrtstr(lst, sigfigs=4, brackets=False):\n \"\"\"Return string representation of list, replacing any floats with potentially\n shorter representations with fewer sig figs. Any string items in list will be\n simplified by having their quotes removed\"\"\"\n gnumfrmt = string.join(['%.', str(sigfigs), 'g'], sep='')\n strlst = []\n for val in lst:\n try:\n strlst.append(gnumfrmt % val)\n except TypeError:\n strlst.append(val) # val isn't a general number\n s = string.join(strlst, sep=', ')\n if brackets:\n s = string.join(['[', s, ']'], sep='')\n return s\n\ndef rms(a, axis=None):\n \"\"\"Return root-mean-squared value of array a along axis\"\"\"\n return np.sqrt(np.mean(a**2, axis))\n\ndef rmserror(a, b, axis=None):\n \"\"\"Return root-mean-squared error between arrays a and b\"\"\"\n return rms(a - b, axis=axis)\n\ndef printflush(*args, **kwargs):\n \"\"\"Print args and flush to stdout immediately, so that\n python need not be started in unbuffered mode, or PYTHONUNBUFFERED env need\n not be set. 
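Use it exactly like print(), e.g. printflush('filtering chan', chani); note that any file kwarg passed in is overridden with sys.stdout. 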
Adapted from https://stackoverflow.com/a/27991478\"\"\"\n file = sys.stdout\n kwargs['file'] = file\n print(*args, **kwargs)\n file.flush()\n\ndef lstrip(s, strip):\n \"\"\"What I think str.lstrip should really do\"\"\"\n if s.startswith(strip):\n return s[len(strip):] # strip it\n else:\n return s\n\ndef rstrip(s, strip):\n \"\"\"What I think str.rstrip should really do\"\"\"\n if s.endswith(strip):\n return s[:-len(strip)] # strip it\n else:\n return s\n\ndef strip(s, strip):\n \"\"\"What I think str.strip should really do\"\"\"\n return rstrip(lstrip(s, strip), strip)\n\ndef lrstrip(s, lstr, rstr):\n \"\"\"Strip lstr from start of s and rstr from end of s\"\"\"\n return rstrip(lstrip(s, lstr), rstr)\n\ndef isascii(c):\n \"\"\"Check if character c is a printable character, TAB, LF, or CR\"\"\"\n try:\n c = ord(c) # convert string character to decimal representation\n except TypeError: # it's already an int? (Py3)\n pass\n return 32 <= c <= 127 or c in [9, 10, 13]\n\ndef rstripnonascii(s):\n \"\"\"Return a new string with all characters after the first non-ASCII character\n stripped from the string\"\"\"\n for i, c in enumerate(s):\n if not isascii(c):\n return s[:i]\n return s\n\ndef matlabize(s):\n \"\"\"Make string s suitable for use as a MATLAB function/script name\"\"\"\n s = s.replace(' ', '_')\n s = s.replace('.', '_')\n s = s.replace('-', '_')\n assert len(s) <= 63 # MATLAB function/script name length limitation\n return s\n\ndef pad(x, align=8):\n \"\"\"Pad x with null bytes so it's a multiple of align bytes long\"\"\"\n if type(x) == str: # or maybe unicode?\n return padstr(x, align=align)\n elif type(x) == np.ndarray:\n return padarr(x, align=align)\n else:\n raise TypeError('Unhandled type %r in pad()')\n\ndef padstr(x, align=8):\n \"\"\"Pad string x with null bytes so it's a multiple of align bytes long.\n Return bytes object\"\"\"\n nbytes = len(x)\n rem = nbytes % align\n npadbytes = align - rem if rem else 0 # nbytes to pad with for 8 byte alignment\n x = x.encode('ascii') # ensure it's pure ASCII, where each char is 1 byte\n if npadbytes == 0:\n return x\n x += NULL * npadbytes # returns a copy, doesn't modify in place\n assert len(x) % align == 0\n return x\n\ndef padarr(x, align=8):\n \"\"\"Flatten array x and pad with null bytes so it's a multiple of align bytes long\"\"\"\n nitems = len(x.ravel())\n nbytes = x.nbytes\n dtypenbytes = x.dtype.itemsize\n rem = nbytes % align\n npadbytes = align - rem if rem else 0 # nbytes to pad with for 8 byte alignment\n if npadbytes == 0:\n return x\n if npadbytes % dtypenbytes != 0:\n raise RuntimeError(\"Can't pad %d byte array to %d byte alignment\" %\n (dtypenbytes, align))\n npaditems = npadbytes / dtypenbytes\n x = x.ravel().copy() # don't modify in place\n # pads with npaditems zeros, each of length dtypenbytes:\n x.resize(nitems + npaditems, refcheck=False)\n assert x.nbytes % align == 0\n return x\n\ndef shiftpad(a, n):\n \"\"\"Horizontally shift 2D array a *in-place* by n points. -ve n shifts\n left, +ve shifts right. Pad with edge values at the appropriate end.\n This is probably the same as np.roll(), except edge values are padded\n instead of wrapped. 
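E.g. for a = [[1, 2, 3]], n=1 gives [[1, 1, 2]] and n=-1 gives [[2, 3, 3]], whereas np.roll would wrap to [[3, 1, 2]] and [[2, 3, 1]]. 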
Also, I think np.roll returns a copy\"\"\"\n assert a.ndim == 2\n assert type(n) == int\n assert n != 0\n if n > 0: # shift right, pad with left edge\n ledge = a[:, 0, None] # keep it 2D (nrows x 1)\n a[:, n:] = a[:, :-n] # throw away right edge\n a[:, 1:n] = ledge # pad with left edge\n else: # n < 0, shift left, pad with right edge\n redge = a[:, -1, None] # keep it 2D (nrows x 1)\n a[:, :n] = a[:, -n:] # throw away left edge\n a[:, n:-1] = redge # pad with right edge\n # no need to return anything\n\ndef rollwin(a, width):\n \"\"\"Return a.nd + 1 dimensional array, where the last dimension contains\n consecutively shifted windows of a of the given width, each shifted by 1\n along the last dimension of a. This allows for calculating rolling stats,\n as well as searching for the existence and position of subarrays in a\n larger array, all without having to resort to Python loops or making\n copies of a.\n\n Taken from:\n http://www.rigtorp.se/2011/01/01/rolling-statistics-numpy.html\n http://stackoverflow.com/questions/7100242/python-numpy-first-occurrence-of-subarray\n http://stackoverflow.com/questions/6811183/rolling-window-for-1d-arrays-in-numpy\n\n Ex 1:\n >>> x = np.arange(10).reshape((2,5))\n >>> rollwin(x, 3)\n array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]],\n [[5, 6, 7], [6, 7, 8], [7, 8, 9]]]) \n >>> np.mean(rollwin(x, 3), -1)\n array([[ 1., 2., 3.],\n [ 6., 7., 8.]])\n\n Ex 2:\n >>> a = np.arange(10)\n >>> np.random.shuffle(a)\n >>> a\n array([7, 3, 6, 8, 4, 0, 9, 2, 1, 5])\n >>> rollwin(a, 3) == [8, 4, 0]\n array([[False, False, False],\n [False, False, False],\n [False, False, False],\n [ True, True, True],\n [False, False, False],\n [False, False, False],\n [False, False, False],\n [False, False, False]], dtype=bool)\n >>> np.all(rollwin(a, 3) == [8, 4, 0], axis=1)\n array([False, False, False, True, False, False, False, False], dtype=bool)\n >>> np.where(np.all(rollwin(a, 3) == [8, 4, 0], axis=1))[0][0]\n 3\n \"\"\"\n shape = a.shape[:-1] + (a.shape[-1] - width + 1, width)\n strides = a.strides + (a.strides[-1],)\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\ndef rollwin2D(a, width):\n \"\"\"A modified version of rollwin. Allows for easy columnar search of 2D\n subarray b within larger 2D array a, assuming both have the same number of\n rows.\n \n Ex:\n >>> a\n array([[44, 89, 34, 67, 11, 92, 22, 72, 10, 81],\n [52, 40, 29, 35, 67, 10, 24, 23, 65, 51],\n [70, 58, 14, 34, 11, 66, 47, 68, 11, 56],\n [70, 55, 47, 30, 39, 79, 71, 70, 67, 33]]) \n >>> b\n array([[67, 11, 92],\n [35, 67, 10],\n [34, 11, 66],\n [30, 39, 79]])\n >>> np.where((rollwin2D(a, 3) == b).all(axis=1).all(axis=1))[0]\n array([3])\n \"\"\"\n assert a.ndim == 2\n shape = (a.shape[1] - width + 1, a.shape[0], width)\n strides = (a.strides[-1],) + a.strides\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\ndef argcolsubarr2D(a, b):\n \"\"\"Return column index of smaller subarray b within bigger array a. Both\n must be 2D and have the same number of rows. 
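E.g. with a = [[1, 2, 3], [4, 5, 6]] and b = [[2], [5]], returns array([1]). 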
Raises IndexError if b is not\n a subarray of a\"\"\"\n assert a.ndim == b.ndim == 2\n assert a.shape[0] == b.shape[0] # same nrows\n width = b.shape[1] # ncols in b\n return np.where((rollwin2D(a, width) == b).all(axis=1).all(axis=1))[0]\n\ndef lrrep2Darrstripis(a):\n \"\"\"Return left and right slice indices that strip repeated values from all rows\n from left and right ends of 2D array a, such that a[:, lefti:righti] gives you\n the stripped version.\n\n Ex:\n >>> a\n array([[44, 44, 44, 44, 89, 34, 67, 11, 92, 22, 72, 10, 81, 81, 81],\n [52, 52, 52, 52, 40, 29, 35, 67, 10, 24, 23, 65, 51, 51, 51],\n [70, 70, 70, 70, 58, 14, 34, 11, 66, 47, 68, 11, 56, 56, 56],\n [70, 70, 70, 70, 55, 47, 30, 39, 79, 71, 70, 67, 33, 33, 33]])\n >>> lrrep2Darrstripis(a)\n (3, -2)\n \"\"\"\n assert a.ndim == 2\n left = a[:, :1] # 2D column vector\n right = a[:, -1:] # 2D column vector\n leftcolis = argcolsubarr2D(a, left)\n lefti = 0 # at least 1 hit, at the far left edge\n if len(leftcolis) > 1: # multiple hits, get slice index of rightmost consecutive hit\n consecis = np.where(np.diff(leftcolis) == 1)[0]\n if len(consecis) > 0:\n lefti = max(consecis) + 1\n rightcolis = argcolsubarr2D(a, right)\n righti = a.shape[1] # at least 1 hit, at the far right edge\n if len(rightcolis) > 1: # multiple hits, get slice index of leftmost consecutive hit\n consecis = np.where(np.diff(rightcolis)[::-1] == 1)[0]\n if len(consecis) > 0:\n righti = -(max(consecis) + 1)\n return lefti, righti\n\ndef normpdf(p, lapcorrect=1e-10):\n \"\"\"Ensure p is normalized (sums to 1). Return p unchanged if it's already normalized.\n Otherwise, return it normalized. I guess this treats p as a pmf, not strictly a pdf.\n Optional apply Laplacian correction to avoid 0s\"\"\"\n p = np.float64(p) # copy and ensure it's float before modifying in-place\n if lapcorrect and (p == 0).any():\n p += lapcorrect\n psum = p.sum()\n if not np.allclose(psum, 1.0) and psum > 0: # make sure the probs sum to 1\n #print(\"p sums to %f instead of 1, normalizing\" % psum)\n p /= psum\n return p\n\ndef negentropy(x, axis=0):\n \"\"\"Return estimate of negative entropy (and differential entropy) of ndarray x along axis.\n Adapted from Aapo Hyvarinen's mentappr.m dated May 2012, which is based on his NIPS*97\n paper: http://www.cs.helsinki.fi/u/ahyvarin/papers/NIPS97.pdf - \"New approximations of\n differential entropy for independent component analysis and projection pursuit\"\n \"\"\"\n # constants:\n k1 = 36 / (8*np.sqrt(3) - 9)\n gamma = 0.37457\n k2 = 79.047\n # entropy of a standard Gaussian, 1.4189 (in bits? 
maybe not, since it's natural log):\n gaussianEntropy = np.log(2*pi) / 2 + 0.5\n # normalize to 0 mean and unit variance:\n x = x - x.mean(axis=axis) # don't do this in place\n stdx = x.std(axis=axis)\n x = x / stdx\n\n negentropy = ( k2*((np.log(np.cosh(x))).mean(axis=axis) - gamma)**2 +\n k1*((x*np.exp(-x**2/2)).mean(axis=axis))**2 )\n #diffentropy = gaussianEntropy - negentropy + np.log(stdx)\n return negentropy\n\ndef DKL(p, q):\n \"\"\"Kullback-Leibler divergence from true probability distribution p to arbitrary\n distribution q\"\"\"\n assert len(p) == len(q)\n p, q = normpdf(np.asarray(p)), normpdf(np.asarray(q))\n return sum(p * np.log2(p/q))\n \ndef DJS(p, q):\n \"\"\"Jensen-Shannon divergence, a symmetric measure of divergence between\n distributions p and q\"\"\"\n assert len(p) == len(q)\n p, q = normpdf(np.asarray(p)), normpdf(np.asarray(q))\n m = (p + q) / 2\n return (DKL(p, m) + DKL(q, m)) / 2\n\ndef filter(data, sampfreq=1000, f0=0, f1=7, fr=0.5, gpass=0.01, gstop=30, ftype='ellip'):\n \"\"\"Bandpass filter data on row indices chanis, between f0 and f1 (Hz), with filter\n rolloff (?) fr (Hz).\n\n ftype: 'ellip', 'butter', 'cheby1', 'cheby2', 'bessel'\n \"\"\"\n w0 = f0 / (sampfreq / 2) # fraction of Nyquist frequency == 1/2 sampling rate\n w1 = f1 / (sampfreq / 2)\n wr = fr / (sampfreq / 2)\n if w0 == 0:\n wp = w1\n ws = w1+wr\n elif w1 == 0:\n wp = w0\n ws = w0-wr\n else:\n wp = [w0, w1]\n ws = [w0-wr, w1+wr]\n b, a = scipy.signal.iirdesign(wp, ws, gpass=gpass, gstop=gstop, analog=0, ftype=ftype)\n data = scipy.signal.lfilter(b, a, data)\n return data, b, a\n\ndef filterord(data, sampfreq=1000, f0=300, f1=None, order=4, rp=None, rs=None,\n btype='highpass', ftype='butter', causal=True):\n \"\"\"Bandpass filter data by specifying filter order and btype, instead of gpass and gstop.\n\n btype: 'lowpass', 'highpass', 'bandpass', 'bandstop'\n ftype: 'ellip', 'butter', 'cheby1', 'cheby2', 'bessel'\n\n For 'ellip', need to also specify passband and stopband ripple with rp and rs.\n \"\"\"\n if f0 != None and f1 != None: # both are specified\n assert btype in ['bandpass', 'bandstop']\n fn = np.array([f0, f1])\n elif f0 != None: # only f0 is specified\n assert btype == 'highpass'\n fn = f0\n elif f1 != None: # only f1 is specified\n assert btype == 'lowpass'\n fn = f1\n else: # neither f0 nor f1 are specified\n raise ValueError('At least one of f0 or f1 have to be specified')\n wn = fn / (sampfreq / 2) # wn can be either a scalar or a length 2 vector\n b, a = scipy.signal.iirfilter(order, wn, rp=rp, rs=rs, btype=btype, analog=0,\n ftype=ftype, output='ba')\n if causal:\n data = scipy.signal.lfilter(b, a, data) # causal, adds freq-dependent phase lag\n else:\n data = scipy.signal.filtfilt(b, a, data) # non-causal, 0 phase lag\n return data, b, a\n\ndef WMLDR(data, wname=\"db4\", maxlevel=6, mode='sym'):\n \"\"\"Perform wavelet multi-level decomposition and reconstruction (WMLDR) on multichannel\n data. See Wiltschko2008. Default to Daubechies(4) wavelet. Modifies data in-place, at\n least for now. The effective cutoff frequency is:\n\n fc = (sampfreq / 2) / 2**maxlevel (Wiltschko2008)\n\n For sampfreq of 25 kHz and maxlevel of 6, the effective cutoff frequency is 195 Hz.\n For sampfreq of 30 kHz and maxlevel of 6, the effective cutoff frequency is 234 Hz.\n\n TODO: for now, this only returns highpass data. In the future, this probably should\n return both low and highpass data (and not modify it in-place). 
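A minimal usage sketch: since filtering is in-place, pass a copy to keep the raw data, e.g. hp = WMLDR(data.copy()). 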
The Discussion in\n Wiltschko2008 suggests that this approach cannot be used to extract the LFP, but\n I don't see why you can't simply subtract the highpass data from the raw data to get the\n lowpass data.\n\n Signal extension modes (from PyWavelets docs):\n\n PyWavelets provides several methods of signal extrapolation that can be used to minimize\n edge effects. PyWavelet's default is 'sym':\n\n zpd - zero-padding - signal is extended by adding zero samples:\n ... 0 0 | x1 x2 ... xn | 0 0 ...\n\n cpd - constant-padding - border values are replicated:\n ... x1 x1 | x1 x2 ... xn | xn xn ...\n\n sym - symmetric-padding - signal is extended by mirroring samples:\n ... x2 x1 | x1 x2 ... xn | xn xn-1 ...\n\n ppd - periodic-padding - signal is treated as a periodic one:\n ... xn-1 xn | x1 x2 ... xn | x1 x2 ...\n\n sp1 - smooth-padding - signal is extended according to the first derivatives calculated on\n the edges (straight line)\n\n DWT performed for these extension modes is slightly redundant, but ensures perfect\n reconstruction. To receive the smallest possible number of coefficients, computations can\n be performed with the periodization mode:\n\n per - periodization - is like periodic-padding but gives the smallest possible number of\n decomposition coefficients. IDWT must be performed with the same mode.\n \"\"\"\n import pywt\n\n data = np.atleast_2d(data)\n nt = data.shape[1]\n # reconstructed signals always seem to have an even number of points. If the number of\n # input data points is odd, trim last data point from reconstructed signal:\n isodd = nt % 2\n # filter data in place, iterate over channels in rows:\n nchans = len(data)\n for chani in range(nchans):\n # decompose the signal:\n cs = pywt.wavedec(data[chani], wname, mode=mode, level=maxlevel)\n # destroy the appropriate approximation coefficients to get highpass data:\n cs[0] = None\n # reconstruct the signal:\n recsignal = pywt.waverec(cs, wname, mode=mode)\n ntrec = len(recsignal)\n data[chani] = recsignal[:ntrec-isodd]\n \n return data\n\ndef envelope_hilbert(x):\n \"\"\"Return envelope of signal x by taking abs of hilbert transform of x. Note this\n only really works for narrow-band signals. Otherwise, the output envelope is almost\n as noisy as the input.\n See:\n https://dsp.stackexchange.com/a/3464\n https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.signal.hilbert.html\n \"\"\"\n return np.abs(scipy.signal.hilbert(x))\n\ndef envelope_filt(x, sampfreq=None, f0=None, f1=BWLPF1, order=BWLPORDER, ftype='butter',\n causal=False):\n \"\"\"Calculate envelope of x by rectifying and then low-pass filtering. 
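With the defaults, this applies a zero-phase (filtfilt) Butterworth lowpass of order BWLPORDER at f1=BWLPF1 Hz to abs(x), e.g. env = envelope_filt(x, sampfreq=25000). 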
Return float64\"\"\"\n assert sampfreq is not None\n x = np.abs(x)\n x, b, a = filterord(x, sampfreq=sampfreq, f0=f0, f1=f1,\n order=order, rp=None, rs=None,\n btype='lowpass', ftype=ftype, causal=causal) # float64\n return x\n\ndef poly_between(x, ylower, yupper):\n \"\"\"Return the polygon that fills the regions between ylower and yupper, at the x.\n All 3 arguments must have the same length.\n\n Return values are x, y arrays for use with `matplotlib.axes.Axes.fill()`\n\n Adapted from deprecated `matplotlib.mlab.poly_between()`:\n https://matplotlib.org/api/mlab_api.html#matplotlib.mlab.poly_between\n \"\"\"\n assert len(x) == len(ylower) == len(yupper)\n # go in one direction for upper values, then opposite direction for lower values:\n x = np.concatenate((x, x[::-1]))\n y = np.concatenate((yupper, ylower[::-1]))\n return x, y\n\ndef updatenpyfilerows(fname, rows, arr):\n \"\"\"Given a numpy formatted binary file (usually with .npy extension,\n but not necessarily), update 0-based rows (first dimension) of the\n array stored in the file from arr. Works for arrays of any rank >= 1\"\"\"\n assert len(arr) >= 1 # has at least 1 row\n with open(fname, 'r+b') as f: # open in read+write binary mode\n # get .npy format version:\n major, minor = np.lib.format.read_magic(f)\n assert (major == 1 and minor == 0)\n # read header to move file pointer to start of array in file:\n shape, fortran_order, dtype = np.lib.format.read_array_header_1_0(f)\n assert shape == arr.shape\n assert fortran_order == np.isfortran(arr)\n assert dtype == arr.dtype\n arroffset = f.tell()\n rowsize = arr[0].size * dtype.itemsize # nbytes per row\n # sort rows so that we move efficiently from start to end of file:\n rows = sorted(rows) # rows might be a set, list, tuple, or array, convert to list\n # update rows in file:\n for row in rows:\n f.seek(arroffset + row*rowsize) # seek from start of file, row is 0-based\n f.write(arr[row])\n\n\nclass SpykeUnpickler(pickle.Unpickler):\n\n def find_class(self, oldmod, oldcls):\n \"\"\"Required for unpickling some .sort files and upgrading them to the next version.\n Rename class and module names that changed between two .sort versions. Unfortunately,\n we can't check the .sort version number until after unpickling, so this has to be done\n for *all* .sort files during unpickling, not after\"\"\"\n oldmod2newmod = {'core': 'stream'} # version 0.7 to 0.8\n oldcls2newcls = {'Stream': 'SurfStream', # 0.7 to 0.8\n 'SimpleStream': 'SimpleStream', # 0.7 to 0.8\n 'TrackStream': 'MultiStream', # 0.7 to 0.8\n 'A1x64_Poly2_6mm_23s_160': 'A1x64'} # 1.2 to 1.3\n try:\n newcls = oldcls2newcls[oldcls]\n except KeyError: # no old to new class conversion\n exec('import %s' % oldmod)\n return eval('%s.%s' % (oldmod, oldcls))\n newmod = oldmod2newmod.get(oldmod, oldmod)\n print('Rename on unpickle: %s.%s -> %s.%s' % (oldmod, oldcls, newmod, newcls))\n exec('import %s' % newmod)\n return eval('%s.%s' % (newmod, newcls))\n\ndef arr2json(arr, indent=0, ndec=6, max_line_width=75):\n \"\"\"Convert numpy.ndarray to string suitable for json list. Much nicer formatting\n than Python list. 
Indent all but the first line with indent spaces\"\"\"\n assert type(arr) == np.ndarray\n fltfmt = \"%.\" + str(ndec) + 'f'\n max_line_width = max_line_width - indent\n s = np.array2string(arr, separator=', ', threshold=np.inf, max_line_width=max_line_width,\n formatter={'float_kind':lambda x: fltfmt % x})\n indentstr = indent * ' '\n if indent:\n sl = s.splitlines()\n nlines = len(sl)\n for linei in range(1, nlines):\n sl[linei] = indentstr + sl[linei]\n s = '\\n'.join(sl)\n return s\n\ndef write_dat_json(stream, fulljsonfname, sampfreq=None, chans=None, auxchans=None,\n chan_order=None, envelope=None, gaps=False,\n tranges=None, nulltranges=None):\n \"\"\"Write .json metadata file as a companion to stream's file. For now, stream should be\n either a DATStream, NSXStream, or SurfStream\"\"\"\n ext = stream.ext\n assert ext in ['.dat', '.ns6', '.srf']\n if stream.is_multi(): # it's a MultiStream\n source_fnames = stream.fnames\n stream = stream.streams[0] # use its first stream to get field values\n else: # it's a single Stream\n source_fnames = [stream.fname]\n fh = stream.f.fileheader\n\n # choose values:\n if sampfreq is None:\n sampfreq = stream.sampfreq\n sample_rate = sampfreq\n if chans is None:\n chans = stream.chans # array\n if auxchans is None:\n auxchans = np.array([])\n nchans = len(chans) + len(auxchans)\n uV_per_AD = stream.converter.AD2uV(1)\n try:\n adaptername = stream.adaptername\n except AttributeError:\n adaptername = None # converts to json null\n # multiply by sampfreq instead of dividing by stream.tres, because passed sampfreq\n # might differ from that of stream:\n nsamples_offset = intround(stream.t0 / 1e6 * sampfreq)\n datetime = stream.datetime.isoformat()\n if ext == '.dat':\n author = fh.author\n version = fh.version\n notes = fh.notes\n elif ext == '.ns6':\n author = 'Blackrock NSP'\n version = ''\n notes = fh.comment\n elif ext == '.srf':\n author = fh.user_name\n version = fh.app_info\n notes = ''\n else:\n raise ValueError\n if tranges is None:\n tranges = stream.tranges # 2D\n if nulltranges is None:\n nulltranges = np.array([]) # normally would be 2D\n filtering = stream.filtering\n common_avg_ref = stream.car\n\n # write to odict:\n od = odict()\n od['nchans'] = nchans\n od['sample_rate'] = sample_rate\n if ext == '.srf':\n od['shcorrect'] = stream.shcorrect\n od['dtype'] = 'int16' # hard-coded, only dtype supported for now\n od['uV_per_AD'] = uV_per_AD\n od['probe_name'] = stream.probe.name\n od['adapter_name'] = adaptername\n od['chans'] = 'CHANSPLACEHOLDER' # array\n od['chan_order'] = chan_order # for human reference only, 'depth' & None are obvious values\n od['aux_chans'] = 'AUXCHANSPLACEHOLDER' # array\n od['nsamples_offset'] = nsamples_offset\n od['datetime'] = datetime\n od['author'] = author\n od['version'] = version\n od['notes'] = notes\n\n od['source_fnames'] = source_fnames\n if len(source_fnames) > 1:\n od['gaps'] = gaps # only meaningful for MultiStream with more than one Stream\n od['tranges'] = 'TRANGESPLACEHOLDER' # 2D array\n od['nulltranges'] = 'NULLTRANGESPLACEHOLDER' # 2D array\n\n od['filtering'] = filtering\n od['common_avg_ref'] = common_avg_ref\n if envelope:\n od['envelope'] = envelope\n\n # convert odict to json string, end with a blank line:\n indent = 1\n s = json.dumps(od, indent=indent) + '\\n'\n # swap array placeholders with nicely formatted array reprs:\n s = s.replace('\"CHANSPLACEHOLDER\"', arr2json(chans, indent=indent+9))\n s = s.replace('\"AUXCHANSPLACEHOLDER\"', arr2json(auxchans, indent=indent+13))\n s = 
s.replace('\"TRANGESPLACEHOLDER\"', arr2json(tranges, indent=indent+11))\n s = s.replace('\"NULLTRANGESPLACEHOLDER\"', arr2json(nulltranges, indent=indent+15))\n # write to file:\n with open(fulljsonfname, 'w') as jsonf:\n jsonf.write(s)\n print('Wrote metadata file %r' % fulljsonfname)\n\ndef write_ks2_chanmap_mat(stream, fname):\n \"\"\"Write stream's channel map information to .mat file for use by Kilosort2\"\"\"\n nchans = stream.nchans # number of enabled chans\n # mask to tell Kilosort2 which chans in .dat to use for sorting?:\n connected = np.tile(True, (nchans, 1)) # column vector\n chans = stream.chans # array of enabled chans\n # row vector, 1-based indices into coords, in order of data in .dat\n chanMap = np.arange(1, nchans+1)\n chanMap0ind = chanMap - 1 # 0-based version of chanMap\n # get coords of only the enabled chans, will be indexed into by chanMap in MATLAB:\n coords = np.asarray([ stream.probe.SiteLoc[chan] for chan in chans ])\n xcoords = coords[:, 0].reshape(-1, 1) # column vector\n ycoords = coords[:, 1].reshape(-1, 1) # column vector, origin is at top of probe\n ycoords = ycoords.max() - ycoords # Kilosort2 expects origin at bottom of probe\n kcoords = np.tile(1, (nchans, 1)) # column vector, something to do with shanks?\n fs = stream.rawsampfreq\n \"\"\"\n Export to .mat file with exactly the same dtypes (doubles) that Kilosort2's chanmap\n creation script (see .m files in templates/Kilosort2) does. Note that this saves to a\n MATLAB 5 (to version 7.2) .mat file, while by default contemporary MATLAB saves to the\n newer HDF5 version, which is unsupported by scipy.io. Both seem to load identically in\n MATLAB 2015a, so it shouldn't matter:\n \"\"\"\n matd = {'connected': connected, # leave as boolean\n 'chanMap': np.float64(chanMap),\n 'chanMap0ind': np.float64(chanMap0ind),\n 'xcoords': np.float64(xcoords),\n 'ycoords': np.float64(ycoords),\n 'kcoords': np.float64(kcoords),\n 'fs': np.float64(fs)}\n scipy.io.savemat(fname, matd)\n print('Wrote Kilosort2 chanmap file %r' % fname)\n\n\n## TODO: these should be deprecated after moving to Py3 and/or Qt5:\n\ndef qvar2list(qvar):\n \"\"\"Convert Py2 + Qt4 QVariant to a list\"\"\"\n try:\n return qvar.toList() # Py2 + Qt4\n except AttributeError:\n return qvar # Py3 and/or Qt5\n\ndef qvar2str(qvar):\n \"\"\"Convert Py2 + Qt4 QVariant to a string\"\"\"\n try:\n return str(qvar.toString()) # Py2 + Qt4\n except AttributeError:\n return str(qvar) # Py3 and/or Qt5\n\ndef qvar2int(qvar):\n \"\"\"Convert Py2 + Qt4 QVariant to an int\"\"\"\n try:\n return qvar.toInt()[0] # Py2 + Qt4\n except AttributeError:\n return qvar # Py3 and/or Qt5\n",
"\"\"\"Load Blackrock Neural Signal Processing System .nsx files. Inherits from dat.File, because\n.nsx also stores its waveform data in flat .dat format\n\nBased on file documentation at:\n\nhttp://support.blackrockmicro.com/KB/View/166838-file-specifications-packet-details-headers-etc\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\n__authors__ = ['Martin Spacek']\n\n# When comparing e.g. a Python string to a numpy numeric array with `in` or `==` operators,\n# numpy currently (1.17.4) returns a scalar (True or False), though it would make more sense\n# if it returned an array. This is due to a standoff between Python and numpy devs:\n# https://stackoverflow.com/a/46721064\n# This raises the following annoying warning:\n# FutureWarning: elementwise comparison failed; returning scalar instead, but in the\n# future will perform elementwise comparison\n# Suppress with:\n#import warnings\n#warnings.simplefilter(action='ignore', category=FutureWarning)\n\n# use raw_input() in Py2, which is simply called input() in Py3:\ntry:\n input = raw_input\nexcept NameError:\n pass\n\nimport os\nfrom struct import unpack\nimport datetime\nimport json\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .core import NULL, rstripnonascii\nfrom . import dat # for inheritance\nfrom . import probes\nfrom .stream import NSXStream\n\n\nclass File(dat.File):\n \"\"\"Open an .nsx file and expose its header fields and data as attribs\"\"\"\n\n def _bind_streams(self):\n self.hpstream = NSXStream(self, kind='highpass')\n self.lpstream = NSXStream(self, kind='lowpass')\n\n def _parseFileHeader(self):\n \"\"\"Parse the file header\"\"\"\n self.fileheader = FileHeader()\n self.fileheader.parse(self.f)\n self.fileheader.parse_json(self.f)\n #print('Parsed fileheader')\n\n def load(self):\n \"\"\"Load the waveform data. Data are stored in packets. Normally, there is only one\n long contiguous data packet, but if there are pauses during the recording, the\n data is broken up into multiple packets, with a time gap between each one. Need\n to step over all chans, including aux chans, so pass nchanstotal instead of nchans\"\"\"\n datapacket = DataPacket(self.f, self.fileheader.nchanstotal)\n if self.f.tell() != self.filesize: # make sure we're at EOF\n raise NotImplementedError(\"Can't handle pauses in recording yet\")\n self.datapacket = datapacket\n self.contiguous = True\n\n def chan2datarowi(self, chan):\n \"\"\"Find row in self.datapacket._data corresponding to chan.\n chan can be either an integer id or a string label\"\"\"\n if type(chan) == int:\n datarowis, = np.where(chan == self.fileheader.allchans)\n elif type(chan) == str:\n datarowis, = np.where(chan == self.fileheader.alllabels)\n else:\n raise ValueError(\"Unexpected type %s for chan %s\" % (type(chan), chan))\n if len(datarowis) == 0:\n raise ValueError(\"Can't find chan %r\" % chan)\n elif len(datarowis) > 1:\n raise ValueError(\"Found multiple occurences of chan %r at data rows %r\"\n % (chan, datarowis))\n return datarowis[0]\n\n def get_chantype(self, chan):\n \"\"\"Return the type ('ephys' or 'aux') of chan. 
chan can be either an integer id\n or a string label.\"\"\"\n chantypes = []\n fh = self.fileheader\n if chan in list(fh.chans) or chan in list(fh.ephyschanlabels):\n chantypes.append('ephys')\n if chan in list(fh.auxchans) or chan in list(fh.auxchanlabels):\n chantypes.append('aux')\n if len(chantypes) == 0:\n raise ValueError(\"Can't find chan %r\" % chan)\n elif len(chantypes) > 1:\n raise ValueError(\"Found multiple types for chan %r: %r\" % (chan, chantypes))\n return chantypes[0]\n\n def get_chanAD(self, chan):\n \"\"\"Return AD data for a single chan. chan can be either an integer id\n or a string label. To convert to voltage, use the appropriate multiplier\n (AD2uVx for ephys chans, AD2mVx for aux chans)\"\"\"\n datarowi = self.chan2datarowi(chan)\n return self.datapacket._data[datarowi]\n\n def get_chanV(self, chan):\n \"\"\"Return data for a single chan, in volts. chan can be either an integer id\n or a string label\"\"\"\n AD = self.get_chanAD(chan)\n chantype = self.get_chantype(chan)\n if chantype == 'ephys':\n return AD * self.fileheader.AD2uVx / 1000000 # convert uV to V\n elif chantype == 'aux':\n return AD * self.fileheader.AD2mVx / 1000 # convert mV to V\n else:\n raise ValueError('Unknown chantype %r' % chantype)\n\n def plot_chanV(self, chan, trange=None, figsize=(16, 10), maximize=True, fmt='.-',\n linewidth=1, markersize=3, color='k', alpha=0.2):\n \"\"\"Plot chan voltage as a function of time. chan can be either an integer id\n or a string label. trange is window time range, in sec\"\"\"\n f, a = plt.subplots(figsize=figsize)\n t = self.tsec\n V = self.get_chanV(chan)\n if trange:\n assert len(trange) == 2\n t0i, t1i = np.searchsorted(t, trange)\n t = t[t0i:t1i]\n V = V[t0i:t1i]\n a.plot(t, V, fmt, linewidth=linewidth, markersize=markersize,\n color=color, alpha=alpha)\n a.set_xlabel('Time (s)')\n a.set_ylabel('Voltage (V)')\n f.canvas.set_window_title(self.fname)\n if maximize:\n win = f.canvas.window() # possibly a Qt window\n try:\n win.showMaximized() # Qt specific method\n except AttributeError:\n pass # not using Qt backend?\n return a\n\n\nclass FileHeader(dat.FileHeader):\n \"\"\".nsx file header. Takes an open file, parses in from current file\n pointer position, stores header fields as attribs\"\"\"\n def __len__(self):\n return self.nbytes\n\n def parse(self, f):\n # \"basic\" header:\n self.offset = f.tell()\n self.filetype = f.read(8).decode()\n assert self.filetype == 'NEURALCD'\n self.version = unpack('BB', f.read(2)) # aka \"File Spec\", major and minor versions\n self.nbytes, = unpack('I', f.read(4)) # length of full header, in bytes\n self.label = f.read(16).rstrip(NULL).decode() # sampling group label, null terminated\n # null terminated, trailing junk bytes (bug):\n self.comment = rstripnonascii(f.read(256)).decode()\n # \"Period\" wrt sampling freq; sampling freq in Hz:\n self.decimation, self.sampfreq = unpack('II', f.read(8))\n if self.decimation != 1: # doesn't have to be, but probably should for neural data\n print('WARNING: data is decimated by a factor of %d' % self.decimation)\n self.tres = self.decimation / self.sampfreq * 1e6 # float us\n #print('FileHeader.tres = %f' % self.tres)\n\n # date and time corresponding to t=0:\n year, month, dow, day, hour, m, s, ms = unpack('HHHHHHHH', f.read(16))\n self.datetime = datetime.datetime(year, month, day, hour, m, s, ms)\n self.nchanstotal, = unpack('I', f.read(4)) # ephys and aux chans\n\n # \"extended\" headers, each one describing a channel. 
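Each is parsed by ChanHeader.parse() below. 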
Use the channel label\n # to distinguish ephys chans from auxiliary channels. Note that seeking through\n # the DataPacket won't work if ephys and aux channels are intermingled. The current\n # assumption is that all ephys chans come before any aux chans:\n self.chanheaders = {} # for ephys signals\n self.auxchanheaders = {} # for auxiliary signals, such as opto/LED signals\n for chani in range(self.nchanstotal):\n chanheader = ChanHeader()\n chanheader.parse(f)\n label, id = chanheader.label, chanheader.id\n if label != ('chan%d' % id):\n print('Treating chan%d (%r) as auxiliary channel' % (id, label))\n self.auxchanheaders[id] = chanheader\n else: # save ephys channel\n self.chanheaders[id] = chanheader\n self.nchans = len(self.chanheaders) # number of ephys chans\n self.nauxchans = len(self.auxchanheaders) # number of aux chans\n assert self.nchans + self.nauxchans == self.nchanstotal\n if self.nauxchans > 0: # some chans were aux chans\n print('Found %d auxiliary channels' % (self.nauxchans))\n assert len(self) == f.tell() # header should be of expected length\n\n # if there's no adapter, AD ephys chans == probe chans:\n self.chans = np.int64(sorted(self.chanheaders)) # sorted array of keys\n self.auxchans = np.int64(sorted(self.auxchanheaders)) # sorted array of keys\n if len(self.chans) > 0 and len(self.auxchans) > 0:\n # ensure that the last ephys chan comes before the first aux chan:\n assert self.chans[-1] < self.auxchans[0]\n\n # check AD2uV params of all ephys and aux chans:\n for chantype, chanheaders in (('ephys', self.chanheaders),\n ('aux', self.auxchanheaders)):\n chans = {'ephys': self.chans, 'aux': self.auxchans}[chantype]\n # all ephys should be in uV, all aux in mV:\n units = {'ephys': 'uV', 'aux': 'mV'}[chantype]\n try:\n c0 = chanheaders[chans[0]] # ref channel for comparing AD2V params\n except IndexError:\n continue # no channels of this type (ephys or aux)\n assert c0.units == units # assumed later during AD2V conversion\n assert c0.maxaval == abs(c0.minaval) # not strictly necessary, but check anyway\n assert c0.maxdval == abs(c0.mindval)\n ref = c0.units, c0.maxaval, c0.minaval, c0.maxdval, c0.mindval\n for c in chanheaders.values():\n if (c.units, c.maxaval, c.minaval, c.maxdval, c.mindval) != ref:\n raise ValueError('Not all chans have the same AD2V params')\n # calculate AD2uV/AD2mV conversion factor:\n if chantype == 'ephys':\n self.AD2uVx = (c0.maxaval-c0.minaval) / float(c0.maxdval-c0.mindval)\n else: # chantype == 'aux'\n self.AD2mVx = (c0.maxaval-c0.minaval) / float(c0.maxdval-c0.mindval)\n\n def parse_json(self, f):\n \"\"\"Parse potential .nsx.json file for probe name and optional adapter name\"\"\"\n fname = os.path.realpath(f.name) # make sure we have the full fname with path\n path = os.path.dirname(fname)\n ext = os.path.splitext(fname)[1] # e.g., '.ns6'\n # check if there is a file named exactly fname.json:\n jsonfname = fname + '.json'\n jsonbasefname = os.path.split(jsonfname)[-1]\n print('Checking for metadata file %r' % jsonbasefname)\n if os.path.exists(jsonfname):\n print('Found metadata file %r' % jsonbasefname)\n else:\n jsonext = '%s.json' % ext # e.g. 
'.ns6.json'\n print('No file named %r, checking for a single %s file of any name'\n % (jsonbasefname, jsonext))\n jsonbasefnames = [ fname for fname in os.listdir(path) if fname.endswith(jsonext)\n and not fname.startswith('.') ]\n njsonfiles = len(jsonbasefnames)\n if njsonfiles == 1:\n jsonbasefname = jsonbasefnames[0]\n jsonfname = os.path.join(path, jsonbasefname) # full fname with path\n print('Using metadata file %r' % jsonbasefname)\n else:\n jsonfname = None\n print('Found %d %s files, ignoring them' % (njsonfiles, jsonext))\n\n # get probe name and optional adapter name:\n if jsonfname:\n with open(jsonfname, 'r') as jf:\n j = json.load(jf) # should return a dict of key:val pairs\n assert type(j) == dict\n # check field validity:\n validkeys = ['chan_layout_name', # old name\n 'probe_name', # new name\n 'adapter_name']\n keys = list(j)\n for key in keys:\n if key not in validkeys:\n raise ValueError(\"Found invalid field %r in %r\\n\"\n \"Fields currently allowed in .nsx.json files: %r\"\n % (key, jsonfname, validkeys))\n try:\n self.probename = j['probe_name'] # new name\n except KeyError:\n self.probename = j['chan_layout_name'] # old name\n # make sure probename is valid probe.name or probe.layout,\n # potentially rename any old probe names to new ones:\n probe = probes.getprobe(self.probename)\n self.probename = probe.name\n self.adaptername = j.get('adapter_name')\n else: # no .json file, maybe the .nsx comment specifies the probe type?\n self.probename = self.comment.replace(' ', '_')\n if self.probename != '':\n print('Using %r in .nsx comment as probe name' % self.probename)\n else:\n self.probename = probes.DEFNSXPROBETYPE # A1x32\n print('WARNING: assuming probe %s was used in this recording' % self.probename)\n self.adaptername = None\n\n # initialize probe and adapter:\n self.set_probe()\n self.set_adapter()\n self.check_probe()\n self.check_adapter()\n\n def get_ephyschanlabels(self):\n return np.array([ self.chanheaders[chan].label for chan in self.chans ], dtype=str)\n\n ephyschanlabels = property(get_ephyschanlabels)\n\n def get_auxchanlabels(self):\n return np.array([ self.auxchanheaders[chan].label for chan in self.auxchans ], dtype=str)\n\n auxchanlabels = property(get_auxchanlabels)\n\n def get_alllabels(self):\n return np.concatenate([self.ephyschanlabels, self.auxchanlabels])\n\n alllabels = property(get_alllabels)\n\n\nclass ChanHeader(object):\n \"\"\".nsx header information for a single channel\"\"\"\n def parse(self, f):\n self.type = f.read(2).decode()\n assert self.type == 'CC' # for \"continuous channel\"\n self.id, = unpack('H', f.read(2)) # AD channel, usually == probe channel if no adapter\n self.label = f.read(16).rstrip(NULL).decode()\n self.connector, self.pin = unpack('BB', f.read(2)) # physical connector and pin\n # max and min digital and analog values:\n self.mindval, self.maxdval, self.minaval, self.maxaval = unpack('hhhh', f.read(8))\n self.units = f.read(16).rstrip(NULL).decode() # analog value units: \"mV\" or \"uV\"\n # high and low pass hardware filter settings? 
Blackrock docs are a bit vague:\n # corner freq (mHz); filt order (0=None); filter type (0=None, 1=Butterworth)\n self.hpcorner, self.hporder, self.hpfilttype = unpack(\"IIH\", f.read(10))\n self.lpcorner, self.lporder, self.lpfilttype = unpack(\"IIH\", f.read(10))\n\n\nclass DataPacket(object):\n \"\"\".nsx data packet\"\"\"\n def __init__(self, f, nchans):\n self.offset = f.tell()\n self.nchans = nchans\n header, = unpack('B', f.read(1))\n assert header == 1\n # nsamples offset of first timepoint from t=0; number of timepoints:\n self.t0i, self.nt = unpack('II', f.read(8))\n self.dataoffset = f.tell()\n filesize = os.stat(f.name).st_size # in bytes\n datasize = filesize - self.dataoffset\n expectednt = datasize / 2 / self.nchans\n if self.nt != int(expectednt):\n print(\"*** WARNING: Header in file %s says nt=%d, \"\n \"but file size suggests nt=%.3f. Perhaps the nt header field \"\n \"is incorrect/corrupt, but the rest of the file is fine.\"\n % (f.name, self.nt, expectednt))\n if expectednt % 1 != 0.0: # remaining file size doesn't divide evenly\n print(\"*** WARNING: Expected nt=%.3f is non-integer. \"\n \"This means there was an unknown amount of actual data loss, \"\n \"which could have occurred any time during the recording, including \"\n \"somewhere in the middle which would create offset errors with respect \"\n \"to e.g. stimulus data. Use extreme caution!\" % expectednt)\n response = input(\"Try and continue anyway? (y/[n]) >> \")\n if response != 'y':\n raise RuntimeError('Stopping')\n else:\n self.nt = int(np.floor(expectednt)) # round down\n ndroppedbytes = datasize - 2*self.nchans*self.nt\n print('Using only the first %d timepoints, discarding last %d bytes'\n % (self.nt, ndroppedbytes))\n # load all data into memory using np.fromfile. Time is MSB, chan is LSB:\n #self._data = np.fromfile(f, dtype=np.int16, count=self.nt*nchans)\n #self._data.shape = -1, self.nchans # reshape, t in rows, chans in columns\n #self._data = self._data.T # reshape, chans in columns, t in rows\n\n # load data on demand using np.memmap, numpy always assumes binary mode.\n # Time is the outer loop, chan is the inner loop, so load in column-major (Fortran)\n # order to get contiguous (chani, ti) array:\n self._data = np.memmap(f, dtype=np.int16, mode='r', offset=self.dataoffset,\n shape=(self.nchans, self.nt), order='F')\n",
"\"\"\"Tests threadpool code from http://code.activestate.com/recipes/576519,\nsaved as spyke.threadpool. Threaded code runs about ncpus times faster than\nunthreaded code, yet unlike multiprocessing, allows for shared memory. Only\nwhen you're inside a numpy loop is the GIL released, and multithreading possible.\nOr, same goes if you explicitlyl release the GIL inside Cython code\"\"\"\n\nimport numpy as np\nimport pyximport\npyximport.install(build_in_temp=False, inplace=True)\n\nfrom spyke import threadpool\nfrom multiprocessing import cpu_count\nimport time\n\nimport spyke.util as util\n\na = np.random.random(8*10000000)\ntmt = time.time()\nutil.dostuffthreads(a)\nprint('dostuffthreads took %.3f' % (time.time()-tmt))\ntst = time.time()\nutil.dostuff(a)\nprint('single thread took %.3f' % (time.time()-tst))\n\n'''\ntpool = time.time()\nncpus = cpu_count()\n#ncpus = 4\npool = threadpool.Pool(ncpus)\nprint('pool creation took %.3f' % (time.time()-tpool))\n\na = np.random.random(8*10000000)\nunits = np.split(a, ncpus)\n\ntmt = time.time()\npool.map(util.dostuff, units) # multithread over units\nprint('%d thread took %.3f' % (ncpus, time.time()-tmt))\ntst = time.time()\nutil.dostuff(a)\nprint('single thread took %.3f' % (time.time()-tst))\npool.terminate()\n'''\n\n\n\n'''\n\ndef sort_inplace(data):\n \"\"\"Sort data in-place along last axis\"\"\"\n data.sort()\n\nsortdata1 = np.random.random((8, 1e7))\nsortdata2 = sortdata1.copy()\n\ntmt = time.time()\npool.map(sort_inplace, sortdata1) # multithread over rows in data\nprint(sortdata1)\nprint('%d thread numpy sort took %.3f' % (ncpus, time.time()-tmt))\n\ntst = time.time()\nsort_inplace(sortdata2)\nprint(sortdata2)\nprint('single thread numpy sort took %.3f' % (time.time()-tst))\n\ndel sortdata1\ndel sortdata2\n\ntpool = time.time()\npool.terminate()\nprint('pool termination took %.3f' % (time.time()-tpool))\n\n\n\n\n\n\n\ntpool = time.time()\nncpus = cpu_count()\npool = threadpool.Pool(ncpus)\nprint('pool creation took %.3f' % (time.time()-tpool))\n\n#data = np.random.random((16, 1e7))\ndata = np.empty((64, 30*500000), dtype=np.int16)\n#data -= 0.5 # centered on 0\n#data *= 2**15\n#data = np.int16(data)\n\ntmt = time.time()\nresults = pool.map(np.mean, data) # multithread over rows in data\nresults = np.asarray(results)\nprint(results)\nprint('%d thread numpy mean took %.3f' % (ncpus, time.time()-tmt))\n\ntst = time.time()\nresults = np.mean(data, axis=1)\nprint(results)\nprint('single thread numpy mean took %.3f' % (time.time()-tst))\n\ndel data\n\ntpool = time.time()\npool.terminate()\nprint('pool termination took %.3f' % (time.time()-tpool))\n'''\n"
] | [
[
"numpy.dot",
"numpy.split",
"scipy.signal.find_peaks",
"numpy.sqrt",
"numpy.asarray",
"sklearn.decomposition.FastICA",
"numpy.all",
"numpy.concatenate",
"sklearn.manifold.TSNE",
"numpy.any",
"numpy.histogram",
"numpy.where",
"numpy.hstack",
"numpy.unique",
"numpy.arange",
"sklearn.decomposition.MiniBatchSparsePCA",
"numpy.intersect1d",
"numpy.diff",
"numpy.float32",
"numpy.column_stack",
"numpy.zeros",
"numpy.median",
"numpy.union1d",
"numpy.int64",
"numpy.savetxt",
"numpy.array",
"sklearn.decomposition.PCA",
"sklearn.decomposition.NMF",
"sklearn.decomposition.SparsePCA",
"numpy.abs",
"numpy.linalg.norm",
"numpy.setdiff1d",
"numpy.sort",
"numpy.savez_compressed",
"numpy.uint64",
"numpy.float64"
],
[
"matplotlib.rcParams.get",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.lib.format.read_magic",
"numpy.in1d",
"numpy.concatenate",
"numpy.seterr",
"numpy.lib.stride_tricks.as_strided",
"numpy.round",
"numpy.mean",
"numpy.exp",
"numpy.where",
"numpy.setxor1d",
"numpy.allclose",
"numpy.unique",
"numpy.uint8",
"numpy.arange",
"numpy.stack",
"numpy.sin",
"numpy.ceil",
"numpy.diff",
"numpy.float32",
"numpy.ravel",
"numpy.array2string",
"numpy.zeros",
"numpy.log",
"numpy.lib.format.read_array_header_1_0",
"numpy.cosh",
"numpy.random.choice",
"numpy.union1d",
"numpy.atleast_2d",
"numpy.append",
"numpy.int64",
"numpy.floor",
"numpy.array",
"numpy.log2",
"numpy.abs",
"numpy.set_printoptions",
"numpy.isfortran",
"numpy.tile",
"numpy.cos",
"numpy.float64",
"numpy.prod",
"numpy.empty"
],
[
"numpy.memmap",
"matplotlib.pyplot.subplots",
"numpy.concatenate",
"numpy.searchsorted",
"numpy.floor",
"numpy.array",
"numpy.where"
],
[
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
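The nsx.py source in the record above derives its analog gain as AD2uVx = (maxaval - minaval) / (maxdval - mindval) from the channel-header extremes, then scales AD counts to volts in get_chanV(), reading samples from a Fortran-ordered (nchans, nt) int16 memmap. A minimal sketch of that conversion path, assuming synthetic ChanHeader values and random AD samples in place of a real .ns6 file:

import numpy as np

# Synthetic stand-ins for the ChanHeader fields; real values come from the
# .nsx extended headers, and ephys units are asserted to be 'uV' by the parser.
maxaval, minaval = 8191, -8191      # analog extremes, in uV
maxdval, mindval = 32764, -32764    # digital (AD) extremes

# Same gain formula as FileHeader.parse(): analog span over digital span.
AD2uVx = (maxaval - minaval) / float(maxdval - mindval)

# Fake data packet: (nchans, nt) int16 AD values, matching the Fortran-ordered
# memmap shape that DataPacket exposes as _data.
nchans, nt = 4, 1000
rng = np.random.default_rng(0)
ad = rng.integers(mindval, maxdval, size=(nchans, nt), dtype=np.int16)

# Equivalent of File.get_chanV() for an ephys chan: AD counts -> uV -> V.
datarowi = 2
volts = ad[datarowi] * AD2uVx / 1000000
print(volts[:5])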
emmettmeinzer/hmwgen | [
"cd47733b5a34a6a3a9b56026eb5e73069e398033",
"cd47733b5a34a6a3a9b56026eb5e73069e398033"
] | [
"archive/reuUpdated.py",
"archive/attention.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 11 13:41:14 2019\r\n\r\n@author: Emmett & Binyang\r\n\"\"\"\r\n\r\nfrom pprint import pprint\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer\r\n\r\n##Let’s first build a corpus to train our tokenizer on. We’ll use stuff available in NLTK:\r\n\r\nfrom nltk.corpus import gutenberg\r\n\r\n# print (dir(gutenberg))\r\n# print (gutenberg.fileids())\r\n\r\ntext = \"\"\r\nfor file_id in gutenberg.fileids():\r\n text += gutenberg.raw(file_id)\r\n \r\nprint (len(text))\r\n\r\n##a funtion that converts a list to a string\r\ndef listToString(s): \r\n \r\n # initialize an empty string \r\n str1 = \"\" \r\n \r\n # traverse in the string \r\n for ele in s: \r\n str1 += ele \r\n \r\n # return string \r\n return str1\r\n\r\n##extract sentences from samples for following sentiment analysis\r\nsampNum = 1\r\nsent_df = pd.DataFrame()\r\ni = 0\r\n\r\nwhile (sampNum < 186):\r\n fileOpen = open(\"sample\"+str(sampNum)+\".txt\",\"r\")\r\n temp = fileOpen.readlines()\r\n temp = listToString(temp)\r\n \r\n trainer = PunktTrainer()\r\n trainer.INCLUDE_ALL_COLLOCS = True\r\n trainer.train(text)\r\n tokenizer = PunktSentenceTokenizer(trainer.get_params())\r\n \r\n ##Adding more abbreviations\r\n tokenizer._params.abbrev_types.add('dr')\r\n \r\n sent = tokenizer.tokenize(temp)\r\n \r\n for sent in sent:\r\n sent_df.loc[i, 'sent'] = sent\r\n sent_df.loc[i, 'sample'] = sampNum\r\n i += 1\r\n \r\n sampNum += 1\r\n\r\n##NLTK’s built-in Vader Sentiment Analyzer will simply rank a piece of text as positive, negative or neutral \r\n##using a lexicon of positive and negative words.\r\n\r\n##We can utilize this tool by first creating a Sentiment Intensity Analyzer (SIA) to categorize our headlines, \r\n##then we'll use the polarity_scores method to get the sentiment.\r\n\r\n##We'll append each sentiment dictionary to a results list, which we'll transform into a dataframe:\r\n\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA\r\n\r\nsia = SIA()\r\nresults = []\r\n\r\nfor idx, row in sent_df.iterrows():\r\n line = row['sent']\r\n score = sia.polarity_scores(line)\r\n sent_df.loc[idx, 'neg'] = score.get('neg')\r\n sent_df.loc[idx, 'neu'] = score.get('neu')\r\n sent_df.loc[idx, 'pos'] = score.get('pos')\r\n sent_df.loc[idx, 'compound'] = score.get('compound')\r\n\r\n# pprint(results[:10], width=100)\r\n\r\n##We will consider posts with a compound value greater than 0.2 as positive and less than -0.2 as negative. \r\n##There's some testing and experimentation that goes with choosing these ranges, and there is a trade-off to be \r\n##made here. If you choose a higher value, you might get more compact results (less false positives and false \r\n##negatives), but the size of the results will decrease significantly.\r\n\r\nsent_df['label'] = 0\r\nsent_df.loc[sent_df['compound'] > 0.3, 'label'] = 1\r\nsent_df.loc[sent_df['compound'] < -0.3, 'label'] = -1\r\n# sent_df.head()\r\n\r\n##We have all the data we need to save, so let's do that:\r\n\r\nsent_df.to_csv('sentiment analysis.csv', mode='a', encoding='utf-8', index=False)\r\n\r\n##We can now keep appending to this csv, but just make sure that if you reassign the headlines set, you could get \r\n##duplicates. 
Maybe add a more advanced saving function that reads and removes duplicates before saving.\r\n\r\n#Let's first take a peak at a few positive and negative headlines:\r\n\r\nprint(\"Positive headlines:\\n\")\r\npprint(list(sent_df[sent_df['label'] == 1].sent)[:5], width=200)\r\n\r\nprint(\"\\nNegative headlines:\\n\")\r\npprint(list(sent_df[sent_df['label'] == -1].sent)[:5], width=200)\r\n\r\n##Now let's check how many total positives and negatives we have in this dataset:\r\n\r\nprint(sent_df.label.value_counts())\r\nprint(sent_df.label.value_counts(normalize=True) * 100)\r\n\r\n##The first line gives us raw value counts of the labels, whereas the second line provides percentages \r\n##with the normalize keyword.\r\n\r\n##For fun, let's plot a bar chart:\r\n\"\"\"\r\nfig, ax = plt.subplots(figsize=(8, 8))\r\n\r\ncounts = sent_df.label.value_counts(normalize=True) * 100\r\n\r\nsns.barplot(x=counts.index, y=counts, ax=ax)\r\n\r\nax.set_xticklabels(['Negative', 'Neutral', 'Positive'])\r\nax.set_ylabel(\"Percentage\")\r\n\r\nplt.show()\r\n\"\"\"\r\n\r\n##filter the sentences by number of words in it\r\nfor idx, row in sent_df.iterrows():\r\n sentence = row['sent']\r\n sent_df.loc[idx, 'len_sent'] = len(sentence.split())\r\n\r\n##split positive and other sentences\r\npos = sent_df[sent_df['label'] == 1]\r\nneg = sent_df[sent_df['label'] != 1]\r\n\r\nimport gensim\r\nfrom gensim.parsing.preprocessing import strip_non_alphanum\r\nfrom gensim.parsing.preprocessing import strip_punctuation\r\nfrom gensim.parsing.preprocessing import strip_multiple_whitespaces\r\nfrom gensim.parsing.preprocessing import stem_text\r\n\r\ncorpus_full = []\r\nfor idx, row in sent_df.iterrows():\r\n temp = row['sent']\r\n temp1 = strip_non_alphanum(str(temp))\r\n temp2 = strip_punctuation(temp1)\r\n temp3 = strip_multiple_whitespaces(temp2)\r\n final = stem_text(temp3)\r\n corpus_full.append(final)\r\n\r\ncorpus_pos = []\r\nfor idx, row in pos.iterrows():\r\n temp = row['sent']\r\n temp1 = strip_non_alphanum(str(temp))\r\n temp2 = strip_punctuation(temp1)\r\n temp3 = strip_multiple_whitespaces(temp2)\r\n final = stem_text(temp3)\r\n corpus_pos.append(final)\r\n \r\ncorpus_neg = []\r\nfor idx, row in neg.iterrows():\r\n temp = row['sent']\r\n temp1 = strip_non_alphanum(str(temp))\r\n temp2 = strip_punctuation(temp1)\r\n temp3 = strip_multiple_whitespaces(temp2)\r\n final = stem_text(temp3)\r\n corpus_neg.append(final)\r\n\r\nfrom nltk.corpus import stopwords\r\nstop_words = stopwords.words('english')\r\n\r\nstoplist = set('a about above after again against all am an and any are arent\\\r\n as also at be because been before being below between both but\\\r\n by cant cannot could couldnt did didnt do does doesnt doing dont\\\r\n down during each els few for from further had hadnt has have havent\\\r\n having he hed hes her here heres hers herself him himself his\\\r\n how hows i id ill im ive if in into is isnt it its itself lets\\\r\n me more most mustnt my myself no nor not of off on once only or\\\r\n other ought our ours ourselves out over own same shant she shes\\\r\n should shouldnt so some such than that thats the their theirs\\\r\n them themselves then there theres these they theyd theyll theyre\\\r\n theyve this those through to too under until up very was wasnt\\\r\n we wed were weve were werent what whats when whens which while\\\r\n who whos whom why whys with wont would wouldnt you youd youll\\\r\n youre youve your yours yourself yourselves ll ve s ar mayb ha re\\\r\n us thi isn a b c d e f g h i j k l m n o p 
q r s t u v w x y z\\\r\n hi will can get back go don wa let atc ok ani mi thei whenev make\\\r\n just take aw know sai good baltimor jetblu lol thank thanks like\\\r\n vari might less highest billion nice probabl lot fuck shit sure\\\r\n feel dure befor realli work veri chanc see awai onc onli dy aren\\\r\n 100 someth thing even happen becaus wai everi much help want think\\\r\n fear flight plane fly mai time dai\\\r\n 1 2 3 4 5 6 7 8 9 10'.split())\r\n\r\nprint (len(stoplist))\r\nstoplist.update(stop_words)\r\n\r\nprint(len(stop_words))\r\nprint(len(stoplist))\r\n\r\n#standardize text -- makes all characters lowercase and removes common stop words\r\ntext_full = [[word for word in document.lower().split() if word not in stoplist]\r\n for document in corpus_full]\r\nprint(text_full)\r\ntext_pos = [[word for word in document.lower().split() if word not in stoplist]\r\n for document in corpus_pos]\r\ntext_neg = [[word for word in document.lower().split() if word not in stoplist]\r\n for document in corpus_neg]\r\n\r\n#count number of times that word appears in corpus\r\n#pair frequency with respective word in new array\r\nfrom collections import defaultdict\r\n\r\nfrequency = defaultdict(int)\r\nfor text in text_full:\r\n for token in text:\r\n frequency[token] += 1\r\n\r\ncorpus_removeOne_full = [[token for token in text if frequency[token]>1] for text in text_full]\r\n\r\nfrequency = defaultdict(int)\r\nfor text in text_pos:\r\n for token in text:\r\n frequency[token] += 1\r\n \r\ncorpus_removeOne_pos = [[token for token in text if frequency[token]>1] for text in text_pos]\r\n\r\nfrequency = defaultdict(int)\r\nfor text in text_neg:\r\n for token in text:\r\n frequency[token] += 1\r\n \r\ncorpus_removeOne_neg = [[token for token in text if frequency[token]>1] for text in text_neg]\r\n\r\n\r\nfrom gensim import corpora\r\n#add corpora to dictionary\r\ndictionary_full = corpora.Dictionary(corpus_removeOne_full)\r\ndictionary_pos = corpora.Dictionary(corpus_removeOne_pos)\r\ndictionary_neg = corpora.Dictionary(corpus_removeOne_neg)\r\n#save dictionary for future reference\r\ndictionary_full.save('redditTest_full.dict')\r\ndictionary_pos.save('redditTest_pos.dict') #location of document in computer\r\ndictionary_neg.save('redditTest_neg.dict')\r\n#dict = gensim.corpora.Dictionary.load('redditTest.dict')\r\n\r\n#assign numeric id to each token in dictionary\r\ndictID_full = dictionary_full.token2id\r\ndictID_pos = dictionary_pos.token2id\r\ndictID_neg = dictionary_neg.token2id\r\n\r\n#remove empty sentences\r\nfor text in corpus_removeOne_full:\r\n if len(text) == 0:\r\n corpus_removeOne_full.remove(text)\r\n\r\nfor text in corpus_removeOne_pos:\r\n if len(text) == 0:\r\n corpus_removeOne_pos.remove(text)\r\n \r\nfor text in corpus_removeOne_neg:\r\n if len(text) == 0:\r\n corpus_removeOne_neg.remove(text)\r\n\r\n\r\n#converts each word into vector following same process as example\r\n#Bag of Word Corpus of Full Sentiment\r\nbow_corpus_full = [dictionary_full.doc2bow(text) for text in corpus_removeOne_full]\r\ncorpora.MmCorpus.serialize('redditTest_full.mm', bow_corpus_full)\r\ncorp_full = gensim.corpora.MmCorpus('redditTest_full.mm')\r\n\r\nfrom gensim import models\r\ntfidf_pos = models.TfidfModel(bow_corpus_full)\r\ncorpus_tfidf_full = tfidf_pos[bow_corpus_full]\r\n\r\n#Bag of Word Corpus of Positive Sentiment\r\nbow_corpus_pos = [dictionary_pos.doc2bow(text) for text in corpus_removeOne_pos]\r\ncorpora.MmCorpus.serialize('redditTest_pos.mm', bow_corpus_pos)\r\ncorp_pos = 
gensim.corpora.MmCorpus('redditTest_pos.mm')\r\n\r\nfrom gensim import models\r\ntfidf_pos = models.TfidfModel(bow_corpus_pos)\r\ncorpus_tfidf_pos = tfidf_pos[bow_corpus_pos]\r\n\r\n#Bag of Word Corpus of Negative Sentiment\r\nbow_corpus_neg = [dictionary_neg.doc2bow(text) for text in corpus_removeOne_neg]\r\ncorpora.MmCorpus.serialize('redditTest_neg.mm', bow_corpus_neg)\r\ncorp_neg = gensim.corpora.MmCorpus('redditTest_neg.mm')\r\n\r\nfrom gensim import models\r\ntfidf_neg = models.TfidfModel(bow_corpus_neg)\r\ncorpus_tfidf_neg = tfidf_neg[bow_corpus_neg]\r\n\r\n\r\n#LDA Mallet for full corpus\r\nmallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'\r\nlda_full = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_full, num_topics=9, id2word=dictionary_full, workers=1, alpha=110, random_seed=109, iterations=50)\r\ncorpus_LDA_full = lda_full[bow_corpus_full]\r\nlda_full.print_topics(9)\r\n\r\n#LDA Mallet for positive corpus\r\nmallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'\r\nlda_pos = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_pos, num_topics=9, id2word=dictionary_pos, workers=1, alpha=110, random_seed=109, iterations=50)\r\ncorpus_LDA_pos = lda_pos[bow_corpus_pos]\r\nlda_pos.print_topics(9)\r\n\r\n#LDA Mallet for negative corpus\r\nmallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'\r\nlda_neg = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_neg, num_topics=9, id2word=dictionary_neg, workers=1, alpha=110, random_seed=109, iterations=50)\r\ncorpus_LDA_neg = lda_neg[bow_corpus_neg]\r\nlda_neg.print_topics(9)\r\n\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as mcolors\r\nfrom sklearn.manifold import TSNE\r\n\r\ncolors = np.array([color for name, color in mcolors.TABLEAU_COLORS.items()])\r\n\r\n#t-SNE plot for full corpus\r\nn_topics = 9\r\ntopic_weights_full = []\r\nfor row_list in lda_full[bow_corpus_full]:\r\n tmp = np.zeros(n_topics)\r\n for i, w in row_list:\r\n tmp[i] = w\r\n topic_weights_full.append(tmp)\r\n \r\narr_full = pd.DataFrame(topic_weights_full).fillna(9).values\r\ntopic_num_full = np.argmax(arr_full, axis=1)\r\ntsne_model_full = TSNE(n_components=3, random_state=None, method='barnes_hut', \r\n angle=0.5, init='pca')\r\ntsne_lda_full = tsne_model_full.fit_transform(arr_full)\r\n\r\nsub = str.maketrans(\"0123456789\", \"₀₁₂₃₄₅₆₇₈₉\")\r\nplt.xlabel('t-SNE1'.translate(sub))\r\nplt.ylabel('t-SNE2'.translate(sub))\r\nplt.title('t-SNE Plot of Topics within Positive Sentiment Corpus')\r\ntsne_full = plt.scatter(x=tsne_lda_full[:,0], y=tsne_lda_full[:,1])\r\nplt.show(tsne_full)\r\n\r\n\"\"\"\r\n#t-SNE plot for positive corpus\r\nn_topics = 9\r\ntopic_weights_pos = []\r\nfor row_list in lda_pos[bow_corpus_pos]:\r\n tmp = np.zeros(n_topics)\r\n for i, w in row_list:\r\n tmp[i] = w\r\n topic_weights_pos.append(tmp)\r\n \r\narr_pos = pd.DataFrame(topic_weights_pos).fillna(0).values\r\ntopic_num_pos = np.argmax(arr_pos, axis=1)\r\ntsne_model_pos = TSNE(n_components=3, random_state=None, method='barnes_hut', \r\n angle=0.5, init='pca')\r\ntsne_lda_pos = tsne_model_pos.fit_transform(arr_pos)\r\n\r\nsub = str.maketrans(\"0123456789\", \"₀₁₂₃₄₅₆₇₈₉\")\r\nplt.xlabel('t-SNE1'.translate(sub))\r\nplt.ylabel('t-SNE2'.translate(sub))\r\nplt.title('t-SNE Plot of Topics within Positive Sentiment Corpus')\r\ntsne_pos = plt.scatter(x=tsne_lda_pos[:,0], y=tsne_lda_pos[:,1])\r\n#plt.show(tsne_pos)\r\n\r\n\r\n#t-SNE plot 
for negative corpus\r\nn_topics = 9\r\ntopic_weights_neg = []\r\nfor row_list in lda_neg[bow_corpus_neg]:\r\n tmp = np.zeros(n_topics)\r\n for i, w in row_list:\r\n tmp[i] = w\r\n topic_weights_neg.append(tmp)\r\n \r\narr_neg = pd.DataFrame(topic_weights_neg).fillna(0).values\r\ntopic_num_neg = np.argmax(arr_neg, axis=1)\r\ntsne_model_neg = TSNE(n_components=3, random_state=None, method='barnes_hut', \r\n angle=0.5, init='pca')\r\ntsne_lda_neg = tsne_model_neg.fit_transform(arr_neg)\r\n\r\nsub = str.maketrans(\"0123456789\", \"₀₁₂₃₄₅₆₇₈₉\")\r\nplt.xlabel('t-SNE1'.translate(sub))\r\nplt.ylabel('t-SNE2'.translate(sub))\r\nplt.title('t-SNE Plot of Topics within Negative Sentiment Corpus')\r\ntsne_neg = plt.scatter(tsne_lda_neg[:,0], tsne_lda_neg[:,1])\r\n#plt.show(tsne_neg)\r\n\"\"\"\r\n\r\nfrom collections import Counter\r\n#Word Count & Keyword for Full Corpus\r\ntopics_full = lda_full.show_topics(formatted=False)\r\nflatten_full = [w for w_list in bow_corpus_full for w in w_list]\r\ncounter_full = Counter(flatten_full)\r\n\r\ntopic_weight_full = []\r\nfor i, topic in topics_full:\r\n for word, weight in topic:\r\n topic_weight_full.append([word, i , weight, counter_full[word]])\r\n\r\ndata_frame_full = pd.DataFrame(topic_weight_full, columns=['word', 'topic_id', 'importance', 'word_count']) \r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n ax.bar(x='word', height=\"word_count\", data=data_frame_full.loc[data_frame_full.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')\r\n ax_twin = ax.twinx()\r\n ax_twin.bar(x='word', height=\"importance\", data=data_frame_full.loc[data_frame_full.topic_id==i, :], color=colors[i], width=0.2, label='Weights')\r\n ax.set_ylabel('Word Count', color=colors[i])\r\n ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)\r\n ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)\r\n ax.tick_params(axis='y', left=False)\r\n ax.set_xticklabels(data_frame_full.loc[data_frame_full.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')\r\n ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')\r\n\r\nfig.tight_layout(w_pad=2) \r\nplt.show()\r\n\r\n\"\"\"\r\n#Word Count & Keyword for Positive Corpus\r\ntopics_pos = lda_pos.show_topics(formatted=False)\r\nflatten_pos = [w for w_list in bow_corpus_pos for w in w_list]\r\ncounter_pos = Counter(flatten_pos)\r\n\r\ntopic_weight_pos = []\r\nfor i, topic in topics_pos:\r\n for word, weight in topic:\r\n topic_weight_pos.append([word, i , weight, counter_pos[word]])\r\n\r\ndata_frame_pos = pd.DataFrame(topic_weight_pos, columns=['word', 'topic_id', 'importance', 'word_count']) \r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n ax.bar(x='word', height=\"word_count\", data=data_frame_pos.loc[data_frame_pos.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')\r\n ax_twin = ax.twinx()\r\n ax_twin.bar(x='word', height=\"importance\", data=data_frame_pos.loc[data_frame_pos.topic_id==i, :], color=colors[i], width=0.2, label='Weights')\r\n ax.set_ylabel('Word Count', color=colors[i])\r\n ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)\r\n ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)\r\n ax.tick_params(axis='y', left=False)\r\n ax.set_xticklabels(data_frame_pos.loc[data_frame_pos.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')\r\n ax.legend(loc='upper left'); 
ax_twin.legend(loc='upper right')\r\n\r\nfig.tight_layout(w_pad=2) \r\nplt.show()\r\n\r\n#Word Count & Keyword for Negative Corpus\r\ntopics_neg = lda_neg.show_topics(formatted=False)\r\nflatten_neg = [w for w_list in bow_corpus_neg for w in w_list]\r\ncounter_neg = Counter(flatten_neg)\r\n\r\ntopic_weight_neg = []\r\nfor i, topic in topics_neg:\r\n for word, weight in topic:\r\n topic_weight_neg.append([word, i , weight, counter_neg[word]])\r\n\r\ndata_frame_neg = pd.DataFrame(topic_weight_neg, columns=['word', 'topic_id', 'importance', 'word_count']) \r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n ax.bar(x='word', height=\"word_count\", data=data_frame_neg.loc[data_frame_neg.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')\r\n ax_twin = ax.twinx()\r\n ax_twin.bar(x='word', height=\"importance\", data=data_frame_neg.loc[data_frame_neg.topic_id==i, :], color=colors[i], width=0.2, label='Weights')\r\n ax.set_ylabel('Word Count', color=colors[i])\r\n ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)\r\n ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)\r\n ax.tick_params(axis='y', left=False)\r\n ax.set_xticklabels(data_frame_neg.loc[data_frame_neg.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')\r\n ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')\r\n\r\nfig.tight_layout(w_pad=2) \r\nplt.show()\r\n\"\"\"\r\n\r\nfrom wordcloud import WordCloud\r\nimport matplotlib.colors as mcolors\r\n#Word Cloud Display for Full Corpus\r\ncloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)\r\n\r\ntopics_full = lda_full.show_topics(formatted=False)\r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10, 6))\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n fig.add_subplot(ax)\r\n topic_words_full = dict(topics_full[i][1])\r\n cloud.generate_from_frequencies(topic_words_full, max_font_size=300)\r\n plt.gca().imshow(cloud)\r\n plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))\r\n plt.gca().axis('off')\r\n\r\nplt.axis('off')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n\"\"\"\r\n#Word Cloud Display for Positive Corpus\r\ncloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)\r\n\r\ntopics_pos = lda_pos.show_topics(formatted=False)\r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10, 6))\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n fig.add_subplot(ax)\r\n topic_words_pos = dict(topics_pos[i][1])\r\n cloud.generate_from_frequencies(topic_words_pos, max_font_size=300)\r\n plt.gca().imshow(cloud)\r\n plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))\r\n plt.gca().axis('off')\r\n\r\nplt.axis('off')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n#Word Cloud Display for Negative Corpus\r\ncloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)\r\n\r\ntopics_neg = lda_neg.show_topics(formatted=False)\r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10, 6))\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n fig.add_subplot(ax)\r\n topic_words_neg = dict(topics_neg[i][1])\r\n cloud.generate_from_frequencies(topic_words_neg, max_font_size=300)\r\n 
plt.gca().imshow(cloud)\r\n plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))\r\n plt.gca().axis('off')\r\n\r\nplt.axis('off')\r\nplt.tight_layout()\r\nplt.show()\r\n\"\"\"\r\n\r\nimport pyLDAvis.gensim\r\nimport pyLDAvis\r\nimport gensim \r\n\r\n#LDA Mallet pyLDAvis for Full Corpus\r\nmallet2lda_full = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_full)\r\nvisualizeLDA_full = pyLDAvis.gensim.prepare(mallet2lda_full, bow_corpus_full, dictionary_full)\r\npyLDAvis.show()\r\n\r\n\"\"\"\r\n#LDA Mallet pyLDAvis for Postiive Corpus\r\nmallet2lda_pos = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_pos)\r\nvisualizeLDA_pos = pyLDAvis.gensim.prepare(mallet2lda_pos, bow_corpus_pos, dictionary_pos)\r\npyLDAvis.show(visualizeLDA_pos)\r\n\r\n#LDA Mallet pyLDAvis for Negative Corpus\r\nmallet2lda_neg = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_neg)\r\nvisualizeLDA_neg = pyLDAvis.gensim.prepare(mallet2lda_neg, bow_corpus_neg, dictionary_neg)\r\npyLDAvis.show(visualizeLDA_neg)\r\n\"\"\"",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 5 22:53:44 2020\r\n\r\n@author: Emmett\r\n\"\"\"\r\nimport tensorflow as tf\r\nimport os\r\nfrom tensorflow.python.keras.layers import Layer\r\nfrom tensorflow.python.keras import backend as K\r\n\r\n\r\nclass AttentionLayer(Layer):\r\n \"\"\"\r\n This class implements Bahdanau attention (https://arxiv.org/pdf/1409.0473.pdf).\r\n There are three sets of weights introduced W_a, U_a, and V_a\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n super(AttentionLayer, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n assert isinstance(input_shape, list)\r\n # Create a trainable weight variable for this layer.\r\n\r\n self.W_a = self.add_weight(name='W_a',\r\n shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),\r\n initializer='uniform',\r\n trainable=True)\r\n self.U_a = self.add_weight(name='U_a',\r\n shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),\r\n initializer='uniform',\r\n trainable=True)\r\n self.V_a = self.add_weight(name='V_a',\r\n shape=tf.TensorShape((input_shape[0][2], 1)),\r\n initializer='uniform',\r\n trainable=True)\r\n\r\n super(AttentionLayer, self).build(input_shape) # Be sure to call this at the end\r\n\r\n def call(self, inputs, verbose=False):\r\n \"\"\"\r\n inputs: [encoder_output_sequence, decoder_output_sequence]\r\n \"\"\"\r\n assert type(inputs) == list\r\n encoder_out_seq, decoder_out_seq = inputs\r\n if verbose:\r\n print('encoder_out_seq>', encoder_out_seq.shape)\r\n print('decoder_out_seq>', decoder_out_seq.shape)\r\n\r\n def energy_step(inputs, states):\r\n \"\"\" Step function for computing energy for a single decoder state\r\n inputs: (batchsize * 1 * de_in_dim)\r\n states: (batchsize * 1 * de_latent_dim)\r\n \"\"\"\r\n\r\n assert_msg = \"States must be an iterable. Got {} of type {}\".format(states, type(states))\r\n assert isinstance(states, list) or isinstance(states, tuple), assert_msg\r\n\r\n \"\"\" Some parameters required for shaping tensors\"\"\"\r\n en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]\r\n de_hidden = inputs.shape[-1]\r\n\r\n \"\"\" Computing S.Wa where S=[s0, s1, ..., si]\"\"\"\r\n # <= batch size * en_seq_len * latent_dim\r\n W_a_dot_s = K.dot(encoder_out_seq, self.W_a)\r\n\r\n \"\"\" Computing hj.Ua \"\"\"\r\n U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1) # <= batch_size, 1, latent_dim\r\n if verbose:\r\n print('Ua.h>', U_a_dot_h.shape)\r\n\r\n \"\"\" tanh(S.Wa + hj.Ua) \"\"\"\r\n # <= batch_size*en_seq_len, latent_dim\r\n Ws_plus_Uh = K.tanh(W_a_dot_s + U_a_dot_h)\r\n if verbose:\r\n print('Ws+Uh>', Ws_plus_Uh.shape)\r\n\r\n \"\"\" softmax(va.tanh(S.Wa + hj.Ua)) \"\"\"\r\n # <= batch_size, en_seq_len\r\n e_i = K.squeeze(K.dot(Ws_plus_Uh, self.V_a), axis=-1)\r\n # <= batch_size, en_seq_len\r\n e_i = K.softmax(e_i)\r\n\r\n if verbose:\r\n print('ei>', e_i.shape)\r\n\r\n return e_i, [e_i]\r\n\r\n def context_step(inputs, states):\r\n \"\"\" Step function for computing ci using ei \"\"\"\r\n\r\n assert_msg = \"States must be an iterable. 
Got {} of type {}\".format(states, type(states))\r\n assert isinstance(states, list) or isinstance(states, tuple), assert_msg\r\n\r\n # <= batch_size, hidden_size\r\n c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)\r\n if verbose:\r\n print('ci>', c_i.shape)\r\n return c_i, [c_i]\r\n\r\n fake_state_c = K.sum(encoder_out_seq, axis=1)\r\n fake_state_e = K.sum(encoder_out_seq, axis=2) # <= (batch_size, enc_seq_len, latent_dim\r\n\r\n \"\"\" Computing energy outputs \"\"\"\r\n # e_outputs => (batch_size, de_seq_len, en_seq_len)\r\n last_out, e_outputs, _ = K.rnn(\r\n energy_step, decoder_out_seq, [fake_state_e],\r\n )\r\n\r\n \"\"\" Computing context vectors \"\"\"\r\n last_out, c_outputs, _ = K.rnn(\r\n context_step, e_outputs, [fake_state_c],\r\n )\r\n\r\n return c_outputs, e_outputs\r\n\r\n def compute_output_shape(self, input_shape):\r\n \"\"\" Outputs produced by the layer \"\"\"\r\n return [\r\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),\r\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))\r\n ]"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"sklearn.manifold.TSNE",
"numpy.argmax",
"matplotlib.pyplot.axis",
"matplotlib.colors.TABLEAU_COLORS.items",
"matplotlib.pyplot.show",
"numpy.zeros"
],
[
"tensorflow.python.keras.backend.rnn",
"tensorflow.python.keras.backend.softmax",
"tensorflow.TensorShape",
"tensorflow.python.keras.backend.tanh",
"tensorflow.python.keras.backend.sum",
"tensorflow.python.keras.backend.dot",
"tensorflow.python.keras.backend.expand_dims"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.2",
"2.10"
]
}
] |
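reuUpdated.py in the record above labels each sentence by thresholding its VADER compound score at +/-0.3 (the surrounding comment still cites the +/-0.2 cutoff it was adapted from). A minimal sketch of just that labeling step, assuming precomputed compound scores so no NLTK corpus download is needed; the scores below are hypothetical:

import pandas as pd

# Hypothetical compound scores; in the record above these come from nltk's
# SentimentIntensityAnalyzer.polarity_scores() applied per sentence.
sent_df = pd.DataFrame({'compound': [0.85, 0.05, -0.60, 0.31, -0.29]})

# Same cutoffs as the record: > 0.3 -> positive (1), < -0.3 -> negative (-1),
# everything else stays neutral (0).
sent_df['label'] = 0
sent_df.loc[sent_df['compound'] > 0.3, 'label'] = 1
sent_df.loc[sent_df['compound'] < -0.3, 'label'] = -1

print(sent_df.label.value_counts(normalize=True) * 100)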
LatencyTDH/DeepSpeed | [
"eecef309cb12528cfa78d932a6f073afb43847e5"
] | [
"deepspeed/runtime/engine.py"
] | [
"'''\nCopyright 2019 The Microsoft DeepSpeed Team\n'''\n\nimport os\nimport stat\nimport torch\nimport warnings\nimport hashlib\nimport torch.distributed as dist\nfrom collections import OrderedDict\nfrom shutil import copyfile\n\nfrom torch.nn.modules import Module\nfrom torch.distributed.distributed_c10d import _get_global_rank\nfrom tensorboardX import SummaryWriter\n\nfrom deepspeed.runtime.utils import see_memory_usage\nfrom deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer\nfrom deepspeed.runtime.zero.stage1 import FP16_DeepSpeedZeroOptimizer_Stage1\nfrom deepspeed.runtime.zero.partition_parameters import ZeroParamStatus\nfrom deepspeed.runtime.zero.utils import is_zero_supported_optimizer\nfrom deepspeed.runtime.activation_checkpointing import checkpointing as activation_checkpointing\nfrom deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer\nfrom deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer\nfrom deepspeed.runtime.config import DeepSpeedConfig, DEEPSPEED_OPTIMIZERS, \\\n ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, \\\n TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT\n\nfrom deepspeed.runtime.dataloader import DeepSpeedDataLoader\nfrom deepspeed.runtime.constants import \\\n ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \\\n PLD_THETA, PLD_GAMMA\nfrom deepspeed.runtime.zero.constants import \\\n ZERO_OPTIMIZATION_OPTIMIZER_STATES, ZERO_OPTIMIZATION_GRADIENTS, ZERO_OPTIMIZATION_WEIGHTS\nfrom deepspeed.runtime.csr_tensor import CSRTensor\nimport deepspeed.runtime.lr_schedules as lr_schedules\nfrom deepspeed.utils import logger, log_dist, init_distributed\nfrom deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer\nfrom deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop\n\nfrom .pipe.module import PipelineModule\nfrom .utils import ensure_directory_exists\nfrom ..ops.op_builder import UtilsBuilder\nfrom ..ops.adam import DeepSpeedCPUAdam\nfrom ..ops.adam import FusedAdam\n\nfrom deepspeed.profiling.flops_profiler.profiler import FlopsProfiler\n\nMEMORY_OPT_ALLREDUCE_SIZE = 500000000\n\ntry:\n from apex import amp\nexcept ImportError:\n # Fail silently so we don't spam logs unnecessarily if user isn't using amp\n pass\n\n\ndef split_half_float_double_csr(tensors):\n dtypes = [\n \"torch.cuda.HalfTensor\",\n \"torch.cuda.FloatTensor\",\n \"torch.cuda.DoubleTensor\",\n CSRTensor.type()\n ]\n buckets = []\n for i, dtype in enumerate(dtypes):\n bucket = [t for t in tensors if t.type() == dtype]\n if bucket:\n buckets.append((dtype, bucket))\n return buckets\n\n\ndef _initialize_parameter_parallel_groups(parameter_parallel_size=None):\n data_parallel_size = int(dist.get_world_size())\n if parameter_parallel_size is None:\n parameter_parallel_size = int(data_parallel_size)\n logger.info(\"data_parallel_size: %s, parameter_parallel_size: %s\",\n data_parallel_size,\n parameter_parallel_size)\n assert data_parallel_size % parameter_parallel_size == 0, \\\n 'world size should be divisible by parameter parallel size'\n rank = dist.get_rank()\n my_group = None\n for i in range(dist.get_world_size() // parameter_parallel_size):\n ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)\n group = torch.distributed.new_group(ranks)\n if rank in ranks:\n my_group = group\n return my_group\n\n\ndef print_configuration(args, name):\n logger.info('{}:'.format(name))\n for arg in sorted(vars(args)):\n dots = '.' 
* (29 - len(arg))\n logger.info(' {} {} {}'.format(arg, dots, getattr(args, arg)))\n\n\nclass DeepSpeedEngine(Module):\n r\"\"\"DeepSpeed engine for training.\n \"\"\"\n def __init__(self,\n args,\n model,\n optimizer=None,\n model_parameters=None,\n training_data=None,\n lr_scheduler=None,\n mpu=None,\n dist_init_required=None,\n collate_fn=None,\n config_params=None,\n dont_change_device=False):\n super(DeepSpeedEngine, self).__init__()\n self.dont_change_device = dont_change_device\n self.client_optimizer = optimizer\n self.client_model_parameters = model_parameters\n self.client_lr_scheduler = lr_scheduler\n self.training_data = training_data\n self.collate_fn = collate_fn\n self.mpu = mpu\n self.data_parallel_group = None\n self.global_steps = 0\n self.global_samples = 0\n self.micro_steps = 0\n self.skipped_steps = 0\n self.gradient_average = True\n self.warn_unscaled_loss = True\n self.config_params = config_params\n self.loaded_checkpoint_mp_world_size = None\n self.loaded_checkpoint_dp_world_size = None\n self.enable_backward_allreduce = True\n self.progressive_layer_drop = None\n self.dist_backend = \"nccl\"\n\n if dist_init_required is None:\n dist_init_required = not dist.is_initialized()\n\n if dist_init_required is False:\n assert dist.is_initialized() is True, \"Torch distributed not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()\"\n else:\n # Initialize torch distributed if needed\n init_distributed(dist_backend=self.dist_backend)\n\n see_memory_usage(f\"DeepSpeed Engine: Before args sanity test\")\n self._do_args_sanity_check(args)\n self._configure_with_arguments(args, mpu)\n self._do_sanity_check()\n\n if mpu is not None:\n assert not self.elasticity_enabled(), \"Elasticity is not currently supported\" \\\n \" with model parallelism.\"\n\n self._set_distributed_vars()\n\n if self.tensorboard_enabled() and self.global_rank == 0:\n self.summary_writer = self.get_summary_writer()\n\n see_memory_usage(f\"DeepSpeed Engine: Before configure distributed model\")\n\n # Configure distributed model\n self._configure_distributed_model(model)\n\n see_memory_usage(f\"DeepSpeed Engine: After configure distributed model\")\n\n # Configure wall clock timer\n self.timers = SynchronizedWallClockTimer()\n\n # Throughput timer\n self.tput_timer = ThroughputTimer(\n batch_size=self.train_micro_batch_size_per_gpu(),\n num_workers=self.dp_world_size,\n steps_per_output=self.steps_per_print(),\n monitor_memory=False)\n\n if training_data:\n self.training_dataloader = self.deepspeed_io(training_data)\n else:\n self.training_dataloader = None\n\n # Configure optimizer and scheduler\n self.optimizer = None\n self.lr_scheduler = None\n if model_parameters or optimizer:\n self._configure_optimizer(optimizer, model_parameters)\n self._configure_lr_scheduler(lr_scheduler)\n self._report_progress(0)\n\n # Bookkeeping for csr support\n self.csr_tensor_module_names = set()\n if self.sparse_gradients_enabled():\n for name, module in self.module.named_modules():\n if isinstance(module, torch.nn.Embedding):\n self.csr_tensor_module_names.add(name + \".weight\")\n logger.info(\"Will convert {} to sparse (csr) \"\n \"tensor during training\".format(name))\n\n self.save_non_zero_checkpoint = False\n self.save_zero_checkpoint = False\n self._configure_checkpointing(dist_init_required)\n\n if self.pld_enabled():\n self.progressive_layer_drop = self._configure_progressive_layer_drop()\n\n if self.global_rank == 0:\n self._config.print('DeepSpeedEngine 
configuration')\n if self.dump_state():\n print_configuration(self, 'DeepSpeedEngine')\n\n # Load pre-installed or JIT compile (un)flatten ops\n util_ops = UtilsBuilder().load()\n self.flatten = util_ops.flatten\n self.unflatten = util_ops.unflatten\n\n def get_batch_info(self):\n \"\"\" Get all training batch related settings.\n\n Returns:\n train_batch_size (int): The effective training batch size. This is the amount of data\n samples that leads to one step of model update.\n train_micro_batch_size_per_gpu (int): Batch size to be processed by one GPU in one\n step (without gradient accumulation).\n gradient_accumulation_steps (int): Number of training steps to accumulate gradients\n before averaging and applying them.\n \"\"\"\n return self.train_batch_size, self.train_micro_batch_size_per_gpu, self.gradient_accumulation_steps\n\n def checkpoint_tag_validation_enabled(self):\n return self._config.checkpoint_tag_validation_enabled\n\n def checkpoint_tag_validation_fail(self):\n return self._config.checkpoint_tag_validation_fail\n\n def elasticity_enabled(self):\n return self._config.elasticity_enabled\n\n def pld_enabled(self):\n return self._config.pld_enabled\n\n def pld_params(self):\n return self._config.pld_params\n\n def pld_theta(self):\n return self.pld_params()[PLD_THETA]\n\n def pld_gamma(self):\n return self.pld_params()[PLD_GAMMA]\n\n def tensorboard_enabled(self):\n return self._config.tensorboard_enabled\n\n def tensorboard_output_path(self):\n return self._config.tensorboard_output_path\n\n def tensorboard_job_name(self):\n return self._config.tensorboard_job_name\n\n def get_summary_writer(self,\n name=\"DeepSpeedJobName\",\n base=os.path.join(os.path.expanduser(\"~\"),\n \"tensorboard\")):\n if self.tensorboard_output_path():\n base_dir = self.tensorboard_output_path()\n job_name = self.tensorboard_job_name()\n log_dir = os.path.join(base_dir, job_name)\n else:\n if self.tensorboard_job_name():\n name = self.tensorboard_job_name()\n\n # Infrastructure-specific job-id\n if 'DLWS_JOB_ID' in os.environ:\n infra_job_id = os.environ['DLWS_JOB_ID']\n elif 'DLTS_JOB_ID' in os.environ:\n infra_job_id = os.environ['DLTS_JOB_ID']\n else:\n infra_job_id = 'unknown-job-id'\n\n summary_writer_dir_name = os.path.join(infra_job_id, \"logs\")\n log_dir = os.path.join(base, summary_writer_dir_name, name)\n\n os.makedirs(log_dir, exist_ok=True)\n\n return SummaryWriter(log_dir=log_dir)\n\n def wall_clock_breakdown(self):\n return self._config.wall_clock_breakdown\n\n def flops_profiler_enabled(self):\n return self._config.flops_profiler_config.enabled\n\n def flops_profiler_profile_step(self):\n return self._config.flops_profiler_config.profile_step\n\n def flops_profiler_module_depth(self):\n return self._config.flops_profiler_config.module_depth\n\n def flops_profiler_top_modules(self):\n return self._config.flops_profiler_config.top_modules\n\n def flops_profiler_detailed(self):\n return self._config.flops_profiler_config.detailed\n\n def memory_breakdown(self):\n return self._config.memory_breakdown\n\n def sparse_gradients_enabled(self):\n return self._config.sparse_gradients_enabled\n\n def train_batch_size(self):\n return self._config.train_batch_size\n\n def train_micro_batch_size_per_gpu(self):\n return self._config.train_micro_batch_size_per_gpu\n\n def optimizer_name(self):\n return self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name\n\n def optimizer_params(self):\n return self._config.optimizer_params\n\n def 
optimizer_legacy_fusion(self):\n return self._config.optimizer_legacy_fusion\n\n def scheduler_name(self):\n return self._config.scheduler_name\n\n def scheduler_params(self):\n return self._config.scheduler_params\n\n def zero_optimization(self):\n return self._config.zero_enabled\n\n def zero_allow_untested_optimizer(self):\n return self._config.zero_allow_untested_optimizer\n\n def zero_reduce_scatter(self):\n return self._config.zero_config.reduce_scatter\n\n def zero_overlap_comm(self):\n return self._config.zero_config.overlap_comm\n\n def zero_offload_optimizer(self):\n return self._config.zero_config.offload_optimizer\n\n def zero_offload_param(self):\n return self._config.zero_config.offload_param\n\n def zero_cpu_offload(self):\n return self._config.zero_config.offload_optimizer is not None\n\n def zero_sub_group_size(self):\n return self._config.zero_config.sub_group_size\n\n def zero_optimization_stage(self):\n return self._config.zero_optimization_stage\n\n def zero_reduce_bucket_size(self):\n return self._config.zero_config.reduce_bucket_size\n\n def zero_allgather_bucket_size(self):\n return self._config.zero_config.allgather_bucket_size\n\n def zero_optimization_partition_gradients(self):\n return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_GRADIENTS\n\n def zero_optimization_partition_weights(self):\n return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_WEIGHTS\n\n def zero_contiguous_gradients(self):\n return self._config.zero_config.contiguous_gradients\n\n def zero_load_from_fp32_weights(self):\n return self._config.zero_config.load_from_fp32_weights\n\n def zero_elastic_checkpoint(self):\n return self._config.zero_config.elastic_checkpoint\n\n def zero_max_live_parameters(self):\n return self._config.zero_config.max_live_parameters\n\n def zero_max_reuse_distance(self):\n return self._config.zero_config.max_reuse_distance\n\n def zero_prefetch_bucket_size(self):\n return self._config.zero_config.prefetch_bucket_size\n\n def zero_param_persistence_threshold(self):\n return self._config.zero_config.param_persistence_threshold\n\n def zero_gather_fp16_weights_on_model_save(self):\n return self._config.zero_config.gather_fp16_weights_on_model_save\n\n def fp16_enabled(self):\n return self._config.fp16_enabled\n\n def amp_enabled(self):\n return self._config.amp_enabled\n\n def amp_params(self):\n return self._config.amp_params\n\n def loss_scale(self):\n return self._config.loss_scale\n\n def gradient_accumulation_steps(self):\n return self._config.gradient_accumulation_steps\n\n def allreduce_always_fp32(self):\n return self._config.allreduce_always_fp32\n\n def postscale_gradients(self):\n return not self._config.prescale_gradients\n\n def gradient_predivide_factor(self):\n return self._config.gradient_predivide_factor\n\n def steps_per_print(self):\n return self._config.steps_per_print\n\n def zero_allgather_partitions(self):\n return self._config.zero_config.allgather_partitions\n\n def dump_state(self):\n return self._config.dump_state\n\n def gradient_clipping(self):\n return self._config.gradient_clipping\n\n def dynamic_loss_scale(self):\n return self._config.loss_scale == 0\n\n def initial_dynamic_scale(self):\n return self._config.initial_dynamic_scale\n\n def dynamic_loss_scale_args(self):\n return self._config.dynamic_loss_scale_args\n\n def swap_tensor_config(self):\n return self._config.swap_tensor_config\n\n def aio_config(self):\n return self._config.aio_config\n\n def _configure_lr_scheduler(self, client_lr_scheduler):\n # First check for 
scheduler in json configuration\n lr_scheduler = self._scheduler_from_config(self.optimizer)\n if lr_scheduler:\n if self.global_rank == 0:\n logger.info(\n f'DeepSpeed using configured LR scheduler = {self.scheduler_name()}')\n self.lr_scheduler = lr_scheduler\n else:\n if self.global_rank == 0:\n logger.info('DeepSpeed using client LR scheduler')\n self.lr_scheduler = client_lr_scheduler\n log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0])\n\n def _configure_checkpointing(self, dist_init_required):\n\n dp_rank = self.global_rank\n if self.mpu:\n dp_rank = self.mpu.get_data_parallel_rank()\n\n # only the first data parallel process needs to store the model checkpoint\n self.save_non_zero_checkpoint = (\n dp_rank == 0) or self.zero_optimization_partition_weights()\n\n if self.zero_optimization():\n param_rank = torch.distributed.get_rank(\n group=self.optimizer.dp_process_group)\n\n # Only the first parameter parallel process needs to store the\n # optimizer state checkpoints for zero\n self.save_zero_checkpoint = (param_rank == dp_rank)\n\n def _scheduler_from_config(self, optimizer):\n scheduler_name = self.scheduler_name()\n if scheduler_name is not None:\n if hasattr(lr_schedules, scheduler_name):\n scheduler = getattr(lr_schedules, scheduler_name)\n else:\n assert hasattr(torch.optim.lr_scheduler, scheduler_name), \\\n f\"DeepSpeed does not recognize LR scheduler {scheduler_name}\"\n\n scheduler = getattr(torch.optim.lr_scheduler, scheduler_name)\n\n scheduler_params = self.scheduler_params()\n instantiated_scheduler = scheduler(optimizer, **scheduler_params)\n return instantiated_scheduler\n else:\n return None\n\n def _set_distributed_vars(self):\n if self.local_rank >= 0:\n torch.cuda.set_device(self.local_rank)\n self.device = torch.device(\"cuda\", self.local_rank)\n self.world_size = dist.get_world_size()\n self.global_rank = dist.get_rank()\n else:\n self.world_size = 1\n self.global_rank = 0\n self.device = torch.device(\"cuda\")\n\n # Configure based on command line arguments\n def _configure_with_arguments(self, args, mpu):\n # After the distributed backend is initialized we are guaranteed the LOCAL_RANK\n # environment variable is set. We must align args.local_rank to this value for\n # backwards compatability with scripts relying on [args|self].local_rank containing\n # the correct local rank info. _do_args_sanity_check will ensure this is the case.\n self.local_rank = int(os.environ['LOCAL_RANK'])\n if hasattr(args, 'local_rank'):\n args.local_rank = self.local_rank\n\n config_file = args.deepspeed_config if hasattr(args,\n 'deepspeed_config') else None\n self._config = DeepSpeedConfig(config_file, mpu, param_dict=self.config_params)\n\n # Validate command line arguments\n def _do_args_sanity_check(self, args):\n if hasattr(args, 'deepscale_config') and args.deepscale_config is not None:\n logger.warning(\n \"************ --deepscale_config is deprecated, please use --deepspeed_config ************\"\n )\n if hasattr(args, 'deepspeed_config'):\n assert args.deepspeed_config is None, \"Not sure how to proceed, we were given both a deepscale_config and deepspeed_config\"\n args.deepspeed_config = args.deepscale_config\n\n assert \"LOCAL_RANK\" in os.environ, \"DeepSpeed requires the LOCAL_RANK environment variable, it is set by the deepspeed launcher, \" \\\n \"deepspeed.init_distributed, or the torch.distributed launcher. 
If using a different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed.\"\n if hasattr(args, 'local_rank') and args.local_rank != None:\n assert isinstance(args.local_rank, int), f\"args.local_rank of {args.local_rank} is an unknown type {type(args.local_rank)}\"\n if args.local_rank >= 0:\n env_local_rank = int(os.environ.get(\"LOCAL_RANK\"))\n assert env_local_rank == args.local_rank, \\\n f\"Mismatch in local rank setting, args.local_rank={args.local_rank} but env['LOCAL_RANK']={env_local_rank}.\"\n\n if self.config_params is None:\n assert hasattr(args, 'deepspeed_config') and args.deepspeed_config is not None, \\\n 'DeepSpeed requires --deepspeed_config to specify configuration file'\n\n assert os.path.isfile(args.deepspeed_config), \\\n 'DeepSpeed configuration file: {} is not an existing file'.format(args.deepspeed_config)\n\n def _is_supported_optimizer(self, optimizer_name):\n return optimizer_name in DEEPSPEED_OPTIMIZERS or \\\n getattr(torch.optim, optimizer_name, None) is not None\n\n # Validate configuration based on command line arguments\n def _do_sanity_check(self):\n if not self.client_optimizer:\n if self.optimizer_name() is not None:\n assert self._is_supported_optimizer(self.optimizer_name()), \\\n '{} is not a supported DeepSpeed Optimizer'.format(self.optimizer_name())\n\n if self.optimizer_name() == LAMB_OPTIMIZER:\n assert self.dynamic_loss_scale(), \\\n 'DeepSpeed {} optimizer requires dynamic loss scaling'.format(self.optimizer_name())\n\n def _broadcast_model(self):\n def is_replicated(p):\n if hasattr(p, 'ds_status') and p.ds_status is not ZeroParamStatus.AVAILABLE:\n return False\n return True\n\n for p in self.module.parameters():\n if torch.is_tensor(p) and is_replicated(p):\n dist.broadcast(p,\n self.broadcast_src_rank,\n group=self.data_parallel_group)\n\n def _configure_distributed_model(self, model):\n self.module = model\n if self.fp16_enabled():\n self.module.half()\n\n if not self.dont_change_device:\n self.module.to(self.device)\n\n if self.mpu is None:\n self.data_parallel_group = _initialize_parameter_parallel_groups()\n self.dp_world_size = dist.get_world_size()\n self.mp_world_size = 1\n self.broadcast_src_rank = 0\n else:\n self.data_parallel_group = self.mpu.get_data_parallel_group()\n self.dp_world_size = self.mpu.get_data_parallel_world_size()\n self.mp_world_size = self.mpu.get_model_parallel_world_size()\n self.broadcast_src_rank = _get_global_rank(\n self.mpu.get_data_parallel_group(),\n 0)\n\n if not self.amp_enabled():\n self._broadcast_model()\n\n # Configure optimizer\n def _configure_optimizer(self, client_optimizer, model_parameters):\n\n if client_optimizer is not None:\n client_optimizer.param_groups[:] = [\n pg for pg in client_optimizer.param_groups if len(pg[\"params\"]) != 0\n ]\n if self.global_rank == 0:\n logger.info(\n \"Removing param_group that has no 'params' in the client Optimizer\")\n\n basic_optimizer = client_optimizer\n if self.global_rank == 0:\n logger.info('Using client Optimizer as basic optimizer')\n else:\n basic_optimizer = self._configure_basic_optimizer(model_parameters)\n if self.global_rank == 0:\n logger.info(\n 'Using DeepSpeed Optimizer param name {} as basic optimizer'.format(\n self.optimizer_name()))\n\n if self.global_rank == 0:\n logger.info('DeepSpeed Basic Optimizer = {}'.format(\n basic_optimizer.__class__.__name__))\n\n if self.zero_optimization():\n assert not self.amp_enabled(), \"Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs 
similar to amp opt_mode=O2\"\n if not is_zero_supported_optimizer(basic_optimizer):\n assert self.zero_allow_untested_optimizer(), \\\n 'You are using an untested ZeRO Optimizer. Please add <\"zero_allow_untested_optimizer\": true> in the configuration file to use it.'\n\n if self.global_rank == 0:\n logger.warning(\n \"**** You are using ZeRO with an untested optimizer, proceed with caution *****\"\n )\n self.optimizer = self._configure_zero_optimizer(basic_optimizer)\n elif self.amp_enabled():\n assert not self.fp16_enabled(), \"Cannot enable both amp with (legacy) fp16 mode\"\n amp_params = self.amp_params()\n if self.global_rank == 0:\n logger.info(f\"Initializing AMP with these params: {amp_params}\")\n try:\n logger.info(\"Initializing Apex amp from: {}\".format(amp.__path__))\n except NameError:\n # If apex/amp is available it will be imported above\n raise RuntimeError(\n \"Unable to import apex/amp, please make sure it is installed\")\n self.module, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params)\n self._broadcast_model()\n elif self.fp16_enabled():\n self.optimizer = self._configure_fp16_optimizer(basic_optimizer)\n else:\n self.optimizer = basic_optimizer\n log_dist('DeepSpeed Final Optimizer = {}'.format(self.optimizer_name()),\n ranks=[0])\n\n def _configure_basic_optimizer(self, model_parameters):\n optimizer_parameters = self.optimizer_params()\n # print(optimizer_parameters.keys())\n if 'max_grad_norm' in optimizer_parameters.keys():\n raise ValueError(\n \"'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details\"\n )\n\n if self.optimizer_name() in [ADAM_OPTIMIZER, ADAMW_OPTIMIZER]:\n torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False)\n adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT)\n\n # Optimizer name of Adam forces AdamW logic unless adam_w_mode is explictly set\n effective_adam_w_mode = self.optimizer_name(\n ) == ADAMW_OPTIMIZER or adam_w_mode\n\n if torch_adam:\n if not effective_adam_w_mode:\n optimizer = torch.optim.Adam(model_parameters,\n **optimizer_parameters)\n else:\n optimizer = torch.optim.AdamW(model_parameters,\n **optimizer_parameters)\n else:\n if self.zero_cpu_offload():\n from deepspeed.ops.adam import DeepSpeedCPUAdam\n optimizer = DeepSpeedCPUAdam(model_parameters,\n **optimizer_parameters,\n adamw_mode=effective_adam_w_mode)\n else:\n from deepspeed.ops.adam import FusedAdam\n optimizer = FusedAdam(model_parameters,\n **optimizer_parameters,\n adam_w_mode=effective_adam_w_mode)\n\n elif self.optimizer_name() == LAMB_OPTIMIZER:\n from deepspeed.ops.lamb import FusedLamb\n optimizer = FusedLamb(model_parameters, **optimizer_parameters)\n elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:\n from deepspeed.runtime.fp16.onebit.adam import OnebitAdam\n optimizer = OnebitAdam(model_parameters, self, **optimizer_parameters)\n if not self.fp16_enabled():\n logger.warning(\n f'Currently the convergence of 1-bit Adam is only verified under FP16'\n )\n else:\n torch_optimizer = getattr(torch.optim, self.optimizer_name())\n optimizer = torch_optimizer(model_parameters, **optimizer_parameters)\n return optimizer\n\n def _configure_fp16_optimizer(self, optimizer):\n initial_dynamic_scale = self.initial_dynamic_scale()\n dynamic_loss_args = self.dynamic_loss_scale_args()\n clip_grad = self.gradient_clipping()\n if isinstance(optimizer,\n 
FusedAdam) or self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:\n if self.dynamic_loss_scale():\n log_dist('Creating fp16 optimizer with dynamic loss scale', ranks=[0])\n timers = self.timers if self.wall_clock_breakdown() else None\n optimizer = FP16_Optimizer(\n optimizer,\n dynamic_loss_scale=True,\n initial_dynamic_scale=initial_dynamic_scale,\n dynamic_loss_args=dynamic_loss_args,\n mpu=self.mpu,\n clip_grad=clip_grad,\n fused_adam_legacy=self.optimizer_legacy_fusion(),\n timers=timers)\n else:\n log_dist('Creating fp16 optimizer with static loss scale: {}'.format(\n self.loss_scale()),\n ranks=[0])\n optimizer = FP16_Optimizer(\n optimizer,\n static_loss_scale=self.loss_scale(),\n mpu=self.mpu,\n clip_grad=clip_grad,\n fused_adam_legacy=self.optimizer_legacy_fusion())\n else:\n log_dist('Creating fp16 unfused optimizer with dynamic loss scale',\n ranks=[0])\n optimizer = FP16_UnfusedOptimizer(\n optimizer,\n static_loss_scale=self.loss_scale(),\n dynamic_loss_scale=self.dynamic_loss_scale(),\n dynamic_loss_args=dynamic_loss_args,\n mpu=self.mpu,\n clip_grad=clip_grad,\n fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER)\n\n return optimizer\n\n def _configure_zero_optimizer(self, optimizer):\n zero_stage = self.zero_optimization_stage()\n log_dist('Creating fp16 ZeRO stage {} optimizer'.format(zero_stage), ranks=[0])\n assert not self.allreduce_always_fp32(), \"ZeRO does not support 'fp32_allreduce': true\"\n timers = self.timers if self.wall_clock_breakdown() else None\n\n if zero_stage == ZERO_OPTIMIZATION_OPTIMIZER_STATES:\n assert self.zero_reduce_scatter(), 'Stage 1 only supports reduce scatter mode'\n optimizer = FP16_DeepSpeedZeroOptimizer_Stage1(\n optimizer,\n static_loss_scale=self.loss_scale(),\n dynamic_loss_scale=self.dynamic_loss_scale(),\n dynamic_loss_args=self.dynamic_loss_scale_args(),\n clip_grad=self.gradient_clipping(),\n all_gather_partitions=self.zero_allgather_partitions(),\n allgather_size=self.zero_allgather_bucket_size(),\n max_elements_per_comm=self.zero_reduce_bucket_size(),\n dp_process_group=self.data_parallel_group,\n elastic_checkpoint=self.zero_elastic_checkpoint(),\n mpu=self.mpu)\n elif zero_stage == ZERO_OPTIMIZATION_GRADIENTS:\n optimizer = FP16_DeepSpeedZeroOptimizer(\n optimizer,\n timers=timers,\n static_loss_scale=self.loss_scale(),\n dynamic_loss_scale=self.dynamic_loss_scale(),\n dynamic_loss_args=self.dynamic_loss_scale_args(),\n clip_grad=self.gradient_clipping(),\n contiguous_gradients=self.zero_contiguous_gradients(),\n reduce_bucket_size=self.zero_reduce_bucket_size(),\n allgather_bucket_size=self.zero_allgather_bucket_size(),\n dp_process_group=self.data_parallel_group,\n reduce_scatter=self.zero_reduce_scatter(),\n overlap_comm=self.zero_overlap_comm(),\n cpu_offload=self.zero_cpu_offload(),\n mpu=self.mpu,\n postscale_gradients=self.postscale_gradients(),\n gradient_predivide_factor=self.gradient_predivide_factor(),\n gradient_accumulation_steps=self.gradient_accumulation_steps())\n elif zero_stage == ZERO_OPTIMIZATION_WEIGHTS:\n print(\"Initializing ZeRO Stage 3\") if dist.get_rank() == 0 else None\n from deepspeed.runtime.zero.stage3 import FP16_DeepSpeedZeroOptimizer_Stage3\n optimizer = FP16_DeepSpeedZeroOptimizer_Stage3(\n self.module,\n optimizer,\n timers=timers,\n static_loss_scale=self.loss_scale(),\n dynamic_loss_scale=self.dynamic_loss_scale(),\n dynamic_loss_args=self.dynamic_loss_scale_args(),\n clip_grad=self.gradient_clipping(),\n contiguous_gradients=self.zero_contiguous_gradients(),\n 
reduce_bucket_size=self.zero_reduce_bucket_size(),\n prefetch_bucket_size=self.zero_prefetch_bucket_size(),\n max_reuse_distance=self.zero_max_reuse_distance(),\n max_live_parameters=self.zero_max_live_parameters(),\n param_persistence_threshold=self.zero_param_persistence_threshold(),\n dp_process_group=self.data_parallel_group,\n reduce_scatter=self.zero_reduce_scatter(),\n overlap_comm=self.zero_overlap_comm(),\n offload_optimizer_config=self.zero_offload_optimizer(),\n offload_param_config=self.zero_offload_param(),\n sub_group_size=self.zero_sub_group_size(),\n mpu=self.mpu,\n postscale_gradients=self.postscale_gradients(),\n gradient_predivide_factor=self.gradient_predivide_factor(),\n gradient_accumulation_steps=self.gradient_accumulation_steps(),\n aio_config=self.aio_config())\n\n else:\n raise NotImplementedError(\"ZeRO stage {} not implemented\".format(zero_stage))\n\n return optimizer\n\n def _configure_progressive_layer_drop(self):\n pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma())\n\n return pld\n\n def deepspeed_io(self,\n dataset,\n batch_size=None,\n route=ROUTE_TRAIN,\n pin_memory=True,\n data_sampler=None,\n collate_fn=None,\n num_local_io_workers=None):\n if not isinstance(dataset, torch.utils.data.Dataset):\n raise ValueError(\"Training data must be a torch Dataset\")\n\n if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL):\n data_sampler = torch.utils.data.SequentialSampler(dataset)\n\n if batch_size is None:\n batch_size = self.train_micro_batch_size_per_gpu()\n\n if collate_fn is None:\n collate_fn = self.collate_fn\n\n # Currently we only use timer in train route\n deepspeed_io_timer = None\n if route == ROUTE_TRAIN:\n deepspeed_io_timer = self.tput_timer\n\n # If mpu is provied, forward world size and parallel rank to sampler.\n data_parallel_world_size = None\n data_parallel_rank = None\n if self.mpu is not None:\n data_parallel_world_size = self.mpu.get_data_parallel_world_size()\n data_parallel_rank = self.mpu.get_data_parallel_rank()\n\n return DeepSpeedDataLoader(dataset=dataset,\n batch_size=batch_size,\n pin_memory=pin_memory,\n collate_fn=collate_fn,\n local_rank=self.local_rank,\n tput_timer=deepspeed_io_timer,\n num_local_io_workers=num_local_io_workers,\n data_sampler=data_sampler,\n data_parallel_world_size=data_parallel_world_size,\n data_parallel_rank=data_parallel_rank)\n\n def train(self, mode=True):\n r\"\"\"\n \"\"\"\n\n self.warn_unscaled_loss = True\n self.module.train(mode)\n\n def eval(self):\n r\"\"\"\n \"\"\"\n\n self.warn_unscaled_loss = True\n self.module.train(False)\n\n def _scale_loss(self, prescaled_loss):\n if isinstance(prescaled_loss, torch.Tensor):\n scaled_loss = prescaled_loss / self.gradient_accumulation_steps()\n elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list):\n scaled_loss = []\n for l in prescaled_loss:\n if isinstance(l, torch.Tensor):\n scaled_loss.append(l / self.gradient_accumulation_steps())\n else:\n scaled_loss.append(l)\n else:\n scaled_loss = prescaled_loss\n if self.warn_unscaled_loss:\n logger.warning(\n f'DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}'\n )\n self.warn_unscaled_loss = False\n\n return scaled_loss\n\n def forward(self, *inputs, **kwargs):\n r\"\"\"Execute forward propagation\n\n Arguments:\n *inputs: Variable length input list\n **kwargs: variable length keyword arguments\n \"\"\"\n if self.flops_profiler_enabled(\n ) and self.global_steps == self.flops_profiler_profile_step(\n ) and 
self.global_rank == 0:\n self.flops_profiler = FlopsProfiler(self.module)\n self.flops_profiler.start_profile(ignore_list=None)\n\n if self.module.training and self.progressive_layer_drop:\n kwargs.update(self.progressive_layer_drop.get_state())\n\n if self.zero_optimization_partition_weights():\n # Enable automated discovery of external parameters by indicating that\n # we are in a forward pass.\n for module in self.module.modules():\n module._parameters._in_forward = True\n pass\n\n if self.wall_clock_breakdown():\n self.timers('forward_microstep').start()\n self.timers('forward').start()\n\n if self.training_dataloader is None:\n self.tput_timer.start()\n loss = self.module(*inputs, **kwargs)\n\n if self.zero_optimization_partition_weights():\n # Reset the ZeRO-3 state if we are only doing forward-passes (ie evaluation).\n if not torch._C.is_grad_enabled():\n self.optimizer.param_coordinator.reset_step()\n\n # Disable automated discovery of external parameters\n for module in self.module.modules():\n module._parameters._in_forward = False\n\n if self.wall_clock_breakdown():\n self.timers('forward').stop()\n self.timers('forward_microstep').stop()\n\n if self.flops_profiler_enabled(\n ) and self.global_steps == self.flops_profiler_profile_step(\n ) and self.global_rank == 0:\n self.flops_profiler.print_model_profile(\n profile_step=self.global_steps,\n module_depth=self.flops_profiler_module_depth(),\n top_modules=self.flops_profiler_top_modules(),\n detailed=self.flops_profiler_detailed())\n self.flops_profiler.end_profile()\n\n return loss\n\n def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE):\n #Zero stage 2 communicates during non gradient accumulation boundaries as well\n if self.zero_optimization_partition_gradients():\n self.optimizer.overlapping_partition_gradients_reduce_epilogue()\n\n #Communicate only at gradient accumulation boundaries\n elif self.is_gradient_accumulation_boundary():\n if self.zero_optimization_stage() == ZERO_OPTIMIZATION_OPTIMIZER_STATES:\n assert self.zero_reduce_scatter()\n self.optimizer.reduce_scatter_gradients(\n postscale_gradients=self.postscale_gradients(),\n gradient_predivide_factor=self.gradient_predivide_factor(),\n gradient_average=self.gradient_average)\n else:\n self.buffered_allreduce_fallback(elements_per_buffer=bucket_size)\n\n def backward(self, loss, allreduce_gradients=True, release_loss=False):\n r\"\"\"Execute backward pass on the loss\n\n Arguments:\n loss: Torch tensor on which to execute backward propagation\n allreduce_gradients: is deprecated, ignored, and will soon be removed'\n \"\"\"\n\n if not allreduce_gradients:\n logger.warning(\n f'Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed'\n )\n\n # scale loss w.r.t. 
gradient accumulation if needed\n if self.gradient_accumulation_steps() > 1:\n loss = self._scale_loss(loss.float())\n\n # Log training Loss\n if self.tensorboard_enabled():\n if self.is_gradient_accumulation_boundary():\n if self.global_rank == 0:\n self.summary_events = [\n (f'Train/Samples/train_loss',\n loss.mean().item() * self.gradient_accumulation_steps(),\n self.global_samples)\n ]\n for event in self.summary_events: # write_summary_events\n self.summary_writer.add_scalar(event[0], event[1], event[2])\n self.summary_writer.flush()\n\n if self.wall_clock_breakdown():\n self.timers('backward_microstep').start()\n self.timers('backward').start()\n\n assert self.optimizer is not None, \"must provide optimizer during \" \\\n \"init in order to use backward\"\n\n if self.wall_clock_breakdown():\n self.timers('backward_inner_microstep').start()\n self.timers('backward_inner').start()\n\n if self.zero_optimization():\n self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary(\n )\n self.optimizer.backward(loss)\n elif self.amp_enabled():\n # AMP requires delaying unscale when inside gradient accumulation boundaries\n # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n delay_unscale = not self.is_gradient_accumulation_boundary()\n with amp.scale_loss(loss,\n self.optimizer,\n delay_unscale=delay_unscale) as scaled_loss:\n scaled_loss.backward()\n elif self.fp16_enabled():\n self.optimizer.backward(loss)\n else:\n loss.backward()\n\n if self.wall_clock_breakdown():\n self.timers('backward_inner').stop()\n self.timers('backward_inner_microstep').stop()\n\n if self.wall_clock_breakdown():\n self.timers('backward_allreduce_microstep').start()\n self.timers('backward_allreduce').start()\n\n if self.enable_backward_allreduce:\n self.allreduce_gradients()\n\n if self.wall_clock_breakdown():\n self.timers('backward_allreduce').stop()\n self.timers('backward_allreduce_microstep').stop()\n self.timers('backward').stop()\n self.timers('backward_microstep').stop()\n\n if release_loss:\n # loss.data = None\n pass\n\n return loss\n\n def is_gradient_accumulation_boundary(self):\n \"\"\"Query whether the current micro-batch is at the boundary of\n gradient accumulation, and thus will trigger gradient reductions and\n an optimizer step.\n\n Returns:\n bool: if the current step is a gradient accumulation boundary.\n \"\"\"\n return (self.micro_steps + 1) % \\\n self.gradient_accumulation_steps() == 0\n\n def zero_grad(self):\n \"\"\"\n Zero parameter grads.\n \"\"\"\n for param_name, param in self.module.named_parameters():\n param.grad = None\n\n def clip_fp32_gradients(self):\n torch.nn.utils.clip_grad_norm_(parameters=self.module.parameters(),\n max_norm=self.gradient_clipping())\n\n def _take_model_step(self, lr_kwargs):\n if self.gradient_clipping() > 0.0:\n if not self.fp16_enabled() and not self.amp_enabled():\n self.clip_fp32_gradients()\n elif self.amp_enabled():\n # AMP's recommended way of doing clipping\n # https://nvidia.github.io/apex/advanced.html#gradient-clipping\n master_params = amp.master_params(self.optimizer)\n torch.nn.utils.clip_grad_norm_(parameters=master_params,\n max_norm=self.gradient_clipping())\n self.optimizer.step()\n\n #zero grad in basic optimizer could be unreliable and may not exhibit\n #the behaviour that we want\n if not self.zero_optimization() and not self.fp16_enabled(\n ) and not self.amp_enabled():\n self.zero_grad()\n else:\n self.optimizer.zero_grad()\n\n report_progress = self.global_rank == 0 
if self.global_rank else True\n\n # Check overlow here since in DS fp16 optimizer, the overflow is updated in above step() function.\n overflow = False\n if hasattr(self.optimizer, 'overflow'):\n overflow = self.optimizer.overflow\n\n if overflow:\n self.skipped_steps += 1\n else:\n if self.lr_scheduler is not None:\n self.lr_scheduler.step(**(lr_kwargs or {}))\n\n if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0:\n self._report_progress(self.global_steps + 1)\n\n self.global_steps += 1\n self.global_samples += self.train_batch_size()\n\n def step(self, lr_kwargs=None):\n r\"\"\"Execute the weight update step after forward and backward propagation\n on effective_train_batch.\n \"\"\"\n if self.wall_clock_breakdown():\n self.timers('step_microstep').start()\n self.timers('step').start()\n\n assert self.optimizer is not None, \"must provide optimizer during \" \\\n \"init in order to use step\"\n report_progress = self.global_rank == 0 if self.global_rank else True\n\n # Update the model when we reach gradient accumulation boundaries\n if self.is_gradient_accumulation_boundary():\n if self.progressive_layer_drop:\n self.progressive_layer_drop.update_state(self.global_steps)\n\n self._take_model_step(lr_kwargs)\n\n self.tput_timer.stop(report_progress)\n\n # Log learning rate\n if self.tensorboard_enabled():\n if self.is_gradient_accumulation_boundary():\n if self.global_rank == 0:\n self.summary_events = [(f'Train/Samples/lr',\n self.get_lr()[0],\n self.global_samples)]\n for event in self.summary_events: # write_summary_events\n self.summary_writer.add_scalar(event[0], event[1], event[2])\n if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):\n self.summary_events.append((f'Train/Samples/loss_scale',\n self.optimizer.cur_scale,\n self.global_samples))\n for event in self.summary_events: # write_summary_events\n self.summary_writer.add_scalar(event[0], event[1], event[2])\n self.summary_writer.flush()\n\n if self.wall_clock_breakdown():\n self.timers('step').stop()\n self.timers('step_microstep').stop()\n timer_names = [\n 'forward_microstep',\n 'backward_microstep',\n 'backward_inner_microstep',\n 'backward_allreduce_microstep',\n 'step_microstep'\n ]\n self.timers.log(names=timer_names, memory_breakdown=self.memory_breakdown())\n\n # Log timing\n if self.is_gradient_accumulation_boundary():\n if self.tensorboard_enabled():\n if self.global_rank == 0:\n self.summary_events = [\n (f'Train/Samples/elapsed_time_ms_forward',\n self.timers('forward').elapsed(reset=False) * 1000.0,\n self.global_samples),\n (f'Train/Samples/elapsed_time_ms_backward',\n self.timers('backward').elapsed(reset=False) * 1000.0,\n self.global_samples),\n (f'Train/Samples/elapsed_time_ms_backward_inner',\n self.timers('backward_inner').elapsed(reset=False) * 1000.0,\n self.global_samples),\n (f'Train/Samples/elapsed_time_ms_backward_allreduce',\n self.timers('backward_allreduce').elapsed(reset=False) *\n 1000.0,\n self.global_samples),\n (f'Train/Samples/elapsed_time_ms_step',\n self.timers('step').elapsed(reset=False) * 1000.0,\n self.global_samples)\n ]\n for event in self.summary_events: # write_summary_events\n self.summary_writer.add_scalar(event[0], event[1], event[2])\n self.summary_writer.flush()\n\n if self.wall_clock_breakdown():\n self.timers.log([\n 'forward',\n 'backward',\n 'backward_inner',\n 'backward_allreduce',\n 'step'\n ])\n\n self.micro_steps += 1\n\n def _get_optimizer_param(self, param_name):\n result = []\n if not self.optimizer:\n return result\n for group 
in self.optimizer.param_groups:\n if param_name in group:\n result.append(group[param_name])\n else:\n result.append(0.0)\n return result\n\n def get_lr(self):\n return self._get_optimizer_param('lr')\n\n def get_type(self):\n return self._get_optimizer_param('type')\n\n def get_mom(self):\n if self.optimizer_name() in ['SGD', 'RMSprop']:\n return self._get_optimizer_param('momentum')\n else:\n return self._get_optimizer_param('betas')\n\n def get_pld_theta(self):\n if self.progressive_layer_drop:\n return self.progressive_layer_drop.get_theta()\n else:\n return None\n\n def _report_progress(self, step):\n lr = self.get_lr()\n mom = self.get_mom()\n log_dist(f'step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}',\n ranks=[0])\n\n def allreduce_bucket(self, bucket):\n tensor = self.flatten(bucket)\n\n tensor_to_allreduce = tensor\n\n if self.allreduce_always_fp32():\n tensor_to_allreduce = tensor.float()\n\n if self.postscale_gradients():\n if self.gradient_predivide_factor() != 1.0:\n tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor())\n\n dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)\n\n if self.gradient_average:\n if self.gradient_predivide_factor() != self.dp_world_size:\n tensor_to_allreduce.mul_(self.gradient_predivide_factor() /\n self.dp_world_size)\n else:\n tensor_to_allreduce.div_(self.dp_world_size)\n dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)\n\n if self.allreduce_always_fp32() and tensor is not tensor_to_allreduce:\n tensor.copy_(tensor_to_allreduce)\n\n return tensor\n\n def allreduce_and_copy(self, small_bucket):\n allreduced = self.allreduce_bucket(small_bucket)\n for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):\n buf.copy_(synced)\n\n def allreduce_no_retain(self, bucket, numel_per_bucket=500000000):\n small_bucket = []\n numel = 0\n for tensor in bucket:\n small_bucket.append(tensor)\n numel = numel + tensor.numel()\n if numel > numel_per_bucket:\n self.allreduce_and_copy(small_bucket)\n small_bucket = []\n numel = 0\n if len(small_bucket) > 0:\n self.allreduce_and_copy(small_bucket)\n\n def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000):\n grads = []\n for param_name, param in self.module.named_parameters():\n if param.grad is None:\n # In cases where there is an imbalance of empty grads across\n # ranks we must create empty grads, this will ensure that every\n # rank is reducing the same size. In some cases it may make\n # sense in the future to support the ability to average not\n # w.r.t. 
world size but with a different value.\n param.grad = torch.zeros(param.size(),\n dtype=param.dtype,\n device=param.device)\n grads.append(param.grad.data)\n else:\n grad_data = param.grad.data\n if self.sparse_gradients_enabled(\n ) and param_name in self.csr_tensor_module_names:\n grads.append(CSRTensor(grad_data))\n else:\n grads.append(grad_data)\n\n split_buckets = split_half_float_double_csr(grads)\n\n for i, bucket_tuple in enumerate(split_buckets):\n bucket_type, bucket = bucket_tuple\n if bucket_type == CSRTensor.type():\n self.csr_allreduce_no_retain(bucket)\n else:\n self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer)\n\n def csr_allreduce_no_retain(self, bucket):\n allreduced_csrs = self.csr_allreduce_bucket(bucket)\n # Densify csr tensor and copy back to original location\n for csr in allreduced_csrs:\n dense_tensor = csr.to_dense()\n csr.orig_dense_tensor.copy_(dense_tensor)\n\n def csr_allreduce_bucket(self, bucket):\n csr_list = []\n for csr in bucket:\n csr_list.append(self.csr_allreduce(csr))\n return csr_list\n\n def csr_allreduce(self, csr):\n # Pre-divide for fp16 stability\n csr.values.div_(self.dp_world_size)\n\n indices_device_list = self.csr_all_gather(csr.indices)\n values_device_list = self.csr_all_gather(csr.values)\n\n csr.indices = torch.cat(indices_device_list)\n csr.values = torch.cat(values_device_list)\n return csr\n\n def csr_all_gather(self, value):\n my_size = torch.LongTensor([value.size()[0]]).to(self.device)\n all_sizes = self.all_gather_scalar(my_size)\n max_size = torch.cat(all_sizes).max()\n fill_size = (max_size - my_size)\n\n assert value.dim() in [1, 2]\n if value.dim() == 1:\n if fill_size > 0:\n value = torch.cat([value, value.new_zeros(fill_size)])\n tensor_list = [value.new_zeros(max_size) for _ in range(self.dp_world_size)]\n else:\n if fill_size > 0:\n value = torch.cat([value, value.new_zeros(fill_size, value.size()[1])])\n tensor_list = [\n value.new_zeros(max_size,\n value.size()[1]) for _ in range(self.dp_world_size)\n ]\n\n dist.all_gather(tensor_list, value, group=self.data_parallel_group)\n tensors = []\n for dev_idx, t in enumerate(tensor_list):\n size = all_sizes[dev_idx][0]\n tensors.append(\n t.index_select(0,\n torch.LongTensor(range(size)).to(self.device)))\n\n return tensors\n\n def all_gather_scalar(self, value):\n tensor_list = [value.new_zeros(value.size()) for _ in range(self.dp_world_size)]\n dist.all_gather(tensor_list, value, group=self.data_parallel_group)\n return tensor_list\n\n def module_state_dict(self, destination=None, prefix='', keep_vars=False):\n sd = self.module.state_dict(destination, prefix, keep_vars)\n return sd\n\n def load_module_state_dict(self, state_dict, strict=True):\n self.module.load_state_dict(state_dict, strict=strict)\n\n def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank):\n filename = 'zero_pp_rank_{}'.format(dp_rank)\n zero_ckpt_name = os.path.join(\n checkpoints_path,\n str(tag),\n filename + '_mp_rank_{:02d}'.format(mp_rank) + '_optim_states.pt')\n return zero_ckpt_name\n\n def _get_zero_ckpt_name(self, checkpoints_path, tag):\n mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()\n pp_rank = torch.distributed.get_rank(group=self.optimizer.dp_process_group)\n return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank)\n\n def _get_ckpt_name(self, checkpoints_path, tag):\n mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()\n if self.zero_optimization_partition_weights():\n filename = 
'zero_pp_rank_{}'.format(\n torch.distributed.get_rank(group=self.optimizer.dp_process_group))\n ckpt_name = os.path.join(\n checkpoints_path,\n str(tag),\n filename + '_mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')\n else:\n ckpt_name = os.path.join(\n checkpoints_path,\n str(tag),\n 'mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')\n return ckpt_name\n\n def load_checkpoint(self,\n load_dir,\n tag=None,\n load_module_strict=True,\n load_optimizer_states=True,\n load_lr_scheduler_states=True):\n \"\"\"Load training checkpoint\n\n Arguments:\n load_dir: Required. Directory to load the checkpoint from\n tag: Checkpoint tag used as a unique identifier for checkpoint, if not provided will attempt to load tag in 'latest' file\n load_module_strict: Optional. Boolean to strictly enforce that the keys in state_dict of module and checkpoint match.\n load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance\n load_lr_scheduler_states: Optional. Boolean to add the learning rate scheduler states from Checkpoint.\n Returns:\n A tuple of ``load_path`` and ``client_state``.\n\n *``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed.\n\n *``client_state``: State dictionary used for loading required training states in the client code.\n \"\"\"\n\n if tag is None:\n latest_path = os.path.join(load_dir, 'latest')\n if os.path.isfile(latest_path):\n with open(latest_path, 'r') as fd:\n tag = fd.read().strip()\n else:\n logger.warning(f\"Unable to find latest file at {latest_path}, if trying to load latest \" \\\n \"checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.\")\n return None, None\n\n load_path, client_states = self._load_checkpoint(load_dir,\n tag,\n load_module_strict=load_module_strict,\n load_optimizer_states=load_optimizer_states,\n load_lr_scheduler_states=load_lr_scheduler_states)\n\n if self.zero_optimization() and load_path is not None:\n self._load_zero_checkpoint(load_dir,\n tag,\n load_optimizer_states=load_optimizer_states)\n\n return load_path, client_states\n\n def _load_checkpoint(self,\n load_dir,\n tag,\n load_module_strict=True,\n load_optimizer_states=True,\n load_lr_scheduler_states=True):\n\n load_path = self._get_ckpt_name(load_dir, tag)\n\n if not os.path.exists(load_path):\n logger.warn(\n 'Client provided checkpoint load path: {} does not exist ... 
skip checkpoint load'\n .format(load_path))\n return None, None\n\n logger.info(f'rank: {self.global_rank} loading checkpoint: {load_path}')\n checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)\n\n if isinstance(self.module, PipelineModule):\n # Pipeline parallelism uses this to load its own checkpoint files.\n self._curr_ckpt_path = os.path.join(load_dir, tag)\n\n self.load_module_state_dict(state_dict=checkpoint['module'],\n strict=load_module_strict)\n if self.optimizer is not None and not self.zero_optimization():\n if self.fp16_enabled():\n self.optimizer.load_state_dict(\n checkpoint['optimizer'],\n load_optimizer_states=load_optimizer_states)\n elif load_optimizer_states:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if load_lr_scheduler_states and self.lr_scheduler is not None:\n self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n\n self.csr_tensor_module_names = checkpoint['csr_tensor_module_names']\n self.global_steps = checkpoint['global_steps']\n self.global_samples = checkpoint.get('global_samples',\n self.global_steps * self.train_batch_size())\n self.skipped_steps = checkpoint['skipped_steps']\n self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size']\n self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size']\n deepspeed_states = [\n 'module',\n 'optimizer',\n 'lr_scheduler',\n 'csr_tensor_module_names',\n 'skipped_steps',\n 'global_steps',\n 'dp_world_size',\n 'mp_world_size'\n ]\n client_state = {\n key: value\n for key,\n value in checkpoint.items() if not key in deepspeed_states\n }\n\n return load_path, client_state\n\n def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True):\n zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag)\n if zero_sd_list is None:\n return\n\n self.optimizer.load_state_dict(\n state_dict_list=zero_sd_list,\n load_optimizer_states=load_optimizer_states,\n load_from_fp32_weights=self.zero_load_from_fp32_weights())\n print(\n f'loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}'\n )\n\n def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size):\n zero_ckpt_names = []\n for dp_rank in range(dp_world_size):\n ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir,\n tag=tag,\n mp_rank=mp_rank,\n dp_rank=dp_rank)\n zero_ckpt_names.append(ckpt_name)\n\n return zero_ckpt_names\n\n def _get_all_zero_checkpoint_names(self,\n load_dir,\n tag,\n mp_world_size,\n dp_world_size):\n zero_ckpt_names = []\n for mp_rank in range(mp_world_size):\n mp_rank_ckpt_names = self._get_mp_rank_zero_checkpoint_names(\n load_dir=load_dir,\n tag=tag,\n mp_rank=mp_rank,\n dp_world_size=dp_world_size)\n zero_ckpt_names += mp_rank_ckpt_names\n\n return zero_ckpt_names\n\n def _get_all_zero_checkpoints(self, load_dir, tag):\n mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()\n zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(\n load_dir=load_dir,\n tag=tag,\n mp_rank=mp_rank,\n dp_world_size=self.loaded_checkpoint_dp_world_size)\n invalid_zero_ckpt_paths = []\n for i, ckpt_name in enumerate(zero_ckpt_names):\n if not os.path.exists(ckpt_name):\n # transparently handle the old file pattern for optim_states\n if 'optim_states.pt' in ckpt_name:\n ckpt_name_try = ckpt_name.replace(\"_optim_states.pt\",\n \"optim_states.pt\")\n if os.path.exists(ckpt_name_try):\n zero_ckpt_names[i] = ckpt_name_try\n continue\n invalid_zero_ckpt_paths.append(ckpt_name)\n\n if 
len(invalid_zero_ckpt_paths) > 0:\n logger.warn(\n f\"The following zero checkpoints paths are missing: {invalid_zero_ckpt_paths}\"\n )\n return None\n\n zero_sd_list = []\n for ckpt_name in zero_ckpt_names:\n zero_sd_list.append(torch.load(ckpt_name, map_location='cpu'))\n\n zero_optimizer_sd = [sd['optimizer_state_dict'] for sd in zero_sd_list]\n print(\n f\"successfully loaded {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}\"\n )\n return zero_optimizer_sd\n\n def _checkpoint_tag_validation(self, tag):\n if self.checkpoint_tag_validation_enabled():\n s_hash = hashlib.sha1(tag.encode())\n bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device)\n max_bhash = bhash.clone()\n min_bhash = bhash.clone()\n dist.all_reduce(max_bhash, op=torch.distributed.ReduceOp.MAX)\n dist.all_reduce(min_bhash, op=torch.distributed.ReduceOp.MIN)\n valid = all(min_bhash == bhash) and all(max_bhash == bhash)\n msg = f\"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across \" \\\n \"all ranks. Including rank unique information in checkpoint tag could cause issues when \" \\\n \"restoring with different world sizes.\"\n if self.checkpoint_tag_validation_fail():\n assert valid, msg\n elif not valid:\n logger.warning(msg)\n\n def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True):\n r\"\"\"Save training checkpoint\n\n Arguments:\n save_dir: Required. Directory for saving the checkpoint\n tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint, global step is\n used if not provided. Tag name must be the same across all ranks.\n client_state: Optional. State dictionary used for saving required training states in the client code.\n save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint.\n\n Important: all processes must call this method and not just the process with rank 0. It is\n because each process needs to save its master weights and scheduler+optimizer states. 
This\n method will hang waiting to synchronize with other processes if it's called just for the\n process with rank 0.\n \"\"\"\n\n if self.zero_optimization_partition_weights():\n # Prepare for state_dict() by ensuring all parameters are partitioned\n self.optimizer.save_checkpoint_prologue()\n\n # This is to make sure the checkpoint names are created without collision\n # There seems to be issue creating them in parallel\n\n # Ensure save_dir directory exists\n os.makedirs(save_dir, exist_ok=True)\n\n if tag is None:\n tag = f\"global_step{self.global_steps}\"\n\n # Ensure tag is a string\n tag = str(tag)\n\n # Ensure checkpoint tag is consistent across ranks\n self._checkpoint_tag_validation(tag)\n\n if self.save_non_zero_checkpoint:\n self._create_checkpoint_file(save_dir, tag, False)\n self._save_checkpoint(save_dir, tag, client_state=client_state)\n\n if self.save_zero_checkpoint:\n self._create_zero_checkpoint_files(save_dir, tag)\n self._save_zero_checkpoint(save_dir, tag)\n\n # Save latest checkpoint tag\n if save_latest:\n with open(os.path.join(save_dir, 'latest'), 'w') as fd:\n fd.write(tag)\n\n if self.zero_optimization_partition_weights():\n self.optimizer.save_checkpoint_epilogue()\n\n return True\n\n def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint):\n name_function = self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name\n try:\n checkpoint_name = name_function(save_dir, tag)\n ensure_directory_exists(checkpoint_name)\n except:\n logger.error(f'Failed saving model checkpoint to {save_dir} with tag {tag}')\n return False\n\n return True\n\n def _create_zero_checkpoint_files(self, save_dir, tag):\n success = True\n # zero checkpoint files are created sequentially\n for rank in range(self.world_size):\n if rank == self.global_rank:\n success = self._create_checkpoint_file(save_dir, tag, True)\n\n dist.barrier()\n\n return success\n\n def _save_checkpoint(self, save_dir, tag, client_state={}):\n\n save_path = self._get_ckpt_name(save_dir, tag)\n # A hack to save the checkpointing directory. Pipeline parallelism overrides\n # module_state_dict() and uses this path to save the model. 
module_state_dict()\n # then instead just returns None.\n self._curr_ckpt_path = os.path.join(save_dir, tag)\n\n state = dict(\n module=self.module_state_dict(),\n optimizer=self.optimizer.state_dict()\n if self.optimizer and not self.zero_optimization() else None,\n lr_scheduler=self.lr_scheduler.state_dict()\n if self.lr_scheduler is not None else None,\n csr_tensor_module_names=self.csr_tensor_module_names,\n skipped_steps=self.skipped_steps,\n global_steps=self.global_steps,\n global_samples=self.global_samples,\n dp_world_size=self.dp_world_size,\n mp_world_size=self.mp_world_size,\n )\n state.update(client_state)\n\n log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0])\n #logger.info('Saving model checkpoint: {}'.format(save_path))\n torch.save(state, save_path)\n self._curr_save_path = None\n\n def _get_param_shapes(self):\n param_shapes = OrderedDict()\n for name, param in self.module.named_parameters():\n param_shapes[name] = param.ds_shape if hasattr(param,\n \"ds_shape\") else param.shape\n # print(f\"saving param {name} {param_shapes[name]}\")\n return param_shapes\n\n def _copy_recovery_script(self, save_path):\n base_dir = os.path.dirname(os.path.dirname(__file__))\n script = \"zero_to_fp32.py\"\n src = os.path.join(base_dir, \"utils\", script)\n dst = os.path.join(save_path, script)\n logger.info(f\"creating recovery script {dst}\")\n copyfile(src, dst)\n # make executable\n os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)\n\n def _save_zero_checkpoint(self, save_path, tag):\n zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag)\n zero_sd = dict(\n optimizer_state_dict=self.optimizer.state_dict(),\n param_shapes=self._get_param_shapes(),\n )\n torch.save(zero_sd, zero_checkpoint_name)\n self._copy_recovery_script(save_path)\n logger.info('zero checkpoint saved {}'.format(zero_checkpoint_name))\n\n def _zero3_consolidated_fp16_state_dict(self):\n \"\"\"\n\n Get a full non-partitioned state_dict with fp16 weights on cpu.\n\n Important: this function must be called on all ranks and not just rank 0.\n\n This is similar to nn.Module.state_dict (modelled after _save_to_state_dict), but:\n\n 1. consolidates the weights from different partitions on gpu0\n 2. works on one layer at a time to require as little gpu0 memory as possible, by\n moving the already consolidated weights to cpu\n 3. 
takes care to keep the shared params shared when gradually copying the params to cpu\n\n Returns:\n a consolidated fp16 ``state_dict`` on cpu on rank 0, ``None`` on other ranks\n\n \"\"\"\n import deepspeed\n\n if not self.zero_optimization_partition_weights():\n raise ValueError(\"this function requires ZeRO-3 mode\")\n\n state_dict = OrderedDict() if torch.distributed.get_rank() == 0 else None\n shared_weights = {}\n\n def get_layer_state_dict(module, prefix=\"\"):\n # gather one layer at a time to be memory-efficient\n with deepspeed.zero.GatheredParameters(list(\n module.parameters(recurse=False))):\n if torch.distributed.get_rank() == 0:\n for name, param in module.named_parameters(recurse=False):\n if param is None:\n continue\n key = prefix + name\n # for shared weights we want to make sure not to unshare them when copying to cpu\n data_ptr_id = param.storage().data_ptr()\n if data_ptr_id in shared_weights:\n # shared weights\n # print(f\"`{key}` is shared with `{shared_weights[data_ptr_id]}`\")\n state_dict[key] = state_dict[shared_weights[data_ptr_id]]\n else:\n state_dict[key] = param.detach().cpu()\n shared_weights[data_ptr_id] = key\n #print(f\"param {name} {param.shape}\")\n #print(f\"param {key} {param.shape} {state_dict[key].storage().data_ptr()}\")\n\n # now buffers - not sure if need to take care of potentially shared weights here\n for name, buf in module.named_buffers(recurse=False):\n if buf is not None and name not in module._non_persistent_buffers_set:\n state_dict[prefix + name] = buf.detach().cpu()\n\n for name, child in module.named_children():\n if child is not None:\n get_layer_state_dict(child, prefix + name + \".\")\n\n see_memory_usage(\"before get_layer_state_dict\", force=False)\n get_layer_state_dict(self.module, prefix=\"\")\n see_memory_usage(\"after get_layer_state_dict\", force=False)\n\n return state_dict\n\n def save_fp16_model(self, save_dir, save_filename=\"pytorch_model.bin\"):\n r\"\"\"Save fp16 model weights\n\n This method saves the fp16 model weights at the desired destination.\n\n Arguments:\n save_dir: Required. Directory for saving the model\n save_filename: Optional. Filename to save to. Defaults to ``pytorch_model.bin``\n\n Important: all processes must call this method and not just the process with rank 0. It is\n because the processes need to work in sync to gather the weights. This method will hang\n waiting to synchronize with other processes if it's called just for the process with rank 0.\n\n \"\"\"\n\n path = os.path.join(save_dir, save_filename)\n\n if self.zero_optimization_partition_weights():\n if self.zero_gather_fp16_weights_on_model_save():\n # consolidation is expensive in time and memory and therefore isn't a default\n state_dict = self._zero3_consolidated_fp16_state_dict()\n else:\n # the model will be bogus if not consolidated so don't confuse the user by saving it\n logger.info(\n f\"Did not save the model {path} because `stage3_gather_fp16_weights_on_model_save` is False\"\n )\n return\n else:\n state_dict = self.module.state_dict()\n\n if torch.distributed.get_rank() == 0:\n os.makedirs(save_dir, exist_ok=True)\n logger.info(f\"Saving model weights to {path}\")\n torch.save(state_dict, path)\n"
] | [
[
"torch.optim.Adam",
"torch.distributed.broadcast",
"torch.cuda.set_device",
"torch.cat",
"torch.load",
"torch.utils.data.SequentialSampler",
"torch.distributed.all_gather",
"torch.distributed.is_initialized",
"torch.is_tensor",
"torch.distributed.barrier",
"torch.optim.AdamW",
"torch.distributed.new_group",
"torch.device",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.distributed.all_reduce",
"torch._C.is_grad_enabled",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
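The row above captures DeepSpeed's engine internals: loss scaling across gradient-accumulation boundaries, ZeRO/fp16 optimizer wiring, and checkpointing. As a reading aid, here is a minimal sketch of the client-side loop that drives the engine methods shown in the code column (forward, backward, step, save_checkpoint). deepspeed.initialize and its config_params dict are real entry points of this library version (the engine reads param_dict=self.config_params above); the toy model, data, and config values are illustrative only, and the script assumes it is launched via the deepspeed launcher, which sets LOCAL_RANK as the source's sanity check requires.

import torch
import torch.nn.functional as F
import deepspeed

# Toy stand-ins; nothing below comes from the dataset row itself.
model = torch.nn.Linear(16, 4)
batches = [(torch.randn(4, 16), torch.randint(0, 4, (4,))) for _ in range(8)]

# Same dict-based configuration path as `param_dict=self.config_params` in the source.
engine, _, _, _ = deepspeed.initialize(
    model=model,
    model_parameters=model.parameters(),
    config_params={
        "train_batch_size": 8,            # = micro_batch(4) * grad_accum(2) * world_size(1)
        "gradient_accumulation_steps": 2,
        "optimizer": {"type": "Adam", "params": {"lr": 1e-4}},
    },
)

for inputs, labels in batches:
    inputs, labels = inputs.to(engine.device), labels.to(engine.device)
    loss = F.cross_entropy(engine(inputs), labels)  # engine.forward wraps the module
    engine.backward(loss)  # divides the loss by gradient_accumulation_steps internally
    engine.step()          # real optimizer/lr-scheduler step only at accumulation boundaries

engine.save_checkpoint("checkpoints")  # tag defaults to f"global_step{engine.global_steps}"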
notmatthancock/notmatthancock.github.io | [
"abcd91cc7c2653c5243fe96ba2fd681ec03930bb"
] | [
"code/py/test_statsrecorder.py"
] | [
"import numpy as np\nimport statsrecorder as sr\n\nrs = np.random.RandomState(323)\n\nmystats = sr.StatsRecorder()\n\n# Hold all observations in \"data\" to check for correctness.\nndims = 42\ndata = np.empty((0, ndims))\n\nfor i in range(1000):\n nobserv = rs.randint(10,101)\n newdata = rs.randn(nobserv, ndims)\n data = np.vstack((data, newdata))\n\n # Update stats recorder object\n mystats.update(newdata)\n\n # Check stats recorder object is doing its business right.\n assert np.allclose(mystats.mean, data.mean(axis=0))\n assert np.allclose(mystats.std, data.std(axis=0))\n"
] | [
[
"numpy.vstack",
"numpy.random.RandomState",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
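The test in this row drives statsrecorder.StatsRecorder, whose implementation is not included. For orientation, here is a minimal sketch of a class that satisfies the test's interface and its np.allclose checks against data.mean(axis=0) and data.std(axis=0) (population statistics, ddof=0). It uses the standard pairwise mean/variance combination (Chan et al.) and is an assumed reconstruction, not the repository's actual statsrecorder.py.

import numpy as np

class StatsRecorder:
    """Streaming per-column mean/std. Assumed interface reconstructed from
    the test above (update(), .mean, .std); not the repository's own file."""

    def __init__(self):
        self.nobservations = 0
        self.mean = None
        self.std = None

    def update(self, data):
        data = np.atleast_2d(data)
        m = data.shape[0]
        if self.nobservations == 0:
            self.mean = data.mean(axis=0)
            self.std = data.std(axis=0)  # population std (ddof=0), as the test expects
            self.nobservations = m
            return
        n = self.nobservations
        new_mean = data.mean(axis=0)
        new_var = data.var(axis=0)
        old_mean, old_var = self.mean, self.std ** 2
        tot = n + m
        delta = new_mean - old_mean
        # Pairwise combination of two population summaries (Chan et al.):
        #   mean = (n*m1 + m*m2) / (n+m)
        #   var  = (n*v1 + m*v2)/(n+m) + n*m*(m1-m2)^2 / (n+m)^2
        self.mean = old_mean + delta * (m / tot)
        self.std = np.sqrt((n * old_var + m * new_var) / tot
                           + (n * m) * delta ** 2 / tot ** 2)
        self.nobservations = tot

With this sketch in place, the test's loop accumulates roughly 50k rows across 1000 batches, and both assertions hold to floating-point tolerance.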
samtygier-stfc/SScanSS-2 | [
"0df2160c32fdc533f7d391735bd55d524e253f4d"
] | [
"sscanss/ui/dialogs/insert.py"
] | [
"import numpy as np\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom sscanss.config import path_for, settings\nfrom sscanss.core.math import Plane, Matrix33, Vector3, clamp, map_range, trunc, VECTOR_EPS\nfrom sscanss.core.geometry import mesh_plane_intersection\nfrom sscanss.core.util import Primitives, DockFlag, StrainComponents, PointType, PlaneOptions, Attributes\nfrom sscanss.ui.widgets import (FormGroup, FormControl, GraphicsView, GraphicsScene, create_tool_button, FormTitle,\n create_scroll_area, CompareValidator, GraphicsPointItem, Grid, create_icon)\nfrom .managers import PointManager\n\n\nclass InsertPrimitiveDialog(QtWidgets.QWidget):\n \"\"\"Provides UI for typing in measurement/fiducial points\n\n :param primitive: primitive type\n :type primitive: Primitives\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Upper\n\n def __init__(self, primitive, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent_model = self.parent.presenter.model\n self.parent.scenes.switchToSampleScene()\n self.primitive = primitive\n\n self.main_layout = QtWidgets.QVBoxLayout()\n\n self.textboxes = {}\n name = self.parent_model.uniqueKey(self.primitive.value)\n self.mesh_args = {'name': name}\n if self.primitive == Primitives.Tube:\n self.mesh_args.update({'outer_radius': 100.000, 'inner_radius': 50.000, 'height': 200.000})\n elif self.primitive == Primitives.Sphere:\n self.mesh_args.update({'radius': 100.000})\n elif self.primitive == Primitives.Cylinder:\n self.mesh_args.update({'radius': 100.000, 'height': 200.000})\n else:\n self.mesh_args.update({'width': 50.000, 'height': 100.000, 'depth': 200.000})\n\n self.createPrimitiveSwitcher()\n self.createFormInputs()\n\n button_layout = QtWidgets.QHBoxLayout()\n self.create_primitive_button = QtWidgets.QPushButton('Create')\n self.create_primitive_button.clicked.connect(self.createPrimiviteButtonClicked)\n button_layout.addWidget(self.create_primitive_button)\n button_layout.addStretch(1)\n\n self.main_layout.addLayout(button_layout)\n self.main_layout.addStretch(1)\n\n self.setLayout(self.main_layout)\n\n self.title = 'Insert {}'.format(self.primitive.value)\n self.setMinimumWidth(450)\n self.textboxes['name'].setFocus()\n\n def createPrimitiveSwitcher(self):\n switcher_layout = QtWidgets.QHBoxLayout()\n switcher = create_tool_button(style_name='MenuButton', status_tip='Open dialog for a different primitive')\n switcher.setArrowType(QtCore.Qt.DownArrow)\n switcher.setPopupMode(QtWidgets.QToolButton.InstantPopup)\n switcher.setMenu(self.parent.primitives_menu)\n switcher_layout.addStretch(1)\n switcher_layout.addWidget(switcher)\n self.main_layout.addLayout(switcher_layout)\n\n def createFormInputs(self):\n self.form_group = FormGroup()\n for key, value in self.mesh_args.items():\n pretty_label = key.replace('_', ' ').title()\n\n if key == 'name':\n control = FormControl(pretty_label, value, required=True)\n control.form_lineedit.textChanged.connect(self.nameCheck)\n else:\n control = FormControl(pretty_label, value, desc='mm', required=True, number=True)\n control.range(0, None, min_exclusive=True)\n\n self.textboxes[key] = control\n self.form_group.addControl(control)\n\n if self.primitive == Primitives.Tube:\n outer_radius = self.textboxes['outer_radius']\n inner_radius = self.textboxes['inner_radius']\n\n outer_radius.compareWith(inner_radius, CompareValidator.Operator.Greater)\n inner_radius.compareWith(outer_radius, CompareValidator.Operator.Less)\n\n 
self.main_layout.addWidget(self.form_group)\n self.form_group.groupValidation.connect(self.formValidation)\n\n def nameCheck(self, value):\n if self.parent_model.all_sample_key == value:\n self.textboxes['name'].isInvalid(f'\"{self.parent_model.all_sample_key}\" is a reserved name')\n\n def formValidation(self, is_valid):\n if is_valid:\n self.create_primitive_button.setEnabled(True)\n else:\n self.create_primitive_button.setDisabled(True)\n\n def createPrimiviteButtonClicked(self):\n for key, textbox in self.textboxes.items():\n value = textbox.value\n self.mesh_args[key] = value\n\n self.parent.presenter.addPrimitive(self.primitive, self.mesh_args)\n new_name = self.parent_model.uniqueKey(self.primitive.value)\n self.textboxes['name'].value = new_name\n\n\nclass InsertPointDialog(QtWidgets.QWidget):\n \"\"\"Provides UI for typing in measurement/fiducial points\n\n :param point_type: point type\n :type point_type: PointType\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Upper\n\n def __init__(self, point_type, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent_model = parent.presenter.model\n self.parent.scenes.switchToSampleScene()\n self.point_type = point_type\n self.title = 'Add {} Point'.format(point_type.value)\n self.main_layout = QtWidgets.QVBoxLayout()\n unit = 'mm'\n self.form_group = FormGroup()\n self.x_axis = FormControl('X', 0.0, required=True, desc=unit, number=True)\n self.y_axis = FormControl('Y', 0.0, required=True, desc=unit, number=True)\n self.z_axis = FormControl('Z', 0.0, required=True, desc=unit, number=True)\n self.form_group.addControl(self.x_axis)\n self.form_group.addControl(self.y_axis)\n self.form_group.addControl(self.z_axis)\n self.form_group.groupValidation.connect(self.formValidation)\n button_layout = QtWidgets.QHBoxLayout()\n self.execute_button = QtWidgets.QPushButton(self.title)\n self.execute_button.clicked.connect(self.executeButtonClicked)\n button_layout.addWidget(self.execute_button)\n button_layout.addStretch(1)\n\n self.main_layout.addWidget(self.form_group)\n self.main_layout.addLayout(button_layout)\n self.main_layout.addStretch(1)\n self.setLayout(self.main_layout)\n\n self.setMinimumWidth(450)\n\n def formValidation(self, is_valid):\n if is_valid:\n self.execute_button.setEnabled(True)\n else:\n self.execute_button.setDisabled(True)\n\n def executeButtonClicked(self):\n point = [self.x_axis.value, self.y_axis.value, self.z_axis.value]\n self.parent.presenter.addPoints([(point, True)], self.point_type)\n\n\nclass InsertVectorDialog(QtWidgets.QWidget):\n \"\"\"Provides UI for adding measurement vectors using a variety of methods\n\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Upper\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent_model = parent.presenter.model\n self.parent.scenes.switchToSampleScene()\n self.title = 'Add Measurement Vectors'\n self.main_layout = QtWidgets.QVBoxLayout()\n spacing = 10\n self.main_layout.addSpacing(spacing)\n self.main_layout.addWidget(QtWidgets.QLabel('Measurement Point:'))\n self.points_combobox = QtWidgets.QComboBox()\n self.points_combobox.setView(QtWidgets.QListView())\n self.main_layout.addWidget(self.points_combobox)\n self.updatePointList()\n self.main_layout.addSpacing(spacing)\n\n layout = QtWidgets.QHBoxLayout()\n alignment_layout = QtWidgets.QVBoxLayout()\n alignment_layout.addWidget(QtWidgets.QLabel('Alignment:'))\n self.alignment_combobox = 
QtWidgets.QComboBox()\n self.alignment_combobox.setView(QtWidgets.QListView())\n self.alignment_combobox.setInsertPolicy(QtWidgets.QComboBox.InsertAtCurrent)\n self.updateAlignment()\n self.alignment_combobox.activated.connect(self.addNewAlignment)\n self.alignment_combobox.currentIndexChanged.connect(self.changeRenderedAlignment)\n alignment_layout.addWidget(self.alignment_combobox)\n alignment_layout.addSpacing(spacing)\n layout.addLayout(alignment_layout)\n\n self.detector_combobox = QtWidgets.QComboBox()\n self.detector_combobox.setView(QtWidgets.QListView())\n self.detector_combobox.addItems(list(self.parent_model.instrument.detectors.keys()))\n if len(self.parent_model.instrument.detectors) > 1:\n detector_layout = QtWidgets.QVBoxLayout()\n detector_layout.addWidget(QtWidgets.QLabel('Detector:'))\n detector_layout.addWidget(self.detector_combobox)\n size = self.detector_combobox.iconSize()\n self.detector_combobox.setItemIcon(0, create_icon(settings.value(settings.Key.Vector_1_Colour), size))\n self.detector_combobox.setItemIcon(1, create_icon(settings.value(settings.Key.Vector_2_Colour), size))\n detector_layout.addSpacing(spacing)\n layout.addSpacing(spacing)\n layout.addLayout(detector_layout)\n\n self.main_layout.addLayout(layout)\n\n self.main_layout.addWidget(QtWidgets.QLabel('Strain Component:'))\n self.component_combobox = QtWidgets.QComboBox()\n self.component_combobox.setView(QtWidgets.QListView())\n strain_components = [s.value for s in StrainComponents]\n self.component_combobox.addItems(strain_components)\n self.component_combobox.currentTextChanged.connect(self.toggleKeyInBox)\n self.main_layout.addWidget(self.component_combobox)\n self.main_layout.addSpacing(spacing)\n\n button_layout = QtWidgets.QHBoxLayout()\n self.execute_button = QtWidgets.QPushButton(self.title)\n self.execute_button.clicked.connect(self.executeButtonClicked)\n button_layout.addWidget(self.execute_button)\n button_layout.addStretch(1)\n\n self.createKeyInBox()\n\n self.reverse_checkbox = QtWidgets.QCheckBox('Reverse Direction of Vector')\n self.main_layout.addWidget(self.reverse_checkbox)\n self.main_layout.addSpacing(spacing)\n\n self.main_layout.addLayout(button_layout)\n self.main_layout.addStretch(1)\n self.setLayout(self.main_layout)\n self.parent_model.measurement_points_changed.connect(self.updatePointList)\n self.parent_model.measurement_vectors_changed.connect(self.updateAlignment)\n self.parent.scenes.rendered_alignment_changed.connect(self.alignment_combobox.setCurrentIndex)\n self.setMinimumWidth(450)\n\n def updatePointList(self):\n self.points_combobox.clear()\n point_list = ['All Points']\n point_list.extend(['{}'.format(i+1) for i in range(self.parent_model.measurement_points.size)])\n self.points_combobox.addItems(point_list)\n\n def updateAlignment(self):\n align_count = self.parent_model.measurement_vectors.shape[2]\n if align_count != self.alignment_combobox.count() - 1:\n self.alignment_combobox.clear()\n alignment_list = ['{}'.format(i + 1) for i in range(align_count)]\n alignment_list.append('Add New...')\n self.alignment_combobox.addItems(alignment_list)\n\n self.alignment_combobox.setCurrentIndex(self.parent.scenes.rendered_alignment)\n\n def addNewAlignment(self, index):\n if index == self.alignment_combobox.count() - 1:\n self.alignment_combobox.insertItem(index, '{}'.format(index + 1))\n self.alignment_combobox.setCurrentIndex(index)\n\n def changeRenderedAlignment(self, index):\n align_count = self.parent_model.measurement_vectors.shape[2]\n if 0 <= index < 
align_count:\n self.parent.scenes.changeRenderedAlignment(index)\n elif index >= align_count:\n self.parent.scenes.changeVisibility(Attributes.Vectors, False)\n\n def toggleKeyInBox(self, selected_text):\n strain_component = StrainComponents(selected_text)\n if strain_component == StrainComponents.custom:\n self.key_in_box.setVisible(True)\n self.form_group.validateGroup()\n else:\n self.key_in_box.setVisible(False)\n self.execute_button.setEnabled(True)\n\n def createKeyInBox(self):\n self.key_in_box = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout()\n\n self.form_group = FormGroup(FormGroup.Layout.Horizontal)\n self.x_axis = FormControl('X', 1.0, required=True, number=True, decimals=7)\n self.x_axis.range(-1.0, 1.0)\n self.y_axis = FormControl('Y', 0.0, required=True, number=True, decimals=7)\n self.y_axis.range(-1.0, 1.0)\n self.z_axis = FormControl('Z', 0.0, required=True, number=True, decimals=7)\n self.z_axis.range(-1.0, 1.0)\n self.form_group.addControl(self.x_axis)\n self.form_group.addControl(self.y_axis)\n self.form_group.addControl(self.z_axis)\n self.form_group.groupValidation.connect(self.formValidation)\n\n layout.addWidget(self.form_group)\n self.key_in_box.setLayout(layout)\n self.main_layout.addWidget(self.key_in_box)\n self.toggleKeyInBox(self.component_combobox.currentText())\n\n def formValidation(self, is_valid):\n self.execute_button.setDisabled(True)\n if is_valid:\n if np.linalg.norm([self.x_axis.value, self.y_axis.value, self.z_axis.value]) > VECTOR_EPS:\n self.x_axis.validation_label.setText('')\n self.execute_button.setEnabled(True)\n else:\n self.x_axis.validation_label.setText('Bad Normal')\n\n def executeButtonClicked(self):\n points = self.points_combobox.currentIndex() - 1\n\n selected_text = self.component_combobox.currentText()\n strain_component = StrainComponents(selected_text)\n\n alignment = self.alignment_combobox.currentIndex()\n detector = self.detector_combobox.currentIndex()\n check_state = self.reverse_checkbox.checkState()\n reverse = True if check_state == QtCore.Qt.Checked else False\n\n if strain_component == StrainComponents.custom:\n vector = [self.x_axis.value, self.y_axis.value, self.z_axis.value]\n else:\n vector = None\n\n self.parent.presenter.addVectors(points, strain_component, alignment, detector,\n key_in=vector, reverse=reverse)\n # New vectors are drawn by the scene manager after function ends\n self.parent.scenes._rendered_alignment = alignment\n\n def closeEvent(self, event):\n self.parent.scenes.changeRenderedAlignment(0)\n event.accept()\n\n\nclass PickPointDialog(QtWidgets.QWidget):\n \"\"\"Provides UI for selecting measurement points on a cross section of the sample\n\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Full\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent_model = parent.presenter.model\n self.parent.scenes.switchToSampleScene()\n self.title = 'Add Measurement Points Graphically'\n self.setMinimumWidth(500)\n\n self.plane_offset_range = (-1., 1.)\n self.slider_range = (-10000000, 10000000)\n\n self.sample_scale = 20\n self.path_pen = QtGui.QPen(QtGui.QColor(255, 0, 0), 0)\n self.point_pen = QtGui.QPen(QtGui.QColor(200, 0, 0), 0)\n\n self.main_layout = QtWidgets.QVBoxLayout()\n self.setLayout(self.main_layout)\n button_layout = QtWidgets.QHBoxLayout()\n self.help_button = create_tool_button(tooltip='Help', style_name='ToolButton',\n status_tip='Display shortcuts for the cross-section view',\n 
icon_path=path_for('question.png'))\n self.help_button.clicked.connect(self.showHelp)\n\n self.reset_button = create_tool_button(tooltip='Reset View', style_name='ToolButton',\n status_tip='Reset camera transformation of the cross-section view',\n icon_path=path_for('refresh.png'))\n self.execute_button = QtWidgets.QPushButton('Add Points')\n self.execute_button.clicked.connect(self.addPoints)\n button_layout.addWidget(self.help_button)\n button_layout.addWidget(self.reset_button)\n button_layout.addStretch(1)\n button_layout.addWidget(self.execute_button)\n self.main_layout.addLayout(button_layout)\n\n self.splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)\n self.splitter.setChildrenCollapsible(False)\n self.main_layout.addWidget(self.splitter)\n self.createGraphicsView()\n self.reset_button.clicked.connect(self.view.reset)\n self.createControlPanel()\n\n self.prepareMesh()\n self.parent_model.sample_changed.connect(self.prepareMesh)\n self.parent_model.measurement_points_changed.connect(self.updateCrossSection)\n self.initializing = True\n\n def showEvent(self, event):\n if self.initializing:\n self.view.fitInView(self.view.anchor, QtCore.Qt.KeepAspectRatio)\n self.initializing = False\n\n super().showEvent(event)\n\n def closeEvent(self, event):\n self.parent.scenes.removePlane()\n event.accept()\n\n def prepareMesh(self):\n self.mesh = None\n samples = self.parent_model.sample\n for _, sample in samples.items():\n if self.mesh is None:\n self.mesh = sample.copy()\n else:\n self.mesh.append(sample)\n\n self.scene.clear()\n self.tabs.setEnabled(self.mesh is not None)\n if self.mesh is not None:\n self.setPlane(self.plane_combobox.currentText())\n else:\n self.parent.scenes.removePlane()\n self.view.reset()\n\n def updateStatusBar(self, point):\n if self.view.rect().contains(point):\n transform = self.view.scene_transform.inverted()[0]\n scene_pt = transform.map(self.view.mapToScene(point)) / self.sample_scale\n world_pt = [scene_pt.x(), scene_pt.y(), -self.old_distance] @ self.matrix.transpose()\n cursor_text = f'X: {world_pt[0]:.3f} Y: {world_pt[1]:.3f} Z: {world_pt[2]:.3f}'\n self.parent.cursor_label.setText(cursor_text)\n else:\n self.parent.cursor_label.clear()\n\n def createGraphicsView(self):\n self.scene = GraphicsScene(self.sample_scale, self)\n self.view = GraphicsView(self.scene)\n self.view.mouse_moved.connect(self.updateStatusBar)\n self.view.setMinimumHeight(350)\n self.splitter.addWidget(self.view)\n\n def createControlPanel(self):\n self.tabs = QtWidgets.QTabWidget()\n self.tabs.setMinimumHeight(250)\n self.tabs.setTabPosition(QtWidgets.QTabWidget.South)\n self.splitter.addWidget(self.tabs)\n\n self.createPlaneTab()\n self.createSelectionToolsTab()\n self.createGridOptionsTab()\n point_manager = PointManager(PointType.Measurement, self.parent)\n self.tabs.addTab(create_scroll_area(point_manager), 'Point Manager')\n\n def createPlaneTab(self):\n layout = QtWidgets.QVBoxLayout()\n layout.addWidget(QtWidgets.QLabel('Specify Plane:'))\n self.plane_combobox = QtWidgets.QComboBox()\n self.plane_combobox.setView(QtWidgets.QListView())\n self.plane_combobox.addItems([p.value for p in PlaneOptions])\n self.plane_combobox.currentTextChanged.connect(self.setPlane)\n self.createCustomPlaneBox()\n layout.addWidget(self.plane_combobox)\n layout.addWidget(self.custom_plane_widget)\n layout.addSpacing(20)\n\n slider_layout = QtWidgets.QHBoxLayout()\n slider_layout.addWidget(QtWidgets.QLabel('Plane Distance from Origin (mm):'))\n self.plane_lineedit = QtWidgets.QLineEdit()\n validator = 
QtGui.QDoubleValidator(self.plane_lineedit)\n validator.setNotation(QtGui.QDoubleValidator.StandardNotation)\n validator.setDecimals(3)\n self.plane_lineedit.setValidator(validator)\n self.plane_lineedit.textEdited.connect(self.updateSlider)\n self.plane_lineedit.editingFinished.connect(self.movePlane)\n slider_layout.addStretch(1)\n slider_layout.addWidget(self.plane_lineedit)\n layout.addLayout(slider_layout)\n self.plane_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self.plane_slider.setMinimum(self.slider_range[0])\n self.plane_slider.setMaximum(self.slider_range[1])\n self.plane_slider.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.plane_slider.setSingleStep(1)\n self.plane_slider.sliderMoved.connect(self.updateLineEdit)\n self.plane_slider.sliderReleased.connect(self.movePlane)\n layout.addWidget(self.plane_slider)\n layout.addStretch(1)\n\n plane_tab = QtWidgets.QWidget()\n plane_tab.setLayout(layout)\n self.tabs.addTab(create_scroll_area(plane_tab), 'Define Plane')\n\n def createSelectionToolsTab(self):\n layout = QtWidgets.QVBoxLayout()\n selector_layout = QtWidgets.QHBoxLayout()\n selector_layout.addWidget(QtWidgets.QLabel('Select Geometry of Points: '))\n self.button_group = QtWidgets.QButtonGroup()\n self.button_group.buttonClicked[int].connect(self.changeSceneMode)\n\n self.object_selector = create_tool_button(checkable=True, checked=True, tooltip='Select Points',\n status_tip='Select movable points from the cross-section view',\n style_name='MidToolButton', icon_path=path_for('select.png'))\n self.point_selector = create_tool_button(checkable=True, tooltip='Draw a Point',\n status_tip='Draw a single point at the selected position',\n style_name='MidToolButton', icon_path=path_for('point.png'))\n self.line_selector = create_tool_button(checkable=True, tooltip='Draw Points on Line',\n status_tip='Draw equally spaced points on the selected line',\n style_name='MidToolButton', icon_path=path_for('line_tool.png'))\n self.area_selector = create_tool_button(checkable=True, tooltip='Draw Points on Area',\n status_tip='Draw a grid of points on the selected area',\n style_name='MidToolButton', icon_path=path_for('area_tool.png'))\n\n self.button_group.addButton(self.object_selector, GraphicsScene.Mode.Select.value)\n self.button_group.addButton(self.point_selector, GraphicsScene.Mode.Draw_point.value)\n self.button_group.addButton(self.line_selector, GraphicsScene.Mode.Draw_line.value)\n self.button_group.addButton(self.area_selector, GraphicsScene.Mode.Draw_area.value)\n selector_layout.addWidget(self.object_selector)\n selector_layout.addWidget(self.point_selector)\n selector_layout.addWidget(self.line_selector)\n selector_layout.addWidget(self.area_selector)\n selector_layout.addStretch(1)\n\n self.createLineToolWidget()\n self.createAreaToolWidget()\n\n layout.addLayout(selector_layout)\n layout.addWidget(self.line_tool_widget)\n layout.addWidget(self.area_tool_widget)\n layout.addStretch(1)\n\n select_tab = QtWidgets.QWidget()\n select_tab.setLayout(layout)\n self.tabs.addTab(create_scroll_area(select_tab), 'Selection Tools')\n\n def createGridOptionsTab(self):\n layout = QtWidgets.QVBoxLayout()\n self.show_grid_checkbox = QtWidgets.QCheckBox('Show Grid')\n self.show_grid_checkbox.stateChanged.connect(self.showGrid)\n self.snap_to_grid_checkbox = QtWidgets.QCheckBox('Snap Selection to Grid')\n self.snap_to_grid_checkbox.stateChanged.connect(self.snapToGrid)\n self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)\n layout.addWidget(self.show_grid_checkbox)\n 
layout.addWidget(self.snap_to_grid_checkbox)\n self.createGridWidget()\n layout.addWidget(self.grid_widget)\n layout.addStretch(1)\n\n grid_tab = QtWidgets.QWidget()\n grid_tab.setLayout(layout)\n self.tabs.addTab(create_scroll_area(grid_tab), 'Grid Options')\n\n def createCustomPlaneBox(self):\n self.custom_plane_widget = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout()\n\n self.form_group = FormGroup(FormGroup.Layout.Horizontal)\n self.x_axis = FormControl('X', 1.0, required=True, number=True)\n self.x_axis.range(-1.0, 1.0)\n self.y_axis = FormControl('Y', 0.0, required=True, number=True)\n self.y_axis.range(-1.0, 1.0)\n self.z_axis = FormControl('Z', 0.0, required=True, number=True)\n self.z_axis.range(-1.0, 1.0)\n self.form_group.addControl(self.x_axis)\n self.form_group.addControl(self.y_axis)\n self.form_group.addControl(self.z_axis)\n self.form_group.groupValidation.connect(self.setCustomPlane)\n\n layout.addWidget(self.form_group)\n self.custom_plane_widget.setLayout(layout)\n\n def createLineToolWidget(self):\n self.line_tool_widget = QtWidgets.QWidget(self)\n layout = QtWidgets.QHBoxLayout()\n layout.setContentsMargins(0, 20, 0, 0)\n layout.addWidget(QtWidgets.QLabel('Number of Points: '))\n self.line_point_count_spinbox = QtWidgets.QSpinBox()\n self.line_point_count_spinbox.setValue(self.scene.line_tool_size)\n self.line_point_count_spinbox.setRange(2, 100)\n self.line_point_count_spinbox.valueChanged.connect(self.scene.setLineToolSize)\n\n layout.addWidget(self.line_point_count_spinbox)\n self.line_tool_widget.setVisible(False)\n self.line_tool_widget.setLayout(layout)\n\n def createAreaToolWidget(self):\n self.area_tool_widget = QtWidgets.QWidget(self)\n layout = QtWidgets.QHBoxLayout()\n layout.setContentsMargins(0, 20, 0, 0)\n layout.addWidget(QtWidgets.QLabel('Number of Points: '))\n self.area_x_spinbox = QtWidgets.QSpinBox()\n self.area_x_spinbox.setValue(self.scene.area_tool_size[0])\n self.area_x_spinbox.setRange(2, 100)\n self.area_y_spinbox = QtWidgets.QSpinBox()\n self.area_y_spinbox.setValue(self.scene.area_tool_size[1])\n self.area_y_spinbox.setRange(2, 100)\n\n stretch_factor = 3\n layout.addStretch(1)\n layout.addWidget(QtWidgets.QLabel('X: '))\n self.area_x_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),\n self.area_y_spinbox.value()))\n layout.addWidget(self.area_x_spinbox, stretch_factor)\n layout.addStretch(1)\n layout.addWidget(QtWidgets.QLabel('Y: '))\n self.area_y_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),\n self.area_y_spinbox.value()))\n layout.addWidget(self.area_y_spinbox, stretch_factor)\n self.area_tool_widget.setVisible(False)\n self.area_tool_widget.setLayout(layout)\n\n def createGridWidget(self):\n self.grid_widget = QtWidgets.QWidget(self)\n main_layout = QtWidgets.QVBoxLayout()\n main_layout.setContentsMargins(0, 20, 0, 0)\n layout = QtWidgets.QHBoxLayout()\n layout.addWidget(QtWidgets.QLabel('Grid Type: '))\n grid_combobox = QtWidgets.QComboBox()\n grid_combobox.setView(QtWidgets.QListView())\n grid_combobox.addItems([g.value for g in Grid.Type])\n grid_combobox.currentTextChanged.connect(lambda value: self.setGridType(Grid.Type(value)))\n layout.addWidget(grid_combobox)\n main_layout.addLayout(layout)\n main_layout.addSpacing(20)\n\n layout = QtWidgets.QHBoxLayout()\n layout.addWidget(QtWidgets.QLabel('Grid Size: '))\n self.grid_x_label = QtWidgets.QLabel('')\n self.grid_x_spinbox = QtWidgets.QDoubleSpinBox()\n 
self.grid_x_spinbox.setDecimals(1)\n self.grid_x_spinbox.setSingleStep(0.1)\n self.grid_x_spinbox.valueChanged.connect(self.changeGridSize)\n self.grid_y_label = QtWidgets.QLabel('')\n self.grid_y_spinbox = QtWidgets.QDoubleSpinBox()\n self.grid_y_spinbox.setDecimals(1)\n self.grid_y_spinbox.setSingleStep(0.1)\n self.grid_y_spinbox.valueChanged.connect(self.changeGridSize)\n stretch_factor = 3\n layout.addStretch(1)\n layout.addWidget(self.grid_x_label)\n layout.addWidget(self.grid_x_spinbox, stretch_factor)\n layout.addStretch(1)\n layout.addWidget(self.grid_y_label)\n layout.addWidget(self.grid_y_spinbox, stretch_factor)\n main_layout.addLayout(layout)\n self.setGridType(self.view.grid.type)\n self.grid_widget.setVisible(False)\n self.grid_widget.setLayout(main_layout)\n\n def changeGridSize(self):\n if self.view.grid.type == Grid.Type.Box:\n grid_x = int(self.grid_x_spinbox.value() * self.sample_scale)\n grid_y = int(self.grid_y_spinbox.value() * self.sample_scale)\n else:\n grid_x = int(self.grid_x_spinbox.value() * self.sample_scale)\n grid_y = self.grid_y_spinbox.value()\n self.view.setGridSize((grid_x, grid_y))\n\n def setGridType(self, grid_type):\n self.view.setGridType(grid_type)\n size = self.view.grid.size\n if grid_type == Grid.Type.Box:\n self.grid_x_label.setText('X (mm): ')\n self.grid_y_label.setText('Y (mm): ')\n self.grid_x_spinbox.setValue(size[0])\n self.grid_y_spinbox.setValue(size[1])\n self.grid_x_spinbox.setRange(0.1, 1000)\n self.grid_y_spinbox.setRange(0.1, 1000)\n else:\n self.grid_x_label.setText('Radius (mm): ')\n self.grid_y_label.setText('Angle (degree): ')\n self.grid_x_spinbox.setValue(size[0])\n self.grid_y_spinbox.setValue(size[1])\n self.grid_x_spinbox.setRange(0.1, 1000)\n self.grid_y_spinbox.setRange(0.1, 360)\n\n def changeSceneMode(self, button_id):\n self.scene.mode = GraphicsScene.Mode(button_id)\n self.line_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_line)\n self.area_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_area)\n\n def showHelp(self):\n self.view.show_help = False if self.view.has_foreground else True\n self.scene.update()\n\n def showGrid(self, state):\n self.view.show_grid = True if state == QtCore.Qt.Checked else False\n self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)\n self.grid_widget.setVisible(self.view.show_grid)\n self.scene.update()\n\n def snapToGrid(self, state):\n self.view.snap_to_grid = True if state == QtCore.Qt.Checked else False\n\n def updateSlider(self, value):\n if not self.plane_lineedit.hasAcceptableInput():\n return\n\n new_distance = clamp(float(value), *self.plane_offset_range)\n slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, new_distance))\n self.plane_slider.setValue(slider_value)\n\n offset = new_distance - self.old_distance\n self.parent.scenes.movePlane(offset * self.plane.normal)\n self.old_distance = new_distance\n\n def updateLineEdit(self, value):\n new_distance = trunc(map_range(*self.slider_range, *self.plane_offset_range, value), 3)\n self.plane_lineedit.setText('{:.3f}'.format(new_distance))\n\n offset = new_distance - self.old_distance\n self.parent.scenes.movePlane(offset * self.plane.normal)\n self.old_distance = new_distance\n\n def movePlane(self):\n distance = clamp(float(self.plane_lineedit.text()), *self.plane_offset_range)\n self.plane_lineedit.setText('{:.3f}'.format(distance))\n point = distance * self.plane.normal\n self.plane = Plane(self.plane.normal, point)\n self.updateCrossSection()\n\n def 
setCustomPlane(self, is_valid):\n if is_valid:\n normal = np.array([self.x_axis.value, self.y_axis.value, self.z_axis.value])\n try:\n self.initializePlane(normal, self.mesh.bounding_box.center)\n except ValueError:\n self.x_axis.validation_label.setText('Bad Normal')\n\n def setPlane(self, selected_text):\n if selected_text == PlaneOptions.Custom.value:\n self.custom_plane_widget.setVisible(True)\n self.form_group.validateGroup()\n return\n else:\n self.custom_plane_widget.setVisible(False)\n\n if selected_text == PlaneOptions.XY.value:\n plane_normal = np.array([0., 0., 1.])\n elif selected_text == PlaneOptions.XZ.value:\n plane_normal = np.array([0., 1., 0.])\n else:\n plane_normal = np.array([1., 0., 0.])\n\n self.initializePlane(plane_normal, self.mesh.bounding_box.center)\n\n def initializePlane(self, plane_normal, plane_point):\n self.plane = Plane(plane_normal, plane_point)\n plane_size = self.mesh.bounding_box.radius\n\n self.parent.scenes.drawPlane(self.plane, 2 * plane_size, 2 * plane_size)\n distance = self.plane.distanceFromOrigin()\n self.plane_offset_range = (distance - plane_size, distance + plane_size)\n slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, distance))\n self.plane_slider.setValue(slider_value)\n self.plane_lineedit.setText('{:.3f}'.format(distance))\n self.old_distance = distance\n # inverted the normal so that the y-axis is flipped\n self.matrix = self.__lookAt(-Vector3(self.plane.normal))\n self.view.resetTransform()\n self.updateCrossSection()\n\n def updateCrossSection(self):\n self.scene.clear()\n segments = mesh_plane_intersection(self.mesh, self.plane)\n if len(segments) == 0:\n return\n segments = np.array(segments)\n\n item = QtWidgets.QGraphicsPathItem()\n cross_section_path = QtGui.QPainterPath()\n rotated_segments = self.sample_scale * (segments @ self.matrix)\n for i in range(0, rotated_segments.shape[0], 2):\n start = rotated_segments[i, :]\n cross_section_path.moveTo(start[0], start[1])\n end = rotated_segments[i + 1, :]\n cross_section_path.lineTo(end[0], end[1])\n item.setPath(cross_section_path)\n item.setPen(self.path_pen)\n item.setTransform(self.view.scene_transform)\n self.scene.addItem(item)\n rect = item.boundingRect()\n anchor = rect.center()\n\n ab = self.plane.point - self.parent_model.measurement_points.points\n d = np.einsum('ij,ij->i', np.expand_dims(self.plane.normal, axis=0), ab)\n index = np.where(np.abs(d) < VECTOR_EPS)[0]\n rotated_points = self.parent_model.measurement_points.points[index, :]\n rotated_points = rotated_points @ self.matrix\n\n for i, p in zip(index, rotated_points):\n point = QtCore.QPointF(p[0], p[1]) * self.sample_scale\n point = self.view.scene_transform.map(point)\n item = GraphicsPointItem(point, size=self.scene.point_size)\n item.setToolTip(f'Point {i + 1}')\n item.fixed = True\n item.makeControllable(self.scene.mode == GraphicsScene.Mode.Select)\n item.setPen(self.point_pen)\n self.scene.addItem(item)\n rect = rect.united(item.boundingRect().translated(point))\n\n # calculate new rectangle that encloses original rect with a different anchor\n rect.united(rect.translated(anchor - rect.center()))\n self.view.setSceneRect(rect)\n self.view.fitInView(rect, QtCore.Qt.KeepAspectRatio)\n self.view.anchor = rect\n\n @staticmethod\n def __lookAt(forward):\n rot_matrix = Matrix33.identity()\n up = Vector3([0., -1., 0.]) if -VECTOR_EPS < forward[1] < VECTOR_EPS else Vector3([0., 0., 1.])\n left = up ^ forward\n left.normalize()\n up = forward ^ left\n\n rot_matrix.c1[:3] = left\n 
rot_matrix.c2[:3] = up\n rot_matrix.c3[:3] = forward\n\n return rot_matrix\n\n def addPoints(self):\n if len(self.scene.items()) < 2:\n return\n\n points_2d = []\n transform = self.view.scene_transform.inverted()[0]\n for item in self.scene.items():\n if isinstance(item, GraphicsPointItem) and not item.fixed:\n pos = transform.map(item.pos()) / self.sample_scale\n # negate distance due to inverted normal when creating matrix\n points_2d.append([pos.x(), pos.y(), -self.old_distance])\n self.scene.removeItem(item)\n\n if not points_2d:\n return\n\n points = points_2d[::-1] @ self.matrix.transpose()\n enabled = [True] * points.shape[0]\n self.parent.presenter.addPoints(list(zip(points, enabled)), PointType.Measurement, False)\n\n\nclass AlignSample(QtWidgets.QWidget):\n \"\"\"Provides UI for aligning sample on instrument with 6D pose\n\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Upper\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent.scenes.switchToInstrumentScene()\n self.title = 'Align Sample with 6D pose'\n self.setMinimumWidth(450)\n\n self.main_layout = QtWidgets.QVBoxLayout()\n self.setLayout(self.main_layout)\n self.main_layout.addSpacing(20)\n self.main_layout.addWidget(FormTitle('Create Transformation for Alignment'))\n self.main_layout.addSpacing(10)\n\n self.main_layout.addWidget(QtWidgets.QLabel('Translation along the X, Y, and Z axis (mm):'))\n self.position_form_group = FormGroup(FormGroup.Layout.Horizontal)\n self.x_position = FormControl('X', 0.0, required=True, number=True)\n self.y_position = FormControl('Y', 0.0, required=True, number=True)\n self.z_position = FormControl('Z', 0.0, required=True, number=True)\n self.position_form_group.addControl(self.x_position)\n self.position_form_group.addControl(self.y_position)\n self.position_form_group.addControl(self.z_position)\n self.position_form_group.groupValidation.connect(self.formValidation)\n self.main_layout.addWidget(self.position_form_group)\n\n self.main_layout.addWidget(QtWidgets.QLabel('Rotation around the X, Y, and Z axis (degrees):'))\n self.orientation_form_group = FormGroup(FormGroup.Layout.Horizontal)\n self.x_rotation = FormControl('X', 0.0, required=True, number=True)\n self.x_rotation.range(-360.0, 360.0)\n self.y_rotation = FormControl('Y', 0.0, required=True, number=True)\n self.y_rotation.range(-360.0, 360.0)\n self.z_rotation = FormControl('Z', 0.0, required=True, number=True)\n self.z_rotation.range(-360.0, 360.0)\n self.orientation_form_group.addControl(self.x_rotation)\n self.orientation_form_group.addControl(self.y_rotation)\n self.orientation_form_group.addControl(self.z_rotation)\n self.orientation_form_group.groupValidation.connect(self.formValidation)\n self.main_layout.addWidget(self.orientation_form_group)\n\n button_layout = QtWidgets.QHBoxLayout()\n self.execute_button = QtWidgets.QPushButton('Align Sample')\n self.execute_button.clicked.connect(self.executeButtonClicked)\n button_layout.addWidget(self.execute_button)\n button_layout.addStretch(1)\n self.main_layout.addLayout(button_layout)\n self.main_layout.addStretch(1)\n\n def formValidation(self):\n if self.position_form_group.valid and self.orientation_form_group.valid:\n self.execute_button.setEnabled(True)\n else:\n self.execute_button.setDisabled(True)\n\n def executeButtonClicked(self):\n pose = [self.x_position.value, self.y_position.value, self.z_position.value,\n self.z_rotation.value, self.y_rotation.value, self.x_rotation.value]\n\n 
self.parent.presenter.alignSampleWithPose(pose)\n"
] | [
[
"numpy.abs",
"numpy.array",
"numpy.expand_dims",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
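For context on the numpy calls recorded for this row (`numpy.abs`, `numpy.expand_dims`, `numpy.linalg.norm`): the widget code filters stored measurement points down to those lying on the current cutting plane by comparing each point's signed distance against a small tolerance. A minimal standalone sketch of that test follows; `VECTOR_EPS` is a name taken from the snippet, but its value here is assumed for illustration.

import numpy as np

VECTOR_EPS = 1e-7  # tolerance; the snippet names VECTOR_EPS, this value is illustrative

def points_on_plane(points, normal, plane_point, eps=VECTOR_EPS):
    """Keep only the points whose signed distance from the plane is below eps."""
    d = (plane_point - points) @ normal   # per-point signed distance along the unit normal
    return points[np.abs(d) < eps]

pts = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 1.0]])
print(points_on_plane(pts, np.array([0.0, 0.0, 1.0]), np.zeros(3)))
# keeps the two points with z == 0, i.e. those on the z = 0 plane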
cnheider/onnx | [
"f5bb59aa0f8b18b602763abe47d1d24d0d54b197",
"781545783a4e2bbbda48fc64318fb2c6d8bbb3cc",
"f5bb59aa0f8b18b602763abe47d1d24d0d54b197",
"8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933",
"8e9c7d57f7c5aa6f6eb7ee7abb0ba2a243781933"
] | [
"onnx/backend/test/case/node/batchnorm.py",
"onnx/backend/test/case/base.py",
"onnx/backend/test/case/node/isnan.py",
"onnx/backend/test/case/node/max.py",
"onnx/backend/test/case/node/unsqueeze.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass BatchNormalization(Base):\n\n @staticmethod\n def export(): # type: () -> None\n def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n mean = mean.reshape(-1, *dim_ones)\n var = var.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n # input size: (1, 2, 1, 3)\n x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\n s = np.array([1.0, 1.5]).astype(np.float32)\n bias = np.array([0, 1]).astype(np.float32)\n mean = np.array([0, 3]).astype(np.float32)\n var = np.array([1, 1.5]).astype(np.float32)\n y = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\n node = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n )\n\n # output size: (1, 2, 1, 3)\n expect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_example')\n\n # input size: (2, 3, 4, 5)\n x = np.random.randn(2, 3, 4, 5).astype(np.float32)\n s = np.random.randn(3).astype(np.float32)\n bias = np.random.randn(3).astype(np.float32)\n mean = np.random.randn(3).astype(np.float32)\n var = np.random.rand(3).astype(np.float32)\n epsilon = 1e-2\n y = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\n node = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n epsilon=epsilon,\n )\n\n # output size: (2, 3, 4, 5)\n expect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_epsilon')\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nimport inspect\nfrom textwrap import dedent\nfrom typing import Dict, Text, List, Tuple, Type, Sequence, Any\n\nimport numpy as np # type: ignore\nfrom six import add_metaclass\n\n\ndef process_snippet(op_name, name, export): # type: (Text, Text, Any) -> Tuple[Text, Text]\n snippet_name = name[len('export_'):] or op_name.lower()\n source_code = dedent(inspect.getsource(export))\n # remove the function signature line\n lines = source_code.splitlines()\n assert lines[0] == '@staticmethod'\n assert lines[1].startswith('def export')\n return snippet_name, dedent(\"\\n\".join(lines[2:]))\n\n\nSnippets = defaultdict(list) # type: Dict[Text, List[Tuple[Text, Text]]]\n\n\nclass _Exporter(type):\n exports = defaultdict(list) # type: Dict[Text, List[Tuple[Text, Text]]]\n\n def __init__(cls, name, bases, dct): # type: (str, Tuple[Type[Any], ...], Dict[str, Any]) -> None\n for k, v in dct.items():\n if k.startswith('export'):\n if not isinstance(v, staticmethod):\n raise ValueError(\n 'Only staticmethods could be named as export.*')\n export = getattr(cls, k)\n Snippets[name].append(process_snippet(name, k, export))\n # export functions should call expect and so populate\n # TestCases\n np.random.seed(seed=0)\n export()\n super(_Exporter, cls).__init__(name, bases, dct)\n\n\n@add_metaclass(_Exporter)\nclass Base(object):\n pass\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass IsNaN(Base):\n\n @staticmethod\n def export(): # type: () -> None\n node = onnx.helper.make_node(\n 'IsNaN',\n inputs=['x'],\n outputs=['y'],\n )\n\n x = np.array([3.0, np.nan, 4.0, np.nan], dtype=np.float32)\n y = np.isnan(x)\n expect(node, inputs=[x], outputs=[y], name='test_isnan')\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\nfrom ..utils import all_numeric_dtypes\n\n\nclass Max(Base):\n\n @staticmethod\n def export(): # type: () -> None\n data_0 = np.array([3, 2, 1]).astype(np.float32)\n data_1 = np.array([1, 4, 4]).astype(np.float32)\n data_2 = np.array([2, 5, 3]).astype(np.float32)\n result = np.array([3, 5, 4]).astype(np.float32)\n node = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_max_example')\n\n node = onnx.helper.make_node(\n 'Max',\n inputs=['data_0'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0], outputs=[data_0],\n name='test_max_one_input')\n\n result = np.maximum(data_0, data_1)\n node = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_max_two_inputs')\n\n @staticmethod\n def export_max_all_numeric_types(): # type: () -> None\n for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_max_{0}'.format(np.dtype(op_dtype).name))\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass Unsqueeze(Base):\n\n @staticmethod\n def export_unsqueeze_one_axis(): # type: () -> None\n x = np.random.randn(3, 4, 5).astype(np.float32)\n\n for i in range(x.ndim):\n node = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[i],\n )\n y = np.expand_dims(x, axis=i)\n\n expect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_axis_' + str(i))\n\n @staticmethod\n def export_unsqueeze_two_axes(): # type: () -> None\n x = np.random.randn(3, 4, 5).astype(np.float32)\n\n node = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[1, 4],\n )\n y = np.expand_dims(x, axis=1)\n y = np.expand_dims(y, axis=4)\n\n expect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_two_axes')\n\n @staticmethod\n def export_unsqueeze_three_axes(): # type: () -> None\n x = np.random.randn(3, 4, 5).astype(np.float32)\n\n node = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[2, 4, 5],\n )\n y = np.expand_dims(x, axis=2)\n y = np.expand_dims(y, axis=4)\n y = np.expand_dims(y, axis=5)\n\n expect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_three_axes')\n\n @staticmethod\n def export_unsqueeze_unsorted_axes(): # type: () -> None\n x = np.random.randn(3, 4, 5).astype(np.float32)\n\n node = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[5, 4, 2],\n )\n y = np.expand_dims(x, axis=2)\n y = np.expand_dims(y, axis=4)\n y = np.expand_dims(y, axis=5)\n\n expect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_unsorted_axes')\n\n @staticmethod\n def export_unsqueeze_negative_axes(): # type: () -> None\n node = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[-2],\n )\n x = np.random.randn(1, 3, 1, 5).astype(np.float32)\n y = np.expand_dims(x, axis=-2)\n expect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_negative_axes')\n"
] | [
[
"numpy.sqrt",
"numpy.array",
"numpy.random.randn",
"numpy.random.rand"
],
[
"numpy.random.seed"
],
[
"numpy.isnan",
"numpy.array"
],
[
"numpy.array",
"numpy.maximum",
"numpy.dtype"
],
[
"numpy.expand_dims",
"numpy.random.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
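The ONNX node tests in this row pair each operator with a small NumPy reference implementation; `_batchnorm_test_mode` is the reference for inference-mode BatchNormalization. Here is a standalone version of that formula, runnable without onnx installed, assuming an NCHW-style layout with channels on axis 1 as in the test:

import numpy as np

def batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5):
    # reshape per-channel parameters to (C, 1, ..., 1) so they broadcast over x
    dim_ones = (1,) * (x.ndim - 2)
    s, bias = s.reshape(-1, *dim_ones), bias.reshape(-1, *dim_ones)
    mean, var = mean.reshape(-1, *dim_ones), var.reshape(-1, *dim_ones)
    return s * (x - mean) / np.sqrt(var + epsilon) + bias

x = np.array([[[[-1., 0., 1.]], [[2., 3., 4.]]]], dtype=np.float32)  # shape (1, 2, 1, 3)
y = batchnorm_test_mode(x,
                        np.array([1.0, 1.5], dtype=np.float32),
                        np.array([0.0, 1.0], dtype=np.float32),
                        np.array([0.0, 3.0], dtype=np.float32),
                        np.array([1.0, 1.5], dtype=np.float32))
print(y)  # the expected output of test_batchnorm_example above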
nghitrampham/air_pollution_death_rate_related | [
"3fd72b9684e8362de5706ba37c1d90b844d4afe0"
] | [
"air_pollution_death_rate_related/scripts/air_pollution/predict_aqi.py"
] | [
"\"\"\"\nThis module is used to predict the Air Quality Index model for 2019 for all counties.\n\"\"\"\nimport pickle\nimport warnings\n\nimport pandas as pd\nimport numpy as np\nfrom keras.models import load_model\n\nimport helpers\n\nwarnings.filterwarnings(\"ignore\")\n\ndef main():\n\n data2019_raw = pd.read_csv(\"\"\"air_pollution_death_rate_related/data/air_pollution/\n data_air_raw/daily_aqi_by_county_2019.csv\"\"\")\n data2019 = helpers.data_cleaning(data2019_raw)\n predicted_date = \"2019-03-12\"\n\n file = open(\"temp.csv\", \"w\")\n file.write(\"date,state_county,AQI\\n\")\n\n # for county in list(data2019[\"state_county\"].unique()):\n for county in list(data2019[\"state_county\"].unique())[:5]:\n\n ## load model to predict AQI\n print(\"---> Loading model for county {} ...\".format(county))\n\n try:\n scaler_path = (\"air_pollution_death_rate_related/trained_model/min_scaler_model/\" +\n county + \"_scaler.pickle\")\n\n model_path = (\"air_pollution_death_rate_related/trained_model/county_aqi/\" +\n county + \"_model.h5\")\n\n model = load_model(model_path)\n mm_scaler = pickle.load(open(scaler_path, \"rb\"))\n\n ### feature engineering for model\n data_feature_temp = helpers.data_feature_engineering_for_test(\n data2019,\n county,\n predicted_date)\n x_test, y_test = helpers.load_test_data(data_feature_temp[\"data\"], mm_scaler)\n\n ## predicting AQI\n predictions = helpers.predict_point_by_point(model, x_test)\n # helpers.plot_results(predictions, y_test)\n\n ## keep prediction for all counties\n print(\"Predicting ....\")\n y_pred = np.append(x_test, predictions.reshape(1, 1, 1)).reshape(1, 39)\n y_scale = mm_scaler.inverse_transform(y_pred)[-1][-1]\n\n file.write(predicted_date+\",\"+county+\",\"+str(y_scale)+\"\\n\")\n\n del data_feature_temp, scaler_path,\\\n model_path, model, mm_scaler, x_test, y_test, predictions, y_pred, y_scale\n\n except Exception as exp:\n print(exp)\n exp.args += ('Path and list_year must not be empty', \"check read_raw_data function\")\n\n file.close()\n\n ## creating dataframe containing county, state, predicted AQI,\n ## predicted date for interactive visualization map\n county_code = pd.read_csv(\"\"\"air_pollution_death_rate_related/data/air_pollution/\n data_misc/county_with_code.csv\"\"\")\n df_prediction = pd.read_csv(\"temp.csv\")\n\n df_result = (pd.merge(county_code, df_prediction,\n how='inner',\n left_on=[\"state_county\"],\n right_on=[\"state_county\"])\n )\n df_result.to_csv(\"predicted_AQI\" + predicted_date + \".csv\", index=False)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.merge",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
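The prediction script in this row joins per-county AQI predictions to county metadata with an inner pandas merge on the `state_county` key (its `left_on`/`right_on` pair with identical column names is equivalent to `on=`). A minimal sketch of that join with in-memory frames; the column names follow the script, the values are made up:

import pandas as pd

county_code = pd.DataFrame({
    'state_county': ['wa_king', 'ny_kings'],
    'code': [53033, 36047],
})
df_prediction = pd.DataFrame({
    'date': ['2019-03-12', '2019-03-12'],
    'state_county': ['wa_king', 'ny_kings'],
    'AQI': [42.0, 57.5],
})

# inner join keeps only counties present in both frames, as in the script
df_result = pd.merge(county_code, df_prediction, how='inner', on='state_county')
print(df_result)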
positivevaib/semi-supervised-imagenet-classification | [
"4fb6427f5a72951c1b866a1ddbc2599811bb5770",
"4fb6427f5a72951c1b866a1ddbc2599811bb5770"
] | [
"deep-clustering-conv-autoencoder/main.py",
"rotation-net/train.py"
] | [
"# import\nimport numpy as np\nimport sklearn as skl\nimport sklearn.cluster as cluster\nimport sklearn.metrics as metrics\nimport torch\nimport torch.distributions.kl as kl\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport tqdm\n\n\n# model\nclass CAE_ENC(nn.Module):\n def __init__(self):\n super().__init__()\n # self.enc = nn.Sequential(*list(model.features.children())[:-5])\n self.conv1 = nn.Conv2d(3, 32, kernel_size=5, padding=2, stride=2)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=2)\n self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=2)\n self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2)\n self.fc1 = nn.Linear(256 * 6 * 6, 1000)\n\n def forward(self, x):\n # x = self.features(x)\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = F.relu(self.conv4(x))\n x = x.view(-1, 256 * 6 * 6)\n x = self.fc1(x)\n return x\n\n\nclass CAE_DEC(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc2 = nn.Linear(1000, 256 * 6 * 6)\n self.deconv1 = nn.ConvTranspose2d(256, 128, 2, stride=2)\n self.deconv2 = nn.ConvTranspose2d(128, 64, 2, stride=2)\n self.deconv3 = nn.ConvTranspose2d(64, 32, 2, stride=2)\n self.deconv4 = nn.ConvTranspose2d(32, 3, 2, stride=2)\n self.conv5 = nn.Conv2d(3, 3, kernel_size=1) # might have to remove\n\n def forward(self, x):\n x = F.relu(self.fc2(x))\n x = x.view(128, 256, 6, 6)\n x = F.relu(self.deconv1(x))\n x = F.relu(self.deconv2(x))\n x = F.relu(self.deconv3(x))\n x = F.relu(self.deconv4(x))\n x = torch.sigmoid(self.conv5(x)) # might have to remove\n return x\n\n\nclass ClusteringLayer(nn.Module):\n def __init__(self, weights=None, alpha=1.0):\n super().__init__()\n if weights:\n self.weights = weights\n else:\n self.weights = torch.empty(1000, 1000)\n nn.init.xavier_uniform_(self.weights)\n self.alpha = alpha\n\n def forward(self, x):\n q = 1.0 / (1.0 + (torch.sum(\n (x.unsqueeze(1) - self.weights)**2, dim=2) / self.alpha))\n q **= (self.alpha + 1.0) / 2.0\n q = torch.transpose(\n torch.transpose(q, 1, 2) / torch.sum(q, dim=1), 1, 2)\n return q\n\n\ndef set_weights(module, weights):\n if isinstance(module, ClusteringLayer):\n module.weights = weights\n\n\nclass CAE(nn.Module):\n def __init__(self):\n super().__init__()\n self.enc = CAE_ENC()\n self.dec = CAE_DEC()\n self.clus = ClusteringLayer()\n\n def forward(self, x):\n h = self.enc(x)\n q = self.clus(h)\n o = self.dec(h)\n return (h, q, o)\n\n\ndef loss(q, p, o, gamma=0.1):\n mse = nn.MSELoss(o)\n kld = gamma * kl.kl_divergence(p, q)\n l = mse + kld\n return l\n\n\ndef target_distribution(q):\n weight = q**2 / torch.sum(q, dim=0)\n return torch.transpose(torch.transpose(q) / torch.sum(weight, dim=1))\n\n\n# data\ntransformations = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225),\n inplace=True)\n])\ndataset1 = datasets.ImageFolder('/beegfs/vag273/ssl_data_96/supervised/train/',\n transform=transformations)\ndataset2 = datasets.ImageFolder('/beegfs/vag273/ssl_data_96/unsupervised/',\n transform=transformations)\ndataset = data.ConcatDataset((dataset1, dataset2))\n\ntrain_ratio = 0.9\ntrain_set_size = int(train_ratio * len(dataset))\nval_set_size = len(dataset) - train_set_size\n\ntrain_data, val_data = data.random_split(dataset,\n (train_set_size, 
val_set_size))\n\ntrain_loader = data.DataLoader(train_data, batch_size=128, shuffle=True)\nval_loader = data.DataLoader(val_data, batch_size=128, shuffle=False)\n\n# training\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nmodel = CAE().to(device)\n# criterion = nn.MSELoss()\noptimizer = optim.Adam(model.parameters())\n\n# pretrain\nbest_val_loss = float('inf')\ntot_epochs = 200 # maybe lower it on one of the runs\nprint('pretrain')\nfor epoch in range(tot_epochs):\n model.train()\n\n print('epoch {} of {}'.format(epoch + 1, tot_epochs))\n\n desc = \"ITERATION - loss: {:.2f}\"\n pbar = tqdm.tqdm(desc=desc.format(0),\n total=len(train_loader),\n leave=False,\n file=None,\n initial=0)\n\n running_loss = 0\n for batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n\n optimizer.zero_grad()\n\n _, _, out = model(img)\n loss = nn.MSELoss(out, img)\n\n running_loss += loss.item()\n\n loss.backward()\n optimizer.step()\n\n pbar.desc = desc.format(loss.item())\n pbar.update()\n\n print('loss: {}'.format(running_loss / len(train_loader)))\n\n model.eval()\n with torch.no_grad():\n val_running_loss = 0\n for val_batch_idx, val_data in enumerate(val_loader):\n val_img, _ = val_data\n val_img = val_img.to(device)\n\n _, _, val_out = model(val_img)\n val_loss = nn.MSELoss(val_out, val_img)\n\n val_running_loss += val_loss.item()\n\n if val_running_loss / len(val_loader) < best_val_loss:\n torch.save(model.state_dict(), 'weights.pth')\n\n print('val loss: {}'.format(val_running_loss / len(val_loader)))\n\n pbar.close()\n\n# first cluster\nfeatures = None\nfor batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n\n if not features:\n features = model(img)\n else:\n torch.cat((features, model(img)), 0)\n\nkmeans = cluster.kMeans(n_clusters=1000, n_init=20)\nfeatures = features.view(-1)\npred_last = kmeans.fit_predict(features)\nq = kmeans.cluster_centers_\n\n# deep cluster\nprint('deep cklustering')\nupdate_interval = 140 # maybe reduce this for sake of time\nmaxiter = 20000 # maybe reduce this for sake of time\nfor ite in range(int(maxiter)):\n model.train()\n if ite % update_interval == 0:\n q = None\n for batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n\n if not features:\n _, q, _ = model(img)\n else:\n _, new_q, _ = model(img)\n torch.cat((q, new_q), 0)\n p = target_distribution(\n q) # update the auxiliary target distribution p\n\n # evaluate the clustering performance\n pred = q.argmax(1)\n\n # check stop criterion\n delta_label = np.sum(pred != pred_last).astype(\n np.float32) / pred.shape[0]\n pred_last = np.copy(pred)\n if ite > 0 and delta_label < 0.001: # 0.001 is the tolerance\n print('delta_label ', delta_label, '< tol ', 0.001) # tol\n print('Reached tolerance threshold. 
Stopping training.')\n break\n\n print('epoch {} of {}'.format(epoch + 1, tot_epochs))\n\n desc = \"ITERATION - loss: {:.2f}\"\n pbar = tqdm.tqdm(desc=desc.format(0),\n total=len(train_loader),\n leave=False,\n file=None,\n initial=0)\n\n running_loss = 0\n for batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n\n optimizer.zero_grad()\n\n _, q, out = model(img)\n loss = loss(q,\n p[batch_idx * 128:batch_idx * 128 + 128, :],\n out,\n gamma=0.1)\n\n running_loss += loss.item()\n\n loss.backward()\n optimizer.step()\n\n pbar.desc = desc.format(loss.item())\n pbar.update()\n\n print('loss: {}'.format(running_loss / len(train_loader)))\n\n model.eval()\n with torch.no_grad():\n val_running_loss = 0\n for val_batch_idx, val_data in enumerate(val_loader):\n val_img, _ = val_data\n val_img = val_img.to(device)\n\n _, val_q, val_out = model(val_img)\n val_loss = loss(val_q,\n p[val_batch_idx * 128:val_batch_idx * 128 +\n 128, :],\n val_out,\n gamma=0.1)\n\n val_running_loss += val_loss.item()\n\n if val_running_loss / len(val_loader) < best_val_loss:\n torch.save(model.state_dict(), 'overall_weights.pth')\n\n print('val loss: {}'.format(val_running_loss / len(val_loader)))\n\n pbar.close()\n",
"# import dependencies\nimport pickle\n\nimport matplotlib as mpl\nmpl.use('TKAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport tqdm\nimport visdom\n\nimport evaluate\n\n\n# define function\ndef train(model, train_loader, train_size, val_loader, val_size, criterion,\n optimizer, scheduler, epochs, device, model_path, checkpoint,\n hist_path, resume, visdom, environment, matplotlib, pbar_file):\n '''train model'''\n # setup loss and accuracy visualization\n if visdom:\n viz = visdom.Visdom()\n loss_plot, acc_plot = None, None\n else:\n train_loss, val_loss, train_acc, val_acc = {}, {}, {}, {}\n\n # load checkpoints, if training to be resumed\n if resume:\n checkpoint_dict = torch.load(checkpoint)\n\n best_val_loss = checkpoint_dict['best_val_loss']\n no_improvement = checkpoint_dict['no_improvement']\n start_epoch = checkpoint_dict['epoch']\n\n if not visdom:\n train_loss = checkpoint_dict['train_loss']\n val_loss = checkpoint_dict['val_loss']\n train_acc = checkpoint_dict['train_acc']\n val_acc = checkpoint_dict['val_acc']\n else:\n best_val_loss = float('inf')\n no_improvement = 0\n start_epoch = 1\n\n # train\n model.train()\n for epoch in range(start_epoch, epochs + 1):\n # save checkpoint\n torch.save(\n {\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'scheduler_state_dict': scheduler.state_dict(),\n 'best_val_loss': best_val_loss,\n 'no_improvement': no_improvement,\n 'train_loss': train_loss if not visdom else None,\n 'val_loss': val_loss if not visdom else None,\n 'train_acc': train_acc if not visdom else None,\n 'val_acc': val_acc if not visdom else None\n }, checkpoint)\n\n # setup progress bar\n desc = \"ITERATION - loss: {:.2f}\"\n pbar = tqdm.tqdm(desc=desc.format(0),\n total=len(train_loader),\n leave=False,\n file=pbar_file,\n initial=0)\n\n tqdm.tqdm.write('epoch {} of {}'.format(epoch, epochs), file=pbar_file)\n\n # iterate\n batch_idx = 0\n running_loss = 0\n correct = 0\n for batch_idx, data in enumerate(train_loader):\n inputs, labels = data\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # optimize and save stats\n optimizer.zero_grad()\n\n outputs = model(inputs)\n\n preds = torch.argmax(outputs, dim=1)\n correct += torch.sum(preds == labels).item()\n\n loss = criterion(outputs, labels)\n running_loss += loss.item()\n\n loss.backward()\n optimizer.step()\n\n # evaluate model performance and invoke learning rate scheduler\n if batch_idx == (len(train_loader) - 1):\n avg_val_loss, val_correct = evaluate.evaluate(\n model, val_loader, val_size, criterion, device, False,\n pbar_file)\n scheduler.step(metrics=avg_val_loss)\n\n # update progress bar\n pbar.desc = desc.format(loss.item())\n pbar.update()\n\n # print epoch end losses and accuracies\n tqdm.tqdm.write('training loss: {:.4f}, val loss: {:.4f}'.format(\n running_loss / (batch_idx + 1), avg_val_loss, file=pbar_file))\n tqdm.tqdm.write('training acc: {:.2f}%, val acc: {:.2f}%\\n'.format(\n (correct * 100) / train_size, (val_correct * 100) / val_size,\n file=pbar_file))\n\n # close progress bar\n pbar.close()\n\n # plot loss history\n if visdom:\n if not loss_plot:\n loss_plot = viz.line(X=np.array([epoch]),\n Y=np.array(\n [running_loss / (batch_idx + 1)]),\n env=environment,\n opts=dict(legend=['train', 'val'],\n title='loss hist',\n xlabel='epochs',\n ylabel='loss'))\n else:\n viz.line(X=np.array([epoch]),\n Y=np.array([running_loss / (batch_idx + 1)]),\n env=environment,\n win=loss_plot,\n 
name='train',\n update='append')\n\n if not acc_plot:\n acc_plot = viz.line(X=np.array([epoch]),\n Y=np.array([(correct * 100) / train_size]),\n env=environment,\n opts=dict(legend=['train', 'val'],\n title='acc hist',\n xlabel='epochs',\n ylabel='acc'))\n else:\n viz.line(X=np.array([epoch]),\n Y=np.array([(correct * 100) / train_size]),\n env=environment,\n win=acc_plot,\n name='train',\n update='append')\n else:\n train_loss[epoch] = running_loss / (batch_idx + 1)\n val_loss[epoch] = avg_val_loss\n train_acc[epoch] = (correct * 100) / train_size\n val_acc[epoch] = (val_correct * 100) / val_size\n\n # save model and apply early stopping\n if avg_val_loss < best_val_loss:\n torch.save(model.state_dict(), model_path)\n best_val_loss = avg_val_loss\n no_improvement = 0\n else:\n no_improvement += 1\n\n if no_improvement == 5:\n print('applying early stopping')\n break\n\n # save training history\n if not visdom:\n hist = {\n 'train_loss': train_loss,\n 'val_loss': val_loss,\n 'train_acc': train_acc,\n 'val_acc': val_acc\n }\n with open(hist_path, 'wb') as hist_file:\n pickle.dump(hist, hist_file)\n\n # visualize losses and accuracies\n if not visdom and matplotlib:\n for subplot in ['loss', 'acc']:\n if subplot == 'loss':\n plt.subplot(1, 2, 1)\n else:\n plt.subplot(1, 2, 2)\n\n plt.title(subplot)\n plt.xlabel = 'epochs'\n plt.ylabel = 'loss' if subplot == 'loss' else 'acc'\n\n train_plot, = plt.plot(train_loss.values() if subplot == 'loss'\n else train_acc.values(),\n label='train')\n val_plot, = plt.plot(\n val_loss.values() if subplot == 'loss' else val_acc.values(),\n label='val')\n plt.legend(handles=[train_plot, val_plot], loc='best')\n\n plt.tight_layout()\n plt.show()\n"
] | [
[
"torch.transpose",
"numpy.sum",
"torch.empty",
"torch.nn.ConvTranspose2d",
"torch.cat",
"sklearn.cluster.kMeans",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"torch.distributions.kl.kl_divergence",
"torch.sum",
"torch.nn.Linear",
"torch.utils.data.ConcatDataset",
"torch.utils.data.random_split",
"torch.no_grad",
"numpy.copy",
"torch.cuda.is_available",
"torch.nn.init.xavier_uniform_",
"torch.nn.MSELoss"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"torch.load",
"matplotlib.use",
"torch.sum",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.show",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
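The clustering layer in this row's first file computes Student's t soft assignments `q` and periodically sharpens them into an auxiliary target `p`, the pattern of DEC (Xie et al., 2016). The `target_distribution` helper as stored is broken (`torch.transpose` is called without its two dim arguments, which raises a TypeError), so for reference here is the standard DEC target it is aiming for, sketched in NumPy:

import numpy as np

def target_distribution(q):
    # DEC auxiliary target: square the soft assignments, divide by the soft
    # cluster frequencies f_j = sum_i q_ij, then renormalize each row
    weight = q ** 2 / q.sum(axis=0)
    return weight / weight.sum(axis=1, keepdims=True)

q = np.array([[0.7, 0.2, 0.1],
              [0.3, 0.4, 0.3]])
p = target_distribution(q)
print(p, p.sum(axis=1))  # rows still sum to 1, but are sharper than q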
yhl111/PCNN | [
"2e0967aec962d55df1eb7d149a44b91c6c751a1a"
] | [
"model/config.py"
] | [
"import os\nimport numpy as np\n\nfrom .general_utils import get_logger\nfrom .data_utils import load_vocab, get_processing_word\n\nclass Config():\n def __init__(self, load=True):\n \"\"\"Initialize hyperparameters and load vocabs\n\n Args:\n load_embeddings: (bool) if True, load embeddings into\n np array, else None\n\n \"\"\"\n # directory for training outputs\n if not os.path.exists(self.dir_output):\n os.makedirs(self.dir_output)\n\n # create instance of logger\n self.logger = get_logger(self.path_log)\n\n # load if requested (default)\n if load:\n self.load()\n\n\n def load(self):\n \"\"\"Loads vocabulary, processing functions and embeddings\n\n Supposes that build_data.py has been run successfully and that\n the corresponding files have been created (vocab and trimmed\n vectors)\n\n \"\"\"\n # 1. vocabulary\n self.vocab_words = load_vocab(self.filename_words)\n self.vocab_relations = load_vocab(self.filename_relation)\n\n self.nwords = len(self.vocab_words)\n self.nrelations = len(self.vocab_relations)\n\n # 2. get processing functions that map str -> id\n self.processing_word = get_processing_word(self.vocab_words, UNK = \"<UNK>\")\n self.processing_relation = get_processing_word(self.vocab_relations, UNK='NA')\n\n # 3. get pre-trained embeddings\n self.embeddings = (np.load(self.filename_embeddings)['vec']\n if self.use_pretrained else None)\n\n\n # general config\n dir_output = \"./results/test/\"\n graph_output = \"./graph\"\n dir_model = dir_output + \"model.weights/\" # directory to save models\n path_log = dir_output + \"log.txt\"\n restore_model = \"./results/test/model.weights/early_best.ckpt\"\n\n # embeddings\n dim_word = 50\n dim_pos = 5\n dim = dim_word + 2*dim_pos\n\n # position range in sentence\n nposition = 500\n\n # convolution\n window_size = 3\n feature_maps = 230\n\n filename_train_origin = \"./data/origin_data/train.txt\"\n filename_train = \"./data/processed_data/train.txt\"\n filename_train_wrong = \"./data/processed_data/wrong_parse_train.txt\"\n\n filename_dev = \"./data/processed_data/test.txt\"\n\n filename_test_origin = \"./data/origin_data/test.txt\"\n filename_test = \"./data/processed_data/test.txt\"\n filename_test_wrong = \"./data/processed_data/wrong_parse_test.txt\"\n\n max_iter = None # if not None, max number of examples in Dataset\n\n # vocab (created from dataset with build_data.py)\n filename_words = \"./data/processed_data/words.txt\"\n filename_embeddings = \"./data/processed_data/vectors.npz\"\n\n filename_relation_origin = \"./data/origin_data/relation2id.txt\"\n filename_relation = \"./data/processed_data/relation.txt\"\n\n # word vectors file\n filename_wordvectors = \"./data/origin_data/vec.txt\"\n\n use_pretrained = True\n\n MIL = False # if True, using multi-instances learning\n shuffle = False # if True, shuffle train dataset\n max_iter = None # if not None, max number of examples in Dataset\n\n # training\n train_word_embeddings = False\n train_pos_embeddings = True\n nepochs = 15\n dropout = 0.5\n batch_size = 50\n lr_method = \"adadelta\"\n lr = 0.001\n lr_decay = 0.9\n clip = -1 # if negative, no clipping\n nepoch_no_imprv = 3\n early_stop = True\n max_train_step = 100000\n"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
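`Config.load` above pulls pre-trained word vectors out of an `.npz` archive with `np.load(self.filename_embeddings)['vec']`. A minimal round trip showing the file layout that line expects; the key name `vec` comes from the snippet, the shapes here are illustrative:

import numpy as np

vec = np.random.rand(10, 50).astype(np.float32)   # nwords x dim_word, toy sizes
np.savez_compressed('vectors.npz', vec=vec)        # store under the 'vec' key

embeddings = np.load('vectors.npz')['vec']         # what Config.load does
assert embeddings.shape == (10, 50)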
thompson318/scikit-surgerycore | [
"22867073a5a3e87def68b4a76e70fe54d085be32"
] | [
"tests/algorithms/test_tracking_smoothing.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Tests for BARD pointer module\"\"\"\nimport math\nimport numpy as np\nimport pytest\nimport sksurgerycore.algorithms.tracking_smoothing as reg\n\n\ndef test_rvec_to_quaterion():\n \"\"\"\n Does it convert correctly\n \"\"\"\n\n #a 90 degree rotation about the x axis\n rvec = np.array([math.pi/2.0, 0.0, 0.0])\n\n quaternion = reg._rvec_to_quaternion(rvec) # pylint: disable=protected-access\n\n assert quaternion[0] == math.cos(math.pi/4.0)\n assert quaternion[1] == 1.0 * math.sin(math.pi/4.0)\n assert quaternion[2] == 0.0\n assert quaternion[3] == 0.0\n\n\ndef test_quaterion_to_matrix():\n \"\"\"\n Test conversion on a 90 degree rotation about y axis.\n \"\"\"\n quaternion = np.array([math.cos(math.pi/4.0), 0.0,\n 1.0 * math.sin(math.pi/4.0), 0.0])\n\n rot_mat = reg.quaternion_to_matrix(quaternion)\n\n rot_mat1 = np.eye(3, dtype=np.float64)\n\n rot_mat1[0, 0] = 0.0\n rot_mat1[0, 2] = 1.0\n rot_mat1[2, 0] = -1.0\n rot_mat1[2, 2] = 0.0\n\n assert np.allclose(rot_mat, rot_mat1, rtol=1e-05, atol=1e-10)\n\ndef test_rolling_mean_no_buffer():\n \"\"\"\n Try doing a rolling mean with zero buffer.\n \"\"\"\n with pytest.raises(ValueError):\n _ = reg.RollingMean(vector_size=3, buffer_size=0)\n\n\n\ndef test_rolling_mean_returns_nan():\n \"\"\"\n Tests for rolling mean class.\n \"\"\"\n\n mean_buffer = reg.RollingMean(vector_size=3, buffer_size=5)\n\n assert np.isnan(mean_buffer.getmean()).all\n\ndef test_rolling_mean_single_value():\n \"\"\"\n Test rolling mean returns vector value for single entry\n \"\"\"\n vector = [5.4, 1.2, 3.4]\n\n mean_buffer = reg.RollingMean(vector_size=3, buffer_size=5)\n\n mean_buffer.pop(vector)\n\n assert np.allclose(vector, mean_buffer.getmean(), rtol=1e-05, atol=1e-10)\n\ndef test_rolling_mean_four_values():\n \"\"\"\n Test rolling mean returns vector value for single entry\n \"\"\"\n vector0 = [5.4, 1.2, 3.4]\n vector1 = [7.4, -1.2, -1.4]\n vector2 = [-2.6, 4.2, 2.6]\n vector3 = [9.0, 3.3, 3.6]\n\n expected_answer0 = [3.4, 1.4, 1.533333]\n expected_answer1 = [4.6, 2.1, 1.6]\n\n mean_buffer = reg.RollingMean(vector_size=3, buffer_size=3)\n mean_buffer.pop(vector0)\n mean_buffer.pop(vector1)\n mean_buffer.pop(vector2)\n assert np.allclose(expected_answer0, mean_buffer.getmean(), rtol=1e-05,\n atol=1e-6)\n\n mean_buffer.pop(vector3)\n\n assert np.allclose(expected_answer1, mean_buffer.getmean(), rtol=1e-05,\n atol=1e-10)\n\n\ndef test_rolling_rotation_no_buffer():\n \"\"\"\n Try doing a rolling rotation mean with zero buffer.\n \"\"\"\n with pytest.raises(ValueError):\n _ = reg.RollingMeanRotation(buffer_size=0)\n\n\ndef test_rolling_rot_returns_nan():\n \"\"\"\n Tests for rolling mean rotation class.\n \"\"\"\n\n mean_buffer = reg.RollingMeanRotation(buffer_size=5)\n\n assert np.isnan(mean_buffer.getmean()).all\n\n\ndef test_rolling_rot_single_value():\n \"\"\"\n Test rolling mean rotation returns vector value for single entry\n \"\"\"\n\n rvec = np.array([0.0, -math.pi/2.0, 0.0])\n expected_quaternion = np.array([math.cos(math.pi/4.0), 0.0,\n -1.0 * math.sin(math.pi/4.0), 0.0])\n\n mean_buffer = reg.RollingMeanRotation(buffer_size=5)\n\n mean_buffer.pop(rvec)\n\n assert np.allclose(expected_quaternion, mean_buffer.getmean(),\n rtol=1e-05, atol=1e-10)\n\n\ndef test_r_rot_sgl_value_sgl_buff():\n \"\"\"\n Test rolling mean rotation returns vector value for single entry\n \"\"\"\n\n rvec = np.array([0.0, 0.0, -math.pi/2.0])\n expected_quaternion = np.array([math.cos(math.pi/4.0), 0.0, 0.0,\n -1.0 * math.sin(math.pi/4.0)])\n\n mean_buffer = 
reg.RollingMeanRotation(buffer_size=1)\n\n mean_buffer.pop(rvec)\n\n assert np.allclose(expected_quaternion, mean_buffer.getmean(),\n rtol=1e-05, atol=1e-10)\n\n\ndef test_rolling_rot_four_values():\n \"\"\"\n Test rolling mean returns vector value for single entry\n \"\"\"\n rvec0 = [0.0, 0.0, 0.0]\n rvec1 = [np.NaN, np.NaN, np.NaN]\n rvec2 = [0.0, 0.0, -math.pi/2.0]\n rvec3 = [0.0, math.pi/3.0, 0.0]\n\n expected_answer0 = reg._rvec_to_quaternion([0.0, 0.0, -math.pi/4.0]) # pylint: disable=protected-access\n #the next ones more of a regression test, I haven't independently\n #calculated this answer.\n expected_answer1 = [-0.87602709, 0.0, -0.27843404, 0.39376519]\n\n mean_buffer = reg.RollingMeanRotation(buffer_size=3)\n mean_buffer.pop(rvec0)\n mean_buffer.pop(rvec1)\n mean_buffer.pop(rvec2)\n assert np.allclose(expected_answer0, mean_buffer.getmean(), rtol=1e-05,\n atol=1e-6)\n\n mean_buffer.pop(rvec3)\n\n assert np.allclose(expected_answer1, mean_buffer.getmean(), rtol=1e-05,\n atol=1e-10)\n"
] | [
[
"numpy.eye",
"numpy.array",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
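The tests above pin down the conventions of `sksurgerycore.algorithms.tracking_smoothing`: rotation vectors convert to unit quaternions in [w, x, y, z] order, and rolling buffers return NaN until populated. Below is a minimal sketch of the conversion those assertions imply; `rvec_to_quaternion` is an illustrative stand-in for the library's private `_rvec_to_quaternion`, not its actual implementation.

```python
import math

import numpy as np


def rvec_to_quaternion(rvec):
    """Rodrigues rotation vector -> unit quaternion [w, x, y, z].

    The rotation angle is the vector's norm; the axis is its direction.
    """
    angle = np.linalg.norm(rvec)
    if angle < 1e-12:
        return np.array([1.0, 0.0, 0.0, 0.0])  # identity rotation
    axis = np.asarray(rvec, dtype=np.float64) / angle
    return np.concatenate(([math.cos(angle / 2.0)],
                           axis * math.sin(angle / 2.0)))


# The 90 degree rotation about x from test_rvec_to_quaternion:
print(rvec_to_quaternion([math.pi / 2.0, 0.0, 0.0]))
# -> [0.70710678 0.70710678 0.         0.        ]
```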
bourov/probability | [
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2",
"1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2"
] | [
"tensorflow_probability/python/distributions/deterministic.py",
"tensorflow_probability/python/internal/backend/numpy/debugging.py",
"tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_diag.py",
"tensorflow_probability/examples/grammar_vae.py",
"tensorflow_probability/python/distributions/sample_test.py",
"tensorflow_probability/python/experimental/nn/variational_base.py",
"tensorflow_probability/python/mcmc/replica_exchange_mc_test.py",
"tensorflow_probability/python/sts/local_linear_trend_test.py",
"tensorflow_probability/python/experimental/edward2/random_variable_test.py",
"tensorflow_probability/python/distributions/internal/slicing.py",
"tensorflow_probability/python/distributions/inverse_gaussian_test.py",
"tensorflow_probability/python/distributions/transformed_distribution_test.py",
"tensorflow_probability/python/math/ode/xla_test.py",
"tensorflow_probability/python/bijectors/normal_cdf.py",
"tensorflow_probability/python/optimizer/linesearch/__init__.py",
"tensorflow_probability/python/bijectors/scale_matvec_diag.py",
"tensorflow_probability/python/bijectors/bijector.py",
"tensorflow_probability/python/stats/ranking_test.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Deterministic distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\n# Dependency imports\nimport six\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import kullback_leibler\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n__all__ = [\n 'Deterministic',\n 'VectorDeterministic',\n]\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass _BaseDeterministic(distribution.Distribution):\n \"\"\"Base class for Deterministic distributions.\"\"\"\n\n def __init__(self,\n loc,\n atol=None,\n rtol=None,\n is_vector=False,\n validate_args=False,\n allow_nan_stats=True,\n parameters=None,\n name='_BaseDeterministic'):\n \"\"\"Initialize a batch of `_BaseDeterministic` distributions.\n\n The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`\n computations, e.g. due to floating-point error.\n\n ```\n pmf(x; loc)\n = 1, if Abs(x - loc) <= atol + rtol * Abs(loc),\n = 0, otherwise.\n ```\n\n Args:\n loc: Numeric `Tensor`. The point (or batch of points) on which this\n distribution is supported.\n atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The absolute tolerance for comparing closeness to `loc`.\n Default is `0`.\n rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The relative tolerance for comparing closeness to `loc`.\n Default is `0`.\n is_vector: Python `bool`. If `True`, this is for `VectorDeterministic`,\n else `Deterministic`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value '`NaN`' to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n parameters: Dict of locals to facilitate copy construction.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n ValueError: If `loc` is a scalar.\n \"\"\"\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([loc, atol, rtol], dtype_hint=tf.float32)\n self._loc = tensor_util.convert_nonref_to_tensor(\n loc, dtype_hint=dtype, name='loc')\n self._atol = tensor_util.convert_nonref_to_tensor(\n 0 if atol is None else atol, dtype=dtype, name='atol')\n self._rtol = tensor_util.convert_nonref_to_tensor(\n 0 if rtol is None else rtol, dtype=dtype, name='rtol')\n self._is_vector = is_vector\n\n super(_BaseDeterministic, self).__init__(\n dtype=self._loc.dtype,\n reparameterization_type=(\n reparameterization.FULLY_REPARAMETERIZED\n if dtype_util.is_floating(self._loc.dtype)\n else reparameterization.NOT_REPARAMETERIZED),\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n def _slack(self, loc):\n # Avoid using the large broadcast with self.loc if possible.\n if self.parameters['rtol'] is None:\n return self.atol\n else:\n return self.atol + self.rtol * tf.abs(loc)\n\n @property\n def loc(self):\n \"\"\"Point (or batch of points) at which this distribution is supported.\"\"\"\n return self._loc\n\n @property\n def atol(self):\n \"\"\"Absolute tolerance for comparing points to `self.loc`.\"\"\"\n return self._atol\n\n @property\n def rtol(self):\n \"\"\"Relative tolerance for comparing points to `self.loc`.\"\"\"\n return self._rtol\n\n def _entropy(self):\n return tf.zeros(self.batch_shape_tensor(), dtype=self.dtype)\n\n def _mean(self):\n return tf.identity(self.loc)\n\n def _variance(self):\n return tf.zeros_like(self.loc)\n\n def _mode(self):\n return self.mean()\n\n def _sample_n(self, n, seed=None):\n del seed # unused\n loc = tf.convert_to_tensor(self.loc)\n return tf.broadcast_to(\n loc,\n tf.concat([[n], self._batch_shape_tensor(loc=loc),\n self._event_shape_tensor(loc=loc)],\n axis=0))\n\n def _default_event_space_bijector(self):\n return\n\n def _parameter_control_dependencies(self, is_init):\n assertions = []\n\n # In init, we can always build shape and dtype checks because\n # we assume shape doesn't change for Variable backed args.\n if is_init and self._is_vector:\n msg = 'Argument `loc` must be at least rank 1.'\n if tensorshape_util.rank(self.loc.shape) is not None:\n if tensorshape_util.rank(self.loc.shape) < 1:\n raise ValueError(msg)\n elif self.validate_args:\n assertions.append(\n assert_util.assert_rank_at_least(self.loc, 1, message=msg))\n\n if not self.validate_args:\n assert not assertions # Should never happen\n return []\n\n if is_init != tensor_util.is_ref(self.atol):\n assertions.append(\n assert_util.assert_non_negative(\n self.atol, message='Argument \"atol\" must be non-negative'))\n if is_init != tensor_util.is_ref(self.rtol):\n assertions.append(\n assert_util.assert_non_negative(\n self.rtol, message='Argument \"rtol\" must be non-negative'))\n return assertions\n\n\nclass Deterministic(_BaseDeterministic):\n \"\"\"Scalar `Deterministic` distribution on the real line.\n\n The scalar `Deterministic` distribution is parameterized by a [batch] point\n `loc` on the real line. 
The distribution is supported at this point only,\n and corresponds to a random variable that is constant, equal to `loc`.\n\n See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).\n\n #### Mathematical Details\n\n The probability mass function (pmf) and cumulative distribution function (cdf)\n are\n\n ```none\n pmf(x; loc) = 1, if x == loc, else 0\n cdf(x; loc) = 1, if x >= loc, else 0\n ```\n\n #### Examples\n\n ```python\n # Initialize a single Deterministic supported at zero.\n constant = tfp.distributions.Deterministic(0.)\n constant.prob(0.)\n ==> 1.\n constant.prob(2.)\n ==> 0.\n\n # Initialize a [2, 2] batch of scalar constants.\n loc = [[0., 1.], [2., 3.]]\n x = [[0., 1.1], [1.99, 3.]]\n constant = tfp.distributions.Deterministic(loc)\n constant.prob(x)\n ==> [[1., 0.], [0., 1.]]\n ```\n\n \"\"\"\n\n def __init__(self,\n loc,\n atol=None,\n rtol=None,\n validate_args=False,\n allow_nan_stats=True,\n name='Deterministic'):\n \"\"\"Initialize a scalar `Deterministic` distribution.\n\n The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`\n computations, e.g. due to floating-point error.\n\n ```\n pmf(x; loc)\n = 1, if Abs(x - loc) <= atol + rtol * Abs(loc),\n = 0, otherwise.\n ```\n\n Args:\n loc: Numeric `Tensor` of shape `[B1, ..., Bb]`, with `b >= 0`.\n The point (or batch of points) on which this distribution is supported.\n atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The absolute tolerance for comparing closeness to `loc`.\n Default is `0`.\n rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The relative tolerance for comparing closeness to `loc`.\n Default is `0`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value '`NaN`' to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n super(Deterministic, self).__init__(\n loc,\n atol=atol,\n rtol=rtol,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(loc=0, atol=0, rtol=0)\n\n def _batch_shape_tensor(self, loc=None):\n return tf.broadcast_dynamic_shape(\n tf.shape(self.loc if loc is None else loc),\n tf.broadcast_dynamic_shape(tf.shape(self.atol), tf.shape(self.rtol)))\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(\n self.loc.shape,\n tf.broadcast_static_shape(self.atol.shape, self.rtol.shape))\n\n def _event_shape_tensor(self, loc=None):\n del loc\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _prob(self, x):\n loc = tf.convert_to_tensor(self.loc)\n # Enforces dtype of probability to be float, when self.dtype is not.\n prob_dtype = self.dtype if dtype_util.is_floating(\n self.dtype) else tf.float32\n return tf.cast(tf.abs(x - loc) <= self._slack(loc), dtype=prob_dtype)\n\n def _cdf(self, x):\n loc = tf.identity(self.loc)\n return tf.cast(x >= loc - self._slack(loc), dtype=self.dtype)\n\n\nclass VectorDeterministic(_BaseDeterministic):\n \"\"\"Vector `Deterministic` distribution on `R^k`.\n\n The `VectorDeterministic` distribution is parameterized by a [batch] point\n `loc in R^k`. The distribution is supported at this point only,\n and corresponds to a random variable that is constant, equal to `loc`.\n\n See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).\n\n #### Mathematical Details\n\n The probability mass function (pmf) is\n\n ```none\n pmf(x; loc)\n = 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],\n = 0, otherwise.\n ```\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n\n # Initialize a single VectorDeterministic supported at [0., 2.] in R^2.\n constant = tfd.Deterministic([0., 2.])\n constant.prob([0., 2.])\n ==> 1.\n constant.prob([0., 3.])\n ==> 0.\n\n # Initialize a [3] batch of constants on R^2.\n loc = [[0., 1.], [2., 3.], [4., 5.]]\n constant = tfd.VectorDeterministic(loc)\n constant.prob([[0., 1.], [1.9, 3.], [3.99, 5.]])\n ==> [1., 0., 0.]\n ```\n\n \"\"\"\n\n def __init__(self,\n loc,\n atol=None,\n rtol=None,\n validate_args=False,\n allow_nan_stats=True,\n name='VectorDeterministic'):\n \"\"\"Initialize a `VectorDeterministic` distribution on `R^k`, for `k >= 0`.\n\n Note that there is only one point in `R^0`, the 'point' `[]`. So if `k = 0`\n then `self.prob([]) == 1`.\n\n The `atol` and `rtol` parameters allow for some slack in `pmf`\n computations, e.g. due to floating-point error.\n\n ```\n pmf(x; loc)\n = 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],\n = 0, otherwise\n ```\n\n Args:\n loc: Numeric `Tensor` of shape `[B1, ..., Bb, k]`, with `b >= 0`, `k >= 0`\n The point (or batch of points) on which this distribution is supported.\n atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The absolute tolerance for comparing closeness to `loc`.\n Default is `0`.\n rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The relative tolerance for comparing closeness to `loc`.\n Default is `0`.\n validate_args: Python `bool`, default `False`. 
When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value '`NaN`' to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n super(VectorDeterministic, self).__init__(\n loc,\n atol=atol,\n rtol=rtol,\n is_vector=True,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(loc=1, atol=1, rtol=1)\n\n def _batch_shape_tensor(self, loc=None):\n return tf.broadcast_dynamic_shape(\n tf.shape(self.loc if loc is None else loc),\n tf.broadcast_dynamic_shape(tf.shape(self.atol),\n tf.shape(self.rtol)))[:-1]\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(\n self.loc.shape,\n tf.broadcast_static_shape(self.atol.shape, self.rtol.shape))[:-1]\n\n def _event_shape_tensor(self, loc=None):\n return tf.shape(self.loc if loc is None else loc)[-1:]\n\n def _event_shape(self):\n return self.loc.shape[-1:]\n\n def _prob(self, x):\n loc = tf.convert_to_tensor(self.loc)\n return tf.cast(\n tf.reduce_all(tf.abs(x - loc) <= self._slack(loc), axis=-1),\n dtype=self.dtype)\n\n def _sample_control_dependencies(self, x):\n assertions = []\n if not self.validate_args:\n return assertions\n assertions.append(assert_util.assert_rank_at_least(x, 1))\n assertions.append(assert_util.assert_equal(\n self.event_shape_tensor(), tf.gather(tf.shape(x), tf.rank(x) - 1),\n message=('Argument `x` not defined in the same space '\n 'R**k as this distribution')))\n return assertions\n\n\n@kullback_leibler.RegisterKL(_BaseDeterministic, distribution.Distribution)\ndef _kl_deterministic_distribution(a, b, name=None):\n \"\"\"Calculate the batched KL divergence `KL(a || b)` with `a` Deterministic.\n\n Args:\n a: instance of a Deterministic distribution object.\n b: instance of a Distribution distribution object.\n name: (optional) Name to use for created operations. Default is\n 'kl_deterministic_distribution'.\n\n Returns:\n Batchwise `KL(a || b)`.\n \"\"\"\n with tf.name_scope(name or 'kl_deterministic_distribution'):\n return -b.log_prob(a.loc)\n",
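The `Deterministic` docstrings above define the pmf/cdf slack as `atol + rtol * |loc|`, and the registered KL reduces `KL(a || b)` to `-b.log_prob(a.loc)`. A short usage sketch against the public `tfp.distributions` API; the commented values are what those formulas predict:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

# prob(x) is 1 when |x - loc| <= atol + rtol * |loc|, else 0.
d = tfd.Deterministic(loc=0.5, atol=0.1)
print(d.prob([0.45, 0.7]).numpy())  # [1. 0.]
print(d.cdf([0.3, 0.6]).numpy())    # [0. 1.]  (cdf is 1 once x >= loc - slack)

# KL(Deterministic || q) collapses to -q.log_prob(loc).
q = tfd.Normal(loc=0., scale=1.)
print(tfd.kl_divergence(d, q).numpy())  # == -q.log_prob(0.5)
```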
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Experimental Numpy backend.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\n\nfrom tensorflow_probability.python.internal.backend.numpy import _utils as utils\nfrom tensorflow_probability.python.internal.backend.numpy.ops import convert_to_tensor\nfrom tensorflow_probability.python.internal.backend.numpy.ops import is_tensor\nfrom tensorflow_probability.python.internal.backend.numpy.ops import Tensor\n\n\n__all__ = [\n 'Assert',\n 'assert_equal',\n 'assert_greater',\n 'assert_greater_equal',\n 'assert_integer',\n 'assert_less',\n 'assert_less_equal',\n 'assert_near',\n 'assert_negative',\n 'assert_non_negative',\n 'assert_non_positive',\n 'assert_none_equal',\n 'assert_positive',\n 'assert_proper_iterable',\n 'assert_rank',\n 'assert_rank_at_least',\n 'assert_rank_in',\n 'assert_scalar',\n 'check_numerics',\n]\n\nJAX_MODE = False\n\n\ndef skip_assert_for_tracers(f):\n \"\"\"Function decorator that returns None if JAX tracers are detected.\"\"\"\n if not JAX_MODE:\n return f\n from jax import core as jax_core # pylint: disable=g-import-not-at-top\n def wrapped(*args, **kwargs):\n if any(isinstance(arg, jax_core.Tracer) for arg\n in args + tuple(kwargs.values())):\n print('skip assert ' + f.__name__)\n return None\n return f(*args, **kwargs)\n return wrapped\n\n\n@skip_assert_for_tracers\ndef _assert_binary(\n x, y, comparator, sym, summarize=None, message=None, name=None):\n del summarize\n del name\n x = convert_to_tensor(x)\n y = convert_to_tensor(y)\n if not np.all(comparator(x, y)):\n raise ValueError('Condition x {} y did not hold element-wise. 
{}'.format(\n sym, message or ''))\n\n\n@skip_assert_for_tracers\ndef _assert_equal(x, y, summarize=None, message=None, name=None):\n del summarize\n del name\n x = convert_to_tensor(x)\n y = convert_to_tensor(y)\n if not np.all(np.equal(x, y)):\n raise ValueError('Expected x == y but got {} vs {} {}'.format(\n x, y, message or ''))\n\n\ndef _assert_greater(x, y, summarize=None, message=None, name=None):\n return _assert_binary(\n x, y, np.greater, '>', summarize=summarize,\n message=message, name=name)\n\n\ndef _assert_less(x, y, summarize=None, message=None, name=None):\n return _assert_binary(\n x, y, np.less, '<', summarize=summarize,\n message=message, name=name)\n\n\ndef _assert_greater_equal(\n x, y, summarize=None, message=None, name=None):\n return _assert_binary(\n x, y, np.greater_equal, '>=', summarize=summarize,\n message=message, name=name)\n\n\ndef _assert_less_equal(\n x, y, summarize=None, message=None, name=None):\n return _assert_binary(\n x, y, np.less_equal, '<=', summarize=summarize,\n message=message, name=name)\n\n\n@skip_assert_for_tracers\ndef _assert_compare_to_zero(\n x, comparator, sym, summarize=None, message=None, name=None):\n del summarize\n del name\n x = convert_to_tensor(x)\n if not np.all(comparator(x, 0)):\n raise ValueError(\n 'Condition x {} 0 did not hold element-wise; got {} {}'.format(\n sym, x, message or ''))\n\n\ndef _assert_positive(x, summarize=None, message=None, name=None):\n return _assert_compare_to_zero(\n x, np.greater, '>', summarize=summarize, message=message, name=name)\n\n\ndef _assert_negative(x, summarize=None, message=None, name=None):\n return _assert_compare_to_zero(\n x, np.less, '<', summarize=summarize, message=message, name=name)\n\n\ndef _assert_non_negative(x, summarize=None, message=None, name=None):\n return _assert_compare_to_zero(\n x, np.greater_equal, '>=',\n summarize=summarize, message=message, name=name)\n\n\ndef _assert_non_positive(x, summarize=None, message=None, name=None):\n return _assert_compare_to_zero(\n x, np.less_equal, '<=', summarize=summarize, message=message, name=name)\n\n\ndef _assert_rank(x, rank, message=None, name=None): # pylint: disable=unused-argument\n return _assert_equal(x=len(np.shape(x)), y=rank, message=message)\n\n\ndef _assert_scalar(*_, **__): # pylint: disable=unused-argument\n pass\n\n\ndef _assert_integer(*_, **__): # pylint: disable=unused-argument\n pass\n\n\n@skip_assert_for_tracers\ndef _assert_near(x, y, rtol=None, atol=None,\n message=None, summarize=None, name=None): # pylint: disable=unused-argument\n \"\"\"Raises an error if abs(x - y) > atol + rtol * abs(y).\"\"\"\n del summarize\n del name\n x = convert_to_tensor(x)\n y = convert_to_tensor(y)\n rtol = rtol if rtol else 10 * np.finfo(x.dtype).eps\n atol = atol if atol else 10 * np.finfo(x.dtype).eps\n if np.any(np.abs(x - y) > atol + rtol * np.abs(y)):\n raise ValueError('x = {} and y = {} are not equal to tolerance rtol = {}, '\n 'atol = {} {}'.format(x, y, rtol, atol, message or ''))\n\n\n@skip_assert_for_tracers\ndef _assert_none_equal(x, y, summarize=None, message=None, name=None):\n del summarize\n del name\n x = convert_to_tensor(x)\n y = convert_to_tensor(y)\n if np.any(np.equal(x, y)):\n raise ValueError('Expected x != y but got {} vs {} {}'.format(\n x, y, message or ''))\n\n\ndef _assert_proper_iterable(values):\n unintentional_iterables = (Tensor, np.ndarray, bytes, six.text_type)\n if isinstance(values, unintentional_iterables):\n raise TypeError(\n 'Expected argument \"values\" to be a \"proper\" iterable. 
Found: %s' %\n type(values))\n\n if not hasattr(values, '__iter__'):\n raise TypeError(\n 'Expected argument \"values\" to be iterable. Found: %s' % type(values))\n\n\ndef _assert_rank_at_least(x, rank, message=None, name=None):\n del name\n if len(x.shape) < rank:\n raise ValueError('Expected rank at least {} but got shape {} {}'.format(\n rank, x.shape, message or ''))\n\n\ndef _assert_rank_in(*_, **__): # pylint: disable=unused-argument\n pass\n\n\n# --- Begin Public Functions --------------------------------------------------\n\n\nAssert = utils.copy_docstring( # pylint: disable=invalid-name\n 'tf.debugging.Assert',\n lambda condition, data, summarize=None, name=None: None)\n\nassert_equal = utils.copy_docstring(\n 'tf.debugging.assert_equal',\n _assert_equal)\n\nassert_greater = utils.copy_docstring(\n 'tf.debugging.assert_greater',\n _assert_greater)\n\nassert_less = utils.copy_docstring(\n 'tf.debugging.assert_less',\n _assert_less)\n\nassert_rank = utils.copy_docstring(\n 'tf.debugging.assert_rank',\n _assert_rank)\n\nassert_scalar = utils.copy_docstring(\n 'tf.debugging.assert_scalar',\n _assert_scalar)\n\nassert_greater_equal = utils.copy_docstring(\n 'tf.debugging.assert_greater_equal',\n _assert_greater_equal)\n\nassert_integer = utils.copy_docstring(\n 'tf.debugging.assert_integer',\n _assert_integer)\n\nassert_less_equal = utils.copy_docstring(\n 'tf.debugging.assert_less_equal',\n _assert_less_equal)\n\nassert_near = utils.copy_docstring(\n 'tf.debugging.assert_near',\n _assert_near)\n\nassert_negative = utils.copy_docstring(\n 'tf.debugging.assert_negative',\n _assert_negative)\n\nassert_non_negative = utils.copy_docstring(\n 'tf.debugging.assert_non_negative',\n _assert_non_negative)\n\nassert_non_positive = utils.copy_docstring(\n 'tf.debugging.assert_non_positive',\n _assert_non_positive)\n\nassert_none_equal = utils.copy_docstring(\n 'tf.debugging.assert_none_equal',\n _assert_none_equal)\n\nassert_positive = utils.copy_docstring(\n 'tf.debugging.assert_positive',\n _assert_positive)\n\nassert_proper_iterable = utils.copy_docstring(\n 'tf.debugging.assert_proper_iterable',\n _assert_proper_iterable)\n\nassert_rank_at_least = utils.copy_docstring(\n 'tf.debugging.assert_rank_at_least',\n _assert_rank_at_least)\n\nassert_rank_in = utils.copy_docstring(\n 'tf.debugging.assert_rank_in',\n _assert_rank_in)\n\ncheck_numerics = utils.copy_docstring(\n 'tf.debugging.check_numerics',\n lambda x, *_, **__: x)\n\nis_numeric_tensor = utils.copy_docstring(\n 'tf.debugging.is_numeric_tensor',\n lambda x: is_tensor(x) and np.issubdtype(x.dtype, np.number))\n",
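These numpy shims evaluate their checks eagerly and raise `ValueError` on failure, rather than returning assertion ops as graph-mode TensorFlow does. A small sketch, assuming the module is importable at the backend path listed above:

```python
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import debugging

# Passing check: returns None rather than an op.
debugging.assert_non_negative(np.array([0., 1., 2.]))

# Failing check: raises immediately with the formatted message.
try:
    debugging.assert_near(np.float64(1.0), np.float64(1.1), atol=0.01)
except ValueError as err:
    print(err)  # x = 1.0 and y = 1.1 are not equal to tolerance rtol = ..., atol = 0.01
```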
"# Copyright 2020 The TensorFlow Probability Authors. All Rights Reserved.\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n# THIS FILE IS AUTO-GENERATED BY `gen_linear_operators.py`.\n# DO NOT MODIFY DIRECTLY.\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n# pylint: disable=g-import-not-at-top\n# pylint: disable=g-direct-tensorflow-import\n# pylint: disable=g-bad-import-order\n# pylint: disable=unused-import\n# pylint: disable=line-too-long\n# pylint: disable=reimported\n# pylint: disable=g-bool-id-comparison\n# pylint: disable=g-statement-before-imports\n# pylint: disable=bad-continuation\n# pylint: disable=useless-import-alias\n# pylint: disable=property-with-parameters\n# pylint: disable=trailing-whitespace\n\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"`LinearOperator` acting like a diagonal matrix.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# [internal] enable type annotations\nfrom __future__ import print_function\n\nfrom tensorflow_probability.python.internal.backend.numpy import ops\nfrom tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops\nfrom tensorflow_probability.python.internal.backend.numpy import debugging as check_ops\nfrom tensorflow_probability.python.internal.backend.numpy import numpy_math as math_ops\nfrom tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg\nfrom tensorflow_probability.python.internal.backend.numpy.gen import linear_operator\nfrom tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_util\n# from tensorflow.python.util.tf_export import tf_export\n\n__all__ = [\"LinearOperatorDiag\",]\n\n\n# @tf_export(\"linalg.LinearOperatorDiag\")\nclass LinearOperatorDiag(linear_operator.LinearOperator):\n \"\"\"`LinearOperator` acting like a [batch] square diagonal matrix.\n\n This operator acts like a [batch] diagonal matrix `A` with shape\n `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a\n batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is\n an `N x N` matrix. This matrix `A` is not materialized, but for\n purposes of broadcasting this shape will be relevant.\n\n `LinearOperatorDiag` is initialized with a (batch) vector.\n\n ```python\n # Create a 2 x 2 diagonal linear operator.\n diag = [1., -1.]\n operator = LinearOperatorDiag(diag)\n\n operator.to_dense()\n ==> [[1., 0.]\n [0., -1.]]\n\n tensor_shape.TensorShape(operator.shape)\n ==> [2, 2]\n\n operator.log_abs_determinant()\n ==> scalar Tensor\n\n x = ... Shape [2, 4] Tensor\n operator.matmul(x)\n ==> Shape [2, 4] Tensor\n\n # Create a [2, 3] batch of 4 x 4 linear operators.\n diag = tf.random.normal(shape=[2, 3, 4])\n operator = LinearOperatorDiag(diag)\n\n # Create a shape [2, 1, 4, 2] vector. 
Note that this shape is compatible\n # since the batch dimensions, [2, 1], are broadcast to\n # operator.batch_shape = [2, 3].\n y = tf.random.normal(shape=[2, 1, 4, 2])\n x = operator.solve(y)\n ==> operator.matmul(x) = y\n ```\n\n #### Shape compatibility\n\n This operator acts on [batch] matrix with compatible shape.\n `x` is a batch matrix with compatible shape for `matmul` and `solve` if\n\n ```\n tensor_shape.TensorShape(operator.shape) = [B1,...,Bb] + [N, N], with b >= 0\n tensor_shape.TensorShape(x.shape) = [C1,...,Cc] + [N, R],\n and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]\n ```\n\n #### Performance\n\n Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`,\n and `tensor_shape.TensorShape(x.shape) = [N, R]`. Then\n\n * `operator.matmul(x)` involves `N * R` multiplications.\n * `operator.solve(x)` involves `N` divisions and `N * R` multiplications.\n * `operator.determinant()` involves a size `N` `reduce_prod`.\n\n If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and\n `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning:\n\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n \"\"\"\n\n def __init__(self,\n diag,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=\"LinearOperatorDiag\"):\n r\"\"\"Initialize a `LinearOperatorDiag`.\n\n Args:\n diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.\n The diagonal of the operator. Allowed dtypes: `float16`, `float32`,\n `float64`, `complex64`, `complex128`.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. If `diag.dtype` is real, this is auto-set to `True`.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. 
See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.\n\n Raises:\n TypeError: If `diag.dtype` is not an allowed type.\n ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.\n \"\"\"\n\n with ops.name_scope(name, values=[diag]):\n self._diag = linear_operator_util.convert_nonref_to_tensor(\n diag, name=\"diag\")\n self._check_diag(self._diag)\n\n # Check and auto-set hints.\n if not np.issubdtype(self._diag.dtype, np.complexfloating):\n if is_self_adjoint is False:\n raise ValueError(\"A real diagonal operator is always self adjoint.\")\n else:\n is_self_adjoint = True\n\n if is_square is False:\n raise ValueError(\"Only square diagonal operators currently supported.\")\n is_square = True\n\n super(LinearOperatorDiag, self).__init__(\n dtype=self._diag.dtype,\n graph_parents=None,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n # TODO(b/143910018) Remove graph_parents in V3.\n self._set_graph_parents([self._diag])\n\n def _check_diag(self, diag):\n \"\"\"Static check of diag.\"\"\"\n if tensor_shape.TensorShape(diag.shape).ndims is not None and tensor_shape.TensorShape(diag.shape).ndims < 1:\n raise ValueError(\"Argument diag must have at least 1 dimension. \"\n \"Found: %s\" % diag)\n\n def _shape(self):\n # If d_shape = [5, 3], we return [5, 3, 3].\n d_shape = tensor_shape.TensorShape(self._diag.shape)\n return d_shape.concatenate(d_shape[-1:])\n\n def _shape_tensor(self):\n d_shape = array_ops.shape(self._diag)\n k = d_shape[-1]\n return array_ops.concat((d_shape, [k]), 0)\n\n @property\n def diag(self):\n return self._diag\n\n def _assert_non_singular(self):\n return linear_operator_util.assert_no_entries_with_modulus_zero(\n self._diag,\n message=\"Singular operator: Diagonal contained zero values.\")\n\n def _assert_positive_definite(self):\n if np.issubdtype(self.dtype, np.complexfloating):\n message = (\n \"Diagonal operator had diagonal entries with non-positive real part, \"\n \"thus was not positive definite.\")\n else:\n message = (\n \"Real diagonal operator had non-positive diagonal entries, \"\n \"thus was not positive definite.\")\n\n return check_ops.assert_positive(\n math_ops.real(self._diag),\n message=message)\n\n def _assert_self_adjoint(self):\n return linear_operator_util.assert_zero_imag_part(\n self._diag,\n message=(\n \"This diagonal operator contained non-zero imaginary values. 
\"\n \" Thus it was not self-adjoint.\"))\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n diag_term = math_ops.conj(self._diag) if adjoint else self._diag\n x = linalg.adjoint(x) if adjoint_arg else x\n diag_mat = array_ops.expand_dims(diag_term, -1)\n return diag_mat * x\n\n def _matvec(self, x, adjoint=False):\n diag_term = math_ops.conj(self._diag) if adjoint else self._diag\n return diag_term * x\n\n def _determinant(self):\n return math_ops.reduce_prod(self._diag, axis=[-1])\n\n def _log_abs_determinant(self):\n log_det = math_ops.reduce_sum(\n math_ops.log(math_ops.abs(self._diag)), axis=[-1])\n if np.issubdtype(self.dtype, np.complexfloating):\n log_det = _ops.cast(log_det, dtype=self.dtype)\n return log_det\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n diag_term = math_ops.conj(self._diag) if adjoint else self._diag\n rhs = linalg.adjoint(rhs) if adjoint_arg else rhs\n inv_diag_mat = array_ops.expand_dims(1. / diag_term, -1)\n return rhs * inv_diag_mat\n\n def _to_dense(self):\n return _linalg.diag(self._diag)\n\n def _diag_part(self):\n return self.diag\n\n def _add_to_tensor(self, x):\n x_diag = _linalg.diag_part(x)\n new_diag = self._diag + x_diag\n return _linalg.set_diag(x, new_diag)\n\n def _eigvals(self):\n return ops.convert_to_tensor(self.diag)\n\n def _cond(self):\n abs_diag = math_ops.abs(self.diag)\n return (math_ops.reduce_max(abs_diag, axis=-1) /\n math_ops.reduce_min(abs_diag, axis=-1))\n\nimport numpy as np\nfrom tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg\nfrom tensorflow_probability.python.internal.backend.numpy import ops as _ops\nfrom tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape\n\nfrom tensorflow_probability.python.internal.backend.numpy import private\ndistribution_util = private.LazyLoader(\n \"distribution_util\", globals(),\n \"tensorflow_probability.python.internal._numpy.distribution_util\")\ntensorshape_util = private.LazyLoader(\n \"tensorshape_util\", globals(),\n \"tensorflow_probability.python.internal._numpy.tensorshape_util\")\n\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Trains a grammar variational auto-encoder on synthetic data.\n\nThe grammar variational auto-encoder (VAE) [1] posits a generative model over\nproductions from a context-free grammar, and it posits an amortized variational\napproximation for efficient posterior inference. We train the grammar VAE\non synthetic data using the grammar from [1] (Figure 1). Note for real data\nanalyses, one should implement a parser to convert examples into lists of\nproduction rules.\n\nThis example showcases eager execution in order to train a model where data\npoints have a variable number of time steps (that is, without padding). However,\nnote that handling a variable number of time steps requires a batch size of 1.\nIn this example, we assume data points arrive in a stream, one at a time. Such a\nsetting has an unbounded maximum length which prevents padding.\n\nSummaries are written under the flag `model_dir`. Point TensorBoard to that\ndirectory in order to monitor progress.\n\nExample output:\n\n```none\nRandom examples from synthetic data distribution:\n222N1N21c\n1c2N2C2C12C1N\nC11C12c\n2C\nNCC\n\nStep: 0 Loss: -13.724 (0.494 sec)\nStep: 500 Loss: -0.004 (145.741 sec)\nStep: 1000 Loss: -0.000 (292.205 sec)\nStep: 1500 Loss: -0.000 (438.819 sec)\n```\n\n#### References\n\n[1]: Matt J. Kusner, Brooks Paige, and Jose Miguel Hernandez-Lobato. Grammar\n Variational Autoencoder. In _International Conference on Machine Learning_,\n 2017. https://arxiv.org/abs/1703.01925\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\n\n# Dependency imports\nfrom absl import flags\nimport six\nimport tensorflow.compat.v1 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability import edward2 as ed\n\nflags.DEFINE_float(\"learning_rate\",\n default=1e-4,\n help=\"Initial learning rate.\")\nflags.DEFINE_integer(\"max_steps\",\n default=5000,\n help=\"Number of training steps to run.\")\nflags.DEFINE_integer(\"latent_size\",\n default=128,\n help=\"Number of dimensions in the latent code.\")\nflags.DEFINE_integer(\"num_units\",\n default=256,\n help=\"Number of units in the generative model's LSTM.\")\nflags.DEFINE_string(\"model_dir\",\n default=os.path.join(os.getenv(\"TEST_TMPDIR\", \"/tmp\"),\n \"grammar_vae/\"),\n help=\"Directory to put the model's fit.\")\n\nFLAGS = flags.FLAGS\n\n\nclass SmilesGrammar(object):\n \"\"\"Context-free grammar for SMILES strings.\n\n A context-free grammar is a 4-tuple consisting of the following elements:\n\n + `nonterminal_symbols`: finite set of strings.\n + `alphabet`: finite set of strings (terminal symbols). It is disjoint from\n `nonterminal_symbols`.\n + `production_rules`: list of 2-tuples. 
The first and second elements of\n each tuple respectively denote the left-hand-side and right-hand-side of a\n production rule. All right-hand-sides are written as lists, since the\n number of right-hand-side symbols may be greater than 1.\n + `start_symbol`: string, a distinct nonterminal symbol.\n \"\"\"\n\n @property\n def nonterminal_symbols(self):\n return {\"smiles\", \"chain\", \"branched atom\", \"atom\", \"ringbond\",\n \"aromatic organic\", \"aliphatic organic\", \"digit\"}\n\n @property\n def alphabet(self):\n return {\"c\", \"C\", \"N\", \"1\", \"2\"}\n\n @property\n def production_rules(self):\n return [\n (\"smiles\", [\"chain\"]),\n (\"chain\", [\"chain\", \"branched atom\"]),\n (\"chain\", [\"branched atom\"]),\n (\"branched atom\", [\"atom\", \"ringbond\"]),\n (\"branched atom\", [\"atom\"]),\n (\"atom\", [\"aromatic organic\"]),\n (\"atom\", [\"aliphatic organic\"]),\n (\"ringbond\", [\"digit\"]),\n (\"aromatic organic\", [\"c\"]),\n (\"aliphatic organic\", [\"C\"]),\n (\"aliphatic organic\", [\"N\"]),\n (\"digit\", [\"1\"]),\n (\"digit\", [\"2\"]),\n ]\n\n @property\n def start_symbol(self):\n return \"smiles\"\n\n def convert_to_string(self, productions):\n \"\"\"Converts a sequence of productions into a string of terminal symbols.\n\n Args:\n productions: Tensor of shape [1, num_productions, num_production_rules].\n Slices along the `num_productions` dimension represent one-hot vectors.\n\n Returns:\n str that concatenates all terminal symbols from `productions`.\n\n Raises:\n ValueError: If the first production rule does not begin with\n `self.start_symbol`.\n \"\"\"\n symbols = []\n for production in tf.unstack(productions, axis=1):\n lhs, rhs = self.production_rules[\n tf.argmax(input=tf.squeeze(production), axis=-1)]\n if not symbols: # first iteration\n if lhs != self.start_symbol:\n raise ValueError(\"`productions` must begin with `self.start_symbol`.\")\n symbols = rhs\n else:\n # Greedily unroll the nonterminal symbols based on the first occurrence\n # in a linear sequence.\n index = symbols.index(lhs)\n symbols = symbols[:index] + rhs + symbols[index + 1:]\n string = \"\".join(symbols)\n return string\n\n def mask(self, symbol, on_value, off_value):\n \"\"\"Produces a masking tensor for (in)valid production rules.\n\n Args:\n symbol: str, a symbol in the grammar.\n on_value: Value to use for a valid production rule.\n off_value: Value to use for an invalid production rule.\n\n Returns:\n Tensor of shape [1, num_production_rules]. An element is `on_value`\n if its corresponding production rule has `symbol` on its left-hand-side;\n the element is `off_value` otherwise.\n \"\"\"\n mask_values = [on_value if lhs == symbol else off_value\n for lhs, _ in self.production_rules]\n mask_values = tf.reshape(mask_values, [1, len(self.production_rules)])\n return mask_values\n\n\nclass ProbabilisticGrammar(tf.keras.Model):\n \"\"\"Deep generative model over productions that follow a grammar.\"\"\"\n\n def __init__(self, grammar, latent_size, num_units):\n \"\"\"Constructs a probabilistic grammar.\n\n Args:\n grammar: An object representing a grammar. 
It has members\n `nonterminal_symbols`, `alphabet`, `production_rules`, and\n `start_symbol`, and a method `mask` determining (in)valid\n production rules given a symbol.\n latent_size: Number of dimensions in the latent code.\n num_units: Number of units in the LSTM cell.\n \"\"\"\n super(ProbabilisticGrammar, self).__init__()\n self.grammar = grammar\n self.latent_size = latent_size\n self.lstm = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units)\n self.output_layer = tf.keras.layers.Dense(len(grammar.production_rules))\n\n def __call__(self, *args, **kwargs):\n inputs = 0. # fixes a dummy variable so Model can be called without inputs\n return super(ProbabilisticGrammar, self).__call__(inputs, *args, **kwargs)\n\n def call(self, inputs):\n \"\"\"Runs the model forward to generate a sequence of productions.\n\n Args:\n inputs: Unused.\n\n Returns:\n productions: Tensor of shape [1, num_productions, num_production_rules].\n Slices along the `num_productions` dimension represent one-hot vectors.\n \"\"\"\n del inputs # unused\n latent_code = ed.MultivariateNormalDiag(loc=tf.zeros(self.latent_size),\n sample_shape=1,\n name=\"latent_code\")\n state = self.lstm.zero_state(1, dtype=tf.float32)\n t = 0\n productions = []\n stack = [self.grammar.start_symbol]\n while stack:\n symbol = stack.pop()\n net, state = self.lstm(latent_code, state)\n logits = (self.output_layer(net) +\n self.grammar.mask(symbol, on_value=0., off_value=-1e9))\n production = ed.OneHotCategorical(logits=logits,\n name=\"production_\" + str(t))\n _, rhs = self.grammar.production_rules[tf.argmax(\n input=tf.squeeze(production), axis=-1)]\n for symbol in rhs:\n if symbol in self.grammar.nonterminal_symbols:\n stack.append(symbol)\n productions.append(production)\n t += 1\n return tf.stack(productions, axis=1)\n\n\nclass ProbabilisticGrammarVariational(tf.keras.Model):\n \"\"\"Amortized variational posterior for a probabilistic grammar.\"\"\"\n\n def __init__(self, latent_size):\n \"\"\"Constructs a variational posterior for a probabilistic grammar.\n\n Args:\n latent_size: Number of dimensions in the latent code.\n \"\"\"\n super(ProbabilisticGrammarVariational, self).__init__()\n self.latent_size = latent_size\n self.encoder_net = tf.keras.Sequential([\n tf.keras.layers.Conv1D(64, 3, padding=\"SAME\"),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation(tf.nn.elu),\n tf.keras.layers.Conv1D(128, 3, padding=\"SAME\"),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation(tf.nn.elu),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.GlobalAveragePooling1D(),\n tf.keras.layers.Dense(latent_size * 2, activation=None),\n ])\n\n def call(self, inputs):\n \"\"\"Runs the model forward to return a stochastic encoding.\n\n Args:\n inputs: Tensor of shape [1, num_productions, num_production_rules]. It is\n a sequence of productions of length `num_productions`. 
Each production\n is a one-hot vector of length `num_production_rules`: it determines\n which production rule the production corresponds to.\n\n Returns:\n latent_code_posterior: A random variable capturing a sample from the\n variational distribution, of shape [1, self.latent_size].\n \"\"\"\n net = self.encoder_net(tf.cast(inputs, tf.float32))\n return ed.MultivariateNormalDiag(\n loc=net[..., :self.latent_size],\n scale_diag=tf.nn.softplus(net[..., self.latent_size:]),\n name=\"latent_code_posterior\")\n\n\ndef main(argv):\n del argv # unused\n if tf.io.gfile.exists(FLAGS.model_dir):\n tf.compat.v1.logging.warning(\n \"Warning: deleting old log directory at {}\".format(FLAGS.model_dir))\n tf.io.gfile.rmtree(FLAGS.model_dir)\n tf.io.gfile.makedirs(FLAGS.model_dir)\n tf.compat.v1.enable_eager_execution()\n\n grammar = SmilesGrammar()\n synthetic_data_distribution = ProbabilisticGrammar(\n grammar=grammar, latent_size=FLAGS.latent_size, num_units=FLAGS.num_units)\n\n print(\"Random examples from synthetic data distribution:\")\n for _ in range(5):\n productions = synthetic_data_distribution()\n string = grammar.convert_to_string(productions)\n print(string)\n\n probabilistic_grammar = ProbabilisticGrammar(\n grammar=grammar, latent_size=FLAGS.latent_size, num_units=FLAGS.num_units)\n probabilistic_grammar_variational = ProbabilisticGrammarVariational(\n latent_size=FLAGS.latent_size)\n\n checkpoint = tf.train.Checkpoint(\n synthetic_data_distribution=synthetic_data_distribution,\n probabilistic_grammar=probabilistic_grammar,\n probabilistic_grammar_variational=probabilistic_grammar_variational)\n global_step = tf.compat.v1.train.get_or_create_global_step()\n optimizer = tf.compat.v1.train.AdamOptimizer(FLAGS.learning_rate)\n writer = tf.compat.v2.summary.create_file_writer(FLAGS.model_dir)\n writer.set_as_default()\n\n start_time = time.time()\n for step in range(FLAGS.max_steps):\n productions = synthetic_data_distribution()\n with tf.GradientTape() as tape:\n # Sample from amortized variational distribution and record its trace.\n with ed.tape() as variational_tape:\n _ = probabilistic_grammar_variational(productions)\n\n # Set model trace to take on the data's values and the sample from the\n # variational distribution.\n values = {\"latent_code\": variational_tape[\"latent_code_posterior\"]}\n values.update({\"production_\" + str(t): production for t, production\n in enumerate(tf.unstack(productions, axis=1))})\n with ed.tape() as model_tape:\n with ed.interception(ed.make_value_setter(**values)):\n _ = probabilistic_grammar()\n\n # Compute the ELBO given the variational sample, averaged over the batch\n # size and the number of time steps (number of productions). 
Although the\n # ELBO per data point sums over time steps, we average in order to have a\n # value that remains on the same scale across batches.\n log_likelihood = 0.\n for name, rv in six.iteritems(model_tape):\n if name.startswith(\"production\"):\n log_likelihood += rv.distribution.log_prob(rv.value)\n\n kl = tfp.distributions.kl_divergence(\n variational_tape[\"latent_code_posterior\"].distribution,\n model_tape[\"latent_code\"].distribution)\n\n timesteps = tf.cast(productions.shape[1], dtype=tf.float32)\n elbo = tf.reduce_mean(input_tensor=log_likelihood - kl) / timesteps\n loss = -elbo\n with tf.compat.v2.summary.record_if(\n lambda: tf.math.equal(0, global_step % 500)):\n tf.compat.v2.summary.scalar(\n \"log_likelihood\",\n tf.reduce_mean(input_tensor=log_likelihood) / timesteps,\n step=global_step)\n tf.compat.v2.summary.scalar(\n \"kl\", tf.reduce_mean(input_tensor=kl) / timesteps, step=global_step)\n tf.compat.v2.summary.scalar(\"elbo\", elbo, step=global_step)\n\n variables = (probabilistic_grammar.variables\n + probabilistic_grammar_variational.variables)\n grads = tape.gradient(loss, variables)\n grads_and_vars = list(zip(grads, variables))\n optimizer.apply_gradients(grads_and_vars, global_step)\n\n if step % 500 == 0:\n duration = time.time() - start_time\n print(\"Step: {:>3d} Loss: {:.3f} ({:.3f} sec)\".format(\n step, loss, duration))\n checkpoint.save(file_prefix=FLAGS.model_dir)\n\nif __name__ == \"__main__\":\n tf.compat.v1.app.run()\n",
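The generative model above unrolls the grammar with an explicit stack, masking the LSTM logits so only production rules whose left-hand side matches the popped symbol can fire. A pure-Python sketch of the same stack discipline, with a uniform random choice standing in for the learned policy; `sample_string` and `max_steps` are illustrative names, not part of the example above:

```python
import random

# The grammar from SmilesGrammar.production_rules above.
PRODUCTION_RULES = [
    ("smiles", ["chain"]),
    ("chain", ["chain", "branched atom"]),
    ("chain", ["branched atom"]),
    ("branched atom", ["atom", "ringbond"]),
    ("branched atom", ["atom"]),
    ("atom", ["aromatic organic"]),
    ("atom", ["aliphatic organic"]),
    ("ringbond", ["digit"]),
    ("aromatic organic", ["c"]),
    ("aliphatic organic", ["C"]),
    ("aliphatic organic", ["N"]),
    ("digit", ["1"]),
    ("digit", ["2"]),
]
NONTERMINALS = {lhs for lhs, _ in PRODUCTION_RULES}


def sample_string(max_steps=50):
    """Expand the start symbol left to right; max_steps caps the
    left-recursive `chain` rule so generation always terminates."""
    stack, out = ["smiles"], []
    for _ in range(max_steps):
        if not stack:
            break
        symbol = stack.pop()
        if symbol not in NONTERMINALS:
            out.append(symbol)  # terminal: emit it
            continue
        # "Mask": only rules whose left-hand side matches are valid.
        _, rhs = random.choice(
            [rule for rule in PRODUCTION_RULES if rule[0] == symbol])
        stack.extend(reversed(rhs))  # leftmost symbol expands first
    return "".join(out)


print(sample_string())  # e.g. "C1c2" -- same family as the sample output above
```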
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for the Sample distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\n\n\n@test_util.test_all_tf_execution_regimes\nclass SampleDistributionTest(test_util.TestCase):\n\n def test_everything_scalar(self):\n s = tfd.Sample(tfd.Normal(loc=0, scale=1), 5, validate_args=True)\n x = s.sample(seed=test_util.test_seed())\n actual_lp = s.log_prob(x)\n # Sample.log_prob will reduce over event space, ie, dims [0, 2]\n # corresponding to sizes concat([[5], [2]]).\n expected_lp = tf.reduce_sum(s.distribution.log_prob(x), axis=0)\n x_, actual_lp_, expected_lp_ = self.evaluate([x, actual_lp, expected_lp])\n self.assertEqual((5,), x_.shape)\n self.assertEqual((), actual_lp_.shape)\n self.assertAllClose(expected_lp_, actual_lp_, atol=0, rtol=1e-3)\n\n def test_everything_nonscalar(self):\n s = tfd.Sample(\n tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1), 1), [5, 4],\n validate_args=True)\n x = s.sample([6, 1], seed=test_util.test_seed())\n actual_lp = s.log_prob(x)\n # Sample.log_prob will reduce over event space, ie, dims [2, 3, 5]\n # corresponding to sizes concat([[5, 4], [2]]).\n expected_lp = tf.reduce_sum(\n s.distribution.log_prob(tf.transpose(a=x, perm=[0, 1, 3, 4, 2, 5])),\n axis=[2, 3])\n x_, actual_lp_, expected_lp_ = self.evaluate([x, actual_lp, expected_lp])\n self.assertEqual((6, 1, 3, 5, 4, 2), x_.shape)\n self.assertEqual((6, 1, 3), actual_lp_.shape)\n self.assertAllClose(expected_lp_, actual_lp_, atol=0, rtol=1e-3)\n\n def test_mixed_scalar(self):\n s = tfd.Sample(tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1),\n 3, validate_args=False)\n x = s.sample(4, seed=test_util.test_seed())\n lp = s.log_prob(x)\n self.assertEqual((4, 3, 2), x.shape)\n self.assertEqual((4,), lp.shape)\n\n def test_kl_divergence(self):\n q_scale = 2.\n p = tfd.Sample(\n tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1), 1), [5, 4],\n validate_args=True)\n q = tfd.Sample(\n tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=2.), 1), [5, 4],\n validate_args=True)\n actual_kl = tfd.kl_divergence(p, q)\n expected_kl = ((5 * 4) *\n (0.5 * q_scale**-2. 
- 0.5 + np.log(q_scale)) * # Actual KL.\n np.ones([3]) * 2) # Batch, events.\n self.assertAllClose(expected_kl, self.evaluate(actual_kl))\n\n def test_transformed_affine(self):\n sample_shape = 3\n mvn = tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1)\n aff = tfb.Affine(scale_tril=[[0.75, 0.],\n [0.05, 0.5]])\n\n def expected_lp(y):\n x = aff.inverse(y) # Ie, tf.random.normal([4, 3, 2])\n fldj = aff.forward_log_det_jacobian(x, event_ndims=1)\n return tf.reduce_sum(mvn.log_prob(x) - fldj, axis=1)\n\n # Transform a Sample.\n d = tfd.TransformedDistribution(\n tfd.Sample(mvn, sample_shape, validate_args=True),\n bijector=aff)\n y = d.sample(4, seed=test_util.test_seed())\n actual_lp = d.log_prob(y)\n self.assertAllEqual((4,) + (sample_shape,) + (2,), y.shape)\n self.assertAllEqual((4,), actual_lp.shape)\n self.assertAllClose(\n *self.evaluate([expected_lp(y), actual_lp]),\n atol=0., rtol=1e-3)\n\n # Sample a Transform.\n d = tfd.Sample(\n tfd.TransformedDistribution(mvn, bijector=aff),\n sample_shape,\n validate_args=True)\n y = d.sample(4, seed=test_util.test_seed())\n actual_lp = d.log_prob(y)\n self.assertAllEqual((4,) + (sample_shape,) + (2,), y.shape)\n self.assertAllEqual((4,), actual_lp.shape)\n self.assertAllClose(\n *self.evaluate([expected_lp(y), actual_lp]),\n atol=0., rtol=1e-3)\n\n def test_transformed_exp(self):\n sample_shape = 3\n mvn = tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1)\n exp = tfb.Exp()\n\n def expected_lp(y):\n x = exp.inverse(y) # Ie, tf.random.normal([4, 3, 2])\n fldj = exp.forward_log_det_jacobian(x, event_ndims=1)\n return tf.reduce_sum(mvn.log_prob(x) - fldj, axis=1)\n\n # Transform a Sample.\n d = tfd.TransformedDistribution(\n tfd.Sample(mvn, sample_shape, validate_args=True),\n bijector=exp)\n y = d.sample(4, seed=test_util.test_seed())\n actual_lp = d.log_prob(y)\n self.assertAllEqual((4,) + (sample_shape,) + (2,), y.shape)\n self.assertAllEqual((4,), actual_lp.shape)\n # If `TransformedDistribution` didn't scale the jacobian by\n # `_sample_distribution_size`, then `scale_fldj` would need to be `False`.\n self.assertAllClose(\n *self.evaluate([expected_lp(y), actual_lp]),\n atol=0., rtol=1e-3)\n\n # Sample a Transform.\n d = tfd.Sample(\n tfd.TransformedDistribution(mvn, bijector=exp),\n sample_shape,\n validate_args=True)\n y = d.sample(4, seed=test_util.test_seed())\n actual_lp = d.log_prob(y)\n self.assertAllEqual((4,) + (sample_shape,) + (2,), y.shape)\n self.assertAllEqual((4,), actual_lp.shape)\n # Regardless of whether `TransformedDistribution` scales the jacobian by\n # `_sample_distribution_size`, `scale_fldj` is `True`.\n self.assertAllClose(\n *self.evaluate([expected_lp(y), actual_lp]),\n atol=0., rtol=1e-3)\n\n @parameterized.parameters(\n 'mean',\n 'stddev',\n 'variance',\n 'mode',\n )\n def test_summary_statistic(self, attr):\n sample_shape = [5, 4]\n mvn = tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1), 1)\n d = tfd.Sample(mvn, sample_shape, validate_args=True)\n self.assertEqual((3,), d.batch_shape)\n expected_stat = (\n getattr(mvn, attr)()[:, tf.newaxis, tf.newaxis, :] *\n tf.ones([3, 5, 4, 2]))\n actual_stat = getattr(d, attr)()\n self.assertAllEqual(*self.evaluate([expected_stat, actual_stat]))\n\n def test_entropy(self):\n sample_shape = [3, 4]\n mvn = tfd.Independent(tfd.Normal(loc=0, scale=[[0.25, 0.5]]), 1)\n d = tfd.Sample(mvn, sample_shape, validate_args=True)\n expected_entropy = 12 * tf.reduce_sum(mvn.distribution.entropy(), axis=-1)\n actual_entropy = d.entropy()\n 
self.assertAllEqual(*self.evaluate([expected_entropy, actual_entropy]))\n\n @test_util.tf_tape_safety_test\n def test_gradients_through_params(self):\n loc = tf.Variable(tf.zeros([4, 5, 3]), shape=tf.TensorShape(None))\n scale = tf.Variable(tf.ones([]), shape=tf.TensorShape(None))\n # In real life, you'd really always want `sample_shape` to be\n # `trainable=False`.\n sample_shape = tf.Variable([1, 2], shape=tf.TensorShape(None))\n dist = tfd.Sample(\n tfd.Independent(tfd.Logistic(loc=loc, scale=scale),\n reinterpreted_batch_ndims=1),\n sample_shape=sample_shape,\n validate_args=True)\n with tf.GradientTape() as tape:\n loss = -dist.log_prob(0.)\n self.assertLen(dist.trainable_variables, 3)\n grad = tape.gradient(loss, [loc, scale, sample_shape])\n self.assertAllNotNone(grad[:-1])\n self.assertIs(grad[-1], None)\n\n @test_util.tf_tape_safety_test\n def test_variable_shape_change(self):\n loc = tf.Variable(tf.zeros([4, 5, 3]), shape=tf.TensorShape(None))\n scale = tf.Variable(tf.ones([]), shape=tf.TensorShape(None))\n # In real life, you'd really always want `sample_shape` to be\n # `trainable=False`.\n sample_shape = tf.Variable([1, 2], shape=tf.TensorShape(None))\n dist = tfd.Sample(\n tfd.Independent(tfd.Logistic(loc=loc, scale=scale),\n reinterpreted_batch_ndims=1),\n sample_shape=sample_shape,\n validate_args=True)\n self.evaluate([v.initializer for v in dist.trainable_variables])\n\n x = dist.mean()\n y = dist.sample([7, 2], seed=test_util.test_seed())\n loss_x = -dist.log_prob(x)\n loss_0 = -dist.log_prob(0.)\n batch_shape = dist.batch_shape_tensor()\n event_shape = dist.event_shape_tensor()\n [x_, y_, loss_x_, loss_0_, batch_shape_, event_shape_] = self.evaluate([\n x, y, loss_x, loss_0, batch_shape, event_shape])\n self.assertAllEqual([4, 5, 1, 2, 3], x_.shape)\n self.assertAllEqual([7, 2, 4, 5, 1, 2, 3], y_.shape)\n self.assertAllEqual([4, 5], loss_x_.shape)\n self.assertAllEqual([4, 5], loss_0_.shape)\n self.assertAllEqual([4, 5], batch_shape_)\n self.assertAllEqual([1, 2, 3], event_shape_)\n self.assertLen(dist.trainable_variables, 3)\n\n with tf.control_dependencies([\n loc.assign(tf.zeros([])),\n scale.assign(tf.ones([3, 1, 2])),\n sample_shape.assign(6),\n ]):\n x = dist.mean()\n y = dist.sample([7, 2], seed=test_util.test_seed())\n loss_x = -dist.log_prob(x)\n loss_0 = -dist.log_prob(0.)\n batch_shape = dist.batch_shape_tensor()\n event_shape = dist.event_shape_tensor()\n [x_, y_, loss_x_, loss_0_, batch_shape_, event_shape_] = self.evaluate([\n x, y, loss_x, loss_0, batch_shape, event_shape])\n self.assertAllEqual([3, 1, 6, 2], x_.shape)\n self.assertAllEqual([7, 2, 3, 1, 6, 2], y_.shape)\n self.assertAllEqual([3, 1], loss_x_.shape)\n self.assertAllEqual([3, 1], loss_0_.shape)\n self.assertAllEqual([3, 1], batch_shape_)\n self.assertAllEqual([6, 2], event_shape_)\n self.assertLen(dist.trainable_variables, 3)\n\n def test_variable_sample_shape_exception(self):\n loc = tf.Variable(tf.zeros([4, 5, 3]), shape=tf.TensorShape(None))\n scale = tf.Variable(tf.ones([]), shape=tf.TensorShape(None))\n sample_shape = tf.Variable([[1, 2]], shape=tf.TensorShape(None))\n with self.assertRaisesWithPredicateMatch(\n Exception,\n 'Argument `sample_shape` must be either a scalar or a vector.'):\n dist = tfd.Sample(\n tfd.Independent(tfd.Logistic(loc=loc, scale=scale),\n reinterpreted_batch_ndims=1),\n sample_shape=sample_shape,\n validate_args=True)\n self.evaluate([v.initializer for v in dist.trainable_variables])\n self.evaluate(dist.mean())\n\n\nif __name__ == '__main__':\n 
tf.test.main()\n",
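A minimal sketch (separate from the test file above; the variable names and the seed are mine) of the behavior these tests pin down: tfd.Sample folds the given sample_shape into the wrapped distribution's event shape, so log_prob sums the base log-density over those dimensions.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

base = tfd.Normal(loc=0., scale=1.)        # batch_shape=[], event_shape=[]
s = tfd.Sample(base, sample_shape=[5, 4])  # event_shape becomes [5, 4]

x = s.sample(seed=42)                      # shape [5, 4]
lp = s.log_prob(x)                         # scalar: reduces over event dims
by_hand = tf.reduce_sum(base.log_prob(x))  # same reduction done manually

This is exactly the identity `test_everything_scalar` checks, with the reduction written out explicitly rather than via transposes.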
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Base class for variational layers for building neural networks.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import random as tfp_random\nfrom tensorflow_probability.python.distributions import distribution as distribution_lib\nfrom tensorflow_probability.python.distributions import independent as independent_lib\nfrom tensorflow_probability.python.distributions import kullback_leibler as kl_lib\nfrom tensorflow_probability.python.distributions import mvn_diag as mvn_diag_lib\nfrom tensorflow_probability.python.distributions import normal as normal_lib\nfrom tensorflow_probability.python.experimental.nn import layers as layers_lib\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal.reparameterization import FULLY_REPARAMETERIZED\nfrom tensorflow_probability.python.monte_carlo import expectation\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\n\n\n__all__ = [\n 'VariationalLayer',\n]\n\n\n# The following aliases ensure docstrings read more succinctly.\ntfd = distribution_lib\n\n\ndef kl_divergence_monte_carlo(q, r, w):\n \"\"\"Monte Carlo KL Divergence.\"\"\"\n return expectation(\n lambda w: q.log_prob(w) - r.log_prob(w),\n samples=w,\n log_prob=q.log_prob,\n use_reparameterization=all(\n rt == FULLY_REPARAMETERIZED\n for rt in tf.nest.flatten(q.reparameterization_type)),\n axis=())\n\n\ndef kl_divergence_exact(q, r, w): # pylint: disable=unused-argument\n \"\"\"Exact KL Divergence.\"\"\"\n return kl_lib.kl_divergence(q, r)\n\n\ndef unpack_kernel_and_bias(weights):\n \"\"\"Returns `kernel`, `bias` tuple.\"\"\"\n if isinstance(weights, collections.Mapping):\n kernel = weights.get('kernel', None)\n bias = weights.get('bias', None)\n elif len(weights) == 1:\n kernel, bias = weights, None\n elif len(weights) == 2:\n kernel, bias = weights\n else:\n raise ValueError('Unable to unpack weights: {}.'.format(weights))\n return kernel, bias\n\n\nclass VariationalLayer(layers_lib.Layer):\n \"\"\"Base class for all variational layers.\"\"\"\n\n def __init__(\n self,\n posterior,\n prior,\n activation_fn=None,\n penalty_weight=None,\n posterior_penalty_fn=kl_divergence_monte_carlo,\n posterior_value_fn=tfd.Distribution.sample,\n seed=None,\n dtype=tf.float32,\n name=None):\n \"\"\"Base class for variational layers.\n\n # mean ==> penalty_weight = 1 / train_size\n # sum ==> penalty_weight = batch_size / train_size\n\n Args:\n posterior: ...\n prior: ...\n activation_fn: ...\n penalty_weight: ...\n posterior_penalty_fn: ...\n posterior_value_fn: ...\n seed: ...\n dtype: ...\n name: Python `str` prepeneded to ops created by this object.\n Default value: `None` (i.e., 
`type(self).__name__`).\n \"\"\"\n super(VariationalLayer, self).__init__(name=name)\n self._posterior = posterior\n self._prior = prior\n self._activation_fn = activation_fn\n self._penalty_weight = penalty_weight\n self._posterior_penalty_fn = posterior_penalty_fn\n self._posterior_value_fn = posterior_value_fn\n self._seed = SeedStream(seed, salt=self.name)\n self._dtype = dtype\n tf.nest.assert_same_structure(prior.dtype, posterior.dtype,\n check_types=False)\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def posterior(self):\n return self._posterior\n\n @property\n def prior(self):\n return self._prior\n\n @property\n def activation_fn(self):\n return self._activation_fn\n\n @property\n def penalty_weight(self):\n return self._penalty_weight\n\n @property\n def posterior_penalty_fn(self):\n return self._posterior_penalty_fn\n\n @property\n def posterior_value_fn(self):\n return self._posterior_value_fn\n\n def eval(self, inputs, is_training=True, **kwargs):\n inputs = tf.convert_to_tensor(inputs, dtype=self.dtype, name='inputs')\n w = self.posterior_value_fn(self.posterior, seed=self._seed()) # pylint: disable=not-callable\n if is_training:\n penalty = self.posterior_penalty_fn(self.posterior, self.prior, w) # pylint: disable=not-callable\n if penalty is not None and self.penalty_weight is not None:\n penalty *= tf.cast(self.penalty_weight, dtype=penalty.dtype)\n else:\n penalty = None\n outputs = self._eval(inputs, w, **kwargs)\n self._set_extra_loss(penalty)\n self._set_extra_result(w)\n return outputs\n\n def _eval(self, inputs, weights):\n raise NotImplementedError('Subclass failed to implement `_eval`.')\n\n\nclass VariationalReparameterizationKernelBiasLayer(VariationalLayer):\n \"\"\"Variational reparameterization linear layer.\"\"\"\n\n def __init__(\n self,\n posterior,\n prior,\n apply_kernel_fn,\n activation_fn=None,\n penalty_weight=None,\n posterior_penalty_fn=kl_divergence_monte_carlo,\n posterior_value_fn=tfd.Distribution.sample,\n unpack_weights_fn=unpack_kernel_and_bias,\n seed=None,\n dtype=tf.float32,\n name=None):\n super(VariationalReparameterizationKernelBiasLayer, self).__init__(\n posterior,\n prior,\n activation_fn=activation_fn,\n penalty_weight=penalty_weight,\n posterior_penalty_fn=posterior_penalty_fn,\n posterior_value_fn=posterior_value_fn,\n seed=seed,\n dtype=dtype,\n name=name)\n self._apply_kernel_fn = apply_kernel_fn\n self._unpack_weights_fn = unpack_weights_fn\n\n @property\n def unpack_weights_fn(self):\n return self._unpack_weights_fn\n\n def _eval(self, x, weights):\n kernel, bias = self.unpack_weights_fn(weights) # pylint: disable=not-callable\n y = x\n if kernel is not None:\n y = self._apply_kernel_fn(y, kernel)\n if bias is not None:\n y = y + bias\n if self.activation_fn is not None:\n y = self.activation_fn(y) # pylint: disable=not-callable\n return y\n\n\nclass VariationalFlipoutKernelBiasLayer(VariationalLayer):\n \"\"\"Variational flipout linear layer.\"\"\"\n\n def __init__(\n self,\n posterior,\n prior,\n apply_kernel_fn,\n activation_fn=None,\n penalty_weight=None,\n posterior_penalty_fn=kl_divergence_monte_carlo,\n posterior_value_fn=tfd.Distribution.sample,\n unpack_weights_fn=unpack_kernel_and_bias,\n seed=None,\n dtype=tf.float32,\n name=None):\n super(VariationalFlipoutKernelBiasLayer, self).__init__(\n posterior,\n prior,\n activation_fn=activation_fn,\n penalty_weight=penalty_weight,\n posterior_penalty_fn=posterior_penalty_fn,\n posterior_value_fn=posterior_value_fn,\n seed=seed,\n dtype=dtype,\n 
name=name)\n self._apply_kernel_fn = apply_kernel_fn\n self._unpack_weights_fn = unpack_weights_fn\n\n @property\n def unpack_weights_fn(self):\n return self._unpack_weights_fn\n\n def _eval(self, x, weights):\n kernel, bias = self.unpack_weights_fn(weights) # pylint: disable=not-callable\n y = x\n\n if kernel is not None:\n kernel_dist, _ = self.unpack_weights_fn( # pylint: disable=not-callable\n self.posterior.sample_distributions(value=weights)[0])\n kernel_loc, kernel_scale = get_spherical_normal_loc_scale(kernel_dist)\n\n # batch_size = tf.shape(x)[0]\n # sign_input_shape = ([batch_size] +\n # [1] * self._rank +\n # [self._input_channels])\n y *= tfp_random.rademacher(prefer_static.shape(y),\n dtype=y.dtype,\n seed=self._seed())\n kernel_perturb = normal_lib.Normal(loc=0., scale=kernel_scale)\n y = self._apply_kernel_fn( # E.g., tf.matmul.\n y,\n kernel_perturb.sample(seed=self._seed()))\n y *= tfp_random.rademacher(prefer_static.shape(y),\n dtype=y.dtype,\n seed=self._seed())\n y += self._apply_kernel_fn(x, kernel_loc)\n\n if bias is not None:\n y = y + bias\n\n if self.activation_fn is not None:\n y = self.activation_fn(y) # pylint: disable=not-callable\n\n return y\n\n\ndef get_spherical_normal_loc_scale(d):\n if isinstance(d, independent_lib.Independent):\n return get_spherical_normal_loc_scale(d.distribution)\n if isinstance(d, (normal_lib.Normal, mvn_diag_lib.MultivariateNormalDiag)):\n return d.loc, d.scale\n raise TypeError('Expected kernel `posterior` to be spherical Normal; '\n 'saw: \"{}\".'.format(type(d).__name__))\n",
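A minimal sketch of the Monte Carlo KL estimate that `kl_divergence_monte_carlo` defaults to as the posterior penalty. The real helper goes through `tfp.monte_carlo.expectation` with reparameterization handling; the plain sample average below is an assumed simplification of the same estimator, and the locs/scales/seed are illustrative only.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

q = tfd.Normal(loc=0.5, scale=1.)    # surrogate posterior
r = tfd.Normal(loc=0., scale=1.)     # prior
w = q.sample(10000, seed=42)         # w ~ q, as posterior_value_fn provides
kl_mc = tf.reduce_mean(q.log_prob(w) - r.log_prob(w))
kl_exact = tfd.kl_divergence(q, r)   # what kl_divergence_exact would return

For conjugate pairs like this, `kl_mc` converges to `kl_exact`; the Monte Carlo form exists for posteriors/priors with no closed-form KL.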
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for ReplicaExchangeMC.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import logging\nfrom absl.testing import parameterized\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import test_util\n\n\ntfd = tfp.distributions\n\nJAX_MODE = False\n\n\ndef init_tfp_randomwalkmetropolis(\n target_log_prob_fn,\n step_size,\n seed=None, store_parameters_in_results=False, num_leapfrog_steps=None): # pylint: disable=unused-argument\n return tfp.mcmc.RandomWalkMetropolis(\n target_log_prob_fn,\n new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=step_size),\n seed=seed)\n\n\ndef effective_sample_size(x, **kwargs):\n \"\"\"tfp.mcmc.effective_sample_size, with a maximum appropriate for HMC.\"\"\"\n # Since ESS is an estimate, it can go wrong... E.g. we can have negatively\n # correlated samples, which *do* have ESS > N, but this ESS is only applicable\n # for variance reduction power for estimation of the mean. We want to\n # (blindly) use ESS everywhere (e.g. 
variance estimates)....and so...\n ess = tfp.mcmc.effective_sample_size(x, **kwargs)\n n = tf.cast(prefer_static.size0(x), x.dtype)\n return tf.minimum(ess, n)\n\n\ndef _set_seed():\n \"\"\"Helper which uses graph seed if using TFE.\"\"\"\n # TODO(b/68017812): Deprecate once TFE supports seed.\n seed = test_util.test_seed()\n if tf.executing_eagerly() and not JAX_MODE:\n tf.random.set_seed(seed)\n return None\n return seed\n\n\n@test_util.test_graph_and_eager_modes\nclass DefaultSwapProposedFnTest(test_util.TestCase):\n\n @parameterized.named_parameters(\n ('prob1p0_n1', 1.0, 1),\n ('prob1p0_n2', 1.0, 2),\n ('prob1p0_n4', 1.0, 4),\n ('prob1p0_n5', 1.0, 5),\n ('prob0p5_n1', 0.5, 1),\n ('prob0p5_n4', 0.5, 4),\n ('prob0p5_n7', 0.5, 7),\n ('prob0p0_n1', 0.0, 1),\n ('prob0p0_n2', 0.0, 2),\n ('prob0p0_n5', 0.0, 5),\n )\n def testProbSwapNumReplicaNoBatch(self, prob_swap, num_replica):\n fn = tfp.mcmc.default_swap_proposal_fn(prob_swap)\n num_results = 100\n seeds = samplers.split_seed(test_util.test_seed(), n=num_results)\n swaps = tf.stack(\n [fn(num_replica, seed=seeds[i]) for i in range(num_results)],\n axis=0)\n\n self.assertAllEqual((num_results, num_replica), swaps.shape)\n self.check_swaps_with_no_batch_shape(self.evaluate(swaps), prob_swap)\n\n @parameterized.named_parameters(\n ('prob1p0_n1', 1.0, 1),\n ('prob1p0_n2', 1.0, 2),\n ('prob1p0_n5', 1.0, 5),\n ('prob0p5_n1', 0.5, 1),\n ('prob0p5_n2', 0.5, 2),\n ('prob0p5_n3', 0.5, 3),\n ('prob0p0_n1', 0.0, 1),\n ('prob0p0_n2', 0.0, 2),\n ('prob0p0_n5', 0.0, 5),\n )\n def testProbSwapNumReplicaWithBatch(self, prob_swap, num_replica):\n fn = tfp.mcmc.default_swap_proposal_fn(prob_swap)\n num_results = 100\n seeds = samplers.split_seed(test_util.test_seed(), n=num_results)\n swaps = tf.stack(\n [fn(num_replica, batch_shape=[2], seed=seeds[i])\n for i in range(num_results)],\n axis=0)\n\n self.assertAllEqual((num_results, num_replica, 2), swaps.shape)\n swaps_ = self.evaluate(swaps)\n\n # Batch members should have distinct swaps in most cases.\n frac_same = np.mean(swaps_[..., 0] == swaps_[..., 1])\n\n # If prob_swap == 0, swap is the null_swap always.\n if (prob_swap == 0 or\n # If num_replica == 1, swap = [0] always.\n num_replica == 1 or\n # In this case, we always swap and it's always [1, 0].\n (num_replica == 2 and prob_swap == 1)):\n self.assertEqual(1.0, frac_same)\n else:\n self.assertLess(frac_same, 0.9)\n\n # Check that each batch member has proper statistics.\n for i in range(swaps_.shape[-1]):\n self.check_swaps_with_no_batch_shape(swaps_[..., i], prob_swap)\n\n def check_swaps_with_no_batch_shape(self, swaps_, prob_swap):\n assert swaps_.ndim == 2, 'Expected shape [num_results, num_replica]'\n num_results, num_replica = swaps_.shape\n\n null_swaps = np.arange(num_replica)\n\n # Check that we propose at least one swap, prob_swap fraction of the\n # time.\n # An exception is made for when num_replica == 1, since in this case the\n # only swap is the null swap.\n expected_prob_swap = prob_swap * np.float32(num_replica > 1)\n observed_prob_swap = np.mean(np.any(swaps_ != null_swaps, axis=1))\n self.assertAllClose(\n expected_prob_swap,\n observed_prob_swap,\n rtol=0,\n # Accurate to 4 standard errors.\n atol=4 * np.sqrt(prob_swap * (1 - prob_swap) / num_results))\n\n # Verify the swap is \"once only.\"\n for n in range(20):\n self.assertAllEqual(null_swaps, np.take(swaps_[n], swaps_[n]))\n\n\n@test_util.test_graph_and_eager_modes\nclass REMCTest(test_util.TestCase):\n\n def setUp(self):\n tf.random.set_seed(123)\n super(REMCTest, 
self).setUp()\n\n @parameterized.named_parameters([\n dict( # pylint: disable=g-complex-comprehension\n testcase_name=(testcase_name + kernel_name +\n ['_fast_execute_only', '_slow_asserts'][asserts]),\n tfp_transition_kernel=tfp_transition_kernel,\n inverse_temperatures=inverse_temperatures,\n store_parameters_in_results=store_param,\n asserts=asserts)\n for asserts in [True, False]\n for kernel_name, tfp_transition_kernel, store_param in [\n ('HMC', tfp.mcmc.HamiltonianMonteCarlo, True), # NUMPY_DISABLE\n ('RWMH', init_tfp_randomwalkmetropolis, False),\n ]\n for testcase_name, inverse_temperatures in [\n ('OddNumReplicas', [1.0, 0.8, 0.6]),\n ('EvenNumReplicas', [1.0, 0.8, 0.7, 0.6]),\n ('HighTemperatureOnly', [0.5]),\n ('LowTemperatureOnly', [2.0]),\n ]\n ])\n def testNormal(self,\n tfp_transition_kernel,\n inverse_temperatures,\n store_parameters_in_results,\n asserts,\n prob_swap=1.0,\n dtype=np.float32):\n \"\"\"Sampling from standard normal with REMC.\"\"\"\n\n target = tfd.Normal(dtype(0.), dtype(1.))\n inverse_temperatures = dtype(inverse_temperatures)\n num_replica = len(inverse_temperatures)\n\n step_size = 0.51234 / np.sqrt(inverse_temperatures)\n num_leapfrog_steps = 3\n\n def make_kernel_fn(target_log_prob_fn):\n return tfp_transition_kernel(\n target_log_prob_fn=target_log_prob_fn,\n step_size=step_size,\n store_parameters_in_results=store_parameters_in_results,\n num_leapfrog_steps=num_leapfrog_steps)\n\n remc = tfp.mcmc.ReplicaExchangeMC(\n target_log_prob_fn=target.log_prob,\n inverse_temperatures=inverse_temperatures,\n make_kernel_fn=make_kernel_fn,\n swap_proposal_fn=tfp.mcmc.default_swap_proposal_fn(prob_swap))\n\n num_results = 17\n if asserts:\n num_results = 2000\n remc.one_step = tf.function(remc.one_step, autograph=False)\n\n states, kernel_results = tfp.mcmc.sample_chain(\n num_results=num_results,\n current_state=target.sample(seed=_set_seed()),\n kernel=remc,\n num_burnin_steps=50,\n trace_fn=lambda _, results: results,\n seed=_set_seed())\n\n self.assertAllEqual((num_results,), states.shape)\n\n states_, kr_, replica_ess_ = self.evaluate([\n states,\n kernel_results,\n # Get the first (and only) state part for all replicas.\n effective_sample_size(kernel_results.post_swap_replica_states[0]),\n ])\n\n logging.vlog(\n 2, '---- execution:{} mean:{} stddev:{}'.format(\n 'eager' if tf.executing_eagerly() else 'graph',\n states_.mean(), states_.std()))\n\n # Some shortened names.\n replica_log_accept_ratio = (\n kr_.post_swap_replica_results.log_accept_ratio)\n replica_states_ = kr_.post_swap_replica_states[0] # Get rid of \"parts\"\n\n # Target state is at index 0.\n self.assertAllClose(states_, replica_states_[:, 0])\n\n # Check that *each* replica has correct marginal.\n def _check_sample_stats(replica_idx):\n x = replica_states_[:, replica_idx]\n ess = replica_ess_[replica_idx]\n\n err_msg = 'replica_idx={}'.format(replica_idx)\n\n mean_atol = 6 * 1.0 / np.sqrt(ess)\n self.assertAllClose(x.mean(), 0.0, atol=mean_atol, msg=err_msg)\n\n # For a tempered Normal, Variance = T.\n expected_var = 1 / inverse_temperatures[replica_idx]\n var_atol = 6 * expected_var * np.sqrt(2) / np.sqrt(ess)\n self.assertAllClose(np.var(x), expected_var, atol=var_atol, msg=err_msg)\n\n if not asserts:\n return\n\n for replica_idx in range(num_replica):\n _check_sample_stats(replica_idx)\n\n # Test log_accept_ratio and replica_log_accept_ratio.\n self.assertAllEqual((num_results, num_replica),\n replica_log_accept_ratio.shape)\n replica_mean_accept_ratio = np.mean(\n 
np.exp(np.minimum(0, replica_log_accept_ratio)), axis=0)\n for accept_ratio in replica_mean_accept_ratio:\n # Every single replica should have a decent P[Accept]\n self.assertBetween(accept_ratio, 0.2, 0.99)\n\n # Check swap probabilities for adjacent swaps.\n self.assertAllEqual((num_results, num_replica - 1),\n kr_.is_swap_accepted_adjacent.shape)\n conditional_swap_prob = (\n np.sum(kr_.is_swap_accepted_adjacent, axis=0) /\n np.sum(kr_.is_swap_proposed_adjacent, axis=0)\n )\n if num_replica > 1 and prob_swap > 0:\n # If temperatures are reasonable, this should be the case.\n # Ideally conditional_swap_prob is near 30%, but we're not tuning here\n self.assertGreater(np.min(conditional_swap_prob), 0.01)\n self.assertLess(np.max(conditional_swap_prob), 0.99)\n\n # Check swap probabilities for all swaps.\n def _check_swap_matrix(matrix):\n self.assertAllEqual((num_results, num_replica, num_replica),\n matrix.shape)\n # Matrix is stochastic (since you either get swapped with another\n # replica, or yourself), and symmetric, since we do once-only swaps.\n self.assertAllEqual(np.ones((num_results, num_replica)),\n matrix.sum(axis=-1))\n self.assertAllEqual(matrix, np.transpose(matrix, (0, 2, 1)))\n # By default, all swaps are between adjacent replicas.\n for i in range(num_replica):\n for j in range(i + 2, num_replica):\n self.assertEqual(0.0, np.max(np.abs(matrix[..., i, j])))\n _check_swap_matrix(kr_.is_swap_proposed)\n _check_swap_matrix(kr_.is_swap_accepted)\n\n # Check inverse_temperatures never change.\n self.assertAllEqual(\n np.repeat([inverse_temperatures], axis=0, repeats=num_results),\n kr_.inverse_temperatures)\n\n if store_parameters_in_results:\n # Check that store_parameters_in_results=True worked for HMC.\n self.assertAllEqual(\n np.repeat([step_size], axis=0, repeats=num_results),\n kr_.post_swap_replica_results.accepted_results.step_size)\n\n self.assertAllEqual(\n np.repeat([num_leapfrog_steps], axis=0, repeats=num_results),\n kr_.post_swap_replica_results.accepted_results.num_leapfrog_steps)\n\n @parameterized.named_parameters([\n ('HMC', tfp.mcmc.HamiltonianMonteCarlo), # NUMPY_DISABLE\n ('RWMH', init_tfp_randomwalkmetropolis),\n ])\n def test2DMixNormal(self, tfp_transition_kernel):\n \"\"\"Sampling from a 2-D Mixture Normal Distribution.\"\"\"\n dtype = np.float32\n\n # By symmetry, target has mean [0, 0]\n # Therefore, Var = E[X^2] = E[E[X^2 | c]], where c is the component.\n # Now..., for the first component,\n # E[X1^2] = Var[X1] + Mean[X1]^2\n # = 0.3^2 + 1^2,\n # and similarly for the second. As a result, Var[mixture] = 1.09.\n target = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(probs=[0.5, 0.5]),\n components_distribution=tfd.MultivariateNormalDiag(\n loc=[[-1., -1], [1., 1.]],\n scale_identity_multiplier=0.3))\n\n inverse_temperatures = 10.**tf.linspace(start=0., stop=-1., num=4)\n # We need to pad the step_size so it broadcasts against MCMC samples. 
In\n # this case we have 1 replica dim, 0 batch dims, and 1 event dim hence need\n # to right pad the step_size by one dim (for the event).\n step_size = 0.2 / tf.math.sqrt(inverse_temperatures[:, tf.newaxis])\n def make_kernel_fn(target_log_prob_fn):\n return tfp_transition_kernel(\n target_log_prob_fn=target_log_prob_fn,\n step_size=step_size,\n num_leapfrog_steps=5)\n\n remc = tfp.mcmc.ReplicaExchangeMC(\n target_log_prob_fn=target.log_prob,\n # Verified that test fails if inverse_temperatures = [1.]\n inverse_temperatures=inverse_temperatures,\n make_kernel_fn=make_kernel_fn)\n remc.one_step = tf.function(remc.one_step, autograph=False)\n\n def trace_fn(state, results): # pylint: disable=unused-argument\n return results.post_swap_replica_results.log_accept_ratio\n\n num_results = 2000\n states, replica_log_accept_ratio = tfp.mcmc.sample_chain(\n num_results=num_results,\n # Start at one of the modes, in order to make mode jumping necessary\n # if we want to pass test.\n current_state=tf.ones(2, dtype=dtype),\n kernel=remc,\n num_burnin_steps=50,\n trace_fn=trace_fn,\n seed=test_util.test_seed())\n self.assertAllEqual((num_results, 2), states.shape)\n replica_accept_ratio = tf.reduce_mean(\n tf.math.exp(tf.minimum(0., replica_log_accept_ratio)),\n axis=0)\n\n [\n sample_mean_,\n sample_variance_,\n replica_accept_ratio_,\n expected_mean_,\n expected_stddev_,\n expected_variance_,\n ess_,\n ] = self.evaluate([\n tf.reduce_mean(states, axis=0),\n tfp.stats.variance(states),\n replica_accept_ratio,\n target.mean(),\n target.stddev(),\n target.variance(),\n effective_sample_size(states),\n ])\n\n logging.vlog(\n 2, '---- execution:{} accept_ratio:{} mean:{}'.format(\n 'eager' if tf.executing_eagerly() else 'graph',\n replica_accept_ratio_, sample_mean_))\n\n mean_atol = 6 * expected_stddev_ / np.sqrt(np.min(ess_))\n var_atol = 6 * expected_variance_ / np.sqrt(np.min(ess_))\n for i in range(mean_atol.shape[0]):\n self.assertAllClose(\n expected_mean_[i],\n sample_mean_[i],\n atol=mean_atol[i],\n msg='position {}'.format(i))\n self.assertAllClose(\n expected_variance_[i],\n sample_variance_[i],\n atol=var_atol[i],\n msg=i)\n\n @test_util.numpy_disable_gradient_test('HMC')\n def testMultipleCorrelatedStatesWithNoBatchDims(self):\n dtype = np.float32\n num_results = 2000\n true_mean = dtype([0, 0])\n true_cov = dtype([[1, 0.5], [0.5, 1]])\n # Use LinearOperatorLowerTriangular to get broadcasting ability.\n linop = tf.linalg.LinearOperatorLowerTriangular(\n tf.linalg.cholesky(true_cov))\n\n # Its ok to decorate this since we only need to stress the TransitionKernel.\n def target_log_prob(x, y):\n # Corresponds to unnormalized MVN.\n # z = matmul(inv(chol(true_cov)), [x, y] - true_mean)\n xy = tf.stack([x, y], axis=-1) - true_mean\n z = linop.solvevec(xy)\n return -0.5 * tf.reduce_sum(z**2., axis=-1)\n\n inverse_temperatures = tf.constant([1., 0.75, 0.5])\n # We need to pad the step_size so it broadcasts against MCMC samples. 
In\n # this case we have 1 replica dim, 0 batch dims, and 0 event dims (per each\n # of 2 state parts) hence no padding is needed.\n # We do however supply a step size for each state part.\n step_sizes = [0.9 / tf.math.sqrt(inverse_temperatures)]*2\n\n def make_kernel_fn(target_log_prob_fn):\n return tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n step_size=step_sizes,\n num_leapfrog_steps=3)\n\n remc = tfp.mcmc.ReplicaExchangeMC(\n target_log_prob_fn=target_log_prob,\n inverse_temperatures=inverse_temperatures,\n make_kernel_fn=make_kernel_fn)\n remc.one_step = tf.function(remc.one_step, autograph=False)\n\n def trace_fn(state, results): # pylint: disable=unused-argument\n return results.post_swap_replica_results.log_accept_ratio\n\n [samples_x, samples_y], replica_log_accept_ratio = tfp.mcmc.sample_chain(\n num_results=num_results,\n num_burnin_steps=200,\n current_state=[1., 1.],\n kernel=remc,\n trace_fn=trace_fn,\n seed=test_util.test_seed())\n samples = tf.stack([samples_x, samples_y], axis=-1)\n sample_mean = tf.reduce_mean(samples, axis=0)\n sample_cov = tfp.stats.covariance(samples, sample_axis=0)\n\n replica_accept_ratio = tf.reduce_mean(\n tf.math.exp(tf.minimum(0., replica_log_accept_ratio)),\n axis=0)\n\n [\n sample_mean_,\n sample_cov_,\n replica_accept_ratio_,\n ess_,\n ] = self.evaluate([\n sample_mean,\n sample_cov,\n replica_accept_ratio,\n effective_sample_size(samples),\n ])\n logging.vlog(\n 2, '---- execution:{} accept_ratio:{} mean:{} cov:{}'.format(\n 'eager' if tf.executing_eagerly() else 'graph',\n replica_accept_ratio_, sample_mean_, sample_cov_))\n\n self.assertAllEqual([num_results], samples_x.shape)\n self.assertAllEqual([num_results], samples_y.shape)\n\n max_scale = np.sqrt(np.max(true_cov))\n\n self.assertAllClose(\n true_mean, sample_mean_, atol=6 * max_scale / np.sqrt(np.min(ess_)))\n self.assertAllClose(\n true_cov, sample_cov_, atol=6 * max_scale**2 / np.sqrt(np.min(ess_)))\n\n @parameterized.named_parameters([\n dict( # pylint: disable=g-complex-comprehension\n testcase_name=testcase_name + kernel_name,\n tfp_transition_kernel=tfp_transition_kernel,\n inverse_temperatures=inverse_temperatures,\n step_size_fn=step_size_fn,\n ess_scaling=ess_scaling)\n for kernel_name, tfp_transition_kernel, ess_scaling in [\n ('HMC', tfp.mcmc.HamiltonianMonteCarlo, .1), # NUMPY_DISABLE\n ('RWMH', init_tfp_randomwalkmetropolis, .009),\n ]\n for testcase_name, inverse_temperatures, step_size_fn in [\n ('1DTemperatureScalarStep',\n np.float32([1.0, 0.5, 0.25]),\n lambda x: 0.5),\n ('1DTemperature1DStep',\n np.float32([1.0, 0.5, 0.25]),\n lambda x: 0.5 / np.sqrt(x).reshape(3, 1, 1)),\n ('1DTemperature2DStep',\n np.float32([1.0, 0.5, 0.25]),\n lambda x: np.stack( # pylint: disable=g-long-lambda\n [0.5 / np.sqrt(x), 0.5 / np.sqrt(x)],\n axis=-1).reshape(3, 2, 1)),\n ('2DTemperature1DStep',\n np.float32(np.stack([[1.0, 0.5, 0.25], [1.0, 0.25, 0.05]], axis=-1)),\n lambda x: 0.5 / np.sqrt( # pylint: disable=g-long-lambda\n x.mean(axis=-1).reshape(3, 1, 1))),\n ('2DTemperature2DStep',\n np.float32(np.stack([[1.0, 0.5, 0.25], [1.0, 0.25, 0.05]], axis=-1)),\n lambda x: 0.5 / np.sqrt(x).reshape(3, 2, 1))\n ]\n ])\n def test1EventDim2BatchDim3Replica(self,\n tfp_transition_kernel,\n inverse_temperatures,\n step_size_fn,\n ess_scaling):\n \"\"\"Sampling from two batch diagonal multivariate normal.\"\"\"\n step_size = (step_size_fn(inverse_temperatures) +\n np.exp(np.pi) / 100).astype(np.float32) # Prevent resonances.\n\n # Small scale and well-separated 
modes mean we need replica swap to\n # work or else tests fail.\n loc = np.array(\n [\n # Use 3-D normals, ensuring batch and event sizes don't broadcast.\n [-1., -0.5, 0.], # loc of first batch\n [1., 0.5, 0.], # loc of second batch\n ],\n dtype=np.float32)\n scale_identity_multiplier = [0.5, 0.8]\n target = tfd.MultivariateNormalDiag(\n loc=loc, scale_identity_multiplier=scale_identity_multiplier)\n\n def make_kernel_fn(target_log_prob_fn):\n return tfp_transition_kernel(\n target_log_prob_fn=target_log_prob_fn,\n step_size=step_size,\n num_leapfrog_steps=3)\n\n remc = tfp.mcmc.ReplicaExchangeMC(\n target_log_prob_fn=tf.function(target.log_prob, autograph=False),\n inverse_temperatures=inverse_temperatures,\n make_kernel_fn=make_kernel_fn)\n remc.one_step = tf.function(remc.one_step, autograph=False)\n\n def trace_fn(state, results): # pylint: disable=unused-argument\n return [\n results.post_swap_replica_results.log_accept_ratio,\n results.post_swap_replica_states\n ]\n\n num_results = 2000\n states, (log_accept_ratio, replica_states) = tfp.mcmc.sample_chain(\n num_results=num_results,\n current_state=loc[::-1], # Batch members far from their mode!\n kernel=remc,\n num_burnin_steps=100,\n trace_fn=trace_fn,\n seed=test_util.test_seed())\n\n num_replica = inverse_temperatures.shape[0]\n\n self.assertLen(replica_states, 1) # One state part\n replica_states = replica_states[0]\n\n self.assertAllEqual((num_results, num_replica) + loc.shape,\n replica_states.shape)\n self.assertAllEqual((num_results,) + loc.shape, states.shape)\n\n (\n states_,\n replica_states_,\n replica_mean_,\n replica_cov_,\n accept_probs_,\n ess_,\n ) = self.evaluate([\n states,\n replica_states,\n tf.reduce_mean(replica_states, axis=0),\n tfp.stats.covariance(replica_states),\n tf.math.exp(tf.minimum(0., log_accept_ratio)),\n effective_sample_size(replica_states),\n ])\n\n logging.vlog(\n 2, '---- execution:{} Min[ESS]: {} mean_accept: {}'.format(\n 'eager' if tf.executing_eagerly() else 'graph',\n np.min(ess_), np.mean(accept_probs_, axis=0)))\n\n self.assertAllEqual(states_, replica_states_[:, 0])\n\n def _check_stats(replica_idx, batch_idx, ess_scaling):\n err_msg = 'Failure in replica {}, batch {}'.format(replica_idx, batch_idx)\n assert inverse_temperatures.ndim in [1, 2]\n if inverse_temperatures.ndim == 1:\n temperature = 1 / inverse_temperatures[replica_idx]\n elif inverse_temperatures.ndim == 2:\n temperature = 1 / inverse_temperatures[replica_idx, batch_idx]\n\n expected_scale = (\n scale_identity_multiplier[batch_idx] * np.sqrt(temperature))\n\n ess = np.min(ess_[replica_idx, batch_idx]) # Conservative estimate.\n self.assertGreater(ess, num_results * ess_scaling, msg='Bad sampling!')\n\n self.assertAllClose(\n replica_mean_[replica_idx, batch_idx],\n loc[batch_idx],\n # 6 standard errors of a mean estimate.\n atol=6 * expected_scale / np.sqrt(ess),\n msg=err_msg)\n self.assertAllClose(\n expected_scale**2 * np.eye(loc.shape[1]),\n replica_cov_[replica_idx, batch_idx],\n # 12 standard errors of a variance estimate.\n atol=12 * np.sqrt(2) * expected_scale**2 / np.sqrt(ess),\n msg=err_msg)\n\n for replica_idx in range(num_replica):\n for batch_idx in range(loc.shape[0]):\n _check_stats(replica_idx, batch_idx, ess_scaling)\n\n @parameterized.named_parameters([dict(testcase_name='_slow_asserts',\n asserts=True),\n dict(testcase_name='_fast_execute_only',\n asserts=False)])\n @test_util.numpy_disable_gradient_test('HMC')\n def testMultipleCorrelatedStatesWithOneBatchDim(self, asserts):\n dtype = np.float32\n 
true_mean = dtype([0, 0])\n true_cov = dtype([[1, 0.5], [0.5, 1]])\n # Use LinearOperatorLowerTriangular to get broadcasting ability.\n linop = tf.linalg.LinearOperatorLowerTriangular(\n tf.linalg.cholesky(true_cov))\n\n def target_log_prob(x, y):\n # Corresponds to unnormalized MVN.\n # z = matmul(inv(chol(true_cov)), [x, y] - true_mean)\n xy = tf.stack([x, y], axis=-1) - true_mean\n z = linop.solvevec(xy)\n return -0.5 * tf.reduce_sum(z**2., axis=-1)\n\n def make_kernel_fn(target_log_prob_fn):\n return tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n step_size=[0.75, 0.75],\n num_leapfrog_steps=3)\n\n remc = tfp.mcmc.ReplicaExchangeMC(\n target_log_prob_fn=target_log_prob,\n inverse_temperatures=[1., 0.9, 0.8],\n make_kernel_fn=make_kernel_fn)\n\n num_results = 13\n if asserts:\n num_results = 2000\n remc.one_step = tf.function(remc.one_step, autograph=False)\n\n states = tfp.mcmc.sample_chain(\n num_results=num_results,\n # batch_shape = [4] for each initial state\n current_state=[tf.ones(4), tf.ones(4)],\n kernel=remc,\n num_burnin_steps=400,\n trace_fn=None,\n seed=test_util.test_seed())\n\n states = tf.stack(states, axis=-1)\n self.assertAllEqual((num_results, 4, 2), states.shape)\n\n states_, ess_, cov_ = self.evaluate([\n states,\n effective_sample_size(states),\n tfp.stats.covariance(states)\n ])\n\n if not asserts:\n return\n\n self.assertGreater(np.min(ess_), num_results / 10, 'Bad sampling found!')\n\n # 6 standard errors for mean/variance estimates.\n mean_atol = 6 / np.sqrt(np.min(ess_))\n cov_atol = 6 * np.sqrt(2) / np.sqrt(np.min(ess_))\n\n self.assertAllClose(\n true_mean, states_[:, 0, :].mean(axis=0), atol=mean_atol)\n self.assertAllClose(\n true_mean, states_[:, 1, :].mean(axis=0), atol=mean_atol)\n self.assertAllClose(true_cov, cov_[0], atol=cov_atol)\n self.assertAllClose(true_cov, cov_[1], atol=cov_atol)\n\n def testInversePermutationError(self):\n \"\"\"Using invalid `inverse_temperatures`.\"\"\"\n dtype = np.float32\n def bad_swap_fn(num_replica, batch_shape=(), seed=None): # pylint: disable=unused-argument\n return [1, 2, 0]\n remc = tfp.mcmc.ReplicaExchangeMC(\n target_log_prob_fn=tfd.Normal(loc=dtype(0), scale=dtype(1)).log_prob,\n inverse_temperatures=dtype([1., 0.5, 0.25]),\n make_kernel_fn=lambda tlp: tfp.mcmc.RandomWalkMetropolis( # pylint: disable=g-long-lambda\n target_log_prob_fn=tlp,\n new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=1.)),\n # Fun fact: of the six length-3 permutations, only two are not\n # \"one-time swap\" permutations: [1, 2, 0], [2, 0, 1]\n swap_proposal_fn=bad_swap_fn,\n validate_args=True)\n with self.assertRaisesOpError('must be.*self-inverse permutation'):\n self.evaluate(tfp.mcmc.sample_chain(\n num_results=10,\n num_burnin_steps=2,\n current_state=[dtype(1)],\n kernel=remc,\n trace_fn=None,\n seed=test_util.test_seed()))\n\n def testKernelResultsHaveCorrectShapeWhenMultipleStatesAndBatchDims(self):\n def target_log_prob(x, y):\n xy = tf.concat([x, y], axis=-1)\n return -0.5 * tf.reduce_sum(xy**2, axis=-1)\n\n def make_kernel_fn(target_log_prob_fn):\n return tfp.mcmc.RandomWalkMetropolis(\n target_log_prob_fn=target_log_prob_fn,\n new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=[0.3, 0.1]))\n\n inverse_temperatures = [1., 0.5, 0.25, 0.1]\n remc = tfp.mcmc.ReplicaExchangeMC(\n target_log_prob_fn=target_log_prob,\n inverse_temperatures=inverse_temperatures,\n make_kernel_fn=make_kernel_fn)\n\n num_results = 6\n n_batch = 5\n n_events = 3\n n_states = 2 # Set by target_log_prob.\n num_replica = 
len(inverse_temperatures)\n\n samples, kernel_results = tfp.mcmc.sample_chain(\n num_results=num_results,\n current_state=[tf.zeros((n_batch, n_events))] * n_states,\n kernel=remc,\n num_burnin_steps=2,\n trace_fn=lambda _, results: results,\n seed=test_util.test_seed())\n\n self.assertLen(samples, n_states)\n self.assertAllEqual((num_results, n_batch, n_events), samples[0].shape)\n self.assertAllEqual((num_results, n_batch, n_events), samples[1].shape)\n\n kr_ = self.evaluate(kernel_results)\n\n # Boring checks of existence/shape.\n self.assertEqual(\n (num_results, num_replica, n_batch, n_states, n_events),\n tf.stack(kr_.post_swap_replica_states, axis=-2).shape)\n\n self.assertEqual(\n (num_results, num_replica, n_batch),\n kr_.pre_swap_replica_results.log_accept_ratio.shape)\n\n self.assertEqual(\n (num_results, num_replica, n_batch),\n kr_.post_swap_replica_results.log_accept_ratio.shape)\n\n self.assertEqual(\n (num_results, num_replica, num_replica, n_batch),\n kr_.is_swap_proposed.shape)\n self.assertEqual(\n (num_results, num_replica, num_replica, n_batch),\n kr_.is_swap_accepted.shape)\n\n self.assertEqual(\n (num_results, num_replica - 1, n_batch),\n kr_.is_swap_proposed_adjacent.shape)\n self.assertEqual(\n (num_results, num_replica - 1, n_batch),\n kr_.is_swap_accepted_adjacent.shape)\n\n self.assertEqual(\n (num_results, num_replica),\n tf.stack(kr_.inverse_temperatures, axis=1).shape)\n\n self.assertEqual(\n (num_results, num_replica, n_batch),\n kr_.swaps.shape)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
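A minimal usage sketch distilled from the tests above (the step-size constant, chain lengths, and seed are arbitrary choices of mine, not values from the test file): one kernel-builder drives every replica, with the per-replica step size broadcast against `inverse_temperatures`, and `sample_chain` returns only the temperature-1 chain.

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

target = tfd.Normal(loc=0., scale=1.)
inverse_temperatures = np.float32([1.0, 0.5, 0.25])

def make_kernel_fn(target_log_prob_fn):
  # Hotter replicas (smaller inverse temperature) take larger steps.
  return tfp.mcmc.HamiltonianMonteCarlo(
      target_log_prob_fn=target_log_prob_fn,
      step_size=0.5 / np.sqrt(inverse_temperatures),
      num_leapfrog_steps=3)

remc = tfp.mcmc.ReplicaExchangeMC(
    target_log_prob_fn=target.log_prob,
    inverse_temperatures=inverse_temperatures,
    make_kernel_fn=make_kernel_fn)

samples = tfp.mcmc.sample_chain(
    num_results=500,
    current_state=tf.zeros([]),
    kernel=remc,
    num_burnin_steps=100,
    trace_fn=None,
    seed=42)   # samples.shape == (500,): the target (temperature-1) replica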
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Local Linear Trend State Space Model Tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\nfrom tensorflow_probability.python.sts import LocalLinearTrendStateSpaceModel\n\n\ntfl = tf.linalg\n\n\nclass _LocalLinearTrendStateSpaceModelTest(object):\n\n def test_logprob(self):\n\n y = self._build_placeholder([1.0, 2.5, 4.3, 6.1, 7.8])\n\n ssm = LocalLinearTrendStateSpaceModel(\n num_timesteps=5,\n level_scale=0.5,\n slope_scale=0.5,\n initial_state_prior=tfd.MultivariateNormalDiag(\n scale_diag=self._build_placeholder([1., 1.])))\n\n lp = ssm.log_prob(y[..., np.newaxis])\n expected_lp = -5.801624298095703\n self.assertAllClose(self.evaluate(lp), expected_lp)\n\n def test_stats(self):\n\n # Build a model with expected initial loc 0 and slope 1.\n level_scale = self._build_placeholder(1.0)\n slope_scale = self._build_placeholder(1.0)\n initial_state_prior = tfd.MultivariateNormalDiag(\n loc=self._build_placeholder([0, 1.]),\n scale_diag=self._build_placeholder([1., 1.]))\n\n ssm = LocalLinearTrendStateSpaceModel(\n num_timesteps=10,\n level_scale=level_scale,\n slope_scale=slope_scale,\n initial_state_prior=initial_state_prior)\n\n # In expectation, the process grows linearly.\n mean = self.evaluate(ssm.mean())\n self.assertAllClose(mean, np.arange(0, 10)[:, np.newaxis])\n\n # slope variance at time T is linear: T * slope_scale\n expected_variance = [1, 3, 8, 18, 35, 61, 98, 148, 213, 295]\n variance = self.evaluate(ssm.variance())\n self.assertAllClose(variance, np.array(expected_variance)[:, np.newaxis])\n\n def test_batch_shape(self):\n batch_shape = [4, 2]\n partial_batch_shape = [2]\n\n level_scale = self._build_placeholder(\n np.exp(np.random.randn(*partial_batch_shape)))\n slope_scale = self._build_placeholder(np.exp(np.random.randn(*batch_shape)))\n initial_state_prior = tfd.MultivariateNormalDiag(\n scale_diag=self._build_placeholder([1., 1.]))\n\n ssm = LocalLinearTrendStateSpaceModel(\n num_timesteps=10,\n level_scale=level_scale,\n slope_scale=slope_scale,\n initial_state_prior=initial_state_prior)\n self.assertAllEqual(self.evaluate(ssm.batch_shape_tensor()), batch_shape)\n\n y = ssm.sample()\n self.assertAllEqual(self.evaluate(tf.shape(y))[:-2], batch_shape)\n\n def _build_placeholder(self, ndarray):\n \"\"\"Convert a numpy array to a TF placeholder.\n\n Args:\n ndarray: any object convertible to a numpy array via `np.asarray()`.\n\n Returns:\n placeholder: a TensorFlow `placeholder` with default value given by the\n provided `ndarray`, dtype given by `self.dtype`, and shape 
specified\n statically only if `self.use_static_shape` is `True`.\n \"\"\"\n\n ndarray = np.asarray(ndarray).astype(self.dtype)\n return tf1.placeholder_with_default(\n ndarray, shape=ndarray.shape if self.use_static_shape else None)\n\n\n@test_util.test_all_tf_execution_regimes\nclass LocalLinearTrendStateSpaceModelTestStaticShape32(\n test_util.TestCase, _LocalLinearTrendStateSpaceModelTest):\n dtype = np.float32\n use_static_shape = True\n\n\n@test_util.test_all_tf_execution_regimes\nclass LocalLinearTrendStateSpaceModelTestDynamicShape32(\n test_util.TestCase, _LocalLinearTrendStateSpaceModelTest):\n dtype = np.float32\n use_static_shape = False\n\n\n@test_util.test_all_tf_execution_regimes\nclass LocalLinearTrendStateSpaceModelTestStaticShape64(\n test_util.TestCase, _LocalLinearTrendStateSpaceModelTest):\n dtype = np.float64\n use_static_shape = True\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
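A minimal construction sketch mirroring `test_stats` above (eager mode assumed; the seed is arbitrary): with an initial level of 0 and slope of 1, the model's mean grows linearly, one slope increment per timestep.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

ssm = tfp.sts.LocalLinearTrendStateSpaceModel(
    num_timesteps=10,
    level_scale=1.0,
    slope_scale=1.0,
    initial_state_prior=tfd.MultivariateNormalDiag(
        loc=[0., 1.],            # initial level 0, initial slope 1
        scale_diag=[1., 1.]))

print(ssm.mean()[:, 0])          # ~[0, 1, ..., 9]: level grows by the slope
y = ssm.sample(seed=42)          # shape [10, 1]: one observation per step
print(ssm.log_prob(y))           # scalar log-density of the whole series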
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for random variable.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability import edward2 as ed\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\n\n\nclass FakeDistribution(tfd.Distribution):\n \"\"\"Fake distribution class for testing.\"\"\"\n\n def __init__(self):\n super(FakeDistribution, self).__init__(\n dtype=None,\n reparameterization_type=tfd.FULLY_REPARAMETERIZED,\n validate_args=False,\n allow_nan_stats=True)\n\n\n@test_util.test_all_tf_execution_regimes\nclass RandomVariableTest(test_util.TestCase):\n\n def testConstructor(self):\n x = ed.RandomVariable(tfd.Poisson(rate=tf.ones([2, 5])),\n value=tf.ones([2, 5]))\n x_sample, x_value = self.evaluate([tf.convert_to_tensor(value=x), x.value])\n self.assertAllEqual(x_sample, x_value)\n with self.assertRaises(ValueError):\n _ = ed.RandomVariable(tfd.Bernoulli(probs=0.5),\n value=tf.zeros([2, 5], dtype=tf.int32))\n x = ed.RandomVariable(FakeDistribution())\n with self.assertRaises(NotImplementedError):\n _ = x.value\n\n def testGradientsFirstOrder(self):\n f = lambda x: 2. * x\n x = ed.RandomVariable(tfd.Normal(0., 1.))\n _, dydx = tfp.math.value_and_gradient(f, x)\n self.assertEqual(self.evaluate(dydx), 2.)\n\n def testGradientsSecondOrder(self):\n f = lambda x: 2. 
* x**2.\n df = lambda x: tfp.math.value_and_gradient(f, x)[1]\n x = ed.RandomVariable(tfd.Normal(0., 1.))\n _, d2ydx2 = tfp.math.value_and_gradient(df, x)\n self.assertEqual(self.evaluate(d2ydx2), 4.)\n\n def testStr(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0), value=1.234)\n if tf.executing_eagerly():\n pattern = \"RandomVariable(\\\"1.234\\\", shape=(), dtype=float32\"\n else:\n pattern = \"RandomVariable(\\\"Normal\\\", shape=(), dtype=float32\"\n regexp = re.escape(pattern)\n self.assertRegexpMatches(str(x), regexp)\n\n def testRepr(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0), value=1.234)\n if tf.executing_eagerly():\n string = (\"<ed.RandomVariable 'Normal' shape=() \"\n \"dtype=float32 numpy=1.234>\")\n else:\n string = \"<ed.RandomVariable 'Normal' shape=() dtype=float32>\"\n self.assertEqual(repr(x), string)\n\n def testNumpy(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0), value=1.23)\n if tf.executing_eagerly():\n self.assertEqual(x.numpy(), tf.constant(1.23).numpy())\n else:\n with self.assertRaises(NotImplementedError):\n _ = x.numpy()\n\n def testOperatorsAdd(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x + y\n z_value = x.value + y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsRadd(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = y + x\n z_value = y + x.value\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsSub(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x - y\n z_value = x.value - y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsRsub(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = y - x\n z_value = y - x.value\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsMul(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x * y\n z_value = x.value * y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsRmul(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = y * x\n z_value = y * x.value\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsDiv(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x / y\n z_value = x.value / y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsRdiv(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = y / x\n z_value = y / x.value\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsFloordiv(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x // y\n z_value = x.value // y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsRfloordiv(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = y // x\n z_value = y // x.value\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsMod(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x % y\n z_value = x.value % y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def 
testOperatorsRmod(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = y % x\n z_value = y % x.value\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsLt(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x < y\n z_value = x.value < y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsLe(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x <= y\n z_value = x.value <= y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsGt(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x > y\n z_value = x.value > y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsGe(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x >= y\n z_value = x.value >= y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsGetitem(self):\n x = ed.RandomVariable(tfd.Normal(tf.zeros([3, 4]), tf.ones([3, 4])))\n z = x[0:2, 2:3]\n z_value = x.value[0:2, 2:3]\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsPow(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = x ** y\n z_value = x.value ** y\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsRpow(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n z = y ** x\n z_value = y ** x.value\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsNeg(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n z = -x\n z_value = -x.value\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsAbs(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n z = abs(x)\n z_value = abs(x.value)\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testOperatorsHash(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n self.assertNotEqual(hash(x), hash(y))\n self.assertEqual(hash(x), id(x))\n\n def testOperatorsEq(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n self.assertEqual(x, x)\n\n def testOperatorsNe(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = 5.0\n self.assertNotEqual(x, y)\n\n def testOperatorsBoolNonzero(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n with self.assertRaises(TypeError):\n _ = not x\n\n def testArrayPriority(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 1.0))\n y = np.array(5.0, dtype=np.float32)\n z = y / x\n z_value = y / x.value\n z_eval, z_value_eval = self.evaluate([z, z_value])\n self.assertAllEqual(z_eval, z_value_eval)\n\n def testConvertToTensor(self):\n x = ed.RandomVariable(tfd.Normal(0.0, 0.1))\n with self.assertRaises(ValueError):\n _ = tf.convert_to_tensor(value=x, dtype=tf.int32)\n\n def testSessionEval(self):\n if tf.executing_eagerly(): return\n with self.cached_session() as sess:\n x = ed.RandomVariable(tfd.Normal(0.0, 0.1))\n x_ph = tf1.placeholder(tf.float32, [])\n y = ed.RandomVariable(tfd.Normal(x_ph, 0.1))\n self.assertLess(x.eval(), 5.0)\n self.assertLess(x.eval(sess), 5.0)\n self.assertLess(x.eval(feed_dict={x_ph: 100.0}), 5.0)\n 
self.assertGreater(y.eval(feed_dict={x_ph: 100.0}), 5.0)\n self.assertGreater(y.eval(sess, feed_dict={x_ph: 100.0}), 5.0)\n self.assertRaises(tf.errors.InvalidArgumentError, y.eval)\n self.assertRaises(tf.errors.InvalidArgumentError, y.eval, sess)\n\n def testSessionRun(self):\n if tf.executing_eagerly(): return\n with self.cached_session() as sess:\n x = ed.RandomVariable(tfd.Normal(0.0, 0.1))\n x_ph = tf1.placeholder(tf.float32, [])\n y = ed.RandomVariable(tfd.Normal(x_ph, 0.1))\n self.assertLess(sess.run(x), 5.0)\n self.assertLess(sess.run(x, feed_dict={x_ph: 100.0}), 5.0)\n self.assertGreater(sess.run(y, feed_dict={x_ph: 100.0}), 5.0)\n self.assertRaises(tf.errors.InvalidArgumentError, sess.run, y)\n\n # Note: we must defer creation of any tensors until after tf.test.main().\n # pylint: disable=g-long-lambda\n @parameterized.parameters(\n {\"rv\": lambda: ed.RandomVariable(tfd.Bernoulli(probs=0.5)),\n \"sample_shape\": [],\n \"batch_shape\": [],\n \"event_shape\": []},\n {\"rv\": lambda: ed.RandomVariable(tfd.Bernoulli(tf.zeros([2, 3]))),\n \"sample_shape\": [],\n \"batch_shape\": [2, 3],\n \"event_shape\": []},\n {\"rv\": lambda: ed.RandomVariable(tfd.Bernoulli(probs=0.5),\n sample_shape=2),\n \"sample_shape\": [2],\n \"batch_shape\": [],\n \"event_shape\": []},\n {\"rv\": lambda: ed.RandomVariable(tfd.Bernoulli(probs=0.5),\n sample_shape=[2, 1]),\n \"sample_shape\": [2, 1],\n \"batch_shape\": [],\n \"event_shape\": []},\n {\"rv\": lambda: ed.RandomVariable(tfd.Bernoulli(probs=0.5),\n sample_shape=tf.constant([2])),\n \"sample_shape\": [2],\n \"batch_shape\": [],\n \"event_shape\": []},\n {\"rv\": lambda: ed.RandomVariable(tfd.Bernoulli(probs=0.5),\n sample_shape=tf.constant([2, 4])),\n \"sample_shape\": [2, 4],\n \"batch_shape\": [],\n \"event_shape\": []},\n )\n # pylint: enable=g-long-lambda\n def testShape(self, rv, sample_shape, batch_shape, event_shape):\n rv = rv()\n self.assertEqual(rv.shape, sample_shape + batch_shape + event_shape)\n self.assertEqual(rv.shape, rv.shape)\n self.assertEqual(rv.sample_shape, sample_shape)\n self.assertEqual(rv.distribution.batch_shape, batch_shape)\n self.assertEqual(rv.distribution.event_shape, event_shape)\n\n def testRandomTensorSample(self):\n num_samples = tf.cast(tfd.Poisson(rate=5.).sample(), tf.int32)\n _ = ed.RandomVariable(tfd.Normal(loc=0.0, scale=1.0),\n sample_shape=num_samples)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
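A minimal sketch of the `ed.RandomVariable` behavior the operator tests above exercise (assumes a TFP build that still ships `edward2`; names are mine): the variable samples once at construction, Python operators delegate to that sampled `value`, and the object converts to a Tensor wherever one is expected.

import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed

tfd = tfp.distributions

x = ed.RandomVariable(tfd.Normal(0., 1.))
y = x + 5.0                  # delegates to x.value, a sampled Tensor
t = tf.convert_to_tensor(x)  # the registered conversion the tests rely on
print(x.distribution, x.value.shape, y)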
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Slicing utility for tfd.Distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport warnings\n\nimport six\nimport tensorflow.compat.v2 as tf\n\n__all__ = ['batch_slice']\n\n\n# We track the provenance of a sliced or copied distribution all the way back to\n# the arguments initially provided to the first tfd.Distribution constructor.\n# This allows us to ensure that sub-sliced and copied distributions retain the\n# gradient back to any source variables provided up-front. e.g. we want the\n# following to work:\n# v = tf.compat.v2.Variable(tf.random.uniform([]))\n# dist = tfd.Normal(v, 1)\n# with tf.GradientTape() as tape:\n# lp = dist[...].log_prob(0.)\n# dlpdv = tape.gradient(lp, v)\n# dlpdv should not be None.\nPROVENANCE_ATTR = '_tfp_batch_slice_provenance'\n\nALL_SLICE = slice(None)\n\n\ndef _slice_single_param(param, param_event_ndims, slices, dist_batch_shape):\n \"\"\"Slices a single parameter of a distribution.\n\n Args:\n param: A `Tensor`, the original parameter to slice.\n param_event_ndims: `int` event parameterization rank for this parameter.\n slices: A `tuple` of normalized slices.\n dist_batch_shape: The distribution's batch shape `Tensor`.\n\n Returns:\n new_param: A `Tensor`, batch-sliced according to slices.\n \"\"\"\n # Extend param shape with ones on the left to match dist_batch_shape.\n param_shape = tf.shape(param)\n insert_ones = tf.ones(\n [tf.size(dist_batch_shape) + param_event_ndims - tf.rank(param)],\n dtype=param_shape.dtype)\n new_param_shape = tf.concat([insert_ones, param_shape], axis=0)\n full_batch_param = tf.reshape(param, new_param_shape)\n param_slices = []\n # We separately track the batch axis from the parameter axis because we want\n # them to align for positive indexing, and be offset by param_event_ndims for\n # negative indexing.\n param_dim_idx = 0\n batch_dim_idx = 0\n for slc in slices:\n if slc is tf.newaxis:\n param_slices.append(slc)\n continue\n if slc is Ellipsis:\n if batch_dim_idx < 0:\n raise ValueError('Found multiple `...` in slices {}'.format(slices))\n param_slices.append(slc)\n # Switch over to negative indexing for the broadcast check.\n num_remaining_non_newaxis_slices = sum(\n [s is not tf.newaxis for s in slices[slices.index(Ellipsis) + 1:]])\n batch_dim_idx = -num_remaining_non_newaxis_slices\n param_dim_idx = batch_dim_idx - param_event_ndims\n continue\n # Find the batch dimension sizes for both parameter and distribution.\n param_dim_size = new_param_shape[param_dim_idx]\n batch_dim_size = dist_batch_shape[batch_dim_idx]\n is_broadcast = batch_dim_size > param_dim_size\n # Slices are denoted by start:stop:step.\n if isinstance(slc, slice):\n start, stop, step = slc.start, slc.stop, slc.step\n if start is not None:\n start = 
tf.where(is_broadcast, 0, start)\n if stop is not None:\n stop = tf.where(is_broadcast, 1, stop)\n if step is not None:\n step = tf.where(is_broadcast, 1, step)\n param_slices.append(slice(start, stop, step))\n else: # int, or int Tensor, e.g. d[d.batch_shape_tensor()[0] // 2]\n param_slices.append(tf.where(is_broadcast, 0, slc))\n param_dim_idx += 1\n batch_dim_idx += 1\n param_slices.extend([ALL_SLICE] * param_event_ndims)\n return full_batch_param.__getitem__(tuple(param_slices))\n\n\ndef _slice_params_to_dict(dist, params_event_ndims, slices):\n \"\"\"Computes the override dictionary of sliced parameters.\n\n Args:\n dist: The tfd.Distribution being batch-sliced.\n params_event_ndims: Per-event parameter ranks, a `str->int` `dict`.\n slices: Slices as received by __getitem__.\n\n Returns:\n overrides: `str->Tensor` `dict` of batch-sliced parameter overrides.\n \"\"\"\n override_dict = {}\n for param_name, param_event_ndims in six.iteritems(params_event_ndims):\n # Verify that either None or a legit value is in the parameters dict.\n if param_name not in dist.parameters:\n raise ValueError('Distribution {} is missing advertised '\n 'parameter {}'.format(dist, param_name))\n param = dist.parameters[param_name]\n if param is None:\n # some distributions have multiple possible parameterizations; this\n # param was not provided\n continue\n dtype = None\n if hasattr(dist, param_name):\n attr = getattr(dist, param_name)\n dtype = getattr(attr, 'dtype', None)\n if dtype is None:\n dtype = dist.dtype\n warnings.warn('Unable to find property getter for parameter Tensor {} '\n 'on {}, falling back to Distribution.dtype {}'.format(\n param_name, dist, dtype))\n param = tf.convert_to_tensor(value=param, dtype=dtype)\n override_dict[param_name] = _slice_single_param(param, param_event_ndims,\n slices,\n dist.batch_shape_tensor())\n return override_dict\n\n\ndef _apply_single_step(dist, params_event_ndims, slices, params_overrides):\n \"\"\"Applies a single slicing step to `dist`, returning a new instance.\"\"\"\n if len(slices) == 1 and slices[0] is Ellipsis:\n # The path used by Distribution.copy: batch_slice(...args..., Ellipsis)\n override_dict = {}\n else:\n override_dict = _slice_params_to_dict(dist, params_event_ndims, slices)\n override_dict.update(params_overrides)\n parameters = dict(dist.parameters, **override_dict)\n new_dist = type(dist)(**parameters)\n return new_dist\n\n\ndef _apply_slice_sequence(dist, params_event_ndims, slice_overrides_seq):\n \"\"\"Applies a sequence of slice or copy-with-overrides operations to `dist`.\"\"\"\n for slices, overrides in slice_overrides_seq:\n dist = _apply_single_step(dist, params_event_ndims, slices, overrides)\n return dist\n\n\ndef batch_slice(dist, params_event_ndims, params_overrides, slices):\n \"\"\"Slices `dist` along its batch dimensions. Helper for tfd.Distribution.\n\n Args:\n dist: A `tfd.Distribution` instance.\n params_event_ndims: A `dict` of `str->int` indicating the number of\n dimensions of a given parameter required to parameterize a single event.\n params_overrides: A `dict` of parameter overrides. (e.g. from\n `Distribution.copy`).\n slices: A `slice` or `int` or `int` `Tensor` or `tf.newaxis` or `tuple`\n thereof. (e.g. 
the argument of a `__getitem__` method).\n\n Returns:\n new_dist: A batch-sliced `tfd.Distribution`.\n \"\"\"\n if not isinstance(slices, collections.Sequence):\n slices = (slices,)\n # We track the history of slice and copy(**param_overrides) in order to trace\n # back to the original distribution's source variables.\n orig_dist, slice_overrides_seq = getattr(dist, PROVENANCE_ATTR, (dist, []))\n slice_overrides_seq += [(slices, params_overrides)]\n # Re-doing the full sequence of slice+copy override work here enables\n # gradients all the way back to the original distribution's arguments.\n dist = _apply_slice_sequence(orig_dist, params_event_ndims,\n slice_overrides_seq)\n setattr(dist, PROVENANCE_ATTR, (orig_dist, slice_overrides_seq))\n return dist\n",
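
A short usage sketch of the behavior batch_slice enables. Distribution.__getitem__ delegates to batch_slice, and the provenance tracking above (re-applying the whole slice/copy sequence to the original distribution) is exactly what keeps gradients connected to the originally supplied variables, as in the module-level comment. This assumes eager execution.

import tensorflow.compat.v2 as tf
from tensorflow_probability import distributions as tfd

v = tf.Variable(tf.random.uniform([10]))
dist = tfd.Normal(v, 1.)             # batch_shape == [10]
sliced = dist[2:5]                   # batch-sliced copy, batch_shape == [3]
with tf.GradientTape() as tape:
  lp = tf.reduce_sum(sliced.log_prob(0.))
# Not None: the gradient reaches the source variable through the slice.
dlpdv = tape.gradient(lp, v)
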
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom scipy import stats\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\n\n\ndef _scipy_invgauss(loc, concentration):\n # Wrapper of scipy's invgauss function, which is used to generate expected\n # output.\n # scipy uses a different parameterization.\n # See https://github.com/scipy/scipy/issues/4654.\n return stats.invgauss(mu=loc/concentration, scale=concentration)\n\n\n@test_util.test_all_tf_execution_regimes\nclass _InverseGaussianTest(object):\n\n def make_tensor(self, x):\n x = tf.cast(x, self.dtype)\n return tf1.placeholder_with_default(\n x, shape=x.shape if self.use_static_shape else None)\n\n def testInverseGaussianShape(self):\n loc = self.make_tensor([2.] * 5)\n concentration = self.make_tensor([2.] * 5)\n inverse_gaussian = tfd.InverseGaussian(\n loc, concentration, validate_args=True)\n\n self.assertEqual(self.evaluate(inverse_gaussian.batch_shape_tensor()), (5,))\n if self.use_static_shape:\n self.assertEqual(inverse_gaussian.batch_shape, tf.TensorShape([5]))\n self.assertAllEqual(self.evaluate(inverse_gaussian.event_shape_tensor()),\n [])\n self.assertEqual(inverse_gaussian.event_shape, tf.TensorShape([]))\n\n def testInverseGaussianShapeBroadcast(self):\n loc = self.make_tensor([[4.], [5.], [6.]])\n concentration = self.make_tensor([[3., 2.]])\n inverse_gaussian = tfd.InverseGaussian(\n loc, concentration, validate_args=True)\n\n self.assertAllEqual(self.evaluate(inverse_gaussian.batch_shape_tensor()),\n (3, 2))\n if self.use_static_shape:\n self.assertAllEqual(inverse_gaussian.batch_shape, tf.TensorShape([3, 2]))\n self.assertAllEqual(self.evaluate(inverse_gaussian.event_shape_tensor()),\n [])\n self.assertEqual(inverse_gaussian.event_shape, tf.TensorShape([]))\n\n def testInvalidLoc(self):\n invalid_locs = [-.01, 0., -2.]\n concentration_v = 1.\n\n for loc_v in invalid_locs:\n with self.assertRaisesOpError('`loc` must be positive'):\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n self.evaluate(inverse_gaussian.mean())\n\n def testInvalidConcentration(self):\n loc_v = 3.\n invalid_concentrations = [-.01, 0., -2.]\n\n for concentration_v in invalid_concentrations:\n with self.assertRaisesOpError('`concentration` must be positive'):\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n self.evaluate(inverse_gaussian.mean())\n\n def testInverseGaussianLogPdf(self):\n batch_size = 6\n loc_v = 2.\n concentration_v = 3.\n x_v = [3., 3.1, 4., 5., 6., 7.]\n 
inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor([loc_v] * batch_size),\n self.make_tensor([concentration_v] * batch_size),\n validate_args=True)\n\n log_prob = inverse_gaussian.log_prob(self.make_tensor(x_v))\n if self.use_static_shape:\n self.assertEqual(log_prob.shape, (6,))\n self.assertAllClose(\n self.evaluate(log_prob),\n _scipy_invgauss(loc_v, concentration_v).logpdf(x_v))\n\n pdf = inverse_gaussian.prob(self.make_tensor(x_v))\n if self.use_static_shape:\n self.assertEqual(pdf.shape, (6,))\n self.assertAllClose(\n self.evaluate(pdf),\n _scipy_invgauss(loc_v, concentration_v).pdf(x_v))\n\n def testInverseGaussianLogPdfValidateArgs(self):\n batch_size = 2\n loc = self.make_tensor([2.] * batch_size)\n concentration = self.make_tensor([2., 3.])\n x = self.make_tensor([-1., 2.])\n inverse_gaussian = tfd.InverseGaussian(loc, concentration,\n validate_args=True)\n\n with self.assertRaisesOpError('must be non-negative.'):\n self.evaluate(inverse_gaussian.log_prob(x))\n\n def testInverseGaussianPdfValidateArgs(self):\n batch_size = 2\n loc = self.make_tensor([2.] * batch_size)\n concentration = self.make_tensor([2., 3.])\n x = self.make_tensor([-1., 2.])\n inverse_gaussian = tfd.InverseGaussian(loc, concentration,\n validate_args=True)\n\n with self.assertRaisesOpError('must be non-negative.'):\n self.evaluate(inverse_gaussian.prob(x))\n\n def testInverseGaussianLogPdfMultidimensional(self):\n batch_size = 6\n loc_v = 1.\n concentration_v = [2., 4., 5.]\n x_v = np.array([[6., 7., 9.2, 5., 6., 7.]]).T\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor([[loc_v]] * batch_size),\n self.make_tensor([concentration_v] * batch_size),\n validate_args=True)\n\n log_prob = inverse_gaussian.log_prob(self.make_tensor(x_v))\n if self.use_static_shape:\n self.assertEqual(log_prob.shape, (6, 3))\n self.assertAllClose(\n self.evaluate(log_prob),\n _scipy_invgauss(loc_v, np.array(concentration_v)).logpdf(x_v))\n\n prob = inverse_gaussian.prob(self.make_tensor(x_v))\n if self.use_static_shape:\n self.assertEqual(prob.shape, (6, 3))\n self.assertAllClose(\n self.evaluate(prob),\n _scipy_invgauss(loc_v, np.array(concentration_v)).pdf(x_v))\n\n def testInverseGaussianLogCdf(self):\n batch_size = 6\n loc_v = 2.\n concentration_v = 3.\n x_v = [3., 3.1, 4., 5., 6., 7.]\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor([loc_v] * batch_size),\n self.make_tensor([concentration_v] * batch_size),\n validate_args=True)\n\n log_cdf = inverse_gaussian.log_cdf(self.make_tensor(x_v))\n if self.use_static_shape:\n self.assertEqual(log_cdf.shape, (6,))\n self.assertAllClose(\n self.evaluate(log_cdf),\n _scipy_invgauss(loc_v, concentration_v).logcdf(x_v))\n\n cdf = inverse_gaussian.cdf(self.make_tensor(x_v))\n if self.use_static_shape:\n self.assertEqual(cdf.shape, (6,))\n self.assertAllClose(\n self.evaluate(cdf),\n _scipy_invgauss(loc_v, concentration_v).cdf(x_v))\n\n # TODO(b/144948687) Avoid `nan` at boundary. Ideally we'd do this test:\n # def testInverseGaussianPdfAtBoundary(self):\n # dist = tfd.InverseGaussian(loc=1., concentration=[2., 4., 5.],\n # validate_args=True)\n # pdf = self.evaluate(dist.prob(0.))\n # log_pdf = self.evaluate(dist.log_prob(0.))\n # self.assertAllEqual(pdf, np.zeros_like(pdf))\n # self.assertTrue(np.isinf(log_pdf).all())\n\n def testInverseGaussianLogCdfValidateArgs(self):\n batch_size = 2\n loc = self.make_tensor([2.] 
* batch_size)\n concentration = self.make_tensor([2., 3.])\n x = self.make_tensor([-1., 2.])\n inverse_gaussian = tfd.InverseGaussian(loc, concentration,\n validate_args=True)\n\n with self.assertRaisesOpError('must be non-negative.'):\n self.evaluate(inverse_gaussian.log_cdf(x))\n\n def testInverseGaussianCdfValidateArgs(self):\n batch_size = 2\n loc = self.make_tensor([2.] * batch_size)\n concentration = self.make_tensor([2., 3.])\n x = self.make_tensor([-1., 2.])\n inverse_gaussian = tfd.InverseGaussian(loc, concentration,\n validate_args=True)\n\n with self.assertRaisesOpError('must be non-negative.'):\n self.evaluate(inverse_gaussian.cdf(x))\n\n def testInverseGaussianLogCdfMultidimensional(self):\n batch_size = 6\n loc_v = 1.\n concentration_v = [2., 4., 5.]\n x_v = np.array([[6., 7., 9.2, 5., 6., 7.]]).T\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor([[loc_v]] * batch_size),\n self.make_tensor([concentration_v] * batch_size),\n validate_args=True)\n\n log_cdf = inverse_gaussian.log_cdf(self.make_tensor(x_v))\n if self.use_static_shape:\n self.assertEqual(log_cdf.shape, (6, 3))\n self.assertAllClose(\n self.evaluate(log_cdf),\n _scipy_invgauss(loc_v, np.array(concentration_v)).logcdf(x_v))\n\n cdf = inverse_gaussian.cdf(self.make_tensor(x_v))\n if self.use_static_shape:\n self.assertEqual(cdf.shape, (6, 3))\n self.assertAllClose(\n self.evaluate(cdf),\n _scipy_invgauss(loc_v, np.array(concentration_v)).cdf(x_v))\n\n def testInverseGaussianMean(self):\n loc_v = [2., 3., 2.5]\n concentration_v = [1.4, 2., 2.5]\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n if self.use_static_shape:\n self.assertEqual(inverse_gaussian.mean().shape, (3,))\n self.assertAllClose(\n self.evaluate(inverse_gaussian.mean()),\n _scipy_invgauss(np.array(loc_v), np.array(concentration_v)).mean())\n\n def testInverseGaussianMeanBroadCast(self):\n loc_v = 2.\n concentration_v = [1.4, 2., 2.5]\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n if self.use_static_shape:\n self.assertEqual(inverse_gaussian.mean().shape, (3,))\n self.assertAllClose(\n self.evaluate(inverse_gaussian.mean()),\n _scipy_invgauss(np.array(loc_v), np.array(concentration_v)).mean())\n\n def testInverseGaussianVariance(self):\n loc_v = [2., 3., 2.5]\n concentration_v = [1.4, 2., 2.5]\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n\n if self.use_static_shape:\n self.assertEqual(inverse_gaussian.variance().shape, (3,))\n self.assertAllClose(\n self.evaluate(inverse_gaussian.variance()),\n _scipy_invgauss(np.array(loc_v), np.array(concentration_v)).var())\n\n def testInverseGaussianVarianceBroadcast(self):\n loc_v = 2.\n concentration_v = [1.4, 2., 2.5]\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n\n if self.use_static_shape:\n self.assertEqual(inverse_gaussian.variance().shape, (3,))\n self.assertAllClose(\n self.evaluate(inverse_gaussian.variance()),\n _scipy_invgauss(np.array(loc_v), np.array(concentration_v)).var())\n\n def testInverseGaussianSampleMean(self):\n loc_v = 3.\n concentration_v = 4.\n n = int(1e6)\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n samples = inverse_gaussian.sample(n, seed=test_util.test_seed())\n 
sample_values = self.evaluate(samples)\n\n if self.use_static_shape:\n self.assertEqual(samples.shape, (n,))\n self.assertEqual(sample_values.shape, (n,))\n self.assertAllClose(\n sample_values.mean(),\n _scipy_invgauss(loc_v, concentration_v).mean(),\n rtol=.02,\n atol=0)\n\n def testInverseGaussianSampleVariance(self):\n loc_v = 3.\n concentration_v = 4.\n n = int(1e6)\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n samples = inverse_gaussian.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n\n if self.use_static_shape:\n self.assertEqual(samples.shape, (n,))\n self.assertEqual(sample_values.shape, (n,))\n self.assertAllClose(\n sample_values.var(),\n _scipy_invgauss(loc_v, concentration_v).var(),\n rtol=.02,\n atol=0)\n\n def testInverseGaussianSampleMultidimensionalMean(self):\n loc_v = 3.\n concentration_v = np.array([np.arange(1, 11)])\n n = int(1e6)\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n samples = inverse_gaussian.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n\n if self.use_static_shape:\n self.assertEqual(samples.shape, (n, 1, 10))\n self.assertEqual(sample_values.shape, (n, 1, 10))\n self.assertAllClose(\n sample_values.mean(axis=0),\n _scipy_invgauss(loc_v, concentration_v).mean(),\n rtol=.02,\n atol=0)\n\n def testInverseGaussianSampleMultidimensionalVariance(self):\n loc_v = 3.\n concentration_v = np.array([np.arange(1, 11)])\n n = int(1e6)\n inverse_gaussian = tfd.InverseGaussian(\n self.make_tensor(loc_v),\n self.make_tensor(concentration_v),\n validate_args=True)\n samples = inverse_gaussian.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n\n if self.use_static_shape:\n self.assertEqual(samples.shape, (n, 1, 10))\n self.assertEqual(sample_values.shape, (n, 1, 10))\n self.assertAllClose(\n sample_values.var(axis=0),\n _scipy_invgauss(loc_v, concentration_v).var(),\n rtol=.02,\n atol=0)\n\n def testModifiedVariableAssertion(self):\n concentration = tf.Variable(0.9)\n loc = tf.Variable(1.2)\n self.evaluate([concentration.initializer, loc.initializer])\n inverse_gaussian = tfd.InverseGaussian(\n loc=loc, concentration=concentration, validate_args=True)\n with self.assertRaisesOpError('`concentration` must be positive'):\n with tf.control_dependencies([concentration.assign(-2.)]):\n self.evaluate(inverse_gaussian.mean())\n with self.assertRaisesOpError('`loc` must be positive'):\n with tf.control_dependencies([loc.assign(-2.), concentration.assign(2.)]):\n self.evaluate(inverse_gaussian.mean())\n\n def testSupportBijectorOutsideRange(self):\n dist = tfd.InverseGaussian(\n loc=[7., 2., 5.],\n concentration=2.,\n validate_args=True)\n eps = 1e-6\n x = np.array([[-7.2, -eps, -1.3], [-5., -12., -eps]])\n bijector_inverse_x = dist._experimental_default_event_space_bijector(\n ).inverse(x)\n self.assertAllNan(self.evaluate(bijector_inverse_x))\n\n\nclass InverseGaussianTestStaticShapeFloat32(test_util.TestCase,\n _InverseGaussianTest):\n dtype = tf.float32\n use_static_shape = True\n\n\nclass InverseGaussianTestDynamicShapeFloat32(test_util.TestCase,\n _InverseGaussianTest):\n dtype = tf.float32\n use_static_shape = False\n\n\nclass InverseGaussianTestStaticShapeFloat64(test_util.TestCase,\n _InverseGaussianTest):\n dtype = tf.float64\n use_static_shape = True\n\n\nclass 
InverseGaussianTestDynamicShapeFloat64(test_util.TestCase,\n _InverseGaussianTest):\n dtype = tf.float64\n use_static_shape = False\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
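
A standalone sketch (assuming eager execution) of the parameterization mapping that _scipy_invgauss wraps: scipy's invgauss(mu, scale) corresponds to tfd.InverseGaussian(loc, concentration) via mu = loc / concentration and scale = concentration, per the scipy issue referenced in the file above.

import numpy as np
from scipy import stats
from tensorflow_probability import distributions as tfd

loc, concentration = 2., 3.
x = np.array([3., 4., 5.], dtype=np.float32)
dist = tfd.InverseGaussian(loc, concentration, validate_args=True)
# Same density under the re-parameterized scipy distribution.
expected = stats.invgauss(mu=loc / concentration, scale=concentration).logpdf(x)
np.testing.assert_allclose(dist.log_prob(x).numpy(), expected, rtol=1e-5)
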
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for TransformedDistribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nfrom absl.testing import parameterized\n\nimport numpy as np\nfrom scipy import stats\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.internal import test_util\n\n\nclass DummyMatrixTransform(tfb.Bijector):\n \"\"\"Tractable matrix transformation.\n\n This is a non-sensical bijector that has forward/inverse_min_event_ndims=2.\n The main use is to check that transformed distribution calculations are done\n appropriately.\n \"\"\"\n\n def __init__(self):\n super(DummyMatrixTransform, self).__init__(\n forward_min_event_ndims=2,\n is_constant_jacobian=False,\n validate_args=False,\n name='dummy')\n\n def _forward(self, x):\n return x\n\n def _inverse(self, y):\n return y\n\n # Note: These jacobians don't make sense.\n def _forward_log_det_jacobian(self, x):\n return -tf.linalg.det(x)\n\n def _inverse_log_det_jacobian(self, x):\n return tf.linalg.det(x)\n\n\nclass _ChooseLocation(tfp.bijectors.Bijector):\n \"\"\"A Bijector which chooses between one of two location parameters.\"\"\"\n\n def __init__(self, loc, name='ChooseLocation'):\n with tf.name_scope(name) as name:\n self._loc = tf.convert_to_tensor(loc, name='loc')\n super(_ChooseLocation, self).__init__(\n is_constant_jacobian=True,\n validate_args=False,\n forward_min_event_ndims=0,\n name=name)\n\n def _forward(self, x, z=0.):\n return x + self._gather_loc(z)\n\n def _inverse(self, x, z=0.):\n return x - self._gather_loc(z)\n\n def _inverse_log_det_jacobian(self, x, event_ndims, z=None):\n return 0.\n\n def _gather_loc(self, z=0.):\n z = tf.convert_to_tensor(z)\n z = tf.cast((1 + z) / 2, tf.int32)\n return tf.gather(self._loc, z)\n\n\n@test_util.test_all_tf_execution_regimes\nclass TransformedDistributionTest(test_util.TestCase):\n\n def _cls(self):\n return tfd.TransformedDistribution\n\n def _make_unimplemented(self, name):\n def _unimplemented(self, *args): # pylint: disable=unused-argument\n raise NotImplementedError('{} not implemented'.format(name))\n return _unimplemented\n\n def testTransformedDistribution(self):\n mu = 3.0\n sigma = 2.0\n # Note: the Jacobian callable only works for this example; more generally\n # you may or may not need a reduce_sum.\n log_normal = self._cls()(\n distribution=tfd.Normal(loc=mu, scale=sigma),\n bijector=tfb.Exp(),\n validate_args=True)\n sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu))\n\n # sample\n 
sample = log_normal.sample(100000, seed=test_util.test_seed())\n self.assertAllEqual([], log_normal.event_shape)\n self.assertAllEqual([], self.evaluate(log_normal.event_shape_tensor()))\n self.assertAllClose(\n sp_dist.mean(), np.mean(self.evaluate(sample)), atol=0.0, rtol=0.05)\n\n # pdf, log_pdf, cdf, etc...\n # The mean of the lognormal is around 148.\n test_vals = np.linspace(0.1, 1000., num=20).astype(np.float32)\n for func in [[log_normal.log_prob, sp_dist.logpdf],\n [log_normal.prob, sp_dist.pdf],\n [log_normal.log_cdf, sp_dist.logcdf],\n [log_normal.cdf, sp_dist.cdf],\n [log_normal.survival_function, sp_dist.sf],\n [log_normal.log_survival_function, sp_dist.logsf]]:\n actual = func[0](test_vals)\n expected = func[1](test_vals)\n self.assertAllClose(\n expected, self.evaluate(actual), atol=0, rtol=0.01)\n\n def testNonInjectiveTransformedDistribution(self):\n mu = 1.\n sigma = 2.0\n abs_normal = self._cls()(\n distribution=tfd.Normal(loc=mu, scale=sigma),\n bijector=tfb.AbsoluteValue(),\n validate_args=True)\n sp_normal = stats.norm(mu, sigma)\n\n # sample\n sample = abs_normal.sample(100000, seed=test_util.test_seed())\n self.assertAllEqual([], abs_normal.event_shape)\n sample_ = self.evaluate(sample)\n self.assertAllEqual([], self.evaluate(abs_normal.event_shape_tensor()))\n\n # Abs > 0, duh!\n np.testing.assert_array_less(0, sample_)\n\n # Let X ~ Normal(mu, sigma), Y := |X|, then\n # P[Y < 0.77] = P[-0.77 < X < 0.77]\n self.assertAllClose(\n sp_normal.cdf(0.77) - sp_normal.cdf(-0.77),\n (sample_ < 0.77).mean(), rtol=0.01)\n\n # p_Y(y) = p_X(-y) + p_X(y),\n self.assertAllClose(\n sp_normal.pdf(1.13) + sp_normal.pdf(-1.13),\n self.evaluate(abs_normal.prob(1.13)))\n\n # Log[p_Y(y)] = Log[p_X(-y) + p_X(y)]\n self.assertAllClose(\n np.log(sp_normal.pdf(2.13) + sp_normal.pdf(-2.13)),\n self.evaluate(abs_normal.log_prob(2.13)))\n\n def testQuantile(self):\n logit_normal = self._cls()(\n distribution=tfd.Normal(loc=0., scale=1.),\n bijector=tfb.Sigmoid(),\n validate_args=True)\n grid = [0., 0.25, 0.5, 0.75, 1.]\n q = logit_normal.quantile(grid)\n cdf = logit_normal.cdf(q)\n cdf_ = self.evaluate(cdf)\n self.assertAllClose(grid, cdf_, rtol=1e-6, atol=0.)\n\n def testCdfDescending(self):\n td = self._cls()(\n distribution=tfd.Normal(loc=0., scale=[1., 1.]),\n bijector=tfb.Shift(shift=1.)(tfb.Scale(scale=[2., -2.])),\n validate_args=True)\n nd = tfd.Normal(loc=1., scale=2., validate_args=True)\n self.assertAllEqual(tf.ones(td.batch_shape, dtype=tf.bool),\n td.cdf(nd.quantile(.8)) < td.cdf(nd.quantile(.9)))\n\n def testCdfDescendingChained(self):\n bij1 = tfb.Shift(shift=1.)(tfb.Scale(scale=[1., -2.]))\n bij2 = tfb.Shift(shift=1.)(tfb.Scale(scale=[[3.], [-5.]]))\n bij3 = tfb.Shift(shift=1.)(tfb.Scale(scale=[[[7.]], [[-11.]]]))\n for chain in bij2(bij1), bij3(bij2(bij1)):\n td = self._cls()(\n distribution=tfd.Normal(loc=0., scale=tf.ones([2, 2, 2])),\n bijector=chain,\n validate_args=True)\n nd = tfd.Normal(loc=1., scale=3., validate_args=True)\n self.assertAllEqual(tf.ones(td.batch_shape, dtype=tf.bool),\n td.cdf(nd.quantile(.4)) < td.cdf(nd.quantile(.6)),\n msg=chain.name)\n\n def testSfDescending(self):\n td = self._cls()(\n distribution=tfd.Normal(loc=0., scale=[1., 1.]),\n bijector=tfb.Shift(shift=1.)(tfb.Scale(scale=[2., -2.])),\n validate_args=True)\n nd = tfd.Normal(loc=1., scale=2., validate_args=True)\n self.assertAllEqual(tf.ones(td.batch_shape, dtype=tf.bool),\n td.survival_function(nd.quantile(.8)) >\n td.survival_function(nd.quantile(.9)))\n\n def 
testQuantileDescending(self):\n td = self._cls()(\n distribution=tfd.Normal(loc=0., scale=[1., 1.]),\n bijector=tfb.Shift(shift=1.)(tfb.Scale(scale=[2., -2.])),\n validate_args=True)\n self.assertAllEqual(tf.ones(td.batch_shape, dtype=tf.bool),\n td.quantile(.8) < td.quantile(.9))\n\n def testCachedSamples(self):\n class ExpForwardOnly(tfb.Bijector):\n\n def __init__(self):\n super(ExpForwardOnly, self).__init__(forward_min_event_ndims=0)\n\n def _forward(self, x):\n return tf.exp(x)\n\n def _forward_log_det_jacobian(self, x):\n return tf.convert_to_tensor(x)\n\n exp_forward_only = ExpForwardOnly()\n\n mu = 3.0\n sigma = 0.02\n log_normal = self._cls()(\n distribution=tfd.Normal(loc=mu, scale=sigma),\n bijector=exp_forward_only,\n validate_args=True)\n\n sample = log_normal.sample([2, 3], seed=test_util.test_seed())\n sample_val, log_pdf_val = self.evaluate(\n [sample, log_normal.log_prob(sample)])\n expected_log_pdf = stats.lognorm.logpdf(\n sample_val, s=sigma, scale=np.exp(mu))\n self.assertAllClose(expected_log_pdf, log_pdf_val, rtol=1e-4, atol=0.)\n\n def testCachedSamplesInvert(self):\n class ExpInverseOnly(tfb.Bijector):\n\n def __init__(self):\n super(ExpInverseOnly, self).__init__(inverse_min_event_ndims=0)\n\n def _inverse(self, y):\n return tf.math.log(y)\n\n def _inverse_log_det_jacobian(self, y):\n return -tf.math.log(y)\n\n exp_inverse_only = ExpInverseOnly()\n\n log_forward_only = tfb.Invert(exp_inverse_only)\n\n # The log bijector isn't defined over the whole real line, so we make\n # sigma sufficiently small so that the draws are positive.\n mu = 2.\n sigma = 1e-2\n exp_normal = self._cls()(\n distribution=tfd.Normal(loc=mu, scale=sigma),\n bijector=log_forward_only,\n validate_args=True)\n\n sample = exp_normal.sample(\n [2, 3], seed=test_util.test_seed(hardcoded_seed=42))\n sample_val, log_pdf_val = self.evaluate(\n [sample, exp_normal.log_prob(sample)])\n expected_log_pdf = sample_val + stats.norm.logpdf(\n np.exp(sample_val), loc=mu, scale=sigma)\n self.assertAllClose(expected_log_pdf, log_pdf_val, atol=0., rtol=1e-5)\n\n def testShapeChangingBijector(self):\n softmax = tfb.SoftmaxCentered()\n standard_normal = tfd.Normal(loc=0., scale=1.)\n multi_logit_normal = self._cls()(\n distribution=standard_normal,\n bijector=softmax,\n event_shape=[1],\n validate_args=True)\n x = [[[-np.log(3.)], [0.]], [[np.log(3)], [np.log(5)]]]\n x = np.float32(x)\n y = self.evaluate(softmax.forward(x))\n expected_log_pdf = -0.5 * np.log(2) + (\n np.squeeze(stats.norm(loc=0., scale=1.).logpdf(x)) -\n np.sum(np.log(y), axis=-1))\n self.assertAllClose(expected_log_pdf,\n self.evaluate(multi_logit_normal.log_prob(y)))\n self.assertAllClose(\n [1, 2, 3, 2],\n self.evaluate(tf.shape(multi_logit_normal.sample(\n [1, 2, 3], seed=test_util.test_seed()))))\n self.assertAllEqual([2], multi_logit_normal.event_shape)\n self.assertAllEqual([2],\n self.evaluate(multi_logit_normal.event_shape_tensor()))\n\n def testCastLogDetJacobian(self):\n \"\"\"Test log_prob when Jacobian and log_prob dtypes do not match.\"\"\"\n\n # Create an identity bijector whose jacobians have dtype int32\n int_identity = tfb.Inline(\n forward_fn=tf.identity,\n inverse_fn=tf.identity,\n inverse_log_det_jacobian_fn=(lambda y: tf.cast(0, tf.int32)),\n forward_log_det_jacobian_fn=(lambda x: tf.cast(0, tf.int32)),\n forward_min_event_ndims=0,\n is_constant_jacobian=True)\n normal = self._cls()(\n distribution=tfd.Normal(loc=0., scale=1.),\n bijector=int_identity,\n validate_args=True)\n\n y = 
normal.sample(seed=test_util.test_seed())\n self.evaluate(normal.log_prob(y))\n self.evaluate(normal.prob(y))\n self.evaluate(normal.mean())\n self.evaluate(normal.entropy())\n\n def testMode(self):\n dist = self._cls()(\n tfd.Beta(\n concentration1=[5., 10.],\n concentration0=15.,\n validate_args=True),\n tfb.Shift(2., validate_args=True)(tfb.Scale(10., validate_args=True)),\n validate_args=True)\n self.assertAllClose(2. + 10. * dist.distribution.mode(),\n self.evaluate(dist.mode()),\n atol=0., rtol=1e-6)\n\n def testMean(self):\n shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)\n diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)\n fake_mvn = self._cls()(\n tfd.MultivariateNormalDiag(\n loc=tf.zeros_like(shift),\n scale_diag=tf.ones_like(diag),\n validate_args=True),\n tfb.AffineLinearOperator(\n shift,\n scale=tf.linalg.LinearOperatorDiag(diag, is_non_singular=True),\n validate_args=True),\n validate_args=True)\n self.assertAllClose(shift, self.evaluate(fake_mvn.mean()))\n\n def testMeanShapeOverride(self):\n shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)\n diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)\n fake_mvn = self._cls()(\n tfd.Normal(loc=0.0, scale=1.0),\n tfb.AffineLinearOperator(\n shift,\n scale=tf.linalg.LinearOperatorDiag(diag, is_non_singular=True),\n validate_args=True),\n batch_shape=[2],\n event_shape=[3],\n validate_args=True)\n self.assertAllClose(shift, self.evaluate(fake_mvn.mean()))\n\n def testEntropy(self):\n shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)\n diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)\n actual_mvn_entropy = np.concatenate(\n [[stats.multivariate_normal(shift[i], np.diag(diag[i]**2)).entropy()]\n for i in range(len(diag))])\n fake_mvn = self._cls()(\n tfd.MultivariateNormalDiag(\n loc=tf.zeros_like(shift),\n scale_diag=tf.ones_like(diag),\n validate_args=True),\n tfb.AffineLinearOperator(\n shift,\n scale=tf.linalg.LinearOperatorDiag(diag, is_non_singular=True),\n validate_args=True),\n validate_args=True)\n self.assertAllClose(actual_mvn_entropy, self.evaluate(fake_mvn.entropy()))\n\n def testScalarBatchScalarEventIdentityScale(self):\n exp2 = self._cls()(\n tfd.Exponential(rate=0.25),\n bijector=tfb.Scale(scale=2.),\n validate_args=True)\n log_prob = exp2.log_prob(1.)\n log_prob_ = self.evaluate(log_prob)\n base_log_prob = -0.5 * 0.25 + np.log(0.25)\n ildj = np.log(2.)\n self.assertAllClose(base_log_prob - ildj, log_prob_, rtol=1e-6, atol=0.)\n\n\n@test_util.test_all_tf_execution_regimes\nclass ScalarToMultiTest(test_util.TestCase):\n\n def _cls(self):\n return tfd.TransformedDistribution\n\n def setUp(self):\n self._shift = np.array([-1, 0, 1], dtype=np.float32)\n self._tril = np.array([[[1., 0, 0],\n [2, 1, 0],\n [3, 2, 1]],\n [[2, 0, 0],\n [3, 2, 0],\n [4, 3, 2]]],\n dtype=np.float32)\n super(ScalarToMultiTest, self).setUp()\n\n def _testMVN(self,\n base_distribution_class,\n base_distribution_kwargs,\n batch_shape=(),\n event_shape=(),\n not_implemented_message=None):\n # Overriding shapes must be compatible w/bijector; most bijectors are\n # batch_shape agnostic and only care about event_ndims.\n # In the case of `Affine`, if we got it wrong then it would fire an\n # exception due to incompatible dimensions.\n batch_shape_var = tf.Variable(\n np.int32(batch_shape),\n shape=tf.TensorShape(None),\n name='dynamic_batch_shape')\n event_shape_var = tf.Variable(\n np.int32(event_shape),\n shape=tf.TensorShape(None),\n name='dynamic_event_shape')\n\n fake_mvn_dynamic = 
self._cls()(\n distribution=base_distribution_class(\n validate_args=True, **base_distribution_kwargs),\n bijector=tfb.Affine(shift=self._shift, scale_tril=self._tril),\n batch_shape=batch_shape_var,\n event_shape=event_shape_var,\n validate_args=True)\n\n fake_mvn_static = self._cls()(\n distribution=base_distribution_class(\n validate_args=True, **base_distribution_kwargs),\n bijector=tfb.Affine(shift=self._shift, scale_tril=self._tril),\n batch_shape=batch_shape,\n event_shape=event_shape,\n validate_args=True)\n\n actual_mean = np.tile(self._shift, [2, 1]) # Affine elided this tile.\n actual_cov = np.matmul(self._tril, np.transpose(self._tril, [0, 2, 1]))\n\n def actual_mvn_log_prob(x):\n return np.concatenate([[ # pylint: disable=g-complex-comprehension\n stats.multivariate_normal(actual_mean[i],\n actual_cov[i]).logpdf(x[:, i, :])\n ] for i in range(len(actual_cov))]).T\n\n actual_mvn_entropy = np.concatenate(\n [[stats.multivariate_normal(actual_mean[i], actual_cov[i]).entropy()]\n for i in range(len(actual_cov))])\n\n self.assertAllEqual([3], fake_mvn_static.event_shape)\n self.assertAllEqual([2], fake_mvn_static.batch_shape)\n\n if not tf.executing_eagerly():\n self.assertAllEqual(tf.TensorShape(None), fake_mvn_dynamic.event_shape)\n self.assertAllEqual(tf.TensorShape(None), fake_mvn_dynamic.batch_shape)\n\n x = self.evaluate(fake_mvn_static.sample(5, seed=test_util.test_seed()))\n for unsupported_fn in (fake_mvn_static.log_cdf, fake_mvn_static.cdf,\n fake_mvn_static.survival_function,\n fake_mvn_static.log_survival_function):\n with self.assertRaisesRegexp(NotImplementedError,\n not_implemented_message):\n unsupported_fn(x)\n\n num_samples = 7e3\n for fake_mvn in [fake_mvn_static, fake_mvn_dynamic]:\n # Ensure sample works by checking first, second moments.\n y = fake_mvn.sample(int(num_samples), seed=test_util.test_seed())\n x = y[0:5, ...]\n sample_mean = tf.reduce_mean(y, axis=0)\n centered_y = tf.transpose(a=y - sample_mean, perm=[1, 2, 0])\n sample_cov = tf.matmul(\n centered_y, centered_y, transpose_b=True) / num_samples\n self.evaluate([batch_shape_var.initializer, event_shape_var.initializer])\n [\n sample_mean_,\n sample_cov_,\n x_,\n fake_event_shape_,\n fake_batch_shape_,\n fake_log_prob_,\n fake_prob_,\n fake_mean_,\n fake_entropy_,\n ] = self.evaluate([\n sample_mean,\n sample_cov,\n x,\n fake_mvn.event_shape_tensor(),\n fake_mvn.batch_shape_tensor(),\n fake_mvn.log_prob(x),\n fake_mvn.prob(x),\n fake_mvn.mean(),\n fake_mvn.entropy(),\n ])\n\n self.assertAllClose(actual_mean, sample_mean_, atol=0.1, rtol=0.1)\n self.assertAllClose(actual_cov, sample_cov_, atol=0., rtol=0.1)\n\n # Ensure all other functions work as intended.\n self.assertAllEqual([5, 2, 3], x_.shape)\n self.assertAllEqual([3], fake_event_shape_)\n self.assertAllEqual([2], fake_batch_shape_)\n self.assertAllClose(\n actual_mvn_log_prob(x_), fake_log_prob_, atol=0., rtol=1e-6)\n self.assertAllClose(\n np.exp(actual_mvn_log_prob(x_)), fake_prob_, atol=0., rtol=1e-5)\n self.assertAllClose(actual_mean, fake_mean_, atol=0., rtol=1e-6)\n self.assertAllClose(actual_mvn_entropy, fake_entropy_, atol=0., rtol=1e-6)\n\n def testScalarBatchScalarEvent(self):\n self._testMVN(\n base_distribution_class=tfd.Normal,\n base_distribution_kwargs={\n 'loc': 0.,\n 'scale': 1.\n },\n batch_shape=[2],\n event_shape=[3],\n not_implemented_message='not implemented when overriding `event_shape`')\n\n def testScalarBatchNonScalarEvent(self):\n self._testMVN(\n base_distribution_class=tfd.MultivariateNormalDiag,\n 
base_distribution_kwargs={\n 'loc': [0., 0., 0.],\n 'scale_diag': [1., 1, 1]\n },\n batch_shape=[2],\n not_implemented_message='not implemented')\n\n # Can't override event_shape for scalar batch, non-scalar event.\n with self.assertRaisesWithPredicateMatch(\n Exception, 'Base distribution is not scalar.'):\n\n self._cls()(\n distribution=tfd.MultivariateNormalDiag(loc=[0.], scale_diag=[1.]),\n bijector=tfb.Affine(shift=self._shift, scale_tril=self._tril),\n batch_shape=[2],\n event_shape=[3],\n validate_args=True)\n\n def testNonScalarBatchScalarEvent(self):\n\n self._testMVN(\n base_distribution_class=tfd.Normal,\n base_distribution_kwargs={\n 'loc': [0., 0],\n 'scale': [1., 1]\n },\n event_shape=[3],\n not_implemented_message='not implemented when overriding'\n ' `event_shape`')\n\n # Can't override batch_shape for non-scalar batch, scalar event.\n with self.assertRaisesWithPredicateMatch(\n Exception, 'Base distribution is not scalar.'):\n self._cls()(\n distribution=tfd.Normal(loc=[0.], scale=[1.]),\n bijector=tfb.Affine(shift=self._shift, scale_tril=self._tril),\n batch_shape=[2],\n event_shape=[3],\n validate_args=True)\n\n def testNonScalarBatchNonScalarEvent(self):\n # Can't override event_shape and/or batch_shape for non_scalar batch,\n # non-scalar event.\n with self.assertRaisesRegexp(ValueError, 'Base distribution is not scalar'):\n self._cls()(\n distribution=tfd.MultivariateNormalDiag(\n loc=[[0.]], scale_diag=[[1.]]),\n bijector=tfb.Affine(shift=self._shift, scale_tril=self._tril),\n batch_shape=[2],\n event_shape=[3],\n validate_args=True)\n\n def testMatrixEvent(self):\n batch_shape = [2]\n event_shape = [2, 3, 3]\n batch_shape_var = tf.Variable(\n np.int32(batch_shape),\n shape=tf.TensorShape(None),\n name='dynamic_batch_shape')\n event_shape_var = tf.Variable(\n np.int32(event_shape),\n shape=tf.TensorShape(None),\n name='dynamic_event_shape')\n\n scale = 2.\n loc = 0.\n fake_mvn_dynamic = self._cls()(\n distribution=tfd.Normal(loc=loc, scale=scale),\n bijector=DummyMatrixTransform(),\n batch_shape=batch_shape_var,\n event_shape=event_shape_var,\n validate_args=True)\n\n fake_mvn_static = self._cls()(\n distribution=tfd.Normal(loc=loc, scale=scale),\n bijector=DummyMatrixTransform(),\n batch_shape=batch_shape,\n event_shape=event_shape,\n validate_args=True)\n\n def actual_mvn_log_prob(x):\n # This distribution is the normal PDF, reduced over the\n # last 3 dimensions + a jacobian term which corresponds\n # to the determinant of x.\n return (np.sum(stats.norm(loc, scale).logpdf(x), axis=(-1, -2, -3)) +\n np.sum(np.linalg.det(x), axis=-1))\n\n self.assertAllEqual([2, 3, 3], fake_mvn_static.event_shape)\n self.assertAllEqual([2], fake_mvn_static.batch_shape)\n\n if not tf.executing_eagerly():\n self.assertAllEqual(tf.TensorShape(None), fake_mvn_dynamic.event_shape)\n self.assertAllEqual(tf.TensorShape(None), fake_mvn_dynamic.batch_shape)\n\n num_samples = 5e3\n self.evaluate([event_shape_var.initializer, batch_shape_var.initializer])\n for fake_mvn in [fake_mvn_static, fake_mvn_dynamic]:\n # Ensure sample works by checking first, second moments.\n y = fake_mvn.sample(int(num_samples), seed=test_util.test_seed())\n x = y[0:5, ...]\n [\n x_,\n fake_event_shape_,\n fake_batch_shape_,\n fake_log_prob_,\n fake_prob_,\n ] = self.evaluate([\n x,\n fake_mvn.event_shape_tensor(),\n fake_mvn.batch_shape_tensor(),\n fake_mvn.log_prob(x),\n fake_mvn.prob(x),\n ])\n\n # Ensure all other functions work as intended.\n self.assertAllEqual([5, 2, 2, 3, 3], x_.shape)\n 
self.assertAllEqual([2, 3, 3], fake_event_shape_)\n self.assertAllEqual([2], fake_batch_shape_)\n self.assertAllClose(\n actual_mvn_log_prob(x_), fake_log_prob_, atol=0., rtol=1e-6)\n # With this many dimensions and samples, the direct space probability\n # may underflow.\n self.assertAllClose(\n np.exp(actual_mvn_log_prob(x_)), fake_prob_, atol=1e-12, rtol=1e-5)\n\n def testEmptyEvent(self):\n # Verify that zero-dimensional multivariate Normal distributions still\n # return reasonable shapes and a log-prob of 0.0.\n\n event_shape = [0]\n for batch_shape in ([2], []):\n for shapes_are_dynamic in (True, False):\n loc = tf.zeros(batch_shape + event_shape)\n scale_diag = tf.ones(batch_shape + event_shape)\n\n if shapes_are_dynamic:\n loc = tf.Variable(\n loc, shape=tf.TensorShape(None), name='dynamic_loc')\n scale_diag = tf.Variable(\n scale_diag, shape=tf.TensorShape(None), name='dynamic_scale_diag')\n self.evaluate([loc.initializer, scale_diag.initializer])\n\n mvn = tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)\n\n self.assertAllEqual(self.evaluate(mvn.event_shape_tensor()),\n event_shape)\n self.assertAllEqual(self.evaluate(mvn.batch_shape_tensor()),\n batch_shape)\n if not shapes_are_dynamic:\n self.assertAllEqual(\n tensorshape_util.as_list(mvn.event_shape), event_shape)\n self.assertAllEqual(\n tensorshape_util.as_list(mvn.batch_shape), batch_shape)\n\n for sample_shape in ([3], []):\n sample_ = self.evaluate(mvn.sample(\n sample_shape, seed=test_util.test_seed()))\n self.assertAllEqual(sample_.shape,\n sample_shape + batch_shape + event_shape)\n self.assertAllEqual(\n self.evaluate(mvn.log_prob(sample_)),\n np.zeros(sample_shape + batch_shape))\n\n def testConditioning(self):\n conditional_normal = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=0., scale=1.),\n bijector=_ChooseLocation(loc=[-100., 100.]))\n z = [-1, +1, -1, -1, +1]\n self.assertAllClose(\n np.sign(\n self.evaluate(\n conditional_normal.sample(\n 5, seed=test_util.test_seed(),\n bijector_kwargs={'z': z}))), z)\n\n @test_util.jax_disable_test_missing_functionality(\n 'JAX only has static shapes.')\n def testVectorDynamicShapeOverrideWithMutation(self):\n batch_shape = tf.Variable([4], shape=tf.TensorShape(None), dtype=tf.int32)\n d = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=2., scale=1.),\n bijector=tfb.Exp(),\n batch_shape=batch_shape,\n validate_args=True)\n self.evaluate(batch_shape.initializer)\n self.evaluate(d.sample(seed=test_util.test_seed()))\n with tf.control_dependencies(\n [batch_shape.assign([[4, 2]])]):\n with self.assertRaisesOpError('must be a vector'):\n self.evaluate(d.sample(seed=test_util.test_seed()))\n\n def testNonNegativeDynamicShapeOverrideWithMutation(self):\n batch_shape = tf.Variable([4], shape=tf.TensorShape(None), dtype=tf.int32)\n d = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=-1., scale=1.),\n bijector=tfb.Exp(),\n batch_shape=batch_shape,\n validate_args=True)\n self.evaluate(batch_shape.initializer)\n self.evaluate(d.sample(seed=test_util.test_seed()))\n with tf.control_dependencies([batch_shape.assign([-4])]):\n with self.assertRaisesOpError('must have non-negative elements'):\n self.evaluate(d.sample(seed=test_util.test_seed()))\n\n @test_util.jax_disable_test_missing_functionality(\n 'JAX only has static shapes.')\n def testNonScalarDynamicShapeOverrideWithMutation(self):\n loc = tf.Variable(3., shape=tf.TensorShape(None))\n base_dist = tfd.Normal(loc=loc, scale=1.)\n d = tfd.TransformedDistribution(\n 
distribution=base_dist,\n bijector=tfb.Exp(),\n batch_shape=tf.convert_to_tensor([3], dtype=tf.int32),\n validate_args=True)\n self.evaluate(loc.initializer)\n self.evaluate(d.sample(seed=test_util.test_seed()))\n with tf.control_dependencies([loc.assign([4., 2.])]):\n with self.assertRaisesWithPredicateMatch(\n Exception, 'Base distribution is not scalar'):\n self.evaluate(d.sample(seed=test_util.test_seed()))\n\n def testSupportBijectorOutsideRange(self):\n log_normal = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=1., scale=2.),\n bijector=tfb.Exp(),\n validate_args=True)\n x = np.array([-4.2, -1e-6, -1.3])\n bijector_inverse_x = (\n log_normal._experimental_default_event_space_bijector().inverse(x))\n self.assertAllNan(self.evaluate(bijector_inverse_x))\n\n\n@test_util.test_all_tf_execution_regimes\nclass ExcessiveConcretizationTest(test_util.TestCase):\n\n def setUp(self):\n super(ExcessiveConcretizationTest, self).setUp()\n\n self.max_permissible = {\n\n # extra concretizations primarily of base distribution parameters\n 'mean': 4,\n 'sample': 3,\n 'log_cdf': 4,\n 'cdf': 4,\n 'survival_function': 4,\n 'log_survival_function': 4,\n\n # extra concretizations primarily of bijector parameters\n 'entropy': 6,\n 'log_prob': 7,\n 'prob': 7,\n 'quantile': 4,\n\n 'event_shape_tensor': 2,\n 'batch_shape_tensor': 2,\n }\n\n self.shape = None\n\n def testExcessiveConcretizationOfParams(self):\n loc = tfp_hps.defer_and_count_usage(\n tf.Variable(0., name='loc', dtype=tf.float32, shape=self.shape))\n scale = tfp_hps.defer_and_count_usage(\n tf.Variable(2., name='scale', dtype=tf.float32, shape=self.shape))\n bij_scale = tfp_hps.defer_and_count_usage(\n tf.Variable(2., name='bij_scale', dtype=tf.float32, shape=self.shape))\n event_shape = tfp_hps.defer_and_count_usage(\n tf.Variable([2, 2], name='input_event_shape', dtype=tf.int32,\n shape=self.shape))\n batch_shape = tfp_hps.defer_and_count_usage(\n tf.Variable([4, 3, 5], name='input_batch_shape', dtype=tf.int32,\n shape=self.shape))\n\n dist = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=loc, scale=scale, validate_args=True),\n bijector=tfb.Scale(scale=bij_scale, validate_args=True),\n event_shape=event_shape,\n batch_shape=batch_shape,\n validate_args=True)\n\n for method in ('mean', 'entropy', 'event_shape_tensor',\n 'batch_shape_tensor'):\n with tfp_hps.assert_no_excessive_var_usage(\n method, max_permissible=self.max_permissible[method]):\n getattr(dist, method)()\n\n with tfp_hps.assert_no_excessive_var_usage(\n 'sample', max_permissible=self.max_permissible['sample']):\n dist.sample(seed=test_util.test_seed())\n\n for method in ('log_prob', 'prob'):\n with tfp_hps.assert_no_excessive_var_usage(\n method, max_permissible=self.max_permissible[method]):\n getattr(dist, method)(np.ones((4, 3, 5, 2, 2)) / 3.)\n\n def testExcessiveConcretizationOfParamsBatchShapeOverride(self):\n # Test methods that are not implemented if event_shape is overriden.\n loc = tfp_hps.defer_and_count_usage(\n tf.Variable(0., name='loc', dtype=tf.float32, shape=self.shape))\n scale = tfp_hps.defer_and_count_usage(\n tf.Variable(2., name='scale', dtype=tf.float32, shape=self.shape))\n bij_scale = tfp_hps.defer_and_count_usage(\n tf.Variable(2., name='bij_scale', dtype=tf.float32, shape=self.shape))\n batch_shape = tfp_hps.defer_and_count_usage(\n tf.Variable([4, 3, 5], name='input_batch_shape', dtype=tf.int32,\n shape=self.shape))\n dist = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=loc, scale=scale, validate_args=True),\n 
bijector=tfb.Scale(scale=bij_scale, validate_args=True),\n batch_shape=batch_shape,\n validate_args=True)\n\n for method in (\n 'log_cdf', 'cdf', 'survival_function', 'log_survival_function'):\n with tfp_hps.assert_no_excessive_var_usage(\n method, max_permissible=self.max_permissible[method]):\n getattr(dist, method)(np.ones((4, 3, 2)) / 3.)\n\n with tfp_hps.assert_no_excessive_var_usage(\n 'quantile', max_permissible=self.max_permissible['quantile']):\n dist.quantile(.1)\n\n\n@test_util.test_all_tf_execution_regimes\nclass ExcessiveConcretizationTestUnknownShape(ExcessiveConcretizationTest):\n\n def setUp(self):\n super(ExcessiveConcretizationTestUnknownShape, self).setUp()\n\n self.max_permissible = {\n\n # extra concretizations primarily of base distribution parameters\n 'mean': 9,\n 'sample': 9,\n 'log_cdf': 7,\n 'cdf': 7,\n 'survival_function': 7,\n 'log_survival_function': 7,\n 'entropy': 9,\n 'quantile': 5,\n 'event_shape_tensor': 5,\n 'batch_shape_tensor': 6,\n 'log_prob': 10,\n 'prob': 13,\n }\n\n self.shape = tf.TensorShape(None)\n\n\n# TODO(emilyaf): Check in `ZipMap` bijector and remove this.\nclass ToyZipMap(tfb.Bijector):\n\n def __init__(self, bijectors):\n self._bijectors = bijectors\n super(ToyZipMap, self).__init__(\n forward_min_event_ndims=0,\n inverse_min_event_ndims=0,\n is_constant_jacobian=all([\n b.is_constant_jacobian for b in tf.nest.flatten(bijectors)]))\n\n @property\n def bijectors(self):\n return self._bijectors\n\n def forward(self, x):\n return tf.nest.map_structure(lambda b_i, x_i: b_i.forward(x_i),\n self.bijectors, x)\n\n def inverse(self, y):\n return tf.nest.map_structure(lambda b_i, y_i: b_i.inverse(y_i),\n self.bijectors, y)\n\n def forward_dtype(self, dtype):\n return tf.nest.map_structure(lambda b_i, d_i: b_i.forward_dtype(d_i),\n self.bijectors, dtype)\n\n def inverse_dtype(self, dtype):\n return tf.nest.map_structure(lambda b_i, d_i: b_i.inverse_dtype(d_i),\n self.bijectors, dtype)\n\n def forward_event_shape(self, x_shape):\n return tf.nest.map_structure(\n lambda b_i, x_i: b_i.forward_event_shape(x_i),\n self.bijectors, x_shape)\n\n def inverse_event_shape(self, y_shape):\n return tf.nest.map_structure(\n lambda b_i, y_i: b_i.inverse_event_shape(y_i),\n self.bijectors, y_shape)\n\n def forward_event_shape_tensor(self, x_shape_tensor):\n return tf.nest.map_structure(\n lambda b_i, x_i: b_i.forward_event_shape_tensor(x_i),\n self.bijectors, x_shape_tensor)\n\n def inverse_event_shape_tensor(self, y_shape_tensor):\n return tf.nest.map_structure(\n lambda b_i, y_i: b_i.inverse_event_shape_tensor(y_i),\n self.bijectors, y_shape_tensor)\n\n def _forward_log_det_jacobian(self, x, event_ndims):\n fldj_parts = tf.nest.map_structure(\n lambda b, y, n: b.forward_log_det_jacobian(x, event_ndims=n),\n self.bijectors, x, event_ndims)\n return sum(tf.nest.flatten(fldj_parts))\n\n def inverse_log_det_jacobian(self, y, event_ndims):\n ildj_parts = tf.nest.map_structure(\n lambda b, y, n: b.inverse_log_det_jacobian(y, event_ndims=n),\n self.bijectors, y, event_ndims)\n return sum(tf.nest.flatten(ildj_parts))\n\n\n@test_util.test_all_tf_execution_regimes\nclass MultipartBijectorsTest(test_util.TestCase):\n\n @parameterized.named_parameters(\n {'testcase_name': 'split_sizes_known',\n 'known_split_sizes': [1, 3, 2]},\n {'testcase_name': 'split_size_unknown',\n 'known_split_sizes': [1, -1, 2]}\n )\n def test_transform_parts_to_vector(self, known_split_sizes):\n batch_shape = [4, 2]\n true_split_sizes = [1, 3, 2]\n\n # Create a joint distribution with parts of the 
specified sizes.\n seed = test_util.test_seed_stream()\n component_dists = tf.nest.map_structure(\n lambda size: tfd.MultivariateNormalDiag( # pylint: disable=g-long-lambda\n loc=tf.random.normal(batch_shape + [size], seed=seed()),\n scale_diag=tf.exp(\n tf.random.normal(batch_shape + [size], seed=seed()))),\n true_split_sizes)\n base_dist = tfd.JointDistributionSequential(component_dists)\n\n # Transform to a vector-valued distribution by concatenating the parts.\n bijector = tfb.Invert(tfb.Split(known_split_sizes, axis=-1))\n\n with self.assertRaisesRegexp(ValueError, 'Overriding the batch shape'):\n tfd.TransformedDistribution(base_dist, bijector, batch_shape=[3])\n\n with self.assertRaisesRegexp(ValueError, 'Overriding the event shape'):\n tfd.TransformedDistribution(base_dist, bijector, event_shape=[3])\n\n concat_dist = tfd.TransformedDistribution(base_dist, bijector)\n self.assertAllEqual(concat_dist.event_shape, [sum(true_split_sizes)])\n self.assertAllEqual(self.evaluate(concat_dist.event_shape_tensor()),\n [sum(true_split_sizes)])\n self.assertAllEqual(concat_dist.batch_shape, batch_shape)\n self.assertAllEqual(self.evaluate(concat_dist.batch_shape_tensor()),\n batch_shape)\n\n # Since the Split bijector has (constant) unit Jacobian, the transformed\n # entropy and mean/mode should match the base entropy and (split) base\n # mean/mode.\n self.assertAllEqual(*self.evaluate(\n (base_dist.entropy(), concat_dist.entropy())))\n\n self.assertAllEqual(*self.evaluate(\n (concat_dist.mean(), bijector.forward(base_dist.mean()))))\n self.assertAllEqual(*self.evaluate(\n (concat_dist.mode(), bijector.forward(base_dist.mode()))))\n\n # Since the Split bijector has zero Jacobian, the transformed `log_prob`\n # and `prob` should match the base distribution.\n sample_shape = [3]\n x = base_dist.sample(sample_shape, seed=seed())\n y = bijector.forward(x)\n for attr in ('log_prob', 'prob'):\n base_attr = getattr(base_dist, attr)(x)\n concat_attr = getattr(concat_dist, attr)(y)\n self.assertAllClose(*self.evaluate((base_attr, concat_attr)))\n\n # Test that `.sample()` works and returns a result of the expected structure\n # and shape.\n y_sampled = concat_dist.sample(sample_shape, seed=seed())\n self.assertAllEqual(y.shape, y_sampled.shape)\n\n @parameterized.named_parameters(\n {'testcase_name': 'split_sizes_known',\n 'known_split_sizes': [1, 3, 2]},\n {'testcase_name': 'split_size_unknown',\n 'known_split_sizes': [1, -1, 2]}\n )\n def test_transform_vector_to_parts(self, known_split_sizes):\n batch_shape = [4, 2]\n true_split_sizes = [1, 3, 2]\n\n base_event_size = sum(true_split_sizes)\n base_dist = tfd.MultivariateNormalDiag(\n loc=tf.random.normal(\n batch_shape + [base_event_size], seed=test_util.test_seed()),\n scale_diag=tf.exp(tf.random.normal(\n batch_shape + [base_event_size], seed=test_util.test_seed())))\n\n bijector = tfb.Split(known_split_sizes, axis=-1)\n split_dist = tfd.TransformedDistribution(base_dist, bijector)\n\n expected_event_shape = [np.array([s]) for s in true_split_sizes]\n output_event_shape = [np.array(s) for s in split_dist.event_shape]\n self.assertAllEqual(output_event_shape, expected_event_shape)\n self.assertAllEqual(self.evaluate(split_dist.event_shape_tensor()),\n expected_event_shape)\n self.assertAllEqual(split_dist.batch_shape, batch_shape)\n self.assertAllEqual(self.evaluate(split_dist.batch_shape_tensor()),\n batch_shape)\n\n # Since the Split bijector has (constant) unit Jacobian, the transformed\n # entropy and mean/mode should match the base entropy and 
(split) base\n # mean/mode.\n self.assertAllEqual(*self.evaluate(\n (base_dist.entropy(), split_dist.entropy())))\n self.assertAllEqualNested(\n *self.evaluate((split_dist.mean(),\n bijector.forward(base_dist.mean()))))\n self.assertAllEqualNested(\n *self.evaluate((split_dist.mode(),\n bijector.forward(base_dist.mode()))))\n\n # Since the Split bijector has zero Jacobian, the transformed `log_prob`\n # and `prob` should match the base distribution.\n sample_shape = [3]\n x = base_dist.sample(sample_shape, seed=test_util.test_seed())\n y = bijector.forward(x)\n for attr in ('log_prob', 'prob'):\n split_attr = getattr(split_dist, attr)(y)\n base_attr = getattr(base_dist, attr)(x)\n self.assertAllClose(*self.evaluate((base_attr, split_attr)), rtol=1e-5)\n\n # Test that `.sample()` works and returns a result of the expected structure\n # and shape.\n y_sampled = split_dist.sample(sample_shape, seed=test_util.test_seed())\n self.assertAllEqual([x.shape for x in y], [x.shape for x in y_sampled])\n\n # Test that `batch_shape` override works and does not affect the event shape\n base_dist = tfd.Independent(\n tfd.Normal(loc=list(range(6)), scale=1.),\n reinterpreted_batch_ndims=1, validate_args=True)\n override_batch_shape = [5, 2]\n split_dist_batch_override = tfd.TransformedDistribution(\n base_dist, bijector, batch_shape=override_batch_shape)\n self.assertAllEqual(\n split_dist_batch_override.event_shape, expected_event_shape)\n self.assertAllEqual(\n self.evaluate(split_dist_batch_override.event_shape_tensor()),\n expected_event_shape)\n self.assertAllEqual(split_dist_batch_override.batch_shape,\n override_batch_shape)\n self.assertAllEqual(\n self.evaluate(split_dist_batch_override.batch_shape_tensor()),\n override_batch_shape)\n\n # Test that `event_shape` override works as expected with `Split`\n override_event_shape = [6]\n base_dist = tfd.Normal(0., [2., 1.])\n split_dist_event_override = tfd.TransformedDistribution(\n base_dist, bijector, event_shape=override_event_shape)\n self.assertAllEqual(\n split_dist_event_override.event_shape, expected_event_shape)\n self.assertAllEqual(\n self.evaluate(split_dist_event_override.event_shape_tensor()),\n expected_event_shape)\n self.assertAllEqual(\n split_dist_event_override.batch_shape, base_dist.batch_shape)\n self.assertAllEqual(\n self.evaluate(split_dist_event_override.batch_shape_tensor()),\n self.evaluate(base_dist.batch_shape_tensor()))\n\n @parameterized.named_parameters(\n {'testcase_name': 'sequential',\n 'split_sizes': [1, 3, 2]},\n {'testcase_name': 'named',\n 'split_sizes': {'a': 1, 'b': 3, 'c': 2}},)\n def test_transform_joint_to_joint(self, split_sizes):\n dist_batch_shape = tf.nest.pack_sequence_as(\n split_sizes,\n [tensorshape_util.constant_value_as_shape(s)\n for s in [[2, 3], [2, 1], [1, 3]]])\n bijector_batch_shape = [1, 3]\n\n # Build a joint distribution with parts of the specified sizes.\n seed = test_util.test_seed_stream()\n component_dists = tf.nest.map_structure(\n lambda size, batch_shape: tfd.MultivariateNormalDiag( # pylint: disable=g-long-lambda\n loc=tf.random.normal(batch_shape + [size], seed=seed()),\n scale_diag=tf.random.uniform(\n minval=1., maxval=2.,\n shape=batch_shape + [size], seed=seed())),\n split_sizes, dist_batch_shape)\n if isinstance(split_sizes, dict):\n base_dist = tfd.JointDistributionNamed(component_dists)\n else:\n base_dist = tfd.JointDistributionSequential(component_dists)\n\n # Transform the distribution by applying a separate bijector to each part.\n bijectors = [tfb.Exp(),\n 
tfb.Scale(\n tf.random.uniform(\n minval=1., maxval=2.,\n shape=bijector_batch_shape, seed=seed())),\n tfb.Reshape([2, 1])]\n bijector = ToyZipMap(tf.nest.pack_sequence_as(split_sizes, bijectors))\n\n with self.assertRaisesRegexp(ValueError, 'Overriding the batch shape'):\n tfd.TransformedDistribution(base_dist, bijector, batch_shape=[3])\n\n with self.assertRaisesRegexp(ValueError, 'Overriding the event shape'):\n tfd.TransformedDistribution(base_dist, bijector, event_shape=[3])\n\n # Transform a joint distribution that has different batch shape components\n transformed_dist = tfd.TransformedDistribution(base_dist, bijector)\n\n self.assertAllEqualNested(\n transformed_dist.event_shape,\n bijector.forward_event_shape(base_dist.event_shape))\n self.assertAllEqualNested(*self.evaluate((\n transformed_dist.event_shape_tensor(),\n bijector.forward_event_shape_tensor(base_dist.event_shape_tensor()))))\n\n # Test that the batch shape components of the input are the same as those of\n # the output.\n self.assertAllEqualNested(transformed_dist.batch_shape, dist_batch_shape)\n self.assertAllEqualNested(\n self.evaluate(transformed_dist.batch_shape_tensor()), dist_batch_shape)\n self.assertAllEqualNested(dist_batch_shape, base_dist.batch_shape)\n\n # Check transformed `log_prob` against the base distribution.\n sample_shape = [3]\n sample = base_dist.sample(sample_shape, seed=seed())\n x = tf.nest.map_structure(tf.zeros_like, sample)\n y = bijector.forward(x)\n base_logprob = base_dist.log_prob(x)\n event_ndims = tf.nest.map_structure(lambda s: s.ndims,\n transformed_dist.event_shape)\n ildj = bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)\n\n (transformed_logprob,\n base_logprob_plus_ildj,\n log_transformed_prob\n ) = self.evaluate([\n transformed_dist.log_prob(y),\n base_logprob + ildj,\n tf.math.log(transformed_dist.prob(y))\n ])\n self.assertAllClose(base_logprob_plus_ildj, transformed_logprob)\n self.assertAllClose(transformed_logprob, log_transformed_prob)\n\n # Test that `.sample()` works and returns a result of the expected structure\n # and shape.\n y_sampled = transformed_dist.sample(sample_shape, seed=seed())\n self.assertAllEqual(tf.nest.map_structure(lambda y: y.shape, y),\n tf.nest.map_structure(lambda y: y.shape, y_sampled))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
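Both parameterized tests above lean on `tfb.Split` having a constant zero log-det Jacobian, which is why the transformed `log_prob`, entropy, and mean/mode all match the base distribution. Below is a minimal standalone sketch of the parts-to-vector pattern they exercise; the part sizes mirror the tests, but the component distributions and seed are illustrative, not taken from the test file.

```python
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors
tfd = tfp.distributions

# A joint distribution whose parts have sizes [1, 3, 2], as in the tests.
base = tfd.JointDistributionSequential([
    tfd.MultivariateNormalDiag(loc=tf.zeros([1])),
    tfd.MultivariateNormalDiag(loc=tf.zeros([3])),
    tfd.MultivariateNormalDiag(loc=tf.zeros([2])),
])

# Invert(Split) concatenates the parts into a single length-6 vector event.
bijector = tfb.Invert(tfb.Split([1, 3, 2], axis=-1))
concat_dist = tfd.TransformedDistribution(base, bijector)
print(concat_dist.event_shape)  # [6]

# Zero log-det Jacobian: densities agree at corresponding points.
x = base.sample(seed=42)  # A list of three part tensors.
y = bijector.forward(x)   # One concatenated vector.
print(base.log_prob(x), concat_dist.log_prob(y))  # Equal up to numerics.
```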
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"XLA tests for TensorFlow Probability ODE solvers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nfrom absl import flags\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util\n\nflags.DEFINE_string('test_device', None,\n 'TensorFlow device on which to place operators under test')\nFLAGS = flags.FLAGS\n\n_RTOL = 1e-8\n_ATOL = 1e-12\n\n\ndef linear(solver, jacobian_diag_part, initial_state):\n ode_fn = lambda time, state: jacobian_diag_part * state\n initial_time = 0.\n jacobian = np.diag(jacobian_diag_part)\n solver_instance = solver(rtol=_RTOL, atol=_ATOL)\n results = solver_instance.solve(\n ode_fn,\n initial_time,\n initial_state,\n solution_times=[1.],\n jacobian_fn=jacobian)\n return results.times, results.states\n\n\[email protected]_parameters([('bdf', tfp.math.ode.BDF)])\nclass XLATest(test_util.TestCase):\n\n def test_linear(self, solver):\n jacobian_diag_part = np.float32([-0.5, -1.])\n initial_state = np.float32([1., 2.])\n fn = lambda: linear(solver, jacobian_diag_part, initial_state)\n fn = tf.function(fn, autograph=False, experimental_compile=True)\n with tf.device(FLAGS.test_device):\n times, states = self.evaluate(fn())\n states_exact = np.exp(jacobian_diag_part[np.newaxis, :] *\n times[:, np.newaxis]) * initial_state\n self.assertAllClose(states, states_exact, rtol=1e-4)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"NormalCDF bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import special_math\n\n__all__ = [\n \"NormalCDF\",\n]\n\n\nclass NormalCDF(bijector.Bijector):\n \"\"\"Compute `Y = g(X) = NormalCDF(x)`.\n\n This bijector maps inputs from `[-inf, inf]` to `[0, 1]`. The inverse of the\n bijector applied to a uniform random variable `X ~ U(0, 1)` gives back a\n random variable with the\n [Normal distribution](https://en.wikipedia.org/wiki/Normal_distribution):\n\n ```none\n Y ~ Normal(0, 1)\n pdf(y; 0., 1.) = 1 / sqrt(2 * pi) * exp(-y ** 2 / 2)\n ```\n \"\"\"\n\n def __init__(self,\n validate_args=False,\n name=\"normal\"):\n \"\"\"Instantiates the `NormalCDF` bijector.\n\n Args:\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n super(NormalCDF, self).__init__(\n validate_args=validate_args,\n forward_min_event_ndims=0,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _is_increasing(cls):\n return True\n\n def _forward(self, x):\n return special_math.ndtr(x)\n\n def _inverse(self, y):\n with tf.control_dependencies(self._assertions(y)):\n return tf.math.ndtri(y)\n\n def _forward_log_det_jacobian(self, x):\n return -0.5 * np.log(2 * np.pi) - tf.square(x) / 2.\n\n def _assertions(self, t):\n if not self.validate_args:\n return []\n return [\n assert_util.assert_non_negative(\n t, message=\"Inverse transformation input must be greater than 0.\"),\n assert_util.assert_less_equal(\n t,\n dtype_util.as_numpy_dtype(t.dtype)(1.),\n message=\"Inverse transformation input must be less than or equal \"\n \"to 1.\")]\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Line-search optimizers package.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow_probability.python.optimizer.linesearch.hager_zhang import hager_zhang\n\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n_allowed_symbols = [\n 'hager_zhang',\n]\n\nremove_undocumented(__name__, _allowed_symbols)\n",
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"ScaleMatvecDiag bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import scale_matvec_linear_operator\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import tensor_util\n\n\n__all__ = [\n 'ScaleMatvecDiag',\n]\n\n\nclass ScaleMatvecDiag(scale_matvec_linear_operator.ScaleMatvecLinearOperator):\n \"\"\"Compute `Y = g(X; scale) = scale @ X`.\n\n In TF parlance, the `scale` term is logically equivalent to:\n\n ```python\n scale = tf.diag(scale_diag)\n ```\n\n The `scale` term is applied without materializing a full dense matrix.\n\n #### Examples\n\n ```python\n # Y = tf.diag(d1) @ X\n b = ScaleMatvecDiag(scale_diag=[-1., 2, 1]) # Implicitly 3x3.\n ```\n\n \"\"\"\n\n def __init__(self,\n scale_diag,\n adjoint=False,\n validate_args=False,\n name='scale_matvec_diag',\n dtype=None):\n \"\"\"Instantiates the `ScaleMatvecDiag` bijector.\n\n This `Bijector`'s forward operation is:\n\n ```none\n Y = g(X) = scale @ X\n ```\n\n where the `scale` term is logically equivalent to:\n\n ```python\n scale = tf.diag(scale_diag)\n ```\n\n Args:\n scale_diag: Floating-point `Tensor` representing the diagonal matrix.\n `scale_diag` has shape `[N1, N2, ... k]`, which represents a k x k\n diagonal matrix.\n adjoint: Python `bool` indicating whether to use the `scale` matrix as\n specified or its adjoint.\n Default value: `False`.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.\n dtype: `tf.DType` to prefer when converting args to `Tensor`s. Else, we\n fall back to a common dtype inferred from the args, finally falling back\n to float32.\n\n Raises:\n ValueError: if `scale_diag` is not specified.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n if dtype is None:\n dtype = dtype_util.common_dtype([scale_diag, scale_diag])\n\n scale_diag = tensor_util.convert_nonref_to_tensor(\n scale_diag, name='scale_diag', dtype=dtype)\n\n super(ScaleMatvecDiag, self).__init__(\n scale=tf.linalg.LinearOperatorDiag(\n diag=scale_diag,\n is_non_singular=True),\n adjoint=adjoint,\n validate_args=validate_args,\n parameters=parameters,\n name=name)\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Bijector base.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport contextlib\n\n# Dependency imports\nimport numpy as np\nimport six\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import cache_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import name_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n\n__all__ = [\n 'Bijector',\n]\n\n\nSKIP_DTYPE_CHECKS = False\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Bijector(tf.Module):\n r\"\"\"Interface for transformations of a `Distribution` sample.\n\n Bijectors can be used to represent any differentiable and injective\n (one to one) function defined on an open subset of `R^n`. Some non-injective\n transformations are also supported (see 'Non Injective Transforms' below).\n\n #### Mathematical Details\n\n A `Bijector` implements a [smooth covering map](\n https://en.wikipedia.org/wiki/Local_diffeomorphism), i.e., a local\n diffeomorphism such that every point in the target has a neighborhood evenly\n covered by a map ([see also](\n https://en.wikipedia.org/wiki/Covering_space#Covering_of_a_manifold)).\n A `Bijector` is used by `TransformedDistribution` but can be generally used\n for transforming a `Distribution` generated `Tensor`. A `Bijector` is\n characterized by three operations:\n\n 1. Forward\n\n Useful for turning one random outcome into another random outcome from a\n different distribution.\n\n 2. Inverse\n\n Useful for 'reversing' a transformation to compute one probability in\n terms of another.\n\n 3. `log_det_jacobian(x)`\n\n 'The log of the absolute value of the determinant of the matrix of all\n first-order partial derivatives of the inverse function.'\n\n Useful for inverting a transformation to compute one probability in terms\n of another. Geometrically, the Jacobian determinant is the volume of the\n transformation and is used to scale the probability.\n\n We take the absolute value of the determinant before log to avoid NaN\n values. Geometrically, a negative determinant corresponds to an\n orientation-reversing transformation. It is ok for us to discard the sign\n of the determinant because we only integrate everywhere-nonnegative\n functions (probability densities) and the correct orientation is always the\n one that produces a nonnegative integrand.\n\n By convention, transformations of random variables are named in terms of the\n forward transformation. 
The forward transformation creates samples, the\n inverse is useful for computing probabilities.\n\n #### Example Uses\n\n - Basic properties:\n\n ```python\n x = ... # A tensor.\n # Evaluate forward transformation.\n fwd_x = my_bijector.forward(x)\n x == my_bijector.inverse(fwd_x)\n x != my_bijector.forward(fwd_x) # Not equal because x != g(g(x)).\n ```\n\n - Computing a log-likelihood:\n\n ```python\n def transformed_log_prob(bijector, log_prob, x):\n return (bijector.inverse_log_det_jacobian(x, event_ndims=0) +\n log_prob(bijector.inverse(x)))\n ```\n\n - Transforming a random outcome:\n\n ```python\n def transformed_sample(bijector, x):\n return bijector.forward(x)\n ```\n\n #### Example Bijectors\n\n - 'Exponential'\n\n ```none\n Y = g(X) = exp(X)\n X ~ Normal(0, 1) # Univariate.\n ```\n\n Implies:\n\n ```none\n g^{-1}(Y) = log(Y)\n |Jacobian(g^{-1})(y)| = 1 / y\n Y ~ LogNormal(0, 1), i.e.,\n prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))\n = (1 / y) Normal(log(y); 0, 1)\n ```\n\n Here is an example of how one might implement the `Exp` bijector:\n\n ```python\n class Exp(Bijector):\n\n def __init__(self, validate_args=False, name='exp'):\n super(Exp, self).__init__(\n validate_args=validate_args,\n forward_min_event_ndims=0,\n name=name)\n\n def _forward(self, x):\n return tf.exp(x)\n\n def _inverse(self, y):\n return tf.log(y)\n\n def _inverse_log_det_jacobian(self, y):\n return -self._forward_log_det_jacobian(self._inverse(y))\n\n def _forward_log_det_jacobian(self, x):\n # Notice that we needn't do any reducing, even when`event_ndims > 0`.\n # The base Bijector class will handle reducing for us; it knows how\n # to do so because we called `super` `__init__` with\n # `forward_min_event_ndims = 0`.\n return x\n ```\n\n - 'Affine'\n\n ```none\n Y = g(X) = sqrtSigma * X + mu\n X ~ MultivariateNormal(0, I_d)\n ```\n\n Implies:\n\n ```none\n g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)\n |Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))\n Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,\n prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))\n = det(sqrtSigma)^(-d) *\n MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)\n ```\n\n #### Min_event_ndims and Naming\n\n Bijectors are named for the dimensionality of data they act on (i.e. without\n broadcasting). We can think of bijectors having an intrinsic `min_event_ndims`\n , which is the minimum number of dimensions for the bijector act on. For\n instance, a Cholesky decomposition requires a matrix, and hence\n `min_event_ndims=2`.\n\n Some examples:\n\n `AffineScalar: min_event_ndims=0`\n `Affine: min_event_ndims=1`\n `Cholesky: min_event_ndims=2`\n `Exp: min_event_ndims=0`\n `Sigmoid: min_event_ndims=0`\n `SoftmaxCentered: min_event_ndims=1`\n\n Note the difference between `Affine` and `AffineScalar`. `AffineScalar`\n operates on scalar events, whereas `Affine` operates on vector-valued events.\n\n More generally, there is a `forward_min_event_ndims` and an\n `inverse_min_event_ndims`. In most cases, these will be the same.\n However, for some shape changing bijectors, these will be different\n (e.g. 
a bijector which pads an extra dimension at the end, might have\n `forward_min_event_ndims=0` and `inverse_min_event_ndims=1`.\n\n\n #### Jacobian Determinant\n\n The Jacobian determinant is a reduction over `event_ndims - min_event_ndims`\n (`forward_min_event_ndims` for `forward_log_det_jacobian` and\n `inverse_min_event_ndims` for `inverse_log_det_jacobian`).\n To see this, consider the `Exp` `Bijector` applied to a `Tensor` which has\n sample, batch, and event (S, B, E) shape semantics. Suppose the `Tensor`'s\n partitioned-shape is `(S=[4], B=[2], E=[3, 3])`. The shape of the `Tensor`\n returned by `forward` and `inverse` is unchanged, i.e., `[4, 2, 3, 3]`.\n However the shape returned by `inverse_log_det_jacobian` is `[4, 2]` because\n the Jacobian determinant is a reduction over the event dimensions.\n\n Another example is the `Affine` `Bijector`. Because `min_event_ndims = 1`, the\n Jacobian determinant reduction is over `event_ndims - 1`.\n\n It is sometimes useful to implement the inverse Jacobian determinant as the\n negative forward Jacobian determinant. For example,\n\n ```python\n def _inverse_log_det_jacobian(self, y):\n return -self._forward_log_det_jac(self._inverse(y)) # Note negation.\n ```\n\n The correctness of this approach can be seen from the following claim.\n\n - Claim:\n\n Assume `Y = g(X)` is a bijection whose derivative exists and is nonzero\n for its domain, i.e., `dY/dX = d/dX g(X) != 0`. Then:\n\n ```none\n (log o det o jacobian o g^{-1})(Y) = -(log o det o jacobian o g)(X)\n ```\n\n - Proof:\n\n From the bijective, nonzero differentiability of `g`, the\n [inverse function theorem](\n https://en.wikipedia.org/wiki/Inverse_function_theorem)\n implies `g^{-1}` is differentiable in the image of `g`.\n Applying the chain rule to `y = g(x) = g(g^{-1}(y))` yields\n `I = g'(g^{-1}(y))*g^{-1}'(y)`.\n The same theorem also implies `g^{-1}'` is non-singular therefore:\n `inv[ g'(g^{-1}(y)) ] = g^{-1}'(y)`.\n The claim follows from [properties of determinant](\n https://en.wikipedia.org/wiki/Determinant#Multiplicativity_and_matrix_groups).\n\n Generally its preferable to directly implement the inverse Jacobian\n determinant. This should have superior numerical stability and will often\n share subgraphs with the `_inverse` implementation.\n\n #### Is_constant_jacobian\n\n Certain bijectors will have constant jacobian matrices. For instance, the\n `Affine` bijector encodes multiplication by a matrix plus a shift, with\n jacobian matrix, the same aforementioned matrix.\n\n `is_constant_jacobian` encodes the fact that the jacobian matrix is constant.\n The semantics of this argument are the following:\n\n * Repeated calls to 'log_det_jacobian' functions with the same\n `event_ndims` (but not necessarily same input), will return the first\n computed jacobian (because the matrix is constant, and hence is input\n independent).\n * `log_det_jacobian` implementations are merely broadcastable to the true\n `log_det_jacobian` (because, again, the jacobian matrix is input\n independent). 
Specifically, `log_det_jacobian` is implemented as the\n log jacobian determinant for a single input.\n\n ```python\n class Identity(Bijector):\n\n def __init__(self, validate_args=False, name='identity'):\n super(Identity, self).__init__(\n is_constant_jacobian=True,\n validate_args=validate_args,\n forward_min_event_ndims=0,\n name=name)\n\n def _forward(self, x):\n return x\n\n def _inverse(self, y):\n return y\n\n def _inverse_log_det_jacobian(self, y):\n return -self._forward_log_det_jacobian(self._inverse(y))\n\n def _forward_log_det_jacobian(self, x):\n # The full log jacobian determinant would be tf.zero_like(x).\n # However, we circumvent materializing that, since the jacobian\n # calculation is input independent, and we specify it for one input.\n return tf.constant(0., x.dtype)\n\n ```\n\n #### Subclass Requirements\n\n - Subclasses typically implement:\n\n - `_forward`,\n - `_inverse`,\n - `_inverse_log_det_jacobian`,\n - `_forward_log_det_jacobian` (optional),\n - `_is_increasing` (scalar bijectors only)\n\n The `_forward_log_det_jacobian` is called when the bijector is inverted via\n the `Invert` bijector. If undefined, a slightly less efficiently\n calculation, `-1 * _inverse_log_det_jacobian`, is used.\n\n If the bijector changes the shape of the input, you must also implement:\n\n - _forward_event_shape_tensor,\n - _forward_event_shape (optional),\n - _inverse_event_shape_tensor,\n - _inverse_event_shape (optional).\n\n By default the event-shape is assumed unchanged from input.\n\n - If the `Bijector`'s use is limited to `TransformedDistribution` (or friends\n like `QuantizedDistribution`) then depending on your use, you may not need\n to implement all of `_forward` and `_inverse` functions.\n\n Examples:\n\n 1. Sampling (e.g., `sample`) only requires `_forward`.\n 2. Probability functions (e.g., `prob`, `cdf`, `survival`) only require\n `_inverse` (and related).\n 3. Only calling probability functions on the output of `sample` means\n `_inverse` can be implemented as a cache lookup.\n\n See 'Example Uses' [above] which shows how these functions are used to\n transform a distribution. (Note: `_forward` could theoretically be\n implemented as a cache lookup but this would require controlling the\n underlying sample generation mechanism.)\n\n #### Non Injective Transforms\n\n **WARNING** Handling of non-injective transforms is subject to change.\n\n Non injective maps `g` are supported, provided their domain `D` can be\n partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that,\n ignoring sets of measure zero, the restriction of `g` to each subset is a\n differentiable bijection onto `g(D)`. In particular, this implies that for\n `y in g(D)`, the set inverse, i.e. `g^{-1}(y) = {x in D : g(x) = y}`, always\n contains exactly `k` distinct points.\n\n The property, `_is_injective` is set to `False` to indicate that the bijector\n is not injective, yet satisfies the above condition.\n\n The usual bijector API is modified in the case `_is_injective is False` (see\n method docstrings for specifics). Here we show by example the `AbsoluteValue`\n bijector. In this case, the domain `D = (-inf, inf)`, can be partitioned\n into `D1 = (-inf, 0)`, `D2 = {0}`, and `D3 = (0, inf)`. Let `gi` be the\n restriction of `g` to `Di`, then both `g1` and `g3` are bijections onto\n `(0, inf)`, with `g1^{-1}(y) = -y`, and `g3^{-1}(y) = y`. We will use\n `g1` and `g3` to define bijector methods over `D1` and `D3`. 
`D2 = {0}` is\n an oddball in that `g2` is one to one, and the derivative is not well defined.\n Fortunately, when considering transformations of probability densities\n (e.g. in `TransformedDistribution`), sets of measure zero have no effect in\n theory, and only a small effect in 32 or 64 bit precision. For that reason,\n we define `inverse(0)` and `inverse_log_det_jacobian(0)` both as `[0, 0]`,\n which is convenient and results in a left-semicontinuous pdf.\n\n\n ```python\n abs = tfp.bijectors.AbsoluteValue()\n\n abs.forward(-1.)\n ==> 1.\n\n abs.forward(1.)\n ==> 1.\n\n abs.inverse(1.)\n ==> (-1., 1.)\n\n # The |dX/dY| is constant, == 1. So Log|dX/dY| == 0.\n abs.inverse_log_det_jacobian(1., event_ndims=0)\n ==> (0., 0.)\n\n # Special case handling of 0.\n abs.inverse(0.)\n ==> (0., 0.)\n\n abs.inverse_log_det_jacobian(0., event_ndims=0)\n ==> (0., 0.)\n ```\n\n \"\"\"\n\n _TF_MODULE_IGNORED_PROPERTIES = tf.Module._TF_MODULE_IGNORED_PROPERTIES.union(\n (\n '_graph_parents',\n '_is_constant_jacobian',\n '_cache',\n ))\n\n @abc.abstractmethod\n def __init__(self,\n graph_parents=None,\n is_constant_jacobian=False,\n validate_args=False,\n dtype=None,\n forward_min_event_ndims=None,\n inverse_min_event_ndims=None,\n parameters=None,\n name=None):\n \"\"\"Constructs Bijector.\n\n A `Bijector` transforms random variables into new random variables.\n\n Examples:\n\n ```python\n # Create the Y = g(X) = X transform.\n identity = Identity()\n\n # Create the Y = g(X) = exp(X) transform.\n exp = Exp()\n ```\n\n See `Bijector` subclass docstring for more details and specific examples.\n\n Args:\n graph_parents: Python list of graph prerequisites of this `Bijector`.\n is_constant_jacobian: Python `bool` indicating that the Jacobian matrix is\n not a function of the input.\n validate_args: Python `bool`, default `False`. Whether to validate input\n with asserts. If `validate_args` is `False`, and the inputs are invalid,\n correct behavior is not guaranteed.\n dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not\n enforced.\n forward_min_event_ndims: Python `integer` indicating the minimum number of\n dimensions `forward` operates on.\n inverse_min_event_ndims: Python `integer` indicating the minimum number of\n dimensions `inverse` operates on. 
Will be set to\n `forward_min_event_ndims` by default, if no value is provided.\n parameters: Python `dict` of parameters used to instantiate this\n `Bijector`.\n name: The name to give Ops created by the initializer.\n\n Raises:\n ValueError: If neither `forward_min_event_ndims` and\n `inverse_min_event_ndims` are specified, or if either of them is\n negative.\n ValueError: If a member of `graph_parents` is not a `Tensor`.\n \"\"\"\n if not name:\n name = type(self).__name__\n name = name_util.camel_to_lower_snake(name)\n name = name_util.get_name_scope_name(name)\n name = name_util.strip_invalid_chars(name)\n super(Bijector, self).__init__(name=name)\n self._name = name\n self._parameters = self._no_dependency(parameters)\n\n self._graph_parents = self._no_dependency(graph_parents or [])\n\n self._is_constant_jacobian = is_constant_jacobian\n self._validate_args = validate_args\n self._dtype = dtype\n\n self._initial_parameter_control_dependencies = tuple(\n d for d in self._parameter_control_dependencies(is_init=True)\n if d is not None)\n if self._initial_parameter_control_dependencies:\n self._initial_parameter_control_dependencies = (\n tf.group(*self._initial_parameter_control_dependencies),)\n\n if forward_min_event_ndims is None and inverse_min_event_ndims is None:\n raise ValueError('Must specify at least one of `forward_min_event_ndims` '\n 'and `inverse_min_event_ndims`.')\n elif inverse_min_event_ndims is None:\n inverse_min_event_ndims = forward_min_event_ndims\n elif forward_min_event_ndims is None:\n forward_min_event_ndims = inverse_min_event_ndims\n\n if not isinstance(forward_min_event_ndims, int):\n raise TypeError('Expected forward_min_event_ndims to be of '\n 'type int, got {}'.format(\n type(forward_min_event_ndims).__name__))\n\n if not isinstance(inverse_min_event_ndims, int):\n raise TypeError('Expected inverse_min_event_ndims to be of '\n 'type int, got {}'.format(\n type(inverse_min_event_ndims).__name__))\n\n if forward_min_event_ndims < 0:\n raise ValueError('forward_min_event_ndims must be a non-negative '\n 'integer.')\n if inverse_min_event_ndims < 0:\n raise ValueError('inverse_min_event_ndims must be a non-negative '\n 'integer.')\n\n self._forward_min_event_ndims = forward_min_event_ndims\n self._inverse_min_event_ndims = inverse_min_event_ndims\n\n for i, t in enumerate(self._graph_parents):\n if t is None or not tf.is_tensor(t):\n raise ValueError('Graph parent item %d is not a Tensor; %s.' 
% (i, t))\n\n # Setup caching after everything else is done.\n self._cache = self._setup_cache()\n\n def _setup_cache(self):\n \"\"\"Defines the cache for this bijector.\"\"\"\n # Wrap forward/inverse with getters so instance methods can be patched.\n return cache_util.BijectorCache(\n forward_impl=(lambda x, **kwargs: self._forward(x, **kwargs)), # pylint: disable=unnecessary-lambda\n inverse_impl=(lambda y, **kwargs: self._inverse(y, **kwargs)), # pylint: disable=unnecessary-lambda\n cache_type=cache_util.CachedDirectedFunction)\n\n @property\n def graph_parents(self):\n \"\"\"Returns this `Bijector`'s graph_parents as a Python list.\"\"\"\n return self._graph_parents\n\n @property\n def forward_min_event_ndims(self):\n \"\"\"Returns the minimal number of dimensions bijector.forward operates on.\"\"\"\n return self._forward_min_event_ndims\n\n @property\n def inverse_min_event_ndims(self):\n \"\"\"Returns the minimal number of dimensions bijector.inverse operates on.\"\"\"\n return self._inverse_min_event_ndims\n\n @property\n def is_constant_jacobian(self):\n \"\"\"Returns true iff the Jacobian matrix is not a function of x.\n\n Note: Jacobian matrix is either constant for both forward and inverse or\n neither.\n\n Returns:\n is_constant_jacobian: Python `bool`.\n \"\"\"\n return self._is_constant_jacobian\n\n @property\n def _is_injective(self):\n \"\"\"Returns true iff the forward map `g` is injective (one-to-one function).\n\n **WARNING** This hidden property and its behavior are subject to change.\n\n Note: Non-injective maps `g` are supported, provided their domain `D` can\n be partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that,\n ignoring sets of measure zero, the restriction of `g` to each subset is a\n differentiable bijection onto `g(D)`.\n\n Returns:\n is_injective: Python `bool`.\n \"\"\"\n return True\n\n @property\n def validate_args(self):\n \"\"\"Returns True if Tensor arguments will be validated.\"\"\"\n return self._validate_args\n\n @property\n def dtype(self):\n \"\"\"dtype of `Tensor`s transformable by this distribution.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n \"\"\"Returns the string name of this `Bijector`.\"\"\"\n return self._name\n\n @property\n def parameters(self):\n \"\"\"Dictionary of parameters used to instantiate this `Bijector`.\"\"\"\n # Remove \"self\", \"__class__\", or other special variables. These can appear\n # if the subclass used:\n # `parameters = dict(locals())`.\n return {k: v for k, v in self._parameters.items()\n if not k.startswith('__') and k != 'self'}\n\n def __call__(self, value, name=None, **kwargs):\n \"\"\"Applies or composes the `Bijector`, depending on input type.\n\n This is a convenience function which applies the `Bijector` instance in\n three different ways, depending on the input:\n\n 1. If the input is a `tfd.Distribution` instance, return\n `tfd.TransformedDistribution(distribution=input, bijector=self)`.\n 2. If the input is a `tfb.Bijector` instance, return\n `tfb.Chain([self, input])`.\n 3. 
Otherwise, return `self.forward(input)`\n\n Args:\n value: A `tfd.Distribution`, `tfb.Bijector`, or a `Tensor`.\n name: Python `str` name given to ops created by this function.\n **kwargs: Additional keyword arguments passed into the created\n `tfd.TransformedDistribution`, `tfb.Bijector`, or `self.forward`.\n\n Returns:\n composition: A `tfd.TransformedDistribution` if the input was a\n `tfd.Distribution`, a `tfb.Chain` if the input was a `tfb.Bijector`, or\n a `Tensor` computed by `self.forward`.\n\n #### Examples\n\n ```python\n sigmoid = tfb.Reciprocal()(\n tfb.AffineScalar(shift=1.)(\n tfb.Exp()(\n tfb.AffineScalar(scale=-1.))))\n # ==> `tfb.Chain([\n # tfb.Reciprocal(),\n # tfb.AffineScalar(shift=1.),\n # tfb.Exp(),\n # tfb.AffineScalar(scale=-1.),\n # ])` # ie, `tfb.Sigmoid()`\n\n log_normal = tfb.Exp()(tfd.Normal(0, 1))\n # ==> `tfd.TransformedDistribution(tfd.Normal(0, 1), tfb.Exp())`\n\n tfb.Exp()([-1., 0., 1.])\n # ==> tf.exp([-1., 0., 1.])\n ```\n\n \"\"\"\n\n # To avoid circular dependencies and keep the implementation local to the\n # `Bijector` class, we violate PEP8 guidelines and import here rather than\n # at the top of the file.\n from tensorflow_probability.python.bijectors import chain # pylint: disable=g-import-not-at-top\n from tensorflow_probability.python.distributions import distribution # pylint: disable=g-import-not-at-top\n from tensorflow_probability.python.distributions import transformed_distribution # pylint: disable=g-import-not-at-top\n\n # TODO(b/128841942): Handle Conditional distributions and bijectors.\n if type(value) is transformed_distribution.TransformedDistribution: # pylint: disable=unidiomatic-typecheck\n # We cannot accept subclasses with different constructors here, because\n # subclass constructors may accept constructor arguments TD doesn't know\n # how to handle. e.g. 
`TypeError: __init__() got an unexpected keyword\n # argument 'allow_nan_stats'` when doing\n # `tfb.Identity()(tfd.Chi(df=1., allow_nan_stats=True))`.\n new_kwargs = value.parameters\n new_kwargs.update(kwargs)\n new_kwargs['name'] = name or new_kwargs.get('name', None)\n new_kwargs['bijector'] = self(value.bijector)\n return transformed_distribution.TransformedDistribution(**new_kwargs)\n\n if isinstance(value, distribution.Distribution):\n return transformed_distribution.TransformedDistribution(\n distribution=value,\n bijector=self,\n name=name,\n **kwargs)\n\n if isinstance(value, chain.Chain):\n new_kwargs = kwargs.copy()\n new_kwargs['bijectors'] = [self] + ([] if value.bijectors is None\n else list(value.bijectors))\n if 'validate_args' not in new_kwargs:\n new_kwargs['validate_args'] = value.validate_args\n new_kwargs['name'] = name or value.name\n return chain.Chain(**new_kwargs)\n\n if isinstance(value, Bijector):\n return chain.Chain([self, value], name=name, **kwargs)\n\n return self.forward(value, name=name or 'forward', **kwargs)\n\n def _forward_event_shape_tensor(self, input_shape):\n \"\"\"Subclass implementation for `forward_event_shape_tensor` function.\"\"\"\n # By default, we assume event_shape is unchanged.\n return input_shape\n\n def forward_event_shape_tensor(self,\n input_shape,\n name='forward_event_shape_tensor'):\n \"\"\"Shape of a single sample from a single batch as an `int32` 1D `Tensor`.\n\n Args:\n input_shape: `Tensor`, `int32` vector indicating event-portion shape\n passed into `forward` function.\n name: name to give to the op\n\n Returns:\n forward_event_shape_tensor: `Tensor`, `int32` vector indicating\n event-portion shape after applying `forward`.\n \"\"\"\n with self._name_and_control_scope(name):\n input_shape = tf.convert_to_tensor(\n input_shape, dtype_hint=tf.int32, name='input_shape')\n return tf.identity(\n tf.convert_to_tensor(self._forward_event_shape_tensor(input_shape),\n dtype_hint=tf.int32),\n name='forward_event_shape')\n\n def _forward_event_shape(self, input_shape):\n \"\"\"Subclass implementation for `forward_event_shape` public function.\"\"\"\n # By default, we assume event_shape is unchanged.\n return input_shape\n\n def forward_event_shape(self, input_shape):\n \"\"\"Shape of a single sample from a single batch as a `TensorShape`.\n\n Same meaning as `forward_event_shape_tensor`. May be only partially defined.\n\n Args:\n input_shape: `TensorShape` indicating event-portion shape passed into\n `forward` function.\n\n Returns:\n forward_event_shape_tensor: `TensorShape` indicating event-portion shape\n after applying `forward`. 
Possibly unknown.\n \"\"\"\n input_shape = tf.TensorShape(input_shape)\n return tf.TensorShape(self._forward_event_shape(input_shape))\n\n def _inverse_event_shape_tensor(self, output_shape):\n \"\"\"Subclass implementation for `inverse_event_shape_tensor` function.\"\"\"\n # By default, we assume event_shape is unchanged.\n return output_shape\n\n def inverse_event_shape_tensor(self,\n output_shape,\n name='inverse_event_shape_tensor'):\n \"\"\"Shape of a single sample from a single batch as an `int32` 1D `Tensor`.\n\n Args:\n output_shape: `Tensor`, `int32` vector indicating event-portion shape\n passed into `inverse` function.\n name: name to give to the op\n\n Returns:\n inverse_event_shape_tensor: `Tensor`, `int32` vector indicating\n event-portion shape after applying `inverse`.\n \"\"\"\n with self._name_and_control_scope(name):\n output_shape = tf.convert_to_tensor(\n output_shape, dtype_hint=tf.int32, name='output_shape')\n return tf.identity(\n tf.convert_to_tensor(self._inverse_event_shape_tensor(output_shape),\n dtype_hint=tf.int32),\n name='inverse_event_shape')\n\n def _inverse_event_shape(self, output_shape):\n \"\"\"Subclass implementation for `inverse_event_shape` public function.\"\"\"\n # By default, we assume event_shape is unchanged.\n return output_shape\n\n def inverse_event_shape(self, output_shape):\n \"\"\"Shape of a single sample from a single batch as a `TensorShape`.\n\n Same meaning as `inverse_event_shape_tensor`. May be only partially defined.\n\n Args:\n output_shape: `TensorShape` indicating event-portion shape passed into\n `inverse` function.\n\n Returns:\n inverse_event_shape_tensor: `TensorShape` indicating event-portion shape\n after applying `inverse`. Possibly unknown.\n \"\"\"\n output_shape = tf.TensorShape(output_shape)\n return tf.TensorShape(self._inverse_event_shape(output_shape))\n\n def _forward(self, x):\n \"\"\"Subclass implementation for `forward` public function.\"\"\"\n raise NotImplementedError('forward not implemented.')\n\n def _call_forward(self, x, name, **kwargs):\n \"\"\"Wraps call to _forward, allowing extra shared logic.\"\"\"\n with self._name_and_control_scope(name):\n x = tf.convert_to_tensor(x, dtype_hint=self.dtype, name='x')\n self._maybe_assert_dtype(x)\n if not self._is_injective: # No caching for non-injective\n return self._forward(x, **kwargs)\n return self._cache.forward(x, **kwargs)\n\n def forward(self, x, name='forward', **kwargs):\n \"\"\"Returns the forward `Bijector` evaluation, i.e., X = g(Y).\n\n Args:\n x: `Tensor`. 
The input to the 'forward' evaluation.\n name: The name to give this op.\n **kwargs: Named arguments forwarded to subclass implementation.\n\n Returns:\n `Tensor`.\n\n Raises:\n TypeError: if `self.dtype` is specified and `x.dtype` is not\n `self.dtype`.\n NotImplementedError: if `_forward` is not implemented.\n \"\"\"\n return self._call_forward(x, name, **kwargs)\n\n @classmethod\n def _is_increasing(cls, **kwargs):\n \"\"\"Subclass implementation for `is_increasing` public function.\"\"\"\n raise NotImplementedError('`_is_increasing` not implemented.')\n\n def _call_is_increasing(self, name, **kwargs):\n \"\"\"Wraps call to _is_increasing, allowing extra shared logic.\"\"\"\n with self._name_and_control_scope(name):\n return tf.identity(self._is_increasing(**kwargs))\n\n def _internal_is_increasing(self, name='is_increasing', **kwargs):\n \"\"\"For scalar bijectors, returns True where `d forward(x) / d x > 0`.\n\n This method, like `_is_injective`, is part of a contract with\n `TransformedDistribution`. This method supports the correctness of scalar\n `quantile` / `cdf` / `survival_function` for transformed distributions.\n\n Args:\n name: The name to give this op.\n **kwargs: Named arguments forwarded to subclass implementation.\n\n Returns:\n A python `bool` or a `tf.bool` `Tensor`.\n \"\"\"\n return self._call_is_increasing(name, **kwargs)\n\n def _inverse(self, y):\n \"\"\"Subclass implementation for `inverse` public function.\"\"\"\n raise NotImplementedError('inverse not implemented')\n\n def _call_inverse(self, y, name, **kwargs):\n \"\"\"Wraps call to _inverse, allowing extra shared logic.\"\"\"\n with self._name_and_control_scope(name):\n y = tf.convert_to_tensor(y, dtype_hint=self.dtype, name='y')\n self._maybe_assert_dtype(y)\n if not self._is_injective: # No caching for non-injective\n return self._inverse(y, **kwargs)\n return self._cache.inverse(y, **kwargs)\n\n def inverse(self, y, name='inverse', **kwargs):\n \"\"\"Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).\n\n Args:\n y: `Tensor`. The input to the 'inverse' evaluation.\n name: The name to give this op.\n **kwargs: Named arguments forwarded to subclass implementation.\n\n Returns:\n `Tensor`, if this bijector is injective.\n If not injective, returns the k-tuple containing the unique\n `k` points `(x1, ..., xk)` such that `g(xi) = y`.\n\n Raises:\n TypeError: if `self.dtype` is specified and `y.dtype` is not\n `self.dtype`.\n NotImplementedError: if `_inverse` is not implemented.\n \"\"\"\n return self._call_inverse(y, name, **kwargs)\n\n def _compute_inverse_log_det_jacobian_with_caching(\n self, x, y, prefer_inverse_ldj_fn, event_ndims, kwargs):\n \"\"\"Compute ILDJ by the best available means, and ensure it is cached.\n\n Sub-classes of Bijector may implement either `_forward_log_det_jacobian` or\n `_inverse_log_det_jacobian`, or both, and may prefer one or the other for\n reasons of computational efficiency and/or numerics. In general, to compute\n the [I]LDJ, we need one of `x` or `y` and one of `_forward_log_det_jacobian`\n or `_inverse_log_det_jacobian` (all bijectors implement `forward` and\n `inverse`, so either of `x` and `y` may always computed from the other).\n\n This method encapsulates the logic of selecting the best possible method of\n computing the inverse log det jacobian, and executing it. 
Possible avenues\n to obtaining this value are:\n - recovering it from the cache,\n - computing it as `-_forward_log_det_jacobian(x)`, where `x` may need to\n be computed as `inverse(y)`, or\n - computing it as `_inverse_log_det_jacobian(y)`, where `y` may need to\n be computed as `forward(x)`.\n\n To make things more interesting, we may be able to take advantage of the\n knowledge that the jacobian is constant, for given `event_ndims`, and may\n even cache that result.\n\n To make things even more interesting still, we may need to perform an\n additional `reduce_sum` over the event dims of an input after computing the\n ILDJ (see `_reduce_jacobian_det_over_event` for the reduction logic). In\n order to prevent spurious TF graph dependencies on past inputs in cached\n results, we need to take care to do this reduction after the cache lookups.\n\n This method takes care of all the above concerns.\n\n Args:\n x: a `Tensor`, the pre-Bijector transform value at whose post-Bijector\n transform value the ILDJ is to be computed. Can be `None` as long as\n `y` is not `None`.\n y: a `Tensor`, a point in the output space of the Bijector's `forward`\n transformation, at whose value the ILDJ is to be computed. Can be\n `None` as long as `x` is not `None`.\n prefer_inverse_ldj_fn: Python `bool`, if `True`, will strictly prefer to\n use the `_inverse_log_det_jacobian` to compute ILDJ; else, will strictly\n prefer to use `_forward_log_det_jacobian`. The switching behavior allows\n the caller to communicate that one of the inverse or forward LDJ\n computations may be more efficient (usually because it can avoid an\n extra call to `inverse` or `forward`). It's only a \"preference\" because\n it may not always be possible, namely if the underlying implementation\n only has one of `_inverse_log_det_jacobian` or\n `_forward_log_det_jacobian` defined.\n event_ndims: int-like `Tensor`, the number of dims of an event (in the\n pre- or post-transformed space, as appropriate). These need to be summed\n over to compute the total ildj.\n kwargs: dictionary of keyword args that will be passed to calls to\n `self.forward` or `self.inverse` (if either of those are required), and\n to `self._compute_unreduced_ildj_with_caching` (which uses the\n conditioning kwargs for caching and for their underlying computations).\n\n Returns:\n ildj: a Tensor of ILDJ['s] at the given input (as specified by the args).\n Also updates the cache as needed.\n \"\"\"\n # Ensure at least one of _inverse/_forward_log_det_jacobian is defined.\n if not (hasattr(self, '_inverse_log_det_jacobian') or\n hasattr(self, '_forward_log_det_jacobian')):\n raise NotImplementedError(\n 'Neither _forward_log_det_jacobian nor _inverse_log_det_jacobian '\n 'is implemented. One or the other is required.')\n\n # Use inverse_log_det_jacobian if either\n # 1. it is preferred to *and* we are able, or\n # 2. 
forward ldj fn isn't implemented (so we have no choice).\n use_inverse_ldj_fn = (\n (prefer_inverse_ldj_fn and hasattr(self, '_inverse_log_det_jacobian'))\n or not hasattr(self, '_forward_log_det_jacobian'))\n\n if use_inverse_ldj_fn:\n tensor_to_use = y if y is not None else self.forward(x, **kwargs)\n min_event_ndims = self.inverse_min_event_ndims\n else:\n tensor_to_use = x if x is not None else self.inverse(y, **kwargs)\n min_event_ndims = self.forward_min_event_ndims\n\n unreduced_ildj = self._compute_unreduced_ildj_with_caching(\n x, y, tensor_to_use, use_inverse_ldj_fn, kwargs)\n\n return self._reduce_jacobian_det_over_event(\n tf.shape(tensor_to_use), unreduced_ildj, min_event_ndims, event_ndims)\n\n def _compute_unreduced_ildj_with_caching(\n self, x, y, tensor_to_use, use_inverse_ldj_fn, kwargs):\n \"\"\"Helper for computing ILDJ, with caching.\n\n Does not do the 'reduce' step which is necessary in some cases; this is left\n to the caller.\n\n Args:\n x: a `Tensor`, the pre-Bijector transform value at whose post-Bijector\n transform value the ILDJ is to be computed. Can be `None` as long as\n `y` is not `None`. This method only uses the value for cache\n lookup/updating.\n y: a `Tensor`, a point in the output space of the Bijector's `forward`\n transformation, at whose value the ILDJ is to be computed. Can be\n `None` as long as `x` is not `None`. This method only uses the value\n for cache lookup/updating.\n tensor_to_use: a `Tensor`, the one to actually pass to the chosen compute\n function (`_inverse_log_det_jacobian` or `_forward_log_det_jacobian`).\n It is presumed that the caller has already figured out what input to use\n (it will either be the x or y value corresponding to the location where\n we are computing the ILDJ).\n use_inverse_ldj_fn: Python `bool`, if `True`, will use the\n `_inverse_log_det_jacobian` to compute ILDJ; else, will use\n `_forward_log_det_jacobian`.\n kwargs: dictionary of keyword args that will be passed to calls to to\n `_inverse_log_det_jacobian` or `_forward_log_det_jacobian`, as well as\n for lookup/updating of the result in the cache.\n\n Returns:\n ildj: the (un-reduce_sum'ed) value of the ILDJ at the specified input\n location. Also updates the cache as needed.\n \"\"\"\n if use_inverse_ldj_fn:\n attrs = self._cache.inverse.attributes(tensor_to_use, **kwargs)\n if 'ildj' not in attrs:\n attrs['ildj'] = self._inverse_log_det_jacobian(tensor_to_use, **kwargs)\n else:\n attrs = self._cache.forward.attributes(tensor_to_use, **kwargs)\n if 'ildj' not in attrs:\n attrs['ildj'] = -self._forward_log_det_jacobian(tensor_to_use, **kwargs)\n\n return attrs['ildj']\n\n def _call_inverse_log_det_jacobian(self, y, event_ndims, name, **kwargs):\n \"\"\"Wraps call to _inverse_log_det_jacobian, allowing extra shared logic.\n\n Specifically, this method\n - adds a name scope,\n - performs validations,\n - handles the special case of non-injective Bijector (skip caching and\n reduce_sum over the values at the multiple points in the preimage of `y`\n under the non-injective transformation)\n\n so that sub-classes don't have to worry about this stuff.\n\n Args:\n y: same as in `inverse_log_det_jacobian`\n event_ndims: same as in `inverse_log_det_jacobian`\n name: same as in `inverse_log_det_jacobian`\n **kwargs: same as in `inverse_log_det_jacobian`\n\n Returns:\n ildj: the inverse log det jacobian at `y`. 
Also updates the cache as\n needed.\n \"\"\"\n with self._name_and_control_scope(name), tf.control_dependencies(\n self._check_valid_event_ndims(\n min_event_ndims=self.inverse_min_event_ndims,\n event_ndims=event_ndims)):\n y = tf.convert_to_tensor(y, name='y')\n self._maybe_assert_dtype(y)\n\n if not self._is_injective:\n ildjs = self._inverse_log_det_jacobian(y, **kwargs)\n return tuple(\n self._reduce_jacobian_det_over_event( # pylint: disable=g-complex-comprehension\n tf.shape(y), ildj, self.inverse_min_event_ndims, event_ndims)\n for ildj in ildjs)\n\n return self._compute_inverse_log_det_jacobian_with_caching(\n x=None,\n y=y,\n prefer_inverse_ldj_fn=True,\n event_ndims=event_ndims,\n kwargs=kwargs)\n\n def inverse_log_det_jacobian(self,\n y,\n event_ndims,\n name='inverse_log_det_jacobian',\n **kwargs):\n \"\"\"Returns the (log o det o Jacobian o inverse)(y).\n\n Mathematically, returns: `log(det(dX/dY))(Y)`. (Recall that: `X=g^{-1}(Y)`.)\n\n Note that `forward_log_det_jacobian` is the negative of this function,\n evaluated at `g^{-1}(y)`.\n\n Args:\n y: `Tensor`. The input to the 'inverse' Jacobian determinant evaluation.\n event_ndims: Number of dimensions in the probabilistic events being\n transformed. Must be greater than or equal to\n `self.inverse_min_event_ndims`. The result is summed over the final\n dimensions to produce a scalar Jacobian determinant for each event, i.e.\n it has shape `rank(y) - event_ndims` dimensions.\n name: The name to give this op.\n **kwargs: Named arguments forwarded to subclass implementation.\n\n Returns:\n ildj: `Tensor`, if this bijector is injective.\n If not injective, returns the tuple of local log det\n Jacobians, `log(det(Dg_i^{-1}(y)))`, where `g_i` is the restriction\n of `g` to the `ith` partition `Di`.\n\n Raises:\n TypeError: if `self.dtype` is specified and `y.dtype` is not\n `self.dtype`.\n NotImplementedError: if `_inverse_log_det_jacobian` is not implemented.\n \"\"\"\n return self._call_inverse_log_det_jacobian(y, event_ndims, name, **kwargs)\n\n def _call_forward_log_det_jacobian(self, x, event_ndims, name, **kwargs):\n \"\"\"Wraps call to _forward_log_det_jacobian, allowing extra shared logic.\n\n Specifically, this method\n - adds a name scope,\n - performs validations,\n - handles the special case of non-injective Bijector (forward jacobian is\n ill-defined in this case and we raise an exception)\n\n so that sub-classes don't have to worry about this stuff.\n\n Args:\n x: same as in `forward_log_det_jacobian`\n event_ndims: same as in `forward_log_det_jacobian`\n name: same as in `forward_log_det_jacobian`\n **kwargs: same as in `forward_log_det_jacobian`\n\n Returns:\n fldj: the forward log det jacobian at `x`. Also updates the cache as\n needed.\n \"\"\"\n if not self._is_injective:\n raise NotImplementedError(\n 'forward_log_det_jacobian cannot be implemented for non-injective '\n 'transforms.')\n\n with self._name_and_control_scope(name), tf.control_dependencies(\n self._check_valid_event_ndims(\n min_event_ndims=self.forward_min_event_ndims,\n event_ndims=event_ndims)):\n x = tf.convert_to_tensor(x, name='x')\n self._maybe_assert_dtype(x)\n\n return -self._compute_inverse_log_det_jacobian_with_caching(\n x=x,\n y=None,\n prefer_inverse_ldj_fn=False,\n event_ndims=event_ndims,\n kwargs=kwargs)\n\n def forward_log_det_jacobian(self,\n x,\n event_ndims,\n name='forward_log_det_jacobian',\n **kwargs):\n \"\"\"Returns both the forward_log_det_jacobian.\n\n Args:\n x: `Tensor`. 
The input to the 'forward' Jacobian determinant evaluation.\n event_ndims: Number of dimensions in the probabilistic events being\n transformed. Must be greater than or equal to\n `self.forward_min_event_ndims`. The result is summed over the final\n dimensions to produce a scalar Jacobian determinant for each event, i.e.\n it has shape `rank(x) - event_ndims` dimensions.\n name: The name to give this op.\n **kwargs: Named arguments forwarded to subclass implementation.\n\n Returns:\n `Tensor`, if this bijector is injective.\n If not injective this is not implemented.\n\n Raises:\n TypeError: if `self.dtype` is specified and `y.dtype` is not\n `self.dtype`.\n NotImplementedError: if neither `_forward_log_det_jacobian`\n nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented, or\n this is a non-injective bijector.\n \"\"\"\n return self._call_forward_log_det_jacobian(x, event_ndims, name, **kwargs)\n\n def _forward_dtype(self, dtype):\n # TODO(emilyaf): Raise an error if not implemented for bijectors with\n # multipart forward or inverse event shapes.\n return dtype\n\n def _inverse_dtype(self, dtype):\n # TODO(emilyaf): Raise an error if not implemented for bijectors with\n # multipart forward or inverse event shapes.\n return dtype\n\n def forward_dtype(self,\n dtype,\n name='forward_dtype',\n **kwargs):\n \"\"\"Returns the dtype of the output of the forward transformation.\n\n Args:\n dtype: `tf.dtype`, or nested structure of `tf.dtype`s, of the input to\n `forward`.\n name: The name to give this op.\n **kwargs: Named arguments forwarded to subclass implementation.\n\n Returns:\n `tf.dtype` or nested structure of `tf.dtype`s of the output of `forward`.\n \"\"\"\n with self._name_and_control_scope(name):\n return self._forward_dtype(dtype, **kwargs)\n\n def inverse_dtype(self,\n dtype,\n name='inverse_dtype',\n **kwargs):\n \"\"\"Returns the dtype of the output of the inverse transformation.\n\n Args:\n dtype: `tf.dtype`, or nested structure of `tf.dtype`s, of the input to\n `inverse`.\n name: The name to give this op.\n **kwargs: Named arguments forwarded to subclass implementation.\n\n Returns:\n `tf.dtype` or nested structure of `tf.dtype`s of the output of `inverse`.\n \"\"\"\n with self._name_and_control_scope(name):\n return self._inverse_dtype(dtype, **kwargs)\n\n @contextlib.contextmanager\n def _name_and_control_scope(self, name=None):\n \"\"\"Helper function to standardize op scope.\"\"\"\n with tf.name_scope(self.name):\n with tf.name_scope(name) as name_scope:\n deps = tuple(\n d for d in ( # pylint: disable=g-complex-comprehension\n tuple(self._initial_parameter_control_dependencies) +\n tuple(self._parameter_control_dependencies(is_init=False)))\n if d is not None)\n if not deps:\n yield name_scope\n return\n with tf.control_dependencies(deps) as deps_scope:\n yield deps_scope\n\n def _maybe_assert_dtype(self, x):\n \"\"\"Helper to check dtype when self.dtype is known.\"\"\"\n if SKIP_DTYPE_CHECKS:\n return\n if (self.dtype is not None and\n not dtype_util.base_equal(self.dtype, x.dtype)):\n raise TypeError(\n 'Input had dtype %s but expected %s.' 
% (x.dtype, self.dtype))\n\n def _reduce_jacobian_det_over_event(\n self, shape_tensor, ildj, min_event_ndims, event_ndims):\n \"\"\"Reduce jacobian over event_ndims - min_event_ndims.\"\"\"\n # In this case, we need to tile the Jacobian over the event and reduce.\n rank = tf.size(shape_tensor)\n shape_tensor = shape_tensor[rank - event_ndims:rank - min_event_ndims]\n\n ones = tf.ones(shape_tensor, ildj.dtype)\n reduced_ildj = tf.reduce_sum(\n ones * ildj,\n axis=self._get_event_reduce_dims(min_event_ndims, event_ndims))\n\n return reduced_ildj\n\n def _get_event_reduce_dims(self, min_event_ndims, event_ndims):\n \"\"\"Compute the reduction dimensions given event_ndims.\"\"\"\n event_ndims_ = self._maybe_get_static_event_ndims(event_ndims)\n\n if event_ndims_ is not None:\n return [-index for index in range(1, event_ndims_ - min_event_ndims + 1)]\n else:\n reduce_ndims = event_ndims - min_event_ndims\n return tf.range(-reduce_ndims, 0)\n\n def _check_valid_event_ndims(self, min_event_ndims, event_ndims):\n \"\"\"Check whether event_ndims is atleast min_event_ndims.\"\"\"\n event_ndims = tf.convert_to_tensor(event_ndims, name='event_ndims')\n event_ndims_ = tf.get_static_value(event_ndims)\n assertions = []\n\n if not dtype_util.is_integer(event_ndims.dtype):\n raise ValueError('Expected integer dtype, got dtype {}'.format(\n event_ndims.dtype))\n\n if event_ndims_ is not None:\n if tensorshape_util.rank(event_ndims.shape) != 0:\n raise ValueError('Expected scalar event_ndims, got shape {}'.format(\n event_ndims.shape))\n if min_event_ndims > event_ndims_:\n raise ValueError('event_ndims ({}) must be larger than '\n 'min_event_ndims ({})'.format(event_ndims_,\n min_event_ndims))\n elif self.validate_args:\n assertions += [\n assert_util.assert_greater_equal(event_ndims, min_event_ndims)\n ]\n\n if tensorshape_util.is_fully_defined(event_ndims.shape):\n if tensorshape_util.rank(event_ndims.shape) != 0:\n raise ValueError('Expected scalar shape, got ndims {}'.format(\n tensorshape_util.rank(event_ndims.shape)))\n\n elif self.validate_args:\n assertions += [\n assert_util.assert_rank(event_ndims, 0, message='Expected scalar.')\n ]\n return assertions\n\n def _maybe_get_static_event_ndims(self, event_ndims):\n \"\"\"Helper which returns tries to return an integer static value.\"\"\"\n event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)\n\n if isinstance(event_ndims_, (np.generic, np.ndarray)):\n if event_ndims_.dtype not in (np.int32, np.int64):\n raise ValueError('Expected integer dtype, got dtype {}'.format(\n event_ndims_.dtype))\n\n if isinstance(event_ndims_, np.ndarray) and len(event_ndims_.shape):\n raise ValueError(\n 'Expected a scalar integer, got {}'.format(event_ndims_))\n event_ndims_ = int(event_ndims_)\n\n return event_ndims_\n\n def _parameter_control_dependencies(self, is_init):\n \"\"\"Returns a list of ops to be executed in members with graph deps.\n\n Typically subclasses override this function to return parameter specific\n assertions (eg, positivity of `scale`, etc.).\n\n Args:\n is_init: Python `bool` indicating that the call site is `__init__`.\n\n Returns:\n dependencies: `list`-like of ops to be executed in member functions with\n graph dependencies.\n \"\"\"\n return ()\n",
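To make the subclass contract described in the `Bijector` docstring concrete, here is its `Exp` sketch made runnable against the TF2 API (the docstring's `tf.log` is `tf.math.log` here). Subclassing only requires `_forward`, `_inverse`, and one of the log-det-Jacobian hooks; the base class handles caching and the `event_ndims` reduction.

```python
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector


class Exp(bijector.Bijector):
  """Y = exp(X), a scalar bijector with min_event_ndims=0."""

  def __init__(self, validate_args=False, name='exp'):
    parameters = dict(locals())
    super(Exp, self).__init__(
        validate_args=validate_args,
        forward_min_event_ndims=0,
        parameters=parameters,
        name=name)

  def _forward(self, x):
    return tf.exp(x)

  def _inverse(self, y):
    return tf.math.log(y)

  def _forward_log_det_jacobian(self, x):
    # log|d exp(x)/dx| = x; the base class reduces this over `event_ndims`
    # dims and derives the inverse log-det Jacobian as its negation.
    return x


b = Exp()
print(b.forward(0.))                                  # ==> 1.
print(b.inverse(1.))                                  # ==> 0.
print(b.forward_log_det_jacobian(2., event_ndims=0))  # ==> 2.
```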
"# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tensorflow_probability.python.stats.ranking_stats.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nfrom absl.testing import parameterized\n\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python.internal import test_util\n\n\ntfd = tfp.distributions\n\n\n@test_util.test_all_tf_execution_regimes\nclass AurocAuprcTest(test_util.TestCase):\n\n @parameterized.parameters(\n ('ROC', ((.7, .8), (.6, .5)), ((.2, .4), (.6, .7))),\n ('ROC', .6, .3),\n ('PR', (.8, .6), (.4, .7)),\n ('PR', .5, .4)\n )\n def testAurocAuprc(self, curve, positive_means, negative_means):\n\n num_positive_trials = 4001\n num_negative_trials = 5156\n num_positive_quantiles = 445\n num_negative_quantiles = 393\n\n dist_positive = tfd.TruncatedNormal(\n positive_means, scale=0.2, low=0., high=1.)\n dist_negative = tfd.TruncatedNormal(\n negative_means, scale=0.2, low=0., high=1.)\n\n positive_trials = dist_positive.sample(\n num_positive_trials, seed=test_util.test_seed())\n negative_trials = dist_negative.sample(\n num_negative_trials, seed=test_util.test_seed())\n\n positive_trials_, negative_trials_ = self.evaluate(\n [positive_trials, negative_trials])\n q1 = tfp.stats.quantiles(positive_trials_, num_positive_quantiles, axis=0)\n q0 = tfp.stats.quantiles(negative_trials_, num_negative_quantiles, axis=0)\n\n y_true = np.array([1] * num_positive_trials + [0] * num_negative_trials)\n y_pred = np.concatenate([positive_trials_, negative_trials_])\n\n def auc_fn(y_pred):\n if curve == 'PR':\n sort_indices = np.argsort(-y_pred)\n sorted_y_true = y_true[sort_indices]\n true_positives = np.cumsum(sorted_y_true)\n precision = true_positives / np.arange(1, len(sorted_y_true)+1)\n recall = true_positives / true_positives[-1]\n return np.sum(np.diff(recall) * precision[1:])\n else:\n # 'ROC'\n sort_indices = np.argsort(y_pred)\n sorted_y_true = y_true[sort_indices]\n false_count = np.cumsum(1 - sorted_y_true)\n area = np.sum(sorted_y_true * false_count)\n return area / (false_count[-1] * (len(sorted_y_true) - false_count[-1]))\n\n batch_shape = np.array(positive_means).shape\n batch_rank = len(batch_shape)\n if batch_rank > 0:\n # Transpose so that batch dimensions are first and data dimension is last\n transpose_axes = list(range(1, batch_rank + 1)) + [0]\n q0 = tf.transpose(q0, transpose_axes)\n q1 = tf.transpose(q1, transpose_axes)\n\n true_auc = np.apply_along_axis(auc_fn, 0, y_pred)\n\n auc = tfp.stats.quantile_auc(\n q0, num_negative_trials, q1, num_positive_trials, curve=curve)\n auc_ = self.evaluate(auc)\n\n self.assertAllClose(auc_, true_auc, atol=5e-3, rtol=0.)\n self.assertAllEqual(batch_shape, auc_.shape)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.abs",
"tensorflow.compat.v2.rank",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.broadcast_static_shape",
"tensorflow.compat.v2.identity",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.constant"
],
[
"numpy.abs",
"numpy.issubdtype",
"numpy.finfo",
"numpy.shape",
"numpy.equal"
],
[
"numpy.issubdtype"
],
[
"tensorflow.compat.v1.keras.layers.Dropout",
"tensorflow.compat.v1.io.gfile.exists",
"tensorflow.compat.v1.io.gfile.makedirs",
"tensorflow.compat.v1.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.train.Checkpoint",
"tensorflow.compat.v1.GradientTape",
"tensorflow.compat.v1.compat.v2.summary.create_file_writer",
"tensorflow.compat.v1.keras.layers.Dense",
"tensorflow.compat.v1.math.equal",
"tensorflow.compat.v1.compat.v1.nn.rnn_cell.LSTMCell",
"tensorflow.compat.v1.io.gfile.rmtree",
"tensorflow.compat.v1.compat.v1.app.run",
"tensorflow.compat.v1.compat.v1.enable_eager_execution",
"tensorflow.compat.v1.unstack",
"tensorflow.compat.v1.keras.layers.BatchNormalization",
"tensorflow.compat.v1.keras.layers.Activation",
"tensorflow.compat.v1.nn.softplus",
"tensorflow.compat.v1.keras.layers.GlobalAveragePooling1D",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.keras.layers.Conv1D",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.compat.v2.summary.scalar",
"tensorflow.compat.v1.squeeze"
],
[
"numpy.log",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.transpose",
"tensorflow.compat.v2.GradientTape",
"numpy.ones",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.TensorShape"
],
[
"tensorflow.compat.v2.nest.assert_same_structure",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.nest.flatten"
],
[
"tensorflow.compat.v2.executing_eagerly",
"numpy.sqrt",
"numpy.take",
"numpy.minimum",
"tensorflow.compat.v2.minimum",
"tensorflow.compat.v2.linalg.cholesky",
"numpy.max",
"tensorflow.compat.v2.ones",
"numpy.mean",
"numpy.any",
"tensorflow.compat.v2.reduce_sum",
"numpy.var",
"numpy.exp",
"tensorflow.compat.v2.linspace",
"numpy.arange",
"numpy.eye",
"numpy.stack",
"tensorflow.compat.v2.stack",
"tensorflow.compat.v2.zeros",
"numpy.float32",
"numpy.repeat",
"tensorflow.compat.v2.function",
"tensorflow.compat.v2.test.main",
"numpy.min",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.math.sqrt",
"numpy.transpose",
"numpy.array",
"tensorflow.compat.v2.constant",
"numpy.sum",
"numpy.abs",
"tensorflow.compat.v2.concat",
"numpy.ones",
"tensorflow.compat.v2.random.set_seed"
],
[
"tensorflow.compat.v2.test.main",
"numpy.asarray",
"numpy.arange",
"tensorflow.compat.v2.shape",
"numpy.random.randn",
"numpy.array",
"tensorflow.compat.v1.placeholder_with_default"
],
[
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.zeros",
"numpy.array"
],
[
"tensorflow.compat.v2.size",
"tensorflow.compat.v2.rank",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.reshape"
],
[
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.test.main",
"scipy.stats.invgauss",
"numpy.arange",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.TensorShape",
"numpy.array",
"tensorflow.compat.v1.placeholder_with_default"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.exp",
"numpy.diag",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.transpose",
"numpy.linspace",
"tensorflow.compat.v2.linalg.det",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.TensorShape",
"numpy.exp",
"tensorflow.compat.v2.name_scope",
"numpy.linalg.det",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.gather",
"numpy.float32",
"tensorflow.compat.v2.math.log",
"numpy.zeros",
"numpy.log",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.nest.pack_sequence_as",
"scipy.stats.multivariate_normal",
"tensorflow.compat.v2.matmul",
"numpy.transpose",
"tensorflow.compat.v2.nest.flatten",
"numpy.array",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.ones_like",
"numpy.int32",
"tensorflow.compat.v2.cast",
"numpy.tile",
"numpy.ones",
"numpy.testing.assert_array_less",
"scipy.stats.norm",
"tensorflow.compat.v2.linalg.LinearOperatorDiag"
],
[
"numpy.diag",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.test.main",
"numpy.float32",
"numpy.exp",
"tensorflow.compat.v1.function"
],
[
"tensorflow.compat.v2.math.ndtri",
"tensorflow.compat.v2.square",
"numpy.log",
"tensorflow.compat.v2.name_scope"
],
[
"tensorflow.python.util.all_util.remove_undocumented"
],
[
"tensorflow.compat.v2.linalg.LinearOperatorDiag",
"tensorflow.compat.v2.name_scope"
],
[
"tensorflow.compat.v2.get_static_value",
"tensorflow.compat.v2.is_tensor",
"tensorflow.compat.v2.size",
"tensorflow.compat.v2.control_dependencies",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.Module._TF_MODULE_IGNORED_PROPERTIES.union",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.group",
"tensorflow.compat.v2.TensorShape"
],
[
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.transpose",
"numpy.cumsum",
"numpy.concatenate",
"numpy.apply_along_axis",
"numpy.diff",
"numpy.argsort",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
danielgrassinger/yt_new_frontend | [
"5f91d2fb8721c4c5da0af543a6256ed979cd9fc9",
"5f91d2fb8721c4c5da0af543a6256ed979cd9fc9",
"5f91d2fb8721c4c5da0af543a6256ed979cd9fc9",
"5f91d2fb8721c4c5da0af543a6256ed979cd9fc9",
"5f91d2fb8721c4c5da0af543a6256ed979cd9fc9",
"5f91d2fb8721c4c5da0af543a6256ed979cd9fc9"
] | [
"yt/frontends/athena/io.py",
"yt/frontends/tipsy/setup.py",
"yt/units/tests/test_ytarray.py",
"yt/frontends/enzo/data_structures.py",
"yt/utilities/tests/test_particle_generator.py",
"yt/fields/particle_fields.py"
] | [
"\"\"\"\nThe data-file handling functions\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom yt.utilities.io_handler import \\\n BaseIOHandler\nimport numpy as np\nfrom yt.funcs import mylog, defaultdict\nfrom .data_structures import chk23\n\nfloat_size = {\"float\":np.dtype(\">f4\").itemsize,\n \"double\":np.dtype(\">f8\").itemsize}\n\naxis_list = [\"_x\",\"_y\",\"_z\"]\n\nclass IOHandlerAthena(BaseIOHandler):\n _dataset_type = \"athena\"\n _offset_string = 'data:offsets=0'\n _data_string = 'data:datatype=0'\n _read_table_offset = None\n\n def _field_dict(self,fhandle):\n keys = fhandle['field_types'].keys()\n val = fhandle['field_types'].keys()\n return dict(zip(keys,val))\n\n def _read_field_names(self,grid):\n pass\n\n def _read_chunk_data(self,chunk,fields):\n data = {}\n if len(chunk.objs) == 0: return data\n for grid in chunk.objs:\n if grid.filename is None:\n continue\n f = open(grid.filename, \"rb\")\n data[grid.id] = {}\n grid_dims = grid.ActiveDimensions\n read_dims = grid.read_dims.astype(\"int64\")\n grid_ncells = np.prod(read_dims)\n grid0_ncells = np.prod(grid.index.grids[0].read_dims)\n read_table_offset = get_read_table_offset(f)\n for field in fields:\n ftype, offsetr, dtype = grid.index._field_map[field]\n if grid_ncells != grid0_ncells:\n offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))\n if grid_ncells == grid0_ncells:\n offset = offsetr\n offset = int(offset) # Casting to be certain.\n file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size[dtype]\n xread = slice(grid.file_offset[0],grid.file_offset[0]+grid_dims[0])\n yread = slice(grid.file_offset[1],grid.file_offset[1]+grid_dims[1])\n f.seek(read_table_offset+offset+file_offset)\n if dtype == 'float':\n dt = '>f4'\n elif dtype == 'double':\n dt = '>f8'\n if ftype == 'scalar':\n f.seek(read_table_offset+offset+file_offset)\n v = np.fromfile(f, dtype=dt,\n count=grid_ncells).reshape(read_dims,order='F')\n if ftype == 'vector':\n vec_offset = axis_list.index(field[-1][-2:])\n f.seek(read_table_offset+offset+3*file_offset)\n v = np.fromfile(f, dtype=dt, count=3*grid_ncells)\n v = v[vec_offset::3].reshape(read_dims,order='F')\n if grid.ds.field_ordering == 1:\n data[grid.id][field] = v[xread,yread,:].T.astype(\"float64\")\n else:\n data[grid.id][field] = v[xread,yread,:].astype(\"float64\")\n f.close()\n return data\n \n def _read_data_slice(self, grid, field, axis, coord):\n sl = [slice(None), slice(None), slice(None)]\n sl[axis] = slice(coord, coord + 1)\n if grid.ds.field_ordering == 1:\n sl.reverse()\n return self._read_data_set(grid, field)[sl]\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n chunks = list(chunks)\n if any((ftype != \"athena\" for ftype, fname in fields)):\n raise NotImplementedError\n rv = {}\n for field in fields:\n rv[field] = np.empty(size, dtype=\"float64\")\n ng = sum(len(c.objs) for c in chunks)\n mylog.debug(\"Reading %s cells of %s fields in %s grids\",\n size, [f2 for f1, f2 in fields], ng)\n ind = 0\n for chunk in chunks:\n data = self._read_chunk_data(chunk, fields)\n for g in chunk.objs:\n for field in fields:\n ftype, fname = field\n ds = data[g.id].pop(field)\n nd = g.select(selector, ds, rv[field], ind) # 
caches\n ind += nd\n data.pop(g.id)\n return rv\n\ndef get_read_table_offset(f):\n line = f.readline()\n while True:\n splitup = line.strip().split()\n chkc = chk23('CELL_DATA')\n chkp = chk23('POINT_DATA')\n if chkc in splitup or chkp in splitup:\n f.readline()\n read_table_offset = f.tell()\n break\n line = f.readline()\n return read_table_offset\n\n\n",
"#!/usr/bin/env python\nimport setuptools\nimport os\nimport sys\nimport os.path\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration('tipsy', parent_package, top_path)\n config.make_config_py() # installs __config__.py\n #config.make_svn_version_py()\n return config\n",
"\"\"\"\nTest ndarray subclass that handles symbolic units.\n\n\n\n\n\"\"\"\n\n# ----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport copy\nfrom yt.extern.six.moves import cPickle as pickle\nimport itertools\nimport numpy as np\nimport operator\nimport os\nimport shutil\nimport tempfile\n\nfrom nose.tools import assert_true\nfrom numpy.testing import \\\n assert_array_equal, \\\n assert_equal, assert_raises, \\\n assert_array_almost_equal_nulp, \\\n assert_array_almost_equal\nfrom numpy import array\nfrom yt.units.yt_array import \\\n YTArray, YTQuantity, \\\n unary_operators, binary_operators, \\\n uconcatenate, uintersect1d, \\\n uunion1d\nfrom yt.utilities.exceptions import \\\n YTUnitOperationError, YTUfuncUnitError\nfrom yt.testing import \\\n fake_random_ds, requires_module, \\\n assert_allclose_units\nfrom yt.funcs import fix_length\nfrom yt.units.unit_symbols import \\\n cm, m, g\nfrom yt.utilities.physical_ratios import \\\n metallicity_sun\n\ndef operate_and_compare(a, b, op, answer):\n # Test generator for YTArrays tests\n assert_array_equal(op(a, b), answer)\n\n\ndef assert_isinstance(a, type):\n assert isinstance(a, type)\n\n\ndef test_addition():\n \"\"\"\n Test addition of two YTArrays\n\n \"\"\"\n\n # Same units\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = YTArray([4, 5, 6], 'cm')\n a3 = [4*cm, 5*cm, 6*cm]\n answer = YTArray([5, 7, 9], 'cm')\n\n yield operate_and_compare, a1, a2, operator.add, answer\n yield operate_and_compare, a2, a1, operator.add, answer\n yield operate_and_compare, a1, a3, operator.add, answer\n yield operate_and_compare, a3, a1, operator.add, answer\n yield operate_and_compare, a2, a1, np.add, answer\n yield operate_and_compare, a1, a2, np.add, answer\n yield operate_and_compare, a1, a3, np.add, answer\n yield operate_and_compare, a3, a1, np.add, answer\n\n # different units\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = YTArray([4, 5, 6], 'm')\n a3 = [4*m, 5*m, 6*m]\n answer1 = YTArray([401, 502, 603], 'cm')\n answer2 = YTArray([4.01, 5.02, 6.03], 'm')\n\n yield operate_and_compare, a1, a2, operator.add, answer1\n yield operate_and_compare, a2, a1, operator.add, answer2\n yield operate_and_compare, a1, a3, operator.add, answer1\n yield operate_and_compare, a3, a1, operator.add, answer1\n yield assert_raises, YTUfuncUnitError, np.add, a1, a2\n yield assert_raises, YTUfuncUnitError, np.add, a1, a3\n\n # Test dimensionless quantities\n a1 = YTArray([1, 2, 3])\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer = YTArray([5, 7, 9])\n\n yield operate_and_compare, a1, a2, operator.add, answer\n yield operate_and_compare, a2, a1, operator.add, answer\n yield operate_and_compare, a1, a3, operator.add, answer\n yield operate_and_compare, a3, a1, operator.add, answer\n yield operate_and_compare, a1, a2, np.add, answer\n yield operate_and_compare, a2, a1, np.add, answer\n yield operate_and_compare, a1, a3, np.add, answer\n yield operate_and_compare, a3, a1, np.add, answer\n\n # Catch the different dimensions error\n a1 = YTArray([1, 2, 3], 'm')\n a2 = YTArray([4, 5, 6], 'kg')\n\n yield assert_raises, YTUnitOperationError, operator.add, a1, a2\n yield assert_raises, YTUnitOperationError, operator.iadd, a1, a2\n\n\ndef test_subtraction():\n \"\"\"\n Test subtraction of two 
YTArrays\n\n \"\"\"\n\n # Same units\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = YTArray([4, 5, 6], 'cm')\n a3 = [4*cm, 5*cm, 6*cm]\n answer1 = YTArray([-3, -3, -3], 'cm')\n answer2 = YTArray([3, 3, 3], 'cm')\n\n yield operate_and_compare, a1, a2, operator.sub, answer1\n yield operate_and_compare, a2, a1, operator.sub, answer2\n yield operate_and_compare, a1, a3, operator.sub, answer1\n yield operate_and_compare, a3, a1, operator.sub, answer2\n yield operate_and_compare, a1, a2, np.subtract, answer1\n yield operate_and_compare, a2, a1, np.subtract, answer2\n yield operate_and_compare, a1, a3, np.subtract, answer1\n yield operate_and_compare, a3, a1, np.subtract, answer2\n\n # different units\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = YTArray([4, 5, 6], 'm')\n a3 = [4*m, 5*m, 6*m]\n answer1 = YTArray([-399, -498, -597], 'cm')\n answer2 = YTArray([3.99, 4.98, 5.97], 'm')\n answer3 = YTArray([399, 498, 597], 'cm')\n\n yield operate_and_compare, a1, a2, operator.sub, answer1\n yield operate_and_compare, a2, a1, operator.sub, answer2\n yield operate_and_compare, a1, a3, operator.sub, answer1\n yield operate_and_compare, a3, a1, operator.sub, answer3\n yield assert_raises, YTUfuncUnitError, np.subtract, a1, a2\n yield assert_raises, YTUfuncUnitError, np.subtract, a1, a3\n\n # Test dimensionless quantities\n a1 = YTArray([1, 2, 3])\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer1 = YTArray([-3, -3, -3])\n answer2 = YTArray([3, 3, 3])\n\n yield operate_and_compare, a1, a2, operator.sub, answer1\n yield operate_and_compare, a2, a1, operator.sub, answer2\n yield operate_and_compare, a1, a3, operator.sub, answer1\n yield operate_and_compare, a3, a1, operator.sub, answer2\n yield operate_and_compare, a1, a2, np.subtract, answer1\n yield operate_and_compare, a2, a1, np.subtract, answer2\n yield operate_and_compare, a1, a3, np.subtract, answer1\n yield operate_and_compare, a3, a1, np.subtract, answer2\n\n # Catch the different dimensions error\n a1 = YTArray([1, 2, 3], 'm')\n a2 = YTArray([4, 5, 6], 'kg')\n\n yield assert_raises, YTUnitOperationError, operator.sub, a1, a2\n yield assert_raises, YTUnitOperationError, operator.isub, a1, a2\n\n\ndef test_multiplication():\n \"\"\"\n Test multiplication of two YTArrays\n\n \"\"\"\n\n # Same units\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = YTArray([4, 5, 6], 'cm')\n a3 = [4*cm, 5*cm, 6*cm]\n answer = YTArray([4, 10, 18], 'cm**2')\n\n yield operate_and_compare, a1, a2, operator.mul, answer\n yield operate_and_compare, a2, a1, operator.mul, answer\n yield operate_and_compare, a1, a3, operator.mul, answer\n yield operate_and_compare, a3, a1, operator.mul, answer\n yield operate_and_compare, a1, a2, np.multiply, answer\n yield operate_and_compare, a2, a1, np.multiply, answer\n yield operate_and_compare, a1, a3, np.multiply, answer\n yield operate_and_compare, a3, a1, np.multiply, answer\n\n # different units, same dimension\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = YTArray([4, 5, 6], 'm')\n a3 = [4*m, 5*m, 6*m]\n answer1 = YTArray([400, 1000, 1800], 'cm**2')\n answer2 = YTArray([.04, .10, .18], 'm**2')\n answer3 = YTArray([4, 10, 18], 'cm*m')\n\n yield operate_and_compare, a1, a2, operator.mul, answer1\n yield operate_and_compare, a2, a1, operator.mul, answer2\n yield operate_and_compare, a1, a3, operator.mul, answer1\n yield operate_and_compare, a3, a1, operator.mul, answer2\n yield operate_and_compare, a1, a2, np.multiply, answer3\n yield operate_and_compare, a2, a1, np.multiply, answer3\n yield operate_and_compare, a1, a3, np.multiply, answer3\n yield operate_and_compare, 
a3, a1, np.multiply, answer3\n\n # different dimensions\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = YTArray([4, 5, 6], 'g')\n a3 = [4*g, 5*g, 6*g]\n answer = YTArray([4, 10, 18], 'cm*g')\n\n yield operate_and_compare, a1, a2, operator.mul, answer\n yield operate_and_compare, a2, a1, operator.mul, answer\n yield operate_and_compare, a1, a3, operator.mul, answer\n yield operate_and_compare, a3, a1, operator.mul, answer\n yield operate_and_compare, a1, a2, np.multiply, answer\n yield operate_and_compare, a2, a1, np.multiply, answer\n yield operate_and_compare, a1, a3, np.multiply, answer\n yield operate_and_compare, a3, a1, np.multiply, answer\n\n # One dimensionless, one unitful\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer = YTArray([4, 10, 18], 'cm')\n\n yield operate_and_compare, a1, a2, operator.mul, answer\n yield operate_and_compare, a2, a1, operator.mul, answer\n yield operate_and_compare, a1, a3, operator.mul, answer\n yield operate_and_compare, a3, a1, operator.mul, answer\n yield operate_and_compare, a1, a2, np.multiply, answer\n yield operate_and_compare, a2, a1, np.multiply, answer\n yield operate_and_compare, a1, a3, np.multiply, answer\n yield operate_and_compare, a3, a1, np.multiply, answer\n\n # Both dimensionless quantities\n a1 = YTArray([1, 2, 3])\n a2 = array([4, 5, 6])\n a3 = [4, 5, 6]\n answer = YTArray([4, 10, 18])\n\n yield operate_and_compare, a1, a2, operator.mul, answer\n yield operate_and_compare, a2, a1, operator.mul, answer\n yield operate_and_compare, a1, a3, operator.mul, answer\n yield operate_and_compare, a3, a1, operator.mul, answer\n yield operate_and_compare, a1, a2, np.multiply, answer\n yield operate_and_compare, a2, a1, np.multiply, answer\n yield operate_and_compare, a1, a3, np.multiply, answer\n yield operate_and_compare, a3, a1, np.multiply, answer\n\n\ndef test_division():\n \"\"\"\n Test division of two YTArrays\n\n \"\"\"\n\n # Same units\n a1 = YTArray([1., 2., 3.], 'cm')\n a2 = YTArray([4., 5., 6.], 'cm')\n a3 = [4*cm, 5*cm, 6*cm]\n answer1 = YTArray([0.25, 0.4, 0.5])\n answer2 = YTArray([4, 2.5, 2])\n if \"div\" in dir(operator):\n op = operator.div\n else:\n op = operator.truediv\n\n yield operate_and_compare, a1, a2, op, answer1\n yield operate_and_compare, a2, a1, op, answer2\n yield operate_and_compare, a1, a3, op, answer1\n yield operate_and_compare, a3, a1, op, answer2\n yield operate_and_compare, a1, a2, np.divide, answer1\n yield operate_and_compare, a2, a1, np.divide, answer2\n yield operate_and_compare, a1, a3, np.divide, answer1\n yield operate_and_compare, a3, a1, np.divide, answer2\n\n # different units, same dimension\n a1 = YTArray([1., 2., 3.], 'cm')\n a2 = YTArray([4., 5., 6.], 'm')\n a3 = [4*m, 5*m, 6*m]\n answer1 = YTArray([.0025, .004, .005])\n answer2 = YTArray([400, 250, 200])\n answer3 = YTArray([0.25, 0.4, 0.5], 'cm/m')\n answer4 = YTArray([4.0, 2.5, 2.0], 'm/cm')\n\n yield operate_and_compare, a1, a2, op, answer1\n yield operate_and_compare, a2, a1, op, answer2\n yield operate_and_compare, a1, a3, op, answer1\n yield operate_and_compare, a3, a1, op, answer2\n yield operate_and_compare, a1, a2, np.divide, answer3\n yield operate_and_compare, a2, a1, np.divide, answer4\n yield operate_and_compare, a1, a3, np.divide, answer3\n yield operate_and_compare, a3, a1, np.divide, answer4\n\n # different dimensions\n a1 = YTArray([1., 2., 3.], 'cm')\n a2 = YTArray([4., 5., 6.], 'g')\n a3 = [4*g, 5*g, 6*g]\n answer1 = YTArray([0.25, 0.4, 0.5], 'cm/g')\n answer2 = YTArray([4, 2.5, 2], 'g/cm')\n\n 
yield operate_and_compare, a1, a2, op, answer1\n yield operate_and_compare, a2, a1, op, answer2\n yield operate_and_compare, a1, a3, op, answer1\n yield operate_and_compare, a3, a1, op, answer2\n yield operate_and_compare, a1, a2, np.divide, answer1\n yield operate_and_compare, a2, a1, np.divide, answer2\n yield operate_and_compare, a1, a3, np.divide, answer1\n yield operate_and_compare, a3, a1, np.divide, answer2\n\n # One dimensionless, one unitful\n a1 = YTArray([1., 2., 3.], 'cm')\n a2 = array([4., 5., 6.])\n a3 = [4, 5, 6]\n answer1 = YTArray([0.25, 0.4, 0.5], 'cm')\n answer2 = YTArray([4, 2.5, 2], '1/cm')\n\n yield operate_and_compare, a1, a2, op, answer1\n yield operate_and_compare, a2, a1, op, answer2\n yield operate_and_compare, a1, a3, op, answer1\n yield operate_and_compare, a3, a1, op, answer2\n yield operate_and_compare, a1, a2, np.divide, answer1\n yield operate_and_compare, a2, a1, np.divide, answer2\n yield operate_and_compare, a1, a3, np.divide, answer1\n yield operate_and_compare, a3, a1, np.divide, answer2\n\n # Both dimensionless quantities\n a1 = YTArray([1., 2., 3.])\n a2 = array([4., 5., 6.])\n a3 = [4, 5, 6]\n answer1 = YTArray([0.25, 0.4, 0.5])\n answer2 = YTArray([4, 2.5, 2])\n\n yield operate_and_compare, a1, a2, op, answer1\n yield operate_and_compare, a2, a1, op, answer2\n yield operate_and_compare, a1, a3, op, answer1\n yield operate_and_compare, a3, a1, op, answer2\n yield operate_and_compare, a1, a2, np.divide, answer1\n yield operate_and_compare, a2, a1, np.divide, answer2\n yield operate_and_compare, a1, a3, np.divide, answer1\n yield operate_and_compare, a3, a1, np.divide, answer2\n\n\ndef test_power():\n \"\"\"\n Test the power operator to ensure units are correct.\n\n \"\"\"\n\n from yt.units import cm\n\n cm_arr = np.array([1.0, 1.0]) * cm\n\n assert_equal(cm**3, YTQuantity(1, 'cm**3'))\n assert_equal(np.power(cm, 3), YTQuantity(1, 'cm**3'))\n assert_equal(cm**YTQuantity(3), YTQuantity(1, 'cm**3'))\n assert_raises(YTUnitOperationError, np.power, cm, YTQuantity(3, 'g'))\n\n assert_equal(cm_arr**3, YTArray([1, 1], 'cm**3'))\n assert_equal(np.power(cm_arr, 3), YTArray([1, 1], 'cm**3'))\n assert_equal(cm_arr**YTQuantity(3), YTArray([1, 1], 'cm**3'))\n assert_raises(YTUnitOperationError, np.power, cm_arr, YTQuantity(3, 'g'))\n\n\ndef test_comparisons():\n \"\"\"\n Test numpy ufunc comparison operators for unit consistency.\n\n \"\"\"\n from yt.units.yt_array import YTArray\n\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = YTArray([2, 1, 3], 'cm')\n a3 = YTArray([.02, .01, .03], 'm')\n\n ops = (\n np.less,\n np.less_equal,\n np.greater,\n np.greater_equal,\n np.equal,\n np.not_equal\n )\n\n answers = (\n [True, False, False],\n [True, False, True],\n [False, True, False],\n [False, True, True],\n [False, False, True],\n [True, True, False],\n )\n\n for op, answer in zip(ops, answers):\n yield operate_and_compare, a1, a2, op, answer\n\n for op in ops:\n yield assert_raises, YTUfuncUnitError, op, a1, a3\n\n for op, answer in zip(ops, answers):\n yield operate_and_compare, a1, a3.in_units('cm'), op, answer\n\n\ndef test_unit_conversions():\n \"\"\"\n Test operations that convert to different units or cast to ndarray\n\n \"\"\"\n from yt.units.yt_array import YTQuantity\n from yt.units.unit_object import Unit\n\n km = YTQuantity(1, 'km')\n km_in_cm = km.in_units('cm')\n cm_unit = Unit('cm')\n kpc_unit = Unit('kpc')\n\n yield assert_equal, km_in_cm, km\n yield assert_equal, km_in_cm.in_cgs(), 1e5\n yield assert_equal, km_in_cm.in_mks(), 1e3\n yield assert_equal, km_in_cm.units, 
cm_unit\n\n km_view = km.ndarray_view()\n km.convert_to_units('cm')\n assert_true(km_view.base is km.base)\n\n yield assert_equal, km, YTQuantity(1, 'km')\n yield assert_equal, km.in_cgs(), 1e5\n yield assert_equal, km.in_mks(), 1e3\n yield assert_equal, km.units, cm_unit\n\n km.convert_to_units('kpc')\n assert_true(km_view.base is km.base)\n\n yield assert_array_almost_equal_nulp, km, YTQuantity(1, 'km')\n yield assert_array_almost_equal_nulp, km.in_cgs(), YTQuantity(1e5, 'cm')\n yield assert_array_almost_equal_nulp, km.in_mks(), YTQuantity(1e3, 'm')\n yield assert_equal, km.units, kpc_unit\n\n yield assert_isinstance, km.to_ndarray(), np.ndarray\n yield assert_isinstance, km.ndarray_view(), np.ndarray\n\n dyne = YTQuantity(1.0, 'dyne')\n\n yield assert_equal, dyne.in_cgs(), dyne\n yield assert_equal, dyne.in_cgs(), 1.0\n yield assert_equal, dyne.in_mks(), dyne\n yield assert_equal, dyne.in_mks(), 1e-5\n yield assert_equal, str(dyne.in_mks().units), 'kg*m/s**2'\n yield assert_equal, str(dyne.in_cgs().units), 'cm*g/s**2'\n\n em3 = YTQuantity(1.0, 'erg/m**3')\n\n yield assert_equal, em3.in_cgs(), em3\n yield assert_equal, em3.in_cgs(), 1e-6\n yield assert_equal, em3.in_mks(), em3\n yield assert_equal, em3.in_mks(), 1e-7\n yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'\n yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'\n\ndef test_temperature_conversions():\n \"\"\"\n Test conversions between various supported temperature scales.\n\n Also ensure we only allow compound units with temperature\n scales that have a proper zero point.\n\n \"\"\"\n from yt.units.unit_object import InvalidUnitOperation\n\n km = YTQuantity(1, 'km')\n balmy = YTQuantity(300, 'K')\n balmy_F = YTQuantity(80.33, 'degF')\n balmy_C = YTQuantity(26.85, 'degC')\n balmy_R = YTQuantity(540, 'R')\n\n assert_array_almost_equal(balmy.in_units('degF'), balmy_F)\n assert_array_almost_equal(balmy.in_units('degC'), balmy_C)\n assert_array_almost_equal(balmy.in_units('R'), balmy_R)\n\n balmy_view = balmy.ndarray_view()\n\n balmy.convert_to_units('degF')\n yield assert_true, balmy_view.base is balmy.base\n yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)\n\n balmy.convert_to_units('degC')\n yield assert_true, balmy_view.base is balmy.base\n yield assert_array_almost_equal, np.array(balmy), np.array(balmy_C)\n\n balmy.convert_to_units('R')\n yield assert_true, balmy_view.base is balmy.base\n yield assert_array_almost_equal, np.array(balmy), np.array(balmy_R)\n\n balmy.convert_to_units('degF')\n yield assert_true, balmy_view.base is balmy.base\n yield assert_array_almost_equal, np.array(balmy), np.array(balmy_F)\n\n yield assert_raises, InvalidUnitOperation, np.multiply, balmy, km\n\n # Does CGS conversion from F to K work?\n yield assert_array_almost_equal, balmy.in_cgs(), YTQuantity(300, 'K')\n\n\ndef test_yt_array_yt_quantity_ops():\n \"\"\"\n Test operations that combine YTArray and YTQuantity\n \"\"\"\n a = YTArray(range(10), 'cm')\n b = YTQuantity(5, 'g')\n\n yield assert_isinstance, a*b, YTArray\n yield assert_isinstance, b*a, YTArray\n\n yield assert_isinstance, a/b, YTArray\n yield assert_isinstance, b/a, YTArray\n\n yield assert_isinstance, a*a, YTArray\n yield assert_isinstance, a/a, YTArray\n\n yield assert_isinstance, b*b, YTQuantity\n yield assert_isinstance, b/b, YTQuantity\n\n\ndef test_selecting():\n \"\"\"\n Test slicing of YTArrays\n\n \"\"\"\n a = YTArray(range(10), 'cm')\n a_slice = a[:3]\n a_fancy_index = a[[1, 1, 3, 5]]\n a_array_fancy_index = a[array([[1, 1], [3, 
5]])]\n a_boolean_index = a[a > 5]\n a_selection = a[0]\n\n yield assert_array_equal, a_slice, YTArray([0, 1, 2], 'cm')\n yield assert_array_equal, a_fancy_index, YTArray([1, 1, 3, 5], 'cm')\n yield assert_array_equal, a_array_fancy_index, \\\n YTArray([[1, 1, ], [3, 5]], 'cm')\n yield assert_array_equal, a_boolean_index, YTArray([6, 7, 8, 9], 'cm')\n yield assert_isinstance, a_selection, YTQuantity\n\n # .base points to the original array for a numpy view. If it is not a\n # view, .base is None.\n yield assert_true, a_slice.base is a\n\n\ndef test_fix_length():\n \"\"\"\n Test fixing the length of an array. Used in spheres and other data objects\n \"\"\"\n ds = fake_random_ds(64, nprocs=1, length_unit=10)\n length = ds.quan(1.0, 'code_length')\n new_length = fix_length(length, ds=ds)\n yield assert_equal, YTQuantity(10, 'cm'), new_length\n\n\ndef test_ytarray_pickle():\n ds = fake_random_ds(64, nprocs=1)\n test_data = [ds.quan(12.0, 'code_length'),\n ds.arr([1, 2, 3], 'code_length')]\n\n for data in test_data:\n tempf = tempfile.NamedTemporaryFile(delete=False)\n pickle.dump(data, tempf)\n tempf.close()\n\n with open(tempf.name, \"rb\") as fname:\n loaded_data = pickle.load(fname)\n os.unlink(tempf.name)\n\n yield assert_array_equal, data, loaded_data\n yield assert_equal, data.units, loaded_data.units\n yield assert_array_equal, array(data.in_cgs()), \\\n array(loaded_data.in_cgs())\n yield assert_equal, float(data.units.base_value), \\\n float(loaded_data.units.base_value)\n\n\ndef test_copy():\n quan = YTQuantity(1, 'g')\n arr = YTArray([1, 2, 3], 'cm')\n\n yield assert_equal, copy.copy(quan), quan\n yield assert_array_equal, copy.copy(arr), arr\n\n yield assert_equal, copy.deepcopy(quan), quan\n yield assert_array_equal, copy.deepcopy(arr), arr\n\n yield assert_equal, quan.copy(), quan\n yield assert_array_equal, arr.copy(), arr\n\n yield assert_equal, np.copy(quan), quan\n yield assert_array_equal, np.copy(arr), arr\n\n\ndef unary_ufunc_comparison(ufunc, a):\n out = a.copy()\n a_array = a.to_ndarray()\n if ufunc in (np.isreal, np.iscomplex, ):\n # According to the numpy docs, these two explicitly do not do\n # in-place copies.\n ret = ufunc(a)\n assert_true(not hasattr(ret, 'units'))\n assert_array_equal(ret, ufunc(a))\n elif ufunc in (np.exp, np.exp2, np.log, np.log2, np.log10, np.expm1,\n np.log1p, np.sin, np.cos, np.tan, np.arcsin, np.arccos,\n np.arctan, np.sinh, np.cosh, np.tanh, np.arccosh,\n np.arcsinh, np.arctanh, np.deg2rad, np.rad2deg,\n np.isfinite, np.isinf, np.isnan, np.signbit, np.sign,\n np.rint, np.logical_not):\n # These operations should return identical results compared to numpy.\n\n try:\n ret = ufunc(a, out=out)\n except YTUnitOperationError:\n assert_true(ufunc in (np.deg2rad, np.rad2deg))\n ret = ufunc(YTArray(a, '1'))\n\n assert_array_equal(ret, out)\n assert_array_equal(ret, ufunc(a_array))\n # In-place copies do not drop units.\n assert_true(hasattr(out, 'units'))\n assert_true(not hasattr(ret, 'units'))\n elif ufunc in (np.absolute, np.fabs, np.conjugate, np.floor, np.ceil,\n np.trunc, np.negative):\n ret = ufunc(a, out=out)\n\n assert_array_equal(ret, out)\n assert_array_equal(ret.to_ndarray(), ufunc(a_array))\n assert_true(ret.units == out.units)\n elif ufunc in (np.ones_like, np.square, np.sqrt, np.reciprocal):\n if ufunc is np.ones_like:\n ret = ufunc(a)\n else:\n ret = ufunc(a, out=out)\n assert_array_equal(ret, out)\n\n assert_array_equal(ret.to_ndarray(), ufunc(a_array))\n if ufunc is np.square:\n assert_true(out.units == a.units**2)\n 
assert_true(ret.units == a.units**2)\n elif ufunc is np.sqrt:\n assert_true(out.units == a.units**0.5)\n assert_true(ret.units == a.units**0.5)\n elif ufunc is np.reciprocal:\n assert_true(out.units == a.units**-1)\n assert_true(ret.units == a.units**-1)\n elif ufunc is np.modf:\n ret1, ret2 = ufunc(a)\n npret1, npret2 = ufunc(a_array)\n\n assert_array_equal(ret1.to_ndarray(), npret1)\n assert_array_equal(ret2.to_ndarray(), npret2)\n elif ufunc is np.frexp:\n ret1, ret2 = ufunc(a)\n npret1, npret2 = ufunc(a_array)\n\n assert_array_equal(ret1, npret1)\n assert_array_equal(ret2, npret2)\n else:\n # There shouldn't be any untested ufuncs.\n assert_true(False)\n\n\ndef binary_ufunc_comparison(ufunc, a, b):\n out = a.copy()\n if ufunc in (np.add, np.subtract, np.remainder, np.fmod, np.mod,\n np.arctan2, np.hypot, np.greater, np.greater_equal, np.less,\n np.less_equal, np.equal, np.not_equal, np.logical_and,\n np.logical_or, np.logical_xor, np.maximum, np.minimum,\n np.fmax, np.fmin, np.nextafter):\n if a.units != b.units and a.units.dimensions == b.units.dimensions:\n assert_raises(YTUfuncUnitError, ufunc, a, b)\n return\n elif a.units != b.units:\n assert_raises(YTUnitOperationError, ufunc, a, b)\n return\n\n ret = ufunc(a, b, out=out)\n\n if ufunc is np.multiply:\n assert_true(ret.units == a.units*b.units)\n elif ufunc in (np.divide, np.true_divide, np.arctan2):\n assert_true(ret.units.dimensions == (a.units/b.units).dimensions)\n elif ufunc in (np.greater, np.greater_equal, np.less, np.less_equal,\n np.not_equal, np.equal, np.logical_and, np.logical_or,\n np.logical_xor):\n assert_true(not isinstance(ret, YTArray) and\n isinstance(ret, np.ndarray))\n assert_array_equal(ret, out)\n if (ufunc in (np.divide, np.true_divide, np.arctan2) and\n (a.units.dimensions == b.units.dimensions)):\n assert_array_almost_equal(\n np.array(ret), ufunc(np.array(a.in_cgs()), np.array(b.in_cgs())))\n else:\n assert_array_almost_equal(np.array(ret), ufunc(np.array(a), np.array(b)))\n\n\ndef test_ufuncs():\n for ufunc in unary_operators:\n yield unary_ufunc_comparison, ufunc, YTArray([.3, .4, .5], 'cm')\n yield unary_ufunc_comparison, ufunc, YTArray([12, 23, 47], 'g')\n yield unary_ufunc_comparison, ufunc, YTArray([2, 4, -6], 'erg/m**3')\n\n for ufunc in binary_operators:\n\n # arr**arr is undefined for arrays with units because\n # each element of the result would have different units.\n if ufunc is np.power:\n a = YTArray([.3, .4, .5], 'cm')\n b = YTArray([.1, .2, .3], 'dimensionless')\n c = np.array(b)\n yield binary_ufunc_comparison, ufunc, a, b\n yield binary_ufunc_comparison, ufunc, a, c\n continue\n\n a = YTArray([.3, .4, .5], 'cm')\n b = YTArray([.1, .2, .3], 'cm')\n c = YTArray([.1, .2, .3], 'm')\n d = YTArray([.1, .2, .3], 'g')\n e = YTArray([.1, .2, .3], 'erg/m**3')\n\n for pair in itertools.product([a, b, c, d, e], repeat=2):\n yield binary_ufunc_comparison, ufunc, pair[0], pair[1]\n\n\ndef test_convenience():\n\n arr = YTArray([1, 2, 3], 'cm')\n\n yield assert_equal, arr.unit_quantity, YTQuantity(1, 'cm')\n yield assert_equal, arr.uq, YTQuantity(1, 'cm')\n yield assert_isinstance, arr.unit_quantity, YTQuantity\n yield assert_isinstance, arr.uq, YTQuantity\n\n yield assert_array_equal, arr.unit_array, YTArray(np.ones_like(arr), 'cm')\n yield assert_array_equal, arr.ua, YTArray(np.ones_like(arr), 'cm')\n yield assert_isinstance, arr.unit_array, YTArray\n yield assert_isinstance, arr.ua, YTArray\n\n yield assert_array_equal, arr.ndview, arr.view(np.ndarray)\n yield assert_array_equal, arr.d, 
arr.view(np.ndarray)\n yield assert_true, arr.ndview.base is arr.base\n yield assert_true, arr.d.base is arr.base\n\n yield assert_array_equal, arr.value, np.array(arr)\n yield assert_array_equal, arr.v, np.array(arr)\n\n\ndef test_registry_association():\n ds = fake_random_ds(64, nprocs=1, length_unit=10)\n a = ds.quan(3, 'cm')\n b = YTQuantity(4, 'm')\n c = ds.quan(6, '')\n d = 5\n\n yield assert_equal, id(a.units.registry), id(ds.unit_registry)\n\n def binary_op_registry_comparison(op):\n e = op(a, b)\n f = op(b, a)\n g = op(c, d)\n h = op(d, c)\n\n assert_equal(id(e.units.registry), id(ds.unit_registry))\n assert_equal(id(f.units.registry), id(b.units.registry))\n assert_equal(id(g.units.registry), id(h.units.registry))\n assert_equal(id(g.units.registry), id(ds.unit_registry))\n\n def unary_op_registry_comparison(op):\n c = op(a)\n d = op(b)\n\n assert_equal(id(c.units.registry), id(ds.unit_registry))\n assert_equal(id(d.units.registry), id(b.units.registry))\n\n binary_ops = [operator.add, operator.sub, operator.mul, \n operator.truediv]\n if hasattr(operator, \"div\"):\n binary_ops.append(operator.div)\n for op in binary_ops:\n yield binary_op_registry_comparison, op\n\n for op in [operator.abs, operator.neg, operator.pos]:\n yield unary_op_registry_comparison, op\n\n@requires_module(\"astropy\")\ndef test_astropy():\n from yt.utilities.on_demand_imports import _astropy\n\n ap_arr = np.arange(10)*_astropy.units.km/_astropy.units.hr\n yt_arr = YTArray(np.arange(10), \"km/hr\")\n yt_arr2 = YTArray.from_astropy(ap_arr)\n\n ap_quan = 10.*_astropy.units.Msun**0.5/(_astropy.units.kpc**3)\n yt_quan = YTQuantity(10., \"sqrt(Msun)/kpc**3\")\n yt_quan2 = YTQuantity.from_astropy(ap_quan)\n\n yield assert_array_equal, ap_arr, yt_arr.to_astropy()\n yield assert_array_equal, yt_arr, YTArray.from_astropy(ap_arr)\n yield assert_array_equal, yt_arr, yt_arr2\n\n yield assert_equal, ap_quan, yt_quan.to_astropy()\n yield assert_equal, yt_quan, YTQuantity.from_astropy(ap_quan)\n yield assert_equal, yt_quan, yt_quan2\n\n yield assert_array_equal, yt_arr, YTArray.from_astropy(yt_arr.to_astropy())\n yield assert_equal, yt_quan, YTQuantity.from_astropy(yt_quan.to_astropy())\n\ndef test_subclass():\n\n class YTASubclass(YTArray):\n pass\n\n a = YTASubclass([4, 5, 6], 'g')\n b = YTASubclass([7, 8, 9], 'kg')\n nu = YTASubclass([10, 11, 12], '')\n nda = np.array([3, 4, 5])\n yta = YTArray([6, 7, 8], 'mg')\n loq = [YTQuantity(6, 'mg'), YTQuantity(7, 'mg'), YTQuantity(8, 'mg')]\n ytq = YTQuantity(4, 'cm')\n ndf = np.float64(3)\n\n def op_comparison(op, inst1, inst2, compare_class):\n assert_isinstance(op(inst1, inst2), compare_class)\n assert_isinstance(op(inst2, inst1), compare_class)\n\n ops = [operator.mul, operator.truediv]\n if hasattr(operator, \"div\"):\n ops.append(operator.div)\n for op in ops:\n for inst in (b, ytq, ndf, yta, nda, loq):\n yield op_comparison, op, a, inst, YTASubclass\n\n yield op_comparison, op, ytq, nda, YTArray\n yield op_comparison, op, ytq, yta, YTArray\n\n for op in (operator.add, operator.sub):\n yield op_comparison, op, nu, nda, YTASubclass\n yield op_comparison, op, a, b, YTASubclass\n yield op_comparison, op, a, yta, YTASubclass\n yield op_comparison, op, a, loq, YTASubclass\n\n yield assert_isinstance, a[0], YTQuantity\n yield assert_isinstance, a[:], YTASubclass\n yield assert_isinstance, a[:2], YTASubclass\n\ndef test_h5_io():\n tmpdir = tempfile.mkdtemp()\n curdir = os.getcwd()\n os.chdir(tmpdir)\n\n ds = fake_random_ds(64, nprocs=1, length_unit=10)\n\n warr = 
ds.arr(np.random.random((256, 256)), 'code_length')\n\n warr.write_hdf5('test.h5')\n\n iarr = YTArray.from_hdf5('test.h5')\n\n yield assert_equal, warr, iarr\n yield assert_equal, warr.units.registry['code_length'], iarr.units.registry['code_length']\n\n os.chdir(curdir)\n shutil.rmtree(tmpdir)\n\ndef test_equivalencies():\n from yt.utilities.physical_constants import clight, mp, kboltz, hcgs, mh, me, \\\n mass_sun_cgs, G, stefan_boltzmann_constant_cgs\n import yt.units as u\n\n # Mass-energy\n\n E = mp.to_equivalent(\"keV\",\"mass_energy\")\n yield assert_equal, E, mp*clight*clight\n yield assert_allclose_units, mp, E.to_equivalent(\"g\", \"mass_energy\")\n\n # Thermal\n\n T = YTQuantity(1.0e8,\"K\")\n E = T.to_equivalent(\"W*hr\",\"thermal\")\n yield assert_equal, E, (kboltz*T).in_units(\"W*hr\")\n yield assert_allclose_units, T, E.to_equivalent(\"K\", \"thermal\")\n\n # Spectral\n\n l = YTQuantity(4000.,\"angstrom\")\n nu = l.to_equivalent(\"Hz\",\"spectral\")\n yield assert_equal, nu, clight/l\n E = hcgs*nu\n l2 = E.to_equivalent(\"angstrom\", \"spectral\")\n yield assert_allclose_units, l, l2\n nu2 = clight/l2.in_units(\"cm\")\n yield assert_allclose_units, nu, nu2\n E2 = nu2.to_equivalent(\"keV\", \"spectral\")\n yield assert_allclose_units, E2, E.in_units(\"keV\")\n\n # Sound-speed\n\n mu = 0.6\n gg = 5./3.\n c_s = T.to_equivalent(\"km/s\",\"sound_speed\")\n yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))\n yield assert_allclose_units, T, c_s.to_equivalent(\"K\",\"sound_speed\")\n\n mu = 0.5\n gg = 4./3.\n c_s = T.to_equivalent(\"km/s\",\"sound_speed\", mu=mu, gamma=gg)\n yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))\n yield assert_allclose_units, T, c_s.to_equivalent(\"K\",\"sound_speed\",\n mu=mu, gamma=gg)\n\n # Lorentz\n\n v = 0.8*clight\n g = v.to_equivalent(\"dimensionless\",\"lorentz\")\n g2 = YTQuantity(1./np.sqrt(1.-0.8*0.8), \"dimensionless\")\n yield assert_allclose_units, g, g2\n v2 = g2.to_equivalent(\"mile/hr\", \"lorentz\")\n yield assert_allclose_units, v2, v.in_units(\"mile/hr\")\n\n # Schwarzschild\n\n R = mass_sun_cgs.to_equivalent(\"kpc\",\"schwarzschild\")\n yield assert_equal, R.in_cgs(), 2*G*mass_sun_cgs/(clight*clight)\n yield assert_allclose_units, mass_sun_cgs, R.to_equivalent(\"g\", \"schwarzschild\")\n\n # Compton\n\n l = me.to_equivalent(\"angstrom\",\"compton\")\n yield assert_equal, l, hcgs/(me*clight)\n yield assert_allclose_units, me, l.to_equivalent(\"g\", \"compton\")\n\n # Number density\n\n rho = mp/u.cm**3\n\n n = rho.to_equivalent(\"cm**-3\",\"number_density\")\n yield assert_equal, n, rho/(mh*0.6)\n yield assert_allclose_units, rho, n.to_equivalent(\"g/cm**3\",\"number_density\")\n\n n = rho.to_equivalent(\"cm**-3\",\"number_density\", mu=0.75)\n yield assert_equal, n, rho/(mh*0.75)\n yield assert_allclose_units, rho, n.to_equivalent(\"g/cm**3\",\"number_density\", mu=0.75)\n\n # Effective temperature\n\n T = YTQuantity(1.0e4, \"K\")\n F = T.to_equivalent(\"erg/s/cm**2\",\"effective_temperature\")\n yield assert_equal, F, stefan_boltzmann_constant_cgs*T**4\n yield assert_allclose_units, T, F.to_equivalent(\"K\", \"effective_temperature\")\n\ndef test_electromagnetic():\n from yt.units.dimensions import charge_mks, pressure, current_cgs, \\\n magnetic_field_mks, magnetic_field_cgs, power\n from yt.utilities.physical_constants import mu_0, qp\n from yt.utilities.physical_ratios import speed_of_light_cm_per_s\n\n # Various tests of SI and CGS electromagnetic units\n\n qp_mks = qp.to_equivalent(\"C\", \"SI\")\n yield assert_equal, 
qp_mks.units.dimensions, charge_mks\n yield assert_array_almost_equal, qp_mks.v, 10.0*qp.v/speed_of_light_cm_per_s\n\n qp_cgs = qp_mks.to_equivalent(\"esu\", \"CGS\")\n yield assert_array_almost_equal, qp_cgs, qp\n yield assert_equal, qp_cgs.units.dimensions, qp.units.dimensions\n \n qp_mks_k = qp.to_equivalent(\"kC\", \"SI\")\n yield assert_array_almost_equal, qp_mks_k.v, 1.0e-2*qp.v/speed_of_light_cm_per_s\n\n B = YTQuantity(1.0, \"T\")\n B_cgs = B.to_equivalent(\"gauss\", \"CGS\")\n yield assert_equal, B.units.dimensions, magnetic_field_mks\n yield assert_equal, B_cgs.units.dimensions, magnetic_field_cgs\n yield assert_array_almost_equal, B_cgs, YTQuantity(1.0e4, \"gauss\")\n\n u_mks = B*B/(2*mu_0)\n yield assert_equal, u_mks.units.dimensions, pressure\n u_cgs = B_cgs*B_cgs/(8*np.pi)\n yield assert_equal, u_cgs.units.dimensions, pressure\n yield assert_array_almost_equal, u_mks.in_cgs(), u_cgs\n \n I = YTQuantity(1.0, \"A\")\n I_cgs = I.to_equivalent(\"statA\", \"CGS\")\n yield assert_array_almost_equal, I_cgs, YTQuantity(0.1*speed_of_light_cm_per_s, \"statA\")\n yield assert_array_almost_equal, I_cgs.to_equivalent(\"mA\", \"SI\"), I.in_units(\"mA\")\n yield assert_equal, I_cgs.units.dimensions, current_cgs\n \n R = YTQuantity(1.0, \"ohm\")\n R_cgs = R.to_equivalent(\"statohm\", \"CGS\")\n P_mks = I*I*R\n P_cgs = I_cgs*I_cgs*R_cgs\n yield assert_equal, P_mks.units.dimensions, power\n yield assert_equal, P_cgs.units.dimensions, power\n yield assert_array_almost_equal, P_cgs.in_cgs(), P_mks.in_cgs()\n yield assert_array_almost_equal, P_cgs.in_mks(), YTQuantity(1.0, \"W\")\n \n V = YTQuantity(1.0, \"statV\")\n V_mks = V.to_equivalent(\"V\", \"SI\")\n yield assert_array_almost_equal, V_mks.v, 1.0e8*V.v/speed_of_light_cm_per_s\n\ndef test_ytarray_coercion():\n a = YTArray([1, 2, 3], 'cm')\n q = YTQuantity(3, 'cm')\n na = np.array([1, 2, 3])\n\n assert_isinstance(a*q, YTArray)\n assert_isinstance(q*na, YTArray)\n assert_isinstance(q*3, YTQuantity)\n assert_isinstance(q*np.float64(3), YTQuantity)\n assert_isinstance(q*np.array(3), YTQuantity)\n\ndef test_numpy_wrappers():\n a1 = YTArray([1, 2, 3], 'cm')\n a2 = YTArray([2, 3, 4, 5, 6], 'cm')\n catenate_answer = [1, 2, 3, 2, 3, 4, 5, 6]\n intersect_answer = [2, 3]\n union_answer = [1, 2, 3, 4, 5, 6]\n\n yield (assert_array_equal, YTArray(catenate_answer, 'cm'),\n uconcatenate((a1, a2)))\n yield assert_array_equal, catenate_answer, np.concatenate((a1, a2))\n\n yield (assert_array_equal, YTArray(intersect_answer, 'cm'),\n uintersect1d(a1, a2))\n yield assert_array_equal, intersect_answer, np.intersect1d(a1, a2)\n\n yield assert_array_equal, YTArray(union_answer, 'cm'), uunion1d(a1, a2)\n yield assert_array_equal, union_answer, np.union1d(a1, a2)\n\ndef test_dimensionless_conversion():\n a = YTQuantity(1, 'Zsun')\n b = a.in_units('Zsun')\n a.convert_to_units('Zsun')\n yield assert_true, a.units.base_value == metallicity_sun\n yield assert_true, b.units.base_value == metallicity_sun\n\ndef test_modified_unit_division():\n ds1 = fake_random_ds(64)\n ds2 = fake_random_ds(64)\n\n # this mocks comoving coordinates without going through the trouble\n # of setting up a fake cosmological dataset\n ds1.unit_registry.modify('m', 50)\n\n a = ds1.quan(3, 'm')\n b = ds2.quan(3, 'm')\n\n ret = a/b\n yield assert_true, ret == 0.5\n yield assert_true, ret.units.is_dimensionless\n yield assert_true, ret.units.base_value == 1.0\n",
"\"\"\"\nData structures for Enzo\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport h5py\nimport weakref\nimport numpy as np\nimport os\nimport stat\nimport string\nimport re\n\nfrom threading import Thread\n\nfrom yt.extern.six.moves import zip as izip\n\nfrom yt.funcs import *\nfrom yt.config import ytcfg\nfrom yt.data_objects.grid_patch import \\\n AMRGridPatch\nfrom yt.geometry.grid_geometry_handler import \\\n GridIndex\nfrom yt.geometry.geometry_handler import \\\n YTDataChunk\nfrom yt.data_objects.static_output import \\\n Dataset\nfrom yt.fields.field_info_container import \\\n FieldInfoContainer, NullFunc\nfrom yt.utilities.definitions import \\\n mpc_conversion, sec_conversion\nfrom yt.utilities.physical_constants import \\\n rho_crit_g_cm3_h2, cm_per_mpc\nfrom yt.utilities.io_handler import io_registry\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.utilities.pyparselibconfig import libconfig\n\nfrom .fields import \\\n EnzoFieldInfo\n\nfrom yt.utilities.parallel_tools.parallel_analysis_interface import \\\n parallel_blocking_call\n\nclass EnzoGrid(AMRGridPatch):\n \"\"\"\n Class representing a single Enzo Grid instance.\n \"\"\"\n\n def __init__(self, id, index):\n \"\"\"\n Returns an instance of EnzoGrid with *id*, associated with\n *filename* and *index*.\n \"\"\"\n #All of the field parameters will be passed to us as needed.\n AMRGridPatch.__init__(self, id, filename = None, index = index)\n self._children_ids = []\n self._parent_id = -1\n self.Level = -1\n\n def _guess_properties_from_parent(self):\n \"\"\"\n We know that our grid boundary occurs on the cell boundary of our\n parent. 
This can be a very expensive process, but it is necessary\n in some indices, where yt is unable to generate a completely\n space-filling tiling of grids, possibly due to the finite accuracy in a\n standard Enzo index file.\n \"\"\"\n rf = self.ds.refine_by\n my_ind = self.id - self._id_offset\n le = self.LeftEdge\n self.dds = self.Parent.dds/rf\n ParentLeftIndex = np.rint((self.LeftEdge-self.Parent.LeftEdge)/self.Parent.dds)\n self.start_index = rf*(ParentLeftIndex + self.Parent.get_global_startindex()).astype('int64')\n self.LeftEdge = self.Parent.LeftEdge + self.Parent.dds * ParentLeftIndex\n self.RightEdge = self.LeftEdge + self.ActiveDimensions*self.dds\n self.index.grid_left_edge[my_ind,:] = self.LeftEdge\n self.index.grid_right_edge[my_ind,:] = self.RightEdge\n self._child_mask = None\n self._child_index_mask = None\n self._child_indices = None\n self._setup_dx()\n\n def set_filename(self, filename):\n \"\"\"\n Intelligently set the filename.\n \"\"\"\n if filename is None:\n self.filename = filename\n return\n if self.index._strip_path:\n self.filename = os.path.join(self.index.directory,\n os.path.basename(filename))\n elif filename[0] == os.path.sep:\n self.filename = filename\n else:\n self.filename = os.path.join(self.index.directory, filename)\n return\n\n def __repr__(self):\n return \"EnzoGrid_%04i\" % (self.id)\n\n @property\n def Parent(self):\n if self._parent_id == -1: return None\n return self.index.grids[self._parent_id - self._id_offset]\n\n @property\n def Children(self):\n return [self.index.grids[cid - self._id_offset]\n for cid in self._children_ids]\n\n @property\n def NumberOfActiveParticles(self):\n if not hasattr(self.index, \"grid_active_particle_count\"): return {}\n id = self.id - self._id_offset\n nap = dict((ptype, self.index.grid_active_particle_count[ptype][id]) \\\n for ptype in self.index.grid_active_particle_count)\n return nap\n\nclass EnzoGridInMemory(EnzoGrid):\n __slots__ = ['proc_num']\n def set_filename(self, filename):\n pass\n\nclass EnzoGridGZ(EnzoGrid):\n\n __slots__ = ()\n\n def retrieve_ghost_zones(self, n_zones, fields, all_levels=False,\n smoothed=False):\n NGZ = self.ds.parameters.get(\"NumberOfGhostZones\", 3)\n if n_zones > NGZ:\n return EnzoGrid.retrieve_ghost_zones(\n self, n_zones, fields, all_levels, smoothed)\n\n # ----- Below is mostly the original code, except we remove the field\n # ----- access section\n # We will attempt this by creating a datacube that is exactly bigger\n # than the grid by nZones*dx in each direction\n nl = self.get_global_startindex() - n_zones\n nr = nl + self.ActiveDimensions + 2*n_zones\n new_left_edge = nl * self.dds + self.ds.domain_left_edge\n new_right_edge = nr * self.dds + self.ds.domain_left_edge\n # Something different needs to be done for the root grid, though\n level = self.Level\n args = (level, new_left_edge, new_right_edge)\n kwargs = {'dims': self.ActiveDimensions + 2*n_zones,\n 'num_ghost_zones':n_zones,\n 'use_pbar':False}\n # This should update the arguments to set the field parameters to be\n # those of this grid.\n kwargs.update(self.field_parameters)\n if smoothed:\n #cube = self.index.smoothed_covering_grid(\n # level, new_left_edge, new_right_edge, **kwargs)\n cube = self.index.smoothed_covering_grid(\n level, new_left_edge, **kwargs)\n else:\n cube = self.index.covering_grid(\n level, new_left_edge, **kwargs)\n # ----- This is EnzoGrid.get_data, duplicated here mostly for\n # ---- efficiency's sake.\n start_zone = NGZ - n_zones\n if start_zone == 0:\n end_zone = None\n else:\n 
end_zone = -(NGZ - n_zones)\n sl = [slice(start_zone, end_zone) for i in range(3)]\n if fields is None: return cube\n for field in ensure_list(fields):\n if field in self.field_list:\n conv_factor = 1.0\n if field in self.ds.field_info:\n conv_factor = self.ds.field_info[field]._convert_function(self)\n if self.ds.field_info[field].particle_type: continue\n temp = self.index.io._read_raw_data_set(self, field)\n temp = temp.swapaxes(0, 2)\n cube.field_data[field] = np.multiply(temp, conv_factor, temp)[sl]\n return cube\n\nclass EnzoHierarchy(GridIndex):\n\n _strip_path = False\n grid = EnzoGrid\n _preload_implemented = True\n\n def __init__(self, ds, dataset_type):\n\n self.dataset_type = dataset_type\n if ds.file_style != None:\n self._bn = ds.file_style\n else:\n self._bn = \"%s.cpu%%04i\"\n self.index_filename = os.path.abspath(\n \"%s.hierarchy\" % (ds.parameter_filename))\n if os.path.getsize(self.index_filename) == 0:\n raise IOError(-1,\"File empty\", self.index_filename)\n self.directory = os.path.dirname(self.index_filename)\n\n # For some reason, r8 seems to want Float64\n if \"CompilerPrecision\" in ds \\\n and ds[\"CompilerPrecision\"] == \"r4\":\n self.float_type = 'float32'\n else:\n self.float_type = 'float64'\n\n GridIndex.__init__(self, ds, dataset_type)\n # sync it back\n self.dataset.dataset_type = self.dataset_type\n\n def _count_grids(self):\n self.num_grids = None\n test_grid = test_grid_id = None\n self.num_stars = 0\n for line in rlines(open(self.index_filename, \"rb\")):\n if line.startswith(\"BaryonFileName\") or \\\n line.startswith(\"ParticleFileName\") or \\\n line.startswith(\"FileName \"):\n test_grid = line.split(\"=\")[-1].strip().rstrip()\n if line.startswith(\"NumberOfStarParticles\"):\n self.num_stars = int(line.split(\"=\")[-1])\n if line.startswith(\"Grid \"):\n if self.num_grids is None:\n self.num_grids = int(line.split(\"=\")[-1])\n test_grid_id = int(line.split(\"=\")[-1])\n if test_grid is not None:\n break\n self._guess_dataset_type(self.ds.dimensionality, test_grid, test_grid_id)\n\n def _guess_dataset_type(self, rank, test_grid, test_grid_id):\n if test_grid[0] != os.path.sep:\n test_grid = os.path.join(self.directory, test_grid)\n if not os.path.exists(test_grid):\n test_grid = os.path.join(self.directory,\n os.path.basename(test_grid))\n mylog.debug(\"Your data uses the annoying hardcoded path.\")\n self._strip_path = True\n if self.dataset_type is not None: return\n if rank == 3:\n mylog.debug(\"Detected packed HDF5\")\n if self.parameters.get(\"WriteGhostZones\", 0) == 1:\n self.dataset_type= \"enzo_packed_3d_gz\"\n self.grid = EnzoGridGZ\n else:\n self.dataset_type = 'enzo_packed_3d'\n elif rank == 2:\n mylog.debug(\"Detect packed 2D\")\n self.dataset_type = 'enzo_packed_2d'\n elif rank == 1:\n mylog.debug(\"Detect packed 1D\")\n self.dataset_type = 'enzo_packed_1d'\n else:\n raise NotImplementedError\n\n # Sets are sorted, so that won't work!\n def _parse_index(self):\n def _next_token_line(token, f):\n for line in f:\n if line.startswith(token):\n return line.split()[2:]\n t1 = time.time()\n pattern = r\"Pointer: Grid\\[(\\d*)\\]->NextGrid(Next|This)Level = (\\d*)\\s+$\"\n patt = re.compile(pattern)\n f = open(self.index_filename, \"rt\")\n self.grids = [self.grid(1, self)]\n self.grids[0].Level = 0\n si, ei, LE, RE, fn, npart = [], [], [], [], [], []\n all = [si, ei, LE, RE, fn]\n pbar = get_pbar(\"Parsing Hierarchy \", self.num_grids)\n version = self.dataset.parameters.get(\"VersionNumber\", None)\n params = self.dataset.parameters\n 
if version is None and \"Internal\" in params:\n version = float(params[\"Internal\"][\"Provenance\"][\"VersionNumber\"])\n if version >= 3.0:\n active_particles = True\n nap = dict((ap_type, []) for ap_type in \n params[\"Physics\"][\"ActiveParticles\"][\"ActiveParticlesEnabled\"])\n elif version == 2.2:\n active_particles = True\n nap = {}\n for type in self.parameters.get(\"AppendActiveParticleType\", []):\n nap[type] = []\n else:\n active_particles = False\n nap = None\n for grid_id in range(self.num_grids):\n pbar.update(grid_id)\n # We will unroll this list\n si.append(_next_token_line(\"GridStartIndex\", f))\n ei.append(_next_token_line(\"GridEndIndex\", f))\n LE.append(_next_token_line(\"GridLeftEdge\", f))\n RE.append(_next_token_line(\"GridRightEdge\", f))\n nb = int(_next_token_line(\"NumberOfBaryonFields\", f)[0])\n fn.append([None])\n if nb > 0: fn[-1] = _next_token_line(\"BaryonFileName\", f)\n npart.append(int(_next_token_line(\"NumberOfParticles\", f)[0]))\n # Below we find out what active particles exist in this grid,\n # and add their counts individually.\n if active_particles:\n ptypes = _next_token_line(\"PresentParticleTypes\", f)\n counts = [int(c) for c in _next_token_line(\"ParticleTypeCounts\", f)]\n for ptype in self.parameters.get(\"AppendActiveParticleType\", []):\n if ptype in ptypes:\n nap[ptype].append(counts[ptypes.index(ptype)])\n else:\n nap[ptype].append(0)\n if nb == 0 and npart[-1] > 0: fn[-1] = _next_token_line(\"ParticleFileName\", f)\n for line in f:\n if len(line) < 2: break\n if line.startswith(\"Pointer:\"):\n vv = patt.findall(line)[0]\n self.__pointer_handler(vv)\n pbar.finish()\n self._fill_arrays(ei, si, LE, RE, npart, nap)\n temp_grids = np.empty(self.num_grids, dtype='object')\n temp_grids[:] = self.grids\n self.grids = temp_grids\n self.filenames = fn\n t2 = time.time()\n\n def _initialize_grid_arrays(self):\n super(EnzoHierarchy, self)._initialize_grid_arrays()\n if \"AppendActiveParticleType\" in self.parameters.keys() and \\\n len(self.parameters[\"AppendActiveParticleType\"]):\n gac = dict((ptype, np.zeros(self.num_grids, dtype='i4')) \\\n for ptype in self.parameters[\"AppendActiveParticleType\"])\n self.grid_active_particle_count = gac\n\n def _fill_arrays(self, ei, si, LE, RE, npart, nap):\n self.grid_dimensions.flat[:] = ei\n self.grid_dimensions -= np.array(si, dtype='i4')\n self.grid_dimensions += 1\n self.grid_left_edge.flat[:] = LE\n self.grid_right_edge.flat[:] = RE\n self.grid_particle_count.flat[:] = npart\n if nap is not None:\n for ptype in nap:\n self.grid_active_particle_count[ptype].flat[:] = nap[ptype]\n\n def __pointer_handler(self, m):\n sgi = int(m[2])-1\n if sgi == -1: return # if it's 0, then we're done with that lineage\n # Okay, so, we have a pointer. 
We make a new grid, with an id of the length+1\n # (recall, Enzo grids are 1-indexed)\n self.grids.append(self.grid(len(self.grids)+1, self))\n # We'll just go ahead and make a weakref to cache\n second_grid = self.grids[sgi] # zero-indexed already\n first_grid = self.grids[int(m[0])-1]\n if m[1] == \"Next\":\n first_grid._children_ids.append(second_grid.id)\n second_grid._parent_id = first_grid.id\n second_grid.Level = first_grid.Level + 1\n elif m[1] == \"This\":\n if first_grid.Parent is not None:\n first_grid.Parent._children_ids.append(second_grid.id)\n second_grid._parent_id = first_grid._parent_id\n second_grid.Level = first_grid.Level\n self.grid_levels[sgi] = second_grid.Level\n\n def _rebuild_top_grids(self, level = 0):\n mylog.info(\"Rebuilding grids on level %s\", level)\n cmask = (self.grid_levels.flat == (level + 1))\n cmsum = cmask.sum()\n mask = np.zeros(self.num_grids, dtype='bool')\n for grid in self.select_grids(level):\n mask[:] = 0\n LE = self.grid_left_edge[grid.id - grid._id_offset]\n RE = self.grid_right_edge[grid.id - grid._id_offset]\n grids, grid_i = self.get_box_grids(LE, RE)\n mask[grid_i] = 1\n grid._children_ids = []\n cgrids = self.grids[(mask * cmask).astype('bool')]\n mylog.info(\"%s: %s / %s\", grid, len(cgrids), cmsum)\n for cgrid in cgrids:\n grid._children_ids.append(cgrid.id)\n cgrid._parent_id = grid.id\n mylog.info(\"Finished rebuilding\")\n\n def _populate_grid_objects(self):\n reconstruct = ytcfg.getboolean(\"yt\",\"reconstruct_index\")\n for g,f in izip(self.grids, self.filenames):\n g._prepare_grid()\n g._setup_dx()\n g.set_filename(f[0])\n if reconstruct:\n if g.Parent is not None: g._guess_properties_from_parent()\n del self.filenames # No longer needed.\n self.max_level = self.grid_levels.max()\n\n def _detect_active_particle_fields(self):\n ap_list = self.dataset[\"AppendActiveParticleType\"]\n _fields = dict((ap, []) for ap in ap_list)\n fields = []\n for ptype in self.dataset[\"AppendActiveParticleType\"]:\n select_grids = self.grid_active_particle_count[ptype].flat\n if not np.any(select_grids):\n current_ptypes = self.dataset.particle_types\n new_ptypes = [p for p in current_ptypes if p != ptype]\n self.dataset.particle_types = new_ptypes\n self.dataset.particle_types_raw = new_ptypes\n continue\n gs = self.grids[select_grids > 0]\n g = gs[0]\n handle = h5py.File(g.filename, \"r\")\n node = handle[\"/Grid%08i/Particles/\" % g.id]\n for ptype in (str(p) for p in node):\n if ptype not in _fields: continue\n for field in (str(f) for f in node[ptype]):\n _fields[ptype].append(field)\n fields += [(ptype, field) for field in _fields.pop(ptype)]\n handle.close()\n return set(fields)\n\n def _setup_derived_fields(self):\n super(EnzoHierarchy, self)._setup_derived_fields()\n aps = self.dataset.parameters.get(\n \"AppendActiveParticleType\", [])\n for fname, field in self.ds.field_info.items():\n if not field.particle_type: continue\n if isinstance(fname, tuple): continue\n if field._function is NullFunc: continue\n for apt in aps:\n dd = field._copy_def()\n dd.pop(\"name\")\n self.ds.field_info.add_field((apt, fname), **dd)\n\n def _detect_output_fields(self):\n self.field_list = []\n # Do this only on the root processor to save disk work.\n if self.comm.rank in (0, None):\n mylog.info(\"Gathering a field list (this may take a moment.)\")\n field_list = set()\n random_sample = self._generate_random_grids()\n for grid in random_sample:\n if not hasattr(grid, 'filename'): continue\n try:\n gf = self.io._read_field_names(grid)\n except
self.io._read_exception:\n raise IOError(\"Grid %s is a bit funky?\" % grid.id)\n mylog.debug(\"Grid %s has: %s\", grid.id, gf)\n field_list = field_list.union(gf)\n if \"AppendActiveParticleType\" in self.dataset.parameters:\n ap_fields = self._detect_active_particle_fields()\n field_list = list(set(field_list).union(ap_fields))\n ptypes = self.dataset.particle_types\n ptypes_raw = self.dataset.particle_types_raw\n else:\n field_list = None\n ptypes = None\n ptypes_raw = None\n self.field_list = list(self.comm.mpi_bcast(field_list))\n self.dataset.particle_types = list(self.comm.mpi_bcast(ptypes))\n self.dataset.particle_types_raw = list(self.comm.mpi_bcast(ptypes_raw))\n\n\n def _generate_random_grids(self):\n if self.num_grids > 40:\n starter = np.random.randint(0, 20)\n random_sample = np.mgrid[starter:len(self.grids)-1:20j].astype(\"int32\")\n # We also add in a bit to make sure that some of the grids have\n # particles\n gwp = self.grid_particle_count > 0\n if np.any(gwp) and not np.any(gwp[(random_sample,)]):\n # We just add one grid. This is not terribly efficient.\n first_grid = np.where(gwp)[0][0]\n random_sample.resize((21,))\n random_sample[-1] = first_grid\n mylog.debug(\"Added additional grid %s\", first_grid)\n mylog.debug(\"Checking grids: %s\", random_sample.tolist())\n else:\n random_sample = np.mgrid[0:max(len(self.grids),1)].astype(\"int32\")\n return self.grids[(random_sample,)]\n\n def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):\n \"\"\"\n Returns a structure of arrays with all of the particles'\n positions, velocities, masses, types, IDs, and attributes for\n a particle type **ptype** for a maximum of **max_num**\n particles. If non-default particle fields are used, provide\n them in **additional_fields**.\n \"\"\"\n # Not sure whether this routine should be in the general HierarchyType.\n if self.grid_particle_count.sum() == 0:\n mylog.info(\"Data contains no particles.\")\n return None\n if additional_fields is None:\n additional_fields = ['metallicity_fraction', 'creation_time',\n 'dynamical_time']\n pfields = [f for f in self.field_list if f.startswith('particle_')]\n nattr = self.dataset['NumberOfParticleAttributes']\n if nattr > 0:\n pfields += additional_fields[:nattr]\n # Find where the particles reside and count them\n if max_num is None: max_num = 1e100\n total = 0\n pstore = []\n for level in range(self.max_level, -1, -1):\n for grid in self.select_grids(level):\n index = np.where(grid['particle_type'] == ptype)[0]\n total += len(index)\n pstore.append(index)\n if total >= max_num: break\n if total >= max_num: break\n result = None\n if total > 0:\n result = {}\n for p in pfields:\n result[p] = np.zeros(total, 'float64')\n # Now we retrieve data for each field\n ig = count = 0\n for level in range(self.max_level, -1, -1):\n for grid in self.select_grids(level):\n nidx = len(pstore[ig])\n if nidx > 0:\n for p in pfields:\n result[p][count:count+nidx] = grid[p][pstore[ig]]\n count += nidx\n ig += 1\n if count >= total: break\n if count >= total: break\n # Crop data if retrieved more than max_num\n if count > max_num:\n for p in pfields:\n result[p] = result[p][0:max_num]\n return result\n\nclass EnzoHierarchyInMemory(EnzoHierarchy):\n\n grid = EnzoGridInMemory\n _enzo = None\n\n @property\n def enzo(self):\n if self._enzo is None:\n import enzo\n self._enzo = enzo\n return self._enzo\n\n def __init__(self, ds, dataset_type = None):\n self.dataset_type = dataset_type\n self.float_type = 'float64'\n self.dataset =
weakref.proxy(ds) # for _obtain_enzo\n self.float_type = self.enzo.hierarchy_information[\"GridLeftEdge\"].dtype\n self.directory = os.getcwd()\n GridIndex.__init__(self, ds, dataset_type)\n\n def _initialize_data_storage(self):\n pass\n\n def _count_grids(self):\n self.num_grids = self.enzo.hierarchy_information[\"GridDimensions\"].shape[0]\n\n def _parse_index(self):\n self._copy_index_structure()\n mylog.debug(\"Copying reverse tree\")\n reverse_tree = self.enzo.hierarchy_information[\"GridParentIDs\"].ravel().tolist()\n # Initial setup:\n mylog.debug(\"Reconstructing parent-child relationships\")\n grids = []\n # We enumerate, so it's 0-indexed id and 1-indexed pid\n self.filenames = [\"-1\"] * self.num_grids\n for id,pid in enumerate(reverse_tree):\n grids.append(self.grid(id+1, self))\n grids[-1].Level = self.grid_levels[id, 0]\n if pid > 0:\n grids[-1]._parent_id = pid\n grids[pid-1]._children_ids.append(grids[-1].id)\n self.max_level = self.grid_levels.max()\n mylog.debug(\"Preparing grids\")\n self.grids = np.empty(len(grids), dtype='object')\n for i, grid in enumerate(grids):\n if (i%1e4) == 0: mylog.debug(\"Prepared % 7i / % 7i grids\", i, self.num_grids)\n grid.filename = \"Inline_processor_%07i\" % (self.grid_procs[i,0])\n grid._prepare_grid()\n grid.proc_num = self.grid_procs[i,0]\n self.grids[i] = grid\n mylog.debug(\"Prepared\")\n\n def _initialize_grid_arrays(self):\n EnzoHierarchy._initialize_grid_arrays(self)\n self.grid_procs = np.zeros((self.num_grids,1),'int32')\n\n def _copy_index_structure(self):\n # Dimensions are important!\n self.grid_dimensions[:] = self.enzo.hierarchy_information[\"GridEndIndices\"][:]\n self.grid_dimensions -= self.enzo.hierarchy_information[\"GridStartIndices\"][:]\n self.grid_dimensions += 1\n self.grid_left_edge[:] = self.enzo.hierarchy_information[\"GridLeftEdge\"][:]\n self.grid_right_edge[:] = self.enzo.hierarchy_information[\"GridRightEdge\"][:]\n self.grid_levels[:] = self.enzo.hierarchy_information[\"GridLevels\"][:]\n self.grid_procs = self.enzo.hierarchy_information[\"GridProcs\"].copy()\n self.grid_particle_count[:] = self.enzo.hierarchy_information[\"GridNumberOfParticles\"][:]\n\n def save_data(self, *args, **kwargs):\n pass\n\n _cached_field_list = None\n _cached_derived_field_list = None\n\n def _generate_random_grids(self):\n my_rank = self.comm.rank\n my_grids = self.grids[self.grid_procs.ravel() == my_rank]\n if len(my_grids) > 40:\n starter = np.random.randint(0, 20)\n random_sample = np.mgrid[starter:len(my_grids)-1:20j].astype(\"int32\")\n mylog.debug(\"Checking grids: %s\", random_sample.tolist())\n else:\n random_sample = np.mgrid[0:max(len(my_grids)-1,1)].astype(\"int32\")\n return my_grids[(random_sample,)]\n\n def _chunk_io(self, dobj, cache = True, local_only = False):\n gfiles = defaultdict(list)\n gobjs = getattr(dobj._current_chunk, \"objs\", dobj._chunk_info)\n for g in gobjs:\n gfiles[g.filename].append(g)\n for fn in sorted(gfiles):\n if local_only:\n gobjs = [g for g in gfiles[fn] if g.proc_num == self.comm.rank]\n gfiles[fn] = gobjs\n gs = gfiles[fn]\n count = self._count_selection(dobj, gs)\n yield YTDataChunk(dobj, \"io\", gs, count, cache = cache)\n\n\nclass EnzoHierarchy1D(EnzoHierarchy):\n\n def _fill_arrays(self, ei, si, LE, RE, npart, nap):\n self.grid_dimensions[:,:1] = ei\n self.grid_dimensions[:,:1] -= np.array(si, self.float_type)\n self.grid_dimensions += 1\n self.grid_left_edge[:,:1] = LE\n self.grid_right_edge[:,:1] = RE\n self.grid_particle_count.flat[:] = npart\n self.grid_left_edge[:,1:] = 
0.0\n self.grid_right_edge[:,1:] = 1.0\n self.grid_dimensions[:,1:] = 1\n if nap is not None:\n raise NotImplementedError\n\nclass EnzoHierarchy2D(EnzoHierarchy):\n\n def _fill_arrays(self, ei, si, LE, RE, npart, nap):\n self.grid_dimensions[:,:2] = ei\n self.grid_dimensions[:,:2] -= np.array(si, self.float_type)\n self.grid_dimensions += 1\n self.grid_left_edge[:,:2] = LE\n self.grid_right_edge[:,:2] = RE\n self.grid_particle_count.flat[:] = npart\n self.grid_left_edge[:,2] = 0.0\n self.grid_right_edge[:,2] = 1.0\n self.grid_dimensions[:,2] = 1\n if nap is not None:\n raise NotImplementedError\n\nclass EnzoDataset(Dataset):\n \"\"\"\n Enzo-specific output, set at a fixed time.\n \"\"\"\n _index_class = EnzoHierarchy\n _field_info_class = EnzoFieldInfo\n\n def __init__(self, filename, dataset_type=None,\n file_style = None,\n parameter_override = None,\n conversion_override = None,\n storage_filename = None,\n units_override=None):\n \"\"\"\n This class is a stripped down class that simply reads and parses\n *filename* without looking at the index. *dataset_type* gets passed\n to the index to pre-determine the style of data-output. However,\n it is not strictly necessary. Optionally you may specify a\n *parameter_override* dictionary that will override anything in the\n parameter file and a *conversion_override* dictionary that consists\n of {fieldname : conversion_to_cgs} that will override the #DataCGS.\n \"\"\"\n self.fluid_types += (\"enzo\",)\n if filename.endswith(\".hierarchy\"): filename = filename[:-10]\n if parameter_override is None: parameter_override = {}\n self._parameter_override = parameter_override\n if conversion_override is None: conversion_override = {}\n self._conversion_override = conversion_override\n self.storage_filename = storage_filename\n Dataset.__init__(self, filename, dataset_type, file_style=file_style,\n units_override=units_override)\n\n def _setup_1d(self):\n self._index_class = EnzoHierarchy1D\n self.domain_left_edge = \\\n np.concatenate([[self.domain_left_edge], [0.0, 0.0]])\n self.domain_right_edge = \\\n np.concatenate([[self.domain_right_edge], [1.0, 1.0]])\n\n def _setup_2d(self):\n self._index_class = EnzoHierarchy2D\n self.domain_left_edge = \\\n np.concatenate([self.domain_left_edge, [0.0]])\n self.domain_right_edge = \\\n np.concatenate([self.domain_right_edge, [1.0]])\n\n def get_parameter(self, parameter, type=None):\n \"\"\"\n Gets a parameter not in the parameterDict.\n \"\"\"\n if parameter in self.parameters:\n return self.parameters[parameter]\n for line in open(self.parameter_filename):\n if line.find(\"#\") >= 1: # Strip trailing comments; lines beginning with one are kept\n line = line[:line.find(\"#\")]\n line = line.strip()\n if len(line) < 2:\n continue\n try:\n param, vals = (i.strip() for i in line.split(\"=\", 1))\n except ValueError:\n mylog.error(\"ValueError: '%s'\", line)\n continue\n if parameter == param:\n if type is None:\n t = vals.split()\n else:\n t = list(map(type, vals.split()))\n if len(t) == 1:\n self.parameters[param] = t[0]\n else:\n self.parameters[param] = t\n if param.endswith(\"Units\") and not param.startswith(\"Temperature\"):\n dataType = param[:-5]\n self.conversion_factors[dataType] = self.parameters[param]\n return self.parameters[parameter]\n\n return \"\"\n\n def _parse_parameter_file(self):\n \"\"\"\n Parses the parameter file and establishes the various\n dictionaries.\n \"\"\"\n # Let's read the file\n with open(self.parameter_filename, \"r\") as f:\n line = f.readline().strip()\n f.seek(0)\n if line == \"Internal:\":\n
self._parse_enzo3_parameter_file(f)\n else:\n self._parse_enzo2_parameter_file(f)\n\n def _parse_enzo3_parameter_file(self, f):\n self.parameters = p = libconfig(f)\n sim = p[\"SimulationControl\"]\n internal = p[\"Internal\"]\n phys = p[\"Physics\"]\n self.refine_by = sim[\"AMR\"][\"RefineBy\"]\n self.periodicity = tuple(a == 3 for a in\n sim[\"Domain\"][\"LeftFaceBoundaryCondition\"])\n self.dimensionality = sim[\"Domain\"][\"TopGridRank\"]\n self.domain_dimensions = np.array(sim[\"Domain\"][\"TopGridDimensions\"],\n dtype=\"int64\")\n self.domain_left_edge = np.array(sim[\"Domain\"][\"DomainLeftEdge\"],\n dtype=\"float64\")\n self.domain_right_edge = np.array(sim[\"Domain\"][\"DomainRightEdge\"],\n dtype=\"float64\")\n self.gamma = phys[\"Hydro\"][\"Gamma\"]\n self.unique_identifier = internal[\"Provenance\"][\"CurrentTimeIdentifier\"]\n self.current_time = internal[\"InitialTime\"]\n self.cosmological_simulation = phys[\"Cosmology\"][\"ComovingCoordinates\"]\n if self.cosmological_simulation == 1:\n cosmo = phys[\"Cosmology\"]\n self.current_redshift = internal[\"CosmologyCurrentRedshift\"]\n self.omega_lambda = cosmo[\"OmegaLambdaNow\"]\n self.omega_matter = cosmo[\"OmegaMatterNow\"]\n self.hubble_constant = cosmo[\"HubbleConstantNow\"]\n else:\n self.current_redshift = self.omega_lambda = self.omega_matter = \\\n self.hubble_constant = self.cosmological_simulation = 0.0\n self.particle_types = [\"DarkMatter\"] + \\\n phys[\"ActiveParticles\"][\"ActiveParticlesEnabled\"]\n self.particle_types = tuple(self.particle_types)\n self.particle_types_raw = self.particle_types\n if self.dimensionality == 1:\n self._setup_1d()\n elif self.dimensionality == 2:\n self._setup_2d()\n\n def _parse_enzo2_parameter_file(self, f):\n for line in (l.strip() for l in f):\n if len(line) < 2: continue\n param, vals = (i.strip() for i in line.split(\"=\",1))\n # First we try to decipher what type of value it is.\n vals = vals.split()\n # Special case approaching.\n if \"(do\" in vals: vals = vals[:1]\n if len(vals) == 0:\n pcast = str # Assume NULL output\n else:\n v = vals[0]\n # Figure out if it's castable to floating point:\n try:\n float(v)\n except ValueError:\n pcast = str\n else:\n if any(\".\" in v or \"e+\" in v or \"e-\" in v for v in vals):\n pcast = float\n elif v == \"inf\":\n pcast = str\n else:\n pcast = int\n # Now we figure out what to do with it.\n if len(vals) == 0:\n vals = \"\"\n elif len(vals) == 1:\n vals = pcast(vals[0])\n else:\n vals = np.array([pcast(i) for i in vals if i != \"-99999\"])\n if param.startswith(\"Append\"):\n if param not in self.parameters:\n self.parameters[param] = []\n self.parameters[param].append(vals)\n else:\n self.parameters[param] = vals\n self.refine_by = self.parameters[\"RefineBy\"]\n self.periodicity = ensure_tuple(\n self.parameters[\"LeftFaceBoundaryCondition\"] == 3)\n self.dimensionality = self.parameters[\"TopGridRank\"]\n if \"MetaDataDatasetUUID\" in self.parameters:\n self.unique_identifier = self.parameters[\"MetaDataDatasetUUID\"]\n elif \"CurrentTimeIdentifier\" in self.parameters:\n self.unique_identifier = self.parameters[\"CurrentTimeIdentifier\"]\n else:\n self.unique_identifier = \\\n int(os.stat(self.parameter_filename)[stat.ST_CTIME])\n if self.dimensionality > 1:\n self.domain_dimensions = self.parameters[\"TopGridDimensions\"]\n if len(self.domain_dimensions) < 3:\n tmp = self.domain_dimensions.tolist()\n tmp.append(1)\n self.domain_dimensions = np.array(tmp)\n self.periodicity += (False,)\n self.domain_left_edge = 
np.array(self.parameters[\"DomainLeftEdge\"],\n \"float64\").copy()\n self.domain_right_edge = np.array(self.parameters[\"DomainRightEdge\"],\n \"float64\").copy()\n else:\n self.domain_left_edge = np.array(self.parameters[\"DomainLeftEdge\"],\n \"float64\")\n self.domain_right_edge = np.array(self.parameters[\"DomainRightEdge\"],\n \"float64\")\n self.domain_dimensions = np.array([self.parameters[\"TopGridDimensions\"],1,1])\n self.periodicity += (False, False)\n\n self.gamma = self.parameters[\"Gamma\"]\n # To be enabled when we can break old pickles:\n #if \"MetaDataSimulationUUID\" in self.parameters:\n # self.unique_identifier = self.parameters[\"MetaDataSimulationUUID\"]\n self.unique_identifier = self.parameters.get(\"MetaDataDatasetUUID\",\n self.parameters.get(\"CurrentTimeIdentifier\", None))\n if self.parameters[\"ComovingCoordinates\"]:\n self.cosmological_simulation = 1\n self.current_redshift = self.parameters[\"CosmologyCurrentRedshift\"]\n self.omega_lambda = self.parameters[\"CosmologyOmegaLambdaNow\"]\n self.omega_matter = self.parameters[\"CosmologyOmegaMatterNow\"]\n self.hubble_constant = self.parameters[\"CosmologyHubbleConstantNow\"]\n else:\n self.current_redshift = self.omega_lambda = self.omega_matter = \\\n self.hubble_constant = self.cosmological_simulation = 0.0\n self.particle_types = []\n self.current_time = self.parameters[\"InitialTime\"]\n if self.parameters[\"NumberOfParticles\"] > 0 and \\\n \"AppendActiveParticleType\" in self.parameters.keys():\n # If this is the case, then we know we should have a DarkMatter\n # particle type, and we don't need the \"io\" type.\n self.parameters[\"AppendActiveParticleType\"].append(\"DarkMatter\")\n else:\n # We do not have an \"io\" type for Enzo particles if the\n # ActiveParticle machinery is on, as we simply will ignore any of\n # the non-DarkMatter particles in that case. 
However, for older\n # datasets, we call this particle type \"io\".\n self.particle_types = [\"io\"]\n for ptype in self.parameters.get(\"AppendActiveParticleType\", []):\n self.particle_types.append(ptype)\n self.particle_types = tuple(self.particle_types)\n self.particle_types_raw = self.particle_types\n\n if self.dimensionality == 1:\n self._setup_1d()\n elif self.dimensionality == 2:\n self._setup_2d()\n\n def _set_code_unit_attributes(self):\n if self.cosmological_simulation:\n k = self.cosmology_get_units()\n # Now some CGS values\n box_size = self.parameters.get(\"CosmologyComovingBoxSize\", None)\n if box_size is None:\n box_size = self.parameters[\"Physics\"][\"Cosmology\"]\\\n [\"CosmologyComovingBoxSize\"]\n self.length_unit = self.quan(box_size, \"Mpccm/h\")\n self.mass_unit = \\\n self.quan(k['urho'], 'g/cm**3') * (self.length_unit.in_cgs())**3\n self.time_unit = self.quan(k['utim'], 's')\n self.velocity_unit = self.quan(k['uvel'], 'cm/s')\n else:\n if \"LengthUnits\" in self.parameters:\n length_unit = self.parameters[\"LengthUnits\"]\n mass_unit = self.parameters[\"DensityUnits\"] * length_unit**3\n time_unit = self.parameters[\"TimeUnits\"]\n elif \"SimulationControl\" in self.parameters:\n units = self.parameters[\"SimulationControl\"][\"Units\"]\n length_unit = units[\"Length\"]\n mass_unit = units[\"Density\"] * length_unit**3\n time_unit = units[\"Time\"]\n else:\n mylog.warning(\"Setting 1.0 in code units to be 1.0 cm\")\n mylog.warning(\"Setting 1.0 in code units to be 1.0 s\")\n length_unit = mass_unit = time_unit = 1.0\n\n self.length_unit = self.quan(length_unit, \"cm\")\n self.mass_unit = self.quan(mass_unit, \"g\")\n self.time_unit = self.quan(time_unit, \"s\")\n self.velocity_unit = self.length_unit / self.time_unit\n\n magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /\n (self.time_unit**2 * self.length_unit))\n magnetic_unit = np.float64(magnetic_unit.in_cgs())\n self.magnetic_unit = self.quan(magnetic_unit, \"gauss\")\n\n def cosmology_get_units(self):\n \"\"\"\n Return an Enzo-fortran style dictionary of units to feed into custom\n routines. 
This is typically only necessary if you are interacting\n with fortran code.\n \"\"\"\n k = {}\n k[\"utim\"] = 2.52e17/np.sqrt(self.omega_matter)\\\n / self.hubble_constant \\\n / (1+self.parameters[\"CosmologyInitialRedshift\"])**1.5\n k[\"urho\"] = rho_crit_g_cm3_h2 * self.omega_matter \\\n * self.hubble_constant**2 \\\n * (1.0 + self.current_redshift)**3\n k[\"uxyz\"] = cm_per_mpc * \\\n self.parameters[\"CosmologyComovingBoxSize\"] / \\\n self.hubble_constant / \\\n (1.0 + self.current_redshift)\n k[\"uaye\"] = 1.0/(1.0 + self.parameters[\"CosmologyInitialRedshift\"])\n k[\"uvel\"] = 1.225e7*self.parameters[\"CosmologyComovingBoxSize\"] \\\n *np.sqrt(self.omega_matter) \\\n *np.sqrt(1+ self.parameters[\"CosmologyInitialRedshift\"])\n k[\"utem\"] = 1.88e6 * (self.parameters[\"CosmologyComovingBoxSize\"]**2) \\\n * self.omega_matter \\\n * (1.0 + self.parameters[\"CosmologyInitialRedshift\"])\n k[\"aye\"] = (1.0 + self.parameters[\"CosmologyInitialRedshift\"]) / \\\n (1.0 + self.current_redshift)\n return k\n\n @classmethod\n def _is_valid(cls, *args, **kwargs):\n if (\"%s\" % (args[0])).endswith(\".hierarchy\"):\n return True\n return os.path.exists(\"%s.hierarchy\" % args[0])\n\nclass EnzoDatasetInMemory(EnzoDataset):\n _index_class = EnzoHierarchyInMemory\n _dataset_type = 'enzo_inline'\n\n def __new__(cls, *args, **kwargs):\n obj = object.__new__(cls)\n obj.__init__(*args, **kwargs)\n return obj\n\n def __init__(self, parameter_override=None, conversion_override=None):\n self.fluid_types += (\"enzo\",)\n if parameter_override is None: parameter_override = {}\n self._parameter_override = parameter_override\n if conversion_override is None: conversion_override = {}\n self._conversion_override = conversion_override\n\n Dataset.__init__(self, \"InMemoryParameterFile\", self._dataset_type)\n\n def _parse_parameter_file(self):\n enzo = self._obtain_enzo()\n self.basename = \"cycle%08i\" % (\n enzo.yt_parameter_file[\"NumberOfPythonCalls\"])\n self.parameters['CurrentTimeIdentifier'] = time.time()\n self.parameters.update(enzo.yt_parameter_file)\n self.conversion_factors.update(enzo.conversion_factors)\n for i in self.parameters:\n if isinstance(self.parameters[i], tuple):\n self.parameters[i] = np.array(self.parameters[i])\n if i.endswith(\"Units\") and not i.startswith(\"Temperature\"):\n dataType = i[:-5]\n self.conversion_factors[dataType] = self.parameters[i]\n self.domain_left_edge = self.parameters[\"DomainLeftEdge\"].copy()\n self.domain_right_edge = self.parameters[\"DomainRightEdge\"].copy()\n for i in self.conversion_factors:\n if isinstance(self.conversion_factors[i], tuple):\n self.conversion_factors[i] = np.array(self.conversion_factors[i])\n for p, v in self._parameter_override.items():\n self.parameters[p] = v\n for p, v in self._conversion_override.items():\n self.conversion_factors[p] = v\n self.refine_by = self.parameters[\"RefineBy\"]\n self.periodicity = ensure_tuple(self.parameters[\"LeftFaceBoundaryCondition\"] == 3)\n self.dimensionality = self.parameters[\"TopGridRank\"]\n self.domain_dimensions = self.parameters[\"TopGridDimensions\"]\n self.current_time = self.parameters[\"InitialTime\"]\n if \"CurrentTimeIdentifier\" in self.parameters:\n self.unique_identifier = self.parameters[\"CurrentTimeIdentifier\"]\n if self.parameters[\"ComovingCoordinates\"]:\n self.cosmological_simulation = 1\n self.current_redshift = self.parameters[\"CosmologyCurrentRedshift\"]\n self.omega_lambda = self.parameters[\"CosmologyOmegaLambdaNow\"]\n self.omega_matter = 
self.parameters[\"CosmologyOmegaMatterNow\"]\n self.hubble_constant = self.parameters[\"CosmologyHubbleConstantNow\"]\n else:\n self.current_redshift = self.omega_lambda = self.omega_matter = \\\n self.hubble_constant = self.cosmological_simulation = 0.0\n\n def _obtain_enzo(self):\n import enzo; return enzo\n\n @classmethod\n def _is_valid(cls, *args, **kwargs):\n return False\n\n# These next two functions are taken from\n# http://www.reddit.com/r/Python/comments/6hj75/reverse_file_iterator/c03vms4\n# Credit goes to \"Brian\" on Reddit\n\ndef rblocks(f, blocksize=4096):\n \"\"\"Read file as series of blocks from end of file to start.\n\n The data itself is in normal order, only the order of the blocks is reversed.\n ie. \"hello world\" -> [\"ld\",\"wor\", \"lo \", \"hel\"]\n Note that the file must be opened in binary mode.\n \"\"\"\n if 'b' not in f.mode.lower():\n raise Exception(\"File must be opened using binary mode.\")\n size = os.stat(f.name).st_size\n fullblocks, lastblock = divmod(size, blocksize)\n\n # The first(end of file) block will be short, since this leaves\n # the rest aligned on a blocksize boundary. This may be more\n # efficient than having the last (first in file) block be short\n f.seek(-lastblock,2)\n yield f.read(lastblock).decode('ascii')\n\n for i in range(fullblocks-1,-1, -1):\n f.seek(i * blocksize)\n yield f.read(blocksize).decode('ascii')\n\ndef rlines(f, keepends=False):\n \"\"\"Iterate through the lines of a file in reverse order.\n\n If keepends is true, line endings are kept as part of the line.\n \"\"\"\n buf = ''\n for block in rblocks(f):\n buf = block + buf\n lines = buf.splitlines(keepends)\n # Return all lines except the first (since may be partial)\n if lines:\n lines.reverse()\n buf = lines.pop() # Last line becomes end of new first line.\n for line in lines:\n yield line\n yield buf # First line.\n\n",
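# --- Editor's usage sketch (added; not part of the yt source above). ---
# The Enzo frontend is normally driven through yt.load(), which dispatches to
# EnzoDataset via _is_valid and builds EnzoHierarchy lazily when .index is
# touched. The output path "DD0010/DD0010" below is purely illustrative.
import yt

ds = yt.load("DD0010/DD0010")       # parses the parameter file; "DD0010/DD0010.hierarchy" works too
print(ds.index.num_grids)           # touching .index runs _count_grids / _parse_index
if ds.cosmological_simulation:
    k = ds.cosmology_get_units()    # Enzo-fortran style unit dict (utim, urho, uxyz, ...) defined above
    print(k["utim"], k["uvel"])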
"import numpy as np\nfrom yt.mods import *\nfrom yt.testing import *\nfrom yt.utilities.particle_generator import *\nfrom yt.frontends.stream.api import load_uniform_grid, refine_amr\nimport yt.utilities.initial_conditions as ic\nimport yt.utilities.flagging_methods as fm\nfrom yt.units.yt_array import uconcatenate\n\ndef setup() :\n pass\n\ndef test_particle_generator():\n # First generate our dataset\n domain_dims = (128, 128, 128)\n dens = np.zeros(domain_dims) + 0.1\n temp = 4.*np.ones(domain_dims)\n fields = {\"density\": (dens, 'code_mass/code_length**3'),\n \"temperature\": (temp, 'K')}\n ug = load_uniform_grid(fields, domain_dims, 1.0)\n fo = [ic.BetaModelSphere(1.0,0.1,0.5,[0.5,0.5,0.5],{\"density\":(10.0)})]\n rc = [fm.flagging_method_registry[\"overdensity\"](4.0)]\n ds = refine_amr(ug, rc, fo, 3)\n\n # Now generate particles from density\n\n field_list = [(\"io\", \"particle_position_x\"),\n (\"io\", \"particle_position_y\"),\n (\"io\", \"particle_position_z\"),\n (\"io\", \"particle_index\"),\n (\"io\", \"particle_gas_density\")]\n num_particles = 1000000\n field_dict = {(\"gas\", \"density\"): (\"io\", \"particle_gas_density\")}\n sphere = ds.sphere(ds.domain_center, 0.45)\n\n particles1 = WithDensityParticleGenerator(ds, sphere, num_particles, field_list)\n particles1.assign_indices()\n particles1.map_grid_fields_to_particles(field_dict)\n \n # Test to make sure we ended up with the right number of particles per grid\n particles1.apply_to_stream()\n particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]\n yield assert_equal, particles_per_grid1, particles1.NumberOfParticles\n particles_per_grid1 = [len(grid[\"particle_position_x\"]) for grid in ds.index.grids]\n yield assert_equal, particles_per_grid1, particles1.NumberOfParticles\n\n tags = uconcatenate([grid[\"particle_index\"] for grid in ds.index.grids])\n assert(np.unique(tags).size == num_particles)\n # Set up a lattice of particles\n pdims = np.array([64,64,64])\n def new_indices() :\n # We just add new indices onto the existing ones\n return np.arange((np.product(pdims)))+num_particles\n le = np.array([0.25,0.25,0.25])\n re = np.array([0.75,0.75,0.75])\n new_field_list = field_list + [(\"io\", \"particle_gas_temperature\")]\n new_field_dict = {(\"gas\", \"density\"): (\"io\", \"particle_gas_density\"),\n (\"gas\", \"temperature\"): (\"io\", \"particle_gas_temperature\")}\n\n particles2 = LatticeParticleGenerator(ds, pdims, le, re, new_field_list)\n particles2.assign_indices(function=new_indices)\n particles2.map_grid_fields_to_particles(new_field_dict)\n\n #Test lattice positions\n xpos = np.unique(particles2[\"io\", \"particle_position_x\"])\n ypos = np.unique(particles2[\"io\", \"particle_position_y\"])\n zpos = np.unique(particles2[\"io\", \"particle_position_z\"])\n\n xpred = np.linspace(le[0],re[0],num=pdims[0],endpoint=True)\n ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)\n zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)\n\n assert_almost_equal( xpos, xpred)\n assert_almost_equal( ypos, ypred)\n assert_almost_equal( zpos, zpred)\n\n #Test the number of particles again\n particles2.apply_to_stream()\n particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]\n yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles\n\n [grid.field_data.clear() for grid in ds.index.grids]\n particles_per_grid2 = [len(grid[\"particle_position_x\"]) for grid in ds.index.grids]\n yield assert_equal, particles_per_grid2, 
particles1.NumberOfParticles+particles2.NumberOfParticles\n\n #Test the uniqueness of tags\n tags = np.concatenate([grid[\"particle_index\"] for grid in ds.index.grids])\n tags.sort()\n yield assert_equal, tags, np.arange((np.product(pdims)+num_particles))\n\n # Test that the old particles have zero for the new field\n old_particle_temps = [grid[\"particle_gas_temperature\"][:particles_per_grid1[i]]\n for i, grid in enumerate(ds.index.grids)]\n test_zeros = [np.zeros((particles_per_grid1[i]))\n for i, grid in enumerate(ds.index.grids)]\n yield assert_equal, old_particle_temps, test_zeros\n\n #Now dump all of these particle fields out into a dict\n pdata = {}\n dd = ds.all_data()\n for field in new_field_list:\n pdata[field] = dd[field]\n\n #Test the \"from-list\" generator and particle field clobber\n particles3 = FromListParticleGenerator(ds, num_particles+np.product(pdims), pdata)\n particles3.apply_to_stream(clobber=True)\n\n #Test the number of particles again\n particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]\n yield assert_equal, particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles\n particles_per_grid3 = [len(grid[\"particle_position_z\"]) for grid in ds.index.grids]\n yield assert_equal, particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles\n\nif __name__==\"__main__\":\n for n, i in enumerate(test_particle_generator()):\n i[0](*i[1:])\n",
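# --- Editor's note (sketch, not part of the original test). ---
# Each generator above follows the same protocol: construct, assign_indices(),
# map_grid_fields_to_particles(field_dict), then apply_to_stream(), after which
# the stream's grids carry the new particles. A compact invariant check,
# reusing ds / num_particles / pdims from test_particle_generator:
import numpy as np

def check_total_particles(ds, expected):
    # Sum the per-grid particle counts after apply_to_stream().
    total = sum(int(grid.NumberOfParticles) for grid in ds.index.grids)
    assert total == expected

# e.g. check_total_particles(ds, num_particles + np.product(pdims))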
"\"\"\"\nThese are common particle fields.\n\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom yt.funcs import *\nfrom yt.units.yt_array import YTArray\nfrom yt.fields.derived_field import \\\n ValidateParameter, \\\n ValidateSpatial\n\nfrom yt.units.yt_array import \\\n uconcatenate, \\\n ucross\n\nfrom yt.utilities.math_utils import \\\n get_sph_r_component, \\\n get_sph_theta_component, \\\n get_sph_phi_component, \\\n get_cyl_r_component, \\\n get_cyl_z_component, \\\n get_cyl_theta_component, \\\n get_cyl_r, get_cyl_theta, \\\n get_cyl_z, \\\n get_sph_theta, get_sph_phi, \\\n modify_reference_frame\n\nfrom .vector_operations import \\\n create_magnitude_field\n\nfrom .field_functions import \\\n get_radius\n\nsph_whitelist_fields = (\n 'particle_velocity_x',\n 'particle_velocity_y',\n 'particle_velocity_z',\n 'density',\n 'temperature',\n 'metallicity',\n 'thermal_energy',\n 'H_fraction',\n 'He_fraction',\n 'C_fraction',\n 'N_fraction',\n 'O_fraction',\n 'Ne_fraction',\n 'Mg_fraction',\n 'Si_fraction',\n 'Fe_fraction',\n 'C_density',\n 'O_density',\n 'Si_density',\n 'Fe_density'\n)\n\n\ndef _field_concat(fname):\n def _AllFields(field, data):\n v = []\n for ptype in data.ds.particle_types:\n data.ds._last_freq = (ptype, None)\n if ptype == \"all\" or \\\n ptype in data.ds.known_filters:\n continue\n v.append(data[ptype, fname].copy())\n rv = uconcatenate(v, axis=0)\n return rv\n return _AllFields\n\ndef _field_concat_slice(fname, axi):\n def _AllFields(field, data):\n v = []\n for ptype in data.ds.particle_types:\n data.ds._last_freq = (ptype, None)\n if ptype == \"all\" or \\\n ptype in data.ds.known_filters:\n continue\n v.append(data[ptype, fname][:,axi])\n rv = uconcatenate(v, axis=0)\n return rv\n return _AllFields\n\ndef particle_deposition_functions(ptype, coord_name, mass_name, registry):\n orig = set(registry.keys())\n ptype_dn = ptype.replace(\"_\",\" \").title()\n def particle_count(field, data):\n pos = data[ptype, coord_name]\n d = data.deposit(pos, method = \"count\")\n d = data.ds.arr(d, input_units = \"cm**-3\")\n return data.apply_units(d, field.units)\n\n registry.add_field((\"deposit\", \"%s_count\" % ptype),\n function = particle_count,\n validators = [ValidateSpatial()],\n units = '',\n display_name = r\"\\mathrm{%s Count}\" % ptype_dn)\n\n def particle_mass(field, data):\n pos = data[ptype, coord_name]\n pmass = data[ptype, mass_name]\n pmass.convert_to_units(field.units)\n d = data.deposit(pos, [pmass], method = \"sum\")\n return data.apply_units(d, field.units)\n\n registry.add_field((\"deposit\", \"%s_mass\" % ptype),\n function = particle_mass,\n validators = [ValidateSpatial()],\n display_name = r\"\\mathrm{%s Mass}\" % ptype_dn,\n units = \"g\")\n\n def particle_density(field, data):\n pos = data[ptype, coord_name].convert_to_units(\"code_length\")\n mass = data[ptype, mass_name].convert_to_units(\"code_mass\")\n d = data.deposit(pos, [data[ptype, mass_name]], method = \"sum\")\n d = data.ds.arr(d, \"code_mass\")\n d /= data[\"index\", \"cell_volume\"]\n return d\n\n registry.add_field((\"deposit\", \"%s_density\" % ptype),\n function = particle_density,\n validators = [ValidateSpatial()],\n display_name = 
r\"\\mathrm{%s Density}\" % ptype_dn,\n units = \"g/cm**3\")\n\n def particle_cic(field, data):\n pos = data[ptype, coord_name]\n d = data.deposit(pos, [data[ptype, mass_name]], method = \"cic\")\n d = data.apply_units(d, data[ptype, mass_name].units)\n d /= data[\"index\", \"cell_volume\"]\n return d\n\n registry.add_field((\"deposit\", \"%s_cic\" % ptype),\n function = particle_cic,\n validators = [ValidateSpatial()],\n display_name = r\"\\mathrm{%s CIC Density}\" % ptype_dn,\n units = \"g/cm**3\")\n\n def _get_density_weighted_deposit_field(fname, units, method):\n def _deposit_field(field, data):\n \"\"\"\n Create a grid field for particle quantities weighted by particle\n mass, using cloud-in-cell deposit.\n \"\"\"\n pos = data[ptype, \"particle_position\"]\n # Get back into density\n pden = data[ptype, 'particle_mass']\n top = data.deposit(pos, [data[(ptype, fname)]*pden], method=method)\n bottom = data.deposit(pos, [pden], method=method)\n top[bottom == 0] = 0.0\n bnz = bottom.nonzero()\n top[bnz] /= bottom[bnz]\n d = data.ds.arr(top, input_units=units)\n return d\n return _deposit_field\n\n for ax in 'xyz':\n for method, name in zip((\"cic\", \"sum\"), (\"cic\", \"nn\")):\n function = _get_density_weighted_deposit_field(\n \"particle_velocity_%s\" % ax, \"cm/s\", method)\n registry.add_field(\n (\"deposit\", (\"%s_\"+name+\"_velocity_%s\") % (ptype, ax)),\n function=function, units=\"cm/s\", take_log=False,\n validators=[ValidateSpatial(0)])\n\n # Now some translation functions.\n\n def particle_ones(field, data):\n v = np.ones(data[ptype, mass_name].shape, dtype=\"float64\")\n return data.apply_units(v, field.units)\n\n registry.add_field((ptype, \"particle_ones\"),\n function = particle_ones,\n particle_type = True,\n units = \"\",\n display_name = r\"Particle Count\")\n\n def particle_mesh_ids(field, data):\n pos = data[ptype, coord_name]\n ids = np.zeros(pos.shape[0], dtype=\"float64\") - 1\n # This is float64 in name only. It will be properly cast inside the\n # deposit operation.\n #_ids = ids.view(\"float64\")\n data.deposit(pos, [ids], method = \"mesh_id\")\n return data.apply_units(ids, \"\")\n registry.add_field((ptype, \"mesh_id\"),\n function = particle_mesh_ids,\n validators = [ValidateSpatial()],\n units = '',\n particle_type = True)\n\n return list(set(registry.keys()).difference(orig))\n\ndef particle_scalar_functions(ptype, coord_name, vel_name, registry):\n\n # Now we have to set up the various velocity and coordinate things. 
In the\n # future, we'll actually invert this and use the 3-component items\n # elsewhere, and stop using these.\n\n # Note that we pass in _ptype here so that it's defined inside the closure.\n\n def _get_coord_funcs(axi, _ptype):\n def _particle_velocity(field, data):\n return data[_ptype, vel_name][:,axi]\n def _particle_position(field, data):\n return data[_ptype, coord_name][:, axi]\n return _particle_velocity, _particle_position\n for axi, ax in enumerate(\"xyz\"):\n v, p = _get_coord_funcs(axi, ptype)\n registry.add_field((ptype, \"particle_velocity_%s\" % ax),\n particle_type = True, function = v,\n units = \"code_velocity\")\n registry.add_field((ptype, \"particle_position_%s\" % ax),\n particle_type = True, function = p,\n units = \"code_length\")\n\ndef particle_vector_functions(ptype, coord_names, vel_names, registry):\n\n # This will column_stack a set of scalars to create vector fields.\n\n def _get_vec_func(_ptype, names):\n def particle_vectors(field, data):\n v = [data[_ptype, name].in_units(field.units)\n for name in names]\n c = np.column_stack(v)\n return data.apply_units(c, field.units)\n return particle_vectors\n registry.add_field((ptype, \"particle_position\"),\n function=_get_vec_func(ptype, coord_names),\n units = \"code_length\",\n particle_type=True)\n registry.add_field((ptype, \"particle_velocity\"),\n function=_get_vec_func(ptype, vel_names),\n units = \"cm / s\",\n particle_type=True)\n\ndef get_angular_momentum_components(ptype, data, spos, svel):\n if data.has_field_parameter(\"normal\"):\n normal = data.get_field_parameter(\"normal\")\n else:\n normal = data.ds.arr([0.0,0.0,1.0],\"code_length\") # default to simulation axis\n bv = data.get_field_parameter(\"bulk_velocity\")\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"]).T\n vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate(\"xyz\")]).T\n return pos, vel, normal, bv\n\ndef standard_particle_fields(registry, ptype,\n spos = \"particle_position_%s\",\n svel = \"particle_velocity_%s\"):\n # This function will set things up based on the scalar fields and standard\n # yt field names.\n # data.get_field_parameter(\"bulk_velocity\") defaults to YTArray([0,0,0] cm/s)\n\n def _particle_velocity_magnitude(field, data):\n \"\"\"Magnitude of the particle velocity, relative to bulk_velocity.\"\"\"\n bulk_velocity = data.get_field_parameter(\"bulk_velocity\")\n return np.sqrt((data[ptype, svel % 'x'] - bulk_velocity[0])**2\n + (data[ptype, svel % 'y'] - bulk_velocity[1])**2\n + (data[ptype, svel % 'z'] - bulk_velocity[2])**2 )\n\n registry.add_field((ptype, \"particle_velocity_magnitude\"),\n function=_particle_velocity_magnitude,\n particle_type=True,\n take_log=False,\n units=\"cm/s\")\n\n def _particle_specific_angular_momentum(field, data):\n \"\"\"\n Calculate the specific angular momentum of a particle. 
Returns a vector for each\n particle.\n \"\"\"\n center = data.get_field_parameter('center')\n pos, vel, normal, bv = get_angular_momentum_components(ptype, data, spos, svel)\n L, r_vec, v_vec = modify_reference_frame(center, normal, P=pos, V=vel)\n # adding in the unit registry allows us to have a reference to the dataset\n # and thus we will always get the correct units after applying the cross product.\n return -ucross(r_vec, v_vec, registry=data.ds.unit_registry)\n\n\n registry.add_field((ptype, \"particle_specific_angular_momentum\"),\n function=_particle_specific_angular_momentum,\n particle_type=True,\n units=\"cm**2/s\",\n validators=[ValidateParameter(\"center\")])\n\n def _get_spec_ang_mom_comp(axi, ax, _ptype):\n def _particle_specific_angular_momentum_component(field, data):\n return data[_ptype, \"particle_specific_angular_momentum\"][:, axi]\n def _particle_angular_momentum_component(field, data):\n return data[_ptype, \"particle_mass\"] * \\\n data[ptype, \"particle_specific_angular_momentum_%s\" % ax]\n return _particle_specific_angular_momentum_component, \\\n _particle_angular_momentum_component\n for axi, ax in enumerate(\"xyz\"):\n f, v = _get_spec_ang_mom_comp(axi, ax, ptype)\n registry.add_field(\n (ptype, \"particle_specific_angular_momentum_%s\" % ax),\n particle_type = True, function=f, units=\"cm**2/s\",\n validators=[ValidateParameter(\"center\")]\n )\n registry.add_field((ptype, \"particle_angular_momentum_%s\" % ax),\n function=v, units=\"g*cm**2/s\", particle_type=True,\n validators=[ValidateParameter('center')])\n\n def _particle_angular_momentum(field, data):\n am = data[ptype, \"particle_mass\"] * data[ptype, \"particle_specific_angular_momentum\"].T\n return am.T\n\n registry.add_field((ptype, \"particle_angular_momentum\"),\n function=_particle_angular_momentum,\n particle_type=True,\n units=\"g*cm**2/s\",\n validators=[ValidateParameter(\"center\")])\n\n create_magnitude_field(registry, \"particle_angular_momentum\",\n \"g*cm**2/s\", ftype=ptype, particle_type=True)\n\n def _particle_radius(field, data):\n \"\"\"The spherical radius component of the particle positions\n\n Relative to the coordinate system defined by the *normal* vector,\n and *center* field parameters.\n \"\"\"\n return get_radius(data, \"particle_position_\")\n\n registry.add_field(\n (ptype, \"particle_radius\"),\n function=_particle_radius,\n units=\"cm\",\n particle_type=True,\n validators=[ValidateParameter(\"center\")])\n\n def _particle_position_relative(field, data):\n \"\"\"The cartesian particle positions in a rotated reference frame\n\n Relative to the coordinate system defined by the *normal* vector and\n *center* field parameters.\n\n Note that the orientation of the x and y axes are arbitrary.\n \"\"\"\n normal = data.get_field_parameter('normal')\n center = data.get_field_parameter('center')\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"]).T\n L, pos = modify_reference_frame(center, normal, P=pos)\n return pos\n\n registry.add_field(\n (ptype, \"particle_position_relative\"),\n function=_particle_position_relative,\n particle_type=True,\n units=\"cm\",\n validators=[ValidateParameter(\"normal\"), ValidateParameter(\"center\")])\n\n def _particle_velocity_relative(field, data):\n \"\"\"The vector particle velocities in an arbitrary coordinate system\n\n Relative to the coordinate system defined by the *normal* vector,\n *bulk_velocity* vector and *center* field parameters.\n\n Note that the orientation of the x and y axes are arbitrary.\n \"\"\"\n normal = 
data.get_field_parameter('normal')\n center = data.get_field_parameter('center')\n bv = data.get_field_parameter(\"bulk_velocity\")\n vel = data.ds.arr([data[ptype, svel % ax] - bv[iax] for iax, ax in enumerate(\"xyz\")]).T\n L, vel = modify_reference_frame(center, normal, V=vel)\n return vel\n\n registry.add_field((ptype, \"particle_velocity_relative\"),\n function=_particle_velocity_relative,\n particle_type=True, units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n\n def _get_coord_funcs_relative(axi, _ptype):\n def _particle_pos_rel(field, data):\n return data[_ptype, \"particle_position_relative\"][:, axi]\n def _particle_vel_rel(field, data):\n return data[_ptype, \"particle_velocity_relative\"][:, axi]\n return _particle_vel_rel, _particle_pos_rel\n for axi, ax in enumerate(\"xyz\"):\n v, p = _get_coord_funcs_relative(axi, ptype)\n registry.add_field((ptype, \"particle_velocity_relative_%s\" % ax),\n particle_type = True, function = v,\n units = \"code_velocity\")\n registry.add_field((ptype, \"particle_position_relative_%s\" % ax),\n particle_type = True, function = p,\n units = \"code_length\")\n\n\n # this is just particle radius but we add it with an alias for the sake of\n # consistent naming\n registry.add_field((ptype, \"particle_position_spherical_radius\"),\n function=_particle_radius,\n particle_type=True, units=\"cm\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n def _particle_spherical_position_radius(field, data):\n \"\"\"This field is deprecated and will be removed in a future release\"\"\"\n return data[ptype, 'particle_position_spherical_radius']\n\n registry.add_field((ptype, \"particle_spherical_position_radius\"),\n function=_particle_spherical_position_radius,\n particle_type=True, units=\"cm\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n def _particle_position_spherical_theta(field, data):\n \"\"\"The spherical theta coordinate of the particle positions.\n\n Relative to the coordinate system defined by the *normal* vector\n and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter(\"normal\")\n center = data.get_field_parameter(\"center\")\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n pos = pos - np.reshape(center, (3, 1))\n return data.ds.arr(get_sph_theta(pos, normal), \"\")\n\n registry.add_field(\n (ptype, \"particle_position_spherical_theta\"),\n function=_particle_position_spherical_theta,\n particle_type=True,\n units=\"\",\n validators=[ValidateParameter(\"center\"), ValidateParameter(\"normal\")])\n\n def _particle_spherical_position_theta(field, data):\n \"\"\"This field is deprecated and will be removed in a future release\"\"\"\n return data[ptype, 'particle_position_spherical_theta']\n\n registry.add_field((ptype, \"particle_spherical_position_theta\"),\n function=_particle_spherical_position_theta,\n particle_type=True, units=\"\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n def _particle_position_spherical_phi(field, data):\n \"\"\"The spherical phi component of the particle positions\n\n Relative to the coordinate system defined by the *normal* vector\n and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter(\"normal\")\n center = data.get_field_parameter(\"center\")\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n pos = pos - np.reshape(center, (3, 1))\n return data.ds.arr(get_sph_phi(pos, normal), \"\")\n\n 
registry.add_field(\n (ptype, \"particle_position_spherical_phi\"),\n function=_particle_position_spherical_phi,\n particle_type=True,\n units=\"\",\n validators=[ValidateParameter(\"normal\"), ValidateParameter(\"center\")])\n\n def _particle_spherical_position_phi(field, data):\n \"\"\"This field is deprecated and will be removed in a future release\"\"\"\n return data[ptype, 'particle_position_spherical_phi']\n\n registry.add_field((ptype, \"particle_spherical_position_phi\"),\n function=_particle_spherical_position_phi,\n particle_type=True, units=\"\",\n validators=[ValidateParameter(\"center\"),\n ValidateParameter(\"normal\")])\n\n def _particle_velocity_spherical_radius(field, data):\n \"\"\"The spherical radius component of the particle velocities in an\n arbitrary coordinate system\n\n Relative to the coordinate system defined by the *normal* vector,\n *bulk_velocity* vector and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter('normal')\n center = data.get_field_parameter('center')\n bv = data.get_field_parameter(\"bulk_velocity\")\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n vel = data.ds.arr([data[ptype, svel % ax] for ax in \"xyz\"])\n # Recenter positions (and remove the bulk velocity) *before*\n # computing the angles, so theta/phi are measured about *center*.\n pos = pos - np.reshape(center, (3, 1))\n vel = vel - np.reshape(bv, (3, 1))\n theta = get_sph_theta(pos, normal)\n phi = get_sph_phi(pos, normal)\n sphr = get_sph_r_component(vel, theta, phi, normal)\n return sphr\n\n registry.add_field((ptype, \"particle_velocity_spherical_radius\"),\n function=_particle_velocity_spherical_radius,\n particle_type=True, units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n def _particle_spherical_velocity_radius(field, data):\n \"\"\"This field is deprecated and will be removed in a future release\"\"\"\n return data[ptype, 'particle_velocity_spherical_radius']\n\n registry.add_field((ptype, \"particle_spherical_velocity_radius\"),\n function=_particle_spherical_velocity_radius,\n particle_type=True, units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n # particle_velocity_spherical_radius is simply aliased to\n # \"particle_radial_velocity\" for convenience\n registry.add_field((ptype, \"particle_radial_velocity\"),\n function=_particle_spherical_velocity_radius,\n particle_type=True, units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n def _particle_velocity_spherical_theta(field, data):\n \"\"\"The spherical theta component of the particle velocities in an\n arbitrary coordinate system\n\n Relative to the coordinate system defined by the *normal* vector,\n *bulk_velocity* vector and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter('normal')\n center = data.get_field_parameter('center')\n bv = data.get_field_parameter(\"bulk_velocity\")\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n vel = data.ds.arr([data[ptype, svel % ax] for ax in \"xyz\"])\n # Recenter before computing the angles (see the radius component above).\n pos = pos - np.reshape(center, (3, 1))\n vel = vel - np.reshape(bv, (3, 1))\n theta = get_sph_theta(pos, normal)\n phi = get_sph_phi(pos, normal)\n spht = get_sph_theta_component(vel, theta, phi, normal)\n return spht\n\n registry.add_field(\n (ptype, \"particle_velocity_spherical_theta\"),\n function=_particle_velocity_spherical_theta,\n particle_type=True,\n units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"), ValidateParameter(\"center\")])\n\n def _particle_spherical_velocity_theta(field, data):\n \"\"\"This field is deprecated and will be 
removed in a future release\"\"\"\n return data[ptype, 'particle_velocity_spherical_theta']\n\n registry.add_field((ptype, \"particle_spherical_velocity_theta\"),\n function=_particle_spherical_velocity_theta,\n particle_type=True, units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n def _particle_velocity_spherical_phi(field, data):\n \"\"\"The spherical phi component of the particle velocities\n\n Relative to the coordinate system defined by the *normal* vector,\n *bulk_velocity* vector and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter('normal')\n center = data.get_field_parameter('center')\n bv = data.get_field_parameter(\"bulk_velocity\")\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n vel = data.ds.arr([data[ptype, svel % ax] for ax in \"xyz\"])\n # Recenter before computing phi, so the angle is measured about *center*.\n pos = pos - np.reshape(center, (3, 1))\n vel = vel - np.reshape(bv, (3, 1))\n phi = get_sph_phi(pos, normal)\n sphp = get_sph_phi_component(vel, phi, normal)\n return sphp\n\n registry.add_field(\n (ptype, \"particle_velocity_spherical_phi\"),\n function=_particle_velocity_spherical_phi,\n particle_type=True,\n units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"), ValidateParameter(\"center\")])\n\n def _particle_spherical_velocity_phi(field, data):\n \"\"\"This field is deprecated and will be removed in a future release\"\"\"\n return data[ptype, 'particle_velocity_spherical_phi']\n\n registry.add_field((ptype, \"particle_spherical_velocity_phi\"),\n function=_particle_spherical_velocity_phi,\n particle_type=True, units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n def _particle_position_cylindrical_radius(field, data):\n \"\"\"The cylindrical radius component of the particle positions\n\n Relative to the coordinate system defined by the *normal* vector\n and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter(\"normal\")\n center = data.get_field_parameter('center')\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n pos = pos - np.reshape(center, (3, 1))\n return data.ds.arr(get_cyl_r(pos, normal),\n 'code_length')\n\n registry.add_field(\n (ptype, \"particle_position_cylindrical_radius\"),\n function=_particle_position_cylindrical_radius,\n units=\"cm\",\n particle_type=True,\n validators=[ValidateParameter(\"normal\"), ValidateParameter(\"center\")])\n\n def _particle_position_cylindrical_theta(field, data):\n \"\"\"The cylindrical theta component of the particle positions\n\n Relative to the coordinate system defined by the *normal* vector\n and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter(\"normal\")\n center = data.get_field_parameter('center')\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n pos = pos - np.reshape(center, (3, 1))\n return data.ds.arr(get_cyl_theta(pos, normal), \"\")\n\n registry.add_field(\n (ptype, \"particle_position_cylindrical_theta\"),\n function=_particle_position_cylindrical_theta,\n particle_type=True,\n units=\"\",\n validators=[ValidateParameter(\"center\"), ValidateParameter(\"normal\")])\n\n def _particle_position_cylindrical_z(field, data):\n \"\"\"The cylindrical z component of the particle positions\n\n Relative to the coordinate system defined by the *normal* vector\n and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter(\"normal\")\n center = data.get_field_parameter('center')\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n pos = pos - 
np.reshape(center, (3, 1))\n return data.ds.arr(get_cyl_z(pos, normal),\n 'code_length')\n\n registry.add_field(\n (ptype, \"particle_position_cylindrical_z\"),\n function=_particle_position_cylindrical_z,\n units=\"cm\",\n particle_type=True,\n validators=[ValidateParameter(\"normal\"), ValidateParameter(\"center\")])\n\n def _particle_velocity_cylindrical_radius(field, data):\n \"\"\"The cylindrical radius component of the particle velocities\n\n Relative to the coordinate system defined by the *normal* vector,\n *bulk_velocity* vector and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter('normal')\n center = data.get_field_parameter('center')\n bv = data.get_field_parameter(\"bulk_velocity\")\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n vel = data.ds.arr([data[ptype, svel % ax] for ax in \"xyz\"])\n theta = get_cyl_theta(pos, normal)\n pos = pos - np.reshape(center, (3, 1))\n vel = vel - np.reshape(bv, (3, 1))\n cylr = get_cyl_r_component(vel, theta, normal)\n return cylr\n\n registry.add_field(\n (ptype, \"particle_velocity_cylindrical_radius\"),\n function=_particle_velocity_cylindrical_radius,\n particle_type=True,\n units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"), ValidateParameter(\"center\")])\n\n def _particle_velocity_cylindrical_theta(field, data):\n \"\"\"The cylindrical theta component of the particle velocities\n\n Relative to the coordinate system defined by the *normal* vector,\n *bulk_velocity* vector and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter('normal')\n center = data.get_field_parameter('center')\n bv = data.get_field_parameter(\"bulk_velocity\")\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n vel = data.ds.arr([data[ptype, svel % ax] for ax in \"xyz\"])\n theta = get_cyl_theta(pos, normal)\n pos = pos - np.reshape(center, (3, 1))\n vel = vel - np.reshape(bv, (3, 1))\n cylt = get_cyl_theta_component(vel, theta, normal)\n return cylt\n\n registry.add_field(\n (ptype, \"particle_velocity_cylindrical_theta\"),\n function=_particle_velocity_cylindrical_theta,\n particle_type=True,\n units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"), ValidateParameter(\"center\")])\n\n def _particle_cylindrical_velocity_theta(field, data):\n \"\"\"This field is deprecated and will be removed in a future release\"\"\"\n return data[ptype, 'particle_velocity_cylindrical_theta']\n\n registry.add_field((ptype, \"particle_cylindrical_velocity_theta\"),\n function=_particle_cylindrical_velocity_theta,\n particle_type=True, units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n def _particle_velocity_cylindrical_z(field, data):\n \"\"\"The cylindrical z component of the particle velocities\n\n Relative to the coordinate system defined by the *normal* vector,\n *bulk_velocity* vector and *center* field parameters.\n \"\"\"\n normal = data.get_field_parameter('normal')\n center = data.get_field_parameter('center')\n bv = data.get_field_parameter(\"bulk_velocity\")\n pos = data.ds.arr([data[ptype, spos % ax] for ax in \"xyz\"])\n vel = data.ds.arr([data[ptype, svel % ax] for ax in \"xyz\"])\n pos = pos - np.reshape(center, (3, 1))\n vel = vel - np.reshape(bv, (3, 1))\n cylz = get_cyl_z_component(vel, normal)\n return cylz\n\n registry.add_field(\n (ptype, \"particle_velocity_cylindrical_z\"),\n function=_particle_velocity_cylindrical_z,\n particle_type=True,\n units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"), 
ValidateParameter(\"center\")])\n\n def _particle_cylindrical_velocity_z(field, data):\n \"\"\"This field is deprecated and will be removed in a future release\"\"\"\n return data[ptype, \"particle_velocity_cylindrical_z\"]\n\n registry.add_field((ptype, \"particle_cylindrical_velocity_z\"),\n function=_particle_cylindrical_velocity_z,\n particle_type=True, units=\"cm/s\",\n validators=[ValidateParameter(\"normal\"),\n ValidateParameter(\"center\")])\n\n\ndef add_particle_average(registry, ptype, field_name,\n weight = \"particle_mass\",\n density = True):\n field_units = registry[ptype, field_name].units\n def _pfunc_avg(field, data):\n pos = data[ptype, \"particle_position\"]\n f = data[ptype, field_name]\n wf = data[ptype, weight]\n f *= wf\n v = data.deposit(pos, [f], method = \"sum\")\n w = data.deposit(pos, [wf], method = \"sum\")\n v /= w\n if density: v /= data[\"index\", \"cell_volume\"]\n v[np.isnan(v)] = 0.0\n return v\n fn = (\"deposit\", \"%s_avg_%s\" % (ptype, field_name))\n registry.add_field(fn, function=_pfunc_avg,\n validators = [ValidateSpatial(0)],\n particle_type = False,\n units = field_units)\n return fn\n\ndef add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,\n smoothing_length_name, density_name, smoothed_field, registry,\n nneighbors = None):\n field_name = (\"deposit\", \"%s_smoothed_%s\" % (ptype, smoothed_field))\n field_units = registry[ptype, smoothed_field].units\n def _vol_weight(field, data):\n pos = data[ptype, coord_name]\n pos = pos.convert_to_units(\"code_length\")\n mass = data[ptype, mass_name].in_cgs()\n dens = data[ptype, density_name].in_cgs()\n quan = data[ptype, smoothed_field]\n quan = quan.convert_to_units(field_units)\n\n if smoothing_length_name is None:\n hsml = np.zeros(quan.shape, dtype='float64') - 1\n hsml = data.apply_units(hsml, \"code_length\")\n else:\n hsml = data[ptype, smoothing_length_name]\n hsml.convert_to_units(\"code_length\")\n kwargs = {}\n if nneighbors:\n kwargs['nneighbors'] = nneighbors\n # volume_weighted smooth operations return lists of length 1.\n rv = data.smooth(pos, [mass, hsml, dens, quan],\n method=\"volume_weighted\",\n create_octree=True)[0]\n rv[np.isnan(rv)] = 0.0\n # Now some quick unit conversions.\n rv = data.apply_units(rv, field_units)\n return rv\n registry.add_field(field_name, function = _vol_weight,\n validators = [ValidateSpatial(0)],\n units = field_units)\n return [field_name]\n\ndef add_nearest_neighbor_field(ptype, coord_name, registry, nneighbors = 64):\n field_name = (ptype, \"nearest_neighbor_distance_%s\" % (nneighbors))\n def _nth_neighbor(field, data):\n pos = data[ptype, coord_name]\n pos.convert_to_units(\"code_length\")\n distances = 0.0 * pos[:,0]\n data.particle_operation(pos, [distances],\n method=\"nth_neighbor\",\n nneighbors = nneighbors)\n # Now some quick unit conversions.\n return distances\n registry.add_field(field_name, function = _nth_neighbor,\n validators = [ValidateSpatial(0)],\n particle_type = True,\n units = \"code_length\")\n return [field_name]\n\ndef add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64):\n field_name = (ptype, \"smoothed_density\")\n field_units = registry[ptype, mass_name].units\n def _nth_neighbor(field, data):\n pos = data[ptype, coord_name]\n pos.convert_to_units(\"code_length\")\n mass = data[ptype, mass_name]\n mass.convert_to_units(\"g\")\n densities = mass * 0.0\n data.particle_operation(pos, [mass, densities],\n method=\"density\",\n nneighbors = nneighbors)\n ones = pos.prod(axis=1) # Get us in 
code_length**3\n ones[:] = 1.0\n densities /= ones\n # Now some quick unit conversions.\n return densities\n registry.add_field(field_name, function = _nth_neighbor,\n validators = [ValidateSpatial(0)],\n particle_type = True,\n units = \"g/cm**3\")\n return [field_name]\n\ndef add_union_field(registry, ptype, field_name, units):\n \"\"\"\n Create a field that is the concatenation of multiple particle types.\n This allows us to create fields for particle unions using alias names.\n \"\"\"\n\n def _cat_field(field, data):\n return uconcatenate([data[dep_type, field_name]\n for dep_type in data.ds.particle_types_raw])\n\n registry.add_field((ptype, field_name),\n function=_cat_field,\n particle_type=True,\n units=units)\n"
] | [
[
"numpy.prod",
"numpy.empty",
"numpy.fromfile",
"numpy.dtype"
],
[
"numpy.distutils.misc_util.Configuration"
],
[
"numpy.random.random",
"numpy.sqrt",
"numpy.ones_like",
"numpy.power",
"numpy.arange",
"numpy.union1d",
"numpy.testing.assert_array_equal",
"numpy.concatenate",
"numpy.copy",
"numpy.intersect1d",
"numpy.testing.assert_raises",
"numpy.float64",
"numpy.array"
],
[
"numpy.sqrt",
"numpy.multiply",
"numpy.rint",
"numpy.concatenate",
"numpy.any",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.empty",
"numpy.random.randint"
],
[
"numpy.product",
"numpy.linspace",
"numpy.unique",
"numpy.ones",
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
],
[
"numpy.sqrt",
"numpy.reshape",
"numpy.isnan",
"numpy.ones",
"numpy.column_stack",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.19",
"1.24",
"1.16",
"1.23",
"1.20",
"1.7",
"1.12",
"1.21",
"1.22",
"1.14",
"1.6",
"1.13",
"1.9",
"1.17",
"1.10",
"1.18",
"1.15",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
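The yt-style field definitions embedded in the record above all follow one pattern: fetch the *normal*, *center*, and *bulk_velocity* field parameters, shift particle positions and velocities into that frame, then project the velocities onto a curvilinear basis (note that in this archived source the deprecated `particle_spherical_velocity_phi` alias returns the theta field rather than the phi field, an apparent copy-paste slip). Below is a minimal NumPy sketch of the radial projection step; `radial_velocity` is a hypothetical stand-in for yt's `get_sph_r_component`, not its API.

```python
# Minimal sketch of the pattern used by the particle fields above: shift
# positions/velocities into the frame set by the field parameters, then
# project velocities onto the spherical radial unit vector.
import numpy as np

def radial_velocity(pos, vel, center, bulk_velocity):
    """pos, vel: (3, N) arrays; center, bulk_velocity: length-3 sequences."""
    rel_pos = pos - np.reshape(center, (3, 1))
    rel_vel = vel - np.reshape(bulk_velocity, (3, 1))
    r = np.sqrt((rel_pos ** 2).sum(axis=0))
    r_hat = rel_pos / np.where(r > 0, r, 1.0)   # guard against r == 0
    return (r_hat * rel_vel).sum(axis=0)        # v_r = v . r_hat

pos = np.random.rand(3, 10)
vel = np.random.rand(3, 10)
print(radial_velocity(pos, vel, center=[0.5] * 3, bulk_velocity=[0.0] * 3))
```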
nbingo/sMOOth | [
"aacdc5d24b931e534e984681923ec74f1103ca2f"
] | [
"src/configs/adult/adult_mlp_weighted.py"
] | [
"\"\"\"\nAn example config file to train a ImageNet classifier with detectron2.\nModel and dataloader both come from torchvision.\nThis shows how to use detectron2 as a general engine for any new models and tasks.\n\nTo run, use the following command:\n\npython tools/lazyconfig_train_net.py --config-file configs/Misc/torchvision_imagenet_R_50.py \\\n --num-gpus 8 dataloader.train.dataset.root=/path/to/imagenet/\n\n\"\"\"\n\nimport yaml\nimport torch\nfrom omegaconf import OmegaConf\nfrom fvcore.common.param_scheduler import CosineParamScheduler\n\nfrom detectron2.solver import WarmupParamScheduler\nfrom detectron2.solver.build import get_default_optimizer_params\nfrom detectron2.config import LazyConfig, LazyCall as L\nfrom detectron2.evaluation import DatasetEvaluators\n\nfrom src.configs.common.utils import build_data_loader\nfrom src.models.adult_mlp import IncomeClassifier\nfrom src.loaders.adult_loader import FeatDataset\nfrom src.metrics.evaluators import ClassificationAcc, BinaryEqualizedOddsViolation\nfrom src.metrics.losses import cross_entropy_loss, equalized_odds_violation, MultiObjectiveLoss\nfrom src.harnesses.harnesses import MultiProcessHarness, SimpleHarness\n\ndataloader = OmegaConf.create()\ndataloader.train = L(build_data_loader)(\n dataset=L(FeatDataset)(\n subset='train',\n income_const=yaml.load(open('/lfs/local/0/nomir/sMOOth/data/Adult/income.yml'), Loader=yaml.FullLoader)\n ),\n batch_size=256,\n num_workers=4,\n training=True,\n)\n\ndataloader.test = L(build_data_loader)(\n dataset=L(FeatDataset)(\n subset='val',\n income_const=yaml.load(open('/lfs/local/0/nomir/sMOOth/data/Adult/income.yml'), Loader=yaml.FullLoader)\n ),\n batch_size=256,\n num_workers=4,\n training=False,\n)\n\n# Can also be list of DatasetEvaluators\ndataloader.evaluator = L(DatasetEvaluators)(evaluators=(ClassificationAcc(), BinaryEqualizedOddsViolation()))\n\ntrain = LazyConfig.load(\"/lfs/local/0/nomir/sMOOth/src/configs/common/train.py\").train\ntrain.init_checkpoint = None\n# max_iter = number epochs * (train dataset size / batch size)\ntrain.max_iter = 50 * 30162 // 256\ntrain.eval_period = 30162 // 256\ntrain.loss_fn = L(MultiObjectiveLoss)(losses=[cross_entropy_loss, equalized_odds_violation])\ntrain.loss_tradeoff = torch.Tensor([0.5, 0.5])\n# Arguments for multiprocess training\ntrain.harness = SimpleHarness\ntrain.num_workers = 1\ntrain.gpus = [0] # TODO: Eventually want this to be a commandline arg\ntrain.process_over_key = 'model.loss_fn'\ntrain.process_over_vals = [cross_entropy_loss]\n\nmodel = L(IncomeClassifier)(\n in_dim=105,\n hidden_dim=105,\n num_hidden_blocks=2,\n drop_prob=0.2,\n out_dim=2,\n loss_fn=train.loss_fn,\n device=train.device,\n)\n\noptimizer = L(torch.optim.Adam)(\n params=L(get_default_optimizer_params)(),\n lr=1e-3,\n weight_decay=1e-4,\n)\n\nlr_multiplier = L(WarmupParamScheduler)(\n scheduler=L(CosineParamScheduler)(\n start_value=0.1,\n end_value=1e-4,\n ),\n warmup_length=1 / 100,\n warmup_factor=0.1,\n)\n"
] | [
[
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
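The adult_mlp_weighted config above combines two objectives through `MultiObjectiveLoss` with a `loss_tradeoff` of `torch.Tensor([0.5, 0.5])`. A minimal sketch of what such a weighted combination could look like in plain PyTorch; this `MultiObjectiveLoss` is a hypothetical stand-in for the project's `src.metrics.losses` class, whose actual implementation is not shown in the record.

```python
# Hypothetical weighted multi-objective loss: each objective is a callable
# (pred, target) -> scalar tensor; the tradeoff vector weights their sum.
import torch

class MultiObjectiveLoss:
    def __init__(self, losses, tradeoff):
        self.losses = losses      # list of loss callables
        self.tradeoff = tradeoff  # 1-D tensor of weights, same length

    def __call__(self, pred, target):
        terms = torch.stack([fn(pred, target) for fn in self.losses])
        return (self.tradeoff * terms).sum()

ce = torch.nn.functional.cross_entropy
l1 = lambda pred, target: pred.abs().mean()   # toy second objective
loss_fn = MultiObjectiveLoss([ce, l1], torch.tensor([0.5, 0.5]))

pred, target = torch.randn(8, 2), torch.randint(0, 2, (8,))
print(loss_fn(pred, target))
```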
FujitsuResearch/automatic_pruning | [
"b3bb525b736ca3e465cb6fb87f134748424a0fe5"
] | [
"examples/resnet34_imagenet/resnet34.py"
] | [
"# resnet34.py COPYRIGHT Fujitsu Limited 2022\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef zero_padding(x1, x2):\n num_ch1 = x1.size()[1]\n num_ch2 = x2.size()[1]\n ch_diff = num_ch1 - num_ch2\n # path1 < path2 : zero padding to path1 tensor\n if num_ch1 < num_ch2:\n ch_diff = -1 * ch_diff\n if ch_diff%2 ==0:\n x1 = F.pad(x1[:, :, :, :], (0, 0, 0, 0, ch_diff//2, ch_diff//2), \"constant\", 0)\n else:\n x1 = F.pad(x1[:, :, :, :], (0, 0, 0, 0, ch_diff//2, (ch_diff//2)+1), \"constant\", 0)\n # path1 > path2 : zero padding to path2 tensor\n elif num_ch1 > num_ch2:\n if ch_diff%2 ==0:\n x2 = F.pad(x2[:, :, :, :], (0, 0, 0, 0, ch_diff//2, ch_diff//2), \"constant\", 0)\n else:\n x2 = F.pad(x2[:, :, :, :], (0, 0, 0, 0, ch_diff//2, (ch_diff//2)+1), \"constant\", 0)\n return x1, x2\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation,\n )\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(\n self,\n inplanes,\n planes,\n stride=1,\n downsample=None,\n groups=1,\n base_width=64,\n dilation=1,\n norm_layer=None,\n n_in_channels=None,\n n_channels1=None,\n n_channels2=None,\n ):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError(\"BasicBlock only supports groups=1 and base_width=64\")\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(n_in_channels, n_channels1, stride)\n self.bn1 = norm_layer(n_channels1)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(n_channels1, n_channels2)\n self.bn2 = norm_layer(n_channels2)\n self.downsample = downsample #if dawnsample else downsample(n_in_channels, n_channels3)\n self.stride = stride\n\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out, identity = zero_padding(out, identity) # zero padding\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet34(nn.Module):\n def __init__(\n self,\n block=BasicBlock,\n layers=[3, 4, 6, 3],\n num_classes=1000,\n zero_init_residual=False,\n groups=1,\n width_per_group=64,\n replace_stride_with_dilation=None,\n norm_layer=None,\n ch_conv1=64,\n\n ch_l10_1=64,\n ch_l10_2=64,\n ch_l11_1=64,\n ch_l11_2=64,\n ch_l12_1=64,\n ch_l12_2=64,\n\n ch_l20_1=128,\n ch_l20_2=128,\n ch_l20_ds=128,\n ch_l21_1=128,\n ch_l21_2=128,\n ch_l22_1=128,\n ch_l22_2=128,\n ch_l23_1=128,\n ch_l23_2=128,\n\n ch_l30_1=256,\n ch_l30_2=256,\n ch_l30_ds=256,\n ch_l31_1=256,\n ch_l31_2=256,\n ch_l32_1=256,\n ch_l32_2=256,\n ch_l33_1=256,\n ch_l33_2=256,\n ch_l34_1=256,\n ch_l34_2=256,\n ch_l35_1=256,\n ch_l35_2=256,\n\n ch_l40_1=512,\n ch_l40_2=512,\n ch_l40_ds=512,\n ch_l41_1=512,\n ch_l41_2=512,\n ch_l42_1=512,\n ch_l42_2=512,\n ):\n super(ResNet34, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if 
replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\n \"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation)\n )\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, ch_conv1, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = norm_layer(ch_conv1)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n in_ch_l11 = max(ch_conv1, ch_l10_2)\n in_ch_l12 = max(in_ch_l11, ch_l11_2)\n self.layer1 = self._make_layer_3(block=block, planes=64, blocks=layers[0],\n n_in_channels0=ch_conv1,\n n_channels00=ch_l10_1,\n n_channels01=ch_l10_2,\n n_channels_ds=None,\n n_in_channels1=in_ch_l11,\n n_channels10=ch_l11_1,\n n_channels11=ch_l11_2,\n n_in_channels2=in_ch_l12,\n n_channels20=ch_l12_1,\n n_channels21=ch_l12_2,\n )\n\n in_ch_l20 = max(in_ch_l12, ch_l12_2)\n in_ch_l21 = max(ch_l20_ds, ch_l20_2)\n in_ch_l22 = max(in_ch_l21, ch_l21_2)\n in_ch_l23 = max(in_ch_l22, ch_l22_2) \n self.layer2 = self._make_layer_4(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0],\n n_in_channels0=in_ch_l20,\n n_channels00=ch_l20_1,\n n_channels01=ch_l20_2,\n n_channels_ds=ch_l20_ds,\n n_in_channels1=in_ch_l21,\n n_channels10=ch_l21_1,\n n_channels11=ch_l21_2,\n n_in_channels2=in_ch_l22,\n n_channels20=ch_l22_1,\n n_channels21=ch_l22_2,\n n_in_channels3=in_ch_l23,\n n_channels30=ch_l23_1,\n n_channels31=ch_l23_2,\n )\n\n in_ch_l30 = max(in_ch_l23, ch_l23_2)\n in_ch_l31 = max(ch_l30_ds, ch_l30_2)\n in_ch_l32 = max(in_ch_l31, ch_l31_2)\n in_ch_l33 = max(in_ch_l32, ch_l32_2)\n in_ch_l34 = max(in_ch_l33, ch_l33_2)\n in_ch_l35 = max(in_ch_l34, ch_l34_2)\n self.layer3 = self._make_layer_6(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1],\n n_in_channels0=in_ch_l30,\n n_channels00=ch_l30_1,\n n_channels01=ch_l30_2,\n n_channels_ds=ch_l30_ds,\n n_in_channels1=in_ch_l31,\n n_channels10=ch_l31_1,\n n_channels11=ch_l31_2,\n n_in_channels2=in_ch_l32,\n n_channels20=ch_l32_1,\n n_channels21=ch_l32_2,\n n_in_channels3=in_ch_l33,\n n_channels30=ch_l33_1,\n n_channels31=ch_l33_2,\n n_in_channels4=in_ch_l34,\n n_channels40=ch_l34_1,\n n_channels41=ch_l34_2,\n n_in_channels5=in_ch_l35,\n n_channels50=ch_l35_1,\n n_channels51=ch_l35_2,\n )\n\n in_ch_l40 = max(in_ch_l35, ch_l35_2)\n in_ch_l41 = max(ch_l40_ds, ch_l40_2)\n in_ch_l42 = max(in_ch_l41, ch_l41_2)\n self.layer4 = self._make_layer_3(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2],\n n_in_channels0=in_ch_l40,\n n_channels00=ch_l40_1,\n n_channels01=ch_l40_2,\n n_channels_ds=ch_l40_ds,\n n_in_channels1=in_ch_l41,\n n_channels10=ch_l41_1,\n n_channels11=ch_l41_2,\n n_in_channels2=in_ch_l42,\n n_channels20=ch_l42_1,\n n_channels21=ch_l42_2,\n )\n\n in_ch_fc = max(in_ch_l42, ch_l42_2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(in_ch_fc, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each 
residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer_3(self, block, planes, blocks, stride=1, dilate=False,\n n_in_channels0=None,\n n_channels00=None, n_channels01=None,\n n_channels_ds=None,\n n_in_channels1=None,\n n_channels10=None, n_channels11=None,\n n_in_channels2=None,\n n_channels20=None, n_channels21=None,\n ):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential( conv1x1(n_in_channels0, n_channels_ds, stride), norm_layer(n_channels_ds) )\n\n self.inplanes = planes * block.expansion\n layers = []\n\n # layer_0\n layers.append(\n block(\n self.inplanes,\n planes,\n stride,\n downsample,\n self.groups,\n self.base_width,\n previous_dilation,\n norm_layer,\n n_in_channels=n_in_channels0,\n n_channels1=n_channels00,\n n_channels2=n_channels01,\n )\n )\n # layer_1\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels1,\n n_channels1=n_channels10,\n n_channels2=n_channels11,\n )\n )\n # layer_2\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels2,\n n_channels1=n_channels20,\n n_channels2=n_channels21,\n )\n )\n return nn.Sequential(*layers)\n\n\n def _make_layer_4(self, block, planes, blocks, stride=1, dilate=False,\n n_in_channels0=None,\n n_channels00=None, n_channels01=None,\n n_channels_ds=None,\n n_in_channels1=None,\n n_channels10=None, n_channels11=None,\n n_in_channels2=None,\n n_channels20=None, n_channels21=None,\n n_in_channels3=None,\n n_channels30=None, n_channels31=None,\n ):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential( conv1x1(n_in_channels0, n_channels_ds, stride), norm_layer(n_channels_ds) )\n\n self.inplanes = planes * block.expansion\n layers = []\n\n # layer_0\n layers.append(\n block(\n self.inplanes,\n planes,\n stride,\n downsample,\n self.groups,\n self.base_width,\n previous_dilation,\n norm_layer,\n n_in_channels=n_in_channels0,\n n_channels1=n_channels00,\n n_channels2=n_channels01,\n )\n )\n # layer_1\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels1,\n n_channels1=n_channels10,\n n_channels2=n_channels11,\n )\n )\n # layer_2\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels2,\n n_channels1=n_channels20,\n n_channels2=n_channels21,\n )\n )\n # layer_3\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels3,\n n_channels1=n_channels30,\n 
n_channels2=n_channels31,\n )\n )\n return nn.Sequential(*layers)\n\n\n def _make_layer_6(self, block, planes, blocks, stride=1, dilate=False,\n n_in_channels0=None,\n n_channels00=None, n_channels01=None,\n n_channels_ds=None,\n n_in_channels1=None,\n n_channels10=None, n_channels11=None,\n n_in_channels2=None,\n n_channels20=None, n_channels21=None,\n n_in_channels3=None,\n n_channels30=None, n_channels31=None,\n n_in_channels4=None,\n n_channels40=None, n_channels41=None,\n n_in_channels5=None,\n n_channels50=None, n_channels51=None,\n ):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential( conv1x1(n_in_channels0, n_channels_ds, stride), norm_layer(n_channels_ds) )\n\n self.inplanes = planes * block.expansion\n layers = []\n\n # layer_0\n layers.append(\n block(\n self.inplanes,\n planes,\n stride,\n downsample,\n self.groups,\n self.base_width,\n previous_dilation,\n norm_layer,\n n_in_channels=n_in_channels0,\n n_channels1=n_channels00,\n n_channels2=n_channels01,\n )\n )\n # layer_1\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels1,\n n_channels1=n_channels10,\n n_channels2=n_channels11,\n )\n )\n # layer_2\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels2,\n n_channels1=n_channels20,\n n_channels2=n_channels21,\n )\n )\n # layer_3\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels3,\n n_channels1=n_channels30,\n n_channels2=n_channels31,\n )\n )\n # layer_4\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels4,\n n_channels1=n_channels40,\n n_channels2=n_channels41,\n )\n )\n # layer_5\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels5,\n n_channels1=n_channels50,\n n_channels2=n_channels51,\n )\n )\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.reshape(x.size(0), -1)\n x = self.fc(x)\n\n return x\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU",
"torch.nn.functional.pad",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
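The `zero_padding` helper in resnet34.py exists because channel pruning can leave the residual branch and the identity path with different channel counts; the narrower tensor gets zero channels appended so the elementwise add still works. A short, runnable demonstration of the same `F.pad` call (shapes chosen arbitrarily):

```python
# Pad the channel dimension of the narrower tensor so a pruned residual
# branch can still be added to its identity path.
import torch
import torch.nn.functional as F

x1 = torch.randn(2, 48, 8, 8)   # pruned branch: 48 channels
x2 = torch.randn(2, 64, 8, 8)   # identity path: 64 channels

diff = x2.size(1) - x1.size(1)
# For a 4-D tensor the 6-tuple pads (W_left, W_right, H_top, H_bottom,
# C_front, C_back) -- only the channel dimension is padded here.
x1 = F.pad(x1, (0, 0, 0, 0, diff // 2, diff - diff // 2), "constant", 0)

out = x1 + x2                    # shapes now match: (2, 64, 8, 8)
print(out.shape)
```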
hobinkwak/Stock-Movements-Classification | [
"dac2e90d9ef2294f5c4dc8f6605b9051c71b3f45"
] | [
"utils/dataload.py"
] | [
"from itertools import combinations\nimport pandas as pd\n\nfrom utils.utils import *\n\n\ndef load_etf():\n etf_data = pd.read_csv(\n \"data/etf_data.csv\", encoding=\"euc_kr\", parse_dates=[\"tdate\"]\n )\n etf_ohlcv = etf_data.set_index([\"tdate\", \"etf_code\", \"data_name\"])[\n \"value\"\n ].unstack()\n etf_close = etf_ohlcv[\"종가\"].unstack()\n return etf_close\n\ndef load_macro_data():\n macro_data = pd.read_csv('외부데이터/macro_final.csv', index_col='Item Name').iloc[1:, :]\n macro_data.index = pd.to_datetime(macro_data.index)\n macro_data = macro_data.fillna(method='ffill')\n macro_data = (macro_data.resample('m').last() / macro_data.resample('m').first())\n\n macro_data.columns = ['FOMC정책금리', '한국정책금리', '중국정책금리', '미국국채_1m', '미국국채_3m', '미국국채_6m', '미국국채_1y', '미국국채_5y',\n '미국국채_10y', '리보_달러_1m', '리보_달러_1y', '리보_달러_3m', '리보_달러_6m', '리보_달러_1w',\n 'DDR4 16G (2G*8) 2666 MHZ', 'NAND 16Gb 2Gx8 SLC', 'DDR4 16G (2G*8) eTT MHZ',\n 'DDR3 4Gb 512Mx8 1600/1866Mbps', 'DDR3 4Gb 512Mx8 eTT',\n 'NAND 8Gb 1Gx8 SLC', 'NAND 64Gb 8Gx8 MLC', 'WTI_1M', 'BRENT_1M', 'DUBAI_ASIA1M',\n '난방유_선물_NYMEX', '천연가스_선물_NYMEX', '가스오일_선물_IPE', '천연가스_선물_IPE', '금_선물', '은_선물', '알루미늄_선물',\n '전기동_선물', '납_선물', '니켈_선물', '주석_선물', '아연_선물', '10YR BEI', 'T10Y2Y', 'DFF',\n 'HY Ef Yield', 'Trade DI', 'VIX', 'USDKRW', 'Eco Policy Uncertainty']\n\n macro_data = macro_data[\n ['FOMC정책금리', '한국정책금리', '중국정책금리', '미국국채_1m', '미국국채_3m', '미국국채_6m', '미국국채_1y', '미국국채_5y', '미국국채_10y', '리보_달러_1m',\n '리보_달러_1y', '리보_달러_3m', '리보_달러_6m', '리보_달러_1w', 'DDR3 4Gb 512Mx8 eTT',\n 'NAND 8Gb 1Gx8 SLC', 'WTI_1M', 'BRENT_1M', 'DUBAI_ASIA1M', '난방유_선물_NYMEX', '천연가스_선물_NYMEX', '가스오일_선물_IPE',\n '천연가스_선물_IPE', '금_선물', '은_선물', '알루미늄_선물', '전기동_선물', '납_선물', '니켈_선물', '주석_선물', '아연_선물', '10YR BEI', 'T10Y2Y',\n 'HY Ef Yield', 'Trade DI', 'VIX', 'USDKRW', 'Eco Policy Uncertainty']]\n return macro_data\n\n\n\ndef load_wics_data():\n WICS대_exposure = process_wics_data(\"./외부데이터/ETF별 업종 exposure.csv\")\n WICS업종 = process_wics_data(\"./외부데이터/WICS 업종별 투자정보 데이터.csv\")\n WICS대 = WICS업종[\n [\n \"에너지\",\n \"소재\",\n \"산업재\",\n \"경기관련소비재\",\n \"필수소비재\",\n \"건강관리\",\n \"금융\",\n \"IT\",\n \"커뮤니케이션서비스\",\n \"유틸리티\",\n ]\n ]\n WICS대 = WICS대.T.drop_duplicates().T\n return WICS대, WICS대_exposure\n\n\n\ndef features_from_wics(wics):\n \"\"\"\n wics : WICS대 (from load_wics_data())\n \"\"\"\n wics_price = wics.xs(\"종가지수\", level=1, axis=1)\n momentums = get_moving_features(wics_price, type='price')\n\n wics_trd_volume = wics.xs(\"거래대금\", level=1, axis=1)\n trd_volumes = get_moving_features(wics_trd_volume, type='volume')\n wics_retail_volume = wics.xs(\"개인 순매수대금(일간)\", level=1, axis=1).fillna(0)\n retail_volumes = get_moving_features(wics_retail_volume, type='volume')\n wics_for_volume = wics.xs(\"외국인총합계순매수대금(일간)\", level=1, axis=1).fillna(0)\n for_volumes = get_moving_features(wics_for_volume, type='volume')\n wics_inst_volume = wics.xs(\"기관 순매수대금(일간)\", level=1,axis=1).fillna(0)\n inst_volumes = get_moving_features(wics_inst_volume, type='volume')\n\n wics_pe = wics.xs(\"P/E(FY0)\", level=1,axis=1)\n pe_scale = wics_pe.resample('M').last().apply(lambda X: minmaxscale(X), axis=1)\n\n wics_fwd_pe = wics.xs(\"P/E(Fwd.12M)\", level=1,axis=1)\n fwd_pe_changes = get_moving_features(wics_fwd_pe, type='fwd')\n wics_fwd_eps = wics.xs(\"EPS(Fwd.12M, 지배)\", level=1,axis=1)\n fwd_eps_changes =get_moving_features(wics_fwd_eps, type='fwd')\n\n size_ = wics.xs(\"시가총액\", level=1,axis=1).resample('M').last()\n\n features = {\n \"macro\": load_macro_data(),\n \"size\": size_,\n \"mom_1m\": momentums[0],\n 
\"mom_3m\": momentums[1],\n \"mom_6m\": momentums[2],\n \"mom_1y\": momentums[3],\n \"trd_1m\": trd_volumes[0],\n \"trd_3m\": trd_volumes[1],\n \"trd_6m\": trd_volumes[2],\n \"trd_1y\": trd_volumes[3],\n \"retail_trd_1m\": retail_volumes[0],\n \"retail_trd_3m\": retail_volumes[1],\n \"retail_trd_6m\": retail_volumes[2],\n \"retail_trd_1y\": retail_volumes[3],\n \"for_trd_1m\": for_volumes[0],\n \"for_trd_3m\": for_volumes[1],\n \"for_trd_6m\": for_volumes[2],\n \"for_trd_1y\": for_volumes[3],\n \"inst_trd_1m\": inst_volumes[0],\n \"inst_trd_3m\": inst_volumes[1],\n \"inst_trd_6m\": inst_volumes[2],\n \"inst_trd_1y\": inst_volumes[3],\n \"fwd_pe_1m\": fwd_pe_changes[0],\n \"fwd_pe_3m\": fwd_pe_changes[1],\n \"fwd_eps_1m\": fwd_eps_changes[0],\n \"fwd_eps_3m\": fwd_eps_changes[1],\n \"pe\": pe_scale,\n }\n\n return wics_price, features\n\n\ndef combination_set(pair, start, end, price, features):\n \"\"\"\n :param pair: WICS대분류 pair\n :param start: 기간\n :param end: 기간\n :param price: wics_prices (from features_from_wics())\n :param features: features (from features_from_wics())\n \"\"\"\n comb_price = price[list(pair)]\n comb_ret = (comb_price.resample('m').last() / comb_price.resample('m').first()).loc[start:end]\n\n feature_table = features['macro'].loc[start:end]\n for key in list(features.keys())[1:6]:\n feature_table[key] = features[key].apply(lambda x: (x[pair[0]] / x[pair[1]]), axis=1).loc[start:end]\n for key in list(features.keys())[6:]:\n feature_table[key] = features[key].apply(lambda x: (x[pair[0]] - x[pair[1]]), axis=1).loc[start:end]\n\n comb_ret['winner'] = comb_ret.apply(\n lambda x: comb_ret.columns[0] if (x[comb_ret.columns[0]] > x[comb_ret.columns[1]]) else comb_ret.columns[1],\n axis=1)\n\n feature_table = feature_table.replace([-np.inf, np.inf], np.nan).fillna(method='ffill')\n comb_ret = comb_ret.replace([-np.inf, np.inf], np.nan).fillna(method='ffill')\n\n feature_table = feature_table.shift(1).iloc[1:]\n comb_ret = comb_ret.iloc[1:]\n\n X_data = feature_table\n y_data = comb_ret[['winner']].astype('category')\n\n return X_data, y_data\n\ndef load_dataset():\n WICS대,_ = load_wics_data()\n price, features = features_from_wics(WICS대)\n columns = ['에너지', '소재', '산업재', '경기관련소비재', '필수소비재', '건강관리', '금융', 'IT', '커뮤니케이션서비스', '유틸리티']\n pairs = list(combinations(columns, 2))\n total_dataset = {pair : combination_set(pair,'2011-12','2021-05', price, features) for pair in pairs}\n return total_dataset\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
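The ETF loaders above lean on one pandas idiom throughout: `resample('m').last() / resample('m').first()` turns a daily series into a monthly last-over-first return factor, used for both the macro features and the sector momentum/volume features. A minimal sketch with synthetic data (column names are placeholders):

```python
# Monthly return factor from a daily price series: last price of the month
# divided by the first price of the month, per column.
import numpy as np
import pandas as pd

idx = pd.date_range("2021-01-01", "2021-03-31", freq="D")
price = pd.DataFrame({"sector_a": np.linspace(100, 130, len(idx)),
                      "sector_b": np.linspace(50, 45, len(idx))}, index=idx)

monthly_ret = price.resample("M").last() / price.resample("M").first()
print(monthly_ret)
```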
haideraltahan/datasets | [
"aad5c7ea705949d20817fcc49a892bb2a21532f0"
] | [
"tensorflow_datasets/testing/starcraft.py"
] | [
"# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tool for preparing test example of Starcraft dataset.\n\n\n./starcraft --resolution=64 --output_file=test.tfrecords\n./starcraft --resolution=64 --output_file=train_0.tfrecords\n./starcraft --resolution=64 --output_file=train_1.tfrecords\n./starcraft --resolution=64 --output_file=valid.tfrecords\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport png\nimport six\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\"resolution\", 64, \"Resolution of the video.\")\nflags.DEFINE_string(\"output_file\", None, \"Path to the output file.\")\n\n\ndef main(argv):\n if len(argv) > 1:\n raise tf.app.UsageError(\"Too many command-line arguments.\")\n\n writer = tf.io.TFRecordWriter(FLAGS.output_file)\n\n feature_list = {}\n frame_list = []\n for _ in range(20):\n # generate 20 frames.\n png_image = six.StringIO()\n png.from_array(\n np.random.randint(\n low=0,\n high=255,\n size=(FLAGS.resolution, FLAGS.resolution, 3),\n dtype=np.uint8), \"RGB\").save(png_image)\n frame_list.append(\n tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[png_image.getvalue()])))\n png_image.close()\n\n feature_list[\"rgb_screen\"] = tf.train.FeatureList(feature=frame_list)\n\n context_feature = {}\n context_feature[\"game_duration_loops\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[20]))\n context_feature[\"game_duration_seconds\"] = tf.train.Feature(\n float_list=tf.train.FloatList(value=[20.0]))\n context_feature[\"n_steps\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[20]))\n context_feature[\"screen_size\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[FLAGS.resolution, FLAGS.resolution]))\n\n example = tf.train.SequenceExample(\n feature_lists=tf.train.FeatureLists(feature_list=feature_list),\n context=tf.train.Features(feature=context_feature))\n writer.write(example.SerializeToString())\n writer.close()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
] | [
[
"tensorflow.io.TFRecordWriter",
"tensorflow.train.FeatureLists",
"tensorflow.app.UsageError",
"tensorflow.train.FeatureList",
"numpy.random.randint",
"tensorflow.train.FloatList",
"tensorflow.train.Features",
"tensorflow.train.Int64List"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
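The StarCraft test-data generator above PNG-encodes each random frame into an in-memory buffer before wrapping it in a `tf.train.BytesList`. A minimal sketch of just the encoding step; it uses `io.BytesIO`, since pypng writes bytes and the archived script's `six.StringIO()` buffer would only work under Python 2.

```python
# Encode one random RGB frame to PNG bytes in memory, the same way the
# generator prepares each frame for a BytesList feature.
import io

import numpy as np
import png

frame = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)
buf = io.BytesIO()
png.from_array(frame, "RGB").save(buf)
png_bytes = buf.getvalue()       # raw PNG payload for the feature
buf.close()
print(len(png_bytes), "bytes")
```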
zhao-david/ACORE-LFI | [
"91de88b77f0be110e42ed91bbb7a50b7ca83319a",
"91de88b77f0be110e42ed91bbb7a50b7ca83319a",
"91de88b77f0be110e42ed91bbb7a50b7ca83319a",
"91de88b77f0be110e42ed91bbb7a50b7ca83319a"
] | [
"acore/classifier_cov_pow_toy_pvalue.py",
"acore/classifier_power_multid_truth.py",
"acore/tests/test_sampling_mechanisms.py",
"acore/utils/pytorch_functions.py"
] | [
"from warnings import simplefilter\nsimplefilter(action='ignore', category=FutureWarning)\n\nimport numpy as np\nimport argparse\nimport pandas as pd\nfrom tqdm.auto import tqdm\nfrom datetime import datetime\nfrom sklearn.metrics import log_loss\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom utils.functions import train_clf, compute_statistics_single_t0, clf_prob_value, compute_bayesfactor_single_t0, \\\n odds_ratio_loss, train_pvalue_clf\nfrom models.toy_poisson import ToyPoissonLoader\nfrom models.toy_gmm import ToyGMMLoader\nfrom models.toy_gamma import ToyGammaLoader\nfrom or_classifiers.toy_example_list import classifier_dict, classifier_dict_mlpcomp, classifier_pvalue_dict\n\nmodel_dict = {\n 'poisson': ToyPoissonLoader,\n 'gmm': ToyGMMLoader,\n 'gamma': ToyGammaLoader\n}\n\n\ndef main(run, rep, b, b_prime, alpha, t0_val, sample_size_obs, test_statistic, mlp_comp=False,\n monte_carlo_samples=500, debug=False, seed=7, size_check=1000, verbose=False, marginal=False,\n size_marginal=1000, guided_sim=False, guided_sample=1000, empirical_marginal=True):\n\n # Changing values if debugging\n b = b if not debug else 100\n b_prime = b_prime if not debug else 100\n size_check = size_check if not debug else 100\n rep = rep if not debug else 2\n model_obj = model_dict[run](marginal=marginal, size_marginal=size_marginal, empirical_marginal=empirical_marginal)\n classifier_dict_run = classifier_dict_mlpcomp if mlp_comp else classifier_dict\n\n # Get the correct functions\n msnh_sampling_func = model_obj.sample_msnh_algo5\n grid_param = model_obj.grid\n gen_obs_func = model_obj.sample_sim\n gen_sample_func = model_obj.generate_sample\n gen_param_fun = model_obj.sample_param_values\n t0_grid = model_obj.pred_grid\n tp_func = model_obj.compute_exact_prob\n\n # Creating sample to check entropy about\n np.random.seed(seed)\n sample_check = gen_sample_func(sample_size=size_check, marginal=marginal)\n theta_vec = sample_check[:, :model_obj.d]\n x_vec = sample_check[:, (model_obj.d + 1):]\n bern_vec = sample_check[:, model_obj.d]\n\n true_prob_vec = tp_func(theta_vec=theta_vec, x_vec=x_vec)\n entropy_est = -np.average([np.log(true_prob_vec[kk]) if el == 1\n else np.log(1 - true_prob_vec[kk])\n for kk, el in enumerate(bern_vec)])\n\n # Loop over repetitions and classifiers\n # Each time we train the different classifiers, we build the intervals and we record\n # whether the point is in or not.\n out_val = []\n out_cols = ['test_statistic', 'b_prime', 'b', 'classifier', 'classifier_pvalue', 'run', 'rep', 'sample_size_obs',\n 'cross_entropy_loss', 'cross_entropy_loss_pvalue', 't0_true_val', 'theta_0_current', 'on_true_t0',\n 'estimated_pvalue', 'in_confint', 'out_confint', 'size_CI', 'true_entropy', 'or_loss_value',\n 'monte_carlo_samples', 'guided_sim', 'empirical_marginal', 'guided_sample']\n pbar = tqdm(total=rep, desc='Toy Example for Simulations, n=%s, b=%s' % (sample_size_obs, b))\n rep_counter = 0\n not_update_flag = False\n while rep_counter < rep:\n # Generates samples for each t0 values, so to be able to check both coverage and power\n x_obs = gen_obs_func(sample_size=sample_size_obs, true_param=t0_val)\n\n # Train the classifier for the odds\n clf_odds_fitted = {}\n clf_pvalue_fitted = {}\n for clf_name, clf_model in sorted(classifier_dict_run.items(), key=lambda x: x[0]):\n clf_odds = train_clf(sample_size=b, clf_model=clf_model, gen_function=gen_sample_func,\n clf_name=clf_name, nn_square_root=True)\n if verbose:\n print('----- %s Trained' % clf_name)\n\n if test_statistic == 
'acore':\n tau_obs = np.array([\n compute_statistics_single_t0(\n clf=clf_odds, obs_sample=x_obs, t0=theta_0, grid_param_t1=grid_param,\n d=model_obj.d, d_obs=model_obj.d_obs) for theta_0 in t0_grid])\n elif test_statistic == 'avgacore':\n tau_obs = np.array([\n compute_bayesfactor_single_t0(\n clf=clf_odds, obs_sample=x_obs, t0=theta_0, gen_param_fun=gen_param_fun,\n d=model_obj.d, d_obs=model_obj.d_obs, log_out=False) for theta_0 in t0_grid])\n elif test_statistic == 'logavgacore':\n tau_obs = np.array([\n compute_bayesfactor_single_t0(\n clf=clf_odds, obs_sample=x_obs, t0=theta_0, gen_param_fun=gen_param_fun,\n d=model_obj.d, d_obs=model_obj.d_obs, log_out=True) for theta_0 in t0_grid])\n else:\n raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'\n ' Currently %s' % test_statistic)\n\n # Calculating cross-entropy\n est_prob_vec = clf_prob_value(clf=clf_odds, x_vec=x_vec, theta_vec=theta_vec, d=model_obj.d,\n d_obs=model_obj.d_obs)\n loss_value = log_loss(y_true=bern_vec, y_pred=est_prob_vec)\n\n # Calculating or loss\n or_loss_value = odds_ratio_loss(clf=clf_odds, x_vec=x_vec, theta_vec=theta_vec,\n bern_vec=bern_vec, d=1, d_obs=1)\n clf_odds_fitted[clf_name] = (tau_obs, loss_value, or_loss_value)\n\n # Train the P-value regression algorithm for confidence levels\n\n if guided_sim:\n # Commenting the above -- we now sample a set of thetas from the parameter (of size guided_sample)\n # budget, then resample them according to the odds values, fit a gaussian and then sample the\n # datasets from that.\n theta_mat_sample = gen_param_fun(sample_size=guided_sample)\n\n if test_statistic == 'acore':\n stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,\n func1d=lambda row: compute_statistics_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row,\n grid_param_t1=grid_param,\n d=model_obj.d,\n d_obs=model_obj.d_obs\n ))\n elif test_statistic == 'avgacore':\n stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row,\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples\n ))\n elif test_statistic == 'logavgacore':\n stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row,\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples,\n log_out=True\n ))\n else:\n raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'\n ' Currently %s' % test_statistic)\n\n # If there are log-odds, then some of the values might be negative, so we need to exponentiate them\n # so to make sure that the large negative numbers are counted correctly (i.e. 
as very low probability,\n # not probabilities with large magnitudes).\n if test_statistic in ['acore', 'logavgacore']:\n stats_sample = np.exp(stats_sample)\n stats_sample = stats_sample/np.sum(stats_sample)\n theta_mat_gaussian_fit = np.random.choice(a=theta_mat_sample, p=stats_sample.reshape(-1, ),\n size=guided_sample)\n std_gaussian_fit = np.std(theta_mat_gaussian_fit) if np.std(theta_mat_gaussian_fit) == 0.0 else 1.0\n theta_mat = np.clip(\n a=np.random.normal(size=b_prime, loc=np.mean(theta_mat_gaussian_fit),\n scale=std_gaussian_fit),\n a_min=model_obj.low_int, a_max=model_obj.high_int)\n sample_mat = np.apply_along_axis(arr=theta_mat.reshape(-1, 1), axis=1,\n func1d=lambda row: gen_obs_func(sample_size=sample_size_obs,\n true_param=row))\n else:\n # Generate a matrix with values for both the sampled thetas as the actual samples\n theta_mat, sample_mat = msnh_sampling_func(b_prime=b_prime, sample_size=sample_size_obs)\n\n full_mat = np.hstack((theta_mat.reshape(-1, 1), sample_mat))\n if test_statistic == 'acore':\n stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_statistics_single_t0(\n clf=clf_odds,\n obs_sample=row[model_obj.d:],\n t0=row[:model_obj.d],\n grid_param_t1=grid_param,\n d=model_obj.d,\n d_obs=model_obj.d_obs\n ))\n stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_statistics_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row[:model_obj.d],\n grid_param_t1=grid_param,\n d=model_obj.d,\n d_obs=model_obj.d_obs\n ))\n elif test_statistic == 'avgacore':\n stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=row[model_obj.d:],\n t0=row[:model_obj.d],\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples\n ))\n stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row[:model_obj.d],\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples\n ))\n elif test_statistic == 'logavgacore':\n stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=row[model_obj.d:],\n t0=row[:model_obj.d],\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples,\n log_out=True\n ))\n stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row[:model_obj.d],\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples,\n log_out=True\n ))\n else:\n raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'\n ' Currently %s' % test_statistic)\n\n if np.any(np.isnan(stats_mat_generated)) or not np.all(np.isfinite(stats_mat_generated)) or \\\n np.any(np.isnan(stats_mat_observed)) or not np.all(np.isfinite(stats_mat_observed)):\n not_update_flag = True\n break\n\n # Comparing the two vectors of values\n clf_pvalue_fitted[clf_name] = {}\n indicator_vec = np.greater(stats_mat_observed, stats_mat_generated).astype(int)\n for clf_name_pvalue, clf_model_pvalue in sorted(classifier_pvalue_dict.items(), key=lambda x: x[0]):\n\n # If there the indicator_vec is either all 0 or all 1, 
do not fit a classifier or sklearn will throw\n # an error out. Just return the class.\n if sum(indicator_vec) <= 1 or sum(indicator_vec) >= len(indicator_vec) - 1:\n pval_pred = np.repeat(sum(indicator_vec) / len(indicator_vec), b_prime)\n loss_value_pval = np.nan\n else:\n clf_pvalue = train_pvalue_clf(clf_model=clf_model_pvalue, X=theta_mat.reshape(-1, model_obj.d),\n y=indicator_vec.reshape(-1, ), clf_name=clf_name_pvalue,\n nn_square_root=True)\n pval_pred = clf_pvalue.predict_proba(t0_grid.reshape(-1, model_obj.d))[:, 1]\n theta_mat_pred = clf_pvalue.predict_proba(theta_mat.reshape(-1, model_obj.d))[:, 1]\n loss_value_pval = log_loss(y_true=indicator_vec, y_pred=theta_mat_pred)\n clf_pvalue_fitted[clf_name][clf_name_pvalue] = (pval_pred, loss_value_pval)\n\n # If there were some problems in calculating the statistics, get out of the loop\n if not_update_flag:\n not_update_flag = False\n continue\n\n # At this point all it's left is to record\n for clf_name, (tau_obs_val, cross_ent_loss, or_loss_value) in clf_odds_fitted.items():\n for clf_name_qr, (pvalue_val, pvalue_celoss_val) in clf_pvalue_fitted[clf_name].items():\n size_temp = np.mean((pvalue_val > alpha).astype(int))\n for kk, theta_0_current in enumerate(t0_grid):\n out_val.append([\n test_statistic, b_prime, b, clf_name, clf_name_qr, run, rep_counter, sample_size_obs,\n cross_ent_loss, pvalue_celoss_val, t0_val, theta_0_current, int(t0_val == theta_0_current),\n pvalue_val[kk], int(pvalue_val[kk] > alpha),\n int(pvalue_val[kk] <= alpha), size_temp, entropy_est, or_loss_value,\n monte_carlo_samples, int(guided_sim), int(empirical_marginal), guided_sample\n ])\n pbar.update(1)\n rep_counter += 1\n\n # Saving the results\n out_df = pd.DataFrame.from_records(data=out_val, index=range(len(out_val)), columns=out_cols)\n out_dir = 'sims/classifier_cov_pow_toy/'\n out_filename = 'classifier_reps_cov_pow_toy_pvalues_%steststats_%s_%sB_%sBprime_%s_%srep_alpha%s_sampleobs%s_t0val%s%s_%s.csv' % (\n test_statistic, 'mlp_comp' if mlp_comp else 'toyclassifiers', b, b_prime, run, rep,\n str(alpha).replace('.', '-'), sample_size_obs,\n str(t0_val).replace('.', '-'),\n '_empirmarg' if empirical_marginal else '',\n datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')\n )\n out_df.to_csv(out_dir + out_filename)\n\n # Print results\n cov_df = out_df[out_df['on_true_t0'] == 1][['classifier', 'classifier_pvalue', 'in_confint',\n 'cross_entropy_loss', 'cross_entropy_loss_pvalue', 'size_CI']]\n print(cov_df.groupby(['classifier', 'classifier_pvalue']).agg({'in_confint': [np.average],\n 'size_CI': [np.average, np.std],\n 'cross_entropy_loss': [np.average],\n 'cross_entropy_loss_pvalue': [np.average]}))\n\n # Power plots\n out_df['class_combo'] = out_df[['classifier', 'classifier_pvalue']].apply(lambda x: x[0] + '---' + x[1], axis = 1)\n plot_df = out_df[['class_combo', 'theta_0_current', 'out_confint']].groupby(\n ['class_combo', 'theta_0_current']).mean().reset_index()\n fig = plt.figure(figsize=(20, 10))\n sns.lineplot(x='theta_0_current', y='out_confint', hue='class_combo', data=plot_df, palette='cubehelix')\n plt.legend(loc='best', fontsize=25)\n plt.xlabel(r'$\\theta$', fontsize=25)\n plt.ylabel('Power', fontsize=25)\n plt.title(\"Power of Hypothesis Test, B=%s, B'=%s, n=%s, %s\" % (\n b, b_prime, sample_size_obs, run.title()), fontsize=25)\n out_dir = 'images/classifier_cov_pow_toy/'\n outfile_name = 'power_classifier_reps_pvalue_%steststats_%sB_%sBprime_%s_%srep_alpha%s_sampleobs%s_t0val%s_%s.pdf' % (\n test_statistic, b, b_prime, run, rep, 
str(alpha).replace('.', '-'), sample_size_obs,\n str(t0_val).replace('.', '-'),\n datetime.strftime(datetime.today(), '%Y-%m-%d')\n )\n plt.tight_layout()\n plt.savefig(out_dir + outfile_name)\n plt.close()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--seed', action=\"store\", type=int, default=7,\n help='Random State')\n parser.add_argument('--rep', action=\"store\", type=int, default=10,\n help='Number of Repetitions for calculating the Pinball loss')\n parser.add_argument('--b', action=\"store\", type=int, default=5000,\n help='Sample size to train the classifier for calculating odds')\n parser.add_argument('--b_prime', action=\"store\", type=int, default=1000,\n help='Sample size to train the quantile regression algorithm')\n parser.add_argument('--marginal', action='store_true', default=False,\n help='Whether we are using a parametric approximation of the marginal or'\n 'the baseline reference G')\n parser.add_argument('--alpha', action=\"store\", type=float, default=0.1,\n help='Statistical confidence level')\n parser.add_argument('--run', action=\"store\", type=str, default='poisson',\n help='Problem to run')\n parser.add_argument('--debug', action='store_true', default=False,\n help='If true, a very small value for the sample sizes is fit to make sure the'\n 'file can run quickly for debugging purposes')\n parser.add_argument('--verbose', action='store_true', default=False,\n help='If true, logs are printed to the terminal')\n parser.add_argument('--sample_size_obs', action=\"store\", type=int, default=10,\n help='Sample size of the actual observed data.')\n parser.add_argument('--t0_val', action=\"store\", type=float, default=10.0,\n help='True parameter which generates the observed dataset')\n parser.add_argument('--size_marginal', action=\"store\", type=int, default=1000,\n help='Sample size of the actual marginal distribution, if marginal is True.')\n parser.add_argument('--monte_carlo_samples', action=\"store\", type=int, default=500,\n help='Sample size for the calculation of the avgacore and logavgacore statistic.')\n parser.add_argument('--test_statistic', action=\"store\", type=str, default='acore',\n help='Test statistic to compute confidence intervals. Can be acore|avgacore|logavgacore')\n parser.add_argument('--mlp_comp', action='store_true', default=False,\n help='If true, we compare different MLP training algorithm.')\n parser.add_argument('--empirical_marginal', action='store_true', default=False,\n help='Whether we are sampling directly from the empirical marginal for G')\n parser.add_argument('--guided_sim', action='store_true', default=False,\n help='If true, we guided the sampling for the B prime in order to get meaningful results.')\n parser.add_argument('--guided_sample', action=\"store\", type=int, default=2500,\n help='The sample size to be used for the guided simulation. 
Only used if guided_sim is True.')\n argument_parsed = parser.parse_args()\n\n # b_vec = [100, 500, 1000]\n # for b_val in b_vec:\n main(\n run=argument_parsed.run,\n rep=argument_parsed.rep,\n marginal=argument_parsed.marginal,\n b=argument_parsed.b,\n b_prime=argument_parsed.b_prime,\n alpha=argument_parsed.alpha,\n debug=argument_parsed.debug,\n sample_size_obs=argument_parsed.sample_size_obs,\n t0_val=argument_parsed.t0_val,\n seed=argument_parsed.seed,\n verbose=argument_parsed.verbose,\n size_marginal=argument_parsed.size_marginal,\n monte_carlo_samples=argument_parsed.monte_carlo_samples,\n test_statistic=argument_parsed.test_statistic,\n mlp_comp=argument_parsed.mlp_comp,\n empirical_marginal=argument_parsed.empirical_marginal,\n guided_sim=argument_parsed.guided_sim,\n guided_sample=argument_parsed.guided_sample\n )\n",
"from warnings import simplefilter\nsimplefilter(action='ignore', category=FutureWarning)\n\nimport numpy as np\nimport argparse\nimport pandas as pd\nfrom tqdm.auto import tqdm\nfrom datetime import datetime\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom utils.functions import compute_exact_tau, compute_exact_tau_distr\nfrom models.toy_gmm_multid import ToyGMMMultiDLoader\n\nmodel_dict = {\n 'gmm': ToyGMMMultiDLoader\n}\n\n\ndef main(d_obs, run, rep, alpha, sample_size_obs, n_sampled_true_tau, debug=False, seed=7, verbose=False,\n marginal=False, size_marginal=1000, size_check=10000):\n\n # Changing values if debugging\n rep = rep if not debug else 2\n n_sampled_true_tau = n_sampled_true_tau if not debug else 10\n model_obj = model_dict[run](d_obs=d_obs, marginal=marginal, size_marginal=size_marginal)\n\n # Get the correct functions\n grid_param = model_obj.grid\n gen_obs_func = model_obj.sample_sim\n gen_sample_func = model_obj.generate_sample\n or_func = model_obj.compute_exact_or\n t0_grid = model_obj.pred_grid\n tp_func = model_obj.compute_exact_prob\n t0_val = model_obj.true_param\n\n # Loop over repetitions and classifiers\n # Each time we train the different classifiers, we build the intervals and we record\n # whether the point is in or not.\n np.random.seed(seed)\n out_val = []\n out_cols = ['d_obs', 'run', 'rep', 'classifier', 'sample_size_obs', 't0_true_val', 'theta_0_current', 'on_true_t0',\n 'in_true_interval', 'size_true_int', 'true_entropy']\n pbar = tqdm(total=rep, desc='Toy Example for Simulations, n=%s' % sample_size_obs)\n for jj in range(rep):\n\n # Creating sample to check entropy about\n sample_check = gen_sample_func(sample_size=size_check, marginal=False)\n theta_vec = sample_check[:, :model_obj.d]\n x_vec = sample_check[:, (model_obj.d + 1):]\n bern_vec = sample_check[:, model_obj.d]\n\n true_prob_vec = tp_func(theta_vec=theta_vec, x_vec=x_vec)\n entropy_est = -np.average([np.log(true_prob_vec[kk]) if el == 1\n else np.log(1 - true_prob_vec[kk])\n for kk, el in enumerate(bern_vec)])\n\n # TRUE CONFIDENCE INTERVAL\n # print('------ Calculate true Confidence Interval')\n # Generates samples for each t0 values, so to be able to check both coverage and power\n x_obs = gen_obs_func(sample_size=sample_size_obs, true_param=t0_val)\n\n # # Calculate the true LRT value\n tau_obs = np.array([compute_exact_tau(\n or_func=or_func, x_obs=x_obs, t0_val=theta_0, t1_linspace=grid_param) for theta_0 in t0_grid])\n\n tau_distr = np.apply_along_axis(arr=t0_grid.reshape(-1, model_obj.d), axis=1,\n func1d=lambda t0: compute_exact_tau_distr(\n gen_obs_func=gen_obs_func, or_func=or_func, t0_val=t0,\n t1_linspace=grid_param, n_sampled=n_sampled_true_tau,\n sample_size_obs=sample_size_obs, d_obs=model_obj.d_obs))\n assert tau_distr.shape == (t0_grid.shape[0], n_sampled_true_tau)\n\n quantile_pred_tau = np.quantile(a=tau_distr, q=alpha, axis=1)\n true_interval = (tau_obs > quantile_pred_tau).astype(int)\n true_interval_size = (np.sum(true_interval) / true_interval.shape[0])\n\n # At this point all it's left is to record\n for kk, theta_0_current in enumerate(t0_grid):\n out_val.append([\n d_obs, run, jj, 'Exact', sample_size_obs,\n t0_val, theta_0_current, int(t0_val == theta_0_current),\n true_interval[kk], true_interval_size, entropy_est\n ])\n pbar.update(1)\n\n # Saving the results\n out_df = pd.DataFrame.from_records(data=out_val, index=range(len(out_val)), columns=out_cols)\n out_dir = 'sims/classifier_power_multid/'\n out_filename = 
'truth_classifier_power_multid%s_%s_%srep_alpha%s_sampleobs%s_t0val%s_%ssampletau_%s.csv' % (\n d_obs, run, rep, str(alpha).replace('.', '-'), sample_size_obs,\n str(t0_val).replace('.', '-'), n_sampled_true_tau,\n datetime.strftime(datetime.today(), '%Y-%m-%d')\n )\n out_df.to_csv(out_dir + out_filename)\n\n # Print results\n cov_df = out_df[out_df['on_true_t0'] == 1][['classifier', 'in_true_interval', 'true_entropy', 'size_true_int']]\n print(cov_df.groupby(['classifier']).agg({'in_true_interval': [np.average],\n 'size_true_int': [np.average, np.std],\n 'true_entropy': [np.average, np.std]}))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--seed', action=\"store\", type=int, default=7,\n help='Random State')\n parser.add_argument('--d_obs', action=\"store\", type=int, default=2,\n help='Dimensionality of the observed data (feature space)')\n parser.add_argument('--rep', action=\"store\", type=int, default=10,\n help='Number of Repetitions for calculating the Pinball loss')\n parser.add_argument('--alpha', action=\"store\", type=float, default=0.1,\n help='Statistical confidence level')\n parser.add_argument('--run', action=\"store\", type=str, default='gmm',\n help='Problem to run')\n parser.add_argument('--debug', action='store_true', default=False,\n help='If true, a very small value for the sample sizes is fit to make sure the'\n 'file can run quickly for debugging purposes')\n parser.add_argument('--verbose', action='store_true', default=False,\n help='If true, logs are printed to the terminal')\n parser.add_argument('--sample_size_obs', action=\"store\", type=int, default=10,\n help='Sample size of the actual observed data.')\n parser.add_argument('--n_sampled_true_tau', action=\"store\", type=int, default=100,\n help='Number of Monte Carlo samples for calculating distribution of tau sample.')\n argument_parsed = parser.parse_args()\n\n main(\n d_obs=argument_parsed.d_obs,\n run=argument_parsed.run,\n rep=argument_parsed.rep,\n alpha=argument_parsed.alpha,\n debug=argument_parsed.debug,\n sample_size_obs=argument_parsed.sample_size_obs,\n seed=argument_parsed.seed,\n verbose=argument_parsed.verbose,\n n_sampled_true_tau=argument_parsed.n_sampled_true_tau\n )\n",
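Both simulation scripts above apply the same acceptance rule: a grid value `theta_0` is kept in the confidence set exactly when its observed statistic exceeds the `alpha`-quantile of its simulated null distribution. A self-contained toy sketch of that rule (all shapes and values below are illustrative, not taken from the scripts):

```python
# Toy illustration of the confidence-set rule used in the scripts above.
import numpy as np

alpha = 0.1
n_theta, n_sampled = 5, 100          # illustrative grid size and MC sample count
rng = np.random.default_rng(7)

tau_obs = rng.random(n_theta)                  # one observed statistic per theta_0
tau_distr = rng.random((n_theta, n_sampled))   # simulated taus under each theta_0

quantile_pred_tau = np.quantile(tau_distr, q=alpha, axis=1)
true_interval = (tau_obs > quantile_pred_tau).astype(int)   # 1 = theta_0 kept
true_interval_size = np.sum(true_interval) / true_interval.shape[0]
print(true_interval, true_interval_size)
```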
"import sys\nsys.path.append(\"..\")\nimport pytest\nimport numpy as np\n\nfrom models.sen_poisson import SenPoissonLoader\nfrom models.toy_gmm import ToyGMMLoader\nfrom scipy.stats import multivariate_normal, norm\n\n\ndef test__camelus_linc_sampling():\n\n np.random.seed(7)\n mean_instrumental = np.repeat(0, 7)\n cov_instrumental = np.diag(np.repeat(1, 7))\n\n instrumental_distr = multivariate_normal(mean=mean_instrumental, cov=cov_instrumental)\n\n # Create sample concat_mat\n concat_mat = np.array([[1.5, 1.5, 0], [0.5, 0.5, 1]])\n obs_value = np.array([1, 2, 3, 4, 5, 6, 7])\n random_sample = np.abs(instrumental_distr.rvs(size=1)).astype(int)\n\n # Sample matrix\n sample_mat_1 = np.apply_along_axis(arr=concat_mat, axis=1,\n func1d=lambda row: obs_value if row[2] else random_sample)\n sample_mat_2 = np.apply_along_axis(arr=concat_mat, axis=1,\n func1d=lambda row: obs_value.reshape(1, 7) if row[2]\n else random_sample.reshape(1, 7)).reshape(-1, 7)\n expected_mat = np.vstack((random_sample.reshape(-1, 7), obs_value.reshape(-1, 7)))\n\n np.testing.assert_array_equal(sample_mat_1, expected_mat)\n np.testing.assert_array_equal(sample_mat_2, expected_mat)\n\n\ndef test__poisson_2d_sampling_msnh():\n\n def sample_msnh_algo5_poisson_two_params(b_prime, sample_size):\n background_vec = np.random.uniform(low=80, high=100,\n size=b_prime).reshape(-1, 1)\n mu_vec = np.random.uniform(low=0, high=20,\n size=b_prime).reshape(-1, 1)\n theta_mat = np.hstack((background_vec, mu_vec))\n assert theta_mat.shape == (b_prime, 2)\n\n sample_mat = np.apply_along_axis(arr=theta_mat, axis=1,\n func1d=lambda row: np.hstack(\n (np.random.poisson(lam=row[0] + row[1], size=sample_size).reshape(-1, 1),\n np.random.poisson(lam=1 * row[0], size=sample_size).reshape(-1, 1))))\n return theta_mat, sample_mat\n\n np.random.seed(7)\n t1, s1 = sample_msnh_algo5_poisson_two_params(100, 10)\n\n model_obj = SenPoissonLoader()\n model_obj.set_reference_g(size_reference=100)\n np.random.seed(7)\n t2, s2 = model_obj.sample_msnh_algo5(100, 10)\n\n np.testing.assert_array_equal(t1, t2)\n np.testing.assert_array_equal(s1, s2)\n\n\ndef test__poisson_2d_sampling():\n\n def generate_sample_poisson_two_params(sample_size, mean_instrumental_poisson, cov_instrumental_poisson,\n p=0.5, marginal=False):\n background_vec = np.random.uniform(low=80, high=100,\n size=sample_size).reshape(-1, 1)\n mu_vec = np.random.uniform(low=0, high=20,\n size=sample_size).reshape(-1, 1)\n theta_mat = np.hstack((background_vec, mu_vec))\n assert theta_mat.shape == (sample_size, 2)\n\n bern_vec = np.random.binomial(n=1, p=p, size=sample_size)\n concat_mat = np.hstack((theta_mat.reshape(-1, 2),\n bern_vec.reshape(-1, 1)))\n\n if marginal:\n raise ValueError('Marginal not implemented for this example')\n else:\n instrumental_distr = multivariate_normal(mean=mean_instrumental_poisson, cov=cov_instrumental_poisson)\n\n sample = np.apply_along_axis(arr=concat_mat, axis=1,\n func1d=lambda row: np.array([\n np.random.poisson(lam=row[0] + row[1], size=1),\n np.random.poisson(lam=1 * row[0], size=1)]).reshape(1, 2) if row[\n 2] else\n np.abs(instrumental_distr.rvs(size=1)).astype(int).reshape(1, 2))\n return np.hstack((concat_mat, sample.reshape(-1, 2)))\n\n model_obj = SenPoissonLoader()\n model_obj.set_reference_g(size_reference=100)\n np.random.seed(7)\n t1 = model_obj.generate_sample(100)\n\n np.random.seed(7)\n t2 = generate_sample_poisson_two_params(100, model_obj.mean_instrumental, model_obj.cov_instrumental)\n\n np.testing.assert_array_equal(t1, t2)\n\n\ndef 
test__msnh_algo5_gmm():\n\n def gmm_manual_sampling(sample_size, mix_param=0.5, mu_param=[-5, 5], sigma_param=[1, 1]):\n cluster = np.random.binomial(n=1, p=mix_param, size=sample_size)\n means = np.take(mu_param, cluster)\n sigmas = np.take(sigma_param, cluster)\n return np.random.normal(loc=means, scale=sigmas, size=sample_size)\n\n def sample_msnh_algo5_gmm(b_prime, sample_size):\n theta_mat = np.random.uniform(low=0, high=10, size=b_prime).reshape(-1,\n 1)\n sample_mat = np.apply_along_axis(arr=theta_mat, axis=1,\n func1d=lambda row: gmm_manual_sampling(\n sample_size=sample_size, mu_param=[-row, row]))\n full_mat = np.hstack((theta_mat, sample_mat))\n return theta_mat, full_mat\n\n def generate_sample_gmm(sample_size=1000, p=0.5, marginal=False, sample_marginal=1000):\n\n theta_vec = np.random.uniform(low=0, high=10, size=sample_size) # Reference Distribution\n bern_vec = np.random.binomial(n=1, p=p, size=sample_size) # Generating Y_1,...,Y_n\n\n # Chaining in columns the two above and then sample either from F or G\n # according to Y_i for i=1,...,n\n concat_mat = np.hstack((theta_vec.reshape(-1, 1),\n bern_vec.reshape(-1, 1)))\n\n if marginal:\n theta_vec_marg = np.random.uniform(low=0, high=10,\n size=sample_marginal)\n marginal_sample = np.apply_along_axis(arr=theta_vec_marg.reshape(-1, 1), axis=1,\n func1d=lambda row: gmm_manual_sampling(\n sample_size=1, mu_param=[-row, row])).reshape(-1, )\n mean_mle = np.average(marginal_sample)\n std_mle = np.std(marginal_sample)\n instrumental_distr = norm(loc=mean_mle, scale=std_mle)\n else:\n instrumental_distr = norm(loc=0, scale=10)\n\n sample = np.apply_along_axis(arr=concat_mat, axis=1,\n func1d=lambda row: gmm_manual_sampling(\n sample_size=1, mu_param=[-row[0], row[0]]) if row[1] else\n instrumental_distr.rvs(size=1))\n return np.hstack((concat_mat, sample.reshape(-1, 1)))\n\n model_obj = ToyGMMLoader()\n np.random.seed(7)\n t1, s1 = model_obj.sample_msnh_algo5(10, 10)\n\n np.random.seed(7)\n t2, f2 = sample_msnh_algo5_gmm(10, 10)\n\n np.testing.assert_array_equal(t1, t2)\n np.testing.assert_array_equal(np.hstack((t1, s1)), f2)\n\n np.random.seed(7)\n b1 = model_obj.generate_sample(100)\n\n np.random.seed(7)\n b2 = generate_sample_gmm(100)\n\n np.testing.assert_array_equal(b1[:, :2], b2[:, :2])\n np.testing.assert_array_equal(b1[:, 2:], b2[:, 2:])",
"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom itertools import chain\n\n# Quantile regression code taken from https://colab.research.google.com/drive/1nXOlrmVHqCHiixqiMF6H8LSciz583_W2\n\n\nclass q_model(nn.Module):\n def __init__(self,\n quantiles,\n neur_shapes,\n in_shape=1,\n dropout=0.5,\n seed=7):\n super().__init__()\n self.quantiles = quantiles\n self.num_quantiles = len(quantiles)\n self.neur_shapes = neur_shapes\n self.in_shape = in_shape\n self.seed = seed\n self.out_shape = len(quantiles)\n self.dropout = dropout\n self.build_model()\n self.init_weights()\n\n def build_model(self):\n self.base_model = nn.Sequential(\n nn.Linear(self.in_shape, self.neur_shapes[0]),\n nn.ReLU(),\n # nn.BatchNorm1d(64),\n nn.Dropout(self.dropout),\n nn.Linear(self.neur_shapes[0], self.neur_shapes[1]),\n nn.ReLU(),\n # nn.BatchNorm1d(64),\n nn.Dropout(self.dropout),\n )\n final_layers = [\n nn.Linear(self.neur_shapes[1], 1) for _ in range(len(self.quantiles))\n ]\n self.final_layers = nn.ModuleList(final_layers)\n\n def init_weights(self):\n torch.manual_seed(self.seed)\n for m in chain(self.base_model, self.final_layers):\n if isinstance(m, nn.Linear):\n nn.init.orthogonal_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n tmp_ = self.base_model(x)\n return torch.cat([layer(tmp_) for layer in self.final_layers], dim=1)\n\n\nclass q_model_3l(nn.Module):\n def __init__(self,\n quantiles,\n neur_shapes,\n seed=7,\n in_shape=1,\n dropout=0.5):\n super().__init__()\n self.quantiles = quantiles\n self.num_quantiles = len(quantiles)\n self.neur_shapes = neur_shapes\n self.in_shape = in_shape\n self.seed = seed\n self.out_shape = len(quantiles)\n self.dropout = dropout\n self.build_model()\n self.init_weights()\n\n def build_model(self):\n self.base_model = nn.Sequential(\n nn.Linear(self.in_shape, self.neur_shapes[0]),\n nn.ReLU(),\n # nn.BatchNorm1d(64),\n nn.Dropout(self.dropout),\n nn.Linear(self.neur_shapes[0], self.neur_shapes[1]),\n nn.ReLU(),\n # nn.BatchNorm1d(64),\n nn.Linear(self.neur_shapes[1], self.neur_shapes[2]),\n nn.ReLU(),\n nn.Dropout(self.dropout),\n )\n final_layers = [\n nn.Linear(self.neur_shapes[-1], 1) for _ in range(len(self.quantiles))\n ]\n self.final_layers = nn.ModuleList(final_layers)\n\n def init_weights(self):\n torch.manual_seed(self.seed)\n for m in chain(self.base_model, self.final_layers):\n if isinstance(m, nn.Linear):\n nn.init.orthogonal_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n tmp_ = self.base_model(x)\n return torch.cat([layer(tmp_) for layer in self.final_layers], dim=1)\n\n\nclass QuantileLoss(nn.Module):\n def __init__(self, quantiles):\n super().__init__()\n self.quantiles = quantiles\n\n def forward(self, preds, target):\n assert not target.requires_grad\n assert preds.size(0) == target.size(0)\n losses = []\n for i, q in enumerate(self.quantiles):\n errors = target - preds[:, i]\n losses.append(torch.max((q - 1) * errors, q * errors).unsqueeze(1))\n loss = torch.mean(torch.sum(torch.cat(losses, dim=1), dim=1))\n return loss\n\n\nclass Learner:\n def __init__(self, model, optimizer_class, loss_func, device='cpu', seed=7):\n self.model = model.to(device)\n self.optimizer = optimizer_class(self.model.parameters())\n self.loss_func = loss_func.to(device)\n self.device = device\n self.seed = seed\n self.loss_history = []\n\n def fit(self, x, y, epochs, batch_size):\n torch.manual_seed(self.seed)\n self.model.train()\n for e in range(epochs):\n shuffle_idx = np.arange(x.shape[0])\n 
np.random.shuffle(shuffle_idx)\n x = x[shuffle_idx]\n y = y[shuffle_idx]\n epoch_losses = []\n for idx in range(0, x.shape[0], batch_size):\n self.optimizer.zero_grad()\n batch_x = torch.from_numpy(\n x[idx: min(idx + batch_size, x.shape[0]), :]\n ).float().to(self.device).requires_grad_(False)\n batch_y = torch.from_numpy(\n y[idx: min(idx + batch_size, y.shape[0])]\n ).float().to(self.device).requires_grad_(False)\n preds = self.model(batch_x)\n loss = self.loss_func(preds, batch_y)\n loss.backward()\n self.optimizer.step()\n epoch_losses.append(loss.cpu().detach().numpy())\n epoch_loss = np.mean(epoch_losses)\n self.loss_history.append(epoch_loss)\n\n def predict(self, x, mc=False):\n if mc:\n self.model.train()\n else:\n self.model.eval()\n return self.model(torch.from_numpy(x).to(self.device).requires_grad_(False)).cpu().detach().numpy()"
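A minimal usage sketch for the quantile-regression pieces above, assuming `q_model`, `QuantileLoss` and `Learner` from this file are in scope; the synthetic data and hyper-parameters are illustrative only:

```python
import numpy as np
import torch

quantiles = [0.05, 0.5, 0.95]
x = np.random.uniform(0, 10, size=(500, 1)).astype(np.float32)
y = (np.sin(x[:, 0]) + np.random.normal(scale=0.3, size=500)).astype(np.float32)

model = q_model(quantiles, neur_shapes=(64, 64), in_shape=1, dropout=0.1)
learner = Learner(model, torch.optim.Adam, QuantileLoss(quantiles))
learner.fit(x, y, epochs=50, batch_size=64)

preds = learner.predict(x)   # shape (500, 3): one column per requested quantile
```

Note that `predict` converts inputs with `torch.from_numpy` without casting, so arrays should already be `float32`; passing `mc=True` keeps dropout active, which enables Monte Carlo predictions.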
] | [
[
"matplotlib.pyplot.legend",
"numpy.log",
"matplotlib.pyplot.tight_layout",
"numpy.greater",
"numpy.random.seed",
"numpy.isfinite",
"numpy.isnan",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"sklearn.metrics.log_loss",
"numpy.std",
"numpy.mean",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"numpy.sum",
"matplotlib.pyplot.ylabel"
],
[
"numpy.log",
"numpy.quantile",
"numpy.sum",
"numpy.random.seed"
],
[
"numpy.hstack",
"numpy.take",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"scipy.stats.norm",
"numpy.random.normal",
"numpy.apply_along_axis",
"scipy.stats.multivariate_normal",
"numpy.std",
"numpy.random.poisson",
"numpy.average",
"numpy.random.binomial",
"numpy.random.uniform",
"numpy.repeat",
"numpy.array"
],
[
"torch.nn.Dropout",
"torch.max",
"torch.cat",
"torch.nn.init.constant_",
"torch.manual_seed",
"torch.nn.ModuleList",
"numpy.arange",
"torch.from_numpy",
"numpy.random.shuffle",
"torch.nn.Linear",
"numpy.mean",
"torch.nn.init.orthogonal_",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
onlyrico/AliceMind | [
"a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded",
"a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded",
"a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded"
] | [
"StructVBERT/tasks/vqa.py",
"StructVBERT/tasks/nlvr2.py",
"LatticeBERT/tokenization.py"
] | [
"# coding=utf-8\n# Copyleft 2019 project LXRT.\n\nimport os\nimport collections\n\nimport torch\nimport torch.nn as nn\nimport logging\nfrom torch.utils.data.dataloader import DataLoader\nfrom tqdm import tqdm\n\nfrom param import args\nfrom lxrt.qa_answer_table import load_lxmert_qa\nfrom tasks.vqa_model import VQAModel\nfrom tasks.vqa_data import VQADataset, VQATorchDataset, VQAEvaluator\n\nDataTuple = collections.namedtuple(\"DataTuple\", 'dataset loader evaluator')\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef get_data_tuple(splits: str, bs:int, shuffle=False, drop_last=False) -> DataTuple:\n dset = VQADataset(splits)\n tset = VQATorchDataset(dset)\n evaluator = VQAEvaluator(dset)\n data_loader = DataLoader(\n tset, batch_size=bs,\n shuffle=shuffle, num_workers=args.num_workers,\n drop_last=drop_last, pin_memory=True\n )\n\n return DataTuple(dataset=dset, loader=data_loader, evaluator=evaluator)\n\nclass WarmupOptimizer(object):\n def __init__(self, _lr_base, optimizer, _data_size, _batch_size):\n self.optimizer = optimizer\n self._step = 0\n self._lr_base = _lr_base\n self._rate = 0\n self._data_size = _data_size\n self._batch_size = _batch_size\n\n def step(self):\n self._step += 1\n rate = self.rate()\n for p in self.optimizer.param_groups:\n p['lr'] = rate\n self._rate = rate\n self.optimizer.step()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def rate(self, step=None):\n if step is None:\n step = self._step\n if step <= int(self._data_size / self._batch_size * 1):\n r = self._lr_base * 1/4.\n elif step <= int(self._data_size / self._batch_size * 2):\n r = self._lr_base * 2/4.\n elif step <= int(self._data_size / self._batch_size * 3):\n r = self._lr_base * 3/4.\n else:\n r = self._lr_base\n return r\n\ndef adjust_learning_rate(optimizer, decay_rate):\n optimizer._lr_base *= decay_rate\n\nclass VQA:\n def __init__(self):\n # Datasets\n self.train_tuple = get_data_tuple(\n args.train, bs=args.batch_size, shuffle=True, drop_last=True\n )\n if args.valid != \"\":\n self.valid_tuple = get_data_tuple(\n args.valid, bs=256, # for large model\n shuffle=False, drop_last=False\n )\n else:\n self.valid_tuple = None\n \n # Model\n self.model = VQAModel(self.train_tuple.dataset.num_answers)\n self._lr_decay_epoch_list = [8, 10]\n self._lr_decay_rate = 0.2\n\n # Load pre-trained weights\n if args.load_lxmert is not None:\n self.model.lxrt_encoder.load(args.load_lxmert)\n if args.load_lxmert_qa is not None:\n load_lxmert_qa(args.load_lxmert_qa, self.model,\n label2ans=self.train_tuple.dataset.label2ans)\n if args.fix_language_bert:\n assert args.patial_load\n state_dict = torch.load(args.patial_load)\n for k in state_dict.copy():\n if not k.startswith('bert.'):\n state_dict['bert.' 
+ k.replace('gamma', 'weight').replace('beta', 'bias')] = state_dict.pop(k)\n\n # fix bert parameters\n for name, param in self.model.lxrt_encoder.model.named_parameters():\n # if 'pooler' in name: # pooler not fixed\n # continue\n if name in state_dict:\n logger.info('fix param for: {}'.format(name))\n param.requires_grad = False\n\n # GPU options\n self.model = self.model.cuda()\n\n # Loss and Optimizer\n self.bce_loss = nn.BCEWithLogitsLoss()\n if 'bert' in args.optim:\n batch_per_epoch = len(self.train_tuple.loader)\n t_total = int(batch_per_epoch * args.epochs)\n logger.info(\"BertAdam Total Iters: %d\" % t_total)\n from lxrt.optimization import BertAdam\n self.optim = BertAdam(list(self.model.parameters()),\n lr=args.lr,\n warmup=0.1,\n t_total=t_total)\n elif 'adam' in args.optim:\n batch_per_epoch = len(self.train_tuple.loader)\n optim = args.optimizer(filter(lambda p: p.requires_grad, self.model.parameters()), lr=0, betas=(0.9, 0.98), eps=1e-9)\n self.optim = WarmupOptimizer(args.lr, optim, batch_per_epoch * args.batch_size, args.batch_size)\n else:\n self.optim = args.optimizer(self.model.parameters(), args.lr)\n\n if args.amp_type is not None:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to run this example.\")\n self.model, self.optim = amp.initialize(self.model, self.optim, opt_level=args.amp_type)\n\n if args.multiGPU:\n self.model.lxrt_encoder.multi_gpu()\n # Output Directory\n self.output = args.output\n os.makedirs(self.output, exist_ok=True)\n\n def train(self, train_tuple, eval_tuple):\n dset, loader, evaluator = train_tuple\n iter_wrapper = (lambda x: tqdm(x, total=len(loader))) if args.tqdm else (lambda x: x)\n\n best_valid = 0.\n for epoch in range(args.epochs):\n quesid2ans = {}\n if 'adam' in args.optim and epoch in self._lr_decay_epoch_list:\n adjust_learning_rate(self.optim, self._lr_decay_rate)\n for i, (ques_id, feats, boxes, sent, target) in iter_wrapper(enumerate(loader)):\n\n self.model.train()\n self.optim.zero_grad()\n\n feats, boxes, target = feats.cuda(), boxes.cuda(), target.cuda()\n\n logit = self.model(feats, boxes, sent)\n assert logit.dim() == target.dim() == 2\n loss = self.bce_loss(logit, target)\n loss = loss * logit.size(1)\n if args.multiGPU:\n loss = loss.mean() # mean() to average on multi-gpu.\n\n if args.amp_type is not None:\n from apex import amp\n with amp.scale_loss(loss, self.optim) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), args.clip_norm)\n self.optim.step()\n\n score, label = logit.max(1)\n for qid, l in zip(ques_id, label.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = ans\n\n log_str = \"\\nEpoch %d: Train %0.2f\\n\" % (epoch, evaluator.evaluate(quesid2ans) * 100.)\n\n if self.valid_tuple is not None: # Do Validation\n valid_score = self.evaluate(eval_tuple)\n if valid_score > best_valid:\n best_valid = valid_score\n self.save(\"BEST\")\n\n log_str += \"Epoch %d: Valid %0.2f\\n\" % (epoch, valid_score * 100.) 
+ \\\n \"Epoch %d: Best %0.2f\\n\" % (epoch, best_valid * 100.)\n\n logger.info(log_str)\n\n with open(self.output + \"/log.log\", 'a') as f:\n f.write(log_str)\n f.flush()\n\n self.save(\"LAST\")\n\n def predict(self, eval_tuple: DataTuple, dump=None):\n \"\"\"\n Predict the answers to questions in a data split.\n\n :param eval_tuple: The data tuple to be evaluated.\n :param dump: The path of saved file to dump results.\n :return: A dict of question_id to answer.\n \"\"\"\n self.model.eval()\n dset, loader, evaluator = eval_tuple\n quesid2ans = {}\n for i, datum_tuple in enumerate(loader):\n ques_id, feats, boxes, sent = datum_tuple[:4] # Avoid seeing ground truth\n with torch.no_grad():\n feats, boxes = feats.cuda(), boxes.cuda()\n logit = self.model(feats, boxes, sent)\n if args.with_score:\n logit = nn.Softmax(dim=1)(logit)\n score, label = logit.max(1)\n if args.with_score:\n for qid, l, s in zip(ques_id, label.cpu().numpy(), score.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = (ans, str(s))\n else:\n for qid, l in zip(ques_id, label.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = ans\n if dump is not None:\n evaluator.dump_result(quesid2ans, dump)\n return quesid2ans\n\n def evaluate(self, eval_tuple: DataTuple, dump=None):\n \"\"\"Evaluate all data in data_tuple.\"\"\"\n quesid2ans = self.predict(eval_tuple, dump)\n return eval_tuple.evaluator.evaluate(quesid2ans)\n\n @staticmethod\n def oracle_score(data_tuple):\n dset, loader, evaluator = data_tuple\n quesid2ans = {}\n for i, (ques_id, feats, boxes, sent, target) in enumerate(loader):\n _, label = target.max(1)\n for qid, l in zip(ques_id, label.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = ans\n return evaluator.evaluate(quesid2ans)\n\n def save(self, name):\n torch.save(self.model.state_dict(),\n os.path.join(self.output, \"%s.pth\" % name))\n\n def load(self, path):\n logger.info(\"Load model from %s\" % path)\n state_dict = torch.load(\"%s.pth\" % path)\n self.model.load_state_dict(state_dict)\n\n\nif __name__ == \"__main__\":\n # Build Class\n vqa = VQA()\n\n # Load VQA model weights\n if args.load is not None:\n vqa.load(args.load)\n\n # Test or Train\n if args.test is not None:\n args.fast = args.tiny = False # Always loading all data in test\n if 'test' in args.test:\n vqa.predict(\n get_data_tuple(args.test, bs=950,\n shuffle=False, drop_last=False),\n dump=os.path.join(args.output, 'test_predict.json')\n )\n elif 'val' in args.test: \n # Since part of valididation data are used in pre-training/fine-tuning,\n # only validate on the minival set.\n result = vqa.evaluate(\n get_data_tuple('minival', bs=950,\n shuffle=False, drop_last=False),\n dump=os.path.join(args.output, 'minival_predict.json')\n )\n logger.info(result)\n else:\n assert False, \"No such test option for %s\" % args.test\n else:\n # print('Splits in Train data:', vqa.train_tuple.dataset.splits)\n logger.info('Splits in Train data: {}'.format(vqa.train_tuple.dataset.splits))\n if vqa.valid_tuple is not None:\n logger.info('Splits in Valid data: {}'.format(vqa.valid_tuple.dataset.splits))\n logger.info(\"Valid Oracle: %0.2f\" % (vqa.oracle_score(vqa.valid_tuple) * 100))\n else:\n logger.info(\"DO NOT USE VALIDATION\")\n vqa.train(vqa.train_tuple, vqa.valid_tuple)\n\n\n",
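A minimal usage sketch for the `WarmupOptimizer` wrapper above (the model and sizes are illustrative): the wrapper overwrites the inner optimizer's learning rate on every `step`, ramping through quarters of the base LR over the first three epochs' worth of steps before settling at the full base LR.

```python
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
inner = torch.optim.Adam(params, lr=0.0)     # LR is managed by the wrapper
optim = WarmupOptimizer(1e-4, inner, _data_size=1000, _batch_size=100)

for step in range(40):                       # 10 steps per "epoch" here
    optim.zero_grad()
    # ... forward pass and loss.backward() would go here ...
    optim.step()                             # LR: 2.5e-5, then 5e-5, 7.5e-5, 1e-4
```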
"# coding=utf-8\n# Copyleft 2019 project LXRT.\n\nimport os\nimport collections\n\nfrom tqdm import tqdm\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data.dataloader import DataLoader\nimport logging\n\nfrom param import args\nfrom tasks.nlvr2_model import NLVR2Model\nfrom tasks.nlvr2_data import NLVR2Dataset, NLVR2TorchDataset, NLVR2Evaluator\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nDataTuple = collections.namedtuple(\"DataTuple\", 'dataset loader evaluator')\n\n\ndef get_tuple(splits: str, bs:int, shuffle=False, drop_last=False) -> DataTuple:\n dset = NLVR2Dataset(splits)\n tset = NLVR2TorchDataset(dset)\n evaluator = NLVR2Evaluator(dset)\n data_loader = DataLoader(\n tset, batch_size=bs,\n shuffle=shuffle, num_workers=args.num_workers,\n drop_last=drop_last, pin_memory=True\n )\n\n return DataTuple(dataset=dset, loader=data_loader, evaluator=evaluator)\n\n\nclass NLVR2:\n def __init__(self):\n self.train_tuple = get_tuple(\n args.train, bs=args.batch_size, shuffle=True, drop_last=True\n )\n if args.valid != \"\":\n valid_bsize = 256 if args.multiGPU else 256\n self.valid_tuple = get_tuple(\n args.valid, bs=valid_bsize,\n shuffle=False, drop_last=False\n )\n else:\n self.valid_tuple = None\n\n self.model = NLVR2Model()\n\n # Load pre-trained weights\n if args.load_lxmert is not None:\n self.model.lxrt_encoder.load(args.load_lxmert)\n\n # GPU options\n self.model = self.model.cuda()\n\n # Losses and optimizer\n self.mce_loss = nn.CrossEntropyLoss(ignore_index=-1)\n if 'bert' in args.optim:\n batch_per_epoch = len(self.train_tuple.loader)\n t_total = int(batch_per_epoch * args.epochs)\n logger.info(\"Total Iters: %d\" % t_total)\n from lxrt.optimization import BertAdam\n self.optim = BertAdam(list(self.model.parameters()),\n lr=args.lr,\n warmup=0.1,\n t_total=t_total)\n else:\n self.optim = args.optimizer(list(self.model.parameters()), args.lr)\n\n if args.amp_type is not None:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to run this example.\")\n self.model, self.optim = amp.initialize(self.model, self.optim, opt_level=args.amp_type)\n\n if args.multiGPU:\n self.model.lxrt_encoder.multi_gpu()\n self.output = args.output\n os.makedirs(self.output, exist_ok=True)\n\n def train(self, train_tuple, eval_tuple):\n dset, loader, evaluator = train_tuple\n iter_wrapper = (lambda x: tqdm(x, total=len(loader))) if args.tqdm else (lambda x: x)\n\n best_valid = 0.\n for epoch in range(args.epochs):\n quesid2ans = {}\n for i, (ques_id, feats, boxes, sent, label) in iter_wrapper(enumerate(loader)):\n self.model.train()\n\n self.optim.zero_grad()\n feats, boxes, label = feats.cuda(), boxes.cuda(), label.cuda()\n logit = self.model(feats, boxes, sent)\n\n loss = self.mce_loss(logit, label)\n\n if args.amp_type is not None:\n from apex import amp\n with amp.scale_loss(loss, self.optim) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), args.clip_norm)\n self.optim.step()\n\n score, predict = logit.max(1)\n for qid, l in zip(ques_id, predict.cpu().numpy()):\n quesid2ans[qid] = l\n\n log_str = \"\\nEpoch %d: Train %0.2f\\n\" % (epoch, evaluator.evaluate(quesid2ans) * 100.)\n\n if self.valid_tuple is not None: # Do Validation\n valid_score = self.evaluate(eval_tuple)\n if valid_score > best_valid:\n 
best_valid = valid_score\n self.save(\"BEST\")\n\n log_str += \"Epoch %d: Valid %0.2f\\n\" % (epoch, valid_score * 100.) + \\\n \"Epoch %d: Best %0.2f\\n\" % (epoch, best_valid * 100.)\n\n logger.info(log_str)\n\n with open(self.output + \"/log.log\", 'a') as f:\n f.write(log_str)\n f.flush()\n\n self.save(\"LAST\")\n\n def predict(self, eval_tuple: DataTuple, dump=None):\n self.model.eval()\n dset, loader, evaluator = eval_tuple\n quesid2ans = {}\n for i, datum_tuple in enumerate(loader):\n ques_id, feats, boxes, sent = datum_tuple[:4] # avoid handling target\n with torch.no_grad():\n feats, boxes = feats.cuda(), boxes.cuda()\n logit = self.model(feats, boxes, sent)\n score, predict = logit.max(1)\n for qid, l in zip(ques_id, predict.cpu().numpy()):\n quesid2ans[qid] = l\n if dump is not None:\n evaluator.dump_result(quesid2ans, dump)\n return quesid2ans\n\n def evaluate(self, eval_tuple: DataTuple, dump=None):\n dset, loader, evaluator = eval_tuple\n quesid2ans = self.predict(eval_tuple, dump)\n return evaluator.evaluate(quesid2ans)\n\n def save(self, name):\n torch.save(self.model.state_dict(),\n os.path.join(self.output, \"%s.pth\" % name))\n\n def load(self, path):\n logger.info(\"Load model from %s\" % path)\n state_dict = torch.load(\"%s.pth\" % path)\n new_state_dict = dict()\n for item in state_dict:\n if 'module' in item:\n new_state_dict[item.replace('module.', '')] = state_dict[item]\n else:\n new_state_dict[item] = state_dict[item]\n self.model.load_state_dict(new_state_dict)\n\n\nif __name__ == \"__main__\":\n # Build Class\n nlvr2 = NLVR2()\n\n # Load Model\n if args.load is not None:\n nlvr2.load(args.load)\n\n # Test or Train\n if args.test is not None:\n args.fast = args.tiny = False # Always loading all data in test\n if 'hidden' in args.test:\n nlvr2.predict(\n get_tuple(args.test, bs=args.batch_size,\n shuffle=False, drop_last=False),\n dump=os.path.join(args.output, 'hidden_predict.csv')\n )\n elif 'test' in args.test or 'valid' in args.test:\n result = nlvr2.evaluate(\n get_tuple(args.test, bs=args.batch_size,\n shuffle=False, drop_last=False),\n dump=os.path.join(args.output, '%s_predict.csv' % args.test)\n )\n logger.info(result)\n else:\n assert False, \"No such test option for %s\" % args.test\n else:\n logger.info('Splits in Train data: {}'.format(nlvr2.train_tuple.dataset.splits))\n if nlvr2.valid_tuple is not None:\n logger.info('Splits in Valid data: {}'.format(nlvr2.valid_tuple.dataset.splits))\n else:\n logger.info(\"DO NOT USE VALIDATION\")\n nlvr2.train(nlvr2.train_tuple, nlvr2.valid_tuple)\n\n\n",
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tokenization classes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport re\nimport unicodedata\nimport six\nimport tensorflow as tf\n\n\ndef validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-training. 
If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))\n\n\ndef convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab\n\n\ndef convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n for item in items:\n output.append(vocab[item])\n return output\n\n\ndef convert_tokens_to_ids(vocab, tokens):\n return convert_by_vocab(vocab, tokens)\n\n\ndef convert_ids_to_tokens(inv_vocab, ids):\n return convert_by_vocab(inv_vocab, ids)\n\n\ndef whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens\n\n\nclass FullTokenizer(object):\n \"\"\"Runs end-to-end tokenziation.\"\"\"\n\n def __init__(self, vocab_file, do_lower_case=True):\n self.vocab = load_vocab(vocab_file)\n self.inv_vocab = {v: k for k, v in self.vocab.items()}\n self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)\n\n def tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text):\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n\n return split_tokens\n\n def convert_tokens_to_ids(self, tokens):\n return convert_by_vocab(self.vocab, tokens)\n\n def convert_ids_to_tokens(self, ids):\n return convert_by_vocab(self.inv_vocab, ids)\n\n\nclass BasicTokenizer(object):\n \"\"\"Runs basic tokenization (punctuation splitting, lower casing, etc.).\"\"\"\n\n def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = 
convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens\n\n def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)\n\n def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]\n\n def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False\n\n def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n\nclass WordpieceTokenizer(object):\n \"\"\"Runs WordPiece tokenziation.\"\"\"\n\n def __init__(self, vocab, unk_token=\"[UNK]\", max_input_chars_per_word=200):\n self.vocab = vocab\n self.unk_token = unk_token\n self.max_input_chars_per_word = max_input_chars_per_word\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens\n\n\ndef _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False\n\n\ndef _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False\n\n\ndef _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if 
cat.startswith(\"P\"):\n return True\n return False"
] | [
[
"torch.nn.Softmax",
"torch.load",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad",
"torch.utils.data.dataloader.DataLoader"
],
[
"torch.nn.CrossEntropyLoss",
"torch.utils.data.dataloader.DataLoader",
"torch.no_grad",
"torch.load"
],
[
"tensorflow.gfile.GFile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ziniuwan/maed | [
"9e1f1c37eba81da86c8d9c62dc9be41a01abff5b"
] | [
"lib/models/spin.py"
] | [
"\"\"\"\nThis script is brought from https://github.com/nkolot/SPIN\nAdhere to their licence to use this script\n\"\"\"\n\nimport math\nimport torch\nimport numpy as np\nimport os.path as osp\nimport torch.nn as nn\n\nfrom lib.core.config import DATA_DIR\nfrom lib.utils.geometry import rotation_matrix_to_angle_axis, rot6d_to_rotmat\nfrom lib.models.smpl import SMPL, SMPL_MODEL_DIR, H36M_TO_J17, SMPL_MEAN_PARAMS\n\n\nclass Regressor(nn.Module):\n def __init__(self, smpl_mean_params=SMPL_MEAN_PARAMS, feat_dim=2048, hidden_dim=1024, **kwargs):\n super(Regressor, self).__init__()\n\n self.smpl = SMPL(\n SMPL_MODEL_DIR,\n create_transl=False,\n create_global_orient=False,\n create_body_pose=False,\n create_betas=False,\n )\n npose = 24 * 6\n nshape = 10\n\n self.fc1 = nn.Linear(feat_dim + npose + nshape + 3, hidden_dim)\n self.drop1 = nn.Dropout()\n self.fc2 = nn.Linear(hidden_dim, hidden_dim)\n self.drop2 = nn.Dropout()\n self.decpose = nn.Linear(hidden_dim, npose)\n self.decshape = nn.Linear(hidden_dim, nshape)\n self.deccam = nn.Linear(hidden_dim, 3)\n nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)\n nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)\n nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)\n\n mean_params = np.load(smpl_mean_params)\n init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)\n init_shape = torch.from_numpy(mean_params['shape'][:].astype('float32')).unsqueeze(0)\n init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)\n self.register_buffer('init_pose', init_pose)\n self.register_buffer('init_shape', init_shape)\n self.register_buffer('init_cam', init_cam)\n\n\n def iterative_regress(self, x, init_pose=None, init_shape=None, init_cam=None, n_iter=3):\n nt = x.shape[0]\n\n if init_pose is None:\n init_pose = self.init_pose.expand(nt, -1)\n if init_shape is None:\n init_shape = self.init_shape.expand(nt, -1)\n if init_cam is None:\n init_cam = self.init_cam.expand(nt, -1)\n\n pred_pose = init_pose\n pred_shape = init_shape\n pred_cam = init_cam\n for i in range(n_iter):\n xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)\n xc = self.fc1(xc)\n xc = self.drop1(xc)\n xc = self.fc2(xc)\n xc = self.drop2(xc)\n pred_pose = self.decpose(xc) + pred_pose\n pred_shape = self.decshape(xc) + pred_shape\n pred_cam = self.deccam(xc) + pred_cam\n\n return pred_pose, pred_shape, pred_cam\n\n def forward(self, x, seqlen, J_regressor=None,\n init_pose=None, init_shape=None, init_cam=None, n_iter=3, **kwargs):\n nt = x.shape[0]\n N = nt//seqlen\n\n pred_pose, pred_shape, pred_cam = self.iterative_regress(x, init_pose, init_shape, init_cam, n_iter=3)\n output_regress = self.get_output(pred_pose, pred_shape, pred_cam, J_regressor)\n\n return output_regress\n\n\n def get_output(self, pred_pose, pred_shape, pred_cam, J_regressor):\n output = {}\n nt = pred_pose.shape[0]\n pred_rotmat = rot6d_to_rotmat(pred_pose).reshape(nt, -1, 3, 3)\n \n pred_output = self.smpl(\n betas=pred_shape,\n body_pose=pred_rotmat[:, 1:],\n global_orient=pred_rotmat[:, 0].unsqueeze(1),\n pose2rot=False\n )\n pred_vertices = pred_output.vertices[:nt]\n pred_joints = pred_output.joints[:nt]\n if J_regressor is not None:\n J_regressor_batch = J_regressor[None, :].expand(pred_vertices.shape[0], -1, -1).to(pred_vertices.device)\n pred_joints = torch.matmul(J_regressor_batch, pred_vertices)\n pred_keypoints_2d = projection(pred_joints, pred_cam)\n pose = rotation_matrix_to_angle_axis(pred_rotmat.reshape(-1, 3, 3)).reshape(nt, -1)\n output['theta'] = torch.cat([pred_cam, pose, 
pred_shape], dim=1)\n output['verts'] = pred_vertices\n output['kp_2d'] = pred_keypoints_2d\n output['kp_3d'] = pred_joints\n output['rotmat'] = pred_rotmat\n return output\n\n\ndef projection(pred_joints, pred_camera):\n pred_cam_t = torch.stack([pred_camera[:, 1],\n pred_camera[:, 2],\n 2 * 5000. / (224. * pred_camera[:, 0] + 1e-9)], dim=-1)\n batch_size = pred_joints.shape[0]\n camera_center = torch.zeros(batch_size, 2)\n pred_keypoints_2d = perspective_projection(pred_joints,\n rotation=torch.eye(3).unsqueeze(0).expand(batch_size, -1, -1).to(pred_joints.device),\n translation=pred_cam_t,\n focal_length=5000.,\n camera_center=camera_center)\n # Normalize keypoints to [-1,1]\n pred_keypoints_2d = pred_keypoints_2d / (224. / 2.)\n return pred_keypoints_2d\n\n\ndef perspective_projection(points, rotation, translation,\n focal_length, camera_center):\n \"\"\"\n This function computes the perspective projection of a set of points.\n Input:\n points (bs, N, 3): 3D points\n rotation (bs, 3, 3): Camera rotation\n translation (bs, 3): Camera translation\n focal_length (bs,) or scalar: Focal length\n camera_center (bs, 2): Camera center\n \"\"\"\n batch_size = points.shape[0]\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:,0,0] = focal_length\n K[:,1,1] = focal_length\n K[:,2,2] = 1.\n K[:,:-1, -1] = camera_center\n\n # Transform points\n points = torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n\n # Apply perspective distortion\n projected_points = points / points[:,:,-1].unsqueeze(-1)\n\n # Apply camera intrinsics\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n\n return projected_points[:, :, :-1]\n"
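A quick numerical check of `perspective_projection` above (assuming the function is importable from this file): with an identity rotation, zero translation and a centred principal point, a point at depth Z projects to (f·X/Z, f·Y/Z).

```python
import torch

points = torch.tensor([[[0.1, 0.2, 1.0]]])    # (batch=1, N=1, 3)
rotation = torch.eye(3).unsqueeze(0)          # identity camera rotation
translation = torch.zeros(1, 3)               # camera at the origin
pixels = perspective_projection(points, rotation, translation,
                                focal_length=5000.0,
                                camera_center=torch.zeros(1, 2))
print(pixels)  # tensor([[[ 500., 1000.]]]): 5000 * 0.1 and 5000 * 0.2
```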
] | [
[
"torch.nn.Dropout",
"torch.zeros",
"torch.cat",
"torch.einsum",
"torch.eye",
"torch.from_numpy",
"torch.nn.Linear",
"torch.matmul",
"torch.nn.init.xavier_uniform_",
"torch.stack",
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
soma2000-lang/colour | [
"bb7ee23ac65e09613af78bd18dd98dffb1a2904a",
"bb7ee23ac65e09613af78bd18dd98dffb1a2904a",
"bb7ee23ac65e09613af78bd18dd98dffb1a2904a",
"bb7ee23ac65e09613af78bd18dd98dffb1a2904a",
"bb7ee23ac65e09613af78bd18dd98dffb1a2904a"
] | [
"colour/models/rgb/transfer_functions/canon_log.py",
"colour/models/rgb/datasets/p3_d65.py",
"colour/examples/appearance/examples_llab.py",
"colour/quality/tests/test_tm3018.py",
"colour/models/rgb/datasets/sharp.py"
] | [
"\"\"\"\nCanon Log Encodings\n===================\n\nDefines the *Canon Log* encodings:\n\n- :func:`colour.models.log_encoding_CanonLog`\n- :func:`colour.models.log_decoding_CanonLog`\n- :func:`colour.models.log_encoding_CanonLog2`\n- :func:`colour.models.log_decoding_CanonLog2`\n- :func:`colour.models.log_encoding_CanonLog3`\n- :func:`colour.models.log_decoding_CanonLog3`\n\nNotes\n-----\n- :cite:`Canona` is available as a *Drivers & Downloads* *Software* for\n Windows 10 (x64) *Operating System*, a copy of the archive is hosted at\n this url: https://drive.google.com/open?id=0B_IQZQdc4Vy8ZGYyY29pMEVwZU0\n\nReferences\n----------\n- :cite:`Canona` : Canon. (2016). EOS C300 Mark II - EOS C300 Mark II Input\n Transform Version 2.0 (for Cinema Gamut / BT.2020). Retrieved August 23,\n 2016, from\n https://www.usa.canon.com/internet/portal/us/home/support/details/cameras/cinema-eos/eos-c300-mark-ii\n- :cite:`Thorpe2012a` : Thorpe, L. (2012). CANON-LOG TRANSFER CHARACTERISTIC.\n Retrieved September 25, 2014, from\n http://downloads.canon.com/CDLC/Canon-Log_Transfer_Characteristic_6-20-2012.pdf\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\n\nfrom colour.hints import (\n Boolean,\n FloatingOrArrayLike,\n FloatingOrNDArray,\n Integer,\n)\nfrom colour.models.rgb.transfer_functions import full_to_legal, legal_to_full\nfrom colour.utilities import (\n as_float,\n domain_range_scale,\n from_range_1,\n to_domain_1,\n)\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright (C) 2013-2022 - Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"log_encoding_CanonLog\",\n \"log_decoding_CanonLog\",\n \"log_encoding_CanonLog2\",\n \"log_decoding_CanonLog2\",\n \"log_encoding_CanonLog3\",\n \"log_decoding_CanonLog3\",\n]\n\n\ndef log_encoding_CanonLog(\n x: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n out_normalised_code_value: Boolean = True,\n in_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log* log encoding curve / opto-electronic transfer\n function.\n\n Parameters\n ----------\n x\n Linear data :math:`x`.\n bit_depth\n Bit depth used for conversion.\n out_normalised_code_value\n Whether the *Canon Log* non-linear data is encoded as normalised code\n values.\n in_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n *Canon Log* non-linear data.\n\n References\n ----------\n :cite:`Thorpe2012a`\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n Examples\n --------\n >>> log_encoding_CanonLog(0.18) * 100 # doctest: +ELLIPSIS\n 34.3389651...\n\n The values of *Table 2 Canon-Log Code Values* table in :cite:`Thorpe2012a`\n are obtained as follows:\n\n >>> x = np.array([0, 2, 18, 90, 720]) / 100\n >>> np.around(log_encoding_CanonLog(x) * (2 ** 10 - 1)).astype(np.int)\n array([ 128, 169, 
351, 614, 1016])\n >>> np.around(log_encoding_CanonLog(x, 10, False) * 100, 1)\n array([ 7.3, 12. , 32.8, 62.7, 108.7])\n \"\"\"\n\n x = to_domain_1(x)\n\n if in_reflection:\n x = x / 0.9\n\n with domain_range_scale(\"ignore\"):\n clog = np.where(\n x < log_decoding_CanonLog(0.0730597, bit_depth, False),\n -(0.529136 * (np.log10(-x * 10.1596 + 1)) - 0.0730597),\n 0.529136 * np.log10(10.1596 * x + 1) + 0.0730597,\n )\n\n clog_cv = (\n full_to_legal(clog, bit_depth) if out_normalised_code_value else clog\n )\n\n return as_float(from_range_1(clog_cv))\n\n\ndef log_decoding_CanonLog(\n clog: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n in_normalised_code_value: Boolean = True,\n out_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log* log decoding curve / electro-optical transfer\n function.\n\n Parameters\n ----------\n clog\n *Canon Log* non-linear data.\n bit_depth\n Bit depth used for conversion.\n in_normalised_code_value\n Whether the *Canon Log* non-linear data is encoded with normalised\n code values.\n out_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n Linear data :math:`x`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Thorpe2012a`\n\n Examples\n --------\n >>> log_decoding_CanonLog(34.338965172606912 / 100) # doctest: +ELLIPSIS\n 0.17999999...\n \"\"\"\n\n clog = to_domain_1(clog)\n\n clog = legal_to_full(clog, bit_depth) if in_normalised_code_value else clog\n\n x = np.where(\n clog < 0.0730597,\n -(10 ** ((0.0730597 - clog) / 0.529136) - 1) / 10.1596,\n (10 ** ((clog - 0.0730597) / 0.529136) - 1) / 10.1596,\n )\n\n if out_reflection:\n x = x * 0.9\n\n return as_float(from_range_1(x))\n\n\ndef log_encoding_CanonLog2(\n x: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n out_normalised_code_value: Boolean = True,\n in_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log 2* log encoding curve / opto-electronic transfer\n function.\n\n Parameters\n ----------\n x\n Linear data :math:`x`.\n bit_depth\n Bit depth used for conversion.\n out_normalised_code_value\n Whether the *Canon Log 2* non-linear data is encoded as normalised\n code values.\n in_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n *Canon Log 2* non-linear data.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog2`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n 
:cite:`Canona`\n\n Examples\n --------\n >>> log_encoding_CanonLog2(0.18) * 100 # doctest: +ELLIPSIS\n 39.8254694...\n \"\"\"\n\n x = to_domain_1(x)\n\n if in_reflection:\n x = x / 0.9\n\n with domain_range_scale(\"ignore\"):\n clog2 = np.where(\n x < log_decoding_CanonLog2(0.035388128, bit_depth, False),\n -(0.281863093 * (np.log10(-x * 87.09937546 + 1)) - 0.035388128),\n 0.281863093 * np.log10(x * 87.09937546 + 1) + 0.035388128,\n )\n\n clog2_cv = (\n full_to_legal(clog2, bit_depth) if out_normalised_code_value else clog2\n )\n\n return as_float(from_range_1(clog2_cv))\n\n\ndef log_decoding_CanonLog2(\n clog2: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n in_normalised_code_value: Boolean = True,\n out_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log 2* log decoding curve / electro-optical transfer\n function.\n\n Parameters\n ----------\n clog2\n *Canon Log 2* non-linear data.\n bit_depth\n Bit depth used for conversion.\n in_normalised_code_value\n Whether the *Canon Log 2* non-linear data is encoded with normalised\n code values.\n out_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n Linear data :math:`x`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog2`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Canona`\n\n Examples\n --------\n >>> log_decoding_CanonLog2(39.825469498316735 / 100) # doctest: +ELLIPSIS\n 0.1799999...\n \"\"\"\n\n clog2 = to_domain_1(clog2)\n\n clog2 = (\n legal_to_full(clog2, bit_depth) if in_normalised_code_value else clog2\n )\n\n x = np.where(\n clog2 < 0.035388128,\n -(10 ** ((0.035388128 - clog2) / 0.281863093) - 1) / 87.09937546,\n (10 ** ((clog2 - 0.035388128) / 0.281863093) - 1) / 87.09937546,\n )\n\n if out_reflection:\n x = x * 0.9\n\n return as_float(from_range_1(x))\n\n\ndef log_encoding_CanonLog3(\n x: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n out_normalised_code_value: Boolean = True,\n in_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log 3* log encoding curve / opto-electronic transfer\n function.\n\n Parameters\n ----------\n x\n Linear data :math:`x`.\n bit_depth\n Bit depth used for conversion.\n out_normalised_code_value\n Whether the *Canon Log 3* non-linear data is encoded as normalised code\n values.\n in_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n *Canon Log 3* non-linear data.\n\n Notes\n -----\n - Introspection of the grafting points by Shaw, N. 
(2018) shows that the\n *Canon Log 3* IDT was likely derived from its encoding curve as the\n later is grafted at *+/-0.014*::\n\n >>> clog3 = 0.04076162\n >>> (clog3 - 0.073059361) / 2.3069815\n -0.014000000000000002\n >>> clog3 = 0.105357102\n >>> (clog3 - 0.073059361) / 2.3069815\n 0.013999999999999997\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog3`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Canona`\n\n Examples\n --------\n >>> log_encoding_CanonLog3(0.18) * 100 # doctest: +ELLIPSIS\n 34.3389369...\n \"\"\"\n\n x = to_domain_1(x)\n\n if in_reflection:\n x = x / 0.9\n\n with domain_range_scale(\"ignore\"):\n clog3 = np.select(\n (\n x\n < log_decoding_CanonLog3(0.04076162, bit_depth, False, False),\n x\n <= log_decoding_CanonLog3(\n 0.105357102, bit_depth, False, False\n ),\n x\n > log_decoding_CanonLog3(0.105357102, bit_depth, False, False),\n ),\n (\n -0.42889912 * np.log10(-x * 14.98325 + 1) + 0.07623209,\n 2.3069815 * x + 0.073059361,\n 0.42889912 * np.log10(x * 14.98325 + 1) + 0.069886632,\n ),\n )\n\n clog3_cv = (\n full_to_legal(clog3, bit_depth) if out_normalised_code_value else clog3\n )\n\n return as_float(from_range_1(clog3_cv))\n\n\ndef log_decoding_CanonLog3(\n clog3: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n in_normalised_code_value: Boolean = True,\n out_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log 3* log decoding curve / electro-optical transfer\n function.\n\n Parameters\n ----------\n clog3\n *Canon Log 3* non-linear data.\n bit_depth\n Bit depth used for conversion.\n in_normalised_code_value\n Whether the *Canon Log 3* non-linear data is encoded with normalised\n code values.\n out_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n Linear data :math:`x`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog3`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Canona`\n\n Examples\n --------\n >>> log_decoding_CanonLog3(34.338936938868677 / 100) # doctest: +ELLIPSIS\n 0.1800000...\n \"\"\"\n\n clog3 = to_domain_1(clog3)\n\n clog3 = (\n legal_to_full(clog3, bit_depth) if in_normalised_code_value else clog3\n )\n\n x = np.select(\n (clog3 < 0.04076162, clog3 <= 0.105357102, clog3 > 0.105357102),\n (\n -(10 ** ((0.07623209 - clog3) / 0.42889912) - 1) / 14.98325,\n (clog3 - 0.073059361) / 2.3069815,\n (10 ** ((clog3 - 0.069886632) / 0.42889912) - 1) / 14.98325,\n ),\n )\n\n if out_reflection:\n x = x * 0.9\n\n return as_float(from_range_1(x))\n",
"\"\"\"\nP3-D65 Colourspace\n==================\n\nDefines the *P3-D65* colourspace:\n\n- :attr:`colour.models.RGB_COLOURSPACE_P3_D65`.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\nfrom functools import partial\n\nfrom colour.colorimetry import CCS_ILLUMINANTS\nfrom colour.hints import NDArray\nfrom colour.models.rgb import (\n RGB_Colourspace,\n gamma_function,\n normalised_primary_matrix,\n)\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright (C) 2013-2022 - Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"PRIMARIES_P3_D65\",\n \"WHITEPOINT_NAME_P3_D65\",\n \"CCS_WHITEPOINT_P3_D65\",\n \"MATRIX_P3_D65_TO_XYZ\",\n \"MATRIX_XYZ_TO_P3_D65\",\n \"RGB_COLOURSPACE_P3_D65\",\n]\n\nPRIMARIES_P3_D65: NDArray = np.array(\n [\n [0.6800, 0.3200],\n [0.2650, 0.6900],\n [0.1500, 0.0600],\n ]\n)\n\"\"\"\n*P3-D65* colourspace primaries.\n\"\"\"\n\nWHITEPOINT_NAME_P3_D65: str = \"D65\"\n\"\"\"\n*P3-D65* colourspace whitepoint name.\n\"\"\"\n\nCCS_WHITEPOINT_P3_D65: NDArray = CCS_ILLUMINANTS[\n \"CIE 1931 2 Degree Standard Observer\"\n][WHITEPOINT_NAME_P3_D65]\n\"\"\"\n*P3-D65* colourspace whitepoint chromaticity coordinates.\n\"\"\"\n\nMATRIX_P3_D65_TO_XYZ: NDArray = normalised_primary_matrix(\n PRIMARIES_P3_D65, CCS_WHITEPOINT_P3_D65\n)\n\"\"\"\n*P3-D65* colourspace to *CIE XYZ* tristimulus values matrix.\n\"\"\"\n\nMATRIX_XYZ_TO_P3_D65: NDArray = np.linalg.inv(MATRIX_P3_D65_TO_XYZ)\n\"\"\"\n*CIE XYZ* tristimulus values to *P3-D65* colourspace matrix.\n\"\"\"\n\nRGB_COLOURSPACE_P3_D65: RGB_Colourspace = RGB_Colourspace(\n \"P3-D65\",\n PRIMARIES_P3_D65,\n CCS_WHITEPOINT_P3_D65,\n WHITEPOINT_NAME_P3_D65,\n MATRIX_P3_D65_TO_XYZ,\n MATRIX_XYZ_TO_P3_D65,\n partial(gamma_function, exponent=1 / 2.6),\n partial(gamma_function, exponent=2.6),\n)\nRGB_COLOURSPACE_P3_D65.__doc__ = \"\"\"\n*P3-D65* colourspace.\n\"\"\"\n",
"\"\"\"\nShowcases *LLAB(l:c)* colour appearance model computations.\n\"\"\"\n\nimport numpy as np\n\nimport colour\nfrom colour.appearance.llab import CAM_ReferenceSpecification_LLAB\nfrom colour.utilities import message_box\n\nmessage_box('\"LLAB(l:c)\" Colour Appearance Model Computations')\n\nXYZ = np.array([19.01, 20.00, 21.78])\nXYZ_0 = np.array([95.05, 100.00, 108.88])\nY_b = 20.0\nL = 318.31\nsurround = colour.VIEWING_CONDITIONS_LLAB[\"ref_average_4_minus\"]\nmessage_box(\n f'Converting to the \"LLAB(l:c)\" colour appearance model specification '\n f\"using given parameters:\\n\\n\"\n f\"\\tXYZ: {XYZ}\\n\"\n f\"\\tXYZ_0: {XYZ_0}\\n\"\n f\"\\tY_b: {Y_b}\\n\"\n f\"\\tL: {L}\\n\"\n f\"\\tsurround: {surround}\"\n)\nspecification = colour.XYZ_to_LLAB(XYZ, XYZ_0, Y_b, L, surround)\nprint(specification)\n\nprint(\"\\n\")\n\nmessage_box(\n 'Broadcasting the current output \"LLAB(l:c)\" colour appearance '\n \"model specification to the reference specification.\\n\"\n \"The intent of this reference specification is to provide names \"\n 'as closest as possible to the \"Mark D. Fairchild\" reference.\\n'\n \"The current output specification is meant to be consistent with \"\n \"the other colour appearance model specification by using same \"\n \"argument names for consistency wherever possible.\"\n)\n\nprint(CAM_ReferenceSpecification_LLAB(*specification.values))\n",
"\"\"\"\nDefines the unit tests for the :mod:`colour.quality.tm3018` module.\n\nNotes\n-----\n- Reference data was created using the official Excel spreadsheet, published\n by the IES at this URL:\n http://media.ies.org/docs/errata/TM-30-18_tools_etc.zip.\n\"\"\"\n\nimport numpy as np\nimport unittest\n\nfrom colour.colorimetry import SDS_ILLUMINANTS\nfrom colour.quality.tm3018 import (\n averages_area,\n colour_fidelity_index_ANSIIESTM3018,\n)\nfrom colour.utilities import as_float_array\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright (C) 2013-2022 - Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"TestColourFidelityIndexANSIIESTM3018\",\n \"TestAveragesArea\",\n]\n\n\nclass TestColourFidelityIndexANSIIESTM3018(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.quality.tm3018.colour_fidelity_index_ANSIIESTM3018`\n definition unit tests methods.\n \"\"\"\n\n def test_colour_fidelity_index_ANSIIESTM3018(self):\n \"\"\"\n Tests :func:`colour.quality.tm3018.colour_fidelity_index_ANSIIESTM3018`\n definition.\n \"\"\"\n\n specification = colour_fidelity_index_ANSIIESTM3018(\n SDS_ILLUMINANTS[\"FL2\"], additional_data=True\n )\n\n np.testing.assert_almost_equal(specification.R_f, 70, 0)\n np.testing.assert_almost_equal(specification.R_g, 86, 0)\n np.testing.assert_almost_equal(specification.CCT, 4225, 0)\n np.testing.assert_almost_equal(specification.D_uv, 0.0019, 4)\n\n np.testing.assert_almost_equal(\n specification.R_s,\n [\n 79,\n 59,\n 67,\n 66,\n 36,\n 66,\n 40,\n 35,\n 95,\n 54,\n 48,\n 45,\n 64,\n 87,\n 72,\n 49,\n 56,\n 69,\n 57,\n 44,\n 47,\n 47,\n 80,\n 63,\n 48,\n 59,\n 82,\n 85,\n 62,\n 70,\n 68,\n 62,\n 74,\n 74,\n 86,\n 88,\n 80,\n 76,\n 97,\n 93,\n 91,\n 89,\n 83,\n 99,\n 83,\n 81,\n 87,\n 66,\n 80,\n 81,\n 81,\n 76,\n 69,\n 77,\n 77,\n 66,\n 66,\n 67,\n 79,\n 90,\n 78,\n 87,\n 77,\n 60,\n 61,\n 58,\n 56,\n 62,\n 73,\n 58,\n 64,\n 84,\n 53,\n 96,\n 67,\n 57,\n 76,\n 63,\n 82,\n 85,\n 74,\n 94,\n 91,\n 86,\n 81,\n 64,\n 74,\n 69,\n 66,\n 68,\n 93,\n 51,\n 70,\n 41,\n 62,\n 70,\n 80,\n 67,\n 45,\n ],\n 0,\n )\n\n np.testing.assert_almost_equal(\n specification.R_fs,\n [60, 61, 53, 68, 80, 88, 77, 73, 76, 62, 70, 77, 81, 71, 64, 65],\n 0,\n )\n np.testing.assert_almost_equal(\n specification.R_cs,\n [-25, -18, -9, 5, 11, 4, -8, -15, -17, -15, -4, 5, 11, 7, -6, -16],\n 0,\n )\n np.testing.assert_almost_equal(\n specification.R_hs,\n [\n -0.02,\n 0.14,\n 0.24,\n 0.20,\n 0.09,\n -0.07,\n -0.12,\n -0.08,\n 0.01,\n 0.17,\n 0.19,\n 0.11,\n -0.08,\n -0.15,\n -0.26,\n -0.17,\n ],\n 2,\n )\n\n\nclass TestAveragesArea(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.quality.tm3018.averages_area` definition unit tests\n methods.\n \"\"\"\n\n def test_averages_area(self):\n \"\"\"\n Tests :func:`colour.quality.tm3018.averages_area` definition.\n \"\"\"\n\n # Simple 3 * sqrt(2) by sqrt(2) rectangle.\n rectangle = as_float_array([[2, 1], [1, 2], [-2, -1], [-1, -2]])\n np.allclose(averages_area(rectangle), 6)\n\n # Concave polygon.\n poly = np.array([[1.0, -1], [1, 1], [3, 1], [3, 3], [-1, 3], [-1, -1]])\n np.allclose(averages_area(poly), 12)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nSharp RGB Colourspace\n=====================\n\nDefines the *Sharp RGB* colourspace:\n\n- :attr:`colour.models.RGB_COLOURSPACE_SHARP_RGB`\n\nReferences\n----------\n- :cite:`Susstrunk2000` : Susstrunk, S. E., Holm, J. M., & Finlayson, G. D.\n (2000). Chromatic adaptation performance of different RGB sensors. In R.\n Eschbach & G. G. Marcu (Eds.), Photonics West 2001 - Electronic Imaging\n (Vol. 4300, Issue January, pp. 172-183). doi:10.1117/12.410788\n- :cite:`Ward2002` : Ward, G., & Eydelberg-Vileshin, E. (2002). Picture\n Perfect RGB Rendering Using Spectral Prefiltering and Sharp Color\n Primaries. Eurographics Workshop on Rendering, 117-124.\n doi:10.2312/EGWR/EGWR02/117-124\n- :cite:`Ward2016` : Borer, T. (2017). Private Discussion with Mansencal, T.\n and Shaw, N.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\n\nfrom colour.colorimetry import CCS_ILLUMINANTS\nfrom colour.hints import NDArray\nfrom colour.models.rgb import (\n RGB_Colourspace,\n linear_function,\n normalised_primary_matrix,\n)\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright (C) 2013-2022 - Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"PRIMARIES_SHARP_RGB\",\n \"WHITEPOINT_NAME_SHARP_RGB\",\n \"CCS_WHITEPOINT_SHARP_RGB\",\n \"MATRIX_SHARP_RGB_TO_XYZ\",\n \"MATRIX_XYZ_TO_SHARP_RGB\",\n \"RGB_COLOURSPACE_SHARP_RGB\",\n]\n\nPRIMARIES_SHARP_RGB: NDArray = np.array(\n [\n [0.6898, 0.3206],\n [0.0736, 0.9003],\n [0.1166, 0.0374],\n ]\n)\n\"\"\"\n*Sharp RGB* colourspace primaries.\n\nNotes\n-----\nThe primaries were originally derived from the :math:`M_{Sharp}` matrix as\ngiven in *Ward and Eydelberg-Vileshin (2002)*:\n\n M_Sharp = np.array(\n [[1.2694, -0.0988, -0.1706],\n [-0.8364, 1.8006, 0.0357],\n [0.0297, -0.0315, 1.0018]])\n\n P, W = (\n array([[ 0.68976058, 0.32060751],\n [ 0.07358274, 0.90029055],\n [ 0.1166078 , 0.0373923 ]]),\n array([ 0.33332778, 0.33334544]))\n\nPrivate discussion with Ward (2016) confirmed he used the following primaries\nand whitepoint:\n\n [0.6898, 0.3206, 0.0736, 0.9003, 0.1166, 0.0374, 1 / 3, 1 / 3]\n\"\"\"\n\nWHITEPOINT_NAME_SHARP_RGB: str = \"E\"\n\"\"\"\n*Sharp RGB* colourspace whitepoint name.\n\"\"\"\n\nCCS_WHITEPOINT_SHARP_RGB: NDArray = CCS_ILLUMINANTS[\n \"CIE 1931 2 Degree Standard Observer\"\n][WHITEPOINT_NAME_SHARP_RGB]\n\"\"\"\n*Sharp RGB* colourspace whitepoint chromaticity coordinates.\n\"\"\"\n\nMATRIX_SHARP_RGB_TO_XYZ: NDArray = normalised_primary_matrix(\n PRIMARIES_SHARP_RGB, CCS_WHITEPOINT_SHARP_RGB\n)\n\"\"\"\n*Sharp RGB* colourspace to *CIE XYZ* tristimulus values matrix.\n\"\"\"\n\nMATRIX_XYZ_TO_SHARP_RGB: NDArray = np.linalg.inv(MATRIX_SHARP_RGB_TO_XYZ)\n\"\"\"\n*CIE XYZ* tristimulus values to *Sharp RGB* colourspace matrix.\n\"\"\"\n\nRGB_COLOURSPACE_SHARP_RGB: RGB_Colourspace = RGB_Colourspace(\n \"Sharp RGB\",\n PRIMARIES_SHARP_RGB,\n CCS_WHITEPOINT_SHARP_RGB,\n WHITEPOINT_NAME_SHARP_RGB,\n MATRIX_SHARP_RGB_TO_XYZ,\n MATRIX_XYZ_TO_SHARP_RGB,\n linear_function,\n linear_function,\n)\nRGB_COLOURSPACE_SHARP_RGB.__doc__ = \"\"\"\n*Sharp RGB* colourspace.\n\nReferences\n----------\n:cite:`Susstrunk2000`, :cite:`Ward2002`, :cite:`Ward2016`\n\"\"\"\n"
] | [
[
"numpy.log10",
"numpy.where",
"numpy.select"
],
[
"numpy.linalg.inv",
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.testing.assert_almost_equal",
"numpy.array"
],
[
"numpy.linalg.inv",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chaitanyamalaviya/NeuralFactorGraph | [
"6cd664b7edc43d56c6f1165baa7e7625eb0f7cd8"
] | [
"utils.py"
] | [
"from __future__ import division, print_function\nfrom conllu.parser import parse, parse_tree\nfrom tags import Tags, Tag, Label\n\nimport os\nimport re\nimport math\nimport numpy as np\nimport itertools\nimport pdb\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nnp.set_printoptions(threshold=np.nan)\n\n\nFROZEN_TAG = \"__frozen__\"\n\ndef freeze_dict(obj):\n if isinstance(obj, dict):\n dict_items = list(obj.items())\n dict_items.append((FROZEN_TAG, True))\n return tuple([(k, freeze_dict(v)) for k, v in dict_items])\n return obj\n\ndef unfreeze_dict(obj):\n if isinstance(obj, tuple):\n if (FROZEN_TAG, True) in obj:\n out = dict((k, unfreeze_dict(v)) for k, v in obj)\n del out[FROZEN_TAG]\n return out\n return obj\n\n\ndef get_lang_code_dicts():\n \"\"\"\n Returns lang_to_code, code_to_lang dictionaries\n\n \"\"\"\n lang_to_code = {}\n code_to_lang = {}\n bad_chars = \",''\"\n rgx = re.compile('[%s]' % bad_chars)\n\n with open(\"data/lang_codes.txt\") as f:\n data = f.read()\n lines = data.split(\"\\n\")\n split_line = [line.split() for line in lines]\n for line in split_line[:-2]:\n lang = rgx.sub('', line[0])\n code = rgx.sub('', line[2]) \n lang_to_code[lang] = code\n code_to_lang = {v: k for k, v in lang_to_code.iteritems()}\n return lang_to_code, code_to_lang\n\n\ndef read_conll(treebank_path, langs, code_to_lang, train_or_dev, tgt_size=None, test=False):\n \n \"\"\"\n Reads conll formatted file\n\n langs: list of languages\n train: read training data\n returns: dict with data for each language\n as list of tuples of sentences and morph-tags\n \"\"\"\n\n annot_sents = {}\n unique = []\n for lang in langs:\n\n train = train_or_dev if not test else \"test\"\n\n if not test:\n for file in os.listdir(treebank_path + \"UD_\" + code_to_lang[lang]):\n if file.endswith(\"train.conllu\"):\n filepath = os.path.join(treebank_path + \"UD_\" + code_to_lang[lang], file)\n break\n else:\n for file in os.listdir(treebank_path + \"UD_\" + code_to_lang[lang]):\n if file.endswith(\"dev.conllu\"):\n filepath = os.path.join(treebank_path+ \"UD_\" + code_to_lang[lang], file)\n break\n\n with open(filepath) as f:\n data = f.readlines()[:-1]\n data = [line for line in data if line[0]!='#']\n split_data = \" \".join(data).split(\"\\n \\n\")\n ud = [parse(sent)[0] for sent in split_data]\n\n all_text = []\n all_tags = []\n if langs[-1]==lang and tgt_size:\n tgt_size = min(tgt_size, len(ud))\n ud = ud[:tgt_size]\n for sent in ud:\n sent_text = []\n sent_tags = []\n for word in sent:\n word_tags = {}\n if word['feats']:\n word_tags = dict(word['feats'])\n if word['upostag']:\n if word_tags:\n word_tags.update({'POS':word['upostag']})\n else:\n word_tags = {'POS':word['upostag']}\n \n if word_tags:\n word_tags = freeze_dict(word_tags)\n if word_tags not in unique:\n unique.append(word_tags)\n\n sent_text.append(word['form'])\n sent_tags.append(freeze_dict(word_tags))\n\n all_text.append(sent_text)\n all_tags.append(sent_tags)\n\n annot_sents[lang] = [(w, m) for w, m in zip(all_text, all_tags)]\n\n return annot_sents, unique\n\n\ndef addNullLabels(annot_sents, langs, unique_tags):\n\n for lang in langs:\n i = 0\n for w, m in annot_sents[lang]:\n new_tags = []\n for tags in m:\n tag_dict = unfreeze_dict(tags)\n for tag in unique_tags:\n if tag.name not in tag_dict:\n tag_dict[tag.name] = \"NULL\"\n new_tags.append(freeze_dict(tag_dict))\n\n annot_sents[lang][i] = (w, new_tags)\n i += 
1\n\n return annot_sents\n\n\ndef sortbylength(data, lang_ids, maxlen=500):\n \"\"\"\n :param data: List of tuples of source sentences and morph tags\n :param lang_ids: List of lang IDs for each sentence\n :param maxlen: Maximum sentence length permitted\n :return: Sorted data and sorted langIDs\n \"\"\"\n src = [elem[0] for elem in data]\n tgt = [elem[1] for elem in data]\n indexed_src = [(i,src[i]) for i in range(len(src))]\n sorted_indexed_src = sorted(indexed_src, key=lambda x: -len(x[1]))\n sorted_src = [item[1] for item in sorted_indexed_src if len(item[1])<maxlen]\n sort_order = [item[0] for item in sorted_indexed_src if len(item[1])<maxlen]\n sorted_tgt = [tgt[i] for i in sort_order]\n sorted_lang_ids = [lang_ids[i] for i in sort_order]\n sorted_data = [(src, tgt) for src, tgt in zip(sorted_src, sorted_tgt)]\n\n return sorted_data, sorted_lang_ids\n\n\ndef get_train_order(training_data, batch_size, startIdx=0):\n \"\"\"\n :param data: List of tuples of source sentences and morph tags\n :return: start idxs of batches\n \"\"\"\n\n lengths = [len(elem[0]) for elem in training_data]\n start_idxs = []\n end_idxs = []\n prev_length=-1\n batch_counter = 0\n\n for i, length in enumerate(lengths, start=startIdx):\n \n if length!=prev_length or batch_counter>batch_size:\n start_idxs.append(i)\n if prev_length!=-1:\n end_idxs.append(i-1)\n batch_counter = 1\n\n batch_counter += 1 \n prev_length = length\n\n end_idxs.append(startIdx + len(lengths)-1)\n\n return [(s,e) for s,e in zip(start_idxs, end_idxs)]\n\ndef find_unique_tags(train_data_tags, null_label=False):\n\n unique_tags = Tags()\n\n for tags in train_data_tags:\n for tag, label in unfreeze_dict(tags).items():\n if not unique_tags.tagExists(tag):\n unique_tags.addTag(tag)\n \n curTag = unique_tags.getTagbyName(tag)\n\n if not curTag.labelExists(label):\n curTag.addLabel(label)\n\n # Add null labels to unseen tags in each tag set\n if null_label:\n for tag in unique_tags:\n tag.addLabel(\"NULL\")\n\n return unique_tags\n\n\ndef plot_heatmap(uniqueTags, weights, kind):\n\n font = {'family' : 'normal',\n 'size' : 14,\n 'weight' : 'bold'}\n\n matplotlib.rc('font', **font)\n\n pairs = list(itertools.combinations(range(uniqueTags.size()), 2))\n\n # weights is a ParameterList\n for k, weight in enumerate(weights):\n if kind==\"pair\":\n i, j = pairs[k]\n tag1 = uniqueTags.getTagbyIdx(i)\n tag2 = uniqueTags.getTagbyIdx(j)\n tag1_labels = [label.name for label in tag1.labels]\n tag2_labels = [label.name for label in tag2.labels]\n \n plt.figure(figsize=(20, 18), dpi=80)\n plt.xticks(range(0, len(tag2_labels)), tag2_labels)\n plt.yticks(range(0, len(tag1_labels)), tag1_labels)\n plt.tick_params(labelsize=25)\n plt.xlabel(tag2.name, fontsize=40)\n plt.ylabel(tag1.name, fontsize=50)\n plt.imshow(weight.data.cpu().numpy(), cmap='Reds', interpolation='nearest')\n plt.savefig(\"figures/\" + tag1.name + \"_\" + tag2.name + \".png\", bbox_inches='tight')\n plt.close()\n \n elif kind==\"trans\":\n tag = uniqueTags.getTagbyIdx(k)\n tag_labels = [label.name for label in tag.labels]\n\n plt.figure(figsize=(20, 18), dpi=80)\n plt.xticks(range(0, len(tag_labels)), tag_labels, rotation=45)\n plt.yticks(range(0, len(tag_labels)), tag_labels)\n plt.tick_params(labelsize=40)\n plt.xlabel(tag.name, fontsize=50)\n plt.ylabel(tag.name, fontsize=50)\n plt.imshow(weight.data.cpu().numpy(), cmap='Greys', interpolation='nearest')\n plt.savefig(\"figures/\" + tag.name + \"_\" + tag.name + \".png\", bbox_inches='tight')\n plt.close()\n\n\ndef get_var(x, gpu=False, 
volatile=False):\n x = Variable(x, volatile=volatile)\n if gpu:\n x = x.cuda()\n return x\n\ndef prepare_sequence(seq, to_ix, gpu=False):\n if isinstance(to_ix, dict):\n idxs = [to_ix[w] if w in to_ix else to_ix[\"UNK\"] for w in seq]\n elif isinstance(to_ix, list):\n idxs = [to_ix.index(w) if w in to_ix else to_ix.index(\"UNK\") for w in seq]\n tensor = torch.LongTensor(idxs)\n return get_var(tensor, gpu)\n\ndef to_scalar(var):\n # returns a python float\n return var.view(-1).data.tolist()[0]\n\ndef argmax(vec):\n # return the argmax as a python int\n _, idx = torch.max(vec, 1)\n return to_scalar(idx)\n\ndef logSumExp(a, b):\n maxi = np.maximum(a, b)\n aexp = a - maxi\n bexp = b - maxi\n sumOfExp = np.exp(aexp) + np.exp(bexp)\n return maxi + np.log(sumOfExp)\n\ndef logSumExpTensor(vec):\n # vec -> 16, tag_size\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast), 1))\n\ndef logSumExpTensors(a, b):\n\n maxi = torch.max(a, b)\n aexp = a - maxi\n bexp = b - maxi\n sumOfExp = torch.exp(aexp) + torch.exp(bexp)\n return maxi + torch.log(sumOfExp)\n\ndef logDot(a, b, redAxis=None):\n\n if redAxis==1:\n b = b.transpose()\n\n max_a = np.amax(a)\n max_b = np.amax(b)\n\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n # else:\n # np.log(C + 1e-300, out=C)\n\n C += max_a + max_b\n\n return C.transpose() if redAxis==1 else C\n\n\ndef logMax(a, b, redAxis=None):\n\n if redAxis==1:\n b = b.transpose()\n\n max_a = np.amax(a)\n max_b = np.amax(b)\n\n C = np.max(np.exp(a[:, :, None]-max_a) * np.exp(b[None, :, :]-max_b), axis=1)\n\n # if np.isfinite(C).all():\n np.log(C, out=C)\n # else:\n # np.log(C + 1e-300, out=C)\n\n C += max_a + max_b\n\n return C.transpose() if redAxis==1 else C\n\ndef logNormalize(a):\n\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose()- denom).transpose()\n\ndef logNormalizeTensor(a):\n\n denom = logSumExpTensor(a)\n if len(a.size())==2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size())==3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])\n\n return (a-denom)\n\ndef computeF1(hyps, golds, prefix, labels_to_ix=None, baseline=False, write_results=False):\n \"\"\"\n hyps: List of dicts for predicted morphological tags\n golds: List of dicts for gold morphological tags\n \"\"\"\n\n f1_precision_scores = {}\n f1_precision_total = {}\n f1_recall_scores = {}\n f1_recall_total = {}\n f1_average = 0.0\n \n if baseline:\n hyps = [unfreeze_dict(h) for h in hyps]\n golds = [unfreeze_dict(t) for t in golds]\n\n # calculate precision\n for i, word_tags in enumerate(hyps, start=0):\n for k, v in word_tags.items():\n if v==\"NULL\":\n continue\n if k not in f1_precision_scores:\n f1_precision_scores[k] = 0\n f1_precision_total[k] = 0\n if k in golds[i]:\n if v==golds[i][k]:\n f1_precision_scores[k] += 1\n f1_precision_total[k] += 1\n \n f1_micro_precision = sum(f1_precision_scores.values())/sum(f1_precision_total.values())\n\n for k in f1_precision_scores.keys():\n f1_precision_scores[k] = f1_precision_scores[k]/f1_precision_total[k]\n \n # calculate recall\n for i, word_tags in enumerate(golds, start=0):\n for k, v in word_tags.items():\n if v==\"NULL\":\n continue\n if k not in f1_recall_scores:\n f1_recall_scores[k] = 0\n f1_recall_total[k] = 0\n if k in hyps[i]:\n if v==hyps[i][k]:\n f1_recall_scores[k] += 1\n 
f1_recall_total[k] += 1\n\n f1_micro_recall = sum(f1_recall_scores.values())/sum(f1_recall_total.values())\n\n f1_scores = {}\n for k in f1_recall_scores.keys():\n f1_recall_scores[k] = f1_recall_scores[k]/f1_recall_total[k]\n \n if f1_recall_scores[k]==0 or k not in f1_precision_scores:\n f1_scores[k] = 0\n else:\n f1_scores[k] = 2 * (f1_precision_scores[k] * f1_recall_scores[k]) / (f1_precision_scores[k] + f1_recall_scores[k])\n\n f1_average += f1_recall_total[k] * f1_scores[k]\n\n f1_average /= sum(f1_recall_total.values())\n f1_micro_score = 2 * (f1_micro_precision * f1_micro_recall) / (f1_micro_precision + f1_micro_recall)\n\n\n if write_results:\n print(\"Writing F1 scores...\")\n with open(prefix + '_results_f1.txt', 'ab') as file:\n file.write(pickle.dumps(f1_scores))\n file.write(\"\\nMacro-averaged F1 Score: \" + str(f1_average))\n file.write(\"\\nMicro-averaged F1 Score: \" + str(f1_micro_score))\n\n\n return f1_average, f1_micro_score\n\n\ndef getCorrectCount(golds, hyps):\n\n correct = 0\n\n for i, word_tags in enumerate(golds, start=0):\n allCorrect = True\n for k, v in word_tags.items():\n if k in hyps[i]:\n if v!=hyps[i][k]:\n allCorrect = False\n break\n\n if allCorrect==True:\n correct += 1\n\n return correct\n"
] | [
[
"numpy.amax",
"torch.max",
"numpy.exp",
"torch.autograd.Variable",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"torch.LongTensor",
"numpy.log",
"matplotlib.pyplot.savefig",
"torch.exp",
"torch.log",
"numpy.logaddexp.reduce",
"matplotlib.rc",
"matplotlib.pyplot.ylabel",
"numpy.maximum",
"matplotlib.use",
"numpy.set_printoptions",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tick_params"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bwingert/ProDy | [
"7377a20b4a4841ec59dccaa93fa58e2ee0fe89bc"
] | [
"prody/utilities/catchall.py"
] | [
"\"\"\"This module defines miscellaneous utility functions that is public to users.\"\"\"\n\nimport numpy as np\nfrom numpy import unique, linalg, diag, sqrt, dot\n\nfrom Bio.Phylo.BaseTree import Tree, Clade\n\nfrom prody import PY3K\nfrom .misctools import addEnds, interpY, index, isListLike\nfrom .checkers import checkCoords\nfrom .logger import LOGGER\n\n\n__all__ = ['calcTree', 'clusterMatrix', 'showLines', 'showMatrix', \n 'reorderMatrix', 'findSubgroups', 'getCoords', \n 'getLinkage', 'getTreeFromLinkage', 'clusterSubfamilies']\n\nclass LinkageError(Exception):\n pass\n\ndef clusterSubfamilies(similarities, n_clusters=0, linkage='all', method='tsne', cutoff=0.0, **kwargs):\n \"\"\"Perform clustering based on members of the *ensemble* projected into lower a reduced\n dimension.\n \n :arg similarities: a matrix of similarities for each structure in the ensemble, such as\n RMSD-matrix, dynamics-based spectral overlap, sequence similarity\n :type similarities: :class:`~numpy.ndarray`\n\n :arg n_clusters: the number of clusters to generate. If **0**, will scan a range of \n number of clusters and return the best one based on highest\n silhouette score. Default is **0**.\n :type n_clusters: int\n\n :arg linkage: if **all**, will test all linkage types (ward, average, complete,\n single). Otherwise will use only the one(s) given as input. Default is\n **all**.\n :type linkage: str, list, tuple, :class:`~numpy.ndarray`\n\n :arg method: if set to **spectral**, will generate a Kirchoff matrix based on the \n cutoff value given and use that as input as clustering instead of\n the values themselves. Default is **tsne**.\n :type method: str\n\n :arg cutoff: only used if *method* is set to **spectral**. This value is used for \n generating the Kirchoff matrix to use for generating clusters when\n doing spectral clustering. Default is **0.0**.\n :type cutoff: float\n \"\"\"\n\n # Import necessary packages\n try:\n from sklearn.manifold import SpectralEmbedding\n from sklearn.cluster import AgglomerativeClustering\n from sklearn.metrics import silhouette_score\n from sklearn.manifold import TSNE\n except ImportError:\n raise ImportError('need sklearn module')\n '''\n try: \n import Bio \n except ImportError:\n raise ImportError('Phylo module could not be imported. 
'\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n '''\n \n\n # Check inputs to make sure are of valid types/values\n if not isinstance(similarities, np.ndarray):\n raise TypeError('similarities should be a numpy ndarray')\n\n dim = similarities.shape\n if dim[0] != dim[1]:\n raise ValueError('similarities must be a square matrix')\n\n if n_clusters != 0:\n if not isinstance(n_clusters, int):\n raise TypeError('clusters must be an instance of int')\n if n_clusters < 1:\n raise ValueError('clusters must be a positive integer')\n elif n_clusters > similarities.shape[0]:\n raise ValueError('clusters can\\'t be longer than similarities matrix')\n nclusts = range(n_clusters,n_clusters+1)\n else:\n nclusts = range(2,10,1)\n\n if linkage != 'all':\n # Check if given input for linkage is list-like\n if isListLike(linkage):\n for val in linkage:\n if val.lower() not in ['ward', 'average', 'complete', 'single']:\n raise ValueError('linkage must be one or more of: \\'ward\\', \\'average\\', \\'complete\\', or \\'single\\'')\n if len(linkage) > 4:\n raise ValueError('linkage must be one or more of: \\'ward\\', \\'average\\', \\'complete\\', or \\'single\\'')\n linkages = [ x.lower() for x in linkage ]\n\n # If not, check if it is a valid string and method name\n else:\n if not isinstance(linkage, str):\n raise TypeError('linkage must be an instance of str or list-like of strs')\n\n if linkage not in ['ward', 'average', 'complete', 'single']:\n raise ValueError('linkage must one or more of: \\'ward\\', \\'average\\', \\'complete\\', or \\'single\\'')\n\n linkages = [linkage]\n else:\n linkages = ['ward', 'average', 'complete', 'single']\n\n if method != 'tsne':\n if not isinstance(method, str):\n raise TypeError('method must be an instance of str')\n if method != 'spectral':\n raise ValueError('method must be either \\'tsne\\' or \\'spectral\\'')\n\n if not isinstance(cutoff, float):\n raise TypeError('cutoff must be an instance of float')\n\n best_score = -1\n best_nclust = 0\n best_link = ''\n best_labels = []\n\n # Scan over range of clusters\n for x in nclusts:\n if method == 'tsne':\n embedding = TSNE(n_components=2)\n transform = embedding.fit_transform(similarities)\n\n else:\n kirchhoff = np.where(similarities > cutoff, 0, -1)\n embedding = SpectralEmbedding(n_components=2)\n transform = embedding.fit_transform(kirchhoff)\n\n for link in linkages:\n clustering = AgglomerativeClustering(linkage=link, n_clusters=x)\n clustering.fit(transform)\n\n silhouette_avg = silhouette_score(transform, clustering.labels_)\n \n if silhouette_avg > best_score:\n best_score = silhouette_avg\n best_nclust = x\n best_link = link\n best_labels = clustering.labels_\n\n\n return best_labels\n\ndef getCoords(data):\n\n try:\n data = (data._getCoords() if hasattr(data, '_getCoords') else\n data.getCoords())\n except AttributeError:\n try:\n checkCoords(data)\n except TypeError:\n raise TypeError('data must be a Numpy array or an object '\n 'with `getCoords` method')\n\n return data\n\ndef getLinkage(names, tree):\n \"\"\" Obtain the :func:`~scipy.cluster.hierarchy.linkage` matrix encoding \n ``tree``. 
\n \n :arg names: a list of names, the order determines the values in the \n linkage matrix\n :type names: list, :class:`~numpy.ndarray`\n\n :arg tree: tree to be converted\n :type tree: :class:`~Bio.Phylo.BaseTree.Tree`\n \"\"\"\n\n tree_terminals = tree.get_terminals()\n\n if len(tree_terminals) != len(names):\n raise ValueError('inconsistent number of terminals in tree and names')\n \n terminals = [None] * len(names)\n for clade in tree_terminals:\n i = index(names, clade.name)\n terminals[i] = clade\n\n n = len(terminals)\n nonterminals = [c for c in reversed(tree.get_nonterminals())]\n if len(nonterminals) != n-1:\n raise LinkageError('wrong number of terminal clades')\n\n Z = np.zeros((n-1, 4))\n\n root = tree.root\n\n def _indexOfClade(clade):\n if clade.is_terminal():\n i = index(terminals, clade)\n else:\n i = index(nonterminals, clade) + n\n return i\n\n def _height_of(clade):\n if clade.is_terminal():\n height = 0 \n else:\n height = max(_height_of(c) + c.branch_length for c in clade.clades)\n\n return height\n\n def _dfs(clade):\n if clade.is_terminal():\n return\n\n i = _indexOfClade(clade)\n clade_a = clade.clades[0]\n clade_b = clade.clades[1]\n\n a = _indexOfClade(clade_a)\n b = _indexOfClade(clade_b) \n\n l = min(a, b)\n r = max(a, b)\n\n Z[i-n, 0] = l\n Z[i-n, 1] = r\n Z[i-n, 2] = _height_of(clade) * 2.\n Z[i-n, 3] = clade.count_terminals()\n\n _dfs(clade_a)\n _dfs(clade_b)\n \n _dfs(root)\n\n return Z\n\ndef getTreeFromLinkage(names, linkage):\n \"\"\" Obtain the tree encoded by ``linkage``. \n \n :arg names: a list of names, the order should correspond to the values in \n linkage\n :type names: list, :class:`~numpy.ndarray`\n\n :arg linkage: linkage matrix\n :type linkage: :class:`~numpy.ndarray`\n \"\"\"\n try: \n import Bio \n except ImportError:\n raise ImportError('Phylo module could not be imported. '\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n\n from Bio.Phylo.BaseTree import Tree, Clade\n \n if not isinstance(linkage, np.ndarray):\n raise TypeError('linkage must be a numpy.ndarray instance')\n\n if linkage.ndim != 2:\n raise LinkageError('linkage must be a 2-dimensional matrix')\n\n if linkage.shape[1] != 4:\n raise LinkageError('linkage must have exactly 4 columns')\n\n n_terms = len(names)\n if linkage.shape[0] != n_terms-1:\n raise LinkageError('linkage must have exactly len(names)-1 rows')\n \n clades = []\n heights = []\n for name in names:\n clade = Clade(None, name)\n clades.append(clade)\n heights.append(0.)\n\n for link in linkage:\n l = int(link[0])\n r = int(link[1])\n height = link[2]\n\n left = clades[l]\n right = clades[r]\n\n lh = heights[l]\n rh = heights[r]\n\n left.branch_length = height - lh\n right.branch_length = height - rh\n\n clade = Clade(None, None)\n clade.clades.append(left)\n clade.clades.append(right)\n\n clades.append(clade)\n heights.append(height)\n\n return Tree(clade)\n\ndef calcTree(names, distance_matrix, method='upgma', linkage=False):\n \"\"\" Given a distance matrix, it creates an returns a tree structure.\n\n :arg names: a list of names\n :type names: list, :class:`~numpy.ndarray`\n\n :arg distance_matrix: a square matrix with length of ensemble. If numbers does not match *names*\n it will raise an error\n :type distance_matrix: :class:`~numpy.ndarray`\n\n :arg method: method used for constructing the tree. Acceptable options are ``\"upgma\"``, ``\"nj\"``, \n or methods supported by :func:`~scipy.cluster.hierarchy.linkage` such as ``\"single\"``, \n ``\"average\"``, ``\"ward\"``, etc. 
Default is ``\"upgma\"``\n :type method: str\n\n :arg linkage: whether the linkage matrix is returned. Note that NJ trees do not support linkage\n :type linkage: bool\n \"\"\"\n try: \n import Bio \n except ImportError:\n raise ImportError('Phylo module could not be imported. '\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n \n from .TreeConstruction import DistanceMatrix, DistanceTreeConstructor\n \n if len(names) != distance_matrix.shape[0] or len(names) != distance_matrix.shape[1]:\n raise ValueError(\"Mismatch between the sizes of matrix and names.\")\n \n method = method.lower().strip()\n\n if method in ['ward', 'single', 'average', 'weighted', 'centroid', 'median']:\n from scipy.cluster.hierarchy import linkage as hlinkage\n from scipy.spatial.distance import squareform\n \n Z = hlinkage(squareform(distance_matrix), method=method)\n tree = getTreeFromLinkage(names, Z)\n else:\n matrix = []\n k = 1\n Z = None\n for row in distance_matrix:\n matrix.append(list(row[:k]))\n k = k + 1\n \n if isinstance(names, np.ndarray):\n names = names.tolist()\n dm = DistanceMatrix(names, matrix)\n constructor = DistanceTreeConstructor()\n\n method = method.strip().lower()\n if method == 'nj':\n tree = constructor.nj(dm)\n elif method == 'upgma':\n tree = constructor.upgma(dm)\n if linkage:\n Z = getLinkage(names, tree)\n else:\n raise ValueError('Method can be only either \"nj\", \"upgma\" or '\n 'hierarchical clustering such as \"single\", \"average\", etc.')\n\n for node in tree.get_nonterminals():\n node.name = None\n\n if linkage:\n return tree, Z\n else:\n return tree\n\ndef writeTree(filename, tree, format_str='newick'):\n \"\"\" Write a tree to file using Biopython.\n\n :arg filename: name for output file\n :type filename: str\n\n :arg tree: a square matrix with length of ensemble. If numbers does not match *names*\n it will raise an error\n :type tree: :class:`~Bio.Phylo.BaseTree.Tree`\n\n :arg format_str: a string specifying the format for the tree\n :type format_str: str\n \"\"\"\n try: \n from Bio import Phylo\n except ImportError:\n raise ImportError('Phylo module could not be imported. '\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n\n if not isinstance(filename, str):\n raise TypeError('filename should be a string')\n\n if not isinstance(tree, Phylo.BaseTree.Tree):\n raise TypeError('tree should be a Biopython.Phylo Tree object')\n\n if not isinstance(format_str, str):\n raise TypeError('format_str should be a string')\n\n Phylo.write(tree, filename, format_str)\n\n\ndef clusterMatrix(distance_matrix=None, similarity_matrix=None, labels=None, return_linkage=None, **kwargs):\n \"\"\"\n Cluster a distance matrix using scipy.cluster.hierarchy and \n return the sorted matrix, indices used for sorting, sorted labels (if **labels** are passed), \n and linkage matrix (if **return_linkage** is **True**). Set ``similarity=True`` for clustering a similarity matrix\n \n :arg distance_matrix: an N-by-N matrix containing some measure of distance \n such as 1. 
- seqid_matrix, rmsds, or distances in PCA space\n :type similarity_matrix: :class:`~numpy.ndarray`\n\n :arg similarity_matrix: an N-by-N matrix containing some measure of similarity \n such as sequence identity, mode-mode overlap, or spectral overlap\n :type similarity_matrix: :class:`~numpy.ndarray`\n \n :arg labels: labels for each matrix row that can be returned sorted\n :type labels: list\n\n :arg no_plot: if **True**, don't plot the dendrogram.\n default is **True**\n :type no_plot: bool\n \n :arg reversed: if set to **True**, then the sorting indices will be reversed.\n :type reversed: bool\n\n Other arguments for :func:`~scipy.hierarchy.linkage` and :func:`~scipy.hierarchy.dendrogram`\n can also be provided and will be taken as **kwargs**.\n \"\"\"\n\n import scipy.cluster.hierarchy as sch\n from scipy import spatial\n if similarity_matrix is None and distance_matrix is None:\n raise ValueError('Please provide a distance matrix or a similarity matrix')\n \n orientation = kwargs.pop('orientiation', 'right')\n reversed = kwargs.pop('reversed', False)\n no_plot = kwargs.pop('no_plot', True)\n\n if distance_matrix is None:\n matrix = similarity_matrix\n distance_matrix = 1. - similarity_matrix\n else:\n matrix = distance_matrix\n \n formatted_distance_matrix = spatial.distance.squareform(distance_matrix)\n linkage_matrix = sch.linkage(formatted_distance_matrix, **kwargs)\n sorting_dendrogram = sch.dendrogram(linkage_matrix, orientation=orientation, labels=labels, no_plot=no_plot)\n\n indices = sorting_dendrogram['leaves']\n sorted_labels = sorting_dendrogram['ivl']\n\n if reversed:\n indices = indices[::-1]\n sorted_labels = sorted_labels[::-1]\n \n sorted_matrix = matrix[indices, :]\n sorted_matrix = sorted_matrix[:, indices]\n \n return_vals = [sorted_matrix, indices]\n\n if labels is not None:\n return_vals.append(sorted_labels)\n if return_linkage:\n return_vals.append(linkage_matrix)\n return tuple(return_vals) # convert to tuple to avoid [pylint] E0632:Possible unbalanced tuple unpacking\n\ndef showLines(*args, **kwargs):\n \"\"\"\n Show 1-D data using :func:`~matplotlib.axes.Axes.plot`. \n \n :arg x: (optional) x coordinates. *x* can be an 1-D array or a 2-D matrix of \n column vectors.\n :type x: :class:`~numpy.ndarray`\n\n :arg y: data array. *y* can be an 1-D array or a 2-D matrix of \n column vectors.\n :type y: :class:`~numpy.ndarray`\n\n :arg dy: an array of variances of *y* which will be plotted as a \n band along *y*. It should have the same shape with *y*.\n :type dy: :class:`~numpy.ndarray`\n\n :arg lower: an array of lower bounds which will be plotted as a \n band along *y*. It should have the same shape with *y* and should be \n paired with *upper*.\n :type lower: :class:`~numpy.ndarray`\n\n :arg upper: an array of upper bounds which will be plotted as a \n band along *y*. It should have the same shape with *y* and should be \n paired with *lower*.\n :type upper: :class:`~numpy.ndarray`\n\n :arg alpha: the transparency of the band(s) for plotting *dy*.\n :type alpha: float\n\n :arg beta: the transparency of the band(s) for plotting *miny* and *maxy*.\n :type beta: float\n\n :arg ticklabels: user-defined tick labels for x-axis.\n :type ticklabels: list\n \"\"\"\n \n # note for developers: this function serves as a low-level \n # plotting function which provides basic utilities for other \n # plotting functions. 
Therefore showFigure is not handled \n # in this function as it should be already handled in the caller.\n\n ticklabels = kwargs.pop('ticklabels', None)\n dy = kwargs.pop('dy', None)\n miny = kwargs.pop('lower', None)\n maxy = kwargs.pop('upper', None)\n alpha = kwargs.pop('alpha', 0.5)\n beta = kwargs.pop('beta', 0.25)\n gap = kwargs.pop('gap', False)\n labels = kwargs.pop('label', None)\n\n from matplotlib import cm, ticker\n from matplotlib.pyplot import figure, gca, xlim\n\n ax = gca()\n lines = ax.plot(*args, **kwargs)\n\n polys = []\n \n for i, line in enumerate(lines):\n color = line.get_color()\n x, y = line.get_data()\n \n if gap:\n x_new, y_new = addEnds(x, y)\n line.set_data(x_new, y_new)\n else:\n x_new, y_new = x, y\n \n if labels is not None:\n if np.isscalar(labels):\n line.set_label(labels)\n else:\n try:\n line.set_label(labels[i])\n except IndexError:\n raise ValueError('The number of labels ({0}) and that of y ({1}) do not match.'\n .format(len(labels), len(line)))\n \n # the following function needs to be here so that line exists\n def sub_array(a, i, tag='a'):\n ndim = 0\n if a is not None:\n if np.isscalar(a[0]):\n ndim = 1 # a plain list (array)\n else:\n ndim = 2 # a nested list (array)\n else:\n return None\n\n if ndim == 1:\n _a = a\n else:\n try:\n _a = a[i]\n except IndexError:\n raise ValueError('The number of {2} ({0}) and that of y ({1}) do not match.'\n .format(len(miny), len(line), tag))\n\n if len(_a) != len(y):\n raise ValueError('The shapes of {2} ({0}) and y ({1}) do not match.'\n .format(len(_miny), len(y), tag))\n return _a\n\n if miny is not None and maxy is not None:\n _miny = sub_array(miny, i)\n _maxy = sub_array(maxy, i)\n\n if gap:\n _, _miny = addEnds(x, _miny)\n _, _maxy = addEnds(x, _maxy)\n \n poly = ax.fill_between(x_new, _miny, _maxy,\n alpha=beta, facecolor=color, edgecolor=None,\n linewidth=1, antialiased=True)\n polys.append(poly)\n\n if dy is not None:\n _dy = sub_array(dy, i)\n\n if gap:\n _, _dy = addEnds(x, _dy)\n \n poly = ax.fill_between(x_new, y_new-_dy, y_new+_dy,\n alpha=alpha, facecolor=color, edgecolor=None,\n linewidth=1, antialiased=True)\n polys.append(poly)\n\n ax.margins(x=0)\n if ticklabels is not None:\n if callable(ticklabels):\n ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(ticklabels))\n else:\n ax.get_xaxis().set_major_formatter(ticker.IndexFormatter(ticklabels))\n \n ax.xaxis.set_major_locator(ticker.AutoLocator())\n ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n\n return lines, polys\n\ndef showMatrix(matrix, x_array=None, y_array=None, **kwargs):\n \"\"\"Show a matrix using :meth:`~matplotlib.axes.Axes.imshow`. Curves on x- and y-axis can be added.\n\n :arg matrix: matrix to be displayed\n :type matrix: :class:`~numpy.ndarray`\n\n :arg x_array: data to be plotted above the matrix\n :type x_array: :class:`~numpy.ndarray`\n\n :arg y_array: data to be plotted on the left side of the matrix\n :type y_array: :class:`~numpy.ndarray`\n\n :arg percentile: a percentile threshold to remove outliers, i.e. 
only showing data within *p*-th \n to *100-p*-th percentile\n :type percentile: float\n\n :arg interactive: turn on or off the interactive options\n :type interactive: bool\n\n :arg xtickrotation: how much to rotate the xticklabels in degrees\n default is 0\n :type xtickrotation: float\n \"\"\"\n\n from matplotlib import ticker\n from matplotlib.gridspec import GridSpec\n from matplotlib.collections import LineCollection\n from matplotlib.pyplot import gca, sca, sci, colorbar, subplot\n\n from .drawtools import drawTree\n\n p = kwargs.pop('percentile', None)\n vmin = vmax = None\n if p is not None:\n vmin = np.percentile(matrix, p)\n vmax = np.percentile(matrix, 100-p)\n \n vmin = kwargs.pop('vmin', vmin)\n vmax = kwargs.pop('vmax', vmax)\n vcenter = kwargs.pop('vcenter', None)\n norm = kwargs.pop('norm', None)\n\n if vcenter is not None and norm is None:\n if PY3K:\n try:\n from matplotlib.colors import DivergingNorm\n except ImportError:\n from matplotlib.colors import TwoSlopeNorm as DivergingNorm\n\n norm = DivergingNorm(vmin=vmin, vcenter=0., vmax=vmax)\n else:\n LOGGER.warn('vcenter cannot be used in Python 2 so norm remains None')\n\n lw = kwargs.pop('linewidth', 1)\n \n W = H = kwargs.pop('ratio', 6)\n\n ticklabels = kwargs.pop('ticklabels', None)\n xticklabels = kwargs.pop('xticklabels', ticklabels)\n yticklabels = kwargs.pop('yticklabels', ticklabels)\n\n xtickrotation = kwargs.pop('xtickrotation', 0.)\n\n show_colorbar = kwargs.pop('colorbar', True)\n cb_extend = kwargs.pop('cb_extend', 'neither')\n allticks = kwargs.pop('allticks', False) # this argument is temporary and will be replaced by better implementation\n interactive = kwargs.pop('interactive', True)\n\n cmap = kwargs.pop('cmap', 'jet')\n origin = kwargs.pop('origin', 'lower')\n\n try: \n from Bio import Phylo\n except ImportError:\n raise ImportError('Phylo module could not be imported. 
'\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n tree_mode_y = isinstance(y_array, Phylo.BaseTree.Tree)\n tree_mode_x = isinstance(x_array, Phylo.BaseTree.Tree)\n\n if x_array is not None and y_array is not None:\n nrow = 2; ncol = 2\n i = 1; j = 1\n width_ratios = [1, W]\n height_ratios = [1, H]\n aspect = 'auto'\n elif x_array is not None and y_array is None:\n nrow = 2; ncol = 1\n i = 1; j = 0\n width_ratios = [W]\n height_ratios = [1, H]\n aspect = 'auto'\n elif x_array is None and y_array is not None:\n nrow = 1; ncol = 2\n i = 0; j = 1\n width_ratios = [1, W]\n height_ratios = [H]\n aspect = 'auto'\n else:\n nrow = 1; ncol = 1\n i = 0; j = 0\n width_ratios = [W]\n height_ratios = [H]\n aspect = kwargs.pop('aspect', None)\n\n main_index = (i, j)\n upper_index = (i-1, j)\n left_index = (i, j-1)\n\n complex_layout = nrow > 1 or ncol > 1\n\n ax1 = ax2 = ax3 = None\n\n if complex_layout:\n gs = GridSpec(nrow, ncol, width_ratios=width_ratios, \n height_ratios=height_ratios, hspace=0., wspace=0.)\n\n ## draw matrix\n if complex_layout:\n ax3 = subplot(gs[main_index])\n else:\n ax3 = gca()\n \n im = ax3.imshow(matrix, aspect=aspect, vmin=vmin, vmax=vmax, \n norm=norm, cmap=cmap, origin=origin, **kwargs)\n \n #ax3.set_xlim([-0.5, matrix.shape[0]+0.5])\n #ax3.set_ylim([-0.5, matrix.shape[1]+0.5])\n\n if xticklabels is not None:\n ax3.xaxis.set_major_formatter(ticker.IndexFormatter(xticklabels))\n if yticklabels is not None and ncol == 1:\n ax3.yaxis.set_major_formatter(ticker.IndexFormatter(yticklabels))\n\n if allticks:\n ax3.xaxis.set_major_locator(ticker.IndexLocator(offset=0.5, base=1.))\n ax3.yaxis.set_major_locator(ticker.IndexLocator(offset=0.5, base=1.))\n else:\n locator = ticker.AutoLocator()\n locator.set_params(integer=True)\n minor_locator = ticker.AutoMinorLocator()\n\n ax3.xaxis.set_major_locator(locator)\n ax3.xaxis.set_minor_locator(minor_locator)\n\n locator = ticker.AutoLocator()\n locator.set_params(integer=True)\n minor_locator = ticker.AutoMinorLocator()\n \n ax3.yaxis.set_major_locator(locator)\n ax3.yaxis.set_minor_locator(minor_locator)\n\n if ncol > 1:\n ax3.yaxis.set_major_formatter(ticker.NullFormatter())\n \n ## draw x_ and y_array\n lines = []\n\n if nrow > 1:\n ax1 = subplot(gs[upper_index])\n\n if tree_mode_x:\n Y, X = drawTree(x_array, label_func=None, orientation='vertical', \n inverted=True)\n miny = min(Y.values())\n maxy = max(Y.values())\n\n minx = min(X.values())\n maxx = max(X.values())\n\n ax1.set_xlim(minx-.5, maxx+.5)\n ax1.set_ylim(miny, 1.05*maxy)\n else:\n ax1.set_xticklabels([])\n \n y = x_array\n xp, yp = interpY(y)\n points = np.array([xp, yp]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lcy = LineCollection(segments, array=yp, linewidths=lw, cmap=cmap)\n lines.append(lcy)\n ax1.add_collection(lcy)\n\n ax1.set_xlim(xp.min()-.5, xp.max()+.5)\n ax1.set_ylim(yp.min(), yp.max())\n\n if ax3.xaxis_inverted():\n ax2.invert_xaxis()\n\n ax1.axis('off')\n\n if ncol > 1:\n ax2 = subplot(gs[left_index])\n \n if tree_mode_y:\n X, Y = drawTree(y_array, label_func=None, inverted=True)\n miny = min(Y.values())\n maxy = max(Y.values())\n\n minx = min(X.values())\n maxx = max(X.values())\n\n ax2.set_ylim(miny-.5, maxy+.5)\n ax2.set_xlim(minx, 1.05*maxx)\n else:\n ax2.set_xticklabels([])\n \n y = y_array\n xp, yp = interpY(y)\n points = np.array([yp, xp]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lcx = LineCollection(segments, array=yp, linewidths=lw, 
cmap=cmap)\n lines.append(lcx)\n ax2.add_collection(lcx)\n ax2.set_xlim(yp.min(), yp.max())\n ax2.set_ylim(xp.min()-.5, xp.max()+.5)\n \n ax2.invert_xaxis()\n\n if ax3.yaxis_inverted():\n ax2.invert_yaxis()\n\n ax2.axis('off')\n\n ## draw colorbar\n sca(ax3)\n cb = None\n if show_colorbar:\n if nrow > 1:\n axes = [ax1, ax2, ax3]\n while None in axes:\n axes.remove(None)\n s = H / (H + 1.)\n cb = colorbar(mappable=im, ax=axes, anchor=(0, 0), shrink=s, extend=cb_extend)\n else:\n cb = colorbar(mappable=im, extend=cb_extend)\n\n sca(ax3)\n sci(im)\n\n if interactive:\n from prody.utilities import ImageCursor\n from matplotlib.pyplot import connect\n cursor = ImageCursor(ax3, im)\n connect('button_press_event', cursor.onClick)\n\n ax3.tick_params(axis='x', rotation=xtickrotation)\n\n return im, lines, cb\n\ndef reorderMatrix(names, matrix, tree, axis=None):\n \"\"\"\n Reorder a matrix based on a tree and return the reordered matrix \n and indices for reordering other things.\n\n :arg names: a list of names associated with the rows of the matrix\n These names must match the ones used to generate the tree\n :type names: list\n\n :arg matrix: any square matrix\n :type matrix: :class:`~numpy.ndarray`\n\n :arg tree: any tree from :func:`calcTree`\n :type tree: :class:`~Bio.Phylo.BaseTree.Tree`\n\n :arg axis: along which axis the matrix should be reordered. \n Default is **None** which reorder along all the axes\n :type axis: int\n \"\"\"\n\n try:\n from Bio import Phylo\n except ImportError:\n raise ImportError('Phylo module could not be imported. '\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n\n try:\n if matrix.ndim != 2:\n raise ValueError('matrix should be a 2D matrix.')\n except AttributeError:\n raise TypeError('matrix should be a numpy array.')\n\n if np.shape(matrix)[0] != np.shape(matrix)[1]:\n raise ValueError('matrix should be a square matrix')\n \n names = np.asarray(names)\n\n if np.isscalar(names):\n raise TypeError('names should be list-like')\n \n if not len(names):\n raise TypeError('names is empty')\n\n if not isinstance(tree, Phylo.BaseTree.Tree):\n raise TypeError('tree should be a BioPython Tree')\n\n if len(names) != len(matrix):\n raise ValueError('names should have entries for each matrix row/column')\n \n terminals = tree.get_terminals()\n if len(names) != len(terminals):\n raise ValueError('names should have entries for each tree terminal')\n\n if len(terminals) != len(matrix):\n raise ValueError('matrix should have a row for each tree terminal')\n\n indices = []\n for terminal in terminals:\n name = terminal.name\n locs = np.where(names == name)[0]\n if not len(locs):\n raise ValueError('inconsistent names and tree: %s not in names'%name)\n\n if len(locs) > 1:\n raise ValueError('inconsistent names and tree: duplicate name %s in names'%name)\n indices.append(locs[0])\n\n # rmatrix = matrix[:, indices]\n # rmatrix = rmatrix[indices, :]\n\n if axis is not None:\n I = [np.arange(s) for s in matrix.shape] \n axes = [axis] if np.isscalar(axis) else axis\n for ax in axes:\n I[ax] = indices\n else:\n I = [indices] * matrix.ndim\n \n rmatrix = matrix[np.ix_(*I)]\n \n return rmatrix, indices\n\ndef findSubgroups(tree, c, method='naive', **kwargs):\n \"\"\"\n Divide a tree into subgroups using a criterion and a cutoff.\n Returns a list of lists with labels divided into subgroups.\n \"\"\"\n\n method = method.lower().strip()\n terminals = tree.get_terminals()\n names = [clade.name for clade in terminals]\n Z = None\n\n if method != 'naive':\n try:\n Z = 
getLinkage(names, tree)\n except LinkageError:\n print('Failed to build linkage; fall back to naive criterion')\n method = 'naive'\n \n if method == 'naive':\n subgroups = [[names[0]]]\n for i in range(len(terminals)-1):\n curr_clade = terminals[i]\n next_clade = terminals[i + 1]\n d = tree.distance(curr_clade, next_clade)\n if d > c:\n subgroups.append([])\n subgroups[-1].append(next_clade.name)\n else:\n from scipy.cluster.hierarchy import fcluster\n \n T = fcluster(Z, c, criterion=method, **kwargs)\n labels = np.unique(T)\n subgroups = [[] for _ in range(len(labels))]\n\n for i, t in enumerate(T):\n subgroups[t-1].append(names[i])\n\n return subgroups\n"
] | [
[
"sklearn.manifold.SpectralEmbedding",
"matplotlib.pyplot.connect",
"sklearn.metrics.silhouette_score",
"numpy.asarray",
"matplotlib.ticker.AutoMinorLocator",
"matplotlib.ticker.AutoLocator",
"numpy.concatenate",
"sklearn.manifold.TSNE",
"matplotlib.colors.TwoSlopeNorm",
"scipy.spatial.distance.squareform",
"sklearn.cluster.AgglomerativeClustering",
"numpy.where",
"matplotlib.pyplot.gca",
"numpy.ix_",
"numpy.unique",
"numpy.arange",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"scipy.cluster.hierarchy.linkage",
"numpy.zeros",
"matplotlib.ticker.IndexLocator",
"matplotlib.pyplot.sci",
"matplotlib.collections.LineCollection",
"matplotlib.ticker.IndexFormatter",
"matplotlib.ticker.NullFormatter",
"scipy.cluster.hierarchy.dendrogram",
"numpy.array",
"scipy.cluster.hierarchy.fcluster",
"matplotlib.pyplot.sca",
"numpy.percentile",
"matplotlib.pyplot.colorbar",
"numpy.shape",
"numpy.isscalar",
"matplotlib.ticker.FuncFormatter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
ZHANG-CAIQI/COMP1001 | [
"abfad8101b4b58697dfbc8599eebf466beebb9ec"
] | [
"Assessments 1-8/Ass8/Q2_b_1.py"
] | [
"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef stockUp(priceFile):\r\n\r\n # read the file\r\n infile = open(priceFile, \"r\")\r\n date = []\r\n stock = []\r\n\r\n # store only the dates and closing price\r\n day = 1\r\n firstLine = True\r\n for line in infile:\r\n if firstLine:\r\n firstLine = False\r\n else:\r\n count_item = 0\r\n for item in line.split(\",\"):\r\n if count_item == 0:\r\n date.append(day)\r\n elif count_item == 4:\r\n stock.append(float(item))\r\n count_item += 1\r\n day += 1\r\n\r\n infile.close()\r\n\r\n # Compute the up periods\r\n up = len(date)*[0]\r\n for k in range(1,len(stock)): # skip the heading\r\n i = k # i = k = 1\r\n while ((i>0) and float(stock[k])>=float(stock[i])):\r\n up[k] += 1\r\n i -= 1\r\n\r\n\r\n fig, ax1 = plt.subplots()\r\n\r\n color = 'tab:red'\r\n ax1.set_xlabel('Days started from 11/13/2017 and end on 11/12/2018')\r\n ax1.set_ylabel('Stock prices', color=color)\r\n ax1.plot(date, stock, color=color)\r\n ax1.tick_params(axis='y', labelcolor=color)\r\n\r\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\r\n\r\n color = 'tab:blue'\r\n ax2.set_ylabel('Up periods', color=color) # we already handled the x-label with ax1\r\n ax2.plot(date, up, color=color)\r\n ax2.tick_params(axis='y', labelcolor=color)\r\n\r\n fig.tight_layout() # otherwise the right y-label is slightly clipped\r\n plt.show()\r\n\r\n return\r\n\r\n\"\"\"\r\n plt.plot(date, up, marker='x')\r\n plt.plot(date, stock, marker='o')\r\n plt.title('The up periods for 11/13/2017-11/12/2018')\r\n plt.xlabel('Days started from 11/13/2017 and end on 11/12/2018')\r\n plt.ylabel('The up periods of GOOGL at closing')\r\n plt.show()\r\n\"\"\" \r\n\r\n\r\nstockUp(\"GOOGL.csv\")\r\n\r\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
steffi7574/lbann | [
"665797a112dc96d15bd1d958de61f48bf5d3d21f"
] | [
"bamboo/unit_tests/test_unit_layer_gather.py"
] | [
"import functools\nimport operator\nimport os\nimport os.path\nimport sys\nimport numpy as np\n\n# Bamboo utilities\ncurrent_file = os.path.realpath(__file__)\ncurrent_dir = os.path.dirname(current_file)\nsys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))\nimport tools\n\n# ==============================================\n# Objects for Python data reader\n# ==============================================\n# Note: The Python data reader imports this file as a module and calls\n# the functions below to ingest data.\n\n# Data\ninput_size = 23\noutput_size = 15\nseed = 202101280\n\n# Sample access functions\ndef get_sample(index):\n np.random.seed(seed+index)\n values = [np.random.normal() for _ in range(input_size)]\n indices = [\n np.random.uniform(-1, input_size+1)\n for _ in range(output_size)\n ]\n return values + indices\ndef num_samples():\n return 25\ndef sample_dims():\n return (input_size+output_size,)\n\n# ==============================================\n# Setup LBANN experiment\n# ==============================================\n\ndef setup_experiment(lbann):\n \"\"\"Construct LBANN experiment.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n mini_batch_size = num_samples() // 2\n trainer = lbann.Trainer(mini_batch_size)\n model = construct_model(lbann)\n data_reader = construct_data_reader(lbann)\n optimizer = lbann.NoOptimizer()\n return trainer, model, data_reader, optimizer\n\ndef construct_model(lbann):\n \"\"\"Construct LBANN model.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Input data\n # Note: Sum with a weights layer so that gradient checking will\n # verify that error signals are correct.\n x = lbann.Identity(lbann.Input())\n x_slice = lbann.Slice(\n x,\n slice_points=tools.str_list([0,input_size,input_size+output_size]),\n )\n x0_weights = lbann.Weights(\n optimizer=lbann.SGD(),\n initializer=lbann.ConstantInitializer(value=0.0),\n name='input_weights',\n )\n x0 = lbann.Sum(\n lbann.Identity(x_slice),\n lbann.WeightsLayer(weights=x0_weights, dims=tools.str_list(input_size)),\n )\n x1 = lbann.Identity(x_slice)\n\n # Apply gather\n y0 = lbann.Gather(x0, x1)\n y1 = lbann.Concatenation([\n lbann.Constant(value=i+1, num_neurons='1')\n for i in range(output_size)\n ])\n y = lbann.Multiply(y0, y1)\n z = lbann.L2Norm2(y)\n\n # Objects for LBANN model\n layers = list(lbann.traverse_layer_graph(x))\n metric = lbann.Metric(z, name='obj')\n obj = lbann.ObjectiveFunction(z)\n callbacks = []\n\n # Compute expected metric value\n vals = []\n for i in range(num_samples()):\n x = get_sample(i)\n x0 = x[:input_size]\n x1 = x[input_size:]\n y0 = np.zeros(output_size)\n for i in range(output_size):\n if 0 <= x1[i] < input_size:\n y0[i] = x0[int(x1[i])]\n z = 0\n for i in range(output_size):\n z += ((i+1)*y0[i]) ** 2\n vals.append(z)\n val = np.mean(vals)\n tol = 8 * val * np.finfo(np.float32).eps\n callbacks.append(lbann.CallbackCheckMetric(\n metric=metric.name,\n lower_bound=val-tol,\n upper_bound=val+tol,\n error_on_failure=True,\n execution_modes='test'))\n\n # Gradient checking\n callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))\n\n # Construct model\n num_epochs = 0\n return lbann.Model(num_epochs,\n layers=layers,\n objective_function=obj,\n metrics=[metric],\n callbacks=callbacks)\n\ndef construct_data_reader(lbann):\n \"\"\"Construct Protobuf message for Python data reader.\n\n The Python data reader will import the current Python file to\n access the sample access 
functions.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Note: The training data reader should be removed when\n # https://github.com/LLNL/lbann/issues/1098 is resolved.\n message = lbann.reader_pb2.DataReader()\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'train'\n )\n ])\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'test'\n )\n ])\n return message\n\n# ==============================================\n# Setup PyTest\n# ==============================================\n\n# Create test functions that can interact with PyTest\nfor _test_func in tools.create_tests(setup_experiment, __file__):\n globals()[_test_func.__name__] = _test_func\n"
] | [
[
"numpy.random.seed",
"numpy.finfo",
"numpy.random.normal",
"numpy.mean",
"numpy.random.uniform",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bigvideoresearch/SCC | [
"f26cdb6aaf248b5112812dbdac1f1b5086aebccc",
"f26cdb6aaf248b5112812dbdac1f1b5086aebccc",
"f26cdb6aaf248b5112812dbdac1f1b5086aebccc"
] | [
"datasets/imagename_dataset.py",
"runner_master/runner/transforms/image/autoaugment_operators.py",
"runner_master/runner/setup.py"
] | [
"from runner_master import runner\nimport os\nimport io\nimport torch\nimport logging\nfrom PIL import Image, ImageFile\nfrom runner_master.runner.data import datasets\n# to fix \"OSError: image file is truncated\"\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nclass ImagenameDataset(datasets.ImglistDatasetV2):\n\n def getitem(self, index):\n line = self.imglist[index].strip('\\n')\n tokens = line.split(' ', maxsplit=1)\n #if len(tokens) != 2:\n # raise RuntimeError('split tokens < 2')\n\n image_name, extra_str = tokens[0], tokens[1]\n if self.root != '' and image_name.startswith('/'):\n raise RuntimeError('root not empty but image_name starts with \"/\"')\n path = os.path.join(self.root, image_name)\n sample = dict()\n sample['image_name'] = image_name\n try:\n if not self.dummy_read:\n filebytes = self.reader(path)\n buff = io.BytesIO(filebytes)\n if self.dummy_size is not None:\n sample['data'] = torch.rand(self.dummy_size)\n else:\n image = Image.open(buff)\n sample['data'] = self.transform_image(image)\n for key, value in self.transform_extra(extra_str).items():\n sample[key] = value\n except Exception as e:\n logging.error('[{}] broken'.format(path))\n raise e\n return sample\n\nrunner.patch_dataset('ImagenameDataset', ImagenameDataset)\n",
"'''\nReferences:\n https://arxiv.org/abs/1805.09501\n https://github.com/DeepVoltaire/Autoaugment/blob/master/autoaugment.py\n https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py\n'''\n\n\nimport math\nimport random\nimport numpy as np\nfrom PIL import Image, ImageOps, ImageEnhance\nimport torchvision.transforms.functional as F\nfrom .interpolate_mapping import interpolate_int2str, interpolate_any2int\n\n\nclass ResampleOp:\n\n def set_resample_mode(self, resample):\n self.resample = resample\n\n\nclass ShearX(ResampleOp):\n\n def __init__(self, level, upper=0.3, resample=0):\n self.magnitude = level * upper / 10\n self.resample = resample\n\n def __call__(self, img):\n m = self.magnitude * random.choice([-1, 1])\n return img.transform(img.size, Image.AFFINE, (1, m, 0, 0, 1, 0), self.resample, fillcolor=(128, 128, 128))\n\n\nclass ShearY(ResampleOp):\n\n def __init__(self, level, upper=0.3, resample=0):\n self.magnitude = level * upper / 10\n self.resample = resample\n\n def __call__(self, img):\n m = self.magnitude * random.choice([-1, 1])\n return img.transform(img.size, Image.AFFINE, (1, 0, 0, m, 1, 0), self.resample, fillcolor=(128, 128, 128))\n\n\nclass TranslateX(ResampleOp):\n\n def __init__(self, level, upper=150 / 331, resample=0):\n self.magnitude = level * upper / 10\n self.resample = resample\n\n def __call__(self, img):\n m = self.magnitude * img.size[0] * random.choice([-1, 1])\n return img.transform(img.size, Image.AFFINE, (1, 0, m, 0, 1, 0), self.resample, fillcolor=(128, 128, 128))\n\n\nclass TranslateY(ResampleOp):\n\n def __init__(self, level, upper=150 / 331, resample=0):\n self.magnitude = level * upper / 10\n self.resample = resample\n\n def __call__(self, img):\n m = self.magnitude * img.size[1] * random.choice([-1, 1])\n return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, m), self.resample, fillcolor=(128, 128, 128))\n\n\nclass Rotate(ResampleOp):\n\n def __init__(self, level, upper=30, resample=0):\n self.magnitude = level * upper / 10\n self.resample = resample\n\n def __call__(self, img):\n m = self.magnitude * random.choice([-1, 1])\n rot = img.convert(\"RGBA\").rotate(m, self.resample)\n return Image.composite(rot, Image.new(\"RGBA\", rot.size, (128,) * 4), rot).convert(img.mode)\n\n\nclass AutoContrast:\n\n def __init__(self, level):\n pass\n\n def __call__(self, img):\n return ImageOps.autocontrast(img)\n\n\nclass Invert:\n\n def __init__(self, level):\n pass\n\n def __call__(self, img):\n return ImageOps.invert(img)\n\n\nclass Equalize:\n\n def __init__(self, level):\n pass\n\n def __call__(self, img):\n return ImageOps.equalize(img)\n\n\nclass Solarize:\n\n def __init__(self, level, upper=256):\n '''From https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py:\n self.magnitude = level * upper / 10\n However under this implementation, higher magnitude level actually leads to weaker regularization.\n So I invert it by \"256 -\".\n '''\n self.magnitude = 256 - level * upper / 10\n\n def __call__(self, img):\n return ImageOps.solarize(img, self.magnitude)\n\n\nclass SolarizeAdd:\n '''\n The original Autoaugment paper does not mention this operator.\n It comes from the so-called Policy V0 in the efficientnet repo:\n https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py#L181\n '''\n def __init__(self, level, upper=110):\n self.magnitude = level * upper / 10\n\n def __call__(self, img):\n img = np.array(img)\n added_img = (img.astype('float64') + 
self.magnitude).clip(0, 255).astype('uint8')\n        img = np.where(img < 128, added_img, img)\n        return Image.fromarray(img)\n\n\nclass Posterize:\n\n    def __init__(self, level, upper=4):\n        self.magnitude = max(1, 8 - int(level * upper / 10))\n\n    def __call__(self, img):\n        return ImageOps.posterize(img, self.magnitude)\n\n\nclass Enhance:\n\n    def __init__(self, level, upper=0.9):\n        self.magnitude = level * upper / 10\n\n    def __call__(self, img):\n        magnitude = max(0, 1 + self.magnitude * random.choice([-1, 1]))\n        return self.enhance(img, magnitude)\n\n    def enhance(self, img, magnitude):\n        raise NotImplementedError\n\n\nclass Contrast(Enhance):\n\n    def enhance(self, img, magnitude):\n        return ImageEnhance.Contrast(img).enhance(magnitude)\n\n\nclass Color(Enhance):\n\n    def enhance(self, img, magnitude):\n        return ImageEnhance.Color(img).enhance(magnitude)\n\n\nclass Brightness(Enhance):\n\n    def enhance(self, img, magnitude):\n        return ImageEnhance.Brightness(img).enhance(magnitude)\n\n\nclass Sharpness(Enhance):\n\n    def enhance(self, img, magnitude):\n        # fixed copy-paste bug: apply Sharpness (not Brightness) with the randomly signed magnitude\n        return ImageEnhance.Sharpness(img).enhance(magnitude)\n\n\nclass Cutout:\n\n    def __init__(self, level, upper=60 / 331, fillcolor=0):\n        self.magnitude = level * upper / 10\n        self.fillcolor = fillcolor\n\n    def __call__(self, img):\n        w, h = img.size\n        x = np.random.randint(w)\n        y = np.random.randint(h)\n        offset = int(self.magnitude * min(img.size) / 2)\n        x1 = np.clip(x - offset, 0, w)\n        x2 = np.clip(x + offset, 0, w)\n        y1 = np.clip(y - offset, 0, h)\n        y2 = np.clip(y + offset, 0, h)\n\n        paste = Image.new(img.mode, (x2 - x1, y2 - y1), self.fillcolor)\n        img.paste(paste, (x1, y1, x2, y2))\n        return img\n\n\nclass RandAugmentResizedCrop:\n\n    def __init__(self, size, level,\n                 scale_anchors=(0.08, 0.6457, 1.0),\n                 ratio_anchors=(3 / 4, 1.0, 4 / 3),\n                 priority='ratio', interpolation='bilinear'):\n        if scale_anchors[2] > 1:\n            raise RuntimeError('max scale should not > 1, but got {}'.format(scale_anchors[2]))\n        if ratio_anchors[0] > ratio_anchors[2]:\n            raise RuntimeError('ratio[0] > ratio[2]: {} '.format(ratio_anchors))\n        if priority not in ('scale', 'ratio'):\n            raise RuntimeError('priority should be \"scale\" or \"ratio\" , but got {}'.format(repr(priority)))\n\n        if isinstance(size, tuple):\n            self.size = size\n        else:\n            self.size = (size, size)\n        self.level = level\n\n        min_scale, std_scale, max_scale = scale_anchors\n        min_ratio, std_ratio, max_ratio = ratio_anchors\n        if level > 10:\n            self.scale = (min_scale, max_scale)\n            self.ratio = (min_ratio, max_ratio)\n        else:\n            power = level / 10\n            lower_scale = (min_scale / std_scale) ** power * std_scale\n            upper_scale = (max_scale / std_scale) ** power * std_scale\n            self.scale = (lower_scale, upper_scale)\n            lower_ratio = (min_ratio / std_ratio) ** power * std_ratio\n            upper_ratio = (max_ratio / std_ratio) ** power * std_ratio\n            self.ratio = (lower_ratio, upper_ratio)\n\n        self.priority = priority\n        self.interpolation = interpolate_any2int[interpolation]\n\n    @staticmethod\n    def get_params(img, scale, ratio, priority):\n        full_area = img.size[0] * img.size[1]\n        part_area = random.uniform(*scale) * full_area\n        log_ratio = (math.log(ratio[0]), math.log(ratio[1]))\n        aspect_ratio = math.exp(random.uniform(*log_ratio))\n        w = math.sqrt(part_area * aspect_ratio)\n        h = math.sqrt(part_area / aspect_ratio)\n\n        rel_w = w / img.size[0]\n        rel_h = h / img.size[1]\n        assert rel_w <= 1 or rel_h <= 1\n        if rel_w > 1 or rel_h > 1:\n            if priority == 'scale':\n                if rel_w > 1:\n                    w /= rel_w\n                    h *= rel_w\n                elif rel_h > 1:\n                    w *= rel_h\n                    h /= rel_h\n            elif 
priority == 'ratio':\n rel = max(rel_w, rel_h)\n w /= rel\n h /= rel\n else:\n raise RuntimeError('invalid argument priority: {}'.format(priority))\n\n w = round(w)\n h = round(h)\n if w > img.size[0] or h > img.size[1]:\n raise RuntimeError('w = {}, h = {}, img size {}'.format(w, h, img.size))\n\n i = random.randint(0, img.size[1] - h)\n j = random.randint(0, img.size[0] - w)\n return i, j, h, w\n\n def __call__(self, img):\n i, j, h, w = self.get_params(img, self.scale, self.ratio, self.priority)\n return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = interpolate_int2str[self.interpolation]\n format_string = self.__class__.__name__ + '(size={0}'.format(self.size)\n format_string += ', level={0}'.format(self.level)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0})'.format(interpolate_str)\n return format_string\n",
"__all__ = ['setup']\n\n\nimport os\nimport time\nimport torch\nimport random\nimport numpy as np\nfrom .config import setup_config\nfrom .distributed import dist_init\n\n\ndef setup(seed=1024,\n port=23333,\n backend='nccl',\n mp_method='spawn',\n cudnn_benchmark=True,\n cudnn_deterministic=False,\n config_first=True,\n verbose=True):\n\n config = setup_config()\n\n if config_first:\n # if an argument is already specified in config, use config, else use function's input\n seed = config.get('seed', seed)\n port = config.get('port', port)\n backend = config.get('backend', backend)\n mp_method = config.get('mp_method', mp_method)\n cudnn_benchmark = config.get('cudnn_benchmark', cudnn_benchmark)\n cudnn_deterministic = config.get('cudnn_deterministic', cudnn_deterministic)\n\n # random seed\n random.seed(config.seed)\n np.random.seed(config.seed)\n torch.manual_seed(config.seed)\n torch.cuda.manual_seed(config.seed)\n\n # setup distributed environment\n rank, world_size = dist_init(port, backend, mp_method)\n\n # cudnn setup\n torch.backends.cudnn.benchmark = cudnn_benchmark\n torch.backends.cudnn.deterministic = cudnn_deterministic\n\n if rank == 0 and verbose:\n print('=' * 80)\n print('runner config:')\n print(config)\n print('=' * 80)\n print('runner setup:')\n print(' date [{}]'.format(time.strftime('%Y-%m-%d-%H:%M:%S')))\n print(' seed [{}]'.format(config.seed))\n print(' port [{}]'.format(port))\n print(' backend [{}]'.format(backend))\n print(' mp_method [{}]'.format(mp_method))\n print(' world_size [{}]'.format(world_size))\n print(' cudnn_benchmark [{}]'.format(cudnn_benchmark))\n print(' cudnn_deterministic [{}]'.format(cudnn_deterministic))\n print('=' * 80, flush=True)\n\n return config, rank, world_size\n"
] | [
[
"torch.rand"
],
[
"numpy.array",
"numpy.where",
"numpy.clip",
"numpy.random.randint"
],
[
"torch.manual_seed",
"torch.cuda.manual_seed",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hlzhang109/OpenPrompt | [
"8a1ec1ceac3805a11b09dda9b96ad7406d222f26",
"8a1ec1ceac3805a11b09dda9b96ad7406d222f26"
] | [
"openprompt/prompts/one2one_verbalizer.py",
"openprompt/prompt_base.py"
] | [
"import json\nfrom transformers.tokenization_utils import PreTrainedTokenizer\nfrom yacs.config import CfgNode\nfrom openprompt.data_utils.data_utils import InputFeatures\nimport re\nfrom openprompt import Verbalizer\nfrom typing import *\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom openprompt.utils.logging import logger\n\n\n\nclass One2oneVerbalizer(Verbalizer):\n r\"\"\"\n The basic manually defined verbalizer class, this class is inherited from the :obj:`Verbalizer` class.\n This class restrict the use of label words to one words per label. For a verbalzer with less constraints,\n please use Basic ManualVerbalizer.\n\n Args: \n tokenizer (:obj:`PreTrainedTokenizer`): The tokenizer of the current pre-trained model to point out the vocabulary.\n classes (:obj:`classes`): The classes (or labels) of the current task.\n num_classes (:obj:`int`): Optional. The number of classes of the verbalizer. Only one of `classes` and `num_classes` should be used.\n label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.\n prefix (:obj:`str`, optional): The prefix string of the verbalizer. (used in PLMs like RoBERTa, which is sensitive to prefix space)\n multi_token_handler (:obj:`str`, optional): The handling strategy for multiple tokens produced by the tokenizer.\n \"\"\"\n def __init__(self, \n tokenizer: PreTrainedTokenizer,\n num_classes: Optional[int] = None,\n classes: Optional[List] = None,\n label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,\n prefix: Optional[str] = \" \",\n multi_token_handler: Optional[str] = \"first\",\n ):\n super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)\n self.prefix = prefix\n self.multi_token_handler = multi_token_handler\n self.label_words = label_words\n\n def on_label_words_set(self):\n super().on_label_words_set()\n self.label_words = self.add_prefix(self.label_words, self.prefix)\n self.generate_parameters()\n \n @staticmethod\n def add_prefix(label_words, prefix):\n r\"\"\"Add prefix to label words. For example, if a label words is in the middle of a template,\n the prefix should be ``' '``.\n\n Args:\n label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.\n prefix (:obj:`str`, optional): The prefix string of the verbalizer.\n \n Returns:\n :obj:`Sequence[str]`: New label words with prefix.\n \"\"\"\n new_label_words = []\n if isinstance(label_words[0], list):\n assert max([len(w) for w in label_words]) == 1, \"Providing multiple label words, you should use other verbalizers instead.\"\n label_words = [w[0] for w in label_words] \n\n for word in label_words:\n if word.startswith(\"<!>\"):\n new_label_words.append(word.split(\"<!>\")[1])\n else:\n new_label_words.append(prefix + word)\n\n return new_label_words\n \n def generate_parameters(self) -> List:\n r\"\"\"In basic manual template, the parameters are generated from label words directly.\n In this implementation, the label_words should not be tokenized into more than one token. \n \"\"\"\n words_ids = []\n for word in self.label_words:\n word_ids = self.tokenizer.encode(word, add_special_tokens=False)\n if len(word_ids) > 1:\n logger.warning(\"Word {} is split into multiple tokens: {}. 
\\\n If this is not what you expect, try using another word for this verbalizer\" \\\n .format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))\n words_ids.append(word_ids)\n \n \n max_len = max([len(ids) for ids in words_ids])\n words_ids_mask = [[1]*len(ids) + [0]*(max_len-len(ids)) for ids in words_ids]\n words_ids = [ids+[0]*(max_len-len(ids)) for ids in words_ids]\n \n words_ids_tensor = torch.tensor(words_ids)\n words_ids_mask = torch.tensor(words_ids_mask)\n self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)\n self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)\n\n def project(self,\n logits: torch.Tensor,\n **kwargs,\n ) -> torch.Tensor:\n r\"\"\"\n Project the labels, the return value is the normalized (sum to 1) probs of label words. \n \n Args:\n logits (:obj:`torch.Tensor`): The orginal logits of label words.\n \n Returns:\n :obj:`torch.Tensor`: The normalized logits of label words\n \"\"\"\n label_words_logits = logits[:, self.label_words_ids]\n label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)\n return label_words_logits\n\n def process_logits(self, logits: torch.Tensor, **kwargs):\n r\"\"\"A whole framework to process the original logits over the vocabulary, which contains four steps: \n (1) Project the logits into logits of label words\n (2) Normalize over all label words\n (3) Calibrate (optional)\n Args:\n logits (:obj:`torch.Tensor`): The orginal logits.\n \n Returns:\n (:obj:`torch.Tensor`): The final processed logits over the label words set.\n \"\"\"\n # project\n label_words_logits = self.project(logits, **kwargs) #Output: (batch_size, num_classes) or (batch_size, num_classes, num_label_words_per_label)\n\n # normalize\n label_words_probs = self.normalize(label_words_logits)\n\n # calibrate\n if hasattr(self, \"_calibrate_logits\") and self._calibrate_logits is not None:\n label_words_probs = self.calibrate(label_words_probs=label_words_probs)\n\n # convert to logits\n label_words_logits = torch.log(label_words_probs+1e-15)\n return label_words_logits\n \n def normalize(self, logits: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Given logits regarding the entire vocabulary, return the probs over the label words set.\n \n Args:\n logits (:obj:`Tensor`): The logits over the entire vocabulary.\n\n Returns:\n :obj:`Tensor`: The logits over the label words set.\n \n \"\"\"\n batch_size = logits.shape[0]\n return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)\n\n\n def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:\n r\"\"\"\n \n Args:\n label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]\n \n Returns:\n :obj:`torch.Tensor`: The calibrated probability of label words.\n \"\"\"\n shape = label_words_probs.shape\n assert self._calibrate_logits.dim() == 1, \"self._calibrate_logits are not 1-d tensor\"\n calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))\n assert calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:] \\\n and calibrate_label_words_probs.shape[0]==1, \"shape not match\"\n label_words_probs /= (calibrate_label_words_probs+1e-15)\n # normalize # TODO Test the performance\n norm = label_words_probs.reshape(shape[0], -1).sum(dim=-1,keepdim=True) # TODO Test the performance of detaching()\n label_words_probs /= norm\n return 
label_words_probs\n \n\n \n\n\n \n \n",
"from abc import abstractmethod\nimport json\nfrom openprompt.config import convert_cfg_to_dict\n\nfrom transformers.utils.dummy_pt_objects import PreTrainedModel\nfrom openprompt.utils.utils import signature\n\nfrom yacs.config import CfgNode\nfrom openprompt.data_utils.data_utils import InputFeatures\nimport torch\nimport torch.nn as nn\nfrom openprompt.data_utils import InputExample\nfrom typing import *\nfrom transformers.tokenization_utils import PreTrainedTokenizer\n\nfrom openprompt.utils.logging import logger\nimport numpy as np\nimport torch.nn.functional as F\n\n\n\n\nclass Template(nn.Module):\n r'''\n Base class for all the templates. \n Most of methods are abstract, with some expections to hold the common methods for all template, such as ``loss_ids``, ``save``, ``load``.\n\n Args: \n tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.\n mask_token (:obj:`str`): The special token that is masked and need to be predicted by the model.\n placeholder_mapping (:obj:`dict`): A place holder to represent the original input text. \n '''\n\n registered_inputflag_names = [\"loss_ids\", \"shortenable_ids\"]\n\n def __init__(self, \n tokenizer: PreTrainedTokenizer,\n mask_token: str = '<mask>',\n placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'},\n ):\n super().__init__()\n self.mask_token = mask_token\n self.tokenizer = tokenizer\n self.placeholder_mapping = placeholder_mapping\n self._in_on_text_set = False\n\n def get_default_loss_ids(self):\n r'''Get the loss indices for the template using mask.\n E.g. when self.text is ``['<text_a>', 'it', 'is', '<mask>', '.']``, output is ``[0, 0, 0, 1, 0]``.\n\n Returns:\n :obj:`List[int]`: A list of integers in the range [0, 1]:\n \n - 1 for a masked tokens.\n - 0 for a sequence tokens.\n '''\n idx = [\n 1 if token==self.mask_token\n else 0\n for token in self.text\n ]\n return idx\n\n def get_default_shortenable_ids(self) -> List[int]:\n r\"\"\"Every template needs shortenable_ids, denoting which part of the template can be trucate to fit\n the language model's ``max_seq_length``. Default: the input text is shortenable, while the template text and other\n special tokens are not shortenable. As far as we are concerned, almost every template will use this default setting. \n e.g. 
when self.text is ``['<text_a>', '<meta:word>', 'is', '<mask>', '.']``, output is ``[1, 0, 0, 0, 0]``\n \n Returns:\n :obj:`List[int]`: A list of integers in the range ``[0, 1]``:\n\n - 1 for the input tokens.\n - 0 for the template sequence tokens.\n \"\"\"\n idx = [\n 1 if any([placeholder in token for placeholder in self.placeholder_mapping.keys()])\n else 0\n for token in self.text\n ]\n return idx\n\n def get_default_new_token_ids(self) -> List[int]:\n r'''\n Sometimes tokens in the template are not from the vocabulary, \n but a sequence of newly iniliazed tokens (ones may say, soft-encoding).\n In this case, you need to implement this function.\n\n Raises:\n NotImplementedError: if needed, add ``new_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.\n '''\n raise NotImplementedError\n\n def get_default_soft_token_ids(self) -> List[int]:\n r'''\n This function identifies which tokens are soft tokens.\n\n Raises:\n NotImplementedError: if needed, add ``soft_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.\n '''\n raise NotImplementedError\n \n # @abstractmethod\n def wrap_one_example(self, \n example: InputExample) -> List[Dict]:\n r'''Given an input example which contains input text, which can be referenced\n by self.template.placeholder_mapping 's value. \n This function process the example into a list of dict,\n Each dict functions as a group, which has the sample properties, such as\n whether it's shortenable, whether it's the masked position, whether it's soft token, etc.\n Since a text will be tokenized in the subsequent processing procedure,\n these attributes are broadcasted along the tokenized sentence.\n \n Args:\n example (:obj:`Object`): An :py:class:`~openprompt.data_utils.data_utils.InputExample` object, which should have attributes that are able to be filled in the template.\n \n Returns:\n :obj:`List[Dict]`\n '''\n \n not_empty_keys = example.keys()\n if self.text is None:\n raise ValueError(\"template text has not been initialized\")\n if isinstance(example, InputExample):\n text = self.text.copy()\n for placeholder_token in self.placeholder_mapping:\n for i in range(len(text)):\n text[i] = text[i].replace(placeholder_token, getattr(example, self.placeholder_mapping[placeholder_token]))\n not_empty_keys.remove(self.placeholder_mapping[placeholder_token]) # this key has been processed, remove\n for key, value in example.meta.items():\n for i in range(len(text)):\n text[i] = text[i].replace(\"<meta:\"+key+\">\", value)\n not_empty_keys.remove('meta') # meta has been processed\n # TODO <a!> rstrip punctuation support\n # print(text) # for debug\n\n keys, values= ['text'], [text]\n for inputflag_name in self.registered_inputflag_names:\n keys.append(inputflag_name)\n v = None\n if hasattr(self, inputflag_name) and getattr(self, inputflag_name) is not None:\n v = getattr(self, inputflag_name)\n elif hasattr(self, \"get_default_\"+inputflag_name):\n v = getattr(self, \"get_default_\"+inputflag_name)()\n else:\n raise ValueError(\"\"\"\n Template's inputflag '{}' is registered but not initialize.\n Try using template.{} = [...] 
to initialize\n                    or create a method get_default_{}(self) in your template.\n                    \"\"\".format(inputflag_name, inputflag_name, inputflag_name))\n                \n                if len(v) != len(text):\n                    raise ValueError(\"Template: len({})={} doesn't match len(text)={}.\"\\\n                        .format(inputflag_name, len(v), len(text)))\n                values.append(v)\n            wrapped_parts_to_tokenize = []\n            for piece in list(zip(*values)):\n                wrapped_parts_to_tokenize.append(dict(zip(keys, piece)))\n\n            wrapped_parts_not_tokenize = {key: getattr(example, key) for key in not_empty_keys}\n            return [wrapped_parts_to_tokenize, wrapped_parts_not_tokenize]\n        else:\n            raise TypeError(\"InputExample\")    \n    \n    @abstractmethod\n    def process_batch(self, batch):\n        r\"\"\"All template should rewrite this method to process the batch input\n        such as substituting embeddings.\n        \"\"\"\n        raise NotImplementedError\n\n    def save(self,\n             path: str,\n             **kwargs) -> None:\n        r'''\n        A save method API.\n        \n        Args:\n            path (str): A path to save your template.\n        '''\n        raise NotImplementedError\n\n    @property\n    def text(self):\n        return self._text\n\n    @text.setter \n    def text(self, text):\n        self._text = text\n        if text is None:\n            return\n        if (not isinstance(text, list)) and (not isinstance(text, tuple)):\n            raise ValueError(\"Template text must be a list or a tuple\")\n        if not self._in_on_text_set:\n            self.safe_on_text_set()\n        # else:\n        #     logger.warning(\"Reset text in on_text_set function. Is this intended?\")\n\n    def safe_on_text_set(self) -> None:\n        r\"\"\"With this wrapper function, setting text inside ``on_text_set()``\n        will not trigger ``on_text_set()`` again to prevent endless recursion.\n        \"\"\"\n        # fixed typo: was `self._in_on_text_sett`, which left the recursion guard unset\n        self._in_on_text_set = True\n        self.on_text_set()\n        self._in_on_text_set = False\n    \n    def on_text_set(self):\n        r\"\"\"\n        A hook to do something when template text was set.\n        \"\"\"\n        pass\n    \n    def from_file(self,\n                  path: str,\n                  choice: int = 0,\n                  separator: str = \" \",\n                 ):\n        r'''\n        Read the template from a local file.\n\n        Args: \n            path (:obj:`str`): The path of the local template file.\n            choice (:obj:`int`): The id-th line of the file.\n            separator (:obj:`str`, optional): A separator to delimit the text in the file. Default to \" \"\n        '''\n        with open(path, 'r') as fin:\n            text = fin.readlines()[choice]\n            text = text.strip().split(separator)\n            self.text = text\n        return self\n\n    @classmethod\n    def from_config(cls,\n                    config: CfgNode,\n                    **kwargs):\n        r\"\"\"load a template from template's configuration node. \n\n        Args:\n            config (:obj:`CfgNode`): the sub-configuration of template, i.e. config[config.template]\n                if config is a global config node. \n            kwargs: Other kwargs that might be used in initialize the verbalizer. \n                The actual value should match the arguments of __init__ functions.\n        \"\"\"\n\n        init_args = signature(cls.__init__).args\n        _init_dict = {**convert_cfg_to_dict(config), **kwargs}\n        init_dict = {key: _init_dict[key] for key in _init_dict if key in init_args}\n        template = cls(**init_dict)\n        if hasattr(template, \"from_file\"):\n            if not hasattr(config, \"file_path\"):\n                pass\n            else:\n                if (not hasattr(config, \"text\") or config.text is None) and config.file_path is not None:\n                    if config.choice is None:\n                        config.choice = 0\n                    template.from_file(config.file_path, config.choice)\n                elif (hasattr(config, \"text\") and config.text is not None) and config.file_path is not None:\n                    raise RuntimeError(\"The text can't be both set from `text` and `file_path`.\")\n        return template\n    \n\n\n\n\nclass Verbalizer(nn.Module):\n    r'''\n    Base class for all the verbalizers. 
\n\n Args: \n tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.\n classes (:obj:`Sequence[str]`): A sequence of classes that need to be projected.\n '''\n def __init__(self,\n tokenizer: Optional[PreTrainedTokenizer] = None,\n classes: Optional[Sequence[str]] = None,\n num_classes: Optional[int] = None,\n ):\n super().__init__()\n self.tokenizer = tokenizer\n self.classes = classes\n if classes is not None and num_classes is not None:\n assert len(classes) == num_classes, \"len(classes) != num_classes, Check you config.\"\n self.num_classes = num_classes\n elif num_classes is not None:\n self.num_classes = num_classes\n elif classes is not None:\n self.num_classes = len(classes)\n else:\n self.num_classes = None\n # raise AttributeError(\"No able to configure num_classes\")\n self._in_on_label_words_set = False\n\n @property\n def label_words(self,):\n r'''\n Label words means the words in the vocabulary projected by the labels. \n E.g. if we want to establish a projection in sentiment classification: positive :math:`\\rightarrow` {`wonderful`, `good`},\n in this case, `wonderful` and `good` are label words.\n '''\n return self._label_words\n \n @label_words.setter\n def label_words(self, label_words):\n if label_words is None:\n return\n self._label_words = self._match_label_words_to_label_ids(label_words)\n if not self._in_on_label_words_set:\n self.safe_on_label_words_set()\n # else:\n # logger.warning(\"Reset label words in on_label_words_set function. Is this intended?\")\n\n def _match_label_words_to_label_ids(self, label_words): # TODO newly add function after docs written # TODO rename this function\n \"\"\"\n sort label words dict of verbalizer to match the label order of the classes\n \"\"\"\n if isinstance(label_words, dict):\n if self.classes is None:\n raise ValueError(\"\"\"\n classes attribute of the Verbalizer should be set since your given label words is a dict.\n Since we will match the label word with respect to class A, to A's index in classes\n \"\"\")\n if set(label_words.keys()) != set(self.classes):\n raise ValueError(\"name of classes in verbalizer are differnt from those of dataset\")\n label_words = [ # sort the dict to match dataset\n label_words[c]\n for c in self.classes\n ] # length: label_size of the whole task\n elif isinstance(label_words, list) or isinstance(label_words, tuple):\n pass\n # logger.info(\"\"\"\n # Your given label words is a list, by default, the ith label word in the list will match class i of the dataset.\n # Please make sure that they have the same order.\n # Or you can pass label words as a dict, mapping from class names to label words.\n # \"\"\")\n else:\n raise ValueError(\"Verbalizer label words must be list, tuple or dict\")\n return label_words\n\n def safe_on_label_words_set(self,):\n self._in_on_label_words_set = True\n self.on_label_words_set()\n self._in_on_label_words_set = False\n\n def on_label_words_set(self,):\n r\"\"\"A hook to do something when textual label words were set.\n \"\"\"\n pass\n\n @property\n def vocab(self,) -> Dict:\n if not hasattr(self, '_vocab'):\n self._vocab = self.tokenizer.convert_ids_to_tokens(np.arange(self.vocab_size).tolist())\n return self._vocab\n\n @property\n def vocab_size(self,) -> int:\n return self.tokenizer.vocab_size\n \n @abstractmethod\n def generate_parameters(self, **kwargs) -> List:\n r\"\"\"\n The verbalizer can be seen as an extra layer on top of the originial\n pre-trained models. 
In manual verbalizer, it is a fixed one-hot vector of dimension\n vocab_size, with the position of the label word being 1 and 0 everywhere else. \n In other situation, the parameters may be a continuous vector over the \n vocab, with each dimension representing a weight of that token.\n Moreover, the parameters may be set to trainable to allow label words selection.\n \n Therefore, this function serves as an abstract methods for generating the parameters\n of the verbalizer, and must be instantiated in any derived class.\n\n Note that the parameters need to be registered as a part of pytorch's module to \n It can be acheived by wrapping a tensor using nn.Parameter().\n \"\"\"\n raise NotImplementedError\n\n def register_calibrate_logits(self, logits: torch.Tensor):\n r\"\"\"\n This function aims to register logits that need to be calibrated, and detach the orginal logits from the current graph.\n See XX for more details.\n \"\"\"\n if logits.requires_grad:\n logits = logits.detach()\n self._calibrate_logits = logits\n \n def process_logits(self,\n logits: torch.Tensor,\n batch: InputFeatures,\n **kwargs):\n r\"\"\"Abstract method for process logits.\n \n Args:\n logits (:obj:`torch.Tensor`): The current logits generated by pre-trained language models.\n batch (:obj:`InputFeatures`): The input features of the data.\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def aggregate(label_words_logits: torch.Tensor) -> torch.Tensor:\n r\"\"\" To aggregate logits on multiple label words into the label's logits\n Basic aggregator: mean of each label words' logits to a label's logits\n Can be re-implemented in advanced verbaliezer.\n\n Args:\n label_words_logits (:obj:`torch.Tensor`): The logits of the label words only.\n\n Return:\n :obj:`torch.Tensor`: The final logits calculated by the label words.\n \"\"\"\n if label_words_logits.dim()>2:\n return label_words_logits.mean(dim=-1)\n else:\n return label_words_logits\n\n\n def normalize(self, logits: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n Given logits regarding the entire vocab, calculate the probs over the label words set by softmax.\n \n Args:\n logits(:obj:`Tensor`): The logits of the entire vocab.\n\n Returns:\n :obj:`Tensor`: The probability distribution over the label words set.\n \"\"\"\n batch_size = logits.shape[0]\n return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)\n\n @abstractmethod\n def project(self,\n logits: torch.Tensor,\n **kwargs) -> torch.Tensor:\n r\"\"\"This method receives input logits of shape (batch_size, vocab_size), and use the \n parameters of this verbalizer to project the logits over entire vocab into the\n logits of labels words. 
\n\n Args: \n logits (:obj:`Tensor`): The logits over entire vocab generated by the pre-trained lanuage model with shape [``batch_size``, ``max_seq_length``, ``vocab_size``] \n \n Returns:\n :obj:`Tensor`: The normalized probs (sum to 1) of each label .\n \"\"\"\n raise NotImplementedError\n \n def handle_multi_token(self, label_words_logits, mask):\n r\"\"\"\n Support multiple methods to handle the multi tokens produced by the tokenizer.\n We suggest using 'first' or 'max' if the some parts of the tokenization is not meaningful.\n Can broadcast to 3-d tensor.\n \n Args:\n label_words_logits (:obj:`torch.Tensor`):\n \n Returns:\n :obj:`torch.Tensor`\n \"\"\"\n if self.multi_token_handler == \"first\":\n label_words_logits = label_words_logits.select(dim=-1, index=0)\n elif self.multi_token_handler == \"max\":\n label_words_logits = label_words_logits - 1000*(1-mask.unsqueeze(0))\n label_words_logits = label_words_logits.max(dim=-1).values\n elif self.multi_token_handler == \"mean\":\n label_words_logits = (label_words_logits*mask.unsqueeze(0)).sum(dim=-1)/(mask.unsqueeze(0).sum(dim=-1)+1e-15)\n else:\n raise ValueError(\"multi_token_handler {} not configured\".format(self.multi_token_handler))\n return label_words_logits\n \n @classmethod\n def from_config(cls, \n config: CfgNode, \n **kwargs):\n r\"\"\"load a verbalizer from verbalizer's configuration node. \n\n Args:\n config (:obj:`CfgNode`): the sub-configuration of verbalizer, i.e. config[config.verbalizer]\n if config is a global config node. \n kwargs: Other kwargs that might be used in initialize the verbalizer. \n The actual value should match the arguments of __init__ functions.\n \"\"\"\n\n init_args = signature(cls.__init__).args\n _init_dict = {**convert_cfg_to_dict(config), **kwargs}\n init_dict = {key: _init_dict[key] for key in _init_dict if key in init_args}\n verbalizer = cls(**init_dict)\n if hasattr(verbalizer, \"from_file\"):\n if not hasattr(config, \"file_path\"):\n pass\n else:\n if (not hasattr(config, \"label_words\") or config.label_words is None) and config.file_path is not None:\n if config.choice is None:\n config.choice = 0\n verbalizer.from_file(config.file_path, config.choice)\n elif (hasattr(config, \"label_words\") and config.label_words is not None) and config.file_path is not None:\n raise RuntimeError(\"The text can't be both set from `text` and `file_path`.\")\n return verbalizer\n \n def from_file(self,\n path: str, \n choice: Optional[int] = 0 ):\n r\"\"\"Load the predefined label words from verbalizer file.\n Currently support three types of file format:\n 1. a .jsonl or .json file, in which is a single verbalizer \n in dict format.\n 2. a .jsonal or .json file, in which is a list of verbalizers in dict format\n 3. a .txt or a .csv file, in which is the label words of a class are listed in line, \n seperated by commas. Begin a new verbalizer by an empty line.\n This format is recommended when you don't know the name of each class.\n\n The details of verbalizer format can be seen in :ref:`How_to_write_a_verbalizer`. 
\n\n Args: \n path (:obj:`str`): The path of the local template file.\n choice (:obj:`int`): The choice of verbalizer in a file containing\n multiple verbalizers.\n \n Returns:\n Template : `self` object\n \"\"\"\n if path.endswith(\".txt\") or path.endswith(\".csv\"):\n with open(path, 'r') as f:\n lines = f.readlines()\n label_words_all = []\n label_words_single_group = []\n for line in lines:\n line = line.strip().strip(\" \")\n if line == \"\":\n if len(label_words_single_group)>0:\n label_words_all.append(label_words_single_group)\n label_words_single_group = []\n else:\n label_words_single_group.append(line)\n if len(label_words_single_group) > 0: # if no empty line in the last\n label_words_all.append(label_words_single_group)\n if choice >= len(label_words_all):\n raise RuntimeError(\"choice {} exceed the number of verbalizers {}\"\n .format(choice, len(label_words_all)))\n\n label_words = label_words_all[choice]\n label_words = [label_words_per_label.strip().split(\",\") \\\n for label_words_per_label in label_words]\n \n elif path.endswith(\".jsonl\") or path.endswith(\".json\"):\n with open(path, \"r\") as f:\n label_words_all = json.load(f)\n # if it is a file containing multiple verbalizers\n if isinstance(label_words_all, list):\n if choice >= len(label_words_all):\n raise RuntimeError(\"choice {} exceed the number of verbalizers {}\"\n .format(choice, len(label_words_all)))\n label_words = label_words_all[choice]\n elif isinstance(label_words_all, dict):\n label_words = label_words_all\n if choice>0:\n logger.warning(\"Choice of verbalizer is 1, but the file \\\n only contains one verbalizer.\")\n \n self.label_words = label_words\n if self.num_classes is not None:\n num_classes = len(self.label_words)\n assert num_classes==self.num_classes, 'number of classes in the verbalizer file\\\n does not match the predefined num_classes.'\n return self\n\n"
] | [
[
"torch.nn.Parameter",
"torch.log",
"torch.tensor"
],
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ascend/modelzoo | [
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509",
"f018cfed33dbb1cc2110b9ea2e233333f71cc509"
] | [
"built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/timm/optim/radam.py",
"built-in/ACL_TensorFlow/Official/recommendation/DCN_for_ACL/scripts/eval.py",
"built-in/ACL_TensorFlow/Official/recommendation/KGAT_for_ACL/Model/offline_inference/xacl_inference.py",
"built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/tools/publish_model.py",
"built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/mmcv_need/distributed.py",
"built-in/PyTorch/Official/cv/scene_text_detection/PSENet_for_PyTorch/NPU/test/test_npu.py",
"contrib/PyTorch/Official/cv/image_classification/SPNASNet_100_for_PyTorch/timm/models/layers/test_time_pool.py",
"built-in/TensorFlow/Official/cv/image_classification/Resnet50v1.5_for_TensorFlow/01-generalization/01-inference/02-offlineInfer/resnet50tf_for_ACL/script/cv2bin.py",
"built-in/TensorFlow/Official/cv/detection/SSD-Resnet34_for_TensorFlow/dataloader.py",
"contrib/TensorFlow/Official/cv/east/EAST_ID0238_for_TensorFlow/npu_train.py",
"built-in/MindSpore/Official/nlp/TinyBERT_for_MindSpore/src/tinybert_model.py",
"built-in/ACL_TensorFlow/Official/cv/YOLOv2_for_ACL/scripts/yolov2_postprocess/script/Main.py",
"built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch_Dynamic_Shape/detectron2/export/caffe2_inference.py",
"built-in/PyTorch/Official/cv/semantic_segmentation/AttU_Net_for_PyTorch/solver.py",
"built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1c_for_PyTorch/timm/optim/adamw.py",
"built-in/PyTorch/Official/cv/semantic_segmentation/DeepLabv3+_ID1695_for_PyTorch/dataloaders/utils.py",
"built-in/TensorFlow/Official/cv/detection/CRNN_for_TensorFlow/local_utils/evaluation_tools.py",
"built-in/PyTorch/Official/cv/image_object_detection/YOLOV4_ID0396_for_PyTorch/train.py",
"built-in/MindSpore/Official/ocr/CRNN_for_MindSpore/infer/sdk/compute_crnn_sdk_opencv.py",
"built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1c_for_PyTorch/timm/data/tf_preprocessing.py",
"built-in/PyTorch/Official/cv/image_classification/DenseNet169_ID0454_for_Pytorch/torchvision/models/detection/mask_rcnn.py",
"built-in/ACL_TensorFlow/Official/cv/3DUNET_for_ACL/scripts/transforms.py",
"built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch_Dynamic_Shape/detectron2/engine/train_loop.py",
"contrib/PyTorch/Official/cv/image_classification/SPNASNet_100_for_PyTorch/pthtar2onnx.py",
"built-in/PyTorch/Official/cv/image_classification/MobileNetV2_for_PyTorch/train/modelarts/train_start.py",
"built-in/TensorFlow/Official/cv/detection/SSD-Resnet34_for_TensorFlow/infer_from_pb.py",
"built-in/TensorFlow/Official/cv/image_classification/MobileNetV2_for_TensorFlow/nets/resnet_v2.py",
"contrib/PyTorch/Official/cv/image_object_detection/RetinaNet/tests/test_model_analysis.py",
"built-in/TensorFlow/Official/cv/image_classification/MobileNetV1_for_TensorFlow/infer_from_pb.py",
"built-in/TensorFlow/Official/nlp/ALBERT-lcqmc-ZH_ID1461_for_TensorFlow/optimization.py",
"built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch_Dynamic_Shape/detectron2/evaluation/fast_eval_api.py",
"built-in/TensorFlow/Official/cv/image_classification/MobileNetV2_for_TensorFlow/datasets/cifar10.py",
"built-in/PyTorch/Official/cv/image_classification/ResNet50_for_PyTorch/DistributedResnet50/image_classification/mixup.py",
"built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch/detectron2/utils/env.py",
"built-in/ACL_PyTorch/Official/cv/Yolov3_for_Pytorch/preprocess_yolov3_pytorch.py",
"built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch_Dynamic_Shape/detectron2/modeling/roi_heads/box_head.py",
"built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/export.py",
"built-in/TensorFlow/Official/cv/image_classification/InceptionV3_ID0491_for_TensorFlow/net/GoogLeNet/InceptionV3.py",
"built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/timm/models/pnasnet.py",
"built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/mmdet/models/losses/cross_entropy_loss.py",
"contrib/PyTorch/Official/cv/image_classification/WideResNet101_2_for_Pytorch/main_npu_1p.py",
"built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/timm/data/parsers/parser_tfds.py",
"built-in/TensorFlow/Official/nlp/BertLarge_ID0634_for_TensorFlow2.X/bert/bert_models.py",
"built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/timm/optim/nvnovograd.py",
"built-in/MindSpore/Official/nlp/TinyBERT_for_MindSpore/src/assessment_method.py",
"built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/mmdet/models/roi_heads/bbox_heads/sabl_head.py",
"built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1c_for_PyTorch/timm/utils/clip_grad.py",
"built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/tests/async_benchmark.py",
"built-in/TensorFlow/Official/cv/image_classification/MobileNetV2_for_TensorFlow/nets/mobilenet_v1_test.py",
"built-in/TensorFlow/Official/cv/image_classification/ResNext50_for_TensorFlow/code/resnext50_train/configs/res50_32bs_1p_host.py",
"built-in/PyTorch/Official/cv/image_object_detection/RetinaNet_for_PyTorch/mmcv_need/dist_utils.py",
"built-in/PyTorch/Official/nlp/Transformer_for_PyTorch/data/data_utils.py",
"built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/mmdet/models/dense_heads/fcos_head.py",
"contrib/PyTorch/Official/cv/image_classification/SPNASNet_100_for_PyTorch/timm/data/transforms_factory.py",
"built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/mmdet/models/losses/ae_loss.py",
"contrib/PyTorch/Official/cv/image_object_detection/RetinaNet/detectron2/modeling/postprocessing.py"
] | [
"# Copyright [yyyy] [name of copyright owner]\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"RAdam Optimizer.\nImplementation lifted from: https://github.com/LiyuanLucasLiu/RAdam\nPaper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265\n\"\"\"\nimport math\nimport torch\nfrom torch.optim.optimizer import Optimizer, required\n\n\nclass RAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n self.buffer = [[None, None, None] for ind in range(10)]\n super(RAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n buffered = self.buffer[int(state['step'] % 10)]\n if state['step'] == buffered[0]:\n N_sma, step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = group['lr'] * math.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n N_sma_max - 2)) / (1 - beta1 ** state['step'])\n else:\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n buffered[2] = step_size\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n else:\n p_data_fp32.add_(-step_size, exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n\n\nclass PlainRAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n\n super(PlainRAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(PlainRAdam, self).__setstate__(state)\n\n 
def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = group['lr'] * math.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n N_sma_max - 2)) / (1 - beta1 ** state['step'])\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n else:\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n p_data_fp32.add_(-step_size, exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom sklearn.metrics import average_precision_score, roc_auc_score\nimport numpy as np\nimport sys\n\ndef aucPerformance(mse, labels):\n \"\"\"\n :param mse:\n :param labels:\n :return:\n \"\"\"\n roc_auc = roc_auc_score(labels, mse)\n ap = average_precision_score(labels, mse)\n print(\"AUC-ROC: %.4f, AUC-PR: %.4f\" % (roc_auc, ap))\n return roc_auc, ap\n\n\ndef eval_om(label_dir, om_output_dir):\n \"\"\"\n :param label_dir:\n :param om_output_dir:\n :return:\n \"\"\"\n label, score = read_directory(label_dir, om_output_dir)\n aucPerformance(score, label)\n\n\ndef read_directory(label_dir, om_output_dir):\n \"\"\"\n :param label_dir:\n :param om_output_dir:\n :return:\n \"\"\"\n # get label bin files\n labels = os.listdir(label_dir)\n labels.sort()\n labels_data = list()\n\n # get om output files\n outputs = os.listdir(om_output_dir)\n outputs.sort()\n outputs_data = list()\n\n for i in range(len(outputs)):\n label_data = np.fromfile(os.path.join(label_dir, labels[i]), dtype=np.int32)\n labels_data.extend(label_data)\n output_data = np.fromfile(os.path.join(om_output_dir, outputs[i]), dtype=np.float32)\n outputs_data.extend(output_data)\n return labels_data, outputs_data\n\ngt_dir = sys.argv[1]\npredict_dir = sys.argv[2]\neval_om(gt_dir, predict_dir)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport numpy as np\nimport multiprocessing\nimport sys\nsys.path.append('./')\nfrom utility.batch_test import cores, Ks, BATCH_SIZE, ITEM_NUM, data_generator, test_one_user\nfrom utility.parser import parse_args\n\n\ndef xaclPath(output_path, inference_path, model_path):\n \"\"\"\n 使用文件夹推理\n \"\"\"\n if os.path.isdir(inference_path):\n os.system(\"rm -rf \" + inference_path)\n os.makedirs(inference_path)\n output_path = output_path if output_path[-1] == \"/\" else output_path + \"/\"\n output_path_lst = [output_path + \"input1\", output_path + \"input2\", output_path + \"input3\"]\n output_paths = ','.join(output_path_lst)\n print(\"xacl_fmk -m \" + model_path + \" -i \" + output_paths +\n \" -o \" + inference_path + '/kgat_output_bin')\n os.system(\"xacl_fmk -m \" + model_path + \" -i \" +\n output_paths + \" -o \" + inference_path + '/kgat_output_bin')\n print(inference_path)\n print(\"[INFO] 推理结果生成结束\")\n\n\ndef inference_files(inference_path):\n result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),\n 'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}\n\n pool = multiprocessing.Pool(cores)\n\n u_batch_size = BATCH_SIZE * 2\n\n users_to_test = list(data_generator.test_user_dict.keys())\n test_users = users_to_test\n n_test_users = len(test_users)\n\n files = sorted(os.listdir(inference_path))\n files = [inference_path + '/' + i for i in files]\n\n for u_batch_id, f in enumerate(files):\n if f.endswith(\".bin\"):\n rate_batch = np.fromfile(f, dtype='float32')\n rate_batch = rate_batch.reshape((-1, ITEM_NUM))\n\n start = u_batch_id * u_batch_size\n end = (u_batch_id + 1) * u_batch_size\n\n user_batch = test_users[start: end]\n\n user_batch_rating_uid = zip(rate_batch, user_batch)\n batch_result = pool.map(test_one_user, user_batch_rating_uid)\n\n for re in batch_result:\n result['precision'] += re['precision'] / n_test_users\n result['recall'] += re['recall'] / n_test_users\n result['ndcg'] += re['ndcg'] / n_test_users\n result['hit_ratio'] += re['hit_ratio'] / n_test_users\n result['auc'] += re['auc'] / n_test_users\n pool.close()\n print(result)\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n output_path = args.output_path\n inference_path = 
args.inference_path\n model_path = args.model_path\n\n xaclPath(output_path, inference_path, model_path)\n inference_files(inference_path)\n",
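A hypothetical direct invocation that bypasses `parse_args()`; all three paths below are placeholders for the real preprocessed inputs and the compiled offline model:

```python
# xaclPath wipes and recreates inference_path, runs the xacl_fmk tool on the
# three input directories, then inference_files scores the .bin outputs.
output_path = "./bin_inputs"      # must contain input1/, input2/, input3/
inference_path = "./om_outputs"   # recreated by xaclPath before inference
model_path = "./kgat.om"          # placeholder path to the compiled model

xaclPath(output_path, inference_path, model_path)
inference_files(inference_path)
```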
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport subprocess\n\nimport torch\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Process a checkpoint to be published')\n parser.add_argument('in_file', help='input checkpoint filename')\n parser.add_argument('out_file', help='output checkpoint filename')\n args = parser.parse_args()\n return args\n\n\ndef process_checkpoint(in_file, out_file):\n checkpoint = torch.load(in_file, map_location='cpu')\n # remove optimizer for smaller file size\n if 'optimizer' in checkpoint:\n del checkpoint['optimizer']\n # if it is necessary to remove some sensitive data in checkpoint['meta'],\n # add the code here.\n torch.save(checkpoint, out_file)\n sha = subprocess.check_output(['sha256sum', out_file]).decode()\n if out_file.endswith('.pth'):\n out_file_name = out_file[:-4]\n else:\n out_file_name = out_file\n final_file = out_file_name + f'-{sha[:8]}.pth'\n subprocess.Popen(['mv', out_file, final_file])\n\n\ndef main():\n args = parse_args()\n process_checkpoint(args.in_file, args.out_file)\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.nn.parallel.distributed import (DistributedDataParallel,\n _find_tensors)\n\nfrom mmcv import print_log\nfrom mmcv.utils import TORCH_VERSION\nfrom .scatter_gather import scatter_kwargs\n\n\nclass MMDistributedDataParallel(DistributedDataParallel):\n \"\"\"The DDP module that supports DataContainer.\n\n MMDDP has two main differences with PyTorch DDP:\n\n - It supports a custom type :class:`DataContainer` which allows more\n flexible control of input data.\n - It implement two APIs ``train_step()`` and ``val_step()``.\n \"\"\"\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)\n\n def train_step(self, *inputs, **kwargs):\n \"\"\"train_step() API for module wrapped by DistributedDataParallel.\n\n This method is basically the same as\n ``DistributedDataParallel.forward()``, while replacing\n ``self.module.forward()`` with ``self.module.train_step()``.\n It is compatible with PyTorch 1.1 - 1.5.\n \"\"\"\n\n # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the\n # end of backward to the beginning of forward.\n if (TORCH_VERSION >= '1.7' and 'parrots'\n not in TORCH_VERSION) and self.reducer._rebuild_buckets():\n print_log(\n 'Reducer buckets have been rebuilt in this iteration.',\n logger='mmcv')\n\n if getattr(self, 'require_forward_param_sync', True):\n self._sync_params()\n if self.device_ids and False:\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n output = self.module.train_step(*inputs[0], **kwargs[0])\n else:\n outputs = self.parallel_apply(\n self._module_copies[:len(inputs)], inputs, kwargs)\n output = self.gather(outputs, self.output_device)\n else:\n inputs, kwargs = self.scatter(inputs, kwargs, [-1])\n output = self.module.train_step(*inputs[0], **kwargs[0])\n\n if torch.is_grad_enabled() and getattr(\n self, 'require_backward_grad_sync', True):\n if self.find_unused_parameters:\n self.reducer.prepare_for_backward(list(_find_tensors(output)))\n else:\n self.reducer.prepare_for_backward([])\n else:\n if TORCH_VERSION > '1.2':\n self.require_forward_param_sync = False\n return output\n\n def val_step(self, *inputs, **kwargs):\n \"\"\"val_step() API for module wrapped by DistributedDataParallel.\n\n This method is basically the same as\n ``DistributedDataParallel.forward()``, while replacing\n ``self.module.forward()`` with ``self.module.val_step()``.\n It is compatible with PyTorch 1.1 - 1.5.\n \"\"\"\n # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the\n # end of backward to the beginning of forward.\n if (TORCH_VERSION >= '1.7' and 'parrots'\n not in TORCH_VERSION) and self.reducer._rebuild_buckets():\n print_log(\n 'Reducer buckets have been rebuilt in this iteration.',\n logger='mmcv')\n\n if getattr(self, 'require_forward_param_sync', True):\n self._sync_params()\n if self.device_ids:\n inputs, kwargs = 
self.scatter(inputs, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n output = self.module.val_step(*inputs[0], **kwargs[0])\n else:\n outputs = self.parallel_apply(\n self._module_copies[:len(inputs)], inputs, kwargs)\n output = self.gather(outputs, self.output_device)\n else:\n output = self.module.val_step(*inputs, **kwargs)\n\n if torch.is_grad_enabled() and getattr(\n self, 'require_backward_grad_sync', True):\n if self.find_unused_parameters:\n self.reducer.prepare_for_backward(list(_find_tensors(output)))\n else:\n self.reducer.prepare_for_backward([])\n else:\n if TORCH_VERSION > '1.2':\n self.require_forward_param_sync = False\n return output\n",
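A sketch of how a runner would drive this wrapper, assuming `torch.distributed` is already initialized and that `model` follows the mmcv convention of `train_step(data_batch, optimizer)` returning a dict with a `'loss'` tensor; `model`, `data_loader` and `optimizer` are placeholders, not defined in this file:

```python
# Wrap the model once, then call train_step instead of forward each iteration.
ddp_model = MMDistributedDataParallel(
    model.cuda(), device_ids=[torch.cuda.current_device()])
for data_batch in data_loader:
    outputs = ddp_model.train_step(data_batch, optimizer)
    optimizer.zero_grad()
    outputs['loss'].backward()
    optimizer.step()
```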
"# Copyright [yyyy] [name of copyright owner]\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport collections\nimport os\nimport sys\nimport time\n\nimport cv2\nimport numpy as np\nimport torch\nfrom torch.utils import data\n\nimport models\nimport util\nfrom dataset import IC15TestLoader\n# c++ version pse based on opencv 3+\n# from pse import pse\n# python pse\nfrom pypse import pse as pypse\n\n\ndef extend_3c(img):\n img = img.reshape(img.shape[0], img.shape[1], 1)\n img = np.concatenate((img, img, img), axis=2)\n return img\n\n\ndef debug(idx, img_paths, imgs, output_root):\n if not os.path.exists(output_root):\n os.makedirs(output_root)\n\n col = []\n for i in range(len(imgs)):\n row = []\n for j in range(len(imgs[i])):\n # img = cv2.copyMakeBorder(imgs[i][j], 3, 3, 3, 3, cv2.BORDER_CONSTANT, value=[255, 0, 0])\n row.append(imgs[i][j])\n res = np.concatenate(row, axis=1)\n col.append(res)\n res = np.concatenate(col, axis=0)\n img_name = img_paths[idx].split('/')[-1]\n print(idx, '/', len(img_paths), img_name)\n cv2.imwrite(output_root + img_name, res)\n\n\ndef write_result_as_txt(image_name, bboxes, path):\n if not os.path.exists(path):\n os.mkdir(path)\n filename = util.io.join_path(path, 'res_%s.txt' % (image_name))\n lines = []\n for b_idx, bbox in enumerate(bboxes):\n values = [int(v) for v in bbox]\n line = \"%d, %d, %d, %d, %d, %d, %d, %d\\n\" % tuple(values)\n lines.append(line)\n util.io.write_lines(filename, lines)\n\n\ndef polygon_from_points(points):\n \"\"\"\n Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4\n \"\"\"\n resBoxes = np.empty([1, 8], dtype='int32')\n resBoxes[0, 0] = int(points[0])\n resBoxes[0, 4] = int(points[1])\n resBoxes[0, 1] = int(points[2])\n resBoxes[0, 5] = int(points[3])\n resBoxes[0, 2] = int(points[4])\n resBoxes[0, 6] = int(points[5])\n resBoxes[0, 3] = int(points[6])\n resBoxes[0, 7] = int(points[7])\n pointMat = resBoxes[0].reshape([2, 4]).T\n return plg.Polygon(pointMat)\n\n\[email protected]_grad()\ndef test(args):\n data_loader = IC15TestLoader(long_size=args.long_size, datadir=args.data_dir)\n test_loader = torch.utils.data.DataLoader(\n data_loader,\n batch_size=1,\n shuffle=False,\n num_workers=1,\n drop_last=True)\n\n # Setup Model\n if args.arch == \"resnet50\":\n model = models.resnet50(pretrained=True, num_classes=7, scale=args.scale)\n elif args.arch == \"resnet101\":\n model = models.resnet101(pretrained=True, num_classes=7, scale=args.scale)\n elif args.arch == \"resnet152\":\n model = models.resnet152(pretrained=True, num_classes=7, scale=args.scale)\n\n for param in model.parameters():\n param.requires_grad = False\n\n model = model.to(CALCULATE_DEVICE)\n\n if args.resume is not None:\n if os.path.isfile(args.resume):\n print(\"Loading model and optimizer from checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume, map_location=CALCULATE_DEVICE)\n\n # 
model.load_state_dict(checkpoint['state_dict'])\n d = collections.OrderedDict()\n for key, value in checkpoint['state_dict'].items():\n if key.startswith('module.'):\n tmp = key[7:]\n else:\n tmp = key\n d[tmp] = value\n model.load_state_dict(d)\n\n print(\"Loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n sys.stdout.flush()\n else:\n print(\"No checkpoint found at '{}'\".format(args.resume))\n sys.stdout.flush()\n\n model.eval()\n total_frame = 0.0\n total_time = 0.0\n for idx, (org_img, img) in enumerate(test_loader):\n print('progress: %d / %d' % (idx, len(test_loader)))\n sys.stdout.flush()\n\n img = img.to(CALCULATE_DEVICE)\n org_img = org_img.numpy().astype('uint8')[0]\n text_box = org_img.copy()\n\n torch.npu.synchronize()\n start = time.time()\n outputs = model(img)\n\n score = torch.sigmoid(outputs[:, 0, :, :])\n outputs = (torch.sign(outputs - args.binary_th) + 1) / 2\n\n text = outputs[:, 0, :, :]\n kernels = outputs[:, 0:args.kernel_num, :, :] * text\n\n score = score.data.cpu().numpy()[0].astype(np.float32)\n text = text.data.cpu().numpy()[0].astype(np.uint8)\n kernels = kernels.data.cpu().numpy()[0].astype(np.uint8)\n\n # c++ version pse\n # pred = pse(kernels, args.min_kernel_area / (args.scale * args.scale))\n # python version pse\n pred = pypse(kernels, args.min_kernel_area / (args.scale * args.scale))\n print(pred.shape)\n\n # scale = (org_img.shape[0] * 1.0 / pred.shape[0], org_img.shape[1] * 1.0 / pred.shape[1])\n scale = (org_img.shape[1] * 1.0 / pred.shape[1], org_img.shape[0] * 1.0 / pred.shape[0])\n label = pred\n label_num = np.max(label) + 1\n bboxes = []\n for i in range(1, label_num):\n points = np.array(np.where(label == i)).transpose((1, 0))[:, ::-1]\n\n if points.shape[0] < args.min_area / (args.scale * args.scale):\n continue\n\n score_i = np.mean(score[label == i])\n if score_i < args.min_score:\n continue\n\n rect = cv2.minAreaRect(points)\n bbox = cv2.boxPoints(rect) * scale\n bbox = bbox.astype('int32')\n bboxes.append(bbox.reshape(-1))\n\n torch.npu.synchronize()\n end = time.time()\n total_frame += 1\n total_time += (end - start)\n print('fps: %.2f' % (total_frame / total_time))\n sys.stdout.flush()\n\n for bbox in bboxes:\n cv2.drawContours(text_box, [bbox.reshape(4, 2)], -1, (0, 255, 0), 2)\n\n image_name = data_loader.img_paths[idx].split('/')[-1].split('.')[0]\n write_result_as_txt(image_name, bboxes, 'outputs/' + args.output_file)\n\n # text_box = cv2.resize(text_box, (text.shape[1], text.shape[0]))\n # debug(idx, data_loader.img_paths, [[text_box]], 'outputs/vis_ic15/')\n\n cmd = 'cd %s;zip -j %s %s/*' % ('./outputs/', args.output_file + '.zip', args.output_file)\n print(cmd)\n sys.stdout.flush()\n util.cmd.cmd(cmd)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Hyperparams')\n parser.add_argument('--arch', nargs='?', type=str, default='resnet50')\n parser.add_argument('--resume', nargs='?', type=str, default=None,\n help='Path to previous saved model to restart from')\n parser.add_argument('--binary_th', nargs='?', type=float, default=1.0,\n help='Path to previous saved model to restart from')\n parser.add_argument('--kernel_num', nargs='?', type=int, default=7,\n help='Path to previous saved model to restart from')\n parser.add_argument('--scale', nargs='?', type=int, default=1,\n help='Path to previous saved model to restart from')\n parser.add_argument('--long_size', nargs='?', type=int, default=2240,\n help='Path to previous saved model to restart from')\n 
parser.add_argument('--min_kernel_area', nargs='?', type=float, default=5.0,\n help='min kernel area')\n parser.add_argument('--min_area', nargs='?', type=float, default=800.0,\n help='min area')\n parser.add_argument('--min_score', nargs='?', type=float, default=0.93,\n help='min score')\n parser.add_argument('--npu', default=None, type=int,\n help='NPU id to use.')\n parser.add_argument('--data_dir', default='./data/ICDAR/Challenge/', type=str)\n parser.add_argument('--output_file', default='submit_ic15', type=str)\n args = parser.parse_args()\n CALCULATE_DEVICE = f\"npu:{args.npu}\"\n PRINT_DEVICE = \"cpu\"\n torch.npu.set_device(CALCULATE_DEVICE)\n test(args)\n",
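An illustrative example of the submission format produced by `write_result_as_txt`: one comma-separated line of 8 integer corner coordinates (x1,y1,...,x4,y4) per detected box; the box below is made up:

```python
import numpy as np

bboxes = [np.array([10, 20, 110, 20, 110, 60, 10, 60])]
write_result_as_txt('img_1', bboxes, 'outputs/submit_ic15/')
# -> outputs/submit_ic15/res_img_1.txt containing:
#    10, 20, 110, 20, 110, 60, 10, 60
```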
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" Test Time Pooling (Average-Max Pool)\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\n\nimport logging\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom .adaptive_avgmax_pool import adaptive_avgmax_pool2d\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass TestTimePoolHead(nn.Module):\n def __init__(self, base, original_pool=7):\n super(TestTimePoolHead, self).__init__()\n self.base = base\n self.original_pool = original_pool\n base_fc = self.base.get_classifier()\n if isinstance(base_fc, nn.Conv2d):\n self.fc = base_fc\n else:\n self.fc = nn.Conv2d(\n self.base.num_features, self.base.num_classes, kernel_size=1, bias=True)\n self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size()))\n self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size()))\n self.base.reset_classifier(0) # delete original fc layer\n\n def forward(self, x):\n x = self.base.forward_features(x)\n x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1)\n x = self.fc(x)\n x = adaptive_avgmax_pool2d(x, 1)\n return x.view(x.size(0), -1)\n\n\ndef apply_test_time_pool(model, config, use_test_size=True):\n test_time_pool = False\n if not hasattr(model, 'default_cfg') or not model.default_cfg:\n return model, False\n if use_test_size and 'test_input_size' in model.default_cfg:\n df_input_size = model.default_cfg['test_input_size']\n else:\n df_input_size = model.default_cfg['input_size']\n if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]:\n _logger.info('Target input size %s > pretrained default %s, using test time pooling' %\n (str(config['input_size'][-2:]), str(df_input_size[-2:])))\n model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size'])\n test_time_pool = True\n return model, test_time_pool\n",
"import os\nimport sys\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nimage_root = r'./image-50000'\nresize_min = 256\n\ndef trans():\n images = os.listdir(image_root)\n for image_name in images:\n if image_name.endswith(\"txt\"):\n continue\n # image_name = \"20180522135150.jpg\"\n print(\"the image name is {}....\".format(image_name))\n image_path = os.path.join(image_root, image_name)\n # image = read_image(image_path, 1)\n # img = Image.open(image_path)\n img = cv2.imread(image_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n '''\n img = img.astype(np.float32)\n cv2.normalize(img, img, 0, 1, cv2.NORM_MINMAX)\n '''\n # height, width = img.size\n height = img.shape[1]\n width = img.shape[0]\n # print('=====',height,width)\n smaller_dim = np.minimum(height, width)\n scale_ratio = resize_min / smaller_dim\n new_height = int(height * scale_ratio)\n new_width = int(width * scale_ratio)\n # img = img.resize((new_height, new_width)) ##\n\n img = cv2.resize(img, (new_height, new_width))\n\n img = np.array(img)\n if len(img.shape) != 3:\n continue\n height, width, c = img.shape\n crop_height = crop_width = 224\n amount_to_be_cropped_h = (height - crop_height)\n crop_top = amount_to_be_cropped_h // 2\n amount_to_be_cropped_w = (width - crop_width)\n crop_left = amount_to_be_cropped_w // 2\n img = img[crop_top:crop_top + crop_height, crop_left:crop_left + crop_width] ##[y0:y1,x0:x1]\n\n #img = np.array(img,dtype=np.float32)[np.newaxis, :, :, :]\n # means = [103.939, 116.779,123.68 ]\n means = [123.68, 116.779, 103.939]\n #means = np.array(means, dtype=np.float32)\n img = img - means \n img = np.array(img,dtype=np.float32)[np.newaxis, :, :, :]\n #print('===',img.shape)\n #print('===',img.size)\n\n\n img.tofile('./cv2bin-50000-xzm/{}.bin'.format(image_name))\n # print(image)\n # print(image.dtype)\n # print(image.shape)\ntrans()\n",
"# Copyright 2018 Google. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Data loader and processing.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools as it\nimport math\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom object_detection import argmax_matcher\nfrom object_detection import box_list\nfrom object_detection import faster_rcnn_box_coder\nfrom object_detection import preprocessor\nfrom object_detection import region_similarity_calculator\nfrom object_detection import target_assigner\nfrom object_detection import tf_example_decoder\nimport ssd_constants\n\n\ndef get_rank_size():\n return int(os.environ['RANK_SIZE'])\n\ndef get_rank_id():\n return int(os.environ['DEVICE_ID'])\n\nclass DefaultBoxes(object):\n \"\"\"Default bounding boxes for 300x300 5 layer SSD.\n\n Default bounding boxes generation follows the order of (W, H, anchor_sizes).\n Therefore, the tensor converted from DefaultBoxes has a shape of\n [anchor_sizes, H, W, 4]. The last dimension is the box coordinates; 'ltrb'\n is [ymin, xmin, ymax, xmax] while 'xywh' is [cy, cx, h, w].\n \"\"\"\n\n def __init__(self):\n fk = ssd_constants.IMAGE_SIZE / np.array(ssd_constants.STEPS)\n\n self.default_boxes = []\n # size of feature and number of feature\n for idx, feature_size in enumerate(ssd_constants.FEATURE_SIZES):\n sk1 = ssd_constants.SCALES[idx] / ssd_constants.IMAGE_SIZE\n sk2 = ssd_constants.SCALES[idx+1] / ssd_constants.IMAGE_SIZE\n sk3 = math.sqrt(sk1*sk2)\n all_sizes = [(sk1, sk1), (sk3, sk3)]\n\n for alpha in ssd_constants.ASPECT_RATIOS[idx]:\n w, h = sk1 * math.sqrt(alpha), sk1 / math.sqrt(alpha)\n all_sizes.append((w, h))\n all_sizes.append((h, w))\n\n assert len(all_sizes) == ssd_constants.NUM_DEFAULTS[idx]\n\n for i, j in it.product(range(feature_size), repeat=2):\n for w, h in all_sizes:\n cx, cy = (j + 0.5) / fk[idx], (i + 0.5) / fk[idx]\n box = tuple(np.clip(k, 0, 1) for k in (cy, cx, h, w))\n self.default_boxes.append(box)\n\n assert len(self.default_boxes) == ssd_constants.NUM_SSD_BOXES\n\n def to_ltrb(cy, cx, h, w):\n return cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2\n\n # For IoU calculation\n self.default_boxes_ltrb = tuple(to_ltrb(*i) for i in self.default_boxes)\n\n def __call__(self, order='ltrb'):\n if order == 'ltrb': return self.default_boxes_ltrb\n if order == 'xywh': return self.default_boxes\n\n\ndef calc_iou_tensor(box1, box2):\n \"\"\" Calculation of IoU based on two boxes tensor,\n Reference to https://github.com/kuangliu/pytorch-ssd\n input:\n box1 (N, 4)\n box2 (M, 4)\n output:\n IoU (N, M)\n \"\"\"\n N = tf.shape(box1)[0]\n M = tf.shape(box2)[0]\n\n be1 = tf.tile(tf.expand_dims(box1, axis=1), (1, M, 1))\n be2 = tf.tile(tf.expand_dims(box2, axis=0), (N, 1, 1))\n\n # Left Top & Right Bottom\n lt = tf.maximum(be1[:,:,:2], be2[:,:,:2])\n\n rb = tf.minimum(be1[:,:,2:], 
be2[:,:,2:])\n\n delta = tf.maximum(rb - lt, 0)\n\n intersect = delta[:,:,0]*delta[:,:,1]\n\n delta1 = be1[:,:,2:] - be1[:,:,:2]\n area1 = delta1[:,:,0]*delta1[:,:,1]\n delta2 = be2[:,:,2:] - be2[:,:,:2]\n area2 = delta2[:,:,0]*delta2[:,:,1]\n\n iou = intersect/(area1 + area2 - intersect)\n return iou\n\n\ndef ssd_crop(image, boxes, classes):\n \"\"\"IoU biassed random crop.\n\n Reference: https://github.com/chauhan-utk/ssd.DomainAdaptation\n \"\"\"\n\n num_boxes = tf.shape(boxes)[0]\n\n def no_crop_check():\n return (tf.random_uniform(shape=(), minval=0, maxval=1, dtype=tf.float32)\n < ssd_constants.P_NO_CROP_PER_PASS)\n\n def no_crop_proposal():\n return (\n tf.ones((), tf.bool),\n tf.convert_to_tensor([0, 0, 1, 1], dtype=tf.float32),\n tf.ones((num_boxes,), tf.bool),\n )\n\n def crop_proposal():\n rand_vec = lambda minval, maxval: tf.random_uniform(\n shape=(ssd_constants.NUM_CROP_PASSES, 1), minval=minval, maxval=maxval,\n dtype=tf.float32)\n\n width, height = rand_vec(0.3, 1), rand_vec(0.3, 1)\n left, top = rand_vec(0, 1-width), rand_vec(0, 1-height)\n\n right = left + width\n bottom = top + height\n\n ltrb = tf.concat([left, top, right, bottom], axis=1)\n\n min_iou = tf.random_shuffle(ssd_constants.CROP_MIN_IOU_CHOICES)[0]\n ious = calc_iou_tensor(ltrb, boxes)\n\n # discard any bboxes whose center not in the cropped image\n xc, yc = [tf.tile(0.5 * (boxes[:, i + 0] + boxes[:, i + 2])[tf.newaxis, :],\n (ssd_constants.NUM_CROP_PASSES, 1)) for i in range(2)]\n\n masks = tf.reduce_all(tf.stack([\n tf.greater(xc, tf.tile(left, (1, num_boxes))),\n tf.less(xc, tf.tile(right, (1, num_boxes))),\n tf.greater(yc, tf.tile(top, (1, num_boxes))),\n tf.less(yc, tf.tile(bottom, (1, num_boxes))),\n ], axis=2), axis=2)\n\n # Checks of whether a crop is valid.\n valid_aspect = tf.logical_and(tf.less(height/width, 2),\n tf.less(width/height, 2))\n valid_ious = tf.reduce_all(tf.greater(ious, min_iou), axis=1, keepdims=True)\n valid_masks = tf.reduce_any(masks, axis=1, keepdims=True)\n\n valid_all = tf.cast(tf.reduce_all(tf.concat(\n [valid_aspect, valid_ious, valid_masks], axis=1), axis=1), tf.int32)\n\n # One indexed, as zero is needed for the case of no matches.\n index = tf.range(1, 1 + ssd_constants.NUM_CROP_PASSES, dtype=tf.int32)\n\n # Either one-hot, or zeros if there is no valid crop.\n selection = tf.equal(tf.reduce_max(index * valid_all), index)\n\n use_crop = tf.reduce_any(selection)\n output_ltrb = tf.reduce_sum(tf.multiply(ltrb, tf.tile(tf.cast(\n selection, tf.float32)[:, tf.newaxis], (1, 4))), axis=0)\n output_masks = tf.reduce_any(tf.logical_and(masks, tf.tile(\n selection[:, tf.newaxis], (1, num_boxes))), axis=0)\n\n return use_crop, output_ltrb, output_masks\n\n def proposal(*args):\n return tf.cond(\n pred=no_crop_check(),\n true_fn=no_crop_proposal,\n false_fn=crop_proposal,\n )\n\n _, crop_bounds, box_masks = tf.while_loop(\n cond=lambda x, *_: tf.logical_not(x),\n body=proposal,\n loop_vars=[tf.zeros((), tf.bool), tf.zeros((4,), tf.float32), tf.zeros((num_boxes,), tf.bool)],\n )\n\n filtered_boxes = tf.boolean_mask(boxes, box_masks, axis=0)\n\n # Clip boxes to the cropped region.\n filtered_boxes = tf.stack([\n tf.maximum(filtered_boxes[:, 0], crop_bounds[0]),\n tf.maximum(filtered_boxes[:, 1], crop_bounds[1]),\n tf.minimum(filtered_boxes[:, 2], crop_bounds[2]),\n tf.minimum(filtered_boxes[:, 3], crop_bounds[3]),\n ], axis=1)\n\n left = crop_bounds[0]\n top = crop_bounds[1]\n width = crop_bounds[2] - left\n height = crop_bounds[3] - top\n\n cropped_boxes = tf.stack([\n 
(filtered_boxes[:, 0] - left) / width,\n (filtered_boxes[:, 1] - top) / height,\n (filtered_boxes[:, 2] - left) / width,\n (filtered_boxes[:, 3] - top) / height,\n ], axis=1)\n\n cropped_image = tf.image.crop_and_resize(\n image=image[tf.newaxis, :, :, :],\n boxes=crop_bounds[tf.newaxis, :],\n box_ind=tf.zeros((1,), tf.int32),\n crop_size=(ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE),\n )[0, :, :, :]\n\n cropped_classes = tf.boolean_mask(classes, box_masks, axis=0)\n\n return cropped_image, cropped_boxes, cropped_classes\n\n\ndef color_jitter(image, brightness=0, contrast=0, saturation=0, hue=0):\n \"\"\"Distorts the color of the image.\n\n Args:\n image: The input image tensor.\n brightness: A float, specifying the brightness for color jitter.\n contrast: A float, specifying the contrast for color jitter.\n saturation: A float, specifying the saturation for color jitter.\n hue: A float, specifying the hue for color jitter.\n\n Returns:\n The distorted image tensor.\n \"\"\"\n with tf.name_scope('distort_color'):\n if brightness > 0:\n image = tf.image.random_brightness(image, max_delta=brightness)\n if contrast > 0:\n image = tf.image.random_contrast(\n image, lower=1-contrast, upper=1+contrast)\n if saturation > 0:\n image = tf.image.random_saturation(\n image, lower=1-saturation, upper=1+saturation)\n if hue > 0:\n image = tf.image.random_hue(image, max_delta=hue)\n return image\n\n\ndef encode_labels(gt_boxes, gt_labels):\n \"\"\"Labels anchors with ground truth inputs.\n\n Args:\n gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.\n For each row, it stores [y0, x0, y1, x1] for four corners of a box.\n gt_labels: A integer tensor with shape [N, 1] representing groundtruth\n classes.\n Returns:\n encoded_classes: a tensor with shape [num_anchors, 1].\n encoded_boxes: a tensor with shape [num_anchors, 4].\n num_positives: scalar tensor storing number of positives in an image.\n \"\"\"\n similarity_calc = region_similarity_calculator.IouSimilarity()\n matcher = argmax_matcher.ArgMaxMatcher(\n matched_threshold=ssd_constants.MATCH_THRESHOLD,\n unmatched_threshold=ssd_constants.MATCH_THRESHOLD,\n negatives_lower_than_unmatched=True,\n force_match_for_each_row=True)\n\n box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(\n scale_factors=ssd_constants.BOX_CODER_SCALES)\n\n default_boxes = box_list.BoxList(tf.convert_to_tensor(DefaultBoxes()('ltrb')))\n target_boxes = box_list.BoxList(gt_boxes)\n\n assigner = target_assigner.TargetAssigner(\n similarity_calc, matcher, box_coder)\n\n encoded_classes, _, encoded_boxes, _, matches = assigner.assign(\n default_boxes, target_boxes, gt_labels)\n num_matched_boxes = tf.reduce_sum(\n tf.cast(tf.not_equal(matches.match_results, -1), tf.float32))\n return encoded_classes, encoded_boxes, num_matched_boxes\n\nclass SSDInputReader(object):\n \"\"\"Input reader for dataset.\"\"\"\n\n def __init__(self,\n file_pattern,\n transpose_input=False,\n is_training=False,\n distributed_eval=False,\n count=-1):\n self._file_pattern = file_pattern\n self._transpose_input = transpose_input\n self._is_training = is_training\n self._distributed_eval = distributed_eval\n self._count = count\n\n def __call__(self, params):\n example_decoder = tf_example_decoder.TfExampleDecoder()\n\n def _parse_example(data):\n with tf.name_scope('augmentation'):\n source_id = data['source_id']\n image = data['image'] # dtype uint8\n raw_shape = tf.shape(image)\n boxes = data['groundtruth_boxes']\n classes = tf.reshape(data['groundtruth_classes'], [-1, 
1])\n\n # Only 80 of the 90 COCO classes are used.\n class_map = tf.convert_to_tensor(ssd_constants.CLASS_MAP)\n classes = tf.gather(class_map, classes)\n classes = tf.cast(classes, dtype=tf.float32)\n\n if self._is_training:\n image, boxes, classes = ssd_crop(image, boxes, classes)\n # ssd_crop resizes and returns image of dtype float32 and does not\n # change its range (i.e., value in between 0--255). Divide by 255.\n # converts it to [0, 1] range. Not doing this before cropping to\n # avoid dtype cast (which incurs additional memory copy).\n image /= 255.0\n\n # random_horizontal_flip() is hard coded to flip with 50% chance.\n image, boxes = preprocessor.random_horizontal_flip(\n image=image, boxes=boxes)\n\n # TODO(shibow): Investigate the parameters for color jitter.\n image = color_jitter(\n image, brightness=0.125, contrast=0.5, saturation=0.5, hue=0.05)\n\n\n encoded_classes, encoded_boxes, num_matched_boxes = encode_labels(\n boxes, classes)\n\n # TODO(taylorrobie): Check that this cast is valid.\n encoded_classes = tf.cast(encoded_classes, tf.int32)\n\n labels = {\n ssd_constants.NUM_MATCHED_BOXES: num_matched_boxes,\n ssd_constants.BOXES: encoded_boxes,\n ssd_constants.CLASSES: tf.squeeze(encoded_classes, axis=1),\n }\n\n return image, labels\n\n else:\n image = tf.image.resize_images(\n image, size=(ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE))\n # resize_image returns image of dtype float32 and does not change its\n # range. Divide by 255 to convert image to [0, 1] range.\n image /= 255.\n\n def trim_and_pad(inp_tensor, dim_1):\n \"\"\"Limit the number of boxes, and pad if necessary.\"\"\"\n inp_tensor = inp_tensor[:ssd_constants.MAX_NUM_EVAL_BOXES]\n num_pad = ssd_constants.MAX_NUM_EVAL_BOXES - tf.shape(inp_tensor)[0]\n inp_tensor = tf.pad(inp_tensor, [[0, num_pad], [0, 0]])\n return tf.reshape(\n inp_tensor, [ssd_constants.MAX_NUM_EVAL_BOXES, dim_1])\n\n boxes, classes = trim_and_pad(boxes, 4), trim_and_pad(classes, 1)\n\n sample = {\n ssd_constants.IMAGE: image,\n ssd_constants.BOXES: boxes,\n ssd_constants.CLASSES: classes,\n ssd_constants.SOURCE_ID: tf.string_to_number(source_id, tf.int32),\n ssd_constants.RAW_SHAPE: raw_shape,\n }\n\n return sample\n\n batch_size = params['batch_size']\n dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False)\n\n if self._is_training or self._distributed_eval:\n if get_rank_size() == 1:\n dataset = dataset.shard(1, 0)\n else:\n dataset = dataset.shard(get_rank_size(), get_rank_id())\n if self._is_training:\n dataset = dataset.shuffle( tf.to_int64(256))\n\n # Prefetch data from files.\n def _prefetch_dataset(filename):\n dataset = tf.data.TFRecordDataset(filename).prefetch(1)\n return dataset\n dataset = dataset.apply(\n tf.data.experimental.parallel_interleave(\n _prefetch_dataset, cycle_length=32, sloppy=self._is_training))\n\n # Parse the fetched records to input tensors for model function.\n dataset = dataset.map(example_decoder.decode, num_parallel_calls=256)\n\n if self._is_training:\n dataset = dataset.map(\n # pylint: disable=g-long-lambda\n lambda data: (data,\n tf.greater(tf.shape(data['groundtruth_boxes'])[0], 0)),\n num_parallel_calls=256)\n dataset = dataset.filter(lambda data, pred: pred)\n\n dataset = dataset.shuffle(64).repeat()\n\n dataset = dataset.map(lambda data, pred: data) # use the first value\n dataset = dataset.map(_parse_example, num_parallel_calls=256)\n dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)\n else:\n dataset = dataset.prefetch(batch_size * 256)\n dataset = 
dataset.map(_parse_example, num_parallel_calls=256)\n dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)\n\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n options = tf.data.Options()\n options.experimental_threading.max_intra_op_parallelism = 1\n options.experimental_threading.private_threadpool_size = 32\n dataset = dataset.with_options(options)\n\n return dataset\n",
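A hypothetical instantiation of the reader (the file pattern and batch size are placeholders); it follows the estimator-style input_fn contract of being called with a `params` dict:

```python
train_input_fn = SSDInputReader(
    file_pattern='/data/coco/train-*-of-*.tfrecord',
    is_training=True)
dataset = train_input_fn({'batch_size': 32})
# `dataset` yields (image, labels) batches where labels holds the encoded
# boxes, classes and the number of matched default boxes per image.
```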
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport random\nfrom tensorflow.contrib import slim\n\nfrom npu_bridge.estimator import npu_ops\nfrom tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig\n\ntf.app.flags.DEFINE_integer('input_size', 512, '')\ntf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '')\ntf.app.flags.DEFINE_integer('num_readers', 16, '')\ntf.app.flags.DEFINE_float('learning_rate', 0.0001, '')\ntf.app.flags.DEFINE_integer('max_steps', 100000, '')\ntf.app.flags.DEFINE_integer('loss_scale', 1024, '')\ntf.app.flags.DEFINE_float('moving_average_decay', 0.997, '')\ntf.app.flags.DEFINE_string('gpu_list', '1', '')\ntf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '')\ntf.app.flags.DEFINE_boolean('restore', False, 'whether to resotre from checkpoint')\ntf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')\ntf.app.flags.DEFINE_integer('save_summary_steps', 100, '')\ntf.app.flags.DEFINE_string('pretrained_model_path', None, '')\ntf.app.flags.DEFINE_boolean('allow_mix_precision', False, 'whether to allow mix precision')\ntf.app.flags.DEFINE_boolean('auto_tune', False, 'whether to autotune')\ntf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data')\ntf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets')\n\nimport model\nimport icdar\n\nFLAGS = tf.app.flags.FLAGS\n\ngpus = list(range(len(FLAGS.gpu_list.split(','))))\n\n\ndef tower_loss(images, score_maps, geo_maps, training_masks, reuse_variables=None):\n # Build inference graph\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):\n f_score, f_geometry = model.model(images, is_training=True)\n\n model_loss = model.loss(score_maps, f_score,\n geo_maps, f_geometry,\n training_masks)\n total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n\n # add summary\n if reuse_variables is None:\n tf.summary.image('input', images)\n tf.summary.image('score_map', score_maps)\n tf.summary.image('score_map_pred', f_score * 255)\n tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1])\n tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1])\n 
tf.summary.image('training_masks', training_masks)\n tf.summary.scalar('model_loss', model_loss)\n tf.summary.scalar('total_loss', total_loss)\n\n return total_loss, model_loss\n\n\ndef average_gradients(tower_grads):\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n grads = []\n for g, _ in grad_and_vars:\n expanded_g = tf.expand_dims(g, 0)\n grads.append(expanded_g)\n\n grad = tf.concat(grads, 0)\n grad = tf.reduce_mean(grad, 0)\n\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n\n return average_grads\n\nclass MixedPrecisionOptimizer(tf.train.Optimizer):\n \"\"\"An optimizer that updates trainable variables in fp32.\"\"\"\n\n def __init__(self, optimizer,\n scale=None,\n name=\"MixedPrecisionOptimizer\",\n use_locking=False):\n super(MixedPrecisionOptimizer, self).__init__(\n name=name, use_locking=use_locking)\n self._optimizer = optimizer\n self._scale = float(scale) if scale is not None else 1.0\n\n def compute_gradients(self, loss, var_list=None, *args, **kwargs):\n if var_list is None:\n var_list = (\n tf.trainable_variables() +\n tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))\n\n replaced_list = var_list\n\n if self._scale != 1.0:\n loss = tf.scalar_mul(self._scale, loss)\n\n gradvar = self._optimizer.compute_gradients(loss, replaced_list, *args, **kwargs)\n\n final_gradvar = []\n for orig_var, (grad, var) in zip(var_list, gradvar):\n if var is not orig_var:\n grad = tf.cast(grad, orig_var.dtype)\n if self._scale != 1.0:\n grad = tf.scalar_mul(1. / self._scale, grad)\n final_gradvar.append((grad, orig_var))\n\n return final_gradvar\n\n def apply_gradients(self, *args, **kwargs):\n return self._optimizer.apply_gradients(*args, **kwargs)\n\ndef main(argv=None):\n start1 = time.time()\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list\n if not tf.gfile.Exists(FLAGS.checkpoint_path):\n tf.gfile.MkDir(FLAGS.checkpoint_path)\n else:\n if not FLAGS.restore:\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_path)\n tf.gfile.MkDir(FLAGS.checkpoint_path)\n\n input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')\n input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps')\n if FLAGS.geometry == 'RBOX':\n input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 5], name='input_geo_maps')\n else:\n input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 8], name='input_geo_maps')\n input_training_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_training_masks')\n\n global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)\n learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, decay_steps=10000, decay_rate=0.94, staircase=True)\n # add summary\n tf.summary.scalar('learning_rate', learning_rate)\n opt = tf.train.AdamOptimizer(learning_rate)\n opt = MixedPrecisionOptimizer(opt, scale=FLAGS.loss_scale)\n from npu_bridge.estimator.npu.npu_optimizer import NPUDistributedOptimizer\n opt = NPUDistributedOptimizer(opt)\n # split\n input_images_split = tf.split(input_images, len(gpus))\n input_score_maps_split = tf.split(input_score_maps, len(gpus))\n input_geo_maps_split = tf.split(input_geo_maps, len(gpus))\n input_training_masks_split = tf.split(input_training_masks, len(gpus))\n\n tower_grads = []\n reuse_variables = None\n for i, gpu_id in enumerate(gpus):\n #with tf.device('/gpu:%d' % gpu_id):\n with 
tf.name_scope('model_%d' % gpu_id) as scope:\n iis = input_images_split[i]\n isms = input_score_maps_split[i]\n igms = input_geo_maps_split[i]\n itms = input_training_masks_split[i]\n total_loss, model_loss = tower_loss(iis, isms, igms, itms, reuse_variables)\n batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))\n reuse_variables = True\n\n grads = opt.compute_gradients(total_loss)\n tower_grads.append(grads)\n\n grads = average_gradients(tower_grads)\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n summary_op = tf.summary.merge_all()\n # save moving average\n variable_averages = tf.train.ExponentialMovingAverage(\n FLAGS.moving_average_decay, global_step)\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n # batch norm updates\n with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):\n train_op = tf.no_op(name='train_op')\n\n saver = tf.train.Saver(tf.global_variables())\n summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph())\n\n init = tf.global_variables_initializer()\n\n if FLAGS.pretrained_model_path is not None:\n variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path, slim.get_trainable_variables(),\n ignore_missing_vars=True)\n\n config = tf.ConfigProto()\n custom_op = config.graph_options.rewrite_options.custom_optimizers.add()\n custom_op.name = \"NpuOptimizer\"\n custom_op.parameter_map[\"use_off_line\"].b = True # run training on the Ascend AI processor\n config.graph_options.rewrite_options.remapping = RewriterConfig.OFF # disable the remap pass\n if FLAGS.allow_mix_precision:\n custom_op.parameter_map[\"precision_mode\"].s = tf.compat.as_bytes(\"allow_mix_precision\")\n if FLAGS.auto_tune:\n custom_op.parameter_map[\"auto_tune_mode\"].s = tf.compat.as_bytes(\"RL,GA\")\n \n with tf.Session(config=config) as sess:\n if FLAGS.restore:\n print('continue training from previous checkpoint')\n ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n saver.restore(sess, ckpt)\n else:\n sess.run(init)\n if FLAGS.pretrained_model_path is not None:\n variable_restore_op(sess)\n\n data_generator = icdar.get_batch(num_workers=FLAGS.num_readers,\n input_size=FLAGS.input_size,\n batch_size=FLAGS.batch_size_per_gpu * len(gpus))\n\n start = time.time()\n avg_time_per_step1 = 0\n performs = []\n for step in range(FLAGS.max_steps):\n if FLAGS.use_processed_data:\n index = random.randint(0,1000-1)\n images = np.fromfile(os.path.join(FLAGS.processed_data,'input_images_{}.bin'.format(index)),dtype='float32').reshape(FLAGS.batch_size_per_gpu,FLAGS.input_size,FLAGS.input_size,3)\n score_maps = np.fromfile(os.path.join(FLAGS.processed_data,'input_score_maps_{}.bin'.format(index)),dtype='float32').reshape(FLAGS.batch_size_per_gpu,128,128,1)\n geo_maps = np.fromfile(os.path.join(FLAGS.processed_data,'input_geo_maps_{}.bin'.format(index)),dtype='float32').reshape(FLAGS.batch_size_per_gpu,128,128,5)\n training_masks = np.fromfile(os.path.join(FLAGS.processed_data, 'input_training_masks_{}.bin'.format(index)),dtype='float32').reshape(FLAGS.batch_size_per_gpu, 128, 128, 1)\n else:\n data = next(data_generator)\n images = data[0]\n score_maps = data[2]\n geo_maps = data[3]\n training_masks = data[4]\n ml, tl, _ = sess.run([model_loss, total_loss, train_op], feed_dict={input_images: images,\n input_score_maps: score_maps,\n input_geo_maps: geo_maps,\n input_training_masks: training_masks})\n if np.isnan(tl):\n print('Loss diverged, stop training')\n break\n\n if 
step % 10 == 0:\n avg_time_per_step = (time.time() - start)/10\n avg_time_per_step1 += (time.time() - start)/FLAGS.max_steps\n avg_examples_per_second = (10 * FLAGS.batch_size_per_gpu * len(gpus))/(time.time() - start)\n performs.append(float(avg_examples_per_second))\n start = time.time()\n print('Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.2f} seconds/step, {:.2f} examples/second'.format(\n step, ml, tl, avg_time_per_step, avg_examples_per_second))\n\n if step % FLAGS.save_checkpoint_steps == 0:\n saver.save(sess, FLAGS.checkpoint_path + 'model.ckpt', global_step=global_step)\n\n if step % FLAGS.save_summary_steps == 0:\n _, tl, summary_str = sess.run([train_op, total_loss, summary_op], feed_dict={input_images: images,\n input_score_maps: score_maps,\n input_geo_maps: geo_maps,\n input_training_masks: training_masks})\n summary_writer.add_summary(summary_str, global_step=step)\n print(\"Final Train Accuracy\", tl)\n E2Etime = time.time() - start1\n print(\"E2E Training Duration sec\", E2Etime)\n print(\"avg time per step\", avg_time_per_step1)\n print(\"FPS {:.2f}\".format(sum(performs)/len(performs)))\n\nif __name__ == '__main__':\n tf.app.run()\n",
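A minimal sketch of the loss-scaling wrapper defined above, in isolation: the loss is multiplied by the scale before gradient computation and the gradients are divided by it before being applied. `loss` is assumed to be an existing scalar tensor in the current graph:

```python
base_opt = tf.train.AdamOptimizer(1e-4)
opt = MixedPrecisionOptimizer(base_opt, scale=1024)
grads_and_vars = opt.compute_gradients(loss)  # gradients of 1024 * loss, then / 1024
train_op = opt.apply_gradients(grads_and_vars)
```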
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Bert model.\"\"\"\nimport math\nimport copy\nimport numpy as np\nimport mindspore.common.dtype as mstype\nimport mindspore.nn as nn\nimport mindspore.ops.functional as F\nfrom mindspore.common.initializer import TruncatedNormal, initializer\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import composite as C\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.common.parameter import Parameter\nfrom mindspore import context\n\n\nclass BertConfig:\n \"\"\"\n Configuration for `BertModel`.\n\n Args:\n seq_length (int): Length of input sequence. Default: 128.\n vocab_size (int): The shape of each embedding vector. Default: 32000.\n hidden_size (int): Size of the bert encoder layers. Default: 768.\n num_hidden_layers (int): Number of hidden layers in the BertTransformer encoder\n cell. Default: 12.\n num_attention_heads (int): Number of attention heads in the BertTransformer\n encoder cell. Default: 12.\n intermediate_size (int): Size of intermediate layer in the BertTransformer\n encoder cell. Default: 3072.\n hidden_act (str): Activation function used in the BertTransformer encoder\n cell. Default: \"gelu\".\n hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.1.\n max_position_embeddings (int): Maximum length of sequences used in this\n model. Default: 512.\n type_vocab_size (int): Size of token type vocab. Default: 16.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n dtype (:class:`mindspore.dtype`): Data type of the input. Default: mstype.float32.\n compute_type (:class:`mindspore.dtype`): Compute type in BertTransformer. 
Default: mstype.float32.\n \"\"\"\n\n def __init__(self,\n seq_length=128,\n vocab_size=32000,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02,\n use_relative_positions=False,\n dtype=mstype.float32,\n compute_type=mstype.float32):\n self.seq_length = seq_length\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.use_relative_positions = use_relative_positions\n self.dtype = dtype\n self.compute_type = compute_type\n\n\nclass EmbeddingLookup(nn.Cell):\n \"\"\"\n A embeddings lookup table with a fixed dictionary and size.\n\n Args:\n vocab_size (int): Size of the dictionary of embeddings.\n embedding_size (int): The size of each embedding vector.\n embedding_shape (list): [batch_size, seq_length, embedding_size], the shape of\n each embedding vector.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n \"\"\"\n\n def __init__(self,\n vocab_size,\n embedding_size,\n embedding_shape,\n use_one_hot_embeddings=False,\n initializer_range=0.02):\n super(EmbeddingLookup, self).__init__()\n self.vocab_size = vocab_size\n self.use_one_hot_embeddings = use_one_hot_embeddings\n self.embedding_table = Parameter(initializer\n (TruncatedNormal(initializer_range),\n [vocab_size, embedding_size]),\n name='embedding_table')\n self.expand = P.ExpandDims()\n self.shape_flat = (-1,)\n self.gather = P.GatherV2()\n self.one_hot = P.OneHot()\n self.on_value = Tensor(1.0, mstype.float32)\n self.off_value = Tensor(0.0, mstype.float32)\n self.array_mul = P.MatMul()\n self.reshape = P.Reshape()\n self.shape = tuple(embedding_shape)\n\n def construct(self, input_ids):\n \"\"\"embedding lookup\"\"\"\n extended_ids = self.expand(input_ids, -1)\n flat_ids = self.reshape(extended_ids, self.shape_flat)\n if self.use_one_hot_embeddings:\n one_hot_ids = self.one_hot(\n flat_ids,\n self.vocab_size,\n self.on_value,\n self.off_value)\n output_for_reshape = self.array_mul(\n one_hot_ids, self.embedding_table)\n else:\n output_for_reshape = self.gather(self.embedding_table, flat_ids, 0)\n output = self.reshape(output_for_reshape, self.shape)\n return output, self.embedding_table\n\n\nclass EmbeddingPostprocessor(nn.Cell):\n \"\"\"\n Postprocessors apply positional and token type embeddings to word embeddings.\n\n Args:\n embedding_size (int): The size of each embedding vector.\n embedding_shape (list): [batch_size, seq_length, embedding_size], the shape of\n each embedding vector.\n use_token_type (bool): Specifies whether to use token type embeddings. Default: False.\n token_type_vocab_size (int): Size of token type vocab. Default: 16.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. 
Default: 0.02.\n max_position_embeddings (int): Maximum length of sequences used in this\n model. Default: 512.\n dropout_prob (float): The dropout probability. Default: 0.1.\n \"\"\"\n\n def __init__(self,\n use_relative_positions,\n embedding_size,\n embedding_shape,\n use_token_type=False,\n token_type_vocab_size=16,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n super(EmbeddingPostprocessor, self).__init__()\n self.use_token_type = use_token_type\n self.token_type_vocab_size = token_type_vocab_size\n self.use_one_hot_embeddings = use_one_hot_embeddings\n self.max_position_embeddings = max_position_embeddings\n self.embedding_table = Parameter(initializer\n (TruncatedNormal(initializer_range),\n [token_type_vocab_size,\n embedding_size]),\n name='embedding_table')\n self.shape_flat = (-1,)\n self.one_hot = P.OneHot()\n self.on_value = Tensor(1.0, mstype.float32)\n self.off_value = Tensor(0.1, mstype.float32)\n self.array_mul = P.MatMul()\n self.reshape = P.Reshape()\n self.shape = tuple(embedding_shape)\n self.layernorm = nn.LayerNorm((embedding_size,))\n self.dropout = nn.Dropout(1 - dropout_prob)\n self.gather = P.GatherV2()\n self.use_relative_positions = use_relative_positions\n self.slice = P.StridedSlice()\n self.full_position_embeddings = Parameter(initializer\n (TruncatedNormal(initializer_range),\n [max_position_embeddings,\n embedding_size]),\n name='full_position_embeddings')\n\n def construct(self, token_type_ids, word_embeddings):\n \"\"\"embedding postprocessor\"\"\"\n output = word_embeddings\n if self.use_token_type:\n flat_ids = self.reshape(token_type_ids, self.shape_flat)\n if self.use_one_hot_embeddings:\n one_hot_ids = self.one_hot(flat_ids,\n self.token_type_vocab_size, self.on_value, self.off_value)\n token_type_embeddings = self.array_mul(one_hot_ids,\n self.embedding_table)\n else:\n token_type_embeddings = self.gather(\n self.embedding_table, flat_ids, 0)\n token_type_embeddings = self.reshape(\n token_type_embeddings, self.shape)\n output += token_type_embeddings\n if not self.use_relative_positions:\n _, seq, width = self.shape\n position_embeddings = self.slice(\n self.full_position_embeddings, (0, 0), (seq, width), (1, 1))\n position_embeddings = self.reshape(\n position_embeddings, (1, seq, width))\n output += position_embeddings\n output = self.layernorm(output)\n output = self.dropout(output)\n return output\n\n\nclass BertOutput(nn.Cell):\n \"\"\"\n Apply a linear computation to hidden status and a residual computation to input.\n\n Args:\n in_channels (int): Input channels.\n out_channels (int): Output channels.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n dropout_prob (float): The dropout probability. Default: 0.1.\n compute_type (:class:`mindspore.dtype`): Compute type in BertTransformer. 
Default: mstype.float32.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 initializer_range=0.02,\n                 dropout_prob=0.1,\n                 compute_type=mstype.float32):\n        super(BertOutput, self).__init__()\n        self.dense = nn.Dense(in_channels, out_channels,\n                              weight_init=TruncatedNormal(initializer_range)).to_float(compute_type)\n        self.dropout = nn.Dropout(1 - dropout_prob)\n        self.add = P.TensorAdd()\n        self.is_gpu = context.get_context('device_target') == \"GPU\"\n        if self.is_gpu:\n            self.layernorm = nn.LayerNorm(\n                (out_channels,)).to_float(\n                mstype.float32)\n            self.compute_type = compute_type\n        else:\n            self.layernorm = nn.LayerNorm(\n                (out_channels,)).to_float(compute_type)\n\n        self.cast = P.Cast()\n\n    def construct(self, hidden_states, input_tensor):\n        \"\"\"bert output\"\"\"\n        output = self.dense(hidden_states)\n        output = self.dropout(output)\n        output = self.add(input_tensor, output)\n        output = self.layernorm(output)\n        if self.is_gpu:\n            output = self.cast(output, self.compute_type)\n        return output\n\n\nclass RelaPosMatrixGenerator(nn.Cell):\n    \"\"\"\n    Generates matrix of relative positions between inputs.\n\n    Args:\n        length (int): Length of one dim for the matrix to be generated.\n        max_relative_position (int): Max value of relative position.\n    \"\"\"\n\n    def __init__(self, length, max_relative_position):\n        super(RelaPosMatrixGenerator, self).__init__()\n        self._length = length\n        self._max_relative_position = Tensor(\n            max_relative_position, dtype=mstype.int32)\n        self._min_relative_position = Tensor(\n            -max_relative_position, dtype=mstype.int32)\n        self.range_length = -length + 1\n        self.tile = P.Tile()\n        self.range_mat = P.Reshape()\n        self.sub = P.Sub()\n        self.expanddims = P.ExpandDims()\n        self.cast = P.Cast()\n\n    def construct(self):\n        \"\"\"position matrix generator\"\"\"\n        range_vec_row_out = self.cast(\n            F.tuple_to_array(\n                F.make_range(\n                    self._length)),\n            mstype.int32)\n        range_vec_col_out = self.range_mat(\n            range_vec_row_out, (self._length, -1))\n        tile_row_out = self.tile(range_vec_row_out, (self._length,))\n        tile_col_out = self.tile(range_vec_col_out, (1, self._length))\n        range_mat_out = self.range_mat(\n            tile_row_out, (self._length, self._length))\n        transpose_out = self.range_mat(\n            tile_col_out, (self._length, self._length))\n        distance_mat = self.sub(range_mat_out, transpose_out)\n        distance_mat_clipped = C.clip_by_value(distance_mat,\n                                               self._min_relative_position,\n                                               self._max_relative_position)\n        # Shift values to be >=0. Each integer still uniquely identifies a\n        # relative position difference.\n        final_mat = distance_mat_clipped + self._max_relative_position\n        return final_mat\n\n\nclass RelaPosEmbeddingsGenerator(nn.Cell):\n    \"\"\"\n    Generates tensor of size [length, length, depth].\n\n    Args:\n        length (int): Length of one dim for the matrix to be generated.\n        depth (int): Size of each attention head.\n        max_relative_position (int): Maximum value of relative position.\n        initializer_range (float): Initialization value of TruncatedNormal.\n        use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. 
Default: False.\n \"\"\"\n\n def __init__(self,\n length,\n depth,\n max_relative_position,\n initializer_range,\n use_one_hot_embeddings=False):\n super(RelaPosEmbeddingsGenerator, self).__init__()\n self.depth = depth\n self.vocab_size = max_relative_position * 2 + 1\n self.use_one_hot_embeddings = use_one_hot_embeddings\n self.embeddings_table = Parameter(\n initializer(TruncatedNormal(initializer_range),\n [self.vocab_size, self.depth]),\n name='embeddings_for_position')\n self.relative_positions_matrix = RelaPosMatrixGenerator(length=length,\n max_relative_position=max_relative_position)\n self.reshape = P.Reshape()\n self.one_hot = P.OneHot()\n self.on_value = Tensor(1.0, mstype.float32)\n self.off_value = Tensor(0.0, mstype.float32)\n self.shape = P.Shape()\n self.gather = P.GatherV2() # index_select\n self.matmul = P.BatchMatMul()\n\n def construct(self):\n \"\"\"position embedding generation\"\"\"\n relative_positions_matrix_out = self.relative_positions_matrix()\n # Generate embedding for each relative position of dimension depth.\n if self.use_one_hot_embeddings:\n flat_relative_positions_matrix = self.reshape(\n relative_positions_matrix_out, (-1,))\n one_hot_relative_positions_matrix = self.one_hot(\n flat_relative_positions_matrix, self.vocab_size, self.on_value, self.off_value)\n embeddings = self.matmul(\n one_hot_relative_positions_matrix,\n self.embeddings_table)\n my_shape = self.shape(\n relative_positions_matrix_out) + (self.depth,)\n embeddings = self.reshape(embeddings, my_shape)\n else:\n embeddings = self.gather(self.embeddings_table,\n relative_positions_matrix_out, 0)\n return embeddings\n\n\nclass SaturateCast(nn.Cell):\n \"\"\"\n Performs a safe saturating cast. This operation applies proper clamping before casting to prevent\n the danger that the value will overflow or underflow.\n\n Args:\n src_type (:class:`mindspore.dtype`): The type of the elements of the input tensor. Default: mstype.float32.\n dst_type (:class:`mindspore.dtype`): The type of the elements of the output tensor. Default: mstype.float32.\n \"\"\"\n\n def __init__(self, src_type=mstype.float32, dst_type=mstype.float32):\n super(SaturateCast, self).__init__()\n np_type = mstype.dtype_to_nptype(dst_type)\n min_type = np.finfo(np_type).min\n max_type = np.finfo(np_type).max\n self.tensor_min_type = Tensor([min_type], dtype=src_type)\n self.tensor_max_type = Tensor([max_type], dtype=src_type)\n self.min_op = P.Minimum()\n self.max_op = P.Maximum()\n self.cast = P.Cast()\n self.dst_type = dst_type\n\n def construct(self, x):\n \"\"\"saturate cast\"\"\"\n out = self.max_op(x, self.tensor_min_type)\n out = self.min_op(out, self.tensor_max_type)\n return self.cast(out, self.dst_type)\n\n\nclass BertAttention(nn.Cell):\n \"\"\"\n Apply multi-headed attention from \"from_tensor\" to \"to_tensor\".\n\n Args:\n from_tensor_width (int): Size of last dim of from_tensor.\n to_tensor_width (int): Size of last dim of to_tensor.\n from_seq_length (int): Length of from_tensor sequence.\n to_seq_length (int): Length of to_tensor sequence.\n num_attention_heads (int): Number of attention heads. Default: 1.\n size_per_head (int): Size of each attention head. Default: 512.\n query_act (str): Activation function for the query transform. Default: None.\n key_act (str): Activation function for the key transform. Default: None.\n value_act (str): Activation function for the value transform. Default: None.\n has_attention_mask (bool): Specifies whether to use attention mask. 
Default: False.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.0.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n do_return_2d_tensor (bool): True for return 2d tensor. False for return 3d\n tensor. Default: False.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n compute_type (:class:`mindspore.dtype`): Compute type in BertAttention. Default: mstype.float32.\n \"\"\"\n\n def __init__(self,\n from_tensor_width,\n to_tensor_width,\n from_seq_length,\n to_seq_length,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n has_attention_mask=False,\n attention_probs_dropout_prob=0.0,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n use_relative_positions=False,\n compute_type=mstype.float32):\n super(BertAttention, self).__init__()\n self.from_seq_length = from_seq_length\n self.to_seq_length = to_seq_length\n self.num_attention_heads = num_attention_heads\n self.size_per_head = size_per_head\n self.has_attention_mask = has_attention_mask\n self.use_relative_positions = use_relative_positions\n self.scores_mul = Tensor(\n [1.0 / math.sqrt(float(self.size_per_head))], dtype=compute_type)\n self.reshape = P.Reshape()\n self.shape_from_2d = (-1, from_tensor_width)\n self.shape_to_2d = (-1, to_tensor_width)\n weight = TruncatedNormal(initializer_range)\n units = num_attention_heads * size_per_head\n self.query_layer = nn.Dense(from_tensor_width,\n units,\n activation=query_act,\n weight_init=weight).to_float(compute_type)\n self.key_layer = nn.Dense(to_tensor_width,\n units,\n activation=key_act,\n weight_init=weight).to_float(compute_type)\n self.value_layer = nn.Dense(to_tensor_width,\n units,\n activation=value_act,\n weight_init=weight).to_float(compute_type)\n self.shape_from = (-1,\n from_seq_length,\n num_attention_heads,\n size_per_head)\n self.shape_to = (-1, to_seq_length, num_attention_heads, size_per_head)\n self.matmul_trans_b = P.BatchMatMul(transpose_b=True)\n self.multiply = P.Mul()\n self.transpose = P.Transpose()\n self.trans_shape = (0, 2, 1, 3)\n self.trans_shape_relative = (2, 0, 1, 3)\n self.trans_shape_position = (1, 2, 0, 3)\n self.multiply_data = Tensor([-10000.0, ], dtype=compute_type)\n self.matmul = P.BatchMatMul()\n self.softmax = nn.Softmax()\n self.dropout = nn.Dropout(1 - attention_probs_dropout_prob)\n if self.has_attention_mask:\n self.expand_dims = P.ExpandDims()\n self.sub = P.Sub()\n self.add = P.TensorAdd()\n self.cast = P.Cast()\n self.get_dtype = P.DType()\n if do_return_2d_tensor:\n self.shape_return = (-1, num_attention_heads * size_per_head)\n else:\n self.shape_return = (-1, from_seq_length,\n num_attention_heads * size_per_head)\n self.cast_compute_type = SaturateCast(dst_type=compute_type)\n if self.use_relative_positions:\n self._generate_relative_positions_embeddings = \\\n RelaPosEmbeddingsGenerator(length=to_seq_length,\n depth=size_per_head,\n max_relative_position=16,\n initializer_range=initializer_range,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n def construct(self, from_tensor, to_tensor, attention_mask):\n \"\"\"bert attention\"\"\"\n # reshape 2d/3d input tensors to 2d\n from_tensor_2d = self.reshape(from_tensor, self.shape_from_2d)\n to_tensor_2d = self.reshape(to_tensor, self.shape_to_2d)\n query_out = 
self.query_layer(from_tensor_2d)\n key_out = self.key_layer(to_tensor_2d)\n value_out = self.value_layer(to_tensor_2d)\n query_layer = self.reshape(query_out, self.shape_from)\n query_layer = self.transpose(query_layer, self.trans_shape)\n key_layer = self.reshape(key_out, self.shape_to)\n key_layer = self.transpose(key_layer, self.trans_shape)\n attention_scores = self.matmul_trans_b(query_layer, key_layer)\n # use_relative_position, supplementary logic\n if self.use_relative_positions:\n # relations_keys is [F|T, F|T, H]\n relations_keys = self._generate_relative_positions_embeddings()\n relations_keys = self.cast_compute_type(relations_keys)\n # query_layer_t is [F, B, N, H]\n query_layer_t = self.transpose(\n query_layer, self.trans_shape_relative)\n # query_layer_r is [F, B * N, H]\n query_layer_r = self.reshape(query_layer_t,\n (self.from_seq_length,\n -1,\n self.size_per_head))\n # key_position_scores is [F, B * N, F|T]\n key_position_scores = self.matmul_trans_b(query_layer_r,\n relations_keys)\n # key_position_scores_r is [F, B, N, F|T]\n key_position_scores_r = self.reshape(key_position_scores,\n (self.from_seq_length,\n -1,\n self.num_attention_heads,\n self.from_seq_length))\n # key_position_scores_r_t is [B, N, F, F|T]\n key_position_scores_r_t = self.transpose(key_position_scores_r,\n self.trans_shape_position)\n attention_scores = attention_scores + key_position_scores_r_t\n attention_scores = self.multiply(self.scores_mul, attention_scores)\n if self.has_attention_mask:\n attention_mask = self.expand_dims(attention_mask, 1)\n multiply_out = self.sub(self.cast(F.tuple_to_array((1.0,)), self.get_dtype(attention_scores)),\n self.cast(attention_mask, self.get_dtype(attention_scores)))\n adder = self.multiply(multiply_out, self.multiply_data)\n attention_scores = self.add(adder, attention_scores)\n attention_probs = self.softmax(attention_scores)\n attention_probs = self.dropout(attention_probs)\n value_layer = self.reshape(value_out, self.shape_to)\n value_layer = self.transpose(value_layer, self.trans_shape)\n context_layer = self.matmul(attention_probs, value_layer)\n # use_relative_position, supplementary logic\n if self.use_relative_positions:\n # relations_values is [F|T, F|T, H]\n relations_values = self._generate_relative_positions_embeddings()\n relations_values = self.cast_compute_type(relations_values)\n # attention_probs_t is [F, B, N, T]\n attention_probs_t = self.transpose(\n attention_probs, self.trans_shape_relative)\n # attention_probs_r is [F, B * N, T]\n attention_probs_r = self.reshape(\n attention_probs_t,\n (self.from_seq_length,\n -1,\n self.to_seq_length))\n # value_position_scores is [F, B * N, H]\n value_position_scores = self.matmul(attention_probs_r,\n relations_values)\n # value_position_scores_r is [F, B, N, H]\n value_position_scores_r = self.reshape(value_position_scores,\n (self.from_seq_length,\n -1,\n self.num_attention_heads,\n self.size_per_head))\n # value_position_scores_r_t is [B, N, F, H]\n value_position_scores_r_t = self.transpose(value_position_scores_r,\n self.trans_shape_position)\n context_layer = context_layer + value_position_scores_r_t\n context_layer = self.transpose(context_layer, self.trans_shape)\n context_layer = self.reshape(context_layer, self.shape_return)\n return context_layer, attention_scores\n\n\nclass BertSelfAttention(nn.Cell):\n \"\"\"\n Apply self-attention.\n\n Args:\n seq_length (int): Length of input sequence.\n hidden_size (int): Size of the bert encoder layers.\n num_attention_heads (int): Number of 
attention heads. Default: 12.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.1.\n use_one_hot_embeddings (bool): Specifies whether to use one_hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n compute_type (:class:`mindspore.dtype`): Compute type in BertSelfAttention. Default: mstype.float32.\n \"\"\"\n\n def __init__(self,\n seq_length,\n hidden_size,\n num_attention_heads=12,\n attention_probs_dropout_prob=0.1,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n hidden_dropout_prob=0.1,\n use_relative_positions=False,\n compute_type=mstype.float32):\n super(BertSelfAttention, self).__init__()\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\"The hidden size (%d) is not a multiple of the number \"\n \"of attention heads (%d)\" % (hidden_size, num_attention_heads))\n self.size_per_head = int(hidden_size / num_attention_heads)\n self.attention = BertAttention(\n from_tensor_width=hidden_size,\n to_tensor_width=hidden_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length,\n num_attention_heads=num_attention_heads,\n size_per_head=self.size_per_head,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=initializer_range,\n use_relative_positions=use_relative_positions,\n has_attention_mask=True,\n do_return_2d_tensor=True,\n compute_type=compute_type)\n self.output = BertOutput(in_channels=hidden_size,\n out_channels=hidden_size,\n initializer_range=initializer_range,\n dropout_prob=hidden_dropout_prob,\n compute_type=compute_type)\n self.reshape = P.Reshape()\n self.shape = (-1, hidden_size)\n\n def construct(self, input_tensor, attention_mask):\n \"\"\"bert self attention\"\"\"\n input_tensor = self.reshape(input_tensor, self.shape)\n attention_output, attention_scores = self.attention(\n input_tensor, input_tensor, attention_mask)\n output = self.output(attention_output, input_tensor)\n return output, attention_scores\n\n\nclass BertEncoderCell(nn.Cell):\n \"\"\"\n Encoder cells used in BertTransformer.\n\n Args:\n hidden_size (int): Size of the bert encoder layers. Default: 768.\n seq_length (int): Length of input sequence. Default: 512.\n num_attention_heads (int): Number of attention heads. Default: 12.\n intermediate_size (int): Size of intermediate layer. Default: 3072.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.02.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n hidden_act (str): Activation function. Default: \"gelu\".\n compute_type (:class:`mindspore.dtype`): Compute type in attention. 
Default: mstype.float32.\n \"\"\"\n\n def __init__(self,\n hidden_size=768,\n seq_length=512,\n num_attention_heads=12,\n intermediate_size=3072,\n attention_probs_dropout_prob=0.02,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n hidden_dropout_prob=0.1,\n use_relative_positions=False,\n hidden_act=\"gelu\",\n compute_type=mstype.float32):\n super(BertEncoderCell, self).__init__()\n self.attention = BertSelfAttention(\n hidden_size=hidden_size,\n seq_length=seq_length,\n num_attention_heads=num_attention_heads,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=initializer_range,\n hidden_dropout_prob=hidden_dropout_prob,\n use_relative_positions=use_relative_positions,\n compute_type=compute_type)\n self.intermediate = nn.Dense(in_channels=hidden_size,\n out_channels=intermediate_size,\n activation=hidden_act,\n weight_init=TruncatedNormal(initializer_range)).to_float(compute_type)\n self.output = BertOutput(in_channels=intermediate_size,\n out_channels=hidden_size,\n initializer_range=initializer_range,\n dropout_prob=hidden_dropout_prob,\n compute_type=compute_type)\n\n def construct(self, hidden_states, attention_mask):\n \"\"\"bert encoder cell\"\"\"\n # self-attention\n attention_output, attention_scores = self.attention(\n hidden_states, attention_mask)\n # feed construct\n intermediate_output = self.intermediate(attention_output)\n # add and normalize\n output = self.output(intermediate_output, attention_output)\n return output, attention_scores\n\n\nclass BertTransformer(nn.Cell):\n \"\"\"\n Multi-layer bert transformer.\n\n Args:\n hidden_size (int): Size of the encoder layers.\n seq_length (int): Length of input sequence.\n num_hidden_layers (int): Number of hidden layers in encoder cells.\n num_attention_heads (int): Number of attention heads in encoder cells. Default: 12.\n intermediate_size (int): Size of intermediate layer in encoder cells. Default: 3072.\n attention_probs_dropout_prob (float): The dropout probability for\n BertAttention. Default: 0.1.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.\n hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1.\n use_relative_positions (bool): Specifies whether to use relative positions. Default: False.\n hidden_act (str): Activation function used in the encoder cells. Default: \"gelu\".\n compute_type (:class:`mindspore.dtype`): Compute type in BertTransformer. Default: mstype.float32.\n return_all_encoders (bool): Specifies whether to return all encoders. 
Default: False.\n \"\"\"\n\n def __init__(self,\n hidden_size,\n seq_length,\n num_hidden_layers,\n num_attention_heads=12,\n intermediate_size=3072,\n attention_probs_dropout_prob=0.1,\n use_one_hot_embeddings=False,\n initializer_range=0.02,\n hidden_dropout_prob=0.1,\n use_relative_positions=False,\n hidden_act=\"gelu\",\n compute_type=mstype.float32,\n return_all_encoders=False):\n super(BertTransformer, self).__init__()\n self.return_all_encoders = return_all_encoders\n layers = []\n for _ in range(num_hidden_layers):\n layer = BertEncoderCell(hidden_size=hidden_size,\n seq_length=seq_length,\n num_attention_heads=num_attention_heads,\n intermediate_size=intermediate_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=initializer_range,\n hidden_dropout_prob=hidden_dropout_prob,\n use_relative_positions=use_relative_positions,\n hidden_act=hidden_act,\n compute_type=compute_type)\n layers.append(layer)\n self.layers = nn.CellList(layers)\n self.reshape = P.Reshape()\n self.shape = (-1, hidden_size)\n self.out_shape = (-1, seq_length, hidden_size)\n\n def construct(self, input_tensor, attention_mask):\n \"\"\"bert transformer\"\"\"\n prev_output = self.reshape(input_tensor, self.shape)\n all_encoder_layers = ()\n all_encoder_atts = ()\n all_encoder_outputs = ()\n all_encoder_outputs += (prev_output,)\n for layer_module in self.layers:\n layer_output, encoder_att = layer_module(\n prev_output, attention_mask)\n prev_output = layer_output\n if self.return_all_encoders:\n all_encoder_outputs += (layer_output,)\n layer_output = self.reshape(layer_output, self.out_shape)\n all_encoder_layers += (layer_output,)\n all_encoder_atts += (encoder_att,)\n if not self.return_all_encoders:\n prev_output = self.reshape(prev_output, self.out_shape)\n all_encoder_layers += (prev_output,)\n return all_encoder_layers, all_encoder_outputs, all_encoder_atts\n\n\nclass CreateAttentionMaskFromInputMask(nn.Cell):\n \"\"\"\n Create attention mask according to input mask.\n\n Args:\n config (Class): Configuration for BertModel.\n \"\"\"\n\n def __init__(self, config):\n super(CreateAttentionMaskFromInputMask, self).__init__()\n self.input_mask = None\n self.cast = P.Cast()\n self.reshape = P.Reshape()\n self.shape = (-1, 1, config.seq_length)\n\n def construct(self, input_mask):\n attention_mask = self.cast(\n self.reshape(\n input_mask,\n self.shape),\n mstype.float32)\n return attention_mask\n\n\nclass BertModel(nn.Cell):\n \"\"\"\n Bidirectional Encoder Representations from Transformers.\n\n Args:\n config (Class): Configuration for BertModel.\n is_training (bool): True for training mode. False for eval mode.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. 
Default: False.\n \"\"\"\n\n def __init__(self,\n config,\n is_training,\n use_one_hot_embeddings=False):\n super(BertModel, self).__init__()\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n self.seq_length = config.seq_length\n self.hidden_size = config.hidden_size\n self.num_hidden_layers = config.num_hidden_layers\n self.embedding_size = config.hidden_size\n self.token_type_ids = None\n self.last_idx = self.num_hidden_layers - 1\n output_embedding_shape = [-1, self.seq_length,\n self.embedding_size]\n self.bert_embedding_lookup = EmbeddingLookup(\n vocab_size=config.vocab_size,\n embedding_size=self.embedding_size,\n embedding_shape=output_embedding_shape,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=config.initializer_range)\n self.bert_embedding_postprocessor = EmbeddingPostprocessor(\n use_relative_positions=config.use_relative_positions,\n embedding_size=self.embedding_size,\n embedding_shape=output_embedding_shape,\n use_token_type=True,\n token_type_vocab_size=config.type_vocab_size,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=0.02,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n self.bert_encoder = BertTransformer(\n hidden_size=self.hidden_size,\n seq_length=self.seq_length,\n num_attention_heads=config.num_attention_heads,\n num_hidden_layers=self.num_hidden_layers,\n intermediate_size=config.intermediate_size,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=config.initializer_range,\n hidden_dropout_prob=config.hidden_dropout_prob,\n use_relative_positions=config.use_relative_positions,\n hidden_act=config.hidden_act,\n compute_type=config.compute_type,\n return_all_encoders=True)\n self.cast = P.Cast()\n self.dtype = config.dtype\n self.cast_compute_type = SaturateCast(dst_type=config.compute_type)\n self.slice = P.StridedSlice()\n self.squeeze_1 = P.Squeeze(axis=1)\n self.dense = nn.Dense(self.hidden_size, self.hidden_size,\n activation=\"tanh\",\n weight_init=TruncatedNormal(config.initializer_range)).to_float(config.compute_type)\n self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(\n config)\n\n def construct(self, input_ids, token_type_ids, input_mask):\n \"\"\"bert model\"\"\"\n # embedding\n word_embeddings, embedding_tables = self.bert_embedding_lookup(\n input_ids)\n embedding_output = self.bert_embedding_postprocessor(\n token_type_ids, word_embeddings)\n # attention mask [batch_size, seq_length, seq_length]\n attention_mask = self._create_attention_mask_from_input_mask(\n input_mask)\n # bert encoder\n encoder_output, encoder_layers, layer_atts = self.bert_encoder(self.cast_compute_type(embedding_output),\n attention_mask)\n sequence_output = self.cast(encoder_output[self.last_idx], self.dtype)\n # pooler\n batch_size = P.Shape()(input_ids)[0]\n sequence_slice = self.slice(sequence_output,\n (0, 0, 0),\n (batch_size, 1, self.hidden_size),\n (1, 1, 1))\n first_token = self.squeeze_1(sequence_slice)\n pooled_output = self.dense(first_token)\n pooled_output = self.cast(pooled_output, self.dtype)\n encoder_outputs = ()\n for output in encoder_layers:\n encoder_outputs += (self.cast(output, self.dtype),)\n attention_outputs = ()\n for output in layer_atts:\n attention_outputs += (self.cast(output, self.dtype),)\n return sequence_output, pooled_output, 
embedding_tables, encoder_outputs, attention_outputs\n\n\nclass TinyBertModel(nn.Cell):\n \"\"\"\n Bidirectional Encoder Representations from Transformers.\n\n Args:\n config (Class): Configuration for BertModel.\n is_training (bool): True for training mode. False for eval mode.\n use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.\n \"\"\"\n\n def __init__(self,\n config,\n is_training,\n use_one_hot_embeddings=False):\n super(TinyBertModel, self).__init__()\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n self.seq_length = config.seq_length\n self.hidden_size = config.hidden_size\n self.num_hidden_layers = config.num_hidden_layers\n self.embedding_size = config.hidden_size\n self.token_type_ids = None\n self.last_idx = self.num_hidden_layers - 1\n output_embedding_shape = [-1, self.seq_length,\n self.embedding_size]\n self.tinybert_embedding_lookup = EmbeddingLookup(\n vocab_size=config.vocab_size,\n embedding_size=self.embedding_size,\n embedding_shape=output_embedding_shape,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=config.initializer_range)\n self.tinybert_embedding_postprocessor = EmbeddingPostprocessor(\n use_relative_positions=config.use_relative_positions,\n embedding_size=self.embedding_size,\n embedding_shape=output_embedding_shape,\n use_token_type=True,\n token_type_vocab_size=config.type_vocab_size,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=0.02,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n self.tinybert_encoder = BertTransformer(\n hidden_size=self.hidden_size,\n seq_length=self.seq_length,\n num_attention_heads=config.num_attention_heads,\n num_hidden_layers=self.num_hidden_layers,\n intermediate_size=config.intermediate_size,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n use_one_hot_embeddings=use_one_hot_embeddings,\n initializer_range=config.initializer_range,\n hidden_dropout_prob=config.hidden_dropout_prob,\n use_relative_positions=config.use_relative_positions,\n hidden_act=config.hidden_act,\n compute_type=config.compute_type,\n return_all_encoders=True)\n self.cast = P.Cast()\n self.dtype = config.dtype\n self.cast_compute_type = SaturateCast(dst_type=config.compute_type)\n self.slice = P.StridedSlice()\n self.squeeze_1 = P.Squeeze(axis=1)\n self.dense = nn.Dense(self.hidden_size, self.hidden_size,\n activation=\"tanh\",\n weight_init=TruncatedNormal(config.initializer_range)).to_float(config.compute_type)\n self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(\n config)\n\n def construct(self, input_ids, token_type_ids, input_mask):\n \"\"\"tiny bert model\"\"\"\n # embedding\n word_embeddings, embedding_tables = self.tinybert_embedding_lookup(\n input_ids)\n embedding_output = self.tinybert_embedding_postprocessor(token_type_ids,\n word_embeddings)\n # attention mask [batch_size, seq_length, seq_length]\n attention_mask = self._create_attention_mask_from_input_mask(\n input_mask)\n # bert encoder\n encoder_output, encoder_layers, layer_atts = self.tinybert_encoder(self.cast_compute_type(embedding_output),\n attention_mask)\n sequence_output = self.cast(encoder_output[self.last_idx], self.dtype)\n # pooler\n batch_size = P.Shape()(input_ids)[0]\n sequence_slice = self.slice(sequence_output,\n (0, 0, 0),\n (batch_size, 1, self.hidden_size),\n (1, 1, 1))\n first_token = 
self.squeeze_1(sequence_slice)\n        pooled_output = self.dense(first_token)\n        pooled_output = self.cast(pooled_output, self.dtype)\n        encoder_outputs = ()\n        for output in encoder_layers:\n            encoder_outputs += (self.cast(output, self.dtype),)\n        attention_outputs = ()\n        for output in layer_atts:\n            attention_outputs += (self.cast(output, self.dtype),)\n        return sequence_output, pooled_output, embedding_tables, encoder_outputs, attention_outputs\n\n\nclass BertModelCLS(nn.Cell):\n    \"\"\"\n    This class is responsible for classification task evaluation,\n    i.e. XNLI(num_labels=3), LCQMC(num_labels=2), Chnsenti(num_labels=2).\n    The returned output represents the final logits, as the result of log_softmax is proportional to that of softmax.\n    \"\"\"\n\n    def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0,\n                 use_one_hot_embeddings=False, phase_type=\"teacher\"):\n        super(BertModelCLS, self).__init__()\n        self.bert = BertModel(config, is_training, use_one_hot_embeddings)\n        self.cast = P.Cast()\n        self.weight_init = TruncatedNormal(config.initializer_range)\n        self.log_softmax = P.LogSoftmax(axis=-1)\n        self.dtype = config.dtype\n        self.num_labels = num_labels\n        self.phase_type = phase_type\n        self.dense_1 = nn.Dense(config.hidden_size, self.num_labels, weight_init=self.weight_init,\n                                has_bias=True).to_float(config.compute_type)\n        # a field named dropout should actually be a Dropout cell (keep_prob form),\n        # not an activation; dropout_prob=0.0 keeps this a no-op by default\n        self.dropout = nn.Dropout(1 - dropout_prob)\n\n    def construct(self, input_ids, token_type_id, input_mask):\n        \"\"\"classification bert model\"\"\"\n        _, pooled_output, _, seq_output, att_output = self.bert(\n            input_ids, token_type_id, input_mask)\n        cls = self.cast(pooled_output, self.dtype)\n        cls = self.dropout(cls)\n        logits = self.dense_1(cls)\n        logits = self.cast(logits, self.dtype)\n        log_probs = self.log_softmax(logits)\n        return seq_output, att_output, logits, log_probs\n",
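The file above defines the full MindSpore BERT/TinyBERT stack end to end. A minimal, hedged sketch of how the pieces are meant to be driven; the config sizes, the module path `tinybert_model`, and the CPU target are illustrative assumptions, not part of the original file:

import numpy as np
import mindspore.common.dtype as mstype
from mindspore import Tensor, context
from tinybert_model import BertConfig, BertModelCLS  # assumed module name

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

# small illustrative config; hidden_size must divide evenly by num_attention_heads
config = BertConfig(seq_length=32, vocab_size=30522, hidden_size=384,
                    num_hidden_layers=4, num_attention_heads=12,
                    intermediate_size=1536)
model = BertModelCLS(config, is_training=False, num_labels=2)

batch = 2
input_ids = Tensor(np.random.randint(0, config.vocab_size, (batch, config.seq_length)), mstype.int32)
token_type_ids = Tensor(np.zeros((batch, config.seq_length)), mstype.int32)
input_mask = Tensor(np.ones((batch, config.seq_length)), mstype.int32)

seq_output, att_output, logits, log_probs = model(input_ids, token_type_ids, input_mask)
print(log_probs.shape)  # (2, 2): one log-probability row per sample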
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nfrom PIL import Image\n\nfrom model_darknet19 import darknet\nfrom decode import decode\nfrom utils import preprocess_image, postprocess, draw_detection\nfrom config import anchors, class_names\nimport os \n\ndef main():\n labels_to_names={\n 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorbike', 4: 'aeroplane', 5: 'bus', 6: 'train', 7: 'truck',8: 'boat', 9:'traffic light', 10:'fire hydrant', 11:'stop sign',12:'parking meter', 13:'bench', 14:'bird',\n 15:'cat',16: 'dog',17:'horse', 18:'sheep',19:'cow',20:'elephant',21:'bear',22:'zebra',23:'giraffe',24:'backpack',25:'umbrella',26:'bandbag',27:'tie',28:'suitcase',29:'frisbee',30:'skis',31:'snowboard',32:'sports ball',\n 33:'kite',34:'baseball bat',35:'baseball glove',36:'skateboard',37:'surfboard',38:'tennis racket',39:'bottle',40:'wine glass',41:'cup',42:'fork',43:'knife',44:'spoon',45:'bowl',46:'banana',47:'apple',48:'sandwich',\n 49:'orange',50:'broccoli',51:'carrot',52:'hot dog',53:'pizza',54:'donut',55:'cake',56:'chair',57:'couch',58:'pottedplant',59:'bed',60:'diningtable',61:'toilet',62:'tv',63:'laptop',64:'mouse',65:'remote',66:'keyboard',\n 67:'cellphone',68:'microwave',69:'oven',70:'toaster',71:'sink',72:'refrigerator',73:'book',74:'clock',75:'vase',76:'scissors',77:'teddy bear',78:'hair direr',79:'toothbrush'}\n img_dir = \"./data/pascal_voc/VOCdevkit/VOC2007_test/JPEGImages\"\n for filename in os.listdir(img_dir):\n input_size = (416,416)\n \n image = cv2.imread(img_dir + '/' + filename)\n image_shape = image.shape[:2] #只取wh,channel=3不取\n\n # copy、resize416*416、归一化、在第0维增加存放batchsize维度\n image_cp = preprocess_image(image,input_size)\n tf.reset_default_graph() #运行到第2张就报错,需要加上这句,清除默认图形堆栈并充值默认图形\n \n # 【1】输入图片进入darknet19网络得到特征图,并进行解码得到:xmin xmax表示的边界框、置信度、类别概率\n tf_image = tf.placeholder(tf.float32,[1,input_size[0],input_size[1],3])\n model_output = darknet(tf_image) # darknet19网络输出的特征图\n output_sizes = input_size[0]//32, input_size[1]//32 # 特征图尺寸是图片下采样32倍\n output_decoded = decode(model_output=model_output,output_sizes=output_sizes,\n num_class=len(class_names),anchors=anchors) # 解码\n\n model_path = \"./yolov2_model/checkpoint_dir/yolo2_coco.ckpt\"\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess,model_path)\n bboxes,obj_probs,class_probs = sess.run(output_decoded,feed_dict={tf_image:image_cp})\n\n # 【2】筛选解码后的回归边界框——NMS(post process后期处理)\n bboxes,scores,class_max_index = postprocess(bboxes,obj_probs,class_probs,image_shape=image_shape)\n label_path_txt = \"./map_mul/detections_npu/\"\n with open(os.path.join(label_path_txt + filename.split('.')[0] + '.txt'), 'a+') as f:\n for i in range(len(scores)):\n if \" \" in labels_to_names[class_max_index[i]]:\n labels_to_name = labels_to_names[class_max_index[i]].split(' ')[0] + labels_to_names[class_max_index[i]].split(' ')[1]\n f.write(labels_to_name + \" \" + str(scores[i]) + \" \" + str(bboxes[i][0])+ \" \" + 
str(bboxes[i][1])+ \" \" + str(bboxes[i][2])+ \" \" + str(bboxes[i][3]) + '\\n')\n                else:\n                    f.write(labels_to_names[class_max_index[i]] + \" \" + str(scores[i]) + \" \" + str(bboxes[i][0])+ \" \" + str(bboxes[i][1])+ \" \" + str(bboxes[i][2])+ \" \" + str(bboxes[i][3]) + '\\n')\n\n\n        # [3] Draw the filtered bounding boxes\n        #print('-----',filename)\n        #img_detection = draw_detection(image, bboxes, scores, class_max_index, class_names)\n        #cv2.imwrite(f\"./VOC2007_jpeg_demo/\" + filename.split('.')[0]+'_' + \"detection.jpg\", img_detection)\n        print('YOLO_v2 detection is done!')\n        #cv2.imshow(\"detection_results\", img_detection)\n        #cv2.waitKey(0)\n\nif __name__ == '__main__':\n    main()\n",
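The comments in the script above describe `preprocess_image` as resize plus normalization plus a batch-dimension expansion. A hedged sketch of that pipeline, assuming the usual OpenCV BGR input; the real implementation in `utils.py` may differ in detail (e.g. color handling):

import cv2
import numpy as np

def preprocess_image_sketch(image, input_size=(416, 416)):
    # OpenCV loads images as BGR; YOLO weights typically expect RGB
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # resize to the fixed network input resolution (416x416 for YOLOv2)
    image_resized = cv2.resize(image_rgb, input_size)
    # scale pixel values to [0, 1]
    image_normalized = image_resized.astype(np.float32) / 255.0
    # add the batch dimension at axis 0 -> shape (1, 416, 416, 3)
    return np.expand_dims(image_normalized, axis=0)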
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport logging\nimport numpy as np\nimport torch\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import core\n\nfrom .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format\nfrom .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type\n\nlogger = logging.getLogger(__name__)\n\n\nclass ProtobufModel(torch.nn.Module):\n \"\"\"\n A class works just like nn.Module in terms of inference, but running\n caffe2 model under the hood. Input/Output are Dict[str, tensor] whose keys\n are in external_input/output.\n \"\"\"\n\n def __init__(self, predict_net, init_net):\n logger.info(\"Initializing ProtobufModel ...\")\n super().__init__()\n assert isinstance(predict_net, caffe2_pb2.NetDef)\n assert isinstance(init_net, caffe2_pb2.NetDef)\n self.ws_name = \"__ws_tmp__\"\n self.net = core.Net(predict_net)\n\n with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:\n ws.RunNetOnce(init_net)\n for blob in self.net.Proto().external_input:\n if blob not in ws.Blobs():\n ws.CreateBlob(blob)\n ws.CreateNet(self.net)\n\n self._error_msgs = set()\n\n def forward(self, inputs_dict):\n assert all(inp in self.net.Proto().external_input for inp in inputs_dict)\n with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:\n for b, tensor in inputs_dict.items():\n ws.FeedBlob(b, tensor)\n try:\n ws.RunNet(self.net.Proto().name)\n except RuntimeError as e:\n if not str(e) in self._error_msgs:\n self._error_msgs.add(str(e))\n logger.warning(\"Encountered new RuntimeError: \\n{}\".format(str(e)))\n logger.warning(\"Catch the error and use partial results.\")\n\n outputs_dict = collections.OrderedDict(\n [(b, ws.FetchBlob(b)) for b in self.net.Proto().external_output]\n )\n # Remove outputs of current run, this is necessary in order to\n # prevent fetching the result from previous run if the model fails\n # in the middle.\n for b in self.net.Proto().external_output:\n # Needs to create uninitialized blob to make the net runable.\n # This is \"equivalent\" to: ws.RemoveBlob(b) then ws.CreateBlob(b),\n # but there'no such API.\n ws.FeedBlob(b, \"{}, a C++ native class of type nullptr (uninitialized).\".format(b))\n\n return outputs_dict\n\n\nclass ProtobufDetectionModel(torch.nn.Module):\n \"\"\"\n A class works just like a pytorch meta arch in terms of inference, but running\n caffe2 model under the hood.\n \"\"\"\n\n def __init__(self, predict_net, init_net, *, convert_outputs=None):\n \"\"\"\n Args:\n predict_net, init_net (core.Net): caffe2 nets\n convert_outptus (callable): a function that converts caffe2\n outputs to the same format of the original pytorch model.\n By default, use the one defined in the caffe2 meta_arch.\n \"\"\"\n super().__init__()\n self.protobuf_model = ProtobufModel(predict_net, init_net)\n self.size_divisibility = 
get_pb_arg_vali(predict_net, \"size_divisibility\", 0)\n self.device = get_pb_arg_vals(predict_net, \"device\", b\"cpu\").decode(\"ascii\")\n\n if convert_outputs is None:\n meta_arch = get_pb_arg_vals(predict_net, \"meta_architecture\", b\"GeneralizedRCNN\")\n meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode(\"ascii\")]\n self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)\n else:\n self._convert_outputs = convert_outputs\n\n def _infer_output_devices(self, inputs_dict):\n def _get_device_type(torch_tensor):\n assert torch_tensor.device.type in [\"cpu\", \"cuda\"]\n assert torch_tensor.device.index == 0\n return torch_tensor.device.type\n\n predict_net = self.protobuf_model.net.Proto()\n input_device_types = {\n (name, 0): _get_device_type(tensor) for name, tensor in inputs_dict.items()\n }\n device_type_map = infer_device_type(\n predict_net, known_status=input_device_types, device_name_style=\"pytorch\"\n )\n ssa, versions = core.get_ssa(predict_net)\n versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]\n output_devices = [device_type_map[outp] for outp in versioned_outputs]\n return output_devices\n\n def _convert_inputs(self, batched_inputs):\n # currently all models convert inputs in the same way\n data, im_info = convert_batched_inputs_to_c2_format(\n batched_inputs, self.size_divisibility, self.device\n )\n return {\"data\": data, \"im_info\": im_info}\n\n def forward(self, batched_inputs):\n c2_inputs = self._convert_inputs(batched_inputs)\n c2_results = self.protobuf_model(c2_inputs)\n\n if any(t.device.type != \"cpu\" for _, t in c2_inputs.items()):\n output_devices = self._infer_output_devices(c2_inputs)\n else:\n output_devices = [\"cpu\" for _ in self.protobuf_model.net.Proto().external_output]\n\n def _cast_caffe2_blob_to_torch_tensor(blob, device):\n return torch.Tensor(blob).to(device) if isinstance(blob, np.ndarray) else None\n\n c2_results = {\n name: _cast_caffe2_blob_to_torch_tensor(c2_results[name], device)\n for name, device in zip(self.protobuf_model.net.Proto().external_output, output_devices)\n }\n\n return self._convert_outputs(batched_inputs, c2_inputs, c2_results)\n",
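A minimal sketch of how `ProtobufDetectionModel` is typically driven: parse the exported predict/init `NetDef` protobufs from disk and feed detectron2-style batched inputs. The `.pb` file names and the exact input-dict layout are assumptions for illustration:

import torch
from caffe2.proto import caffe2_pb2

def load_netdef(path):
    # standard protobuf deserialization of a serialized caffe2 NetDef
    net = caffe2_pb2.NetDef()
    with open(path, "rb") as f:
        net.ParseFromString(f.read())
    return net

predict_net = load_netdef("model.pb")        # hypothetical export paths
init_net = load_netdef("model_init.pb")
model = ProtobufDetectionModel(predict_net, init_net)

# one image in detectron2's batched-inputs format (uint8 CHW tensor)
batched_inputs = [{"image": torch.zeros(3, 480, 640, dtype=torch.uint8),
                   "height": 480, "width": 640}]
outputs = model(batched_inputs)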
"# BSD 3-Clause License\n#\n# Copyright (c) 2017 xxxx\n# All rights reserved.\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ============================================================================\nimport os\nimport numpy as np\nimport time\nimport datetime\nimport torch\nimport torchvision\nfrom torch import optim\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom evaluation import *\nfrom network import U_Net,R2U_Net,AttU_Net,R2AttU_Net\nimport csv\n\ntry:\n import apex\n from apex import amp\nexcept ImportError:\n amp = None\n\nclass Solver(object):\n def __init__(self, config, train_loader, valid_loader, test_loader):\n\n # Data loader\n self.train_loader = train_loader\n self.valid_loader = valid_loader\n self.test_loader = test_loader\n\n # Models\n self.unet = None\n self.optimizer = None\n self.img_ch = config.img_ch\n self.output_ch = config.output_ch\n self.criterion = torch.nn.BCELoss()\n self.augmentation_prob = config.augmentation_prob\n\n # Hyper-parameters\n self.lr = config.lr\n self.beta1 = config.beta1\n self.beta2 = config.beta2\n\n # Training settings\n self.num_epochs = config.num_epochs\n self.num_epochs_decay = config.num_epochs_decay\n self.batch_size = config.batch_size\n self.config = config\n\n # Step size\n self.log_step = config.log_step\n self.val_step = config.val_step\n\n # Path\n self.model_path = config.model_path\n self.result_path = config.result_path\n self.mode = config.mode\n\n self.device = torch.device('npu:'+str(config.npu_idx) if torch.npu.is_available() else 'cpu')\n torch.npu.set_device(self.device)\n self.model_type = config.model_type\n self.t = config.t\n self.build_model()\n\n def build_model(self):\n \"\"\"Build generator and discriminator.\"\"\"\n if self.model_type =='U_Net':\n self.unet = U_Net(img_ch=3,output_ch=1)\n elif self.model_type =='R2U_Net':\n self.unet = R2U_Net(img_ch=3,output_ch=1,t=self.t)\n elif self.model_type =='AttU_Net':\n if self.config.pretrained:\n print(\"=> using pre-trained model 
'{}'\".format(self.model_type))\n self.unet = AttU_Net(img_ch=3,output_ch=1)\n print(\"Load my train models...\")\n pretrained_dict = \\\n torch.load(self.config.pth_path, map_location=\"cpu\")\n self.unet.load_state_dict(pretrained_dict, strict=False) \n print('%s is Successfully Loaded from %s'%(self.model_type,self.config.pth_path)) \n else:\n print(\"=> creating model '{}'\".format(self.model_type))\n self.unet = AttU_Net(img_ch=3,output_ch=1)\n elif self.model_type == 'R2AttU_Net':\n self.unet = R2AttU_Net(img_ch=3,output_ch=1,t=self.t)\n self.unet.to(self.device)\n self.optimizer = apex.optimizers.NpuFusedAdam(list(self.unet.parameters()), self.lr, [self.beta1, self.beta2])\n\n if self.config.apex:\n self.unet, self.optimizer = amp.initialize(self.unet, self.optimizer,opt_level=self.config.apex_opt_level, loss_scale=self.config.loss_scale_value,combine_grad=True)\n\n if self.config.distributed:\n self.unet = torch.nn.parallel.DistributedDataParallel(self.unet, device_ids=[self.config.npu_idx])\n\n # self.print_network(self.unet, self.model_type)\n\n def print_network(self, model, name):\n \"\"\"Print out the network information.\"\"\"\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n\n if self.config.is_master_node:\n print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))\n\n def to_data(self, x):\n \"\"\"Convert variable to tensor.\"\"\"\n if torch.npu.is_available():\n x = x.cpu()\n return x.data\n\n def update_lr(self, g_lr, d_lr):\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n\n def reset_grad(self):\n \"\"\"Zero the gradient buffers.\"\"\"\n self.unet.zero_grad()\n\n def compute_accuracy(self,SR,GT):\n SR_flat = SR.view(-1)\n GT_flat = GT.view(-1)\n\n acc = GT_flat.data.cpu()==(SR_flat.data.cpu()>0.5)\n\n def tensor2img(self,x):\n img = (x[:,0,:,:]>x[:,1,:,:]).float()\n img = img*255\n return img\n\n\n def train(self):\n \"\"\"Train encoder, generator and discriminator.\"\"\"\n\n #====================================== Training ===========================================#\n #===========================================================================================#\n \n unet_path = os.path.join(self.model_path, '%s-%d-%.4f-%d-%.4f.pkl' %(self.model_type,self.num_epochs,self.lr,self.num_epochs_decay,self.augmentation_prob))\n\n # Train for Encoder\n lr = self.lr\n best_unet_score = 0.\n\n for epoch in range(self.num_epochs):\n if self.config.distributed:\n self.config.train_sampler.set_epoch(epoch)\n\n self.unet.train(True)\n epoch_loss = 0\n\n acc = 0. # Accuracy\n SE = 0. # Sensitivity (Recall)\n SP = 0. # Specificity\n PC = 0. # Precision\n F1 = 0. # F1 Score\n JS = 0. # Jaccard Similarity\n DC = 0. 
# Dice Coefficient\n length = 0\n start_time = 0\n steps = len(self.train_loader)\n for i, (images, GT) in enumerate(self.train_loader):\n # GT : Ground Truth\n if i == 5:\n start_time = time.time()\n images = images.to(self.device)\n GT = GT.to(self.device)\n\n # SR : Segmentation Result\n SR = self.unet(images)\n SR_probs = F.sigmoid(SR)\n SR_flat = SR_probs.view(SR_probs.size(0),-1)\n\n GT_flat = GT.view(GT.size(0),-1)\n loss = self.criterion(SR_flat,GT_flat)\n epoch_loss += loss.item()\n\n # Backprop + optimize\n self.reset_grad()\n if self.config.apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n self.optimizer.step()\n\n acc += get_accuracy(SR,GT)\n SE += get_sensitivity(SR,GT)\n SP += get_specificity(SR,GT)\n PC += get_precision(SR,GT)\n F1 += get_F1(SR,GT)\n JS += get_JS(SR,GT)\n DC += get_DC(SR,GT)\n length += 1\n if i > 5 and self.config.is_master_node:\n print('Epoch [%d/%d], Step:[%d/%d], Loss: %.4f, [Training] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f' % (\n epoch + 1, self.num_epochs, i, steps, loss, \\\n acc, SE, SP, PC, F1, JS, DC,))\n FPS = self.batch_size * (steps - 5) * self.config.rank_size / (time.time() - start_time)\n acc = acc/length\n SE = SE/length\n SP = SP/length\n PC = PC/length\n F1 = F1/length\n JS = JS/length\n DC = DC/length\n\n # Print the log info\n if self.config.is_master_node:\n print('Epoch [%d/%d], Loss: %.4f, \\n[Training] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f, FPS: %.2f' % (\n epoch+1, self.num_epochs, \\\n epoch_loss,\\\n acc,SE,SP,PC,F1,JS,DC,FPS))\n\n # Decay learning rate\n if (epoch+1) > (self.num_epochs - self.num_epochs_decay):\n lr -= (self.lr / float(self.num_epochs_decay))\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n if self.config.is_master_node:\n print ('Decay learning rate to lr: {}.'.format(lr))\n \n\n #===================================== Validation ====================================#\n self.unet.train(False)\n self.unet.eval()\n\n acc = 0. # Accuracy\n SE = 0. # Sensitivity (Recall)\n SP = 0. # Specificity\n PC = 0. # Precision\n F1 = 0. # F1 Score\n JS = 0. # Jaccard Similarity\n DC = 0. 
# Dice Coefficient\n            length=0\n            for i, (images, GT) in enumerate(self.valid_loader):\n\n                images = images.to(self.device)\n                GT = GT.to(self.device)\n                SR = F.sigmoid(self.unet(images))\n                acc += get_accuracy(SR,GT)\n                SE += get_sensitivity(SR,GT)\n                SP += get_specificity(SR,GT)\n                PC += get_precision(SR,GT)\n                F1 += get_F1(SR,GT)\n                JS += get_JS(SR,GT)\n                DC += get_DC(SR,GT)\n\n                length += 1\n\n            acc = acc/length\n            SE = SE/length\n            SP = SP/length\n            PC = PC/length\n            F1 = F1/length\n            JS = JS/length\n            DC = DC/length\n            unet_score = acc\n            if self.config.is_master_node:\n                print('[Validation] Acc: %.4f, SE: %.4f, SP: %.4f, PC: %.4f, F1: %.4f, JS: %.4f, DC: %.4f'%(acc,SE,SP,PC,F1,JS,DC))\n\n            '''\n            torchvision.utils.save_image(images.data.cpu(),\n                                         os.path.join(self.result_path,\n                                                      '%s_valid_%d_image.png'%(self.model_type,epoch+1)))\n            torchvision.utils.save_image(SR.data.cpu(),\n                                         os.path.join(self.result_path,\n                                                      '%s_valid_%d_SR.png'%(self.model_type,epoch+1)))\n            torchvision.utils.save_image(GT.data.cpu(),\n                                         os.path.join(self.result_path,\n                                                      '%s_valid_%d_GT.png'%(self.model_type,epoch+1)))\n            '''\n\n\n            # Save Best U-Net model\n            if unet_score > best_unet_score:\n                best_unet_score = unet_score\n                best_epoch = epoch\n                best_unet = self.unet.state_dict()\n                if self.config.is_master_node:\n                    print('Best %s model score : %.4f'%(self.model_type,best_unet_score))\n                    torch.save(best_unet,unet_path)\n\n        #===================================== Test ====================================#\n        del self.unet\n        del best_unet\n        self.build_model()\n        if not self.config.distributed:\n            self.unet.load_state_dict(torch.load(unet_path))\n\n        self.unet.train(False)\n        self.unet.eval()\n\n        acc = 0.    # Accuracy\n        SE = 0.     # Sensitivity (Recall)\n        SP = 0.     # Specificity\n        PC = 0.     # Precision\n        F1 = 0.     # F1 Score\n        JS = 0.     # Jaccard Similarity\n        DC = 0.     # Dice Coefficient\n        length=0\n        # evaluate the reloaded best model on the held-out test split\n        # (the original iterated valid_loader again and mixed per-batch and\n        # per-image averaging, which skewed the reported test accuracy)\n        for i, (images, GT) in enumerate(self.test_loader):\n\n            images = images.to(self.device)\n            GT = GT.to(self.device)\n            SR = F.sigmoid(self.unet(images))\n            acc += get_accuracy(SR,GT)\n            SE += get_sensitivity(SR,GT)\n            SP += get_specificity(SR,GT)\n            PC += get_precision(SR,GT)\n            F1 += get_F1(SR,GT)\n            JS += get_JS(SR,GT)\n            DC += get_DC(SR,GT)\n\n            length += 1\n\n        acc = acc/length\n        SE = SE/length\n        SP = SP/length\n        PC = PC/length\n        F1 = F1/length\n        JS = JS/length\n        DC = DC/length\n        unet_score = acc\n        print(\"Test finished, acc: %.3f \" % (acc))\n",
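The `Solver` above pulls a large number of fields off its `config` object. A hedged sketch of a minimal configuration; the field names are taken from the attributes the class actually reads, while every value here is a placeholder assumption:

from types import SimpleNamespace

config = SimpleNamespace(
    # model / data
    img_ch=3, output_ch=1, model_type="AttU_Net", t=3, augmentation_prob=0.4,
    # optimizer
    lr=2e-4, beta1=0.5, beta2=0.999,
    # schedule
    num_epochs=100, num_epochs_decay=70, batch_size=4, log_step=2, val_step=2,
    # paths / mode
    model_path="./models", result_path="./results", mode="train",
    pretrained=False, pth_path="",
    # NPU / mixed precision / distributed plumbing
    npu_idx=0, apex=False, apex_opt_level="O2", loss_scale_value=1024,
    distributed=False, is_master_node=True, rank_size=1, train_sampler=None,
)

# with train/valid/test DataLoaders already built:
# solver = Solver(config, train_loader, valid_loader, test_loader)
# solver.train()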
"# Copyright 2019 Ross Wightman\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" AdamW Optimizer\nImpl copied from PyTorch master\n\"\"\"\nimport math\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\nclass AdamW(Optimizer):\n r\"\"\"Implements AdamW algorithm.\n\n The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.\n The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay coefficient (default: 1e-2)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _Decoupled Weight Decay Regularization:\n https://arxiv.org/abs/1711.05101\n .. 
_On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=1e-2, amsgrad=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, amsgrad=amsgrad)\n super(AdamW, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(AdamW, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n\n # Perform stepweight decay\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\n\n # Perform optimization step\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n else:\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])\n\n step_size = group['lr'] / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n return loss\n",
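A minimal usage sketch for the `AdamW` class above. The point of the decoupled formulation is visible in `step()`: weight decay multiplies the parameters directly (`p.data.mul_(1 - lr * weight_decay)`) rather than being folded into the gradient as L2 regularization:

import torch

model = torch.nn.Linear(10, 2)
optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)

x, y = torch.randn(8, 10), torch.randn(8, 2)
loss = torch.nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()  # decay applied to weights first, then the Adam update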
"# BSD 3-Clause License\n#\n# Copyright (c) 2017 xxxx\n# All rights reserved.\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ============================================================================\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n\ndef decode_seg_map_sequence(label_masks, dataset='pascal'):\n rgb_masks = []\n for label_mask in label_masks:\n rgb_mask = decode_segmap(label_mask, dataset)\n rgb_masks.append(rgb_mask)\n rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2]))\n return rgb_masks\n\n\ndef decode_segmap(label_mask, dataset, plot=False):\n \"\"\"Decode segmentation class labels into a color image\n Args:\n label_mask (np.ndarray): an (M,N) array of integer values denoting\n the class label at each spatial location.\n plot (bool, optional): whether to show the resulting color image\n in a figure.\n Returns:\n (np.ndarray, optional): the resulting decoded color image.\n \"\"\"\n if dataset == 'pascal' or dataset == 'coco':\n n_classes = 21\n label_colours = get_pascal_labels()\n elif dataset == 'cityscapes':\n n_classes = 19\n label_colours = get_cityscapes_labels()\n else:\n raise NotImplementedError\n\n r = label_mask.copy()\n g = label_mask.copy()\n b = label_mask.copy()\n for ll in range(0, n_classes):\n r[label_mask == ll] = label_colours[ll, 0]\n g[label_mask == ll] = label_colours[ll, 1]\n b[label_mask == ll] = label_colours[ll, 2]\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n if plot:\n plt.imshow(rgb)\n plt.show()\n else:\n return rgb\n\n\ndef encode_segmap(mask):\n \"\"\"Encode segmentation label images as pascal classes\n Args:\n mask (np.ndarray): raw segmentation label image of dimension\n (M, N, 3), in which the Pascal classes are encoded as colours.\n Returns:\n (np.ndarray): class map with dimensions (M,N), where the value at\n a given location is the integer denoting the class index.\n \"\"\"\n mask = mask.astype(int)\n label_mask = 
np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)\n for ii, label in enumerate(get_pascal_labels()):\n label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii\n label_mask = label_mask.astype(int)\n return label_mask\n\n\ndef get_cityscapes_labels():\n return np.array([\n [128, 64, 128],\n [244, 35, 232],\n [70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [0, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [0, 0, 142],\n [0, 0, 70],\n [0, 60, 100],\n [0, 80, 100],\n [0, 0, 230],\n [119, 11, 32]])\n\n\ndef get_pascal_labels():\n \"\"\"Load the mapping that associates pascal classes with label colors\n Returns:\n np.ndarray with dimensions (21, 3)\n \"\"\"\n return np.asarray([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],\n [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],\n [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],\n [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],\n [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],\n [0, 64, 128]])",
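A quick round-trip check for the `encode_segmap`/`decode_segmap` pair above, using a hypothetical 2x2 mask painted with the PASCAL colour of class 1 (`[128, 0, 0]`); `decode_segmap` returns floats scaled to `[0, 1]`:

```python
# Sketch only: colour mask -> class indices -> colour image round trip.
import numpy as np

rgb_mask = np.zeros((2, 2, 3), dtype=np.uint8)
rgb_mask[0, 0] = [128, 0, 0]             # PASCAL colour of class 1
label_mask = encode_segmap(rgb_mask)     # -> array([[1, 0], [0, 0]])
decoded = decode_segmap(label_mask, dataset='pascal')
assert decoded.shape == (2, 2, 3)
assert decoded[0, 0, 0] == 128 / 255.0   # class colour restored, rescaled
```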
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# @Time : 19-3-12 涓嬪崍9:03\n# @Author : MaybeShewill-CV\n# @Site : https://github.com/MaybeShewill-CV/CRNN_Tensorflow\n# @File : evaluation_tools.py\n# @IDE: PyCharm\n\"\"\"\nSome evaluation tools\n\"\"\"\nimport itertools\n\nimport numpy as np\nimport glog as log\n#import matplotlib.pyplot as plt\n\n\nSYNTH90K_CLASS_NAMES = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',\n 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',\n 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', ' ']\n\n\ndef compute_accuracy(ground_truth, predictions, display=False, mode='per_char'):\n \"\"\"\n Computes accuracy\n :param ground_truth:\n :param predictions:\n :param display: Whether to print values to stdout\n :param mode: if 'per_char' is selected then\n single_label_accuracy = correct_predicted_char_nums_of_single_sample / single_label_char_nums\n avg_label_accuracy = sum(single_label_accuracy) / label_nums\n if 'full_sequence' is selected then\n single_label_accuracy = 1 if the prediction result is exactly the same as label else 0\n avg_label_accuracy = sum(single_label_accuracy) / label_nums\n :return: avg_label_accuracy\n \"\"\"\n if mode == 'per_char':\n\n accuracy = []\n\n for index, label in enumerate(ground_truth):\n prediction = predictions[index]\n total_count = len(label)\n correct_count = 0\n try:\n for i, tmp in enumerate(label):\n if tmp == prediction[i]:\n correct_count += 1\n except IndexError:\n continue\n finally:\n try:\n accuracy.append(correct_count / total_count)\n except ZeroDivisionError:\n if len(prediction) == 0:\n accuracy.append(1)\n else:\n accuracy.append(0)\n avg_accuracy = np.mean(np.array(accuracy).astype(np.float32), axis=0)\n elif mode == 'full_sequence':\n try:\n correct_count = 0\n for index, label in enumerate(ground_truth):\n prediction = predictions[index]\n if prediction == label:\n correct_count += 1\n avg_accuracy = correct_count / len(ground_truth)\n except ZeroDivisionError:\n if not predictions:\n avg_accuracy = 1\n else:\n avg_accuracy = 0\n else:\n raise NotImplementedError('Other accuracy compute mode has not been implemented')\n\n if display:\n print('Mean accuracy is 
{:5f}'.format(avg_accuracy))\n\n return avg_accuracy\n\n\n#def plot_confusion_matrix(cm, classes=SYNTH90K_CLASS_NAMES,\n# normalize=False,\n# title='Confusion matrix',\n# cmap=plt.cm.Blues):\n# \"\"\"\n# This function prints and plots the confusion matrix.\n# Normalization can be applied by setting `normalize=True`.\n# \"\"\"\n# if normalize:\n# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n# log.info(\"Normalized confusion matrix\")\n# else:\n# log.info('Confusion matrix, without normalization')\n#\n# print(cm)\n#\n# plt.imshow(cm, interpolation='nearest', cmap=cmap)\n# plt.title(title)\n# plt.colorbar()\n# tick_marks = np.arange(len(classes))\n# plt.xticks(tick_marks, classes, rotation=45)\n# plt.yticks(tick_marks, classes)\n#\n# fmt = '.2f' if normalize else 'd'\n# thresh = cm.max() / 2.\n# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n# plt.text(j, i, format(cm[i, j], fmt),\n# horizontalalignment=\"center\",\n# color=\"white\" if cm[i, j] > thresh else \"black\")\n#\n# plt.ylabel('True label')\n# plt.xlabel('Predicted label')\n# plt.tight_layout()\n\n\ndef print_cm(cm, labels=SYNTH90K_CLASS_NAMES, hide_zeroes=False,\n hide_diagonal=False, hide_threshold=None):\n \"\"\"\n pretty print for confusion matrixes\n :param cm:\n :param labels:\n :param hide_zeroes:\n :param hide_diagonal:\n :param hide_threshold:\n :return:\n \"\"\"\n columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length\n empty_cell = \" \" * columnwidth\n\n # Begin CHANGES\n fst_empty_cell = (columnwidth - 3) // 2 * \" \" + \"t/p\" + (columnwidth - 3) // 2 * \" \"\n\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = \" \" * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell\n # Print header\n print(\" \" + fst_empty_cell, end=\" \")\n # End CHANGES\n\n for label in labels:\n print(\"%{0}s\".format(columnwidth) % label, end=\" \")\n\n print()\n # Print rows\n for i, label1 in enumerate(labels):\n print(\" %{0}s\".format(columnwidth) % label1, end=\" \")\n for j in range(len(labels)):\n cell = \"%{0}.1f\".format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n print(cell, end=\" \")\n print()\n",
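A worked example of the two modes of `compute_accuracy` above, with hypothetical label/prediction pairs: per-character accuracy averages the fraction of matching characters per sample, while full-sequence accuracy only counts exact matches:

```python
# Sketch only: 'worlb' matches 'world' in 4 of 5 characters.
gt = ['hello', 'world']
pred = ['hello', 'worlb']

print(compute_accuracy(gt, pred, mode='per_char'))       # (1.0 + 0.8) / 2 = 0.9
print(compute_accuracy(gt, pred, mode='full_sequence'))  # 1 / 2 = 0.5
```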
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport argparse\nimport math\nimport os\nimport random\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch.distributed as dist\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nimport torch.utils.data\nimport yaml\nimport apex\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\nfrom apex import amp\n\nimport test # import test.py to get mAP after each epoch\nfrom models.models import *\nfrom utils.datasets import create_dataloader\nfrom utils.general import (\n check_img_size, torch_distributed_zero_first, labels_to_class_weights, plot_labels, check_anchors,\n labels_to_image_weights, compute_loss, plot_images, fitness, strip_optimizer, plot_results,\n get_latest_run, check_git_status, check_file, increment_dir, print_mutation, plot_evolution)\nfrom utils.google_utils import attempt_download\nfrom utils.torch_utils import init_seeds, ModelEMA, select_device, intersect_dicts\n\n\ndef set_seed_everything(seed):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef train(hyp, opt, device, tb_writer=None):\n print(f'Hyperparameters {hyp}')\n log_dir = Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / 'evolve' # logging directory\n wdir = str(log_dir / 'weights') + os.sep # weights directory\n os.makedirs(wdir, exist_ok=True)\n last = wdir + 'last.pt'\n best = wdir + 'best.pt'\n results_file = str(log_dir / 'results.txt')\n epochs, batch_size, total_batch_size, weights, rank = \\\n opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank\n\n # TODO: Use DDP logging. 
Only the first process is allowed to log.\n # Save run settings\n with open(log_dir / 'hyp.yaml', 'w') as f:\n yaml.dump(hyp, f, sort_keys=False)\n with open(log_dir / 'opt.yaml', 'w') as f:\n yaml.dump(vars(opt), f, sort_keys=False)\n\n # Configure\n npu = device.type != 'cpu'\n # init_seeds(2 + rank)\n set_seed_everything(1234)\n with open(opt.data) as f:\n data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict\n train_path = data_dict['train']\n test_path = data_dict['val']\n nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names']) # number classes, names\n assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check\n\n # Model\n pretrained = weights.endswith('.pt')\n if pretrained:\n with torch_distributed_zero_first(rank):\n attempt_download(weights) # download if not found locally\n ckpt = torch.load(weights, map_location=device) # load checkpoint\n model = Darknet(opt.cfg).to(device) # create\n state_dict = {k: v for k, v in ckpt['model'].items() if model.state_dict()[k].numel() == v.numel()}\n model.load_state_dict(state_dict, strict=False)\n print('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report\n else:\n model = Darknet(opt.cfg).to(device) # create\n print('')\n # Optimizer\n nbs = 64 # nominal batch size\n accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing\n hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay\n\n pg0, pg1, pg2 = [], [], [] # optimizer parameter groups\n for k, v in dict(model.named_parameters()).items():\n if '.bias' in k:\n pg2.append(v) # biases\n elif 'Conv2d.weight' in k:\n pg1.append(v) # apply weight_decay\n else:\n pg0.append(v) # all else\n\n if opt.adam:\n optimizer = apex.optimizers.NpuFusedAdam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum\n else:\n optimizer = apex.optimizers.NpuFusedSGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)\n\n optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay\n optimizer.add_param_group({'params': pg2}) # add pg2 (biases)\n print('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))\n del pg0, pg1, pg2\n\n if opt.amp:\n model, optimizer = amp.initialize(model, optimizer, opt_level=opt.opt_level,\n loss_scale=opt.loss_scale, combine_grad=True)\n # Scheduler https://arxiv.org/pdf/1812.01187.pdf\n # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR\n lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2 # cosine\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)\n # plot_lr_scheduler(optimizer, scheduler, epochs)\n\n # Resume\n start_epoch, best_fitness = 0, 0.0\n if pretrained:\n # Optimizer\n if ckpt['optimizer'] is not None:\n optimizer.load_state_dict(ckpt['optimizer'])\n best_fitness = ckpt['best_fitness']\n\n # Results\n if ckpt.get('training_results') is not None:\n with open(results_file, 'w') as file:\n file.write(ckpt['training_results']) # write results.txt\n\n # Epochs\n start_epoch = ckpt['epoch'] + 1\n if epochs < start_epoch:\n print('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' 
%\n (weights, ckpt['epoch'], epochs))\n epochs += ckpt['epoch'] # finetune additional epochs\n\n del ckpt, state_dict\n \n # Image sizes\n gs = 32 # grid size (max stride)\n imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples\n\n # SyncBatchNorm\n if opt.sync_bn and npu and rank != -1:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)\n print('Using SyncBatchNorm()')\n\n # Exponential moving average\n ema = ModelEMA(model) if rank in [-1, 0] else None\n\n # DDP mode\n if npu and rank != -1:\n model = DDP(model, device_ids=[opt.local_rank], broadcast_buffers=False)\n\n # Trainloader\n dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True,\n cache=opt.cache_images, rect=opt.rect, local_rank=rank,\n world_size=opt.world_size)\n mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class\n nb = len(dataloader) # number of batches\n assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)\n\n # Testloader\n if rank in [-1, 0]:\n ema.updates = start_epoch * nb // accumulate # set EMA updates ***\n # local_rank is set to -1. Because only the first process is expected to do evaluation.\n testloader = create_dataloader(test_path, imgsz_test, batch_size, imgsz_test + 32, opt, hyp=hyp, augment=False,\n cache=opt.cache_images, pad=0.0, rect=True, local_rank=-1,\n world_size=opt.world_size)[0]\n\n # Model parameters\n hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset\n model.nc = nc # attach number of classes to model\n model.hyp = hyp # attach hyperparameters to model\n model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)\n model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights\n model.names = names\n\n # Class frequency\n if rank in [-1, 0]:\n labels = np.concatenate(dataset.labels, 0)\n c = torch.tensor(labels[:, 0]) # classes\n # cf = torch.bincount(c.long(), minlength=nc) + 1.\n # model._initialize_biases(cf.to(device))\n plot_labels(labels, save_dir=log_dir)\n if tb_writer:\n tb_writer.add_histogram('classes', c, 0)\n\n # Check anchors\n #if not opt.noautoanchor:\n # check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)\n\n # Start training\n t0 = time.time()\n nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)\n # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training\n maps = np.zeros(nc) # mAP per class\n results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'\n scheduler.last_epoch = start_epoch - 1 # do not move\n\n if rank in [0, -1]:\n print('Image sizes %g train, %g test' % (imgsz, imgsz_test))\n print('Using %g dataloader workers' % dataloader.num_workers)\n print('Starting training for %g epochs...' 
% epochs)\n # torch.autograd.set_detect_anomaly(True)\n for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------\n model.train()\n\n # Update image weights (optional)\n if dataset.image_weights:\n # Generate indices\n if rank in [-1, 0]:\n w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights\n image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)\n dataset.indices = random.choices(range(dataset.n), weights=image_weights,\n k=dataset.n) # rand weighted idx\n # Broadcast if DDP\n if rank != -1:\n indices = torch.zeros([dataset.n], dtype=torch.int)\n if rank == 0:\n indices[:] = torch.tensor(dataset.indices, dtype=torch.int)\n dist.broadcast(indices, 0)\n if rank != 0:\n dataset.indices = indices.cpu().numpy()\n\n # Update mosaic border\n # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)\n # dataset.mosaic_border = [b - imgsz, -b] # height, width borders\n\n mloss = torch.zeros(4, device=device) # mean losses\n if rank != -1:\n dataloader.sampler.set_epoch(epoch)\n pbar = enumerate(dataloader)\n if rank in [-1, 0]:\n print(('\\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))\n pbar = tqdm(pbar, total=nb) # progress bar\n optimizer.zero_grad()\n start_time = time.time()\n for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------\n ni = i + nb * epoch # number integrated batches (since train start)\n imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0\n\n # Warmup\n if ni <= nw:\n xi = [0, nw] # x interp\n # model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)\n accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())\n for j, x in enumerate(optimizer.param_groups):\n # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0\n x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])\n if 'momentum' in x:\n x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])\n\n # Multi-scale\n if opt.multi_scale:\n sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size\n sf = sz / max(imgs.shape[2:]) # scale factor\n if sf != 1:\n ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)\n imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)\n\n nt = targets.shape[0]\n batch_size = imgs.shape[0]\n nt_max = 32 * batch_size\n while nt > nt_max:\n nt_max *= 2\n print('number of targets exceeds nt_max, doubling nt_max to', nt_max)\n pad_size = nt_max - nt\n pad_target = torch.nn.functional.pad(targets, [0, 0, 0, pad_size])\n pred = model(imgs)\n\n # Loss\n loss, loss_items = compute_loss(pred, pad_target.to(device), model) # scaled by batch_size\n if rank != -1:\n loss *= opt.world_size # gradient averaged between devices in DDP mode\n # if not torch.isfinite(loss):\n # print('WARNING: non-finite loss, ending training ', loss_items)\n # return results\n\n # Backward\n if opt.amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n # Optimize\n if ni % accumulate == 0:\n optimizer.step() # optimizer.step\n optimizer.zero_grad()\n if ema is not None:\n # x = torch.tensor([1.]).to(device)\n params_fp32_fused = optimizer.get_model_combined_params()\n ema.update(model, 'npu', params_fp32_fused[0])\n\n # Print\n if rank in [-1, 
0]:\n mloss = (mloss * i + loss_items) / (i + 1) # update mean losses\n mem = '%.3gG' % (torch.npu.memory_reserved() / 1E9 if torch.npu.is_available() else 0) # (GB)\n s = ('%10s' * 2 + '%10.4g' * 6) % (\n '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])\n pbar.set_description(s)\n\n # Plot\n if ni < 3:\n f = str(log_dir / ('train_batch%g.jpg' % ni)) # filename\n result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)\n if tb_writer and result is not None:\n tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)\n # tb_writer.add_graph(model, imgs) # add model to tensorboard\n\n # end batch ------------------------------------------------------------------------------------------------\n if rank in [-1, 0]:\n epoch_time = time.time() - start_time\n print('Training speed is {} FPS'.format(total_batch_size * len(pbar) / (epoch_time)))\n # Scheduler\n scheduler.step()\n\n # DDP process 0 or single-GPU\n if rank in [-1, 0]:\n # mAP\n if ema is not None:\n ema.update_attr(model)\n final_epoch = epoch + 1 == epochs\n if not opt.notest: # Calculate mAP\n results, maps, times = test.test(opt.data,\n batch_size=batch_size,\n imgsz=imgsz_test,\n save_json=final_epoch and opt.data.endswith(os.sep + 'coco.yaml'),\n model=ema.ema.module if hasattr(ema.ema, 'module') else ema.ema,\n single_cls=opt.single_cls,\n dataloader=testloader,\n save_dir=log_dir)\n\n # Write\n with open(results_file, 'a') as f:\n f.write(s + '%10.4g' * 7 % results + '\\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls)\n if len(opt.name) and opt.bucket:\n os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))\n\n # Tensorboard\n if tb_writer:\n tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',\n 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',\n 'val/giou_loss', 'val/obj_loss', 'val/cls_loss']\n for x, tag in zip(list(mloss[:-1]) + list(results), tags):\n tb_writer.add_scalar(tag, x, epoch)\n\n # Update best mAP\n fi = fitness(np.array(results).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1]\n if fi > best_fitness:\n best_fitness = fi\n\n # Save model\n save = (not opt.nosave) or (final_epoch and not opt.evolve)\n if save:\n with open(results_file, 'r') as f: # create checkpoint\n ckpt = {'epoch': epoch,\n 'best_fitness': best_fitness,\n 'training_results': f.read(),\n 'model': ema.ema.module.state_dict() if hasattr(ema, 'module') else ema.ema.state_dict(),\n 'optimizer': None if final_epoch else optimizer.state_dict()}\n\n # Save last, best and delete\n torch.save(ckpt, last)\n if epoch >= (epochs-5):\n torch.save(ckpt, last.replace('.pt','_{:03d}.pt'.format(epoch)))\n if (best_fitness == fi) and not final_epoch:\n torch.save(ckpt, best)\n del ckpt\n # end epoch ----------------------------------------------------------------------------------------------------\n # end training\n\n if rank in [-1, 0]:\n # Strip optimizers\n n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name\n fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n\n for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):\n if os.path.exists(f1):\n os.rename(f1, f2) # rename\n ispt = f2.endswith('.pt') # is *.pt\n strip_optimizer(f2) if ispt else None # strip optimizer\n os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None # upload\n # Finish\n if not 
opt.evolve:\n plot_results(save_dir=log_dir) # save as results.png\n print('%g epochs completed in %.3f hours.\\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))\n\n dist.destroy_process_group() if rank not in [-1, 0] else None\n torch.npu.empty_cache()\n return results\n\ndef main(opt):\n # Resume\n if opt.resume:\n last = get_latest_run() if opt.resume == 'get_last' else opt.resume # resume from most recent run\n if last and not opt.weights:\n print(f'Resuming training from {last}')\n opt.weights = last if opt.resume and not opt.weights else opt.weights\n if opt.local_rank == -1 or (\"RANK\" in os.environ and os.environ[\"RANK\"] == \"0\"):\n check_git_status()\n\n opt.hyp = opt.hyp or ('data/hyp.scratch.yaml')\n opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files\n assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'\n\n opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)\n # device = select_device(opt.device, batch_size=opt.batch_size)\n loc = 'npu:{}'.format(opt.device_id)\n device = torch.device('cpu') if opt.device_id == 'cpu' else torch.device(loc)\n opt.total_batch_size = opt.batch_size\n opt.world_size = 1\n opt.global_rank = -1\n\n # DDP mode\n if opt.local_rank != -1:\n assert torch.npu.device_count() > opt.local_rank\n torch.npu.set_device(opt.local_rank)\n device = torch.device('npu', opt.local_rank)\n dist.init_process_group(backend='nccl', init_method='env://') # distributed backend\n opt.world_size = dist.get_world_size()\n opt.global_rank = dist.get_rank()\n assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'\n opt.batch_size = opt.total_batch_size // opt.world_size\n else:\n torch.npu.set_device(loc)\n print(opt)\n with open(opt.hyp) as f:\n hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps\n\n # Train\n if not opt.evolve:\n tb_writer = None\n if opt.global_rank in [-1, 0]:\n print('Start Tensorboard with \"tensorboard --logdir %s\", view at http://localhost:6006/' % opt.logdir)\n tb_writer = SummaryWriter(log_dir=increment_dir(Path(opt.logdir) / 'exp', opt.name)) # runs/exp\n\n train(hyp, opt, device, tb_writer)\n\n # Evolve hyperparameters (optional)\n else:\n # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)\n meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)\n 'momentum': (0.1, 0.6, 0.98), # SGD momentum/Adam beta1\n 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay\n 'giou': (1, 0.02, 0.2), # GIoU loss gain\n 'cls': (1, 0.2, 4.0), # cls loss gain\n 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight\n 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)\n 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight\n 'iou_t': (0, 0.1, 0.7), # IoU training threshold\n 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold\n 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)\n 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)\n 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)\n 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)\n 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)\n 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)\n 'scale': (1, 0.0, 0.9), # image scale (+/- gain)\n 'shear': (1, 0.0, 10.0), # image shear (+/- deg)\n 'perspective': (1, 0.0, 0.001), # image perspective (+/- fraction), range 
0-0.001\n 'flipud': (0, 0.0, 1.0), # image flip up-down (probability)\n 'fliplr': (1, 0.0, 1.0), # image flip left-right (probability)\n 'mixup': (1, 0.0, 1.0)} # image mixup (probability)\n\n assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'\n opt.notest, opt.nosave = True, True # only test/save final epoch\n # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices\n yaml_file = Path('runs/evolve/hyp_evolved.yaml') # save best result here\n if opt.bucket:\n os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists\n\n for _ in range(100): # generations to evolve\n if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate\n # Select parent(s)\n parent = 'single' # parent selection method: 'single' or 'weighted'\n x = np.loadtxt('evolve.txt', ndmin=2)\n n = min(5, len(x)) # number of previous results to consider\n x = x[np.argsort(-fitness(x))][:n] # top n mutations\n w = fitness(x) - fitness(x).min() # weights\n if parent == 'single' or len(x) == 1:\n # x = x[random.randint(0, n - 1)] # random selection\n x = x[random.choices(range(n), weights=w)[0]] # weighted selection\n elif parent == 'weighted':\n x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination\n\n # Mutate\n mp, s = 0.9, 0.2 # mutation probability, sigma\n npr = np.random\n npr.seed(int(time.time()))\n g = np.array([x[0] for x in meta.values()]) # gains 0-1\n ng = len(meta)\n v = np.ones(ng)\n while all(v == 1): # mutate until a change occurs (prevent duplicates)\n v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)\n for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)\n hyp[k] = float(x[i + 7] * v[i]) # mutate\n\n # Constrain to limits\n for k, v in meta.items():\n hyp[k] = max(hyp[k], v[1]) # lower limit\n hyp[k] = min(hyp[k], v[2]) # upper limit\n hyp[k] = round(hyp[k], 5) # significant digits\n\n # Train mutation\n results = train(hyp.copy(), opt, device)\n\n # Write mutation results\n print_mutation(hyp.copy(), results, yaml_file, opt.bucket)\n\n # Plot results\n plot_evolution(yaml_file)\n print('Hyperparameter evolution complete. Best results saved as: %s\\nCommand to train a new model with these '\n 'hyperparameters: $ python train.py --hyp %s' % (yaml_file, yaml_file))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', type=str, default='yolov4.pt', help='initial weights path')\n parser.add_argument('--cfg', type=str, default='', help='model.yaml path')\n parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')\n parser.add_argument('--hyp', type=str, default='', help='hyperparameters path, i.e. 
data/hyp.scratch.yaml')\n parser.add_argument('--epochs', type=int, default=300)\n parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')\n parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='train,test sizes')\n parser.add_argument('--rect', action='store_true', help='rectangular training')\n parser.add_argument('--resume', nargs='?', const='get_last', default=False,\n help='resume from given path/last.pt, or most recent run if blank')\n parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')\n parser.add_argument('--notest', action='store_true', help='only test final epoch')\n parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')\n parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')\n parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')\n parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')\n parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')\n parser.add_argument('--device_id', default='', help='npu device, i.e. 0 or 0,1,2,3 or cpu')\n parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')\n parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')\n parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')\n parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')\n parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')\n parser.add_argument('--logdir', type=str, default='runs/', help='logging directory')\n parser.add_argument('--amp', default=False, action='store_true',\n help='use amp to train the model')\n parser.add_argument('--loss_scale', default='dynamic',\n help='loss scale using in amp, default means dynamic loss scale')\n parser.add_argument('--opt-level', default='O1', type=str,\n help='loss scale using in amp, default O1')\n opt = parser.parse_args()\n main(opt)",
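The schedule built in `train` above is a cosine that decays the `LambdaLR` multiplier from 1.0 at epoch 0 to a floor of 0.2 at the final epoch, on top of which the warmup block ramps each parameter group's LR with `np.interp` over the first `nw` iterations. A standalone sketch of just the cosine factor, assuming `epochs=300` as in the default arguments:

```python
# Sketch only: the cosine multiplier passed to lr_scheduler.LambdaLR above.
import math

epochs = 300
lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2

print(lf(0))            # 1.0   -> full lr0 at the start
print(lf(epochs // 2))  # 0.6   -> midpoint of the cosine
print(lf(epochs - 1))   # ~0.2  -> floor reached at the end
```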
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport json\nimport argparse\nimport numpy as np\n\n\ndef read_label_and_pred(npu_data_path):\n \"\"\"\n read label and predict\n :param npu_data_path:\n :return:\n \"\"\"\n label_list = []\n npu_data_list = []\n all_file = os.listdir(npu_data_path)\n all_file.sort()\n for result_file in all_file:\n if result_file.endswith(\".json\"):\n label = result_file.split(\"_\")[-1].split(\".\")[0].lower()\n label_list.append(label)\n result_path = npu_data_path + \"/\" + result_file\n with open(result_path, \"r\") as f:\n load_dict = json.load(f)\n npu_str = load_dict[\"MxpiTextsInfo\"][0][\"text\"][0]\n npu_data_list.append(npu_str)\n return label_list, npu_data_list\n\n\ndef compute_per_char(ground_truth, predictions):\n \"\"\"\n compute per char accuracy\n :param ground_truth:\n :param predictions:\n :return:\n \"\"\"\n accuracy = []\n for index, label in enumerate(ground_truth):\n prediction = predictions[index]\n total_count = len(label)\n correct_count = 0\n try:\n for i, tmp in enumerate(label):\n if tmp == prediction[i]:\n correct_count += 1\n except IndexError:\n continue\n finally:\n try:\n accuracy.append(correct_count / total_count)\n except ZeroDivisionError:\n if len(prediction) == 0:\n accuracy.append(1)\n else:\n accuracy.append(0)\n avg_accuracy = np.mean(np.array(accuracy).astype(np.float32), axis=0)\n print('PerChar Precision is {:5f}'.format(avg_accuracy))\n return avg_accuracy\n\n\ndef compute_full_sequence(ground_truth, predictions):\n \"\"\"\n compute full sequence accuracy\n :param ground_truth:\n :param predictions:\n :return:\n \"\"\"\n try:\n correct_count = 0\n mistake_count = [0] * 7\n for index, label in enumerate(ground_truth):\n prediction = predictions[index]\n if prediction == label:\n correct_count += 1\n else:\n mistake_count[int(index/100)] += 1\n avg_accuracy = correct_count / len(ground_truth)\n print(\"correct num: \" + str(correct_count))\n print(\"total count: \" + str(len(ground_truth)))\n print(\"mistake count: \" + str(mistake_count))\n except ZeroDivisionError:\n if not predictions:\n avg_accuracy = 1\n else:\n avg_accuracy = 0\n print('Full Sequence Precision is {:5f}'.format(avg_accuracy))\n return avg_accuracy\n\n\ndef compute_accuracy(ground_truth, predictions, mode='per_char'):\n \"\"\"\n Computes accuracy\n :param ground_truth:\n :param predictions:\n :param mode:\n :return: avg_label_accuracy\n \"\"\"\n if mode == \"per_char\":\n avg_accuracy = compute_per_char(ground_truth, predictions)\n elif mode == 'full_sequence':\n avg_accuracy = compute_full_sequence(ground_truth, predictions)\n else:\n raise NotImplementedError(\n 'Other accuracy compute model has not been implemented')\n\n return avg_accuracy\n\n\ndef main(args):\n \"\"\"\n main function\n :param args:\n :return:\n \"\"\"\n gt_data_list, npu_data_list = read_label_and_pred(args.npu_data_path)\n compute_accuracy(gt_data_list, npu_data_list, mode=\"per_char\")\n 
compute_accuracy(gt_data_list, npu_data_list, mode=\"full_sequence\")\n\n\ndef parse_args():\n \"\"\"\n parse args\n :return:\n \"\"\"\n parse = argparse.ArgumentParser()\n parse.add_argument('--npu_data_path', type=str,\n default='./npu_crnn_sdk_opencv/')\n return parse.parse_args()\n\n\nif __name__ == '__main__':\n main(parse_args())\n",
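`read_label_and_pred` above assumes the ground truth is encoded in each SDK result file name as a trailing `_<label>.json` suffix; a quick illustration with a hypothetical file name:

```python
# Sketch only: how the label is parsed from a result file name.
result_file = 'crnn_infer_0001_Hello.json'  # hypothetical naming convention
label = result_file.split('_')[-1].split('.')[0].lower()
print(label)  # -> 'hello'
```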
"# Copyright 2019 Ross Wightman\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Tensorflow Preprocessing Adapter\n\nAllows use of Tensorflow preprocessing pipeline in PyTorch Transform\n\nCopyright of original Tensorflow code below.\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\n\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"ImageNet preprocessing for MnasNet.\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\nIMAGE_SIZE = 224\nCROP_PADDING = 32\n\n\ndef distorted_bounding_box_crop(image_bytes,\n bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0),\n max_attempts=100,\n scope=None):\n \"\"\"Generates cropped_image using one of the bboxes randomly distorted.\n\n See `tf.image.sample_distorted_bounding_box` for more documentation.\n\n Args:\n image_bytes: `Tensor` of binary image data.\n bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`\n where each coordinate is [0, 1) and the coordinates are arranged\n as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole\n image.\n min_object_covered: An optional `float`. Defaults to `0.1`. The cropped\n area of the image must contain at least this fraction of any bounding\n box supplied.\n aspect_ratio_range: An optional list of `float`s. The cropped area of the\n image must have an aspect ratio = width / height within this range.\n area_range: An optional list of `float`s. The cropped area of the image\n must contain a fraction of the supplied image within in this range.\n max_attempts: An optional `int`. Number of attempts at generating a cropped\n region of the image of the specified constraints. 
After `max_attempts`\n failures, return the entire image.\n scope: Optional `str` for name scope.\n Returns:\n cropped image `Tensor`\n \"\"\"\n with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]):\n shape = tf.image.extract_jpeg_shape(image_bytes)\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n shape,\n bounding_boxes=bbox,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n bbox_begin, bbox_size, _ = sample_distorted_bounding_box\n\n # Crop the image to the specified bounding box.\n offset_y, offset_x, _ = tf.unstack(bbox_begin)\n target_height, target_width, _ = tf.unstack(bbox_size)\n crop_window = tf.stack([offset_y, offset_x, target_height, target_width])\n image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)\n\n return image\n\n\ndef _at_least_x_are_equal(a, b, x):\n \"\"\"At least `x` of `a` and `b` `Tensors` are equal.\"\"\"\n match = tf.equal(a, b)\n match = tf.cast(match, tf.int32)\n return tf.greater_equal(tf.reduce_sum(match), x)\n\n\ndef _decode_and_random_crop(image_bytes, image_size, resize_method):\n \"\"\"Make a random crop of image_size.\"\"\"\n bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])\n image = distorted_bounding_box_crop(\n image_bytes,\n bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(3. / 4, 4. / 3.),\n area_range=(0.08, 1.0),\n max_attempts=10,\n scope=None)\n original_shape = tf.image.extract_jpeg_shape(image_bytes)\n bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)\n\n image = tf.cond(\n bad,\n lambda: _decode_and_center_crop(image_bytes, image_size, resize_method),\n lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0])\n\n return image\n\n\ndef _decode_and_center_crop(image_bytes, image_size, resize_method):\n \"\"\"Crops to center of image with padding then scales image_size.\"\"\"\n shape = tf.image.extract_jpeg_shape(image_bytes)\n image_height = shape[0]\n image_width = shape[1]\n\n padded_center_crop_size = tf.cast(\n ((image_size / (image_size + CROP_PADDING)) *\n tf.cast(tf.minimum(image_height, image_width), tf.float32)),\n tf.int32)\n\n offset_height = ((image_height - padded_center_crop_size) + 1) // 2\n offset_width = ((image_width - padded_center_crop_size) + 1) // 2\n crop_window = tf.stack([offset_height, offset_width,\n padded_center_crop_size, padded_center_crop_size])\n image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)\n image = tf.image.resize([image], [image_size, image_size], resize_method)[0]\n\n return image\n\n\ndef _flip(image):\n \"\"\"Random horizontal image flip.\"\"\"\n image = tf.image.random_flip_left_right(image)\n return image\n\n\ndef preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):\n \"\"\"Preprocesses the given image for training.\n\n Args:\n image_bytes: `Tensor` representing an image binary of arbitrary size.\n use_bfloat16: `bool` for whether to use bfloat16.\n image_size: image size.\n interpolation: image interpolation method\n\n Returns:\n A preprocessed image `Tensor`.\n \"\"\"\n resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR\n image = _decode_and_random_crop(image_bytes, image_size, resize_method)\n image = _flip(image)\n image = tf.reshape(image, [image_size, image_size, 3])\n image = tf.image.convert_image_dtype(\n 
image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)\n return image\n\n\ndef preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):\n \"\"\"Preprocesses the given image for evaluation.\n\n Args:\n image_bytes: `Tensor` representing an image binary of arbitrary size.\n use_bfloat16: `bool` for whether to use bfloat16.\n image_size: image size.\n interpolation: image interpolation method\n\n Returns:\n A preprocessed image `Tensor`.\n \"\"\"\n resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR\n image = _decode_and_center_crop(image_bytes, image_size, resize_method)\n image = tf.reshape(image, [image_size, image_size, 3])\n image = tf.image.convert_image_dtype(\n image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)\n return image\n\n\ndef preprocess_image(image_bytes,\n is_training=False,\n use_bfloat16=False,\n image_size=IMAGE_SIZE,\n interpolation='bicubic'):\n \"\"\"Preprocesses the given image.\n\n Args:\n image_bytes: `Tensor` representing an image binary of arbitrary size.\n is_training: `bool` for whether the preprocessing is for training.\n use_bfloat16: `bool` for whether to use bfloat16.\n image_size: image size.\n interpolation: image interpolation method\n\n Returns:\n A preprocessed image `Tensor` with value range of [0, 255].\n \"\"\"\n if is_training:\n return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation)\n else:\n return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation)\n\n\nclass TfPreprocessTransform:\n\n def __init__(self, is_training=False, size=224, interpolation='bicubic'):\n self.is_training = is_training\n self.size = size[0] if isinstance(size, tuple) else size\n self.interpolation = interpolation\n self._image_bytes = None\n self.process_image = self._build_tf_graph()\n self.sess = None\n\n def _build_tf_graph(self):\n with tf.device('/cpu:0'):\n self._image_bytes = tf.placeholder(\n shape=[],\n dtype=tf.string,\n )\n img = preprocess_image(\n self._image_bytes, self.is_training, False, self.size, self.interpolation)\n return img\n\n def __call__(self, image_bytes):\n if self.sess is None:\n self.sess = tf.Session()\n img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes})\n img = img.round().clip(0, 255).astype(np.uint8)\n if img.ndim < 3:\n img = np.expand_dims(img, axis=-1)\n img = np.rollaxis(img, 2) # HWC to CHW\n return img\n",
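`TfPreprocessTransform` above builds the TF 1.x preprocessing graph once (`tf.placeholder` plus a lazily created `tf.Session`) and runs it per call, handing back a CHW uint8 array that a PyTorch loader can consume; the eval path center-crops `image_size / (image_size + CROP_PADDING)` (224/256 = 0.875) of the shorter side before resizing. A usage sketch, assuming TF 1.x semantics and a JPEG at a hypothetical path:

```python
# Sketch only: raw JPEG bytes in, CHW uint8 numpy array out.
transform = TfPreprocessTransform(is_training=False, size=224,
                                  interpolation='bicubic')
with open('example.jpg', 'rb') as f:  # hypothetical input file
    image_bytes = f.read()

chw = transform(image_bytes)  # shape (3, 224, 224), dtype uint8, values 0-255
print(chw.shape, chw.dtype)
```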
"#\n# BSD 3-Clause License\n#\n# Copyright (c) 2017 xxxx\n# All rights reserved.\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ============================================================================\n#from collections import OrderedDict\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom torchvision.ops import misc as misc_nn_ops\nfrom torchvision.ops import MultiScaleRoIAlign\n\nfrom ..utils import load_state_dict_from_url\n\nfrom .faster_rcnn import FasterRCNN\nfrom .backbone_utils import resnet_fpn_backbone\n\n__all__ = [\n \"MaskRCNN\", \"maskrcnn_resnet50_fpn\",\n]\n\n\nclass MaskRCNN(FasterRCNN):\n \"\"\"\n Implements Mask R-CNN.\n\n The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each\n image, and should be in 0-1 range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values\n between 0 and H and 0 and W\n - labels (Int64Tensor[N]): the class label for each ground-truth box\n - masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance\n\n The model returns a Dict[Tensor] during training, containing the classification and regression\n losses for both the RPN and the R-CNN, and the mask loss.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as\n follows:\n - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values between\n 0 and H and 0 and W\n - labels (Int64Tensor[N]): the predicted labels for each image\n - scores (Tensor[N]): the scores or each prediction\n - masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. 
In order to\n obtain the final segmentation masks, the soft masks can be thresholded, generally\n with a value of 0.5 (mask >= 0.5)\n\n Arguments:\n backbone (nn.Module): the network used to compute the features for the model.\n It should contain a out_channels attribute, which indicates the number of output\n channels that each feature map has (and it should be the same for all feature maps).\n The backbone should return a single Tensor or and OrderedDict[Tensor].\n num_classes (int): number of output classes of the model (including the background).\n If box_predictor is specified, num_classes should be None.\n min_size (int): minimum size of the image to be rescaled before feeding it to the backbone\n max_size (int): maximum size of the image to be rescaled before feeding it to the backbone\n image_mean (Tuple[float, float, float]): mean values used for input normalization.\n They are generally the mean values of the dataset on which the backbone has been trained\n on\n image_std (Tuple[float, float, float]): std values used for input normalization.\n They are generally the std values of the dataset on which the backbone has been trained on\n rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature\n maps.\n rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN\n rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training\n rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing\n rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training\n rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing\n rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals\n rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be\n considered as positive during training of the RPN.\n rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be\n considered as negative during training of the RPN.\n rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN\n for computing the loss\n rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training\n of the RPN\n box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding boxes\n box_head (nn.Module): module that takes the cropped feature maps as input\n box_predictor (nn.Module): module that takes the output of box_head and returns the\n classification logits and box regression deltas.\n box_score_thresh (float): during inference, only return proposals with a classification score\n greater than box_score_thresh\n box_nms_thresh (float): NMS threshold for the prediction head. 
Used during inference\n box_detections_per_img (int): maximum number of detections per image, for all classes.\n box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be\n considered as positive during training of the classification head\n box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be\n considered as negative during training of the classification head\n box_batch_size_per_image (int): number of proposals that are sampled during training of the\n classification head\n box_positive_fraction (float): proportion of positive proposals in a mini-batch during training\n of the classification head\n bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the\n bounding boxes\n mask_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding boxes, which will be used for the mask head.\n mask_head (nn.Module): module that takes the cropped feature maps as input\n mask_predictor (nn.Module): module that takes the output of the mask_head and returns the\n segmentation mask logits\n\n Example::\n\n >>> import torchvision\n >>> from torchvision.models.detection import MaskRCNN\n >>> from torchvision.models.detection.rpn import AnchorGenerator\n >>>\n >>> # load a pre-trained model for classification and return\n >>> # only the features\n >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features\n >>> # MaskRCNN needs to know the number of\n >>> # output channels in a backbone. For mobilenet_v2, it's 1280\n >>> # so we need to add it here\n >>> backbone.out_channels = 1280\n >>>\n >>> # let's make the RPN generate 5 x 3 anchors per spatial\n >>> # location, with 5 different sizes and 3 different aspect\n >>> # ratios. We have a Tuple[Tuple[int]] because each feature\n >>> # map could potentially have different sizes and\n >>> # aspect ratios\n >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),\n >>> aspect_ratios=((0.5, 1.0, 2.0),))\n >>>\n >>> # let's define what are the feature maps that we will\n >>> # use to perform the region of interest cropping, as well as\n >>> # the size of the crop after rescaling.\n >>> # if your backbone returns a Tensor, featmap_names is expected to\n >>> # be [0]. 
More generally, the backbone should return an\n >>> # OrderedDict[Tensor], and in featmap_names you can choose which\n >>> # feature maps to use.\n >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],\n >>> output_size=7,\n >>> sampling_ratio=2)\n >>>\n >>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],\n >>> output_size=14,\n >>> sampling_ratio=2)\n >>> # put the pieces together inside a FasterRCNN model\n >>> model = MaskRCNN(backbone,\n >>> num_classes=2,\n >>> rpn_anchor_generator=anchor_generator,\n >>> box_roi_pool=roi_pooler,\n >>> mask_roi_pool=mask_roi_pooler)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n \"\"\"\n def __init__(self, backbone, num_classes=None,\n # transform parameters\n min_size=800, max_size=1333,\n image_mean=None, image_std=None,\n # RPN parameters\n rpn_anchor_generator=None, rpn_head=None,\n rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,\n rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,\n rpn_nms_thresh=0.7,\n rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,\n rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,\n # Box parameters\n box_roi_pool=None, box_head=None, box_predictor=None,\n box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,\n box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,\n box_batch_size_per_image=512, box_positive_fraction=0.25,\n bbox_reg_weights=None,\n # Mask parameters\n mask_roi_pool=None, mask_head=None, mask_predictor=None):\n\n assert isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None)))\n\n if num_classes is not None:\n if mask_predictor is not None:\n raise ValueError(\"num_classes should be None when mask_predictor is specified\")\n\n out_channels = backbone.out_channels\n\n if mask_roi_pool is None:\n mask_roi_pool = MultiScaleRoIAlign(\n featmap_names=[0, 1, 2, 3],\n output_size=14,\n sampling_ratio=2)\n\n if mask_head is None:\n mask_layers = (256, 256, 256, 256)\n mask_dilation = 1\n mask_head = MaskRCNNHeads(out_channels, mask_layers, mask_dilation)\n\n if mask_predictor is None:\n mask_predictor_in_channels = 256 # == mask_layers[-1]\n mask_dim_reduced = 256\n mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels,\n mask_dim_reduced, num_classes)\n\n super(MaskRCNN, self).__init__(\n backbone, num_classes,\n # transform parameters\n min_size, max_size,\n image_mean, image_std,\n # RPN-specific parameters\n rpn_anchor_generator, rpn_head,\n rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test,\n rpn_post_nms_top_n_train, rpn_post_nms_top_n_test,\n rpn_nms_thresh,\n rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n rpn_batch_size_per_image, rpn_positive_fraction,\n # Box parameters\n box_roi_pool, box_head, box_predictor,\n box_score_thresh, box_nms_thresh, box_detections_per_img,\n box_fg_iou_thresh, box_bg_iou_thresh,\n box_batch_size_per_image, box_positive_fraction,\n bbox_reg_weights)\n\n self.roi_heads.mask_roi_pool = mask_roi_pool\n self.roi_heads.mask_head = mask_head\n self.roi_heads.mask_predictor = mask_predictor\n\n\nclass MaskRCNNHeads(nn.Sequential):\n def __init__(self, in_channels, layers, dilation):\n \"\"\"\n Arguments:\n num_classes (int): number of output classes\n input_size (int): number of channels of the input once it's flattened\n representation_size (int): size of the intermediate representation\n \"\"\"\n d = OrderedDict()\n next_feature = in_channels\n for layer_idx, layer_features in enumerate(layers, 1):\n d[\"mask_fcn{}\".format(layer_idx)] = 
misc_nn_ops.Conv2d(\n next_feature, layer_features, kernel_size=3,\n stride=1, padding=dilation, dilation=dilation)\n d[\"relu{}\".format(layer_idx)] = nn.ReLU(inplace=True)\n next_feature = layer_features\n\n super(MaskRCNNHeads, self).__init__(d)\n for name, param in self.named_parameters():\n if \"weight\" in name:\n nn.init.kaiming_normal_(param, mode=\"fan_out\", nonlinearity=\"relu\")\n # elif \"bias\" in name:\n # nn.init.constant_(param, 0)\n\n\nclass MaskRCNNPredictor(nn.Sequential):\n def __init__(self, in_channels, dim_reduced, num_classes):\n super(MaskRCNNPredictor, self).__init__(OrderedDict([\n (\"conv5_mask\", misc_nn_ops.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)),\n (\"relu\", nn.ReLU(inplace=True)),\n (\"mask_fcn_logits\", misc_nn_ops.Conv2d(dim_reduced, num_classes, 1, 1, 0)),\n ]))\n\n for name, param in self.named_parameters():\n if \"weight\" in name:\n nn.init.kaiming_normal_(param, mode=\"fan_out\", nonlinearity=\"relu\")\n # elif \"bias\" in name:\n # nn.init.constant_(param, 0)\n\n\nmodel_urls = {\n 'maskrcnn_resnet50_fpn_coco':\n 'https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth',\n}\n\n\ndef maskrcnn_resnet50_fpn(pretrained=False, progress=True,\n num_classes=91, pretrained_backbone=True, **kwargs):\n \"\"\"\n Constructs a Mask R-CNN model with a ResNet-50-FPN backbone.\n\n The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each\n image, and should be in ``0-1`` range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values\n between ``0`` and ``H`` and ``0`` and ``W``\n - labels (``Int64Tensor[N]``): the class label for each ground-truth box\n - masks (``UInt8Tensor[N, H, W]``): the segmentation binary masks for each instance\n\n The model returns a ``Dict[Tensor]`` during training, containing the classification and regression\n losses for both the RPN and the R-CNN, and the mask loss.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as\n follows:\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values between\n ``0`` and ``H`` and ``0`` and ``W``\n - labels (``Int64Tensor[N]``): the predicted labels for each image\n - scores (``Tensor[N]``): the scores or each prediction\n - masks (``UInt8Tensor[N, 1, H, W]``): the predicted masks for each instance, in ``0-1`` range. 
In order to\n obtain the final segmentation masks, the soft masks can be thresholded, generally\n with a value of 0.5 (``mask >= 0.5``)\n\n Example::\n\n >>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Arguments:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n if pretrained:\n # no need to download the backbone if pretrained is set\n pretrained_backbone = False\n backbone = resnet_fpn_backbone('resnet50', pretrained_backbone)\n model = MaskRCNN(backbone, num_classes, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls['maskrcnn_resnet50_fpn_coco'],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n",
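A common follow-up to the constructor above is fine-tuning on a dataset with a different number of classes. The sketch below uses the standard torchvision pattern of swapping the box and mask predictors; num_classes=2 and the hidden size 256 (mirroring mask_dim_reduced above) are illustrative choices, not values taken from this file.

import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor

# start from the COCO-pretrained model built by maskrcnn_resnet50_fpn above
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)

num_classes = 2  # one foreground class plus background

# replace the box classification head with one sized for num_classes
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

# replace the mask predictor likewise (256 == mask_dim_reduced above)
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, 256, num_classes)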
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\n\n\ndef apply_transforms(x, y, mean, stdev, transforms):\n for _t in transforms:\n if _t is not None:\n x, y = _t(x, y, mean, stdev)\n return x, y\n\n\ndef apply_test_transforms(x, mean, stdev, transforms):\n for _t in transforms:\n if _t is not None:\n x = _t(x, y=None, mean=mean, stdev=stdev)\n return x\n\n\nclass PadXYZ:\n def __init__(self, shape=None):\n self.shape = shape\n\n def __call__(self, x, y, mean, stdev):\n paddings = tf.constant([[0, 0], [0, 0], [0, 5], [0, 0]])\n x = tf.pad(x, paddings, \"CONSTANT\")\n if y is None:\n return x\n y = tf.pad(y, paddings, \"CONSTANT\")\n return x, y\n\n\nclass CenterCrop:\n def __init__(self, shape):\n self.shape = shape\n\n def __call__(self, x, y, mean, stdev):\n shape = x.get_shape()\n delta = [(shape[i].value - self.shape[i]) // 2 for i in range(len(self.shape))]\n x = x[\n delta[0]:delta[0] + self.shape[0],\n delta[1]:delta[1] + self.shape[1],\n delta[2]:delta[2] + self.shape[2]\n ]\n if y is None:\n return x\n y = y[\n delta[0]:delta[0] + self.shape[0],\n delta[1]:delta[1] + self.shape[1],\n delta[2]:delta[2] + self.shape[2]\n ]\n return x, y\n\n\nclass RandomCrop3D:\n def __init__(self, shape, margins=(0, 0, 0)):\n self.shape = shape\n self.margins = margins\n\n def __call__(self, x, y, mean, stdev):\n shape = x.get_shape()\n min = tf.constant(self.margins, dtype=tf.float32)\n max = tf.constant([shape[0].value - self.shape[0] - self.margins[0],\n shape[1].value - self.shape[1] - self.margins[1],\n shape[2].value - self.shape[2] - self.margins[2]], dtype=tf.float32)\n center = tf.random_uniform((len(self.shape),), minval=min, maxval=max)\n center = tf.cast(center, dtype=tf.int32)\n x = x[center[0]:center[0] + self.shape[0],\n center[1]:center[1] + self.shape[1],\n center[2]:center[2] + self.shape[2]]\n # for NPU\n x.set_shape((128,128,128,4))\n # for NPU\n if y is None:\n return x\n y = y[center[0]:center[0] + self.shape[0],\n center[1]:center[1] + self.shape[1],\n center[2]:center[2] + self.shape[2]]\n # for NPU\n y.set_shape((128, 128, 128))\n # for NPU\n return x, y\n\n\nclass NormalizeImages:\n def __init__(self):\n pass\n\n def __call__(self, x, y, mean, stdev):\n mask = tf.math.greater(x, 0)\n x = tf.where(mask, (x - 
tf.cast(mean, x.dtype)) / (tf.cast(stdev + 1e-8, x.dtype)), x)\n\n if y is None:\n return x\n return x, y\n\n\nclass Cast:\n def __init__(self, dtype=tf.float32):\n self._dtype = dtype\n\n def __call__(self, x, y, mean, stdev):\n if y is None:\n return tf.cast(x, dtype=self._dtype)\n return tf.cast(x, dtype=self._dtype), y\n\n\nclass RandomHorizontalFlip:\n def __init__(self, threshold=0.5):\n self._threshold = threshold\n\n def __call__(self, x, y, mean, stdev):\n h_flip = tf.random_uniform([]) > self._threshold\n\n x = tf.cond(h_flip, lambda: tf.reverse(x, axis=[1]), lambda: x)\n y = tf.cond(h_flip, lambda: tf.reverse(y, axis=[1]), lambda: y)\n\n return x, y\n\n\nclass RandomVerticalFlip:\n def __init__(self, threshold=0.5):\n self._threshold = threshold\n\n def __call__(self, x, y, mean, stdev):\n h_flip = tf.random_uniform([]) > self._threshold\n\n x = tf.cond(h_flip, lambda: tf.reverse(x, axis=[0]), lambda: x)\n y = tf.cond(h_flip, lambda: tf.reverse(y, axis=[0]), lambda: y)\n\n return x, y\n\n\nclass RandomGammaCorrection:\n def __init__(self, gamma_range=(0.8, 1.5), keep_stats=False, threshold=0.5, epsilon=1e-8):\n self._gamma_range = gamma_range\n self._keep_stats = keep_stats\n self._eps = epsilon\n self._threshold = threshold\n\n def __call__(self, x, y, mean, stdev):\n augment = tf.random_uniform([]) > self._threshold\n gamma = tf.random_uniform([], minval=self._gamma_range[0], maxval=self._gamma_range[1])\n\n x_min = tf.math.reduce_min(x)\n x_range = tf.math.reduce_max(x) - x_min\n\n x = tf.cond(augment,\n lambda: tf.math.pow(((x - x_min) / float(x_range + self._eps)), gamma) * x_range + x_min,\n lambda: x)\n return x, y\n\n\nclass RandomBrightnessCorrection:\n def __init__(self, alpha=0.1, threshold=0.5, per_channel=True):\n self._alpha_range = [1.0 - alpha, 1.0 + alpha]\n self._threshold = threshold\n self._per_channel = per_channel\n\n def __call__(self, x, y, mean, stdev):\n mask = tf.math.greater(x, 0)\n size = x.get_shape()[-1].value if self._per_channel else 1\n augment = tf.random_uniform([]) > self._threshold\n correction = tf.random_uniform([size],\n minval=self._alpha_range[0],\n maxval=self._alpha_range[1],\n dtype=x.dtype)\n\n x = tf.cond(augment,\n lambda: tf.where(mask, x + correction, x),\n lambda: x)\n\n return x, y\n\n\nclass OneHotLabels:\n def __init__(self, n_classes=1):\n self._n_classes = n_classes\n\n def __call__(self, x, y, mean, stdev):\n return x, tf.one_hot(y, self._n_classes)\n\n\nclass PadXY:\n def __init__(self, dst_size=None):\n if not dst_size:\n raise ValueError(\"Invalid padding size: {}\".format(dst_size))\n\n self._dst_size = dst_size\n\n def __call__(self, x, y, mean, stdev):\n return tf.pad(x, self._build_padding(x)), \\\n tf.pad(y, self._build_padding(y))\n\n def _build_padding(self, _t):\n padding = []\n for i in range(len(_t.shape)):\n if i < len(self._dst_size):\n padding.append((0, self._dst_size[i] - _t.shape[i]))\n else:\n padding.append((0, 0))\n return padding\n",
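To show how these callables compose, here is a minimal sketch of a training-time pipeline built with apply_transforms from the classes above; the (x, y, mean, stdev) inputs are hypothetical placeholders, and the crop shape simply matches the hard-coded set_shape((128, 128, 128, 4)) in RandomCrop3D.

import tensorflow as tf

train_transforms = [
    Cast(dtype=tf.float32),
    RandomCrop3D(shape=(128, 128, 128)),   # matches the NPU set_shape above
    NormalizeImages(),
    RandomHorizontalFlip(),
    RandomVerticalFlip(),
    RandomBrightnessCorrection(alpha=0.1),
    OneHotLabels(n_classes=4),
]

def augment(x, y, mean, stdev):
    # x: [D, H, W, C] volume, y: [D, H, W] label map (placeholder shapes)
    return apply_transforms(x, y, mean, stdev, transforms=train_transforms)

# typically applied inside a tf.data pipeline, e.g. dataset.map(augment, ...)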
"# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport contextlib\nimport logging\nimport numpy as np\nimport time\nimport weakref\nimport torch\nfrom apex import amp\n\nimport detectron2.utils.comm as comm\nfrom detectron2.utils.events import EventStorage\n\n__all__ = [\"HookBase\", \"TrainerBase\", \"SimpleTrainer\"]\n\n\ntry:\n _nullcontext = contextlib.nullcontext # python 3.7+\nexcept AttributeError:\n\n @contextlib.contextmanager\n def _nullcontext(enter_result=None):\n yield enter_result\n\n\nclass HookBase:\n \"\"\"\n Base class for hooks that can be registered with :class:`TrainerBase`.\n\n Each hook can implement 4 methods. The way they are called is demonstrated\n in the following snippet:\n ::\n hook.before_train()\n for iter in range(start_iter, max_iter):\n hook.before_step()\n trainer.run_step()\n hook.after_step()\n hook.after_train()\n\n Notes:\n 1. In the hook method, users can access `self.trainer` to access more\n properties about the context (e.g., current iteration).\n\n 2. A hook that does something in :meth:`before_step` can often be\n implemented equivalently in :meth:`after_step`.\n If the hook takes non-trivial time, it is strongly recommended to\n implement the hook in :meth:`after_step` instead of :meth:`before_step`.\n The convention is that :meth:`before_step` should only take negligible time.\n\n Following this convention will allow hooks that do care about the difference\n between :meth:`before_step` and :meth:`after_step` (e.g., timer) to\n function properly.\n\n Attributes:\n trainer: A weak reference to the trainer object. Set by the trainer when the hook is\n registered.\n \"\"\"\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n pass\n\n def after_train(self):\n \"\"\"\n Called after the last iteration.\n \"\"\"\n pass\n\n def before_step(self):\n \"\"\"\n Called before each iteration.\n \"\"\"\n pass\n\n def after_step(self):\n \"\"\"\n Called after each iteration.\n \"\"\"\n pass\n\n\nclass TrainerBase:\n \"\"\"\n Base class for iterative trainer with hooks.\n\n The only assumption we made here is: the training runs in a loop.\n A subclass can implement what the loop is.\n We made no assumptions about the existence of dataloader, optimizer, model, etc.\n\n Attributes:\n iter(int): the current iteration.\n\n start_iter(int): The iteration to start with.\n By convention the minimum possible value is 0.\n\n max_iter(int): The iteration to end training.\n\n storage(EventStorage): An EventStorage that's opened during the course of training.\n \"\"\"\n\n def __init__(self):\n self._hooks = []\n\n def register_hooks(self, hooks):\n \"\"\"\n Register hooks to the trainer. 
The hooks are executed in the order\n they are registered.\n\n Args:\n hooks (list[Optional[HookBase]]): list of hooks\n \"\"\"\n hooks = [h for h in hooks if h is not None]\n for h in hooks:\n assert isinstance(h, HookBase)\n # To avoid circular reference, hooks and trainer cannot own each other.\n # This normally does not matter, but will cause memory leak if the\n # involved objects contain __del__:\n # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/\n h.trainer = weakref.proxy(self)\n self._hooks.extend(hooks)\n\n def train(self, start_iter: int, max_iter: int):\n \"\"\"\n Args:\n start_iter, max_iter (int): See docs above\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info(\"Starting training from iteration {}\".format(start_iter))\n\n self.iter = self.start_iter = start_iter\n self.max_iter = max_iter\n\n with EventStorage(start_iter) as self.storage:\n try:\n self.before_train()\n for self.iter in range(start_iter, max_iter):\n\n self.before_step()\n self.run_step()\n self.after_step()\n\n except Exception:\n logger.exception(\"Exception during training:\")\n raise\n finally:\n self.after_train()\n\n def before_train(self):\n for h in self._hooks:\n h.before_train()\n\n def after_train(self):\n for h in self._hooks:\n h.after_train()\n\n def before_step(self):\n for h in self._hooks:\n h.before_step()\n\n def after_step(self):\n for h in self._hooks:\n h.after_step()\n # this guarantees, that in each hook's after_step, storage.iter == trainer.iter\n self.storage.step()\n\n def run_step(self):\n raise NotImplementedError\n\n\nclass SimpleTrainer(TrainerBase):\n \"\"\"\n A simple trainer for the most common type of task:\n single-cost single-optimizer single-data-source iterative optimization.\n It assumes that every step, you:\n\n 1. Compute the loss with a data from the data_loader.\n 2. Compute the gradients with the above loss.\n 3. Update the model with the optimizer.\n\n All other tasks during training (checkpointing, logging, evaluation, LR schedule)\n are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.\n\n If you want to do anything fancier than this,\n either subclass TrainerBase and implement your own `run_step`,\n or write your own training loop.\n \"\"\"\n\n def __init__(self, model, data_loader, optimizer, aspect_ratio_grouping=False):\n \"\"\"\n Args:\n model: a torch Module. Takes a data from data_loader and returns a\n dict of losses.\n data_loader: an iterable. 
Contains data to be used to call model.\n optimizer: a torch optimizer.\n \"\"\"\n super().__init__()\n\n \"\"\"\n We set the model to training mode in the trainer.\n However it's valid to train a model that's in eval mode.\n If you want your model (or a submodule of it) to behave\n like evaluation during training, you can overwrite its train() method.\n \"\"\"\n model.train()\n self.aspect_ratio_grouping = aspect_ratio_grouping\n self.model = model\n self.data_loader = data_loader\n if self.aspect_ratio_grouping:\n self._data_loader_iter = iter(data_loader)\n self.optimizer = optimizer\n\n def run_step(self):\n \"\"\"\n Implement the standard training logic described above.\n \"\"\"\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If you want to do something with the data, you can wrap the dataloader.\n \"\"\"\n if self.aspect_ratio_grouping:\n data=next(self._data_loader_iter)\n else:\n data = self.data_loader.next()\n data_time = time.perf_counter() - start\n\n \"\"\"\n If you want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss_dict.values())\n\n \"\"\"\n If you need to accumulate gradients or do something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n if self.cfg.AMP:\n with amp.scale_loss(losses, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n losses.backward()\n\n # use a new stream so the ops don't wait for DDP\n with torch.cuda.stream(\n torch.cuda.Stream()\n ) if losses.device.type == \"cuda\" else _nullcontext():\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n self._detect_anomaly(losses, loss_dict)\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method. But it is\n suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4\n \"\"\"\n self.optimizer.step()\n\n def _detect_anomaly(self, losses, loss_dict):\n if not torch.isfinite(losses).all():\n raise FloatingPointError(\n \"Loss became infinite or NaN at iteration={}!\\nloss_dict = {}\".format(\n self.iter, loss_dict\n )\n )\n\n def _write_metrics(self, metrics_dict: dict):\n \"\"\"\n Args:\n metrics_dict (dict): dict of scalar metrics\n \"\"\"\n metrics_dict = {\n k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)\n for k, v in metrics_dict.items()\n }\n # gather metrics among all workers for logging\n # This assumes we do DDP-style training, which is currently the only\n # supported method in detectron2.\n all_metrics_dict = comm.gather(metrics_dict)\n\n if comm.is_main_process():\n if \"data_time\" in all_metrics_dict[0]:\n # data_time among workers can have high variance. The actual latency\n # caused by data_time is the maximum among workers.\n data_time = np.max([x.pop(\"data_time\") for x in all_metrics_dict])\n self.storage.put_scalar(\"data_time\", data_time)\n\n # average the rest metrics\n metrics_dict = {\n k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()\n }\n total_losses_reduced = sum(loss for loss in metrics_dict.values())\n\n self.storage.put_scalar(\"total_loss\", total_losses_reduced)\n if len(metrics_dict) > 1:\n self.storage.put_scalars(**metrics_dict)\n",
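As a usage sketch for the classes above: custom hooks subclass HookBase and are attached with register_hooks. Note that run_step reads self.cfg.AMP even though __init__ never sets a cfg, so the sketch attaches a minimal stand-in; model, data_loader and optimizer are assumed to exist already.

import time
from types import SimpleNamespace

class IterationTimer(HookBase):
    def before_step(self):
        self._start = time.perf_counter()

    def after_step(self):
        # inside after_step, storage.iter == trainer.iter (see TrainerBase.after_step)
        self.trainer.storage.put_scalar("iter_time", time.perf_counter() - self._start)

trainer = SimpleTrainer(model, data_loader, optimizer)
trainer.cfg = SimpleNamespace(AMP=False)   # run_step expects a cfg with an AMP flag
trainer.register_hooks([IterationTimer()])
trainer.train(start_iter=0, max_iter=90000)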
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport torch\nimport timm\nfrom timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint,\\\n convert_splitbn_model, model_parameters\nimport torch.onnx\n\nfrom collections import OrderedDict\n\n\ndef proc_node_module(checkpoint, AttrName):\n new_state_dict = OrderedDict()\n for k, v in checkpoint[AttrName].items():\n if(k[0:7] == \"module.\"):\n name = k[7:]\n else:\n name = k[0:]\n new_state_dict[name] = v\n return new_state_dict\n\n\ndef convert():\n checkpoint = torch.load(\"./model_best.pth.tar\", map_location='cpu')\n checkpoint['state_dict'] = proc_node_module(checkpoint, 'state_dict')\n model = timm.create_model('spnasnet_100',pretrained=True)\n model.load_state_dict(checkpoint['state_dict'])\n model.eval()\n print(model)\n\n input_names = [\"actual_input_1\"]\n output_names = [\"output1\"]\n dummy_input = torch.randn(16, 3, 224, 224)\n torch.onnx.export(model, dummy_input, \"spnasnet_100_npu_16.onnx\", input_names=input_names, output_names=output_names,\n opset_version=11)\n\n\nif __name__ == \"__main__\":\n convert()\n",
"# Copyright (c) Soumith Chintala 2016,\n# All rights reserved.\n#\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://spdx.org/licenses/BSD-3-Clause.html\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport argparse\nimport glob\nimport numpy as np\nimport os\nimport random\nimport shutil\nimport sys\nimport time\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom apex import amp\nimport moxing as mox\nimport torch.npu\n\nsys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../'))\n\nfrom hook import forward_hook_fn\nfrom hook import backward_hook_fn\nfrom mobilenet import mobilenet_v2\nfrom pthtar2onnx import convert\n\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('--data', metavar='DIR', default='/dataset/imagenet',\n help='path to dataset')\nparser.add_argument('-j', '--workers', default=128, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=600, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=512, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.05, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-5, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=1, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrain', default='', type=str, metavar='PATH',\n help='path to pretrain model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default=None, type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=49, 
type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\nparser.add_argument('--amp', default=False, action='store_true',\n help='use amp to train the model')\nparser.add_argument('--opt-level', default=\"O2\", type=str, help='apex optimize level')\nparser.add_argument('--loss-scale-value', default='64', type=int, help='static loss scale value')\n\nparser.add_argument('--summary-path', default=None, type=str, help='event file path')\nparser.add_argument('--stop-step-num', default=None, type=int, help='after the stop-step, killing the training task')\nparser.add_argument('--device', default='npu:0', type=str, help='device type, cpu or npu:x or cuda:x')\nparser.add_argument('--eval-freq', default=5, type=int, help='test interval')\nparser.add_argument('--hook', default=False, action='store_true', help='pytorch hook')\nparser.add_argument('--class_nums', default=1000, type=int, help='class-nums only for pretrain')\n\n# modelarts modification\nparser.add_argument('--train_url',\n default=\"/cache/training\",\n type=str,\n help=\"setting dir of training output\")\nparser.add_argument('--data_url',\n metavar='DIR',\n default='/cache/data_url',\n help='path to dataset')\nparser.add_argument('--onnx', default=True, action='store_true',\n help=\"convert pth model to onnx\")\n\nCACHE_MODEL_URL = \"/cache/model\"\n\nbest_acc1 = 0\ncur_step = 0\n\nCACHE_TRAINING_URL = \"/cache/training/\"\n\ndef seed_everything(seed, device):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n if 'cuda' in device:\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n cudnn.deterministic = True\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef main():\n args = parser.parse_args()\n\n if args.seed is not None:\n seed_everything(args.seed, args.device)\n\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! 
'\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n main_worker(args)\n\n\ndef main_worker(args):\n global best_acc1\n global cur_step\n\n global_step = -1\n\n if 'npu' in args.device:\n torch.npu.set_device(args.device)\n if 'cuda' in args.device:\n torch.cuda.set_device(args.device)\n\n model = mobilenet_v2(num_classes=args.class_nums)\n\n # set hook\n if args.hook:\n modules = model.named_modules()\n for name, module in modules:\n module.register_forward_hook(forward_hook_fn)\n module.register_backward_hook(backward_hook_fn)\n\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n criterion = nn.CrossEntropyLoss()\n\n if 'npu' in args.device or 'cuda' in args.device:\n model = model.to(args.device)\n criterion = criterion.to(args.device)\n\n if args.amp:\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level, loss_scale=args.loss_scale_value)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume, map_location=args.device)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n if args.amp:\n amp.load_state_dict(checkpoint['amp'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n if args.pretrain:\n # ------------------modelarts modification----------------------\n os.makedirs(CACHE_MODEL_URL, exist_ok=True)\n mox.file.copy_parallel(args.pretrain, os.path.join(CACHE_MODEL_URL, \"checkpoint.pth\"))\n # ------------------modelarts modification---------------------\n args.pretrain = os.path.join(CACHE_MODEL_URL, \"checkpoint.pth\")\n if not os.path.isfile(args.pretrain):\n print(\"no chechpoint found at {}\".format(args.pretrain))\n\n print(\"loading checkpoint '{}'\".format(args.pretrain))\n pretrained_dict = torch.load(args.pretrain, map_location=\"cpu\")['state_dict']\n pretrained_dict.pop('module.classifier.1.weight')\n pretrained_dict.pop('module.classifier.1.bias')\n model.load_state_dict(pretrained_dict, strict=False)\n print(\"loaded checkpoint '{}'\".format(args.pretrain))\n\n # Data loading code\n # -------modelarts modification-------\n real_path = '/cache/data_url'\n if not os.path.exists(real_path):\n os.makedirs(real_path)\n mox.file.copy_parallel(args.data_url, real_path)\n print(\"training data finish copy to %s.\" % real_path)\n # ---------modelarts modification-----\n\n traindir = os.path.join(real_path, 'train')\n valdir = os.path.join(real_path, 'val')\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n 
batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True, drop_last=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion, args, global_step)\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n\n # train for one epoch\n global_step = train(train_loader, model, criterion, optimizer, epoch, args, global_step)\n\n if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:\n # evaluate on validation set\n acc1 = validate(val_loader, model, criterion, args, global_step)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n # save checkpoint\n if args.amp:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n 'amp': amp.state_dict(),\n }, is_best)\n else:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n }, is_best)\n\n if args.stop_step_num is not None and cur_step >= args.stop_step_num:\n break\n\n if args.onnx:\n convert_pth_to_onnx(args)\n\n # --------------modelarts modification----------\n mox.file.copy_parallel(CACHE_TRAINING_URL, args.train_url)\n # --------------modelarts modification end----------\n\n\ndef convert_pth_to_onnx(args):\n pth_pattern = os.path.join(CACHE_TRAINING_URL, 'checkpoint.pth.tar')\n pth_file_list = glob.glob(pth_pattern)\n if not pth_file_list:\n print(f\"can't find pth {pth_pattern}\")\n return\n pth_file = pth_file_list[0]\n onnx_path = pth_file.split(\".\")[0] + '.onnx'\n convert(pth_file, onnx_path, args.class_nums)\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args, global_step, sum_writer=None):\n global cur_step\n\n if args.seed is not None:\n seed_everything(args.seed + epoch, args.device)\n\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n learning_rate = AverageMeter('LR', ':2.8f')\n losses = AverageMeter('Loss', ':6.8f')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, learning_rate, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n steps_per_epoch = len(train_loader)\n for i, (images, target) in enumerate(train_loader):\n\n global_step = epoch * steps_per_epoch + i\n cur_step = global_step\n\n lr = adjust_learning_rate(optimizer, global_step, steps_per_epoch, args)\n\n learning_rate.update(lr)\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n if 'npu' in args.device:\n target = target.to(torch.int32)\n\n if 'npu' in args.device or 'cuda' in args.device:\n images = images.to(args.device, non_blocking=True)\n target = target.to(args.device, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n if args.amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n 
progress.display(i)\n\n if args.stop_step_num is not None and cur_step >= args.stop_step_num:\n break\n\n print(' * FPS@all {:.3f}'.format(args.batch_size / (batch_time.avg + 0.001)))\n return global_step\n\n\ndef validate(val_loader, model, criterion, args, global_step, sum_writer=None):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n\n if 'npu' in args.device:\n target = target.to(torch.int32)\n\n if 'npu' in args.device or 'cuda' in args.device:\n images = images.to(args.device, non_blocking=True)\n target = target.to(args.device, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n args = parser.parse_args()\n\n if not os.path.exists(CACHE_TRAINING_URL):\n os.makedirs(CACHE_TRAINING_URL)\n\n checkpoint_save_path = os.path.join(CACHE_TRAINING_URL, filename)\n torch.save(state, checkpoint_save_path)\n if is_best:\n shutil.copyfile(checkpoint_save_path, os.path.join(CACHE_TRAINING_URL, \"model_best.pth.tar\"))\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n self.start_count_index = 10\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n if self.count == 0:\n self.batchsize = n\n\n self.val = val\n self.count += n\n if self.count > (self.start_count_index * self.batchsize):\n self.sum += val * n\n self.avg = self.sum / (self.count - self.start_count_index * self.batchsize)\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, global_step, steps_per_epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n # lr = args.lr * (0.98 ** (epoch / 2.5))\n lr = args.lr * (0.98 ** (global_step // int(steps_per_epoch * 2.5)))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef 
accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n",
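For reference, adjust_learning_rate above multiplies the base LR by 0.98 once every 2.5 epochs' worth of steps (the "decayed by 10 every 30 epochs" docstring is stale). A quick worked check with the script's default lr=0.05; steps_per_epoch is an arbitrary illustrative value.

base_lr, steps_per_epoch = 0.05, 2500  # steps_per_epoch is illustrative
for epoch in (0, 5, 50):
    global_step = epoch * steps_per_epoch
    lr = base_lr * (0.98 ** (global_step // int(steps_per_epoch * 2.5)))
    print(epoch, lr)
# epoch 0  -> 0.05
# epoch 5  -> 0.05 * 0.98**2  = 0.04802
# epoch 50 -> 0.05 * 0.98**20 ~= 0.03338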
"#! -*- coding:utf-8 -*-\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import division, print_function\n\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nfrom tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig\nimport npu_bridge\nimport json\nimport os,time\nimport collections\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\nRAW_SHAPE = \"raw_shape\"\nIS_PADDED = \"is_padded\"\nSOURCE_ID = \"source_id\"\nMIN_SCORE = 0.05\nDUMMY_SCORE = -1e5\nMAX_NUM_EVAL_BOXES = 200\nOVERLAP_CRITERIA = 0.5\nCLASS_INV_MAP = (\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,\n 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,\n 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87,\n 88, 89, 90)\n\ndef parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--batch_size', default=1,\n help=\"\"\"batchsize\"\"\")\n parser.add_argument('--model_path', default='ssd-resnet34_1batch.pb',\n help=\"\"\"pb path\"\"\")\n parser.add_argument('--data_path', default = 'coco2017/val2017',\n help = \"\"\"the data path\"\"\")\n parser.add_argument('--val_json_file', default='coco_official_2017/annotations/instances_val2017.json',\n help=\"\"\"the val json file path\"\"\")\n parser.add_argument('--input_tensor_name', default='input:0',\n help=\"\"\"the output1 tensor name\"\"\")\n parser.add_argument('--output_tensor_name1', default='Softmax:0',\n help=\"\"\"the output1 tensor name\"\"\")\n parser.add_argument('--output_tensor_name2', default='stack:0',\n help=\"\"\"the output2 tensor name\"\"\")\n\n\n args, unknown_args = parser.parse_known_args()\n if len(unknown_args) > 0:\n for bad_arg in unknown_args:\n print(\"ERROR: Unknown command line arg: %s\" % bad_arg)\n raise ValueError(\"Invalid command line arg(s)\")\n return args\n\n\ndef load_model(model_file):\n with tf.gfile.GFile(model_file, \"rb\") as gf:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(gf.read())\n\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(graph_def, name=\"\")\n\n return graph\n\ndef top_k(input, k=1, sorted=True):\n \"\"\"Top k max pooling\n Args:\n input(ndarray): convolutional feature in heigh x width x channel format\n k(int): if k==1, it is equal to normal max pooling\n sorted(bool): whether to return the array sorted by channel value\n Returns:\n ndarray: k x (height x width)\n ndarray: k\n \"\"\"\n ind = np.argpartition(input, -k)[..., -k:]\n def get_entries(input, ind, sorted):\n if len(ind.shape) == 1:\n if sorted:\n ind = ind[np.argsort(-input[ind])]\n return input[ind], ind\n output, ind = zip(*[get_entries(inp, id, sorted) for inp, id in zip(input, ind)])\n return np.array(output), 
np.array(ind)\n return get_entries(input, ind, sorted)\n\ndef select_top_k_scores(scores_in, pre_nms_num_detections=5000):\n '''\n scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])\n top_k_scores, top_k_indices = tf.nn.top_k(\n scores_trans, k=pre_nms_num_detections, sorted=True)\n return tf.transpose(top_k_scores, [0, 2, 1]), tf.transpose(\n top_k_indices, [0, 2, 1])\n '''\n scores_trans = np.transpose(scores_in, (0, 2, 1))\n top_k_scores, top_k_indices = top_k(scores_trans, k = pre_nms_num_detections)\n return np.transpose(top_k_scores, (0, 2, 1)), np.transpose(top_k_indices, (0, 2, 1))\n\n\ndef _load_images_info(images_info_file):\n \"\"\"Loads object annotation JSON file.\"\"\"\n f = open(images_info_file, encoding='utf-8')\n info_dict = json.load(f)\n\n img_to_obj_annotation = collections.defaultdict(list)\n for annotation in info_dict['annotations']:\n image_id = annotation['image_id']\n img_to_obj_annotation[image_id].append(annotation)\n return info_dict['images'],img_to_obj_annotation\n\ndef get_image_obj(images_info_file, input_images):\n f = open(images_info_file, encoding='utf-8')\n info_dict = json.load(f)\n img_obj = collections.defaultdict(list)\n img_info_list = []\n image_list_new = []\n for image in info_dict['images']:\n img_info = {}\n image_name = image['file_name']\n if image_name not in input_images:\n continue\n img_info['source_id'] = image['id']\n img_info['raw_shape'] = [image['height'], image['width'], 3]\n img_info_list.append(img_info)\n image_list_new.append(image_name)\n\n return img_info_list, image_list_new\n\n\ndef _read_inputImage(filename):\n image_list = []\n if os.path.isdir(filename):\n for file in os.listdir(filename):\n file = file.split('.')[0] + \".jpg\"\n image_list.append(file)\n return image_list\n\n\ndef image_process(image_path, images_name):\n ###image process\n imagelist = []\n images_count = 0\n for image_name in images_name:\n with tf.Session().as_default():\n #with tf.Session() as sess:\n image_file = os.path.join(image_path, image_name)\n image = tf.gfile.FastGFile(image_file, 'rb').read()\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize_images(image, size=(300, 300))\n image /= 255.\n images_count = images_count + 1\n if tf.shape(image)[2].eval() == 1:\n image = tf.image.grayscale_to_rgb(image)\n image = image.eval()\n imagelist.append(image)\n tf.reset_default_graph()\n return np.array(imagelist), images_count\n\nclass Classifier(object):\n # set batch_size\n args = parse_args()\n batch_size = int(args.batch_size)\n\n def __init__(self):\n # --------------------------------------------------------------------------------\n config = tf.ConfigProto()\n custom_op = config.graph_options.rewrite_options.custom_optimizers.add()\n custom_op.name = \"NpuOptimizer\"\n # 1)run on Ascend NPU\n custom_op.parameter_map[\"use_off_line\"].b = True\n\n # 2)recommended use fp16 datatype to obtain better performance\n custom_op.parameter_map[\"precision_mode\"].s = tf.compat.as_bytes(\"force_fp16\")\n\n # 3)disable remapping\n config.graph_options.rewrite_options.remapping = RewriterConfig.OFF\n\n # 4)set graph_run_mode=0,obtain better performance\n custom_op.parameter_map[\"graph_run_mode\"].i = 0\n # --------------------------------------------------------------------------------\n\n # load model, set graph input nodes and output nodes\n args = parse_args()\n self.graph = self.__load_model(args.model_path)\n self.input_tensor = self.graph.get_tensor_by_name(args.input_tensor_name)\n self.output_tensor1 = 
self.graph.get_tensor_by_name(args.output_tensor_name1)\n self.output_tensor2 = self.graph.get_tensor_by_name(args.output_tensor_name2)\n\n # create session\n self.sess = tf.Session(config=config, graph=self.graph)\n\n def __load_model(self, model_file):\n \"\"\"\n load fronzen graph\n :param model_file:\n :return:\n \"\"\"\n with tf.gfile.GFile(model_file, \"rb\") as gf:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(gf.read())\n\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(graph_def, name=\"\")\n\n return graph\n\n def infer(self, batch_size, batch_data, labels_list, images_count):\n dataOutput = []\n total_time = 0\n count = 0\n for data in batch_data:\n t = time.time()\n classes, boxes = self.sess.run([self.output_tensor1, self.output_tensor2], feed_dict={self.input_tensor: data.reshape(int(batch_size),300,300,3)})\n total_time = total_time + time.time() - t\n pred_scores, indices = select_top_k_scores(classes,200)\n output_index = 0\n while output_index < int(batch_size) and count < images_count:\n dataOutput.append({\"pred_box\": boxes[output_index],\n \"source_id\": labels_list[count]['source_id'],\n \"indices\": indices[output_index],\n \"pred_scores\": pred_scores[output_index],\n \"raw_shape\": labels_list[count]['raw_shape']})\n output_index = output_index + 1\n count = count + 1\n\n return dataOutput, total_time\n\n def batch_process(self, image_data):\n \"\"\"\n images preprocess\n :return:\n \"\"\"\n # Get the batch information of the current input data, and automatically adjust the data to the fixed batch\n n_dim = image_data.shape[0]\n batch_size = self.batch_size\n\n # if data is not enough for the whole batch, you need to complete the data\n m = n_dim % batch_size\n if m < batch_size and m > 0:\n # The insufficient part shall be filled with 0 according to n dimension\n pad = np.zeros((batch_size - m, 300, 300, 3)).astype(np.float32)\n image_data = np.concatenate((image_data, pad), axis=0)\n\n # Define the Minis that can be divided into several batches\n mini_batch = []\n i = 0\n while i < n_dim:\n # Define the Minis that can be divided into several batches\n mini_batch.append(image_data[i: i + batch_size, :, :, :])\n i += batch_size\n\n return mini_batch\n\ndef decode_single(bboxes_in,\n scores_in,\n indices,\n criteria,\n max_output,\n max_num=200):\n \"\"\"Implement Non-maximum suppression.\n\n Reference to https://github.com/amdegroot/ssd.pytorch\n\n Args:\n bboxes_in: a Tensor with shape [N, 4], which stacks box regression outputs\n on all feature levels. The N is the number of total anchors on all levels.\n scores_in: a Tensor with shape [ssd_constants.MAX_NUM_EVAL_BOXES,\n num_classes]. The top ssd_constants.MAX_NUM_EVAL_BOXES box scores for each\n class.\n indices: a Tensor with shape [ssd_constants.MAX_NUM_EVAL_BOXES,\n num_classes]. 
The indices for these top boxes for each class.\n criteria: a float number to specify the threshold of NMS.\n max_output: maximum output length.\n max_num: maximum number of boxes before NMS.\n\n Returns:\n boxes, labels and scores after NMS.\n \"\"\"\n\n bboxes_out = []\n scores_out = []\n labels_out = []\n\n for i, score in enumerate(np.split(scores_in, scores_in.shape[1], 1)):\n class_indices = indices[:, i]\n bboxes = bboxes_in[class_indices, :]\n score = np.squeeze(score, 1)\n\n # skip background\n if i == 0:\n continue\n\n mask = score > MIN_SCORE\n if not np.any(mask):\n continue\n\n bboxes, score = bboxes[mask, :], score[mask]\n\n # remain_list = []\n # for r in range(bboxes.shape[0]):\n # if bboxes[r, 0] < 0 or bboxes[r, 1] < 0 or bboxes[r, 2] < 0 or bboxes[r, 3] < 0 or bboxes[r, 0] >= bboxes[r, 2] or \\\n # bboxes[r, 1] >= bboxes[r, 3]:\n # continue\n # remain_list.append(r)\n # bboxes = bboxes[remain_list, :]\n # score = score[remain_list]\n\n remain_list = []\n for r in range(bboxes.shape[0]):\n for j in range(4):\n if bboxes[r, j] < 0:\n bboxes[r, j] = 0.00001\n if bboxes[r, 0] >= bboxes[r, 2]:\n bboxes[r, 2] = bboxes[r, 0] + 0.00001\n if bboxes[r, 1] >= bboxes[r, 3]:\n bboxes[r, 3] = bboxes[r, 1] + 0.00001\n remain_list.append(r)\n bboxes = bboxes[remain_list, :]\n score = score[remain_list]\n\n\n score_idx_sorted = np.argsort(score)\n score_sorted = score[score_idx_sorted]\n\n score_idx_sorted = score_idx_sorted[-max_num:]\n candidates = []\n\n # perform non-maximum suppression\n while len(score_idx_sorted):\n idx = score_idx_sorted[-1]\n bboxes_sorted = bboxes[score_idx_sorted, :]\n bboxes_idx = bboxes[idx, :]\n iou = calc_iou(bboxes_idx, bboxes_sorted)\n\n score_idx_sorted = score_idx_sorted[iou < criteria]\n candidates.append(idx)\n\n bboxes_out.append(bboxes[candidates, :])\n scores_out.append(score[candidates])\n labels_out.extend([i]*len(candidates))\n\n if len(scores_out) == 0:\n tf.logging.info(\"No objects detected. Returning dummy values.\")\n return (\n np.zeros(shape=(1, 4), dtype=np.float32),\n np.zeros(shape=(1,), dtype=np.int32),\n np.ones(shape=(1,), dtype=np.float32) * DUMMY_SCORE,\n )\n\n bboxes_out = np.concatenate(bboxes_out, axis=0)\n scores_out = np.concatenate(scores_out, axis=0)\n labels_out = np.array(labels_out)\n\n max_ids = np.argsort(scores_out)[-max_output:]\n\n return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids]\n\ndef calc_iou(target, candidates):\n target_tiled = np.tile(target[np.newaxis, :], (candidates.shape[0], 1))\n # Left Top & Right Bottom\n lt = np.maximum(target_tiled[:,:2], candidates[:,:2])\n\n rb = np.minimum(target_tiled[:,2:], candidates[:,2:])\n\n delta = np.maximum(rb - lt, 0)\n\n intersect = delta[:,0] * delta[:,1]\n\n delta1 = target_tiled[:, 2:] - target_tiled[:, :2]\n area1 = delta1[:,0] * delta1[:,1]\n delta2 = candidates[:, 2:] - candidates[:, :2]\n area2 = delta2[:,0] * delta2[:,1]\n\n iou = intersect/(area1 + area2 - intersect)\n return iou\n\ndef compute_map(labels_and_predictions,\n coco_gt,\n use_cpp_extension=True,\n nms_on_tpu=True):\n \"\"\"Use model predictions to compute mAP.\n\n The evaluation code is largely copied from the MLPerf reference\n implementation. 
While it is possible to write the evaluation as a tensor\n metric and use Estimator.evaluate(), this approach was selected for simplicity\n and ease of duck testing.\n\n Args:\n labels_and_predictions: A map from TPU predict method.\n coco_gt: ground truch COCO object.\n use_cpp_extension: use cocoeval C++ library.\n nms_on_tpu: do NMS on TPU.\n Returns:\n Evaluation result.\n \"\"\"\n predictions = []\n tic = time.time()\n\n if nms_on_tpu:\n p = []\n for i in labels_and_predictions:\n for j in i:\n p.append(np.array(j, dtype=np.float32))\n predictions = np.concatenate(list(p)).reshape((-1, 7))\n else:\n k = 0\n for example in labels_and_predictions:\n if IS_PADDED in example and example[\n IS_PADDED]:\n continue\n #print(k)\n k += 1\n htot, wtot, _ = example[RAW_SHAPE]\n pred_box = example['pred_box']\n pred_scores = example['pred_scores']\n indices = example['indices']\n loc, label, prob = decode_single(\n pred_box, pred_scores, indices, OVERLAP_CRITERIA,\n MAX_NUM_EVAL_BOXES, MAX_NUM_EVAL_BOXES)\n\n for loc_, label_, prob_ in zip(loc, label, prob):\n # Ordering convention differs, hence [1], [0] rather than [0], [1]\n predictions.append([\n int(example[SOURCE_ID]),\n loc_[1] * wtot, loc_[0] * htot, (loc_[3] - loc_[1]) * wtot,\n (loc_[2] - loc_[0]) * htot, prob_,\n CLASS_INV_MAP[label_]\n ])\n\n toc = time.time()\n tf.logging.info('Prepare predictions DONE (t={:0.2f}s).'.format(toc - tic))\n\n if coco_gt is None:\n coco_gt = create_coco(\n FLAGS.val_json_file, use_cpp_extension=use_cpp_extension)\n\n if use_cpp_extension:\n coco_dt = coco_gt.LoadRes(np.array(predictions, dtype=np.float32))\n coco_eval = COCOeval(coco_gt, coco_dt, iou_type='bbox')\n coco_eval.Evaluate()\n coco_eval.Accumulate()\n coco_eval.Summarize()\n stats = coco_eval.GetStats()\n\n else:\n coco_dt = coco_gt.loadRes(np.array(predictions))\n\n coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n stats = coco_eval.stats\n\n print('Current AP: {:.5f}'.format(stats[0]))\n metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',\n 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']\n coco_time = time.time()\n tf.logging.info('COCO eval DONE (t={:0.2f}s).'.format(coco_time - toc))\n\n # Prefix with \"COCO\" to group in TensorBoard.\n return {'COCO/' + key: value for key, value in zip(metric_names, stats)}\n\n\ndef create_coco(val_json_file, use_cpp_extension=True):\n \"\"\"Creates Microsoft COCO helper class object and return it.\"\"\"\n if val_json_file.startswith('gs://'):\n _, local_val_json = tempfile.mkstemp(suffix='.json')\n tf.gfile.Remove(local_val_json)\n\n tf.gfile.Copy(val_json_file, local_val_json)\n atexit.register(tf.gfile.Remove, local_val_json)\n else:\n local_val_json = val_json_file\n\n if use_cpp_extension:\n coco_gt = coco.COCO(local_val_json, False)\n else:\n coco_gt = COCO(local_val_json)\n return coco_gt\n\ndef main():\n args = parse_args()\n tf.reset_default_graph()\n\n image_list = _read_inputImage(args.data_path)\n image_obj, image_list = get_image_obj(args.val_json_file, image_list)\n\n print(\"########NOW Start Preprocess!!!#########\")\n images, images_count = image_process(args.data_path, image_list)\n\n ###batch\n print(\"########NOW Start Batch!!!#########\")\n classifier = Classifier()\n batch_images = classifier.batch_process(images)\n\n ###do inference\n print(\"########NOW Start inference!!!#########\")\n dataOutput, total_time = classifier.infer(args.batch_size, batch_images, image_obj, images_count)\n\n 
coco_gt = create_coco(\n args.val_json_file, use_cpp_extension=False)\n compute_map(\n dataOutput,\n coco_gt,\n use_cpp_extension=False,\n nms_on_tpu=False)\n\n print('+-------------------------------------------------+')\n print('images number = ', images_count)\n print('images/sec = ', images_count / total_time)\n print('+-------------------------------------------------+')\n\nif __name__ == '__main__':\n main()\n",
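The NMS loop in decode_single hinges on calc_iou defined above; a tiny sanity check with hand-picked boxes (illustrative values only) shows the expected behaviour for identical, disjoint and half-overlapping boxes.

import numpy as np

target = np.array([0.0, 0.0, 1.0, 1.0])
candidates = np.array([
    [0.0, 0.0, 1.0, 1.0],   # identical box    -> IoU 1.0
    [2.0, 2.0, 3.0, 3.0],   # disjoint box     -> IoU 0.0
    [0.0, 0.0, 1.0, 0.5],   # half-overlap box -> IoU 0.5
])
print(calc_iou(target, candidates))  # ~[1.0, 0.0, 0.5]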
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n## ==============================================================================\n\"\"\"Contains definitions for the preactivation form of Residual Networks.\n\nResidual networks (ResNets) were originally proposed in:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\nThe full preactivation 'v2' ResNet variant implemented in this module was\nintroduced by:\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027\n\nThe key difference of the full preactivation 'v2' variant compared to the\n'v1' variant in [1] is the use of batch normalization before every weight layer.\n\nTypical use:\n\n from tensorflow.contrib.slim.nets import resnet_v2\n\nResNet-101 for image classification into 1000 classes:\n\n # inputs has shape [batch, 224, 224, 3]\n with slim.arg_scope(resnet_v2.resnet_arg_scope()):\n net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)\n\nResNet-101 for semantic segmentation into 21 classes:\n\n # inputs has shape [batch, 513, 513, 3]\n with slim.arg_scope(resnet_v2.resnet_arg_scope()):\n net, end_points = resnet_v2.resnet_v2_101(inputs,\n 21,\n is_training=False,\n global_pool=False,\n output_stride=16)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.contrib import slim as contrib_slim\n\nfrom nets import resnet_utils\n\nslim = contrib_slim\nresnet_arg_scope = resnet_utils.resnet_arg_scope\n\n\[email protected]_arg_scope\ndef bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,\n outputs_collections=None, scope=None):\n \"\"\"Bottleneck residual unit variant with BN before convolutions.\n\n This is the full preactivation residual unit variant proposed in [2]. See\n Fig. 1(b) of [2] for its definition. 
Note that we use here the bottleneck\n variant which has an extra bottleneck layer.\n\n When putting together two consecutive ResNet blocks that use this unit, one\n should use stride = 2 in the last unit of the first block.\n\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The depth of the ResNet unit output.\n depth_bottleneck: The depth of the bottleneck layers.\n stride: The ResNet unit's stride. Determines the amount of downsampling of\n the units output compared to its input.\n rate: An integer, rate for atrous convolution.\n outputs_collections: Collection to add the ResNet unit output.\n scope: Optional variable_scope.\n\n Returns:\n The ResNet unit's output.\n \"\"\"\n with tf.compat.v1.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:\n depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)\n preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')\n if depth == depth_in:\n shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')\n else:\n shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,\n normalizer_fn=None, activation_fn=None,\n scope='shortcut')\n\n residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,\n scope='conv1')\n residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,\n rate=rate, scope='conv2')\n residual = slim.conv2d(residual, depth, [1, 1], stride=1,\n normalizer_fn=None, activation_fn=None,\n scope='conv3')\n\n output = shortcut + residual\n\n return slim.utils.collect_named_outputs(outputs_collections,\n sc.name,\n output)\n\n\ndef resnet_v2(inputs,\n blocks,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n include_root_block=True,\n spatial_squeeze=True,\n reuse=None,\n scope=None):\n \"\"\"Generator for v2 (preactivation) ResNet models.\n\n This function generates a family of ResNet v2 models. See the resnet_v2_*()\n methods for specific model instantiations, obtained by selecting different\n block instantiations that produce ResNets of various depths.\n\n Training for image classification on Imagenet is usually done with [224, 224]\n inputs, resulting in [7, 7] feature maps at the output of the last ResNet\n block for the ResNets defined in [1] that have nominal stride equal to 32.\n However, for dense prediction tasks we advise that one uses inputs with\n spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In\n this case the feature maps at the ResNet output will have spatial shape\n [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]\n and corners exactly aligned with the input image corners, which greatly\n facilitates alignment of the features to the image. Using as input [225, 225]\n images results in [8, 8] feature maps at the output of the last ResNet block.\n\n For dense prediction tasks, the ResNet needs to run in fully-convolutional\n (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all\n have nominal stride equal to 32 and a good choice in FCN mode is to use\n output_stride=16 in order to increase the density of the computed features at\n small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.\n\n Args:\n inputs: A tensor of size [batch, height_in, width_in, channels].\n blocks: A list of length equal to the number of ResNet blocks. 
Each element\n is a resnet_utils.Block object describing the units in the block.\n num_classes: Number of predicted classes for classification tasks.\n If 0 or None, we return the features before the logit layer.\n is_training: whether batch_norm layers are in training mode.\n global_pool: If True, we perform global average pooling before computing the\n logits. Set to True for image classification, False for dense prediction.\n output_stride: If None, then the output will be computed at the nominal\n network stride. If output_stride is not None, it specifies the requested\n ratio of input to output spatial resolution.\n include_root_block: If True, include the initial convolution followed by\n max-pooling, if False excludes it. If excluded, `inputs` should be the\n results of an activation-less convolution.\n spatial_squeeze: if True, logits is of shape [B, C], if false logits is\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n To use this parameter, the input images must be smaller than 300x300\n pixels, in which case the output logit layer does not contain spatial\n information and can be removed.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n\n Returns:\n net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].\n If global_pool is False, then height_out and width_out are reduced by a\n factor of output_stride compared to the respective height_in and width_in,\n else both height_out and width_out equal one. If num_classes is 0 or None,\n then net is the output of the last ResNet block, potentially after global\n average pooling. If num_classes is a non-zero integer, net contains the\n pre-softmax activations.\n end_points: A dictionary from components of the network to the corresponding\n activation.\n\n Raises:\n ValueError: If the target output_stride is not valid.\n \"\"\"\n with tf.compat.v1.variable_scope(\n scope, 'resnet_v2', [inputs], reuse=reuse) as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n with slim.arg_scope([slim.conv2d, bottleneck,\n resnet_utils.stack_blocks_dense],\n outputs_collections=end_points_collection):\n with slim.arg_scope([slim.batch_norm], is_training=is_training):\n net = inputs\n if include_root_block:\n if output_stride is not None:\n if output_stride % 4 != 0:\n raise ValueError('The output_stride needs to be a multiple of 4.')\n output_stride /= 4\n # We do not include batch normalization or activation functions in\n # conv1 because the first ResNet unit will perform these. Cf.\n # Appendix of [2].\n with slim.arg_scope([slim.conv2d],\n activation_fn=None, normalizer_fn=None):\n net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')\n net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')\n net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)\n # This is needed because the pre-activation variant does not have batch\n # normalization or activation functions in the residual unit output. 
See\n # Appendix of [2].\n net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')\n # Convert end_points_collection into a dictionary of end_points.\n end_points = slim.utils.convert_collection_to_dict(\n end_points_collection)\n\n if global_pool:\n # Global average pooling.\n net = tf.reduce_mean(\n input_tensor=net, axis=[1, 2], name='pool5', keepdims=True)\n end_points['global_pool'] = net\n if num_classes:\n net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='logits')\n end_points[sc.name + '/logits'] = net\n if spatial_squeeze:\n net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')\n end_points[sc.name + '/spatial_squeeze'] = net\n end_points['predictions'] = slim.softmax(net, scope='predictions')\n return net, end_points\nresnet_v2.default_image_size = 224\n\n\ndef resnet_v2_block(scope, base_depth, num_units, stride):\n \"\"\"Helper function for creating a resnet_v2 bottleneck block.\n\n Args:\n scope: The scope of the block.\n base_depth: The depth of the bottleneck layer for each unit.\n num_units: The number of units in the block.\n stride: The stride of the block, implemented as a stride in the last unit.\n All other units have stride=1.\n\n Returns:\n A resnet_v2 bottleneck block.\n \"\"\"\n return resnet_utils.Block(scope, bottleneck, [{\n 'depth': base_depth * 4,\n 'depth_bottleneck': base_depth,\n 'stride': 1\n }] * (num_units - 1) + [{\n 'depth': base_depth * 4,\n 'depth_bottleneck': base_depth,\n 'stride': stride\n }])\nresnet_v2.default_image_size = 224\n\n\ndef resnet_v2_50(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n spatial_squeeze=True,\n reuse=None,\n scope='resnet_v2_50'):\n \"\"\"ResNet-50 model of [1]. See resnet_v2() for arg and return description.\"\"\"\n blocks = [\n resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),\n resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),\n resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),\n resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),\n ]\n return resnet_v2(inputs, blocks, num_classes, is_training=is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, spatial_squeeze=spatial_squeeze,\n reuse=reuse, scope=scope)\nresnet_v2_50.default_image_size = resnet_v2.default_image_size\n\n\ndef resnet_v2_101(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n spatial_squeeze=True,\n reuse=None,\n scope='resnet_v2_101'):\n \"\"\"ResNet-101 model of [1]. See resnet_v2() for arg and return description.\"\"\"\n blocks = [\n resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),\n resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),\n resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),\n resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),\n ]\n return resnet_v2(inputs, blocks, num_classes, is_training=is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, spatial_squeeze=spatial_squeeze,\n reuse=reuse, scope=scope)\nresnet_v2_101.default_image_size = resnet_v2.default_image_size\n\n\ndef resnet_v2_152(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n spatial_squeeze=True,\n reuse=None,\n scope='resnet_v2_152'):\n \"\"\"ResNet-152 model of [1]. 
See resnet_v2() for arg and return description.\"\"\"\n blocks = [\n resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),\n resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),\n resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),\n resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),\n ]\n return resnet_v2(inputs, blocks, num_classes, is_training=is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, spatial_squeeze=spatial_squeeze,\n reuse=reuse, scope=scope)\nresnet_v2_152.default_image_size = resnet_v2.default_image_size\n\n\ndef resnet_v2_200(inputs,\n num_classes=None,\n is_training=True,\n global_pool=True,\n output_stride=None,\n spatial_squeeze=True,\n reuse=None,\n scope='resnet_v2_200'):\n \"\"\"ResNet-200 model of [2]. See resnet_v2() for arg and return description.\"\"\"\n blocks = [\n resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),\n resnet_v2_block('block2', base_depth=128, num_units=24, stride=2),\n resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),\n resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),\n ]\n return resnet_v2(inputs, blocks, num_classes, is_training=is_training,\n global_pool=global_pool, output_stride=output_stride,\n include_root_block=True, spatial_squeeze=spatial_squeeze,\n reuse=reuse, scope=scope)\nresnet_v2_200.default_image_size = resnet_v2.default_image_size\n",
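A hedged usage sketch of the FCN-mode call the resnet_v2 docstring above recommends; it assumes only the TF1/slim environment this file already imports, and the 321x321 input is the "multiple of 32 plus 1" size from the docstring (321 = 32 * 10 + 1, so (321 - 1) / 16 + 1 = 21 output cells per side):

import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from nets import resnet_v2

slim = contrib_slim

# Dense-prediction mode: global_pool=False, output_stride=16.
inputs = tf.compat.v1.placeholder(tf.float32, [1, 321, 321, 3])
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
    net, end_points = resnet_v2.resnet_v2_101(
        inputs, num_classes=21, is_training=False,
        global_pool=False, output_stride=16)
print(net.shape)  # expected (1, 21, 21, 21): 21x21 feature map, 21 classes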
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\nimport torch\n\nimport detectron2.model_zoo as model_zoo\nfrom detectron2.config import get_cfg\nfrom detectron2.modeling import build_model\nfrom detectron2.utils.analysis import flop_count_operators, parameter_count\n\n\ndef get_model_zoo(config_path):\n \"\"\"\n Like model_zoo.get, but do not load any weights (even pretrained)\n \"\"\"\n cfg_file = model_zoo.get_config_file(config_path)\n cfg = get_cfg()\n cfg.merge_from_file(cfg_file)\n if not torch.cuda.is_available():\n cfg.MODEL.DEVICE = \"cpu\"\n return build_model(cfg)\n\n\nclass RetinaNetTest(unittest.TestCase):\n def setUp(self):\n self.model = get_model_zoo(\"COCO-Detection/retinanet_R_50_FPN_1x.yaml\")\n\n def test_flop(self):\n # RetinaNet supports flop-counting with random inputs\n inputs = [{\"image\": torch.rand(3, 800, 800)}]\n res = flop_count_operators(self.model, inputs)\n self.assertTrue(int(res[\"conv\"]), 146) # 146B flops\n\n def test_param_count(self):\n res = parameter_count(self.model)\n self.assertTrue(res[\"\"], 37915572)\n self.assertTrue(res[\"backbone\"], 31452352)\n\n\nclass FasterRCNNTest(unittest.TestCase):\n def setUp(self):\n self.model = get_model_zoo(\"COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml\")\n\n def test_flop(self):\n # Faster R-CNN supports flop-counting with random inputs\n inputs = [{\"image\": torch.rand(3, 800, 800)}]\n res = flop_count_operators(self.model, inputs)\n\n # This only checks flops for backbone & proposal generator\n # Flops for box head is not conv, and depends on #proposals, which is\n # almost 0 for random inputs.\n self.assertTrue(int(res[\"conv\"]), 117)\n\n def test_param_count(self):\n res = parameter_count(self.model)\n self.assertTrue(res[\"\"], 41699936)\n self.assertTrue(res[\"backbone\"], 26799296)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport time\nimport tensorflow as tf\nfrom tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig\nimport npu_bridge\nimport glob\nimport os\nimport argparse\n\ninput_shape = (224, 224, 3) # (height, width, channel)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--batchsize', default=1,\n help=\"\"\"batchsize\"\"\")\n parser.add_argument('--model_path', default='pb/mobileNetv2.pb',\n help=\"\"\"pb path\"\"\")\n parser.add_argument('--image_path', default = '../../image-1024/',\n help = \"\"\"the data path\"\"\")\n parser.add_argument('--label_file', default='val_lable.txt',\n help=\"\"\"label file\"\"\")\n parser.add_argument('--input_tensor_name', default = 'input:0',\n help = \"\"\"input_tensor_name\"\"\")\n parser.add_argument('--output_tensor_name', default='MobilenetV2/Logits/output:0',\n help=\"\"\"output_tensor_name\"\"\")\n args, unknown_args = parser.parse_known_args()\n if len(unknown_args) > 0:\n for bad_arg in unknown_args:\n print(\"ERROR: Unknown command line arg: %s\" % bad_arg)\n raise ValueError(\"Invalid command line arg(s)\")\n\n return args\n\ndef read_file(image_name, path):\n with open(path, 'r') as cs:\n rs_list = cs.readlines()\n for name in rs_list:\n if image_name in str(name):\n num = str(name).split(\" \")[1]\n break\n return int(num) + 1\n\nclass Classifier(object):\n # set batch_size\n args = parse_args()\n batch_size = int(args.batchsize)\n\n def __init__(self):\n # 定义模型的路径\n\n # NPU模型编译和优化配置\n # --------------------------------------------------------------------------------\n config = tf.ConfigProto()\n custom_op = config.graph_options.rewrite_options.custom_optimizers.add()\n custom_op.name = \"NpuOptimizer\"\n # 1)run on Ascend NPU\n custom_op.parameter_map[\"use_off_line\"].b = True\n\n # 2)recommended use fp16 datatype to obtain better performance\n custom_op.parameter_map[\"precision_mode\"].s = tf.compat.as_bytes(\"force_fp16\")\n\n # 3)disable remapping\n config.graph_options.rewrite_options.remapping = RewriterConfig.OFF\n\n # 4)set graph_run_mode=0,obtain better performance\n custom_op.parameter_map[\"graph_run_mode\"].i = 0\n # --------------------------------------------------------------------------------\n\n # load model, set graph input nodes and output nodes\n args = parse_args()\n self.graph = self.__load_model(args.model_path)\n self.input_tensor = self.graph.get_tensor_by_name(args.input_tensor_name)\n self.output_tensor = self.graph.get_tensor_by_name(args.output_tensor_name)\n\n # create session\n self.sess = tf.Session(config=config, graph=self.graph)\n\n def __load_model(self, model_file):\n \"\"\"\n load fronzen graph\n :param model_file:\n :return:\n \"\"\"\n with tf.gfile.GFile(model_file, \"rb\") as gf:\n graph_def = tf.GraphDef()\n 
graph_def.ParseFromString(gf.read())\n\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(graph_def, name=\"\")\n\n return graph\n\n def do_infer(self, batch_data):\n \"\"\"\n do inference\n :param image_data:\n :return:\n \"\"\"\n out_list = []\n total_time = 0\n i = 0\n for data in batch_data:\n t = time.time()\n out = self.sess.run(self.output_tensor, feed_dict={self.input_tensor: data})\n if i > 0:\n total_time = total_time + time.time() - t\n i = i + 1\n out_list.append(out)\n return np.array(out_list), total_time\n\n def batch_process(self, image_data, label_data):\n \"\"\"\n images preprocess\n :return:\n \"\"\"\n # Get the batch information of the current input data, and automatically adjust the data to the fixed batch\n n_dim = image_data.shape[0]\n batch_size = self.batch_size\n\n # if data is not enough for the whole batch, you need to complete the data\n m = n_dim % batch_size\n if m < batch_size and m > 0:\n # The insufficient part shall be filled with 0 according to n dimension\n pad = np.zeros((batch_size - m, 224, 224, 3)).astype(np.float32)\n image_data = np.concatenate((image_data, pad), axis=0)\n\n # Define the Minis that can be divided into several batches\n mini_batch = []\n mini_label = []\n i = 0\n while i < n_dim:\n # Define the Minis that can be divided into several batches\n mini_batch.append(image_data[i: i + batch_size, :, :, :])\n mini_label.append(label_data[i: i + batch_size])\n i += batch_size\n\n return mini_batch, mini_label\n\n\ndef normalize(inputs):\n imagenet_mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]\n imagenet_std = [0.229 * 255, 0.224 * 255, 0.225 * 255]\n imagenet_mean = tf.expand_dims(tf.expand_dims(imagenet_mean, 0), 0)\n imagenet_std = tf.expand_dims(tf.expand_dims(imagenet_std, 0), 0)\n inputs = inputs - imagenet_mean # tf.subtract(inputs, imagenet_mean)\n inputs = inputs * (1.0 / imagenet_std)\n\n return inputs\n\n\ndef image_process(image_path, label_file):\n ###image process\n imagelist = []\n labellist = []\n images_count = 0\n for file in os.listdir(image_path):\n with tf.Session().as_default():\n image_file = os.path.join(image_path, file)\n image_name = image_file.split('/')[-1].split('.')[0]\n image = tf.gfile.FastGFile(image_file, 'rb').read()\n image = tf.image.decode_jpeg(image, channels=3)\n if image.dtype != tf.float32:\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image = tf.image.central_crop(image, central_fraction=0.875)\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [224, 224],align_corners=False)\n image = tf.squeeze(image, [0])\n image = tf.subtract(image, 0.5)\n img = tf.multiply(image, 2.0)\n images_count = images_count + 1\n if tf.shape(img)[2].eval() == 1:\n img = tf.image.grayscale_to_rgb(img)\n img = img.eval()\n imagelist.append(img)\n tf.reset_default_graph()\n\n ###labels\n lable = read_file(image_name, label_file)\n labellist.append(lable)\n return np.array(imagelist), np.array(labellist), images_count\n\ndef main():\n args = parse_args()\n top1_count = 0\n top5_count = 0\n\n ###preprocess\n tf.reset_default_graph()\n print(\"########NOW Start Preprocess!!!#########\")\n images, labels, images_count = image_process(args.image_path, args.label_file)\n\n ###batch\n print(\"########NOW Start Batch!!!#########\")\n classifier = Classifier()\n batch_images, batch_labels= classifier.batch_process(images, labels)\n\n ###do inference\n print(\"########NOW Start inference!!!#########\")\n batch_logits, total_time = classifier.do_infer(batch_images)\n\n 
### compute accuracy\n    batchsize = int(args.batchsize)\n    total_step = int(images_count / batchsize)\n    print(\"########NOW Start Compute Accuracy!!!#########\")\n    for i in range(total_step):\n        top1acc = tf.reduce_sum(tf.cast(tf.equal(tf.argmax(batch_logits[i], 1), batch_labels[i]), tf.float32))\n        top5acc = tf.reduce_sum(tf.cast(tf.nn.in_top_k(batch_logits[i], batch_labels[i], 5), tf.float32))\n        with tf.Session().as_default():\n            tf.reset_default_graph()\n            top1_count += top1acc.eval()\n            top5_count += top5acc.eval()\n    print('+----------------------------------------+')\n    print('the correct num is {}, total num is {}.'.format(top1_count, total_step * batchsize))\n    print('Top1 accuracy:', top1_count / (total_step * batchsize) * 100)\n    print('Top5 accuracy:', top5_count / (total_step * batchsize) * 100)\n    print('images number = ', total_step * batchsize)\n    print('images/sec = ', (total_step * batchsize) / total_time)\n    print('+----------------------------------------+')\n\nif __name__ == '__main__':\n    main()\n\n\n",
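The batch_process method in the classifier above zero-pads the tail of the dataset so every mini-batch has the fixed graph batch size. A small stand-alone sketch of just that padding arithmetic (same 224x224x3 shape assumption as the script):

import numpy as np

def pad_to_full_batch(images, batch_size):
    # Append zero images until the count is a multiple of batch_size.
    m = images.shape[0] % batch_size
    if 0 < m < batch_size:
        pad = np.zeros((batch_size - m, 224, 224, 3), dtype=np.float32)
        images = np.concatenate((images, pad), axis=0)
    return images

print(pad_to_full_batch(np.ones((10, 224, 224, 3), np.float32), 4).shape)
# (12, 224, 224, 3): two zero images appended so 12 % 4 == 0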
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Functions and classes related to optimization (weight updates).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom npu_bridge.npu_init import *\n\nimport re\nimport tensorflow as tf\ndef npu_tf_optimizer(opt):\n npu_opt = NPUDistributedOptimizer(opt)\n #loss scale\n loss_scale_manager = ExponentialUpdateLossScaleManager(init_loss_scale=2**32, incr_every_n_steps=1000, decr_every_n_nan_or_inf=2, decr_ratio=0.5)\n if int(os.getenv('RANK_SIZE')) == 1:\n npu_opt = NPULossScaleOptimizer(npu_opt, loss_scale_manager)\n else:\n npu_opt = NPULossScaleOptimizer(npu_opt, loss_scale_manager, is_distributed=True)\n return npu_opt\n\ndef create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=0.0,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. 
I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m/v variables are NOT\n # loaded from init_checkpoint.)\n optimizer = LAMBOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n #optimizer = npu_tf_optimizer(optimizer)\n\n if use_tpu:\n optimizer = npu_tf_optimizer(tf.contrib.tpu.CrossShardOptimizer(optimizer))\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use\n # a different optimizer, you should probably take this line out.\n # new_global_step = global_step + 1\n # train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op\n\n\nclass AdamWeightDecayOptimizer(tf.train.Optimizer):\n \"\"\"A basic Adam optimizer that includes \"correct\" L2 weight decay.\"\"\"\n\n def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want 
to decay the weights in a manner that doesn't interact\n            # with the m/v parameters. This is equivalent to adding the square\n            # of the weights to the loss with plain (non-momentum) SGD.\n            if self._do_use_weight_decay(param_name):\n                update += self.weight_decay_rate * param\n\n            update_with_lr = self.learning_rate * update\n\n            next_param = param - update_with_lr\n\n            assignments.extend(\n                [param.assign(next_param),\n                 m.assign(next_m),\n                 v.assign(next_v)])\n        return tf.group(*assignments, name=name)\n\n    def _do_use_weight_decay(self, param_name):\n        \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n        if not self.weight_decay_rate:\n            return False\n        if self.exclude_from_weight_decay:\n            for r in self.exclude_from_weight_decay:\n                if re.search(r, param_name) is not None:\n                    return False\n        return True\n\n    def _get_variable_name(self, param_name):\n        \"\"\"Get the variable name from the tensor name.\"\"\"\n        m = re.match(\"^(.*):\\\\d+$\", param_name)\n        if m is not None:\n            param_name = m.group(1)\n        return param_name\n\n\nclass LAMBOptimizer(tf.train.Optimizer):\n    \"\"\"\n    LAMBOptimizer optimizer.\n    https://github.com/ymcui/LAMB_Optimizer_TF\n    # IMPORTANT NOTE\n    - This is NOT an official implementation.\n    - The LAMB optimizer changed from arXiv v1 to v3.\n    - We implement the v3 version (the latest version as of June 2019).\n    - Our implementation is based on `AdamWeightDecayOptimizer` in BERT (provided by Google).\n\n    # References\n    - Large Batch Optimization for Deep Learning: Training BERT in 76 minutes. https://arxiv.org/abs/1904.00962v3\n    - BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. https://arxiv.org/abs/1810.04805\n    # Parameters\n    - There is nothing special, just the same as `AdamWeightDecayOptimizer`.\n    \"\"\"\n\n    def __init__(self,\n                 learning_rate,\n                 weight_decay_rate=0.01,\n                 beta_1=0.9,\n                 beta_2=0.999,\n                 epsilon=1e-6,\n                 exclude_from_weight_decay=None,\n                 name=\"LAMBOptimizer\"):\n        \"\"\"Constructs a LAMBOptimizer.\"\"\"\n        super(LAMBOptimizer, self).__init__(False, name)\n\n        self.learning_rate = learning_rate\n        self.weight_decay_rate = weight_decay_rate\n        self.beta_1 = beta_1\n        self.beta_2 = beta_2\n        self.epsilon = epsilon\n        self.exclude_from_weight_decay = exclude_from_weight_decay\n\n    def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n        \"\"\"See base class.\"\"\"\n        assignments = []\n        for (grad, param) in grads_and_vars:\n            if grad is None or param is None:\n                continue\n\n            param_name = self._get_variable_name(param.name)\n\n            m = tf.get_variable(\n                name=param_name + \"/lamb_m\",\n                shape=param.shape.as_list(),\n                dtype=tf.float32,\n                trainable=False,\n                initializer=tf.zeros_initializer())\n            v = tf.get_variable(\n                name=param_name + \"/lamb_v\",\n                shape=param.shape.as_list(),\n                dtype=tf.float32,\n                trainable=False,\n                initializer=tf.zeros_initializer())\n\n            # Standard Adam update.\n            next_m = (\n                tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n            next_v = (\n                tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n                                                          tf.square(grad)))\n\n            update = next_m / (tf.sqrt(next_v) + self.epsilon)\n\n            # Just adding the square of the weights to the loss function is *not*\n            # the correct way of using L2 regularization/weight decay with Adam,\n            # since that will interact with the m and v parameters in strange ways.\n            #\n            # Instead we want to decay the weights in a manner that doesn't interact\n            # with the m/v parameters. This is equivalent to adding the square\n            # of the weights to the loss with plain (non-momentum) SGD.\n            if self._do_use_weight_decay(param_name):\n                update += self.weight_decay_rate * param\n\n            ############## BELOW ARE THE SPECIFIC PARTS FOR LAMB ##############\n\n            # Note: Here are two choices for scaling function \\phi(z)\n            # minmax: \\phi(z) = min(max(z, \\gamma_l), \\gamma_u)\n            # identity: \\phi(z) = z\n            # The authors do not mention what \\gamma_l and \\gamma_u are.\n            # UPDATE: after asking the authors, they provided the code below.\n            # ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(\n            #      math_ops.greater(g_norm, 0), (w_norm / g_norm), 1.0), 1.0)\n\n            r1 = tf.sqrt(tf.reduce_sum(tf.square(param)))\n            r2 = tf.sqrt(tf.reduce_sum(tf.square(update)))\n\n            r = tf.where(tf.greater(r1, 0.0),\n                         tf.where(tf.greater(r2, 0.0),\n                                  r1 / r2,\n                                  1.0),\n                         1.0)\n\n            eta = self.learning_rate * r\n\n            update_with_lr = eta * update\n\n            next_param = param - update_with_lr\n\n            assignments.extend(\n                [param.assign(next_param),\n                 m.assign(next_m),\n                 v.assign(next_v)])\n        new_global_step = global_step + 1\n        new_global_step = tf.identity(new_global_step, name='step_update')\n        assignments.extend([global_step.assign(new_global_step)])\n        return tf.group(*assignments, name=name)\n\n    def _do_use_weight_decay(self, param_name):\n        \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n        if not self.weight_decay_rate:\n            return False\n        if self.exclude_from_weight_decay:\n            for r in self.exclude_from_weight_decay:\n                if re.search(r, param_name) is not None:\n                    return False\n        return True\n\n    def _get_variable_name(self, param_name):\n        \"\"\"Get the variable name from the tensor name.\"\"\"\n        m = re.match(\"^(.*):\\\\d+$\", param_name)\n        if m is not None:\n            param_name = m.group(1)\n        return param_name",
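The LAMB-specific step above computes a per-variable trust ratio r = ||w|| / ||update|| and scales the learning rate by it. A small NumPy sketch of that arithmetic with made-up numbers (not part of the source file):

import numpy as np

param = np.array([0.3, -0.4])    # ||w|| = 0.5
update = np.array([0.06, 0.08])  # ||u|| = 0.1
r1 = np.sqrt(np.sum(np.square(param)))
r2 = np.sqrt(np.sum(np.square(update)))
# Same guard as the TF code: fall back to 1.0 when either norm is zero.
r = np.where(r1 > 0, np.where(r2 > 0, r1 / r2, 1.0), 1.0)
eta = 0.001 * r  # base learning rate scaled per variable
print(r, eta)    # 5.0, 0.005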
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport numpy as np\nimport time\nfrom pycocotools.cocoeval import COCOeval\n\nfrom detectron2 import _C\n\n\nclass COCOeval_opt(COCOeval):\n \"\"\"\n This is a slightly modified version of the original COCO API, where the functions evaluateImg()\n and accumulate() are implemented in C++ to speedup evaluation\n \"\"\"\n\n def evaluate(self):\n \"\"\"\n Run per image evaluation on given images and store results in self.evalImgs_cpp, a\n datastructure that isn't readable from Python but is used by a c++ implementation of\n accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure\n self.evalImgs because this datastructure is a computational bottleneck.\n :return: None\n \"\"\"\n tic = time.time()\n\n print(\"Running per image evaluation...\")\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if p.useSegm is not None:\n p.iouType = \"segm\" if p.useSegm == 1 else \"bbox\"\n print(\"useSegm (deprecated) is not None. Running {} evaluation\".format(p.iouType))\n print(\"Evaluate annotation type *{}*\".format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params = p\n\n self._prepare()\n\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == \"segm\" or p.iouType == \"bbox\":\n computeIoU = self.computeIoU\n elif p.iouType == \"keypoints\":\n computeIoU = self.computeOks\n self.ious = {\n (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds\n }\n\n maxDet = p.maxDets[-1]\n\n # <<<< Beginning of code differences with original COCO API\n def convert_instances_to_cpp(instances, is_det=False):\n # Convert annotations for a list of instances in an image to a format that's fast\n # to access in C++\n instances_cpp = []\n for instance in instances:\n instance_cpp = _C.InstanceAnnotation(\n int(instance[\"id\"]),\n instance[\"score\"] if is_det else instance.get(\"score\", 0.0),\n instance[\"area\"],\n bool(instance.get(\"iscrowd\", 0)),\n bool(instance.get(\"ignore\", 0)),\n )\n instances_cpp.append(instance_cpp)\n return instances_cpp\n\n # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++\n ground_truth_instances = [\n [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]\n for imgId in p.imgIds\n ]\n detected_instances = [\n [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds]\n for imgId in p.imgIds\n ]\n ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]\n\n if not p.useCats:\n # For each image, flatten per-category lists into a single list\n ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances]\n detected_instances = [[[o 
for c in i for o in c]] for i in detected_instances]\n\n # Call C++ implementation of self.evaluateImgs()\n self._evalImgs_cpp = _C.COCOevalEvaluateImages(\n p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances\n )\n self._evalImgs = None\n\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print(\"COCOeval_opt.evaluate() finished in {:0.2f} seconds.\".format(toc - tic))\n # >>>> End of code differences with original COCO API\n\n def accumulate(self):\n \"\"\"\n Accumulate per image evaluation results and store the result in self.eval. Does not\n support changing parameter settings from those used by self.evaluate()\n \"\"\"\n print(\"Accumulating evaluation results...\")\n tic = time.time()\n if not hasattr(self, \"_evalImgs_cpp\"):\n print(\"Please run evaluate() first\")\n\n self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)\n\n # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections\n self.eval[\"recall\"] = np.array(self.eval[\"recall\"]).reshape(\n self.eval[\"counts\"][:1] + self.eval[\"counts\"][2:]\n )\n\n # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X\n # num_area_ranges X num_max_detections\n self.eval[\"precision\"] = np.array(self.eval[\"precision\"]).reshape(self.eval[\"counts\"])\n self.eval[\"scores\"] = np.array(self.eval[\"scores\"]).reshape(self.eval[\"counts\"])\n toc = time.time()\n print(\"COCOeval_opt.accumulate() finished in {:0.2f} seconds.\".format(toc - tic))\n",
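A hedged driver sketch: COCOeval_opt is a drop-in subclass of pycocotools' COCOeval, so the usual evaluate/accumulate/summarize sequence applies; the two JSON paths below are placeholders, not files from this repo:

from pycocotools.coco import COCO

coco_gt = COCO("instances_val2017.json")      # ground truth (placeholder path)
coco_dt = coco_gt.loadRes("detections.json")  # detections (placeholder path)
evaluator = COCOeval_opt(coco_gt, coco_dt, iouType="bbox")
evaluator.evaluate()    # per-image evaluation via the C++ fast path
evaluator.accumulate()  # C++ accumulation into evaluator.eval
evaluator.summarize()   # inherited from COCOeval: prints the AP/AR table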
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n## ==============================================================================\n\"\"\"Provides data for the Cifar10 dataset.\n\nThe dataset scripts used to create the dataset can be found at:\ntensorflow/models/research/slim/datasets/download_and_convert_cifar10.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nfrom tensorflow.contrib import slim as contrib_slim\n\nfrom datasets import dataset_utils\n\nslim = contrib_slim\n\n_FILE_PATTERN = 'cifar10_%s.tfrecord'\n\nSPLITS_TO_SIZES = {'train': 50000, 'test': 10000}\n\n_NUM_CLASSES = 10\n\n_ITEMS_TO_DESCRIPTIONS = {\n 'image': 'A [32 x 32 x 3] color image.',\n 'label': 'A single integer between 0 and 9',\n}\n\n\ndef get_split(split_name, dataset_dir, file_pattern=None, reader=None):\n \"\"\"Gets a dataset tuple with instructions for reading cifar10.\n\n Args:\n split_name: A train/test split name.\n dataset_dir: The base directory of the dataset sources.\n file_pattern: The file pattern to use when matching the dataset sources.\n It is assumed that the pattern contains a '%s' string so that the split\n name can be inserted.\n reader: The TensorFlow reader type.\n\n Returns:\n A `Dataset` namedtuple.\n\n Raises:\n ValueError: if `split_name` is not a valid train/test split.\n \"\"\"\n if split_name not in SPLITS_TO_SIZES:\n raise ValueError('split name %s was not recognized.' 
% split_name)\n\n if not file_pattern:\n file_pattern = _FILE_PATTERN\n file_pattern = os.path.join(dataset_dir, file_pattern % split_name)\n\n # Allowing None in the signature so that dataset_factory can use the default.\n if not reader:\n reader = tf.TFRecordReader\n\n keys_to_features = {\n 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),\n 'image/class/label': tf.FixedLenFeature(\n [], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),\n }\n\n items_to_handlers = {\n 'image': slim.tfexample_decoder.Image(shape=[32, 32, 3]),\n 'label': slim.tfexample_decoder.Tensor('image/class/label'),\n }\n\n decoder = slim.tfexample_decoder.TFExampleDecoder(\n keys_to_features, items_to_handlers)\n\n labels_to_names = None\n if dataset_utils.has_labels(dataset_dir):\n labels_to_names = dataset_utils.read_label_file(dataset_dir)\n\n return slim.dataset.Dataset(\n data_sources=file_pattern,\n reader=reader,\n decoder=decoder,\n num_samples=SPLITS_TO_SIZES[split_name],\n items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,\n num_classes=_NUM_CLASSES,\n labels_to_names=labels_to_names,\n )\n",
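A hedged sketch of consuming the Dataset that get_split returns, using slim's DatasetDataProvider as is conventional for these providers; the dataset directory is a placeholder:

import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from datasets import cifar10

slim = contrib_slim

dataset = cifar10.get_split("train", "/tmp/cifar10")  # placeholder directory
provider = slim.dataset_data_provider.DatasetDataProvider(dataset, shuffle=True)
# Items are decoded according to the handlers defined above.
image, label = provider.get(["image", "label"])  # [32, 32, 3] uint8, int64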
"# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\n\ndef mixup(alpha, num_classes, data, target):\n with torch.no_grad():\n bs = data.size(0)\n c = np.random.beta(alpha, alpha)\n\n perm = torch.randperm(bs).cuda()\n\n md = c * data + (1-c) * data[perm, :]\n mt = c * target + (1-c) * target[perm, :]\n return md, mt\n\n\nclass MixUpWrapper(object):\n def __init__(self, alpha, num_classes, dataloader):\n self.alpha = alpha\n self.dataloader = dataloader\n self.num_classes = num_classes\n\n def mixup_loader(self, loader):\n for input, target in loader:\n i, t = mixup(self.alpha, self.num_classes, input, target)\n yield i, t\n\n def __iter__(self):\n return self.mixup_loader(self.dataloader)\n\n\nclass NLLMultiLabelSmooth(nn.Module):\n def __init__(self, smoothing = 0.0):\n super(NLLMultiLabelSmooth, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n\n def forward(self, x, target):\n if self.training:\n x = x.float()\n target = target.float()\n logprobs = torch.nn.functional.log_softmax(x, dim = -1)\n \n nll_loss = -logprobs * target\n nll_loss = nll_loss.sum(-1)\n \n smooth_loss = -logprobs.mean(dim=-1)\n \n loss = self.confidence * nll_loss + self.smoothing * smooth_loss\n \n return loss.mean()\n else:\n return torch.nn.functional.cross_entropy(x, target)\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport importlib\nimport importlib.util\nimport logging\nimport numpy as np\nimport os\nimport random\nimport sys\nfrom datetime import datetime\nimport torch\n\n__all__ = [\"seed_all_rng\"]\n\n\nTORCH_VERSION = tuple(int(x) for x in torch.__version__.split(\".\")[:2])\n\"\"\"\nPyTorch version as a tuple of 2 ints. Useful for comparison.\n\"\"\"\n\n\ndef seed_all_rng(seed=None):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.set_rng_state(torch.manual_seed(seed).get_state())\n random.seed(seed)\n\n\n# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path\ndef _import_file(module_name, file_path, make_importable=False):\n spec = importlib.util.spec_from_file_location(module_name, file_path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n if make_importable:\n sys.modules[module_name] = module\n return module\n\n\ndef _configure_libraries():\n \"\"\"\n Configurations for some libraries.\n \"\"\"\n # An environment option to disable `import cv2` globally,\n # in case it leads to negative performance impact\n disable_cv2 = int(os.environ.get(\"DETECTRON2_DISABLE_CV2\", False))\n if disable_cv2:\n sys.modules[\"cv2\"] = None\n else:\n # Disable opencl in opencv since its interaction with cuda often has negative effects\n # This envvar is supported after OpenCV 3.4.0\n os.environ[\"OPENCV_OPENCL_RUNTIME\"] = \"disabled\"\n try:\n import cv2\n\n if int(cv2.__version__.split(\".\")[0]) >= 3:\n cv2.ocl.setUseOpenCL(False)\n except ImportError:\n pass\n\n def get_version(module, digit=2):\n return tuple(map(int, module.__version__.split(\".\")[:digit]))\n\n # fmt: off\n assert get_version(torch) >= (1, 4), \"Requires torch>=1.4\"\n import fvcore\n assert get_version(fvcore, 3) >= (0, 1, 1), \"Requires fvcore>=0.1.1\"\n import yaml\n assert get_version(yaml) >= (5, 1), \"Requires pyyaml>=5.1\"\n # fmt: on\n\n\n_ENV_SETUP_DONE = False\n\n\ndef setup_environment():\n \"\"\"Perform environment setup work. 
The default setup is a no-op, but this\n function allows the user to specify a Python source file or a module in\n the $DETECTRON2_ENV_MODULE environment variable, that performs\n custom setup work that may be necessary to their computing environment.\n \"\"\"\n global _ENV_SETUP_DONE\n if _ENV_SETUP_DONE:\n return\n _ENV_SETUP_DONE = True\n\n _configure_libraries()\n\n custom_module_path = os.environ.get(\"DETECTRON2_ENV_MODULE\")\n\n if custom_module_path:\n setup_custom_environment(custom_module_path)\n else:\n # The default setup is a no-op\n pass\n\n\ndef setup_custom_environment(custom_module):\n \"\"\"\n Load custom environment setup by importing a Python source file or a\n module, and run the setup function.\n \"\"\"\n if custom_module.endswith(\".py\"):\n module = _import_file(\"detectron2.utils.env.custom_module\", custom_module)\n else:\n module = importlib.import_module(custom_module)\n assert hasattr(module, \"setup_environment\") and callable(module.setup_environment), (\n \"Custom environment module defined in {} does not have the \"\n \"required callable attribute 'setup_environment'.\"\n ).format(custom_module)\n module.setup_environment()\n",
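A hedged sketch of the custom hook described by setup_environment() above. The module pointed to by $DETECTRON2_ENV_MODULE only needs a callable named setup_environment; the file name and its body here are illustrative:

# Invocation (illustrative):
#   DETECTRON2_ENV_MODULE=/path/to/my_env.py python train.py
#
# my_env.py:
def setup_environment():
    import os
    # e.g. pin library threading before numpy/cv2 are imported elsewhere
    os.environ.setdefault("OMP_NUM_THREADS", "1")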
"import sys\nimport os\nimport cv2\nimport numpy as np\n\n\ndef letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):\n # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n shape = img.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n # if auto: # minimum rectangle\n # dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding\n # elif scaleFill: # stretch\n # dw, dh = 0.0, 0.0\n # new_unpad = (new_shape[1], new_shape[0])\n # ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return img, ratio, (dw, dh)\n\n\ndef yolov3_onnx(src_info, output_path):\n in_files = []\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n with open(src_info, 'r') as file:\n contents = file.read().split('\\n')\n for i in contents[:-1]:\n in_files.append(i.split()[1])\n\n i = 0\n for file in in_files:\n i = i + 1\n print(file, \"====\", i)\n img0 = cv2.imread(file)\n # Padded resize\n img = letterbox(img0, new_shape=416)[0]\n # cv2.imshow('image', img)\n # cv2.waitKey(0)\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x640x640\n image_np = np.array(img, dtype=np.float32)\n image_np /= 255.0\n # image_np = np.transpose(image_np, (2, 0, 1)) # HWC -> CHW\n image_np_expanded = np.expand_dims(image_np, axis=0) # NCHW\n # Focus\n print(\"shape:\", image_np_expanded.shape)\n img_numpy = np.ascontiguousarray(image_np_expanded)\n\n # save img_tensor as binary file for om inference input\n temp_name = file[file.rfind('/') + 1:]\n img_numpy.tofile(os.path.join(output_path, temp_name.split('.')[0] + \".bin\"))\n\n\nif __name__ == \"__main__\":\n src_info = os.path.abspath(sys.argv[1])\n bin_path = os.path.abspath(sys.argv[2])\n yolov3_onnx(src_info, bin_path)\n\n\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nfrom typing import List\nimport fvcore.nn.weight_init as weight_init\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom detectron2.config import configurable\nfrom detectron2.layers import Conv2d, Linear, ShapeSpec, get_norm\nfrom detectron2.utils.registry import Registry\n\n__all__ = [\"FastRCNNConvFCHead\", \"build_box_head\", \"ROI_BOX_HEAD_REGISTRY\"]\n\nROI_BOX_HEAD_REGISTRY = Registry(\"ROI_BOX_HEAD\")\nROI_BOX_HEAD_REGISTRY.__doc__ = \"\"\"\nRegistry for box heads, which make box predictions from per-region features.\n\nThe registered object will be called with `obj(cfg, input_shape)`.\n\"\"\"\n\n\n@ROI_BOX_HEAD_REGISTRY.register()\nclass FastRCNNConvFCHead(nn.Module):\n \"\"\"\n A head with several 3x3 conv layers (each followed by norm & relu) and then\n several fc layers (each followed by relu).\n \"\"\"\n\n @configurable\n def __init__(\n self, input_shape: ShapeSpec, *, conv_dims: List[int], fc_dims: List[int], conv_norm=\"\"\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n input_shape (ShapeSpec): shape of the input feature.\n conv_dims (list[int]): the output dimensions of the conv layers\n fc_dims (list[int]): the output dimensions of the fc layers\n conv_norm (str or callable): normalization for the conv layers.\n See :func:`detectron2.layers.get_norm` for supported types.\n \"\"\"\n super().__init__()\n assert len(conv_dims) + len(fc_dims) > 0\n\n self._output_size = (input_shape.channels, input_shape.height, input_shape.width)\n\n self.conv_norm_relus = []\n for k, conv_dim in enumerate(conv_dims):\n conv = Conv2d(\n self._output_size[0],\n conv_dim,\n kernel_size=3,\n padding=1,\n bias=not conv_norm,\n norm=get_norm(conv_norm, conv_dim),\n activation=F.relu,\n )\n self.add_module(\"conv{}\".format(k + 1), conv)\n self.conv_norm_relus.append(conv)\n self._output_size = (conv_dim, self._output_size[1], self._output_size[2])\n\n self.fcs = []\n for k, fc_dim in enumerate(fc_dims):\n fc = Linear(np.prod(self._output_size), fc_dim)\n self.add_module(\"fc{}\".format(k + 1), fc)\n self.fcs.append(fc)\n self._output_size = fc_dim\n\n for layer in self.conv_norm_relus:\n weight_init.c2_msra_fill(layer)\n for layer in self.fcs:\n weight_init.c2_xavier_fill(layer)\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV\n conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM\n num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC\n fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM\n return {\n \"input_shape\": input_shape,\n \"conv_dims\": [conv_dim] * num_conv,\n \"fc_dims\": [fc_dim] * num_fc,\n \"conv_norm\": cfg.MODEL.ROI_BOX_HEAD.NORM,\n }\n\n def forward(self, x):\n for layer in self.conv_norm_relus:\n x = layer(x)\n if len(self.fcs):\n if x.dim() > 2:\n x = torch.flatten(x, start_dim=1)\n for layer in self.fcs:\n x = 
F.relu(layer(x))\n return x\n\n @property\n def output_shape(self):\n \"\"\"\n Returns:\n ShapeSpec: the output feature shape\n \"\"\"\n o = self._output_size\n if isinstance(o, int):\n return ShapeSpec(channels=o)\n else:\n return ShapeSpec(channels=o[0], height=o[1], width=o[2])\n\n\ndef build_box_head(cfg, input_shape):\n \"\"\"\n Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.ROI_BOX_HEAD.NAME\n return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape)\n",
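A hedged sketch instantiating FastRCNNConvFCHead directly through its @configurable keyword arguments, bypassing the cfg path; the 256x7x7 input mimics a typical ROI-pooler output and all dimensions are illustrative:

import torch
from detectron2.layers import ShapeSpec

head = FastRCNNConvFCHead(
    ShapeSpec(channels=256, height=7, width=7),
    conv_dims=[],          # no conv layers
    fc_dims=[1024, 1024],  # two fc layers, as in common R-CNN configs
)
x = torch.rand(8, 256, 7, 7)  # 8 ROIs
print(head(x).shape)          # torch.Size([8, 1024])
print(head.output_shape)      # ShapeSpec with channels=1024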
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"export checkpoint file into air, onnx, mindir models\"\"\"\nimport argparse\nimport numpy as np\n\nimport mindspore\nfrom mindspore import context, Tensor\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net, export\n\nfrom src.centerface import CenterfaceMobilev2, CenterFaceWithNms\nfrom src.config import ConfigCenterface\n\nparser = argparse.ArgumentParser(description='centerface export')\nparser.add_argument(\"--device_id\", type=int, default=0, help=\"Device id\")\nparser.add_argument(\"--batch_size\", type=int, default=1, help=\"batch size\")\nparser.add_argument(\n \"--ckpt_file\",\n type=str,\n required=True,\n help=\"Checkpoint file path.\")\nparser.add_argument(\n \"--file_name\",\n type=str,\n default=\"centerface\",\n help=\"output file name.\")\nparser.add_argument(\n '--file_format',\n type=str,\n choices=[\n \"AIR\",\n \"ONNX\",\n \"MINDIR\"],\n default='AIR',\n help='file format')\nparser.add_argument(\"--device_target\", type=str, choices=[\"Ascend\", \"GPU\", \"CPU\"], default=\"Ascend\",\n help=\"device target\")\nargs = parser.parse_args()\n\ncontext.set_context(\n mode=context.GRAPH_MODE,\n device_target=args.device_target,\n device_id=args.device_id)\n\nif __name__ == '__main__':\n config = ConfigCenterface()\n net = CenterfaceMobilev2()\n\n param_dict = load_checkpoint(args.ckpt_file)\n param_dict_new = {}\n for key, values in param_dict.items():\n if key.startswith('moments.') or key.startswith(\n 'moment1.') or key.startswith('moment2.'):\n continue\n elif key.startswith('centerface_network.'):\n param_dict_new[key[19:]] = values\n else:\n param_dict_new[key] = values\n\n load_param_into_net(net, param_dict_new)\n net = CenterFaceWithNms(net)\n net.set_train(False)\n\n input_data = Tensor(np.zeros(\n [args.batch_size, 3, config.input_h, config.input_w]), mindspore.float32)\n export(\n net,\n input_data,\n file_name=args.file_name,\n file_format=args.file_format)\n",
"# Apache License\n# Version 2.0, January 2004\n# http://www.apache.org/licenses/\n\n# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by InceptionV3 on 19-4-4\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\n\n\"\"\"\n论文中提到的第一种模块结构,stride都是1,等效于三层\n但目前google源码可以看到和这里是不一样的,而是用了inceptionv1的结构\n但论文的图表中则明确指出了是拆分了5x5的卷积为两个3x3,网上基本上全部抄的google源码\n我这里就按照论文复现,这里体现了原则3\n\"\"\"\ndef inception_module_v3_1(net, scope, filter_num, stride=1):\n with tf.variable_scope(scope):\n with tf.variable_scope('bh1'):\n bh1 = slim.conv2d(net, filter_num[0], [1, 1], stride=stride, scope=\"bh1_conv1_1x1\")\n with tf.variable_scope('bh2'):\n bh2 = slim.avg_pool2d(net, [3, 3], stride=stride, scope=\"bh2_avg_3x3\")\n bh2 = slim.conv2d(bh2, filter_num[1], [1, 1], stride=stride, scope=\"bh2_conv_1x1\")\n with tf.variable_scope('bh3'):\n bh3 = slim.conv2d(net, filter_num[2], [1, 1], stride=stride, scope=\"bh3_conv1_1x1\")\n bh3 = slim.conv2d(bh3, filter_num[3], [3, 3], stride=stride, scope=\"bh3_conv2_3x3\")\n with tf.variable_scope('bh4'):\n bh4 = slim.conv2d(net, filter_num[4], [1, 1], stride=stride, scope=\"bh4_conv1_1x1\")\n bh4 = slim.conv2d(bh4, filter_num[5], [3, 3], stride=stride, scope=\"bh4_conv2_3x3\")\n bh4 = slim.conv2d(bh4, filter_num[6], [3, 3], stride=stride, scope=\"bh4_conv3_3x3\")\n net = tf.concat([bh1, bh2, bh3, bh4], axis=3)\n return net\n\n\n'''\n论文中提到的第二种结构,使用了1xn和nx1,论文中将n=7用来处理17x17的grid,五层\n这里体现了原则3\n'''\ndef inception_moudle_v3_2(net, scope, filter_num, stride=1):\n with tf.variable_scope(scope):\n with tf.variable_scope(\"bh1\"):\n bh1 = slim.conv2d(net, filter_num[0], [1, 1], stride=stride, scope=\"bh1_conv_1x1\")\n with tf.variable_scope(\"bh2\"):\n bh2 = slim.avg_pool2d(net, [3, 3], stride=stride, scope='bh2_avg_3x3')\n bh2 = slim.conv2d(bh2, filter_num[1], [1, 1], stride=stride, scope='bh2_conv_1x1')\n with tf.variable_scope(\"bh3\"):\n bh3 = slim.conv2d(net, filter_num[2], [1, 1], stride=stride, scope='bh3_conv1_1x1')\n bh3 = slim.conv2d(bh3, filter_num[3], [1, 7], stride=stride, scope='bh3_conv2_1x7')\n bh3 = slim.conv2d(bh3, filter_num[4], [7, 1], stride=stride, scope='bh3_conv3_7x1')\n with 
tf.variable_scope(\"bh4\"):\n bh4 = slim.conv2d(net, filter_num[5], [1, 1], stride=stride, scope='bh4_conv1_1x1')\n bh4 = slim.conv2d(bh4, filter_num[6], [1, 7], stride=stride, scope='bh4_conv2_1x7')\n bh4 = slim.conv2d(bh4, filter_num[7], [7, 1], stride=stride, scope='bh4_conv3_7x1')\n bh4 = slim.conv2d(bh4, filter_num[8], [1, 7], stride=stride, scope='bh4_conv4_1x7')\n bh4 = slim.conv2d(bh4, filter_num[9], [7, 1], stride=stride, scope='bh4_conv5_7x1')\n net = tf.concat([bh1, bh2, bh3, bh4], axis=3)\n return net\n\n\n'''\n论文提到的第三种结构,增加了宽度,三层\n体现了原则2\n'''\ndef inception_moudle_v3_3(net, scope, filter_num, stride=1):\n with tf.variable_scope(scope):\n with tf.variable_scope(\"bh1\"):\n bh1 = slim.conv2d(net, filter_num[0], [1, 1], stride=stride, scope='bh1_conv_1x1')\n with tf.variable_scope(\"bh2\"):\n bh2 = slim.avg_pool2d(net, [3, 3], stride=stride, scope='bh2_avg_3x3')\n bh2 = slim.conv2d(bh2, filter_num[1], [1, 1], stride=stride, scope='bh2_conv_1x1')\n with tf.variable_scope(\"bh3\"):\n bh3 = slim.conv2d(net, filter_num[2], [1, 1], stride=stride, scope='bh3_conv1_1x1')\n bh3_1 = slim.conv2d(bh3, filter_num[3], [3, 1], stride=stride, scope='bh3_conv2_3x1')\n bh3_2 = slim.conv2d(bh3, filter_num[4], [1, 3], stride=stride, scope='bh3_conv2_1x3')\n with tf.variable_scope(\"bh4\"):\n bh4 = slim.conv2d(net, filter_num[5], [1, 1], stride=stride, scope='bh4_conv1_1x1')\n bh4 = slim.conv2d(bh4, filter_num[6], [3, 3], stride=stride, scope='bh4_conv2_3x3')\n bh4_1 = slim.conv2d(bh4, filter_num[7], [3, 1], stride=stride, scope='bh4_conv3_3x1')\n bh4_2 = slim.conv2d(bh4, filter_num[8], [1, 3], stride=stride, scope='bh4_conv3_1x3')\n net = tf.concat([bh1, bh2, bh3_1, bh3_2, bh4_1, bh4_2], axis=3)\n return net\n\n\n'''\n论文中提到用来减少grid-size的inception模块\n等效三层,pad为VALID\n体现了原则1\n'''\ndef inception_moudle_v3_reduce(net, scope, filter_num):\n with tf.variable_scope(scope):\n with tf.variable_scope(\"bh1\"):\n bh1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',scope=\"bh1_max_3x3\")\n with tf.variable_scope(\"bh2\"):\n bh2 = slim.conv2d(net, filter_num[0], [1, 1], stride=1, scope='bh2_conv1_1x1')\n bh2 = slim.conv2d(bh2, filter_num[1], [3, 3], stride=2, padding='VALID', scope='bh2_conv2_3x3')\n with tf.variable_scope(\"bh3\"):\n bh3 = slim.conv2d(net, filter_num[2], [1, 1], stride=1, scope='bh3_conv1_1x1')\n bh3 = slim.conv2d(bh3, filter_num[3], [3, 3], stride=1, scope='bh3_conv2_3x3')\n bh3 = slim.conv2d(bh3, filter_num[4], [3, 3], stride=2, padding='VALID', scope='bh3_conv3_3x3')\n net = tf.concat([bh1, bh2, bh3], axis=3)\n return net\n\n\ndef V3_slim(inputs, num_cls, keep_prob=0.8, is_training=True, spatital_squeeze=True):\n batch_norm_params = {\n 'decay': 0.998,\n 'epsilon': 0.001,\n 'scale': False,\n 'updates_collections': tf.GraphKeys.UPDATE_OPS,\n }\n\n net = inputs\n with tf.name_scope('reshape'):\n net = tf.reshape(net, [-1, 299, 299, 3])\n\n with tf.variable_scope('GoogLeNet_V3'):\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n weights_regularizer=slim.l2_regularizer(0.00004)):\n with slim.arg_scope(\n [slim.conv2d],\n weights_initializer=slim.xavier_initializer(),\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n with slim.arg_scope(\n [slim.batch_norm, slim.dropout], is_training=is_training):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='VALID'):\n net = slim.conv2d(net,32,[3,3], stride=2,scope=\"layer1\") #149x149\n net = slim.conv2d(net,32,[3,3], scope='layer2') #147x147\n net = 
slim.conv2d(net,64,[3,3], padding='SAME',scope='layer3') #147x147\n net = slim.max_pool2d(net,[3,3], stride=2,scope='layer4') #73x73\n net = slim.conv2d(net,80,[3,3], scope='layer5') #71x71\n net = slim.conv2d(net,192,[3,3], stride=2,scope='layer6') #35x35\n\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):\n net = slim.conv2d(net, 288, [3,3], scope='layer7')\n # 3 x inception\n net = inception_module_v3_1(net, scope='layer8',filter_num=[64,32,48,64,64,96,96]) #35x35\n net = inception_module_v3_1(net, scope='layer11',filter_num=[64,64,48,64,64,96,96])\n net = inception_module_v3_1(net, scope='layer14',filter_num=[64,64,48,64,64,96,96])\n print(net)\n # 5 x inception\n net = inception_moudle_v3_reduce(net, scope='layer17',filter_num=[192,384,64,96,96]) #17x17\n net = inception_moudle_v3_2(net, scope='layer20',filter_num=[192,192,128,128,192,128,128,128,128,192])\n net = inception_moudle_v3_2(net, scope='layer25',filter_num=[192,192,160,160,192,160,160,160,160,192])\n net = inception_moudle_v3_2(net, scope='layer30',filter_num=[192,192,160,160,192,160,160,160,160,192])\n net = inception_moudle_v3_2(net, scope='layer35',filter_num=[192,192,160,160,192,160,160,160,160,192])\n print(net)\n # 3 x inception\n net = inception_moudle_v3_reduce(net, scope='layer40',filter_num=[192,320,192,192,192]) #8x8\n net = inception_moudle_v3_3(net,scope='layer43',filter_num=[320,192,384,384,384,448,384,384,384])\n net = inception_moudle_v3_3(net,scope='layer46',filter_num=[320,192,384,384,384,448,384,384,384])\n print(net)\n net = slim.avg_pool2d(net,[8,8],padding='VALID',scope='layer49')\n net = slim.dropout(net)\n net = slim.conv2d(net,num_cls,[1,1],activation_fn=None,normalizer_fn=None,scope='layer50')\n print(net)\n if spatital_squeeze:\n net = tf.squeeze(net,[1,2],name='squeeze')\n\n net = slim.softmax(net,scope='softmax')\n return net\n\n\n\nclass testInceptionV3(tf.test.TestCase):\n def testBuildClassifyNetwork(self):\n inputs = tf.random_uniform((5,299,299,3))\n logits = V3_slim(inputs,10)\n print(logits)\n\nif __name__ == '__main__':\n tf.test.main()\n",
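The factorization rationale in the comments above can be checked with simple arithmetic: two stacked 3x3 convolutions cover the same 5x5 receptive field with roughly 28% fewer weights. Pure-Python sketch; `conv_params` is a hypothetical helper and the channel count is an arbitrary example:

def conv_params(k, c_in, c_out):
    # weights of a single k x k convolution, biases ignored
    return k * k * c_in * c_out

C = 192
five_by_five = conv_params(5, C, C)
two_three_by_three = 2 * conv_params(3, C, C)
print(five_by_five, two_three_by_three)   # 921600 663552
print(two_three_by_three / five_by_five)  # 0.72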
"# Copyright [yyyy] [name of copyright owner]\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\n pnasnet5large implementation grabbed from Cadene's pretrained models\n Additional credit to https://github.com/creafz\n\n https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py\n\n\"\"\"\nfrom collections import OrderedDict\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .helpers import build_model_with_cfg\nfrom .layers import ConvBnAct, create_conv2d, create_pool2d, create_classifier\nfrom .registry import register_model\n\n__all__ = ['PNASNet5Large']\n\ndefault_cfgs = {\n 'pnasnet5large': {\n 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth',\n 'input_size': (3, 331, 331),\n 'pool_size': (11, 11),\n 'crop_pct': 0.911,\n 'interpolation': 'bicubic',\n 'mean': (0.5, 0.5, 0.5),\n 'std': (0.5, 0.5, 0.5),\n 'num_classes': 1000,\n 'first_conv': 'conv_0.conv',\n 'classifier': 'last_linear',\n 'label_offset': 1, # 1001 classes in pretrained weights\n },\n}\n\n\nclass SeparableConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''):\n super(SeparableConv2d, self).__init__()\n self.depthwise_conv2d = create_conv2d(\n in_channels, in_channels, kernel_size=kernel_size,\n stride=stride, padding=padding, groups=in_channels)\n self.pointwise_conv2d = create_conv2d(\n in_channels, out_channels, kernel_size=1, padding=padding)\n\n def forward(self, x):\n x = self.depthwise_conv2d(x)\n x = self.pointwise_conv2d(x)\n return x\n\n\nclass BranchSeparables(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''):\n super(BranchSeparables, self).__init__()\n middle_channels = out_channels if stem_cell else in_channels\n self.act_1 = nn.ReLU()\n self.separable_1 = SeparableConv2d(\n in_channels, middle_channels, kernel_size, stride=stride, padding=padding)\n self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001)\n self.act_2 = nn.ReLU()\n self.separable_2 = SeparableConv2d(\n middle_channels, out_channels, kernel_size, stride=1, padding=padding)\n self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.act_1(x)\n x = self.separable_1(x)\n x = self.bn_sep_1(x)\n x = self.act_2(x)\n x = self.separable_2(x)\n x = self.bn_sep_2(x)\n return x\n\n\nclass ActConvBn(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''):\n super(ActConvBn, self).__init__()\n self.act = nn.ReLU()\n self.conv = create_conv2d(\n in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.act(x)\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n\nclass FactorizedReduction(nn.Module):\n\n def __init__(self, in_channels, 
out_channels, padding=''):\n super(FactorizedReduction, self).__init__()\n self.act = nn.ReLU()\n self.path_1 = nn.Sequential(OrderedDict([\n ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),\n ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),\n ]))\n self.path_2 = nn.Sequential(OrderedDict([\n ('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift\n ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),\n ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),\n ]))\n self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.act(x)\n x_path1 = self.path_1(x)\n x_path2 = self.path_2(x)\n out = self.final_path_bn(torch.cat([x_path1, x_path2], 1))\n return out\n\n\nclass CellBase(nn.Module):\n\n def cell_forward(self, x_left, x_right):\n x_comb_iter_0_left = self.comb_iter_0_left(x_left)\n x_comb_iter_0_right = self.comb_iter_0_right(x_left)\n x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right\n\n x_comb_iter_1_left = self.comb_iter_1_left(x_right)\n x_comb_iter_1_right = self.comb_iter_1_right(x_right)\n x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right\n\n x_comb_iter_2_left = self.comb_iter_2_left(x_right)\n x_comb_iter_2_right = self.comb_iter_2_right(x_right)\n x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right\n\n x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2)\n x_comb_iter_3_right = self.comb_iter_3_right(x_right)\n x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right\n\n x_comb_iter_4_left = self.comb_iter_4_left(x_left)\n if self.comb_iter_4_right is not None:\n x_comb_iter_4_right = self.comb_iter_4_right(x_right)\n else:\n x_comb_iter_4_right = x_right\n x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right\n\n x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)\n return x_out\n\n\nclass CellStem0(CellBase):\n\n def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):\n super(CellStem0, self).__init__()\n self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type)\n\n self.comb_iter_0_left = BranchSeparables(\n in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type)\n self.comb_iter_0_right = nn.Sequential(OrderedDict([\n ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)),\n ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)),\n ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)),\n ]))\n\n self.comb_iter_1_left = BranchSeparables(\n out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type)\n self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type)\n\n self.comb_iter_2_left = BranchSeparables(\n out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type)\n self.comb_iter_2_right = BranchSeparables(\n out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type)\n\n self.comb_iter_3_left = BranchSeparables(\n out_chs_right, out_chs_right, kernel_size=3, padding=pad_type)\n self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type)\n\n self.comb_iter_4_left = BranchSeparables(\n in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type)\n self.comb_iter_4_right = ActConvBn(\n out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type)\n\n def forward(self, x_left):\n x_right = self.conv_1x1(x_left)\n x_out = 
self.cell_forward(x_left, x_right)\n return x_out\n\n\nclass Cell(CellBase):\n\n def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type='',\n is_reduction=False, match_prev_layer_dims=False):\n super(Cell, self).__init__()\n\n # If `is_reduction` is set to `True` stride 2 is used for\n # convolution and pooling layers to reduce the spatial size of\n # the output of a cell approximately by a factor of 2.\n stride = 2 if is_reduction else 1\n\n # If `match_prev_layer_dimensions` is set to `True`\n # `FactorizedReduction` is used to reduce the spatial size\n # of the left input of a cell approximately by a factor of 2.\n self.match_prev_layer_dimensions = match_prev_layer_dims\n if match_prev_layer_dims:\n self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type)\n else:\n self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)\n self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type)\n\n self.comb_iter_0_left = BranchSeparables(\n out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type)\n self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type)\n\n self.comb_iter_1_left = BranchSeparables(\n out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type)\n self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type)\n\n self.comb_iter_2_left = BranchSeparables(\n out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type)\n self.comb_iter_2_right = BranchSeparables(\n out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type)\n\n self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3)\n self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type)\n\n self.comb_iter_4_left = BranchSeparables(\n out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type)\n if is_reduction:\n self.comb_iter_4_right = ActConvBn(\n out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type)\n else:\n self.comb_iter_4_right = None\n\n def forward(self, x_left, x_right):\n x_left = self.conv_prev_1x1(x_left)\n x_right = self.conv_1x1(x_right)\n x_out = self.cell_forward(x_left, x_right)\n return x_out\n\n\nclass PNASNet5Large(nn.Module):\n def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg', pad_type=''):\n super(PNASNet5Large, self).__init__()\n self.num_classes = num_classes\n self.drop_rate = drop_rate\n self.num_features = 4320\n assert output_stride == 32\n\n self.conv_0 = ConvBnAct(\n in_chans, 96, kernel_size=3, stride=2, padding=0,\n norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False)\n\n self.cell_stem_0 = CellStem0(\n in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type)\n\n self.cell_stem_1 = Cell(\n in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type,\n match_prev_layer_dims=True, is_reduction=True)\n self.cell_0 = Cell(\n in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type,\n match_prev_layer_dims=True)\n self.cell_1 = Cell(\n in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)\n self.cell_2 = Cell(\n in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)\n self.cell_3 = Cell(\n in_chs_left=1080, out_chs_left=216, 
in_chs_right=1080, out_chs_right=216, pad_type=pad_type)\n\n self.cell_4 = Cell(\n in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type,\n is_reduction=True)\n self.cell_5 = Cell(\n in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type,\n match_prev_layer_dims=True)\n self.cell_6 = Cell(\n in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type)\n self.cell_7 = Cell(\n in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type)\n\n self.cell_8 = Cell(\n in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type,\n is_reduction=True)\n self.cell_9 = Cell(\n in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type,\n match_prev_layer_dims=True)\n self.cell_10 = Cell(\n in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type)\n self.cell_11 = Cell(\n in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type)\n self.act = nn.ReLU()\n self.feature_info = [\n dict(num_chs=96, reduction=2, module='conv_0'),\n dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'),\n dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'),\n dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'),\n dict(num_chs=4320, reduction=32, module='act'),\n ]\n\n self.global_pool, self.last_linear = create_classifier(\n self.num_features, self.num_classes, pool_type=global_pool)\n\n def get_classifier(self):\n return self.last_linear\n\n def reset_classifier(self, num_classes, global_pool='avg'):\n self.num_classes = num_classes\n self.global_pool, self.last_linear = create_classifier(\n self.num_features, self.num_classes, pool_type=global_pool)\n\n def forward_features(self, x):\n x_conv_0 = self.conv_0(x)\n x_stem_0 = self.cell_stem_0(x_conv_0)\n x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0)\n x_cell_0 = self.cell_0(x_stem_0, x_stem_1)\n x_cell_1 = self.cell_1(x_stem_1, x_cell_0)\n x_cell_2 = self.cell_2(x_cell_0, x_cell_1)\n x_cell_3 = self.cell_3(x_cell_1, x_cell_2)\n x_cell_4 = self.cell_4(x_cell_2, x_cell_3)\n x_cell_5 = self.cell_5(x_cell_3, x_cell_4)\n x_cell_6 = self.cell_6(x_cell_4, x_cell_5)\n x_cell_7 = self.cell_7(x_cell_5, x_cell_6)\n x_cell_8 = self.cell_8(x_cell_6, x_cell_7)\n x_cell_9 = self.cell_9(x_cell_7, x_cell_8)\n x_cell_10 = self.cell_10(x_cell_8, x_cell_9)\n x_cell_11 = self.cell_11(x_cell_9, x_cell_10)\n x = self.act(x_cell_11)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.global_pool(x)\n if self.drop_rate > 0:\n x = F.dropout(x, self.drop_rate, training=self.training)\n x = self.last_linear(x)\n return x\n\n\ndef _create_pnasnet(variant, pretrained=False, **kwargs):\n return build_model_with_cfg(\n PNASNet5Large, variant, pretrained,\n default_cfg=default_cfgs[variant],\n feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model\n **kwargs)\n\n\n@register_model\ndef pnasnet5large(pretrained=False, **kwargs):\n r\"\"\"PNASNet-5 model architecture from the\n `\"Progressive Neural Architecture Search\"\n <https://arxiv.org/abs/1712.00559>`_ paper.\n \"\"\"\n model_kwargs = dict(pad_type='same', **kwargs)\n return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs)\n",
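Hedged usage sketch: when this file lives inside the timm package, the registered `pnasnet5large` entry point can be created through timm's model factory. Assumes `timm` and `torch` are installed; `pretrained=False` avoids the weight download:

import timm
import torch

model = timm.create_model('pnasnet5large', pretrained=False, num_classes=10)
model.eval()
x = torch.randn(1, 3, 331, 331)  # input_size from default_cfgs above
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 10])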
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .utils import weight_reduce_loss\n\n\ndef cross_entropy(pred,\n label,\n weight=None,\n reduction='mean',\n avg_factor=None,\n class_weight=None):\n \"\"\"Calculate the CrossEntropy loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, C), C is the number\n of classes.\n label (torch.Tensor): The learning label of the prediction.\n weight (torch.Tensor, optional): Sample-wise loss weight.\n reduction (str, optional): The method used to reduce the loss.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n class_weight (list[float], optional): The weight for each class.\n\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n # element-wise losses\n loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')\n\n # apply weights and do the reduction\n if weight is not None:\n weight = weight.float()\n loss = weight_reduce_loss(\n loss, weight=weight, reduction=reduction, avg_factor=avg_factor)\n\n return loss\n\n\ndef _expand_onehot_labels(labels, label_weights, label_channels):\n bin_labels = labels.new_full((labels.size(0), label_channels), 0)\n inds = torch.nonzero(\n (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()\n if inds.numel() > 0:\n bin_labels[inds, labels[inds]] = 1\n\n if label_weights is None:\n bin_label_weights = None\n else:\n bin_label_weights = label_weights.view(-1, 1).expand(\n label_weights.size(0), label_channels)\n\n return bin_labels, bin_label_weights\n\n\ndef binary_cross_entropy(pred,\n label,\n weight=None,\n reduction='mean',\n avg_factor=None,\n class_weight=None):\n \"\"\"Calculate the binary CrossEntropy loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, 1).\n label (torch.Tensor): The learning label of the prediction.\n weight (torch.Tensor, optional): Sample-wise loss weight.\n reduction (str, optional): The method used to reduce the loss.\n Options are \"none\", \"mean\" and \"sum\".\n avg_factor (int, optional): Average factor that is used to average\n the loss. 
Defaults to None.\n class_weight (list[float], optional): The weight for each class.\n\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n if pred.dim() != label.dim():\n label, weight = _expand_onehot_labels(label, weight, pred.size(-1))\n\n # weighted element-wise losses\n if weight is not None:\n weight = weight.float()\n loss = F.binary_cross_entropy_with_logits(\n pred, label.float(), pos_weight=class_weight, reduction='none')\n # do the reduction for the weighted loss\n loss = weight_reduce_loss(\n loss, weight, reduction=reduction, avg_factor=avg_factor)\n\n return loss\n\n\ndef mask_cross_entropy(pred,\n target,\n label,\n reduction='mean',\n avg_factor=None,\n class_weight=None):\n \"\"\"Calculate the CrossEntropy loss for masks.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, C), C is the number\n of classes.\n target (torch.Tensor): The learning label of the prediction.\n label (torch.Tensor): ``label`` indicates the class label of the mask's\n corresponding object. It is used to select the mask of the class\n to which the object belongs when the mask prediction is not\n class-agnostic.\n reduction (str, optional): The method used to reduce the loss.\n Options are \"none\", \"mean\" and \"sum\".\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n class_weight (list[float], optional): The weight for each class.\n\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n # TODO: handle these two reserved arguments\n assert reduction == 'mean' and avg_factor is None\n num_rois = pred.size()[0]\n inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)\n pred_slice = pred[inds, label].squeeze(1)\n return F.binary_cross_entropy_with_logits(\n pred_slice, target, weight=class_weight, reduction='mean')[None]\n\n\[email protected]_module()\nclass CrossEntropyLoss(nn.Module):\n\n def __init__(self,\n use_sigmoid=False,\n use_mask=False,\n reduction='mean',\n class_weight=None,\n loss_weight=1.0):\n \"\"\"CrossEntropyLoss.\n\n Args:\n use_sigmoid (bool, optional): Whether the prediction uses sigmoid\n instead of softmax. Defaults to False.\n use_mask (bool, optional): Whether to use mask cross entropy loss.\n Defaults to False.\n reduction (str, optional): The method used to reduce the loss.\n Options are \"none\", \"mean\" and \"sum\". Defaults to 'mean'.\n class_weight (list[float], optional): Weight of each class.\n Defaults to None.\n loss_weight (float, optional): Weight of the loss. Defaults to 1.0.\n \"\"\"\n super(CrossEntropyLoss, self).__init__()\n assert (use_sigmoid is False) or (use_mask is False)\n self.use_sigmoid = use_sigmoid\n self.use_mask = use_mask\n self.reduction = reduction\n self.loss_weight = loss_weight\n self.class_weight = class_weight\n\n if self.use_sigmoid:\n self.cls_criterion = binary_cross_entropy\n elif self.use_mask:\n self.cls_criterion = mask_cross_entropy\n else:\n self.cls_criterion = cross_entropy\n\n def forward(self,\n cls_score,\n label,\n weight=None,\n avg_factor=None,\n reduction_override=None,\n **kwargs):\n \"\"\"Forward function.\n\n Args:\n cls_score (torch.Tensor): The prediction.\n label (torch.Tensor): The learning label of the prediction.\n weight (torch.Tensor, optional): Sample-wise loss weight.\n avg_factor (int, optional): Average factor that is used to average\n the loss. Defaults to None.\n reduction_override (str, optional): The reduction method used to\n override ``self.reduction`` for this call. Options are \"none\",\n \"mean\" and \"sum\".\n Returns:\n torch.Tensor: The calculated loss\n \"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n if self.class_weight is not None:\n class_weight = cls_score.new_tensor(self.class_weight, device=cls_score.device)\n else:\n class_weight = None\n loss_cls = self.loss_weight * self.cls_criterion(\n cls_score,\n label,\n weight,\n class_weight=class_weight,\n reduction=reduction,\n avg_factor=avg_factor,\n **kwargs)\n return loss_cls\n",
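A torch-only sketch of what the plain `cross_entropy` path above computes: per-element losses with a class weight, scaled by a per-sample weight and then mean-reduced. The manual reduction assumes `weight_reduce_loss` takes a plain mean over all elements when reduction='mean' and avg_factor is None; all tensors are toy data:

import torch
import torch.nn.functional as F

pred = torch.randn(4, 3)                            # (N, C) logits
label = torch.tensor([0, 2, 1, 2])
class_weight = torch.tensor([1.0, 2.0, 0.5])        # per-class weight
sample_weight = torch.tensor([1.0, 1.0, 0.0, 1.0])  # mask out sample 2

loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
loss = (loss * sample_weight).mean()                # assumed 'mean' reduction
print(loss)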
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\n\nimport apex\nimport numpy as np\nimport torch.npu\nfrom apex import amp\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom pthtar2onnx import convert\nimport models\nfrom models import resnet_0_6_0\n\nCALCULATE_DEVICE = \"npu:0\"\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n 
help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--npu', default=None, type=int,\n help='NPU id to use.')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\nparser.add_argument('--device', default='npu', type=str, help='npu or gpu')\nparser.add_argument('--addr', default='10.136.181.115',\n type=str, help='master addr')\nparser.add_argument('--amp', default=False, action='store_true',\n help='use amp to train the model')\nparser.add_argument('--warm_up_epochs', default=0, type=int,\n help='warm up')\nparser.add_argument('--loss-scale', default=1024., type=float,\n help='loss scale using in amp, default -1 means dynamic')\nparser.add_argument('--opt-level', default='O2', type=str,\n help='loss scale using in amp, default -1 means dynamic')\nparser.add_argument('--prof', default=False, action='store_true',\n help='use profiling to evaluate the performance of model')\nparser.add_argument('--save_path', default='', type=str,\n help='path to save models')\n# modelarts modification\nparser.add_argument('--train_url',\n default='',\n type=str,\n help=\"setting dir of training output\")\nparser.add_argument('--data_url',\n metavar='DIR',\n default='',\n help='path to dataset')\n\nparser.add_argument('--model_url',\n metavar='DIR',\n default='',\n help='path to pretrained model')\nparser.add_argument('--onnx', default=True, action='store_true',\n help=\"convert pth model to onnx\")\n\n\ncur_step = 0\nCACHE_TRAINING_URL = \"/cache/training/\"\nCACHE_DATA_URL = \"/cache/data_url\"\nCACHE_MODEL_URL = \"/cache/model\"\n\nbest_acc1 = 0\n\n\ndef main():\n args = parser.parse_args()\n global CALCULATE_DEVICE\n CALCULATE_DEVICE = \"npu:{}\".format(args.npu)\n if 'npu' in CALCULATE_DEVICE:\n torch.npu.set_device(CALCULATE_DEVICE)\n if args.data_url:\n import moxing as mox\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. 
This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n ###### modify npu_p1 1######\n args.gpu = None\n ###### modify npu_p1 1 end ######\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n ###### modify 8 ######\n if args.device == 'npu':\n dist.init_process_group(backend=args.dist_backend, # init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n else:\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n ###### modify 8 end ######\n # create model\n if args.pretrained:\n print(\"=> using pre-trained model wide_resnet101_2\")\n model = resnet_0_6_0.wide_resnet101_2()\n print(\"loading model of yours...\")\n model_path = \"./checkpoint.pth.tar\"\n if args.model_url:\n real_path = CACHE_MODEL_URL\n if not os.path.exists(real_path):\n os.makedirs(real_path)\n mox.file.copy_parallel(args.model_url, real_path)\n print(\"training data finish copy to %s.\" % real_path)\n model_path = os.path.join(CACHE_MODEL_URL, 'checkpoint.pth.tar')\n pretrained_dict = torch.load(model_path, map_location=\"cpu\")[\"state_dict\"]\n model.load_state_dict({k.replace('module.', ''): v for k, v in pretrained_dict.items()})\n if \"fc.weight\" in pretrained_dict:\n pretrained_dict.pop('fc.weight')\n pretrained_dict.pop('fc.bias')\n for param in model.parameters():\n param.requires_grad = False\n model.fc = nn.Linear(2048,1000)\n else:\n print(\"=> creating model wide_resnet101_2\")\n model = resnet_0_6_0.wide_resnet101_2()\n\n # if not torch.cuda.is_available():\n # print('using CPU, this will be slow')\n # elif args.distributed:\n ###### modify npu_p1 2######\n if args.distributed:\n ###### modify npu_p1 2 end ######\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will 
divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n # model = torch.nn.DataParallel(model).cuda()\n ###### modify npu_p1 3######\n model = model.to(CALCULATE_DEVICE)\n ###### modify npu_p1 3 end ######\n\n # define loss function (criterion) and optimizer\n # criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n ############## npu modify 4 begin #############\n # tranfer the dataset to NPU to compute\n criterion = nn.CrossEntropyLoss().to(CALCULATE_DEVICE)\n ############## npu modify 4 end #############\n optimizer = apex.optimizers.NpuFusedSGD(model.parameters(), args.lr,\n momentum=args.momentum,\n nesterov=True,\n weight_decay=args.weight_decay)\n ###### modify 1 ######\n if args.amp:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=args.opt_level, loss_scale=args.loss_scale)\n ###### modify 1 end ######\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n if args.data_url:\n real_path = CACHE_DATA_URL\n if not os.path.exists(real_path):\n os.makedirs(real_path)\n mox.file.copy_parallel(args.data_url, real_path)\n print(\"training data finish copy to %s.\" % real_path)\n args.data = real_path\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n ###### modify 7 ######\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(\n train_sampler is None),\n num_workers=args.workers, pin_memory=False, sampler=train_sampler, drop_last=True)\n ###### modify 7 end #######\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion, args)\n return\n ###### modify 3 ######\n 
if args.prof:\n profiling(train_loader, model, criterion, optimizer, args)\n return\n ###### modify 3 end ######\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node)\n\n # evaluate on validation set\n acc1 = validate(val_loader, model, criterion, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n }, is_best)\n if args.train_url:\n mox.file.copy_parallel(CACHE_TRAINING_URL, args.train_url)\n\n\ndef profiling(data_loader, model, criterion, optimizer, args):\n # switch to train mode\n model.train()\n\n def update(model, images, target, optimizer):\n output = model(images)\n loss = criterion(output, target)\n if args.amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optimizer.zero_grad()\n optimizer.step()\n\n for step, (images, target) in enumerate(data_loader):\n if args.device == 'npu':\n # loc = 'npu:{}'.format(args.gpu)\n loc = CALCULATE_DEVICE\n images = images.to(loc, non_blocking=True).to(torch.float)\n target = target.to(torch.int32).to(loc, non_blocking=True)\n else:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n if step < 5:\n update(model, images, target, optimizer)\n else:\n if args.device == 'npu':\n with torch.autograd.profiler.profile(use_npu=True) as prof:\n update(model, images, target, optimizer)\n else:\n with torch.autograd.profiler.profile(use_cuda=True) as prof:\n update(model, images, target, optimizer)\n break\n\n prof.export_chrome_trace(\"output.prof\")\n\ndef train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n # if torch.cuda.is_available():\n # target = target.cuda(args.gpu, non_blocking=True)\n ############## npu modify 5 begin #############\n # transfer the dataset to NPU to compute and modify the type of target\n if 'npu' in CALCULATE_DEVICE:\n target = target.to(torch.int32)\n images, target = images.to(CALCULATE_DEVICE, non_blocking=True), target.to(CALCULATE_DEVICE, non_blocking=True)\n ############## npu modify 5 end #############\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))# pylint: disable=unbalanced-tuple-unpacking\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], 
images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n ###### modify 2 ######\n if args.amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n ###### modify 2 end ######\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n ###### modify 4 ######\n if i % args.print_freq == 0:\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n progress.display(i)\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n # print(\"[npu id:\", args.gpu, \"]\", \"batch_size:\", ngpus_per_node * args.batch_size,\n # 'Time: {:.3f}'.format(batch_time.avg), '* FPS@all {:.3f}'.format(\n # args.batch_size / batch_time.avg))\n if batch_time.avg:\n print(\"[npu id:\", CALCULATE_DEVICE, \"]\", \"batch_size:\", args.world_size * args.batch_size,\n 'Time: {:.3f}'.format(batch_time.avg), '* FPS@all {:.3f}'.format(\n args.batch_size * args.world_size / batch_time.avg))\n ###### modify 4 end ######\n\n\ndef validate(val_loader, model, criterion, args):\n ###### modify 5 ######\n batch_time = AverageMeter('Time', ':6.3f', start_count_index= 5)\n ###### modify 5 end ######\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.device == 'npu':\n loc = CALCULATE_DEVICE\n images = images.to(loc).to(torch.float)\n if args.device == 'npu':\n loc = CALCULATE_DEVICE\n target = target.to(torch.int32).to(loc, non_blocking=True)\n # compute output\n\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))# pylint: disable=unbalanced-tuple-unpacking\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n args = parser.parse_args()\n filename = os.path.join(args.save_path, filename)\n torch.save(state, filename)\n path_best = os.path.join(args.save_path, 'model_best.pth.tar')\n if is_best:\n shutil.copyfile(filename, path_best)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=':f', start_count_index=2):\n self.val = None\n self.N = None\n self.avg = None\n self.name = name\n self.fmt = fmt\n self.reset()\n self.start_count_index = start_count_index\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n if self.count == 0:\n self.N = n\n\n self.val = val\n self.count += n\n if self.count > (self.start_count_index * self.N):\n self.sum += val * n\n self.avg = self.sum / (self.count - self.start_count_index * self.N)\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + 
'} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by cosine method\"\"\"\n\n if args.warm_up_epochs > 0 and epoch < args.warm_up_epochs:\n lr = args.lr * ((epoch + 1) / (args.warm_up_epochs + 1))\n else:\n alpha = 0\n cosine_decay = 0.5 * (\n 1 + np.cos(np.pi * (epoch - args.warm_up_epochs) / (args.epochs - args.warm_up_epochs)))\n decayed = (1 - alpha) * cosine_decay + alpha\n lr = args.lr * decayed\n\n print(\"=> Epoch[%d] Setting lr: %.4f\" % (epoch, lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n ############## npu modify 6 begin #############\n if 'npu' in CALCULATE_DEVICE:\n torch.npu.set_device(CALCULATE_DEVICE)\n ############## npu modify 6 begin #############\n main()\n",
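Standalone sketch of the warm-up + cosine schedule implemented by `adjust_learning_rate` above, so the curve can be inspected without a training run. `lr_at` is a hypothetical helper; the base LR and epoch counts are example values:

import numpy as np

def lr_at(epoch, base_lr=0.1, epochs=90, warm_up_epochs=5):
    if warm_up_epochs > 0 and epoch < warm_up_epochs:
        return base_lr * ((epoch + 1) / (warm_up_epochs + 1))
    cosine_decay = 0.5 * (1 + np.cos(
        np.pi * (epoch - warm_up_epochs) / (epochs - warm_up_epochs)))
    return base_lr * cosine_decay  # alpha = 0 in the script above

for epoch in (0, 4, 5, 45, 89):
    print(epoch, round(lr_at(epoch), 5))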
"# Copyright [yyyy] [name of copyright owner]\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\" Dataset parser interface that wraps TFDS datasets\n\nWraps many (most?) TFDS image-classification datasets\nfrom https://github.com/tensorflow/datasets\nhttps://www.tensorflow.org/datasets/catalog/overview#image_classification\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport os\nimport io\nimport math\nimport torch\nimport torch.distributed as dist\nfrom PIL import Image\n\ntry:\n import tensorflow as tf\n tf.config.set_visible_devices([], 'GPU') # Hands off my GPU! (or pip install tensorflow-cpu)\n import tensorflow_datasets as tfds\nexcept ImportError as e:\n print(e)\n print(\"Please install tensorflow_datasets package `pip install tensorflow-datasets`.\")\n exit(1)\nfrom .parser import Parser\n\n\nMAX_TP_SIZE = 8 # maximum TF threadpool size, only doing jpeg decodes and queuing activities\nSHUFFLE_SIZE = 20480 # samples to shuffle in DS queue\nPREFETCH_SIZE = 2048 # samples to prefetch\n\n\ndef even_split_indices(split, n, num_samples):\n partitions = [round(i * num_samples / n) for i in range(n + 1)]\n return [f\"{split}[{partitions[i]}:{partitions[i+1]}]\" for i in range(n)]\n\n\nclass ParserTfds(Parser):\n \"\"\" Wrap Tensorflow Datasets for use in PyTorch\n\n There several things to be aware of:\n * To prevent excessive samples being dropped per epoch w/ distributed training or multiplicity of\n dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last\n https://github.com/pytorch/pytorch/issues/33413\n * With PyTorch IterableDatasets, each worker in each replica operates in isolation, the final batch\n from each worker could be a different size. For training this is worked around by option above, for\n validation extra samples are inserted iff distributed mode is enabled so that the batches being reduced\n across replicas are of same size. This will slightly alter the results, distributed validation will not be\n 100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse\n since there are up to N * J extra samples with IterableDatasets.\n * The sharding (splitting of dataset into TFRecord) files imposes limitations on the number of\n replicas and dataloader workers you can use. For really small datasets that only contain a few shards\n you may have to train non-distributed w/ 1-2 dataloader workers. This is likely not a huge concern as the\n benefit of distributed training or fast dataloading should be much less for small datasets.\n * This wrapper is currently configured to return individual, decompressed image samples from the TFDS\n dataset. The augmentation (transforms) and batching is still done in PyTorch. 
It would be possible\n to specify TF augmentation fn and return augmented batches w/ some modifications to other downstream\n components.\n\n \"\"\"\n def __init__(self, root, name, split='train', shuffle=False, is_training=False, batch_size=None, repeats=0):\n super().__init__()\n self.root = root\n self.split = split\n self.shuffle = shuffle\n self.is_training = is_training\n if self.is_training:\n assert batch_size is not None,\\\n \"Must specify batch_size in training mode for reasonable behaviour w/ TFDS wrapper\"\n self.batch_size = batch_size\n self.repeats = repeats\n self.subsplit = None\n\n self.builder = tfds.builder(name, data_dir=root)\n # NOTE: please use tfds command line app to download & prepare datasets, I don't want to call\n # download_and_prepare() by default here as it's caused issues generating unwanted paths.\n self.num_samples = self.builder.info.splits[split].num_examples\n self.ds = None # initialized lazily on each dataloader worker process\n\n self.worker_info = None\n self.dist_rank = 0\n self.dist_num_replicas = 1\n if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:\n self.dist_rank = dist.get_rank()\n self.dist_num_replicas = dist.get_world_size()\n\n def _lazy_init(self):\n \"\"\" Lazily initialize the dataset.\n\n This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that\n will be using the dataset instance. The __init__ method is called on the main process,\n this will be called in a dataloader worker process.\n\n NOTE: There will be problems if you try to re-use this dataset across different loader/worker\n instances once it has been initialized. Do not call any dataset methods that can call _lazy_init\n before it is passed to dataloader.\n \"\"\"\n worker_info = torch.utils.data.get_worker_info()\n\n # setup input context to split dataset across distributed processes\n split = self.split\n num_workers = 1\n if worker_info is not None:\n self.worker_info = worker_info\n num_workers = worker_info.num_workers\n global_num_workers = self.dist_num_replicas * num_workers\n worker_id = worker_info.id\n\n # FIXME I need to spend more time figuring out the best way to distribute/split data across\n # combo of distributed replicas + dataloader worker processes\n \"\"\"\n InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used.\n My understanding is that using split, the underling TFRecord files will shuffle (shuffle_files=True)\n between the splits each iteration, but that understanding could be wrong.\n Possible split options include:\n * InputContext for both distributed & worker processes (current)\n * InputContext for distributed and sub-splits for worker processes\n * sub-splits for both\n \"\"\"\n # split_size = self.num_samples // num_workers\n # start = worker_id * split_size\n # if worker_id == num_workers - 1:\n # split = split + '[{}:]'.format(start)\n # else:\n # split = split + '[{}:{}]'.format(start, start + split_size)\n if not self.is_training and '[' not in self.split:\n # If not training, and split doesn't define a subsplit, manually split the dataset\n # for more even samples / worker\n self.subsplit = even_split_indices(self.split, global_num_workers, self.num_samples)[\n self.dist_rank * num_workers + worker_id]\n\n if self.subsplit is None:\n input_context = tf.distribute.InputContext(\n num_input_pipelines=self.dist_num_replicas * num_workers,\n input_pipeline_id=self.dist_rank * num_workers + worker_id,\n 
num_replicas_in_sync=self.dist_num_replicas # FIXME does this arg have any impact?\n )\n else:\n input_context = None\n\n read_config = tfds.ReadConfig(\n shuffle_seed=42,\n shuffle_reshuffle_each_iteration=True,\n input_context=input_context)\n ds = self.builder.as_dataset(\n split=self.subsplit or self.split, shuffle_files=self.shuffle, read_config=read_config)\n # avoid overloading threading w/ combo fo TF ds threads + PyTorch workers\n options = tf.data.Options()\n options.experimental_threading.private_threadpool_size = max(1, MAX_TP_SIZE // num_workers)\n options.experimental_threading.max_intra_op_parallelism = 1\n ds = ds.with_options(options)\n if self.is_training or self.repeats > 1:\n # to prevent excessive drop_last batch behaviour w/ IterableDatasets\n # see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading\n ds = ds.repeat() # allow wrap around and break iteration manually\n if self.shuffle:\n ds = ds.shuffle(min(self.num_samples, SHUFFLE_SIZE) // self._num_pipelines, seed=0)\n ds = ds.prefetch(min(self.num_samples // self._num_pipelines, PREFETCH_SIZE))\n self.ds = tfds.as_numpy(ds)\n\n def __iter__(self):\n if self.ds is None:\n self._lazy_init()\n # compute a rounded up sample count that is used to:\n # 1. make batches even cross workers & replicas in distributed validation.\n # This adds extra samples and will slightly alter validation results.\n # 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size\n # batches are produced (underlying tfds iter wraps around)\n target_sample_count = math.ceil(max(1, self.repeats) * self.num_samples / self._num_pipelines)\n if self.is_training:\n # round up to nearest batch_size per worker-replica\n target_sample_count = math.ceil(target_sample_count / self.batch_size) * self.batch_size\n sample_count = 0\n for sample in self.ds:\n img = Image.fromarray(sample['image'], mode='RGB')\n yield img, sample['label']\n sample_count += 1\n if self.is_training and sample_count >= target_sample_count:\n # Need to break out of loop when repeat() is enabled for training w/ oversampling\n # this results in extra samples per epoch but seems more desirable than dropping\n # up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes)\n break\n if not self.is_training and self.dist_num_replicas and 0 < sample_count < target_sample_count:\n # Validation batch padding only done for distributed training where results are reduced across nodes.\n # For single process case, it won't matter if workers return different batch sizes.\n # FIXME if using input_context or % based subsplits, sample count can vary by more than +/- 1 and this\n # approach is not optimal\n yield img, sample['label'] # yield prev sample again\n sample_count += 1\n\n @property\n def _num_workers(self):\n return 1 if self.worker_info is None else self.worker_info.num_workers\n\n @property\n def _num_pipelines(self):\n return self._num_workers * self.dist_num_replicas\n\n def __len__(self):\n # this is just an estimate and does not factor in extra samples added to pad batches based on\n # complete worker & replica info (not available until init in dataloader).\n return math.ceil(max(1, self.repeats) * self.num_samples / self.dist_num_replicas)\n\n def _filename(self, index, basename=False, absolute=False):\n assert False, \"Not supported\" # no random access to samples\n\n def filenames(self, basename=False, absolute=False):\n \"\"\" Return all filenames in dataset, overrides 
base\"\"\"\n if self.ds is None:\n self._lazy_init()\n names = []\n for sample in self.ds:\n if len(names) > self.num_samples:\n break # safety for ds.repeat() case\n if 'file_name' in sample:\n name = sample['file_name']\n elif 'filename' in sample:\n name = sample['filename']\n elif 'id' in sample:\n name = sample['id']\n else:\n assert False, \"No supported name field present\"\n names.append(name)\n return names\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"BERT models that are compatible with TF 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tf2_common.modeling import tf_utils\nimport configs\nfrom modeling import losses\nfrom modeling import networks\nfrom modeling.networks import bert_classifier\nfrom modeling.networks import bert_pretrainer\nfrom modeling.networks import bert_span_labeler\nfrom modeling.layers import bert_dropout\nfrom metrics_sparse_int32 import sparse_categorical_accuracy_int32\n\n\nclass BertPretrainLossAndMetricLayer(tf.keras.layers.Layer):\n \"\"\"Returns layer that computes custom loss and metrics for pretraining.\"\"\"\n\n def __init__(self, vocab_size, **kwargs):\n super(BertPretrainLossAndMetricLayer, self).__init__(**kwargs)\n self._vocab_size = vocab_size\n self.config = {\n 'vocab_size': vocab_size,\n }\n\n def _add_metrics(self, lm_output, lm_labels, lm_label_weights,\n lm_example_loss, sentence_output, sentence_labels,\n next_sentence_loss):\n \"\"\"Adds metrics.\"\"\"\n #masked_lm_accuracy = tf.keras.metrics.sparse_categorical_accuracy(\n # lm_labels, lm_output)\n masked_lm_accuracy = sparse_categorical_accuracy_int32(lm_labels, lm_output)\n numerator = tf.reduce_sum(masked_lm_accuracy * lm_label_weights)\n denominator = tf.reduce_sum(lm_label_weights)\n masked_lm_accuracy = numerator / denominator\n masked_lm_sum_loss = tf.reduce_sum(lm_example_loss * lm_label_weights)\n # self.add_metric(\n # masked_lm_accuracy, name='masked_lm_accuracy', aggregation='mean')\n # self.add_metric(lm_example_loss, name='lm_example_loss', aggregation='mean')\n\n #next_sentence_accuracy = tf.keras.metrics.sparse_categorical_accuracy(\n # sentence_labels, sentence_output)\n next_sentence_accuracy = sparse_categorical_accuracy_int32(sentence_labels, sentence_output)\n next_sentence_num = tf.reduce_sum(next_sentence_accuracy)\n next_sentence_denom = tf.size(next_sentence_accuracy)\n # self.add_metric(\n # next_sentence_accuracy,\n # name='next_sentence_accuracy',\n # aggregation='mean')\n # self.add_metric(\n # next_sentence_loss, name='next_sentence_loss', aggregation='mean')\n other_outputs = dict(\n masked_lm_num = numerator,\n masked_lm_denom = denominator,\n masked_lm_accuracy = masked_lm_accuracy,\n lm_example_loss = lm_example_loss,\n masked_lm_sum_loss = masked_lm_sum_loss,\n next_sentence_accuracy = next_sentence_accuracy,\n next_sentence_loss = next_sentence_loss,\n next_sentence_num = next_sentence_num,\n next_sentence_denom = next_sentence_denom)\n return other_outputs\n\n\n def call(self, lm_output, sentence_output, lm_label_ids, lm_label_weights,\n sentence_labels):\n \"\"\"Implements call() for the layer.\"\"\"\n lm_label_weights = tf.cast(lm_label_weights, tf.float32)\n lm_output = tf.cast(lm_output, 
tf.float32)\n sentence_output = tf.cast(sentence_output, tf.float32)\n\n mask_label_loss = losses.weighted_sparse_categorical_crossentropy_loss(\n labels=lm_label_ids, predictions=lm_output, weights=lm_label_weights)\n sentence_loss = losses.weighted_sparse_categorical_crossentropy_loss(\n labels=sentence_labels, predictions=sentence_output)\n loss = mask_label_loss + sentence_loss\n batch_shape = tf.slice(tf.shape(sentence_labels), [0], [1])\n # TODO(hongkuny): Avoids the hack and switches add_loss.\n final_loss = tf.fill(batch_shape, loss)\n\n # TODO(b/122840926): metrics use distribution strategy merge_call() and do\n # not work with tf.function(compile=True). Either fix this issue or move\n # metric aggregation outside the model.\n metric_outputs = self._add_metrics(lm_output, lm_label_ids, lm_label_weights,\n mask_label_loss, sentence_output, sentence_labels,\n sentence_loss)\n return final_loss, metric_outputs\n\n\ndef get_transformer_encoder(bert_config, sequence_length):\n \"\"\"Gets a 'TransformerEncoder' object.\n\n Args:\n bert_config: A 'modeling.BertConfig' or 'modeling.AlbertConfig' object.\n sequence_length: Maximum sequence length of the training data.\n\n Returns:\n A networks.TransformerEncoder object.\n \"\"\"\n kwargs = dict(\n vocab_size=bert_config.vocab_size,\n hidden_size=bert_config.hidden_size,\n num_layers=bert_config.num_hidden_layers,\n num_attention_heads=bert_config.num_attention_heads,\n intermediate_size=bert_config.intermediate_size,\n activation=tf_utils.get_activation(bert_config.hidden_act),\n dropout_rate=bert_config.hidden_dropout_prob,\n attention_dropout_rate=bert_config.attention_probs_dropout_prob,\n sequence_length=sequence_length,\n max_sequence_length=bert_config.max_position_embeddings,\n type_vocab_size=bert_config.type_vocab_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=bert_config.initializer_range))\n assert isinstance(bert_config, configs.BertConfig)\n return networks.TransformerEncoder(**kwargs)\n\n\ndef pretrain_model(bert_config,\n seq_length,\n max_predictions_per_seq,\n initializer=None):\n \"\"\"Returns model to be used for pre-training.\n\n Args:\n bert_config: Configuration that defines the core BERT model.\n seq_length: Maximum sequence length of the training data.\n max_predictions_per_seq: Maximum number of tokens in sequence to mask out\n and use for pretraining.\n initializer: Initializer for weights in BertPretrainer.\n\n Returns:\n Pretraining model as well as core BERT submodel from which to save\n weights after pretraining.\n \"\"\"\n input_word_ids = tf.keras.layers.Input(\n shape=(seq_length,), name='input_word_ids', dtype=tf.int32)\n input_mask = tf.keras.layers.Input(\n shape=(seq_length,), name='input_mask', dtype=tf.int32)\n input_type_ids = tf.keras.layers.Input(\n shape=(seq_length,), name='input_type_ids', dtype=tf.int32)\n masked_lm_positions = tf.keras.layers.Input(\n shape=(max_predictions_per_seq,),\n name='masked_lm_positions',\n dtype=tf.int32)\n masked_lm_ids = tf.keras.layers.Input(\n shape=(max_predictions_per_seq,), name='masked_lm_ids', dtype=tf.int32)\n masked_lm_weights = tf.keras.layers.Input(\n shape=(max_predictions_per_seq,),\n name='masked_lm_weights',\n dtype=tf.int32)\n next_sentence_labels = tf.keras.layers.Input(\n shape=(1,), name='next_sentence_labels', dtype=tf.int32)\n\n transformer_encoder = get_transformer_encoder(bert_config, seq_length)\n if initializer is None:\n initializer = tf.keras.initializers.TruncatedNormal(\n stddev=bert_config.initializer_range)\n 
pretrainer_model = bert_pretrainer.BertPretrainer(\n network=transformer_encoder,\n num_classes=2, # The next sentence prediction label has two classes.\n num_token_predictions=max_predictions_per_seq,\n activation=tf_utils.get_activation(bert_config.hidden_act),\n initializer=initializer,\n output='predictions')\n\n lm_output, sentence_output = pretrainer_model(\n [input_word_ids, input_mask, input_type_ids, masked_lm_positions])\n\n pretrain_loss_layer = BertPretrainLossAndMetricLayer(\n vocab_size=bert_config.vocab_size)\n output_loss = pretrain_loss_layer(lm_output, sentence_output, masked_lm_ids,\n masked_lm_weights, next_sentence_labels)\n keras_model = tf.keras.Model(\n inputs={\n 'input_word_ids': input_word_ids,\n 'input_mask': input_mask,\n 'input_type_ids': input_type_ids,\n 'masked_lm_positions': masked_lm_positions,\n 'masked_lm_ids': masked_lm_ids,\n 'masked_lm_weights': masked_lm_weights,\n 'next_sentence_labels': next_sentence_labels,\n },\n outputs=output_loss)\n return keras_model, transformer_encoder, pretrainer_model\n\n\ndef squad_model(bert_config,\n max_seq_length,\n initializer=None,\n hub_module_url=None,\n hub_module_trainable=True):\n \"\"\"Returns BERT Squad model along with core BERT model to import weights.\n\n Args:\n bert_config: BertConfig, the config defines the core Bert model.\n max_seq_length: integer, the maximum input sequence length.\n initializer: Initializer for the final dense layer in the span labeler.\n Defaulted to TruncatedNormal initializer.\n hub_module_url: TF-Hub path/url to Bert module.\n hub_module_trainable: True to finetune layers in the hub module.\n\n Returns:\n A tuple of (1) keras model that outputs start logits and end logits and\n (2) the core BERT transformer encoder.\n \"\"\"\n if initializer is None:\n initializer = tf.keras.initializers.TruncatedNormal(\n stddev=bert_config.initializer_range)\n if not hub_module_url:\n bert_encoder = get_transformer_encoder(bert_config, max_seq_length)\n return bert_span_labeler.BertSpanLabeler(\n network=bert_encoder, initializer=initializer), bert_encoder\n\n input_word_ids = tf.keras.layers.Input(\n shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')\n input_mask = tf.keras.layers.Input(\n shape=(max_seq_length,), dtype=tf.int32, name='input_mask')\n input_type_ids = tf.keras.layers.Input(\n shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')\n core_model = hub.KerasLayer(hub_module_url, trainable=hub_module_trainable)\n pooled_output, sequence_output = core_model(\n [input_word_ids, input_mask, input_type_ids])\n bert_encoder = tf.keras.Model(\n inputs={\n 'input_word_ids': input_word_ids,\n 'input_mask': input_mask,\n 'input_type_ids': input_type_ids,\n },\n outputs=[sequence_output, pooled_output],\n name='core_model')\n return bert_span_labeler.BertSpanLabeler(\n network=bert_encoder, initializer=initializer), bert_encoder\n\n\ndef classifier_model(bert_config,\n num_labels,\n max_seq_length,\n final_layer_initializer=None,\n hub_module_url=None,\n hub_module_trainable=True):\n \"\"\"BERT classifier model in functional API style.\n\n Construct a Keras model for predicting `num_labels` outputs from an input with\n maximum sequence length `max_seq_length`.\n\n Args:\n bert_config: BertConfig or AlbertConfig, the config defines the core BERT or\n ALBERT model.\n num_labels: integer, the number of classes.\n max_seq_length: integer, the maximum input sequence length.\n final_layer_initializer: Initializer for final dense layer. 
Defaulted\n TruncatedNormal initializer.\n hub_module_url: TF-Hub path/url to Bert module.\n hub_module_trainable: True to finetune layers in the hub module.\n\n Returns:\n Combined prediction model (words, mask, type) -> (one-hot labels)\n BERT sub-model (words, mask, type) -> (bert_outputs)\n \"\"\"\n if final_layer_initializer is not None:\n initializer = final_layer_initializer\n else:\n initializer = tf.keras.initializers.TruncatedNormal(\n stddev=bert_config.initializer_range)\n\n if not hub_module_url:\n bert_encoder = get_transformer_encoder(bert_config, max_seq_length)\n return bert_classifier.BertClassifier(\n bert_encoder,\n num_classes=num_labels,\n dropout_rate=bert_config.hidden_dropout_prob,\n initializer=initializer), bert_encoder\n\n input_word_ids = tf.keras.layers.Input(\n shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')\n input_mask = tf.keras.layers.Input(\n shape=(max_seq_length,), dtype=tf.int32, name='input_mask')\n input_type_ids = tf.keras.layers.Input(\n shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')\n bert_model = hub.KerasLayer(\n hub_module_url, trainable=hub_module_trainable)\n pooled_output, _ = bert_model([input_word_ids, input_mask, input_type_ids])\n #output = tf.keras.layers.Dropout(rate=bert_config.hidden_dropout_prob)(\n # pooled_output)\n output = bert_dropout.Dropout(rate=bert_config.hidden_dropout_prob)(pooled_output)\n\n output = tf.keras.layers.Dense(\n num_labels, kernel_initializer=initializer, name='output')(\n output)\n return tf.keras.Model(\n inputs={\n 'input_word_ids': input_word_ids,\n 'input_mask': input_mask,\n 'input_type_ids': input_type_ids\n },\n outputs=output), bert_model\n",
"# Copyright [yyyy] [name of copyright owner]\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\" Nvidia NovoGrad Optimizer.\nOriginal impl by Nvidia from Jasper example:\n - https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper\nPaper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks`\n - https://arxiv.org/abs/1905.11286\n\"\"\"\n\nimport torch\nfrom torch.optim.optimizer import Optimizer\nimport math\n\n\nclass NvNovoGrad(Optimizer):\n \"\"\"\n Implements Novograd algorithm.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.95, 0.98))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n grad_averaging: gradient averaging\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False)\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8,\n weight_decay=0, grad_averaging=False, amsgrad=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay,\n grad_averaging=grad_averaging,\n amsgrad=amsgrad)\n\n super(NvNovoGrad, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(NvNovoGrad, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsgrad', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Sparse gradients are not supported.')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. 
values\n state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n norm = torch.sum(torch.pow(grad, 2))\n\n if exp_avg_sq == 0:\n exp_avg_sq.copy_(norm)\n else:\n exp_avg_sq.mul_(beta2).add_(1 - beta2, norm)\n\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n grad.div_(denom)\n if group['weight_decay'] != 0:\n grad.add_(group['weight_decay'], p.data)\n if group['grad_averaging']:\n grad.mul_(1 - beta1)\n exp_avg.mul_(beta1).add_(grad)\n\n p.data.add_(-group['lr'], exp_avg)\n\n return loss\n",
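"# A short usage sketch for NvNovoGrad above (toy model and hyperparameters are\n# placeholders; the betas shown are the class defaults).\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nmodel = nn.Linear(16, 4)\noptimizer = NvNovoGrad(model.parameters(), lr=1e-3, betas=(0.95, 0.98), weight_decay=1e-4)\n\nx = torch.randn(8, 16)\ny = torch.randint(0, 4, (8,))\nloss = F.cross_entropy(model(x), y)\noptimizer.zero_grad()\nloss.backward()\noptimizer.step()  # second-moment state is one scalar per parameter tensor (layer-wise norm)\n",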
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"assessment methods\"\"\"\n\nimport numpy as np\n\n\nclass Accuracy():\n \"\"\"Accuracy\"\"\"\n\n def __init__(self):\n self.acc_num = 0\n self.total_num = 0\n\n def update(self, logits, labels):\n labels = labels.asnumpy()\n labels = np.reshape(labels, -1)\n logits = logits.asnumpy()\n logit_id = np.argmax(logits, axis=-1)\n self.acc_num += np.sum(labels == logit_id)\n self.total_num += len(labels)\n\n\nclass F1():\n \"\"\"F1\"\"\"\n\n def __init__(self):\n self.TP = 0\n self.FP = 0\n self.FN = 0\n\n def update(self, logits, labels):\n \"\"\"Update F1 score\"\"\"\n labels = labels.asnumpy()\n labels = np.reshape(labels, -1)\n logits = logits.asnumpy()\n logit_id = np.argmax(logits, axis=-1)\n logit_id = np.reshape(logit_id, -1)\n pos_eva = np.isin(logit_id, [2, 3, 4, 5, 6, 7])\n pos_label = np.isin(labels, [2, 3, 4, 5, 6, 7])\n self.TP += np.sum(pos_eva & pos_label)\n self.FP += np.sum(pos_eva & (~pos_label))\n self.FN += np.sum((~pos_eva) & pos_label)\n print(\"-----------------precision is \", self.TP / (self.TP + self.FP))\n print(\"-----------------recall is \", self.TP / (self.TP + self.FN))\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import build_bbox_coder, multi_apply, multiclass_nms\nfrom mmdet.models.builder import HEADS, build_loss\nfrom mmdet.models.losses import accuracy\n\n\[email protected]_module()\nclass SABLHead(nn.Module):\n \"\"\"Side-Aware Boundary Localization (SABL) for RoI-Head.\n\n Side-Aware features are extracted by conv layers\n with an attention mechanism.\n Boundary Localization with Bucketing and Bucketing Guided Rescoring\n are implemented in BucketingBBoxCoder.\n\n Please refer to https://arxiv.org/abs/1912.04260 for more details.\n\n Args:\n cls_in_channels (int): Input channels of cls RoI feature. \\\n Defaults to 256.\n reg_in_channels (int): Input channels of reg RoI feature. \\\n Defaults to 256.\n roi_feat_size (int): Size of RoI features. Defaults to 7.\n reg_feat_up_ratio (int): Upsample ratio of reg features. \\\n Defaults to 2.\n reg_pre_kernel (int): Kernel of 2D conv layers before \\\n attention pooling. Defaults to 3.\n reg_post_kernel (int): Kernel of 1D conv layers after \\\n attention pooling. Defaults to 3.\n reg_pre_num (int): Number of pre convs. Defaults to 2.\n reg_post_num (int): Number of post convs. Defaults to 1.\n num_classes (int): Number of classes in dataset. Defaults to 80.\n cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.\n reg_offset_out_channels (int): Hidden and output channel \\\n of reg offset branch. Defaults to 256.\n reg_cls_out_channels (int): Hidden and output channel \\\n of reg cls branch. Defaults to 256.\n num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.\n num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0.\n reg_class_agnostic (bool): Class agnostic regresion or not. \\\n Defaults to True.\n norm_cfg (dict): Config of norm layers. Defaults to None.\n bbox_coder (dict): Config of bbox coder. 
Defaults 'BucketingBBoxCoder'.\n loss_cls (dict): Config of classification loss.\n loss_bbox_cls (dict): Config of classification loss for bbox branch.\n loss_bbox_reg (dict): Config of regression loss for bbox branch.\n \"\"\"\n\n def __init__(self,\n num_classes,\n cls_in_channels=256,\n reg_in_channels=256,\n roi_feat_size=7,\n reg_feat_up_ratio=2,\n reg_pre_kernel=3,\n reg_post_kernel=3,\n reg_pre_num=2,\n reg_post_num=1,\n cls_out_channels=1024,\n reg_offset_out_channels=256,\n reg_cls_out_channels=256,\n num_cls_fcs=1,\n num_reg_fcs=0,\n reg_class_agnostic=True,\n norm_cfg=None,\n bbox_coder=dict(\n type='BucketingBBoxCoder',\n num_buckets=14,\n scale_factor=1.7),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n loss_bbox_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n loss_bbox_reg=dict(\n type='SmoothL1Loss', beta=0.1, loss_weight=1.0)):\n super(SABLHead, self).__init__()\n self.cls_in_channels = cls_in_channels\n self.reg_in_channels = reg_in_channels\n self.roi_feat_size = roi_feat_size\n self.reg_feat_up_ratio = int(reg_feat_up_ratio)\n self.num_buckets = bbox_coder['num_buckets']\n assert self.reg_feat_up_ratio // 2 >= 1\n self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio\n assert self.up_reg_feat_size == bbox_coder['num_buckets']\n self.reg_pre_kernel = reg_pre_kernel\n self.reg_post_kernel = reg_post_kernel\n self.reg_pre_num = reg_pre_num\n self.reg_post_num = reg_post_num\n self.num_classes = num_classes\n self.cls_out_channels = cls_out_channels\n self.reg_offset_out_channels = reg_offset_out_channels\n self.reg_cls_out_channels = reg_cls_out_channels\n self.num_cls_fcs = num_cls_fcs\n self.num_reg_fcs = num_reg_fcs\n self.reg_class_agnostic = reg_class_agnostic\n assert self.reg_class_agnostic\n self.norm_cfg = norm_cfg\n\n self.bbox_coder = build_bbox_coder(bbox_coder)\n self.loss_cls = build_loss(loss_cls)\n self.loss_bbox_cls = build_loss(loss_bbox_cls)\n self.loss_bbox_reg = build_loss(loss_bbox_reg)\n\n self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,\n self.cls_in_channels,\n self.roi_feat_size,\n self.cls_out_channels)\n\n self.side_num = int(np.ceil(self.num_buckets / 2))\n\n if self.reg_feat_up_ratio > 1:\n self.upsample_x = nn.ConvTranspose1d(\n reg_in_channels,\n reg_in_channels,\n self.reg_feat_up_ratio,\n stride=self.reg_feat_up_ratio)\n self.upsample_y = nn.ConvTranspose1d(\n reg_in_channels,\n reg_in_channels,\n self.reg_feat_up_ratio,\n stride=self.reg_feat_up_ratio)\n\n self.reg_pre_convs = nn.ModuleList()\n for i in range(self.reg_pre_num):\n reg_pre_conv = ConvModule(\n reg_in_channels,\n reg_in_channels,\n kernel_size=reg_pre_kernel,\n padding=reg_pre_kernel // 2,\n norm_cfg=norm_cfg,\n act_cfg=dict(type='ReLU'))\n self.reg_pre_convs.append(reg_pre_conv)\n\n self.reg_post_conv_xs = nn.ModuleList()\n for i in range(self.reg_post_num):\n reg_post_conv_x = ConvModule(\n reg_in_channels,\n reg_in_channels,\n kernel_size=(1, reg_post_kernel),\n padding=(0, reg_post_kernel // 2),\n norm_cfg=norm_cfg,\n act_cfg=dict(type='ReLU'))\n self.reg_post_conv_xs.append(reg_post_conv_x)\n self.reg_post_conv_ys = nn.ModuleList()\n for i in range(self.reg_post_num):\n reg_post_conv_y = ConvModule(\n reg_in_channels,\n reg_in_channels,\n kernel_size=(reg_post_kernel, 1),\n padding=(reg_post_kernel // 2, 0),\n norm_cfg=norm_cfg,\n act_cfg=dict(type='ReLU'))\n self.reg_post_conv_ys.append(reg_post_conv_y)\n\n self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)\n self.reg_conv_att_y = 
nn.Conv2d(reg_in_channels, 1, 1)\n\n self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)\n self.relu = nn.ReLU(inplace=True)\n\n self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,\n self.reg_in_channels, 1,\n self.reg_cls_out_channels)\n self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,\n self.reg_in_channels, 1,\n self.reg_offset_out_channels)\n self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)\n self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)\n\n def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,\n fc_out_channels):\n in_channels = in_channels * roi_feat_size * roi_feat_size\n branch_fcs = nn.ModuleList()\n for i in range(num_branch_fcs):\n fc_in_channels = (in_channels if i == 0 else fc_out_channels)\n branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))\n return branch_fcs\n\n def init_weights(self):\n for module_list in [\n self.reg_cls_fcs, self.reg_offset_fcs, self.cls_fcs\n ]:\n for m in module_list.modules():\n if isinstance(m, nn.Linear):\n xavier_init(m, distribution='uniform')\n if self.reg_feat_up_ratio > 1:\n kaiming_init(self.upsample_x, distribution='normal')\n kaiming_init(self.upsample_y, distribution='normal')\n\n normal_init(self.reg_conv_att_x, 0, 0.01)\n normal_init(self.reg_conv_att_y, 0, 0.01)\n normal_init(self.fc_reg_offset, 0, 0.001)\n normal_init(self.fc_reg_cls, 0, 0.01)\n normal_init(self.fc_cls, 0, 0.01)\n\n def cls_forward(self, cls_x):\n cls_x = cls_x.view(cls_x.size(0), -1)\n for fc in self.cls_fcs:\n cls_x = self.relu(fc(cls_x))\n cls_score = self.fc_cls(cls_x)\n return cls_score\n\n def attention_pool(self, reg_x):\n \"\"\"Extract direction-specific features fx and fy with attention\n methanism.\"\"\"\n reg_fx = reg_x\n reg_fy = reg_x\n reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()\n reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()\n reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)\n reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)\n reg_fx = (reg_fx * reg_fx_att).sum(dim=2)\n reg_fy = (reg_fy * reg_fy_att).sum(dim=3)\n return reg_fx, reg_fy\n\n def side_aware_feature_extractor(self, reg_x):\n \"\"\"Refine and extract side-aware features without split them.\"\"\"\n for reg_pre_conv in self.reg_pre_convs:\n reg_x = reg_pre_conv(reg_x)\n reg_fx, reg_fy = self.attention_pool(reg_x)\n\n if self.reg_post_num > 0:\n reg_fx = reg_fx.unsqueeze(2)\n reg_fy = reg_fy.unsqueeze(3)\n for i in range(self.reg_post_num):\n reg_fx = self.reg_post_conv_xs[i](reg_fx)\n reg_fy = self.reg_post_conv_ys[i](reg_fy)\n reg_fx = reg_fx.squeeze(2)\n reg_fy = reg_fy.squeeze(3)\n if self.reg_feat_up_ratio > 1:\n reg_fx = self.relu(self.upsample_x(reg_fx))\n reg_fy = self.relu(self.upsample_y(reg_fy))\n reg_fx = torch.transpose(reg_fx, 1, 2)\n reg_fy = torch.transpose(reg_fy, 1, 2)\n return reg_fx.contiguous(), reg_fy.contiguous()\n\n def reg_pred(self, x, offfset_fcs, cls_fcs):\n \"\"\"Predict bucketing esimation (cls_pred) and fine regression (offset\n pred) with side-aware features.\"\"\"\n x_offset = x.view(-1, self.reg_in_channels)\n x_cls = x.view(-1, self.reg_in_channels)\n\n for fc in offfset_fcs:\n x_offset = self.relu(fc(x_offset))\n for fc in cls_fcs:\n x_cls = self.relu(fc(x_cls))\n offset_pred = self.fc_reg_offset(x_offset)\n cls_pred = self.fc_reg_cls(x_cls)\n\n offset_pred = offset_pred.view(x.size(0), -1)\n cls_pred = cls_pred.view(x.size(0), -1)\n\n return offset_pred, cls_pred\n\n def side_aware_split(self, feat):\n \"\"\"Split side-aware features 
aligned with orders of bucketing\n targets.\"\"\"\n l_end = int(np.ceil(self.up_reg_feat_size / 2))\n r_start = int(np.floor(self.up_reg_feat_size / 2))\n feat_fl = feat[:, :l_end]\n feat_fr = feat[:, r_start:].flip(dims=(1, ))\n feat_fl = feat_fl.contiguous()\n feat_fr = feat_fr.contiguous()\n feat = torch.cat([feat_fl, feat_fr], dim=-1)\n return feat\n\n def bbox_pred_split(self, bbox_pred, num_proposals_per_img):\n \"\"\"Split batch bbox prediction back to each image.\"\"\"\n bucket_cls_preds, bucket_offset_preds = bbox_pred\n bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)\n bucket_offset_preds = bucket_offset_preds.split(\n num_proposals_per_img, 0)\n bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))\n return bbox_pred\n\n def reg_forward(self, reg_x):\n outs = self.side_aware_feature_extractor(reg_x)\n edge_offset_preds = []\n edge_cls_preds = []\n reg_fx = outs[0]\n reg_fy = outs[1]\n offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,\n self.reg_cls_fcs)\n offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,\n self.reg_cls_fcs)\n offset_pred_x = self.side_aware_split(offset_pred_x)\n offset_pred_y = self.side_aware_split(offset_pred_y)\n cls_pred_x = self.side_aware_split(cls_pred_x)\n cls_pred_y = self.side_aware_split(cls_pred_y)\n edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)\n edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)\n\n return (edge_cls_preds, edge_offset_preds)\n\n def forward(self, x):\n\n bbox_pred = self.reg_forward(x)\n cls_score = self.cls_forward(x)\n\n return cls_score, bbox_pred\n\n def get_targets(self, sampling_results, gt_bboxes, gt_labels,\n rcnn_train_cfg):\n pos_proposals = [res.pos_bboxes for res in sampling_results]\n neg_proposals = [res.neg_bboxes for res in sampling_results]\n pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]\n pos_gt_labels = [res.pos_gt_labels for res in sampling_results]\n cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,\n pos_gt_bboxes, pos_gt_labels,\n rcnn_train_cfg)\n (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n bucket_offset_targets, bucket_offset_weights) = cls_reg_targets\n return (labels, label_weights, (bucket_cls_targets,\n bucket_offset_targets),\n (bucket_cls_weights, bucket_offset_weights))\n\n def bucket_target(self,\n pos_proposals_list,\n neg_proposals_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n rcnn_train_cfg,\n concat=True):\n (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n bucket_offset_targets, bucket_offset_weights) = multi_apply(\n self._bucket_target_single,\n pos_proposals_list,\n neg_proposals_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg=rcnn_train_cfg)\n\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bucket_cls_targets = torch.cat(bucket_cls_targets, 0)\n bucket_cls_weights = torch.cat(bucket_cls_weights, 0)\n bucket_offset_targets = torch.cat(bucket_offset_targets, 0)\n bucket_offset_weights = torch.cat(bucket_offset_weights, 0)\n return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n bucket_offset_targets, bucket_offset_weights)\n\n def _bucket_target_single(self, pos_proposals, neg_proposals,\n pos_gt_bboxes, pos_gt_labels, cfg):\n \"\"\"Compute bucketing estimation targets and fine regression targets for\n a single image.\n\n Args:\n pos_proposals (Tensor): positive proposals of a single image,\n Shape (n_pos, 4)\n neg_proposals (Tensor): 
negative proposals of a single image,\n                Shape (n_neg, 4).\n            pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals\n                of a single image, Shape (n_pos, 4).\n            pos_gt_labels (Tensor): gt labels assigned to positive proposals\n                of a single image, Shape (n_pos, ).\n            cfg (dict): Config of calculating targets\n\n        Returns:\n            tuple:\n\n                - labels (Tensor): Labels in a single image. \\\n                    Shape (n,).\n                - label_weights (Tensor): Label weights in a single image.\\\n                    Shape (n,)\n                - bucket_cls_targets (Tensor): Bucket cls targets in \\\n                    a single image. Shape (n, num_buckets*2).\n                - bucket_cls_weights (Tensor): Bucket cls weights in \\\n                    a single image. Shape (n, num_buckets*2).\n                - bucket_offset_targets (Tensor): Bucket offset targets \\\n                    in a single image. Shape (n, num_buckets*2).\n                - bucket_offset_weights (Tensor): Bucket offset weights \\\n                    in a single image. Shape (n, num_buckets*2).\n        \"\"\"\n        num_pos = pos_proposals.size(0)\n        num_neg = neg_proposals.size(0)\n        num_samples = num_pos + num_neg\n        labels = pos_gt_bboxes.new_full((num_samples, ),\n                                        self.num_classes,\n                                        dtype=torch.long)\n        label_weights = pos_proposals.new_zeros(num_samples)\n        bucket_cls_targets = pos_proposals.new_zeros(num_samples,\n                                                     4 * self.side_num)\n        bucket_cls_weights = pos_proposals.new_zeros(num_samples,\n                                                     4 * self.side_num)\n        bucket_offset_targets = pos_proposals.new_zeros(\n            num_samples, 4 * self.side_num)\n        bucket_offset_weights = pos_proposals.new_zeros(\n            num_samples, 4 * self.side_num)\n        if num_pos > 0:\n            labels[:num_pos] = pos_gt_labels\n            label_weights[:num_pos] = 1.0\n            (pos_bucket_offset_targets, pos_bucket_offset_weights,\n             pos_bucket_cls_targets,\n             pos_bucket_cls_weights) = self.bbox_coder.encode(\n                 pos_proposals, pos_gt_bboxes)\n            bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets\n            bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights\n            bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets\n            bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights\n        if num_neg > 0:\n            label_weights[-num_neg:] = 1.0\n        return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n                bucket_offset_targets, bucket_offset_weights)\n\n    def loss(self,\n             cls_score,\n             bbox_pred,\n             rois,\n             labels,\n             label_weights,\n             bbox_targets,\n             bbox_weights,\n             reduction_override=None):\n        losses = dict()\n        if cls_score is not None:\n            avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n            losses['loss_cls'] = self.loss_cls(\n                cls_score,\n                labels,\n                label_weights,\n                avg_factor=avg_factor,\n                reduction_override=reduction_override)\n            losses['acc'] = accuracy(cls_score, labels)\n\n        if bbox_pred is not None:\n            bucket_cls_preds, bucket_offset_preds = bbox_pred\n            bucket_cls_targets, bucket_offset_targets = bbox_targets\n            bucket_cls_weights, bucket_offset_weights = bbox_weights\n            # edge cls\n            bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)\n            bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)\n            bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)\n            losses['loss_bbox_cls'] = self.loss_bbox_cls(\n                bucket_cls_preds,\n                bucket_cls_targets,\n                bucket_cls_weights,\n                avg_factor=bucket_cls_targets.size(0),\n                reduction_override=reduction_override)\n\n            losses['loss_bbox_reg'] = self.loss_bbox_reg(\n                bucket_offset_preds,\n                bucket_offset_targets,\n                bucket_offset_weights,\n                avg_factor=bucket_offset_targets.size(0),\n                reduction_override=reduction_override)\n\n        return losses\n\n    @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n    def get_bboxes(self,\n                   rois,\n                   cls_score,\n                   bbox_pred,\n                   img_shape,\n                   
scale_factor,\n rescale=False,\n cfg=None):\n if isinstance(cls_score, list):\n cls_score = sum(cls_score) / float(len(cls_score))\n scores = F.softmax(cls_score, dim=1) if cls_score is not None else None\n\n if bbox_pred is not None:\n bboxes, confids = self.bbox_coder.decode(rois[:, 1:], bbox_pred,\n img_shape)\n else:\n bboxes = rois[:, 1:].clone()\n confids = None\n if img_shape is not None:\n bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)\n bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)\n\n if rescale and bboxes.size(0) > 0:\n if isinstance(scale_factor, float):\n bboxes /= scale_factor\n else:\n bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)\n\n if cfg is None:\n return bboxes, scores\n else:\n det_bboxes, det_labels = multiclass_nms(\n bboxes,\n scores,\n cfg.score_thr,\n cfg.nms,\n cfg.max_per_img,\n score_factors=confids)\n\n return det_bboxes, det_labels\n\n @force_fp32(apply_to=('bbox_preds', ))\n def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):\n \"\"\"Refine bboxes during training.\n\n Args:\n rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,\n and bs is the sampled RoIs per image.\n labels (Tensor): Shape (n*bs, ).\n bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \\\n (n*bs, num_buckets*2)].\n pos_is_gts (list[Tensor]): Flags indicating if each positive bbox\n is a gt bbox.\n img_metas (list[dict]): Meta info of each image.\n\n Returns:\n list[Tensor]: Refined bboxes of each image in a mini-batch.\n \"\"\"\n img_ids = rois[:, 0].long().unique(sorted=True)\n assert img_ids.numel() == len(img_metas)\n\n bboxes_list = []\n for i in range(len(img_metas)):\n inds = torch.nonzero(\n rois[:, 0] == i, as_tuple=False).squeeze(dim=1)\n num_rois = inds.numel()\n\n bboxes_ = rois[inds, 1:]\n label_ = labels[inds]\n edge_cls_preds, edge_offset_preds = bbox_preds\n edge_cls_preds_ = edge_cls_preds[inds]\n edge_offset_preds_ = edge_offset_preds[inds]\n bbox_pred_ = [edge_cls_preds_, edge_offset_preds_]\n img_meta_ = img_metas[i]\n pos_is_gts_ = pos_is_gts[i]\n\n bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,\n img_meta_)\n # filter gt bboxes\n pos_keep = 1 - pos_is_gts_\n keep_inds = pos_is_gts_.new_ones(num_rois)\n keep_inds[:len(pos_is_gts_)] = pos_keep\n\n bboxes_list.append(bboxes[keep_inds.type(torch.bool)])\n\n return bboxes_list\n\n @force_fp32(apply_to=('bbox_pred', ))\n def regress_by_class(self, rois, label, bbox_pred, img_meta):\n \"\"\"Regress the bbox for the predicted class. Used in Cascade R-CNN.\n\n Args:\n rois (Tensor): shape (n, 4) or (n, 5)\n label (Tensor): shape (n, )\n bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \\\n (n, num_buckets *2)]\n img_meta (dict): Image meta info.\n\n Returns:\n Tensor: Regressed bboxes, the same shape as input rois.\n \"\"\"\n assert rois.size(1) == 4 or rois.size(1) == 5\n\n if rois.size(1) == 4:\n new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,\n img_meta['img_shape'])\n else:\n bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,\n img_meta['img_shape'])\n new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)\n\n return new_rois\n",
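"# A shape sketch for SABLHead above (illustrative, assuming mmdet/mmcv are installed;\n# RoI count is arbitrary). With the default bbox_coder (num_buckets=14, side_num=7),\n# each box gets 4 * side_num = 28 bucket logits and 28 offsets.\nimport torch\n\nhead = SABLHead(num_classes=80)\nhead.init_weights()\n\nroi_feats = torch.randn(4, 256, 7, 7)  # 4 RoIs, 256 channels, 7x7 RoI features\ncls_score, (bucket_cls, bucket_offset) = head(roi_feats)\nprint(cls_score.shape)      # torch.Size([4, 81])  -> num_classes + 1\nprint(bucket_cls.shape)     # torch.Size([4, 28])  -> 4 box sides * 7 buckets each\nprint(bucket_offset.shape)  # torch.Size([4, 28])\n",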
"# Copyright 2019 Ross Wightman\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport torch\n\nfrom timm.utils.agc import adaptive_clip_grad\n\n\ndef dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0):\n \"\"\" Dispatch to gradient clipping method\n\n Args:\n parameters (Iterable): model parameters to clip\n value (float): clipping value/factor/norm, mode dependant\n mode (str): clipping mode, one of 'norm', 'value', 'agc'\n norm_type (float): p-norm, default 2.0\n \"\"\"\n if mode == 'norm':\n torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type)\n elif mode == 'value':\n torch.nn.utils.clip_grad_value_(parameters, value)\n elif mode == 'agc':\n adaptive_clip_grad(parameters, value, norm_type=norm_type)\n else:\n assert False, f\"Unknown clip mode ({mode}).\"\n\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport os\nimport shutil\nimport urllib\n\nimport mmcv\nimport torch\n\nfrom mmdet.apis import (async_inference_detector, inference_detector,\n init_detector, show_result)\nfrom mmdet.utils.contextmanagers import concurrent\nfrom mmdet.utils.profiling import profile_time\n\n\nasync def main():\n \"\"\"Benchmark between async and synchronous inference interfaces.\n\n Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x:\n\n async sync\n\n 7981.79 ms 9660.82 ms\n 8074.52 ms 9660.94 ms\n 7976.44 ms 9406.83 ms\n\n Async variant takes about 0.83-0.85 of the time of the synchronous\n interface.\n \"\"\"\n project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\n config_file = os.path.join(project_dir,\n 'configs/mask_rcnn_r50_fpn_1x_coco.py')\n checkpoint_file = os.path.join(\n project_dir, 'checkpoints/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth')\n\n if not os.path.exists(checkpoint_file):\n url = ('https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection'\n '/models/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth')\n print(f'Downloading {url} ...')\n local_filename, _ = urllib.request.urlretrieve(url)\n os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)\n shutil.move(local_filename, checkpoint_file)\n print(f'Saved as {checkpoint_file}')\n else:\n print(f'Using existing checkpoint {checkpoint_file}')\n\n device = 'cuda:0'\n model = init_detector(\n config_file, checkpoint=checkpoint_file, device=device)\n\n # queue is used for concurrent inference of multiple images\n streamqueue = asyncio.Queue()\n # queue size defines concurrency level\n streamqueue_size = 4\n\n for _ in range(streamqueue_size):\n streamqueue.put_nowait(torch.cuda.Stream(device=device))\n\n # test a single image and show the results\n img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg'))\n\n # warmup\n await async_inference_detector(model, img)\n\n async def detect(img):\n async with concurrent(streamqueue):\n return await async_inference_detector(model, img)\n\n num_of_images = 20\n with profile_time('benchmark', 'async'):\n tasks = [\n asyncio.create_task(detect(img)) for _ in range(num_of_images)\n ]\n async_results = await asyncio.gather(*tasks)\n\n with torch.cuda.stream(torch.cuda.default_stream()):\n with profile_time('benchmark', 'sync'):\n sync_results = [\n inference_detector(model, img) for _ in range(num_of_images)\n ]\n\n result_dir = os.path.join(project_dir, 'demo')\n show_result(\n img,\n async_results[0],\n model.CLASSES,\n score_thr=0.5,\n show=False,\n out_file=os.path.join(result_dir, 'result_async.jpg'))\n show_result(\n img,\n sync_results[0],\n model.CLASSES,\n score_thr=0.5,\n show=False,\n out_file=os.path.join(result_dir, 'result_sync.jpg'))\n\n\nif __name__ == '__main__':\n asyncio.run(main())\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n## =============================================================================\n\"\"\"Tests for MobileNet v1.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import slim as contrib_slim\n\nfrom nets import mobilenet_v1\n\nslim = contrib_slim\n\n\nclass MobilenetV1Test(tf.test.TestCase):\n\n def testBuildClassificationNetwork(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith(\n 'MobilenetV1/Logits/SpatialSqueeze'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n self.assertTrue('Predictions' in end_points)\n self.assertListEqual(end_points['Predictions'].get_shape().as_list(),\n [batch_size, num_classes])\n\n def testBuildPreLogitsNetwork(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = None\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool'))\n self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])\n self.assertFalse('Logits' in end_points)\n self.assertFalse('Predictions' in end_points)\n\n def testBuildBaseNetwork(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)\n self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))\n self.assertListEqual(net.get_shape().as_list(),\n [batch_size, 7, 7, 1024])\n expected_endpoints = ['Conv2d_0',\n 'Conv2d_1_depthwise', 'Conv2d_1_pointwise',\n 'Conv2d_2_depthwise', 'Conv2d_2_pointwise',\n 'Conv2d_3_depthwise', 'Conv2d_3_pointwise',\n 'Conv2d_4_depthwise', 'Conv2d_4_pointwise',\n 'Conv2d_5_depthwise', 'Conv2d_5_pointwise',\n 'Conv2d_6_depthwise', 'Conv2d_6_pointwise',\n 'Conv2d_7_depthwise', 'Conv2d_7_pointwise',\n 'Conv2d_8_depthwise', 'Conv2d_8_pointwise',\n 'Conv2d_9_depthwise', 
'Conv2d_9_pointwise',\n 'Conv2d_10_depthwise', 'Conv2d_10_pointwise',\n 'Conv2d_11_depthwise', 'Conv2d_11_pointwise',\n 'Conv2d_12_depthwise', 'Conv2d_12_pointwise',\n 'Conv2d_13_depthwise', 'Conv2d_13_pointwise']\n self.assertItemsEqual(end_points.keys(), expected_endpoints)\n\n def testBuildOnlyUptoFinalEndpoint(self):\n batch_size = 5\n height, width = 224, 224\n endpoints = ['Conv2d_0',\n 'Conv2d_1_depthwise', 'Conv2d_1_pointwise',\n 'Conv2d_2_depthwise', 'Conv2d_2_pointwise',\n 'Conv2d_3_depthwise', 'Conv2d_3_pointwise',\n 'Conv2d_4_depthwise', 'Conv2d_4_pointwise',\n 'Conv2d_5_depthwise', 'Conv2d_5_pointwise',\n 'Conv2d_6_depthwise', 'Conv2d_6_pointwise',\n 'Conv2d_7_depthwise', 'Conv2d_7_pointwise',\n 'Conv2d_8_depthwise', 'Conv2d_8_pointwise',\n 'Conv2d_9_depthwise', 'Conv2d_9_pointwise',\n 'Conv2d_10_depthwise', 'Conv2d_10_pointwise',\n 'Conv2d_11_depthwise', 'Conv2d_11_pointwise',\n 'Conv2d_12_depthwise', 'Conv2d_12_pointwise',\n 'Conv2d_13_depthwise', 'Conv2d_13_pointwise']\n for index, endpoint in enumerate(endpoints):\n with tf.Graph().as_default():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint=endpoint)\n self.assertTrue(out_tensor.op.name.startswith(\n 'MobilenetV1/' + endpoint))\n self.assertItemsEqual(endpoints[:index+1], end_points.keys())\n\n def testBuildCustomNetworkUsingConvDefs(self):\n batch_size = 5\n height, width = 224, 224\n conv_defs = [\n mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),\n mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),\n mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),\n mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)\n ]\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n net, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)\n self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))\n self.assertListEqual(net.get_shape().as_list(),\n [batch_size, 56, 56, 512])\n expected_endpoints = ['Conv2d_0',\n 'Conv2d_1_depthwise', 'Conv2d_1_pointwise',\n 'Conv2d_2_depthwise', 'Conv2d_2_pointwise',\n 'Conv2d_3_depthwise', 'Conv2d_3_pointwise']\n self.assertItemsEqual(end_points.keys(), expected_endpoints)\n\n def testBuildAndCheckAllEndPointsUptoConv2d_13(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n _, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_13_pointwise')\n _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_13_pointwise',\n use_explicit_padding=True)\n endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],\n 'Conv2d_1_depthwise': [batch_size, 112, 112, 32],\n 'Conv2d_1_pointwise': [batch_size, 112, 112, 64],\n 'Conv2d_2_depthwise': [batch_size, 56, 56, 64],\n 'Conv2d_2_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_depthwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_4_depthwise': [batch_size, 28, 28, 128],\n 'Conv2d_4_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_depthwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_6_depthwise': [batch_size, 14, 14, 256],\n 'Conv2d_6_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_7_depthwise': [batch_size, 14, 14, 512],\n 
'Conv2d_7_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_8_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_8_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_9_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_9_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_10_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_10_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_11_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_11_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_12_depthwise': [batch_size, 7, 7, 512],\n 'Conv2d_12_pointwise': [batch_size, 7, 7, 1024],\n 'Conv2d_13_depthwise': [batch_size, 7, 7, 1024],\n 'Conv2d_13_pointwise': [batch_size, 7, 7, 1024]}\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n self.assertItemsEqual(endpoints_shapes.keys(),\n explicit_padding_end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in explicit_padding_end_points)\n self.assertListEqual(\n explicit_padding_end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testOutputStride16BuildAndCheckAllEndPointsUptoConv2d_13(self):\n batch_size = 5\n height, width = 224, 224\n output_stride = 16\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n _, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, output_stride=output_stride,\n final_endpoint='Conv2d_13_pointwise')\n _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, output_stride=output_stride,\n final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)\n endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],\n 'Conv2d_1_depthwise': [batch_size, 112, 112, 32],\n 'Conv2d_1_pointwise': [batch_size, 112, 112, 64],\n 'Conv2d_2_depthwise': [batch_size, 56, 56, 64],\n 'Conv2d_2_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_depthwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_4_depthwise': [batch_size, 28, 28, 128],\n 'Conv2d_4_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_depthwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_6_depthwise': [batch_size, 14, 14, 256],\n 'Conv2d_6_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_7_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_7_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_8_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_8_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_9_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_9_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_10_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_10_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_11_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_11_pointwise': [batch_size, 14, 14, 512],\n 'Conv2d_12_depthwise': [batch_size, 14, 14, 512],\n 'Conv2d_12_pointwise': [batch_size, 14, 14, 1024],\n 'Conv2d_13_depthwise': [batch_size, 14, 14, 1024],\n 'Conv2d_13_pointwise': [batch_size, 14, 14, 1024]}\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n 
self.assertItemsEqual(endpoints_shapes.keys(),\n explicit_padding_end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in explicit_padding_end_points)\n self.assertListEqual(\n explicit_padding_end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13(self):\n batch_size = 5\n height, width = 224, 224\n output_stride = 8\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n _, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, output_stride=output_stride,\n final_endpoint='Conv2d_13_pointwise')\n _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, output_stride=output_stride,\n final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)\n endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],\n 'Conv2d_1_depthwise': [batch_size, 112, 112, 32],\n 'Conv2d_1_pointwise': [batch_size, 112, 112, 64],\n 'Conv2d_2_depthwise': [batch_size, 56, 56, 64],\n 'Conv2d_2_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_depthwise': [batch_size, 56, 56, 128],\n 'Conv2d_3_pointwise': [batch_size, 56, 56, 128],\n 'Conv2d_4_depthwise': [batch_size, 28, 28, 128],\n 'Conv2d_4_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_depthwise': [batch_size, 28, 28, 256],\n 'Conv2d_5_pointwise': [batch_size, 28, 28, 256],\n 'Conv2d_6_depthwise': [batch_size, 28, 28, 256],\n 'Conv2d_6_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_7_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_7_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_8_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_8_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_9_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_9_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_10_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_10_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_11_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_11_pointwise': [batch_size, 28, 28, 512],\n 'Conv2d_12_depthwise': [batch_size, 28, 28, 512],\n 'Conv2d_12_pointwise': [batch_size, 28, 28, 1024],\n 'Conv2d_13_depthwise': [batch_size, 28, 28, 1024],\n 'Conv2d_13_pointwise': [batch_size, 28, 28, 1024]}\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n self.assertItemsEqual(endpoints_shapes.keys(),\n explicit_padding_end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in explicit_padding_end_points)\n self.assertListEqual(\n explicit_padding_end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testBuildAndCheckAllEndPointsApproximateFaceNet(self):\n batch_size = 5\n height, width = 128, 128\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n _, end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75)\n _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(\n inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75,\n use_explicit_padding=True)\n # For the Conv2d_0 layer FaceNet has depth=16\n endpoints_shapes = {'Conv2d_0': 
[batch_size, 64, 64, 24],\n 'Conv2d_1_depthwise': [batch_size, 64, 64, 24],\n 'Conv2d_1_pointwise': [batch_size, 64, 64, 48],\n 'Conv2d_2_depthwise': [batch_size, 32, 32, 48],\n 'Conv2d_2_pointwise': [batch_size, 32, 32, 96],\n 'Conv2d_3_depthwise': [batch_size, 32, 32, 96],\n 'Conv2d_3_pointwise': [batch_size, 32, 32, 96],\n 'Conv2d_4_depthwise': [batch_size, 16, 16, 96],\n 'Conv2d_4_pointwise': [batch_size, 16, 16, 192],\n 'Conv2d_5_depthwise': [batch_size, 16, 16, 192],\n 'Conv2d_5_pointwise': [batch_size, 16, 16, 192],\n 'Conv2d_6_depthwise': [batch_size, 8, 8, 192],\n 'Conv2d_6_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_7_depthwise': [batch_size, 8, 8, 384],\n 'Conv2d_7_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_8_depthwise': [batch_size, 8, 8, 384],\n 'Conv2d_8_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_9_depthwise': [batch_size, 8, 8, 384],\n 'Conv2d_9_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_10_depthwise': [batch_size, 8, 8, 384],\n 'Conv2d_10_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_11_depthwise': [batch_size, 8, 8, 384],\n 'Conv2d_11_pointwise': [batch_size, 8, 8, 384],\n 'Conv2d_12_depthwise': [batch_size, 4, 4, 384],\n 'Conv2d_12_pointwise': [batch_size, 4, 4, 768],\n 'Conv2d_13_depthwise': [batch_size, 4, 4, 768],\n 'Conv2d_13_pointwise': [batch_size, 4, 4, 768]}\n self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n self.assertItemsEqual(endpoints_shapes.keys(),\n explicit_padding_end_points.keys())\n for endpoint_name, expected_shape in endpoints_shapes.items():\n self.assertTrue(endpoint_name in explicit_padding_end_points)\n self.assertListEqual(\n explicit_padding_end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testModelHasExpectedNumberOfParameters(self):\n batch_size = 5\n height, width = 224, 224\n inputs = tf.random.uniform((batch_size, height, width, 3))\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n normalizer_fn=slim.batch_norm):\n mobilenet_v1.mobilenet_v1_base(inputs)\n total_params, _ = slim.model_analyzer.analyze_vars(\n slim.get_model_variables())\n self.assertAlmostEqual(3217920, total_params)\n\n def testBuildEndPointsWithDepthMultiplierLessThanOne(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n\n endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')]\n\n _, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(\n inputs, num_classes, scope='depth_multiplied_net',\n depth_multiplier=0.5)\n\n for key in endpoint_keys:\n original_depth = end_points[key].get_shape().as_list()[3]\n new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]\n self.assertEqual(0.5 * original_depth, new_depth)\n\n def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n\n endpoint_keys = [key for key in end_points.keys()\n if key.startswith('Mixed') or key.startswith('Conv')]\n\n _, end_points_with_multiplier = mobilenet_v1.mobilenet_v1(\n inputs, num_classes, scope='depth_multiplied_net',\n depth_multiplier=2.0)\n\n for 
key in endpoint_keys:\n original_depth = end_points[key].get_shape().as_list()[3]\n new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]\n self.assertEqual(2.0 * original_depth, new_depth)\n\n def testRaiseValueErrorWithInvalidDepthMultiplier(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n with self.assertRaises(ValueError):\n _ = mobilenet_v1.mobilenet_v1(\n inputs, num_classes, depth_multiplier=-0.1)\n with self.assertRaises(ValueError):\n _ = mobilenet_v1.mobilenet_v1(\n inputs, num_classes, depth_multiplier=0.0)\n\n def testHalfSizeImages(self):\n batch_size = 5\n height, width = 112, 112\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_13_pointwise']\n self.assertListEqual(pre_pool.get_shape().as_list(),\n [batch_size, 4, 4, 1024])\n\n def testUnknownImageShape(self):\n tf.compat.v1.reset_default_graph()\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))\n with self.test_session() as sess:\n inputs = tf.compat.v1.placeholder(\n tf.float32, shape=(batch_size, None, None, 3))\n logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_13_pointwise']\n feed_dict = {inputs: input_np}\n tf.compat.v1.global_variables_initializer().run()\n pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)\n self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])\n\n def testGlobalPoolUnknownImageShape(self):\n tf.compat.v1.reset_default_graph()\n batch_size = 1\n height, width = 250, 300\n num_classes = 1000\n input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))\n with self.test_session() as sess:\n inputs = tf.compat.v1.placeholder(\n tf.float32, shape=(batch_size, None, None, 3))\n logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes,\n global_pool=True)\n self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Conv2d_13_pointwise']\n feed_dict = {inputs: input_np}\n tf.compat.v1.global_variables_initializer().run()\n pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)\n self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])\n\n def testUnknowBatchSize(self):\n batch_size = 1\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))\n logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [None, num_classes])\n images = tf.random.uniform((batch_size, height, width, 3))\n\n with self.test_session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n output = sess.run(logits, {inputs: images.eval()})\n self.assertEquals(output.shape, (batch_size, num_classes))\n\n def testEvaluation(self):\n batch_size = 2\n height, width = 224, 224\n num_classes = 1000\n\n eval_inputs 
= tf.random.uniform((batch_size, height, width, 3))\n logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,\n is_training=False)\n predictions = tf.argmax(input=logits, axis=1)\n\n with self.test_session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEquals(output.shape, (batch_size,))\n\n def testTrainEvalWithReuse(self):\n train_batch_size = 5\n eval_batch_size = 2\n height, width = 150, 150\n num_classes = 1000\n\n train_inputs = tf.random.uniform((train_batch_size, height, width, 3))\n mobilenet_v1.mobilenet_v1(train_inputs, num_classes)\n eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))\n logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,\n reuse=True)\n predictions = tf.argmax(input=logits, axis=1)\n\n with self.test_session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n output = sess.run(predictions)\n self.assertEquals(output.shape, (eval_batch_size,))\n\n def testLogitsNotSqueezed(self):\n num_classes = 25\n images = tf.random.uniform([1, 224, 224, 3])\n logits, _ = mobilenet_v1.mobilenet_v1(images,\n num_classes=num_classes,\n spatial_squeeze=False)\n\n with self.test_session() as sess:\n tf.compat.v1.global_variables_initializer().run()\n logits_out = sess.run(logits)\n self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])\n\n def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):\n sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)\n self.assertNotIn('is_training', sc[slim.arg_scope_func_key(\n slim.batch_norm)])\n\n def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):\n sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)\n self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])\n sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=False)\n self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])\n sc = mobilenet_v1.mobilenet_v1_arg_scope()\n self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])\n\nif __name__ == '__main__':\n tf.test.main()\n",
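The test file above exercises mobilenet_v1_base through its main knobs (final_endpoint, conv_defs, depth_multiplier, output_stride). As a companion, here is a minimal standalone sketch of the conv_defs path, mirroring testBuildCustomNetworkUsingConvDefs; the `from nets import mobilenet_v1` import path is an assumption about how the slim models package is laid out.

import tensorflow as tf
from nets import mobilenet_v1  # assumed module location (slim models repo)

with tf.Graph().as_default():
    inputs = tf.random.uniform((1, 224, 224, 3))
    conv_defs = [
        mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),
        mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),
    ]
    net, end_points = mobilenet_v1.mobilenet_v1_base(
        inputs, final_endpoint='Conv2d_1_pointwise', conv_defs=conv_defs)
    # The stride-2 stem halves 224 -> 112; the depth-separable block keeps it.
    print(net.get_shape().as_list())  # [1, 112, 112, 64]
    print(sorted(end_points))  # ['Conv2d_0', 'Conv2d_1_depthwise', 'Conv2d_1_pointwise']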
"# coding=utf-8\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport tensorflow as tf\n\nimport os\nlog_dir = './resnet50_train/results/'+os.path.basename(__file__).split('.')[0]\n\n#256\nconfig = {\n # ============ for testing =====================\n 'accelerator': '1980', # 'gpu', '1980' \n 'shuffle_enable': 'yes',\n 'shuffle_buffer_size': 10000,\n 'rank_size': 1, \n 'shard': False,\n\n # ======= basic config ======= # \n 'mode':'train', # \"train\",\"evaluate\"\n 'epochs_between_evals': 4, #used if mode is \"train_and_evaluate\"\n 'stop_threshold': 80.0, #used if mode is \"train_and_evaluate\"\n #'data_dir':'/opt/npu/resnet_data_new',\n 'data_url': 'file:///data/imagenet_TF', #data\n 'data_type': 'TFRECORD',\n 'model_name': 'resnet50', \n 'num_classes': 1001,\n 'num_epochs': None,\n 'height':224,\n 'width':224, \n 'dtype': tf.float32,\n 'data_format': 'channels_last',\n 'use_nesterov': True,\n 'eval_interval': 1,\n 'num_evaluating_samples': 50000, \n 'loss_scale': 1024, #could be float or string. If float, static loss scaling is applied. \n #If string, the corresponding automatic loss scaling algorithm is used.\n #Must be one of 'Backoff' of 'LogMax' (case insensitive).\n 'use_lars': False,\n 'label_smoothing':0.1, #If greater than 0 then smooth the labels.\n 'weight_decay': 0.0001,\n 'batch_size':32, #minibatch size per node, total batchsize = batch_size*hvd.size()*itersize\n \n 'momentum': [0.9],\n\n #======= data processing config =======\n 'min_object_covered': 0.1, #used for random crop\n 'aspect_ratio_range':[3. / 4., 4. 
/ 3.],\n 'area_range':[0.16, 1.0],\n 'max_attempts': 100,\n\n #======= data augment config ======= \n 'increased_aug': False,\n 'brightness':0.3,\n 'saturation': 0.6,\n 'contrast': 0.6,\n 'hue': 0.13,\n 'num_preproc_threads': 22,\n\n #======= initialization config ======= \n 'conv_init': tf.variance_scaling_initializer(),\n 'bn_init_mode': 'adv_bn_init', # \"conv_bn_init\" or \"adv_bn_init\",initializer the gamma in bn in different modes\n # \"adv_bn_init\" means initialize gamma to 0 in each residual block's last bn, and initialize other gamma to 1\n # \"conv_bn_init\" means initialize all the gamma to a constant, defined by \"bn_gamma_initial_value\"\n 'bn_gamma_initial_value': 1.0,\n\n #======== model architecture ==========\n #'resnet_version': 'v1.5', \n 'resnet_version': 'resnext',\n 'arch_type': 'original', # ------ input -------\n # C1,C2,C3: input block, stride in different layer\n # ------ shortcut ------\n # D1: average_pooling + conv1*1 in shortcut in downsample block\n # D2: conv3*3,stride=2 in shortcut in downsample block\n # D3: conv1*1 +average_pooling in shortcut in downsample block\n # ------ mainstream ----\n # E1: average_pooling + conv3*3 in mainstream in downsample block \n # E2: conv3*3 + average_pooling in mainstream in downsample block \n\n #======= logger config ======= \n 'display_every': 1,\n 'log_name': 'resnet50.log',\n 'log_dir': log_dir,\n 'ckpt_dir': '/home/models/training_shop/03-code/ModelZoo_ResNext50_TF_MTI/d_solution/ckpt0',\n\n #======= Learning Rate Config ======= \n 'lr_warmup_mode': 'linear', # \"linear\" or \"cosine\"\n 'warmup_lr': 0.0,\n 'warmup_epochs': 10,\n 'learning_rate_maximum': 0.1, \n\n 'lr_decay_mode': 'steps', # \"steps\", \"poly\", \"poly_cycle\", \"cosine\", \"linear_cosine\", \"linear_twice\", \"constant\" for 1980 only\n 'learning_rate_end': 0.00001,\n\n 'decay_steps': '10,20,30', #for \"steps\"\n 'lr_decay_steps': '6.4,0.64,0.064',\n\n 'ploy_power': 2.0, #for \"poly\" and \"poly_cycle\"\n\n 'cdr_first_decay_ratio': 0.33, #for \"cosine_decay_restarts\"\n 'cdr_t_mul':2.0,\n 'cdr_m_mul':0.1,\n\n 'lc_periods':0.47, #for \"linear_consine\"\n 'lc_beta':0.00001, \n \n 'lr_mid': 0.5, #for \"linear_twice\"\n 'epoch_mid': 80,\n \n 'bn_lr_scale':1.0,\n 'restore_exclude': ['fp32_vars/dense'],\n\n }\n\ndef res50_config():\n config['global_batch_size'] = config['batch_size'] * config['rank_size']\n config['do_checkpoint'] = True\n\n return config\n",
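Since decay_steps and lr_decay_steps above are stored as comma-separated strings, any consumer has to parse them itself; a small hedged sketch of reading the returned config (the parsing lines are illustrative, not part of this module):

cfg = res50_config()

# Derived field added by res50_config():
assert cfg['global_batch_size'] == cfg['batch_size'] * cfg['rank_size']

# Step schedules are comma-separated strings and must be split by the consumer:
boundaries = [int(s) for s in cfg['decay_steps'].split(',')]      # [10, 20, 30]
lr_values = [float(v) for v in cfg['lr_decay_steps'].split(',')]  # [6.4, 0.64, 0.064]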
"# Copyright (c) Open-MMLab. All rights reserved.\n#\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport os\nimport subprocess\nfrom collections import OrderedDict\n\nimport torch\nimport torch.multiprocessing as mp\nfrom torch import distributed as dist\nfrom torch._utils import (_flatten_dense_tensors, _take_tensors,\n _unflatten_dense_tensors)\n\nfrom mmcv.utils import TORCH_VERSION\n\n\ndef init_dist(launcher, backend='nccl', **kwargs):\n if mp.get_start_method(allow_none=True) is None:\n mp.set_start_method('spawn')\n if launcher == 'pytorch':\n _init_dist_pytorch(backend, **kwargs)\n elif launcher == 'mpi':\n _init_dist_mpi(backend, **kwargs)\n elif launcher == 'slurm':\n _init_dist_slurm(backend, **kwargs)\n else:\n raise ValueError(f'Invalid launcher type: {launcher}')\n\n\ndef _init_dist_pytorch(backend, **kwargs):\n # TODO: use local_rank instead of rank % num_gpus\n rank = int(os.environ['RANK'])\n offset = 0 if os.getenv('NPUID', None) is None else int(os.environ['NPUID'])\n num_gpus = torch.npu.device_count()\n torch.npu.set_device((rank + offset) % num_gpus)\n dist.init_process_group(backend=backend, world_size=num_gpus, rank=rank)\n\n\ndef _init_dist_mpi(backend, **kwargs):\n # TODO: use local_rank instead of rank % num_gpus\n rank = int(os.environ['OMPI_COMM_WORLD_RANK'])\n num_gpus = torch.cuda.device_count()\n torch.cuda.set_device(rank % num_gpus)\n dist.init_process_group(backend=backend, **kwargs)\n\n\ndef _init_dist_slurm(backend, port=None):\n \"\"\"Initialize slurm distributed training environment.\n If argument ``port`` is not specified, then the master port will be system\n environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system\n environment variable, then a default port ``29500`` will be used.\n Args:\n backend (str): Backend of torch.distributed.\n port (int, optional): Master port. 
Defaults to None.\n \"\"\"\n proc_id = int(os.environ['SLURM_PROCID'])\n ntasks = int(os.environ['SLURM_NTASKS'])\n node_list = os.environ['SLURM_NODELIST']\n num_gpus = torch.cuda.device_count()\n torch.cuda.set_device(proc_id % num_gpus)\n addr = subprocess.getoutput(\n f'scontrol show hostname {node_list} | head -n1')\n # specify master port\n if port is not None:\n os.environ['MASTER_PORT'] = str(port)\n elif 'MASTER_PORT' in os.environ:\n pass # use MASTER_PORT in the environment variable\n else:\n # 29500 is torch.distributed default port\n os.environ['MASTER_PORT'] = '29500'\n # use MASTER_ADDR in the environment variable if it already exists\n if 'MASTER_ADDR' not in os.environ:\n os.environ['MASTER_ADDR'] = addr\n os.environ['WORLD_SIZE'] = str(ntasks)\n os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)\n os.environ['RANK'] = str(proc_id)\n dist.init_process_group(backend=backend)\n\n\ndef get_dist_info():\n if TORCH_VERSION < '1.0':\n initialized = dist._initialized\n else:\n if dist.is_available():\n initialized = dist.is_initialized()\n else:\n initialized = False\n if initialized:\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n else:\n rank = 0\n world_size = 1\n return rank, world_size\n\n\ndef master_only(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndef allreduce_params(params, coalesce=True, bucket_size_mb=-1):\n \"\"\"Allreduce parameters.\n Args:\n params (list[torch.Parameters]): List of parameters or buffers of a\n model.\n coalesce (bool, optional): Whether allreduce parameters as a whole.\n Defaults to True.\n bucket_size_mb (int, optional): Size of bucket, the unit is MB.\n Defaults to -1.\n \"\"\"\n _, world_size = get_dist_info()\n if world_size == 1:\n return\n params = [param.data for param in params]\n if coalesce:\n _allreduce_coalesced(params, world_size, bucket_size_mb)\n else:\n for tensor in params:\n dist.all_reduce(tensor.div_(world_size))\n\n\ndef allreduce_grads(params, coalesce=True, bucket_size_mb=-1):\n \"\"\"Allreduce gradients.\n Args:\n params (list[torch.Parameters]): List of parameters of a model\n coalesce (bool, optional): Whether allreduce parameters as a whole.\n Defaults to True.\n bucket_size_mb (int, optional): Size of bucket, the unit is MB.\n Defaults to -1.\n \"\"\"\n grads = [\n param.grad.data for param in params\n if param.requires_grad and param.grad is not None\n ]\n _, world_size = get_dist_info()\n if world_size == 1:\n return\n if coalesce:\n _allreduce_coalesced(grads, world_size, bucket_size_mb)\n else:\n for tensor in grads:\n dist.all_reduce(tensor.div_(world_size))\n\n\ndef _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):\n if bucket_size_mb > 0:\n bucket_size_bytes = bucket_size_mb * 1024 * 1024\n buckets = _take_tensors(tensors, bucket_size_bytes)\n else:\n buckets = OrderedDict()\n for tensor in tensors:\n tp = tensor.type()\n if tp not in buckets:\n buckets[tp] = []\n buckets[tp].append(tensor)\n buckets = buckets.values()\n\n for bucket in buckets:\n flat_tensors = _flatten_dense_tensors(bucket)\n dist.all_reduce(flat_tensors)\n flat_tensors.div_(world_size)\n for tensor, synced in zip(\n bucket, _unflatten_dense_tensors(flat_tensors, bucket)):\n tensor.copy_(synced)",
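A single-process sketch of the dtype bucketing that _allreduce_coalesced above performs when bucket_size_mb <= 0: tensors are grouped by type, flattened, reduced as one buffer, then scattered back in place. dist.all_reduce is commented out and world_size fixed to 1 here, so the sketch runs without a process group.

from collections import OrderedDict

import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

tensors = [torch.ones(3), torch.ones(2, dtype=torch.float64), torch.ones(4)]
world_size = 1  # stand-in; normally taken from get_dist_info()

buckets = OrderedDict()
for t in tensors:
    buckets.setdefault(t.type(), []).append(t)  # group tensors by dtype

for bucket in buckets.values():
    flat = _flatten_dense_tensors(bucket)
    # dist.all_reduce(flat) would sum the flat buffer across ranks here
    flat.div_(world_size)
    for t, synced in zip(bucket, _unflatten_dense_tensors(flat, bucket)):
        t.copy_(synced)  # write the averaged values back into each tensor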
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n#\n# -------------------------------------------------------------------------\n#\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport contextlib\nimport itertools\nimport os\n\nimport numpy as np\nimport torch\n\nimport sys\n\nfrom collections import Counter\nfrom .dictionary import Dictionary\n\n\ndef infer_language_pair(path):\n \"\"\"Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx\"\"\"\n src, dst = None, None\n for filename in os.listdir(path):\n parts = filename.split('.')\n if len(parts) >= 3 and len(parts[1].split('-')) == 2:\n return parts[1].split('-')\n return src, dst\n\n\ndef load_dictionaries(args):\n if args.source_lang is None or args.target_lang is None:\n args.source_lang, args.target_lang = infer_language_pair(args.data)\n if args.source_lang is None or args.target_lang is None:\n raise Exception('Could not infer language pair, please provide it explicitly')\n src_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.source_lang)))\n tgt_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.target_lang)))\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n args.src_vocab_size = len(src_dict)\n args.tgt_vocab_size = len(tgt_dict)\n args.padding_idx = src_dict.pad()\n\n return src_dict, tgt_dict\n\n\nclass ShardedIterator(object):\n \"\"\"A sharded wrapper around an iterable (padded to length).\"\"\"\n\n def __init__(self, iterable, num_shards, shard_id, fill_value=None):\n if shard_id < 0 or shard_id >= num_shards:\n raise ValueError('shard_id must be between 0 and num_shards')\n\n self._sharded_len = len(iterable) // num_shards\n if len(iterable) % num_shards > 0:\n self._sharded_len += 1\n\n self.itr = itertools.zip_longest(\n range(self._sharded_len),\n itertools.islice(iterable, shard_id, len(iterable), num_shards),\n fillvalue=fill_value,\n )\n\n def __len__(self):\n return self._sharded_len\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.itr)[1]\n\n\nclass CountingIterator(object):\n \"\"\"Wrapper around an iterable that maintains the iteration count.\"\"\"\n\n def __init__(self, iterable):\n self.iterable = iterable\n self.count = 0\n self.itr = iter(self)\n\n def __len__(self):\n return len(self.iterable)\n\n def __iter__(self):\n for x in self.iterable:\n self.count += 1\n yield x\n\n def __next__(self):\n return next(self.itr)\n\n def has_next(self):\n return self.count < len(self)\n\n def skip(self, num_to_skip):\n next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)\n return 
self\n\n\ndef collate_tokens(values, pad_idx, eos_idx, left_pad, move_eos_to_beginning=False, pad_sequence=1,gd_size=0):\n \"\"\"Convert a list of 1d tensors into a padded 2d tensor.\"\"\"\n\n if not gd_size:\n orig_size = max(v.size(0) for v in values)\n size = 0\n if pad_sequence > 1:\n size = orig_size // pad_sequence * pad_sequence\n if orig_size % pad_sequence > 0:\n size += pad_sequence\n else:\n size = orig_size\n else:\n size = gd_size\n res = values[0].new(len(values), size).fill_(pad_idx)\n\n def copy_tensor(src, dst):\n assert dst.numel() == src.numel()\n if move_eos_to_beginning:\n assert src[-1] == eos_idx\n dst[0] = eos_idx\n dst[1:] = src[:-1]\n else:\n dst.copy_(src)\n\n for i, v in enumerate(values):\n copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])\n return res\n\n\ndef collate(samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False, pad_sequence=1,gd_size=0):\n if len(samples) == 0:\n return {}\n\n def merge(key, left_pad, move_eos_to_beginning=False):\n return collate_tokens(\n [s[key] for s in samples],\n pad_idx, eos_idx, left_pad, move_eos_to_beginning,\n pad_sequence,\n gd_size,\n )\n\n id = torch.IntTensor([s['id'] for s in samples])\n src_tokens = merge('source', left_pad=left_pad_source)\n src_lengths = torch.IntTensor([s['source'].numel() for s in samples])\n src_lengths, sort_order = src_lengths.sort(descending=True)\n id = id.index_select(0, sort_order)\n src_tokens = src_tokens.index_select(0, sort_order)\n\n prev_output_tokens = None\n target = None\n if samples[0].get('target', None) is not None:\n target = merge('target', left_pad=left_pad_target)\n # we create a shifted version of targets for feeding the\n # previous output token(s) into the next decoder step\n prev_output_tokens = merge(\n 'target',\n left_pad=left_pad_target,\n move_eos_to_beginning=True,\n )\n prev_output_tokens = prev_output_tokens.index_select(0, sort_order)\n target = target.index_select(0, sort_order)\n ntokens = sum(len(s['target']) for s in samples)\n else:\n ntokens = sum(len(s['source']) for s in samples)\n\n return {\n 'id': id,\n 'ntokens': ntokens,\n 'net_input': {\n 'src_tokens': src_tokens,\n 'src_lengths': src_lengths,\n 'prev_output_tokens': prev_output_tokens,\n },\n 'target': target,\n }\n\n\ndef get_dummy_batch(num_tokens, src_dict, tgt_dict, src_len=128, tgt_len=128,\n left_pad_source=True, left_pad_target=False, pad_sequence=1):\n bsz = num_tokens // max(src_len, tgt_len)\n dummy_samples = [\n {\n 'id': i,\n 'source': src_dict.dummy_sentence(src_len),\n 'target': tgt_dict.dummy_sentence(tgt_len) if tgt_dict is not None else None,\n }\n for i in range(bsz)\n ]\n return collate(\n dummy_samples, pad_idx=src_dict.pad(), eos_idx=src_dict.eos(),\n left_pad_source=left_pad_source, left_pad_target=left_pad_target,\n pad_sequence=pad_sequence,\n )\n\n\nclass EpochBatchIterator(object):\n \"\"\"Iterate over a Dataset and yield batches bucketed by size.\n\n Batches may contain sequences of different lengths. 
This iterator can be\n reused across multiple epochs with the next_epoch_itr() method.\n\n Args:\n dataset: a Dataset\n max_tokens: max number of tokens in each batch\n max_sentences: max number of sentences in each batch\n max_positions: max sentence length supported by the model\n ignore_invalid_inputs: don't raise Exception for sentences that are too long\n required_batch_size_multiple: require batch size to be a multiple of N\n seed: seed for random number generator for reproducibility\n num_shards: shard the data iterator into N shards\n shard_id: which shard of the data iterator to return\n \"\"\"\n\n def __init__(\n self, dataset, max_tokens=None, max_sentences=None, max_positions=None,\n ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1,\n num_shards=1, shard_id=0, epoch=0, max_positions_num=None\n ):\n self.dataset = dataset\n self.max_tokens = max_tokens if max_tokens is not None else float('Inf')\n self.max_sentences = max_sentences if max_sentences is not None else float('Inf')\n self.max_positions = max_positions\n self.ignore_invalid_inputs = ignore_invalid_inputs\n self.bsz_mult = required_batch_size_multiple\n self.seed = seed\n self.num_shards = num_shards\n self.shard_id = shard_id\n\n self.epoch = epoch\n self._cur_epoch_itr = None\n self._next_epoch_itr = None\n self.max_positions_num = max_positions_num\n\n with numpy_seed(self.seed):\n batches = list(self._batch_generator())\n size = len(batches)\n k = size % self.num_shards\n batches = batches[:size-k]\n self.frozen_batches = tuple(batches)\n\n def __len__(self):\n return len(self.frozen_batches)\n\n def next_epoch_itr(self, shuffle=True):\n \"\"\"Shuffle batches and return a new iterator over the dataset.\"\"\"\n if self._next_epoch_itr is not None:\n self._cur_epoch_itr = self._next_epoch_itr\n self._next_epoch_itr = None\n else:\n self.epoch += 1\n self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle)\n return self._cur_epoch_itr\n\n def end_of_epoch(self):\n return not self._cur_epoch_itr.has_next()\n\n @property\n def iterations_in_epoch(self):\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.count\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.count\n return 0\n\n def state_dict(self):\n return {\n 'epoch': self.epoch,\n 'iterations_in_epoch': self.iterations_in_epoch,\n }\n\n def load_state_dict(self, state_dict):\n self.epoch = state_dict['epoch']\n itr_pos = state_dict.get('iterations_in_epoch', 0)\n if itr_pos > 0:\n # fast-forward epoch iterator\n itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))\n if itr_pos < len(itr):\n self._next_epoch_itr = itr.skip(itr_pos)\n\n def _get_iterator_for_epoch(self, epoch, shuffle):\n if shuffle:\n # set seed based on the seed and epoch number so that we get\n # reproducible results when resuming from checkpoints\n with numpy_seed(self.seed + epoch):\n batches = list(self.frozen_batches) # copy\n np.random.shuffle(batches)\n else:\n batches = self.frozen_batches\n return CountingIterator(torch.utils.data.DataLoader(\n self.dataset,\n collate_fn=self.dataset.collater,\n # num_workers=1,\n batch_sampler=ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]),\n ))\n\n def _batch_generator(self):\n batch = []\n\n def is_batch_full(num_tokens):\n if len(batch) == 0:\n return False\n if len(batch) == self.max_sentences:\n return True\n if num_tokens > self.max_tokens:\n return True\n return False\n\n sample_len = 0\n sample_lens = []\n ignored = []\n 
for idx in self.dataset.ordered_indices(self.seed, self.epoch):\n if not self.dataset.valid_size(idx, self.max_positions):\n if self.ignore_invalid_inputs:\n ignored.append(idx)\n continue\n raise Exception((\n 'Size of sample #{} is invalid, max_positions={}, skip this '\n 'example with --skip-invalid-size-inputs-valid-test'\n ).format(idx, self.max_positions))\n sample_num_tokens = self.dataset.num_tokens(idx)\n if sample_num_tokens > self.max_positions_num:\n continue\n sample_lens.append(self.dataset.num_tokens(idx))\n sample_len = max(sample_len, sample_lens[-1])\n num_tokens = (len(batch) + 1) * sample_len\n if is_batch_full(num_tokens):\n mod_len = max(\n self.bsz_mult * (len(batch) // self.bsz_mult),\n len(batch) % self.bsz_mult,\n )\n yield batch[:mod_len]\n batch = batch[mod_len:]\n sample_lens = sample_lens[mod_len:]\n sample_len = max(sample_lens) if len(sample_lens) > 0 else 0\n\n batch.append(idx)\n\n if len(batch) > 0:\n yield batch\n\n\[email protected]\ndef numpy_seed(seed):\n \"\"\"Context manager which seeds the NumPy PRNG with the specified seed and\n restores the state afterward\"\"\"\n if seed is None:\n yield\n return\n state = np.random.get_state()\n np.random.seed(seed)\n try:\n yield\n finally:\n np.random.set_state(state)\n",
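Two small runnable sketches of the helpers above: collate_tokens' left-padding for source batches (the pad/eos ids are arbitrary placeholders), and numpy_seed's save/restore of the NumPy PRNG state.

import numpy as np
import torch

values = [torch.tensor([5, 6, 2]), torch.tensor([7, 2])]
batch = collate_tokens(values, pad_idx=1, eos_idx=2, left_pad=True)
print(batch)
# tensor([[5, 6, 2],
#         [1, 7, 2]])  <- the shorter sentence is padded on the left

with numpy_seed(7):
    a = np.random.rand(3)
with numpy_seed(7):
    b = np.random.rand(3)
assert (a == b).all()  # same seed, same draws; the outer PRNG state is restored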
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Scale, normal_init\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import distance2bbox, multi_apply, multiclass_nms\nfrom ..builder import HEADS, build_loss\nfrom .anchor_free_head import AnchorFreeHead\n\nINF = 1e8\n\n\[email protected]_module()\nclass FCOSHead(AnchorFreeHead):\n \"\"\"Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_.\n\n The FCOS head does not use anchor boxes. Instead bounding boxes are\n predicted at each pixel and a centerness measure is used to supress\n low-quality predictions.\n Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training\n tricks used in official repo, which will bring remarkable mAP gains\n of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for\n more detail.\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n strides (list[int] | list[tuple[int, int]]): Strides of points\n in multiple feature levels. Default: (4, 8, 16, 32, 64).\n regress_ranges (tuple[tuple[int, int]]): Regress range of multiple\n level points.\n center_sampling (bool): If true, use center sampling. Default: False.\n center_sample_radius (float): Radius of center sampling. Default: 1.5.\n norm_on_bbox (bool): If true, normalize the regression targets\n with FPN strides. Default: False.\n centerness_on_reg (bool): If true, position centerness on the\n regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.\n Default: False.\n conv_bias (bool | str): If specified as `auto`, it will be decided by the\n norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise\n False. 
Default: \"auto\".\n loss_cls (dict): Config of classification loss.\n loss_bbox (dict): Config of localization loss.\n loss_centerness (dict): Config of centerness loss.\n norm_cfg (dict): dictionary to construct and config norm layer.\n Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).\n\n Example:\n >>> self = FCOSHead(11, 7)\n >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n >>> cls_score, bbox_pred, centerness = self.forward(feats)\n >>> assert len(cls_score) == len(self.scales)\n \"\"\" # noqa: E501\n\n def __init__(self,\n num_classes,\n in_channels,\n regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),\n (512, INF)),\n center_sampling=False,\n center_sample_radius=1.5,\n norm_on_bbox=False,\n centerness_on_reg=False,\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n loss_centerness=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n **kwargs):\n self.regress_ranges = regress_ranges\n self.center_sampling = center_sampling\n self.center_sample_radius = center_sample_radius\n self.norm_on_bbox = norm_on_bbox\n self.centerness_on_reg = centerness_on_reg\n super().__init__(\n num_classes,\n in_channels,\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n norm_cfg=norm_cfg,\n **kwargs)\n self.loss_centerness = build_loss(loss_centerness)\n\n def _init_layers(self):\n \"\"\"Initialize layers of the head.\"\"\"\n super()._init_layers()\n self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)\n self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n def init_weights(self):\n \"\"\"Initialize weights of the head.\"\"\"\n super().init_weights()\n normal_init(self.conv_centerness, std=0.01)\n\n def forward(self, feats):\n \"\"\"Forward features from the upstream network.\n\n Args:\n feats (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple:\n cls_scores (list[Tensor]): Box scores for each scale level, \\\n each is a 4D-tensor, the channel number is \\\n num_points * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for each \\\n scale level, each is a 4D-tensor, the channel number is \\\n num_points * 4.\n centernesses (list[Tensor]): Centerss for each scale level, \\\n each is a 4D-tensor, the channel number is num_points * 1.\n \"\"\"\n return multi_apply(self.forward_single, feats, self.scales,\n self.strides)\n\n def forward_single(self, x, scale, stride):\n \"\"\"Forward features of a single scale levle.\n\n Args:\n x (Tensor): FPN feature maps of the specified stride.\n scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n the bbox prediction.\n stride (int): The corresponding stride for feature maps, only\n used to normalize the bbox prediction when self.norm_on_bbox\n is True.\n\n Returns:\n tuple: scores for each class, bbox predictions and centerness \\\n predictions of input feature maps.\n \"\"\"\n cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)\n if self.centerness_on_reg:\n centerness = self.conv_centerness(reg_feat)\n else:\n centerness = self.conv_centerness(cls_feat)\n # scale the bbox_pred of different level\n # float to avoid overflow when enabling FP16\n bbox_pred = scale(bbox_pred).float()\n if self.norm_on_bbox:\n bbox_pred = F.relu(bbox_pred)\n if not self.training:\n bbox_pred *= stride\n else:\n bbox_pred = 
bbox_pred.exp()\n return cls_score, bbox_pred, centerness\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n def loss(self,\n cls_scores,\n bbox_preds,\n centernesses,\n gt_bboxes,\n gt_labels,\n img_metas,\n gt_bboxes_ignore=None):\n \"\"\"Compute loss of the head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level,\n each is a 4D-tensor, the channel number is\n num_points * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level, each is a 4D-tensor, the channel number is\n num_points * 4.\n centernesses (list[Tensor]): Centerss for each scale level, each\n is a 4D-tensor, the channel number is num_points * 1.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n assert len(cls_scores) == len(bbox_preds) == len(centernesses)\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,\n bbox_preds[0].device)\n labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes,\n gt_labels)\n\n num_imgs = cls_scores[0].size(0)\n # flatten cls_scores, bbox_preds and centerness\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n for cls_score in cls_scores\n ]\n flatten_bbox_preds = [\n bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n for bbox_pred in bbox_preds\n ]\n flatten_centerness = [\n centerness.permute(0, 2, 3, 1).reshape(-1)\n for centerness in centernesses\n ]\n flatten_cls_scores = torch.cat(flatten_cls_scores)\n flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n flatten_centerness = torch.cat(flatten_centerness)\n flatten_labels = torch.cat(labels)\n flatten_bbox_targets = torch.cat(bbox_targets)\n # repeat points to align with bbox_preds\n flatten_points = torch.cat(\n [points.repeat(num_imgs, 1) for points in all_level_points])\n\n # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n bg_class_ind = self.num_classes\n pos_inds = ((flatten_labels >= 0)\n & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)\n num_pos = len(pos_inds)\n loss_cls = self.loss_cls(\n flatten_cls_scores, flatten_labels,\n avg_factor=num_pos + num_imgs) # avoid num_pos is 0\n\n pos_bbox_preds = flatten_bbox_preds[pos_inds]\n pos_centerness = flatten_centerness[pos_inds]\n\n if num_pos > 0:\n pos_bbox_targets = flatten_bbox_targets[pos_inds]\n pos_centerness_targets = self.centerness_target(pos_bbox_targets)\n pos_points = flatten_points[pos_inds]\n pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)\n pos_decoded_target_preds = distance2bbox(pos_points,\n pos_bbox_targets)\n # centerness weighted iou loss\n loss_bbox = self.loss_bbox(\n pos_decoded_bbox_preds,\n pos_decoded_target_preds,\n weight=pos_centerness_targets,\n avg_factor=pos_centerness_targets.sum())\n loss_centerness = self.loss_centerness(pos_centerness,\n pos_centerness_targets)\n else:\n loss_bbox = pos_bbox_preds.sum()\n loss_centerness = pos_centerness.sum()\n\n return dict(\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n loss_centerness=loss_centerness)\n\n @force_fp32(apply_to=('cls_scores', 
'bbox_preds', 'centernesses'))\n def get_bboxes(self,\n cls_scores,\n bbox_preds,\n centernesses,\n img_metas,\n cfg=None,\n rescale=False,\n with_nms=True):\n \"\"\"Transform network output for a batch into bbox predictions.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n with shape (N, num_points * num_classes, H, W).\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_points * 4, H, W).\n centernesses (list[Tensor]): Centerness for each scale level with\n shape (N, num_points * 1, H, W).\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n cfg (mmcv.Config | None): Test / postprocessing configuration,\n if None, test_cfg would be used. Default: None.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before return boxes.\n Default: True.\n\n Returns:\n list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n The first item is an (n, 5) tensor, where the first 4 columns\n are bounding box positions (tl_x, tl_y, br_x, br_y) and the\n 5-th column is a score between 0 and 1. The second item is a\n (n,) tensor where each item is the predicted class label of the\n corresponding box.\n \"\"\"\n assert len(cls_scores) == len(bbox_preds)\n num_levels = len(cls_scores)\n\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,\n bbox_preds[0].device)\n result_list = []\n for img_id in range(len(img_metas)):\n cls_score_list = [\n cls_scores[i][img_id].detach() for i in range(num_levels)\n ]\n bbox_pred_list = [\n bbox_preds[i][img_id].detach() for i in range(num_levels)\n ]\n centerness_pred_list = [\n centernesses[i][img_id].detach() for i in range(num_levels)\n ]\n img_shape = img_metas[img_id]['img_shape']\n scale_factor = img_metas[img_id]['scale_factor']\n det_bboxes = self._get_bboxes_single(\n cls_score_list, bbox_pred_list, centerness_pred_list,\n mlvl_points, img_shape, scale_factor, cfg, rescale, with_nms)\n result_list.append(det_bboxes)\n return result_list\n\n def _get_bboxes_single(self,\n cls_scores,\n bbox_preds,\n centernesses,\n mlvl_points,\n img_shape,\n scale_factor,\n cfg,\n rescale=False,\n with_nms=True):\n \"\"\"Transform outputs for a single batch item into bbox predictions.\n\n Args:\n cls_scores (list[Tensor]): Box scores for a single scale level\n with shape (num_points * num_classes, H, W).\n bbox_preds (list[Tensor]): Box energies / deltas for a single scale\n level with shape (num_points * 4, H, W).\n centernesses (list[Tensor]): Centerness for a single scale level\n with shape (num_points * 4, H, W).\n mlvl_points (list[Tensor]): Box reference for a single scale level\n with shape (num_total_points, 4).\n img_shape (tuple[int]): Shape of the input image,\n (height, width, 3).\n scale_factor (ndarray): Scale factor of the image arrange as\n (w_scale, h_scale, w_scale, h_scale).\n cfg (mmcv.Config | None): Test / postprocessing configuration,\n if None, test_cfg would be used.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before return boxes.\n Default: True.\n\n Returns:\n tuple(Tensor):\n det_bboxes (Tensor): BBox predictions in shape (n, 5), where\n the first 4 columns are bounding box positions\n (tl_x, tl_y, br_x, br_y) and the 5-th column is a score\n between 0 and 1.\n det_labels (Tensor): A (n,) tensor where each item is 
the\n predicted class label of the corresponding box.\n \"\"\"\n cfg = self.test_cfg if cfg is None else cfg\n assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)\n mlvl_bboxes = []\n mlvl_scores = []\n mlvl_centerness = []\n for cls_score, bbox_pred, centerness, points in zip(\n cls_scores, bbox_preds, centernesses, mlvl_points):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n scores = cls_score.permute(1, 2, 0).reshape(\n -1, self.cls_out_channels).sigmoid()\n centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()\n\n bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n nms_pre = cfg.get('nms_pre', -1)\n if nms_pre > 0 and scores.shape[0] > nms_pre:\n max_scores, _ = (scores * centerness[:, None]).max(dim=1)\n _, topk_inds = max_scores.topk(nms_pre)\n points = points[topk_inds, :]\n bbox_pred = bbox_pred[topk_inds, :]\n scores = scores[topk_inds, :]\n centerness = centerness[topk_inds]\n bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n mlvl_centerness.append(centerness)\n mlvl_bboxes = torch.cat(mlvl_bboxes)\n if rescale:\n mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n mlvl_scores = torch.cat(mlvl_scores)\n padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n # remind that we set FG labels to [0, num_class-1] since mmdet v2.0\n # BG cat_id: num_class\n mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)\n mlvl_centerness = torch.cat(mlvl_centerness)\n\n if with_nms:\n det_bboxes, det_labels = multiclass_nms(\n mlvl_bboxes,\n mlvl_scores,\n cfg.score_thr,\n cfg.nms,\n cfg.max_per_img,\n score_factors=mlvl_centerness)\n return det_bboxes, det_labels\n else:\n return mlvl_bboxes, mlvl_scores, mlvl_centerness\n\n def _get_points_single(self,\n featmap_size,\n stride,\n dtype,\n device,\n flatten=False):\n \"\"\"Get points according to feature map sizes.\"\"\"\n y, x = super()._get_points_single(featmap_size, stride, dtype, device)\n points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),\n dim=-1) + stride // 2\n return points\n\n def get_targets(self, points, gt_bboxes_list, gt_labels_list):\n \"\"\"Compute regression, classification and centerss targets for points\n in multiple images.\n\n Args:\n points (list[Tensor]): Points of each fpn level, each has shape\n (num_points, 2).\n gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,\n each has shape (num_gt, 4).\n gt_labels_list (list[Tensor]): Ground truth labels of each box,\n each has shape (num_gt,).\n\n Returns:\n tuple:\n concat_lvl_labels (list[Tensor]): Labels of each level. 
\\\n concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \\\n level.\n \"\"\"\n assert len(points) == len(self.regress_ranges)\n num_levels = len(points)\n # expand regress ranges to align with points\n expanded_regress_ranges = [\n points[i].new_tensor(self.regress_ranges[i])[None].expand_as(\n points[i]) for i in range(num_levels)\n ]\n # concat all levels points and regress ranges\n concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)\n concat_points = torch.cat(points, dim=0)\n\n # the number of points per img, per lvl\n num_points = [center.size(0) for center in points]\n\n # get labels and bbox_targets of each image\n labels_list, bbox_targets_list = multi_apply(\n self._get_target_single,\n gt_bboxes_list,\n gt_labels_list,\n points=concat_points,\n regress_ranges=concat_regress_ranges,\n num_points_per_lvl=num_points)\n\n # split to per img, per level\n labels_list = [labels.split(num_points, 0) for labels in labels_list]\n bbox_targets_list = [\n bbox_targets.split(num_points, 0)\n for bbox_targets in bbox_targets_list\n ]\n\n # concat per level image\n concat_lvl_labels = []\n concat_lvl_bbox_targets = []\n for i in range(num_levels):\n concat_lvl_labels.append(\n torch.cat([labels[i] for labels in labels_list]))\n bbox_targets = torch.cat(\n [bbox_targets[i] for bbox_targets in bbox_targets_list])\n if self.norm_on_bbox:\n bbox_targets = bbox_targets / self.strides[i]\n concat_lvl_bbox_targets.append(bbox_targets)\n return concat_lvl_labels, concat_lvl_bbox_targets\n\n def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,\n num_points_per_lvl):\n \"\"\"Compute regression and classification targets for a single image.\"\"\"\n num_points = points.size(0)\n num_gts = gt_labels.size(0)\n if num_gts == 0:\n return gt_labels.new_full((num_points,), self.num_classes), \\\n gt_bboxes.new_zeros((num_points, 4))\n\n areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (\n gt_bboxes[:, 3] - gt_bboxes[:, 1])\n # TODO: figure out why these two are different\n # areas = areas[None].expand(num_points, num_gts)\n areas = areas[None].repeat(num_points, 1)\n regress_ranges = regress_ranges[:, None, :].expand(\n num_points, num_gts, 2)\n gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)\n xs, ys = points[:, 0], points[:, 1]\n xs = xs[:, None].expand(num_points, num_gts)\n ys = ys[:, None].expand(num_points, num_gts)\n\n left = xs - gt_bboxes[..., 0]\n right = gt_bboxes[..., 2] - xs\n top = ys - gt_bboxes[..., 1]\n bottom = gt_bboxes[..., 3] - ys\n bbox_targets = torch.stack((left, top, right, bottom), -1)\n\n if self.center_sampling:\n # condition1: inside a `center bbox`\n radius = self.center_sample_radius\n center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2\n center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2\n center_gts = torch.zeros_like(gt_bboxes)\n stride = center_xs.new_zeros(center_xs.shape)\n\n # project the points on current lvl back to the `original` sizes\n lvl_begin = 0\n for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):\n lvl_end = lvl_begin + num_points_lvl\n stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius\n lvl_begin = lvl_end\n\n x_mins = center_xs - stride\n y_mins = center_ys - stride\n x_maxs = center_xs + stride\n y_maxs = center_ys + stride\n center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],\n x_mins, gt_bboxes[..., 0])\n center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],\n y_mins, gt_bboxes[..., 1])\n center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],\n 
gt_bboxes[..., 2], x_maxs)\n center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],\n gt_bboxes[..., 3], y_maxs)\n\n cb_dist_left = xs - center_gts[..., 0]\n cb_dist_right = center_gts[..., 2] - xs\n cb_dist_top = ys - center_gts[..., 1]\n cb_dist_bottom = center_gts[..., 3] - ys\n center_bbox = torch.stack(\n (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)\n inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0\n else:\n # condition1: inside a gt bbox\n inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0\n\n # condition2: limit the regression range for each location\n max_regress_distance = bbox_targets.max(-1)[0]\n inside_regress_range = (\n (max_regress_distance >= regress_ranges[..., 0])\n & (max_regress_distance <= regress_ranges[..., 1]))\n\n # if there are still more than one objects for a location,\n # we choose the one with minimal area\n areas[inside_gt_bbox_mask == 0] = INF\n areas[inside_regress_range == 0] = INF\n min_area, min_area_inds = areas.min(dim=1)\n\n labels = gt_labels[min_area_inds]\n labels[min_area == INF] = self.num_classes # set as BG\n bbox_targets = bbox_targets[range(num_points), min_area_inds]\n\n return labels, bbox_targets\n\n def centerness_target(self, pos_bbox_targets):\n \"\"\"Compute centerness targets.\n\n Args:\n pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape\n (num_pos, 4)\n\n Returns:\n Tensor: Centerness target.\n \"\"\"\n # only calculate pos centerness targets, otherwise there may be nan\n left_right = pos_bbox_targets[:, [0, 2]]\n top_bottom = pos_bbox_targets[:, [1, 3]]\n centerness_targets = (\n left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (\n top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])\n return torch.sqrt(centerness_targets)\n",
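A standalone numeric sketch of the formula in centerness_target above: given per-location distances (left, top, right, bottom) to the sides of the assigned box, the target is sqrt(min(l,r)/max(l,r) * min(t,b)/max(t,b)), which is 1.0 at the box center and decays toward the border. The sample distances below are made up.

import torch

pos_bbox_targets = torch.tensor([[4., 4., 4., 4.],   # exact center
                                 [1., 4., 7., 4.]])  # off-center along x
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness = torch.sqrt(
    (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
    (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
print(centerness)  # tensor([1.0000, 0.3780])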
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" Transforms Factory\nFactory methods for building image transforms for use with TIMM (PyTorch Image Models)\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport math\n\nimport torch\nfrom torchvision import transforms\n\nfrom timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT\nfrom timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform\nfrom timm.data.transforms import _pil_interp, RandomResizedCropAndInterpolation, ToNumpy, ToTensor\nfrom timm.data.random_erasing import RandomErasing\n\n\ndef transforms_noaug_train(\n img_size=224,\n interpolation='bilinear',\n use_prefetcher=False,\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD,\n):\n if interpolation == 'random':\n # random interpolation not supported with no-aug\n interpolation = 'bilinear'\n tfl = [\n transforms.Resize(img_size, _pil_interp(interpolation)),\n transforms.CenterCrop(img_size)\n ]\n if use_prefetcher:\n # prefetcher and collate will handle tensor conversion and norm\n tfl += [ToNumpy()]\n else:\n tfl += [\n transforms.ToTensor(),\n transforms.Normalize(\n mean=torch.tensor(mean),\n std=torch.tensor(std))\n ]\n return transforms.Compose(tfl)\n\n\ndef transforms_imagenet_train(\n img_size=224,\n scale=None,\n ratio=None,\n hflip=0.5,\n vflip=0.,\n color_jitter=0.4,\n auto_augment=None,\n interpolation='random',\n use_prefetcher=False,\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD,\n re_prob=0.,\n re_mode='const',\n re_count=1,\n re_num_splits=0,\n separate=False,\n):\n \"\"\"\n If separate==True, the transforms are returned as a tuple of 3 separate transforms\n for use in a mixing dataset that passes\n * all data through the first (primary) transform, called the 'clean' data\n * a portion of the data through the secondary transform\n * normalizes and converts the branches above with the third, final transform\n \"\"\"\n scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range\n ratio = tuple(ratio or (3./4., 4./3.)) # default imagenet ratio range\n primary_tfl = [\n RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)]\n if hflip > 0.:\n primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]\n if vflip > 0.:\n primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]\n\n secondary_tfl = []\n if auto_augment:\n assert isinstance(auto_augment, str)\n if isinstance(img_size, (tuple, list)):\n img_size_min = min(img_size)\n else:\n img_size_min = img_size\n aa_params = dict(\n translate_const=int(img_size_min * 0.45),\n img_mean=tuple([min(255, round(255 * x)) for x in mean]),\n )\n if interpolation and interpolation != 'random':\n aa_params['interpolation'] = _pil_interp(interpolation)\n if auto_augment.startswith('rand'):\n secondary_tfl += 
[rand_augment_transform(auto_augment, aa_params)]\n elif auto_augment.startswith('augmix'):\n aa_params['translate_pct'] = 0.3\n secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)]\n else:\n secondary_tfl += [auto_augment_transform(auto_augment, aa_params)]\n elif color_jitter is not None:\n # color jitter is enabled when not using AA\n if isinstance(color_jitter, (list, tuple)):\n # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation\n # or 4 if also augmenting hue\n assert len(color_jitter) in (3, 4)\n else:\n # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue\n color_jitter = (float(color_jitter),) * 3\n secondary_tfl += [transforms.ColorJitter(*color_jitter)]\n\n final_tfl = []\n if use_prefetcher:\n # prefetcher and collate will handle tensor conversion and norm\n final_tfl += [ToNumpy()]\n else:\n final_tfl += [\n transforms.ToTensor(),\n transforms.Normalize(\n mean=torch.tensor(mean),\n std=torch.tensor(std))\n ]\n if re_prob > 0.:\n final_tfl.append(\n RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu'))\n\n if separate:\n return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl)\n else:\n return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)\n\n\ndef transforms_imagenet_eval(\n img_size=224,\n crop_pct=None,\n interpolation='bilinear',\n use_prefetcher=False,\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD):\n crop_pct = crop_pct or DEFAULT_CROP_PCT\n\n if isinstance(img_size, (tuple, list)):\n assert len(img_size) == 2\n if img_size[-1] == img_size[-2]:\n # fall-back to older behaviour so Resize scales to shortest edge if target is square\n scale_size = int(math.floor(img_size[0] / crop_pct))\n else:\n scale_size = tuple([int(x / crop_pct) for x in img_size])\n else:\n scale_size = int(math.floor(img_size / crop_pct))\n\n tfl = [\n transforms.Resize(scale_size, _pil_interp(interpolation)),\n transforms.CenterCrop(img_size),\n ]\n if use_prefetcher:\n # prefetcher and collate will handle tensor conversion and norm\n tfl += [ToNumpy()]\n else:\n tfl += [\n transforms.ToTensor(),\n transforms.Normalize(\n mean=torch.tensor(mean),\n std=torch.tensor(std))\n ]\n\n return transforms.Compose(tfl)\n\n\ndef create_transform(\n input_size,\n is_training=False,\n use_prefetcher=False,\n no_aug=False,\n scale=None,\n ratio=None,\n hflip=0.5,\n vflip=0.,\n color_jitter=0.4,\n auto_augment=None,\n interpolation='bilinear',\n mean=IMAGENET_DEFAULT_MEAN,\n std=IMAGENET_DEFAULT_STD,\n re_prob=0.,\n re_mode='const',\n re_count=1,\n re_num_splits=0,\n crop_pct=None,\n tf_preprocessing=False,\n separate=False):\n\n if isinstance(input_size, (tuple, list)):\n img_size = input_size[-2:]\n else:\n img_size = input_size\n\n if tf_preprocessing and use_prefetcher:\n assert not separate, \"Separate transforms not supported for TF preprocessing\"\n from timm.data.tf_preprocessing import TfPreprocessTransform\n transform = TfPreprocessTransform(\n is_training=is_training, size=img_size, interpolation=interpolation)\n else:\n if is_training and no_aug:\n assert not separate, \"Cannot perform split augmentation with no_aug\"\n transform = transforms_noaug_train(\n img_size,\n interpolation=interpolation,\n use_prefetcher=use_prefetcher,\n mean=mean,\n std=std)\n elif is_training:\n transform = transforms_imagenet_train(\n img_size,\n scale=scale,\n ratio=ratio,\n hflip=hflip,\n vflip=vflip,\n color_jitter=color_jitter,\n 
auto_augment=auto_augment,\n interpolation=interpolation,\n use_prefetcher=use_prefetcher,\n mean=mean,\n std=std,\n re_prob=re_prob,\n re_mode=re_mode,\n re_count=re_count,\n re_num_splits=re_num_splits,\n separate=separate)\n else:\n assert not separate, \"Separate transforms not supported for validation preprocessing\"\n transform = transforms_imagenet_eval(\n img_size,\n interpolation=interpolation,\n use_prefetcher=use_prefetcher,\n mean=mean,\n std=std,\n crop_pct=crop_pct)\n\n return transform\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\n\n\ndef ae_loss_per_image(tl_preds, br_preds, match):\n \"\"\"Associative Embedding Loss in one image.\n\n Associative Embedding Loss including two parts: pull loss and push loss.\n Pull loss makes embedding vectors from same object closer to each other.\n Push loss distinguish embedding vector from different objects, and makes\n the gap between them is large enough.\n\n During computing, usually there are 3 cases:\n - no object in image: both pull loss and push loss will be 0.\n - one object in image: push loss will be 0 and pull loss is computed\n by the two corner of the only object.\n - more than one objects in image: pull loss is computed by corner pairs\n from each object, push loss is computed by each object with all\n other objects. We use confusion matrix with 0 in diagonal to\n compute the push loss.\n\n Args:\n tl_preds (tensor): Embedding feature map of left-top corner.\n br_preds (tensor): Embedding feature map of bottim-right corner.\n match (list): Downsampled coordinates pair of each ground truth box.\n \"\"\"\n\n tl_list, br_list, me_list = [], [], []\n if len(match) == 0: # no object in image\n pull_loss = tl_preds.sum() * 0.\n push_loss = tl_preds.sum() * 0.\n else:\n for m in match:\n [tl_y, tl_x], [br_y, br_x] = m\n tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)\n br_e = br_preds[:, br_y, br_x].view(-1, 1)\n tl_list.append(tl_e)\n br_list.append(br_e)\n me_list.append((tl_e + br_e) / 2.0)\n\n tl_list = torch.cat(tl_list)\n br_list = torch.cat(br_list)\n me_list = torch.cat(me_list)\n\n assert tl_list.size() == br_list.size()\n\n # N is object number in image, M is dimension of embedding vector\n N, M = tl_list.size()\n\n pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)\n pull_loss = pull_loss.sum() / N\n\n margin = 1 # exp setting of CornerNet, details in section 3.3 of paper\n\n # confusion matrix of push loss\n conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list\n conf_weight = 1 - torch.eye(N).type_as(me_list)\n conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())\n\n if N > 1: # more than one object in current image\n push_loss = F.relu(conf_mat).sum() / (N * (N - 1))\n else:\n push_loss = tl_preds.sum() * 0.\n\n return pull_loss, push_loss\n\n\[email protected]_module()\nclass AssociativeEmbeddingLoss(nn.Module):\n \"\"\"Associative Embedding Loss.\n\n More details can be found in\n `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and\n `CornerNet <https://arxiv.org/abs/1808.01244>`_ .\n Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501\n\n Args:\n pull_weight (float): Loss weight for corners from same object.\n push_weight (float): Loss weight for corners from different object.\n \"\"\"\n\n def __init__(self, pull_weight=0.25, 
push_weight=0.25):\n super(AssociativeEmbeddingLoss, self).__init__()\n self.pull_weight = pull_weight\n self.push_weight = push_weight\n\n def forward(self, pred, target, match):\n \"\"\"Forward function.\"\"\"\n batch = pred.size(0)\n pull_all, push_all = 0.0, 0.0\n for i in range(batch):\n pull, push = ae_loss_per_image(pred[i], target[i], match[i])\n\n pull_all += self.pull_weight * pull\n push_all += self.push_weight * push\n\n return pull_all, push_all\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport torch\nfrom torch.nn import functional as F\n\nfrom detectron2.layers import paste_masks_in_image\nfrom detectron2.structures import Instances\nfrom detectron2.utils.memory import retry_if_cuda_oom\n\n\ndef detector_postprocess(results, output_height, output_width, mask_threshold=0.5):\n \"\"\"\n Resize the output instances.\n The input images are often resized when entering an object detector.\n As a result, we often need the outputs of the detector in a different\n resolution from its inputs.\n\n This function will resize the raw outputs of an R-CNN detector\n to produce outputs according to the desired output resolution.\n\n Args:\n results (Instances): the raw outputs from the detector.\n `results.image_size` contains the input image resolution the detector sees.\n This object might be modified in-place.\n output_height, output_width: the desired output resolution.\n\n Returns:\n Instances: the resized output from the model, based on the output resolution\n \"\"\"\n\n # Converts integer tensors to float temporaries\n # to ensure true division is performed when\n # computing scale_x and scale_y.\n if isinstance(output_width, torch.Tensor):\n output_width_tmp = output_width.float()\n else:\n output_width_tmp = output_width\n\n if isinstance(output_height, torch.Tensor):\n output_height_tmp = output_height.float()\n else:\n output_height_tmp = output_height\n\n scale_x, scale_y = (\n output_width_tmp / results.image_size[1],\n output_height_tmp / results.image_size[0],\n )\n results = Instances((output_height, output_width), **results.get_fields())\n\n if results.has(\"pred_boxes\"):\n output_boxes = results.pred_boxes\n elif results.has(\"proposal_boxes\"):\n output_boxes = results.proposal_boxes\n\n print(\"***results***\")\n # print(output_boxes)\n # for i in output_boxes:\n # print(i)\n output_boxes.scale(scale_x, scale_y)\n output_boxes.clip(results.image_size)\n\n if results.has(\"pred_masks\"):\n results.pred_masks = retry_if_cuda_oom(paste_masks_in_image)(\n results.pred_masks[:, 0, :, :], # N, 1, M, M\n results.pred_boxes,\n results.image_size,\n threshold=mask_threshold,\n )\n\n hw_mask = output_boxes.nonempty()\n keep_mask = (results.scores != -1.0) & hw_mask\n results.scores[~keep_mask] = -1.0\n\n if results.has(\"pred_keypoints\"):\n results.pred_keypoints[:, :, 0] *= scale_x\n results.pred_keypoints[:, :, 1] *= scale_y\n\n return results\n\n\ndef sem_seg_postprocess(result, img_size, output_height, output_width):\n \"\"\"\n Return semantic segmentation predictions in the original resolution.\n\n The input images are often resized when entering semantic segmentor. 
Moreover, in some\n cases, they are also padded inside the segmentor to be divisible by the maximum network stride.\n As a result, we often need the predictions of the segmentor in a different\n resolution from its inputs.\n\n Args:\n result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),\n where C is the number of classes, and H, W are the height and width of the prediction.\n img_size (tuple): image size that the segmentor takes as input.\n output_height, output_width: the desired output resolution.\n\n Returns:\n semantic segmentation prediction (Tensor): A tensor of the shape\n (C, output_height, output_width) that contains per-pixel soft predictions.\n \"\"\"\n result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)\n result = F.interpolate(\n result, size=(output_height, output_width), mode=\"bilinear\", align_corners=False\n )[0]\n return result\n"
] | [
[
"torch.zeros_like"
],
[
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.average_precision_score"
],
[
"numpy.fromfile"
],
[
"torch.save",
"torch.load"
],
[
"torch.is_grad_enabled",
"torch.nn.parallel.distributed._find_tensors"
],
[
"torch.npu.set_device",
"torch.sigmoid",
"torch.load",
"torch.sign",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"numpy.max",
"torch.npu.synchronize",
"torch.no_grad",
"numpy.mean",
"numpy.where",
"numpy.empty"
],
[
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d"
],
[
"numpy.array",
"numpy.minimum"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.image.random_contrast",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.minimum",
"tensorflow.cast",
"tensorflow.image.random_saturation",
"tensorflow.data.Dataset.list_files",
"tensorflow.pad",
"tensorflow.random_shuffle",
"tensorflow.string_to_number",
"tensorflow.boolean_mask",
"tensorflow.to_int64",
"tensorflow.image.random_hue",
"tensorflow.greater",
"tensorflow.data.experimental.parallel_interleave",
"tensorflow.data.TFRecordDataset",
"numpy.clip",
"tensorflow.squeeze",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.data.Options",
"tensorflow.logical_not",
"tensorflow.tile",
"tensorflow.image.random_brightness",
"tensorflow.shape",
"tensorflow.reduce_any",
"tensorflow.less",
"tensorflow.image.resize_images",
"numpy.array",
"tensorflow.not_equal",
"tensorflow.reduce_max",
"tensorflow.range",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.random_uniform"
],
[
"tensorflow.concat",
"tensorflow.gfile.DeleteRecursively",
"tensorflow.control_dependencies",
"tensorflow.gfile.Exists",
"tensorflow.gfile.MkDir",
"tensorflow.global_variables",
"tensorflow.cast",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.train.AdamOptimizer",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.get_default_graph",
"tensorflow.summary.scalar",
"tensorflow.get_collection",
"tensorflow.summary.image",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.train.exponential_decay",
"tensorflow.ConfigProto",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.trainable_variables",
"tensorflow.app.run",
"tensorflow.scalar_mul",
"numpy.isnan",
"tensorflow.placeholder",
"tensorflow.compat.as_bytes",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.no_op",
"tensorflow.contrib.slim.get_trainable_variables",
"tensorflow.train.latest_checkpoint",
"tensorflow.reduce_mean",
"tensorflow.expand_dims",
"tensorflow.constant_initializer",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.get_variable_scope"
],
[
"numpy.finfo"
],
[
"tensorflow.train.Saver",
"tensorflow.reset_default_graph",
"tensorflow.placeholder",
"tensorflow.Session"
],
[
"torch.Tensor"
],
[
"torch.npu.is_available",
"torch.npu.set_device",
"torch.load",
"torch.nn.BCELoss",
"torch.nn.functional.sigmoid",
"torch.nn.parallel.DistributedDataParallel",
"torch.save"
],
[
"torch.zeros_like",
"torch.max"
],
[
"matplotlib.pyplot.imshow",
"numpy.asarray",
"numpy.all",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
],
[
"numpy.array"
],
[
"torch.distributed.broadcast",
"torch.optim.lr_scheduler.LambdaLR",
"torch.distributed.init_process_group",
"numpy.random.seed",
"numpy.ones",
"numpy.concatenate",
"numpy.loadtxt",
"numpy.interp",
"torch.nn.functional.interpolate",
"torch.distributed.destroy_process_group",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"numpy.array",
"numpy.zeros",
"torch.nn.parallel.DistributedDataParallel"
],
[
"numpy.array"
],
[
"numpy.rollaxis",
"tensorflow.device",
"numpy.expand_dims",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.minimum",
"tensorflow.image.decode_and_crop_jpeg",
"tensorflow.image.sample_distorted_bounding_box",
"tensorflow.image.random_flip_left_right",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.image.extract_jpeg_shape",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.image.resize",
"tensorflow.image.convert_image_dtype"
],
[
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
],
[
"tensorflow.reverse",
"tensorflow.math.reduce_min",
"tensorflow.constant",
"tensorflow.math.reduce_max",
"tensorflow.cast",
"tensorflow.math.greater",
"tensorflow.one_hot",
"tensorflow.pad",
"tensorflow.where",
"tensorflow.random_uniform"
],
[
"torch.isfinite",
"numpy.mean",
"torch.cuda.Stream"
],
[
"torch.randn",
"torch.onnx.export",
"torch.load"
],
[
"torch.nn.CrossEntropyLoss",
"torch.npu.set_device",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.save"
],
[
"numpy.split",
"numpy.minimum",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"numpy.concatenate",
"numpy.any",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.ConfigProto",
"tensorflow.reset_default_graph",
"numpy.argpartition",
"tensorflow.Session",
"tensorflow.gfile.Remove",
"numpy.zeros",
"tensorflow.image.grayscale_to_rgb",
"tensorflow.image.decode_jpeg",
"tensorflow.shape",
"tensorflow.image.resize_images",
"tensorflow.compat.as_bytes",
"tensorflow.logging.info",
"numpy.transpose",
"numpy.argsort",
"numpy.array",
"numpy.maximum",
"tensorflow.gfile.Copy",
"numpy.tile",
"numpy.ones",
"tensorflow.GraphDef",
"tensorflow.gfile.FastGFile"
],
[
"tensorflow.compat.v1.variable_scope",
"tensorflow.squeeze",
"tensorflow.reduce_mean"
],
[
"torch.rand",
"torch.cuda.is_available"
],
[
"tensorflow.gfile.GFile",
"numpy.concatenate",
"tensorflow.image.central_crop",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.squeeze",
"tensorflow.subtract",
"tensorflow.ConfigProto",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.nn.in_top_k",
"tensorflow.argmax",
"tensorflow.image.grayscale_to_rgb",
"numpy.zeros",
"tensorflow.image.decode_jpeg",
"tensorflow.image.resize_bilinear",
"tensorflow.shape",
"tensorflow.compat.as_bytes",
"numpy.array",
"tensorflow.multiply",
"tensorflow.expand_dims",
"tensorflow.image.convert_image_dtype",
"tensorflow.GraphDef",
"tensorflow.gfile.FastGFile"
],
[
"tensorflow.train.polynomial_decay",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.greater",
"tensorflow.zeros_initializer",
"tensorflow.cast",
"tensorflow.gradients",
"tensorflow.identity",
"tensorflow.train.get_or_create_global_step",
"tensorflow.contrib.tpu.CrossShardOptimizer",
"tensorflow.clip_by_global_norm",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.sqrt",
"tensorflow.group"
],
[
"numpy.array",
"numpy.unique"
],
[
"tensorflow.FixedLenFeature",
"tensorflow.zeros"
],
[
"numpy.random.beta",
"torch.nn.functional.log_softmax",
"torch.randperm",
"torch.nn.functional.cross_entropy",
"torch.no_grad"
],
[
"torch.manual_seed",
"torch.__version__.split",
"numpy.random.seed"
],
[
"numpy.ascontiguousarray",
"numpy.array",
"numpy.expand_dims"
],
[
"numpy.prod",
"torch.flatten"
],
[
"numpy.zeros"
],
[
"tensorflow.concat",
"tensorflow.contrib.slim.dropout",
"tensorflow.contrib.slim.arg_scope",
"tensorflow.contrib.slim.max_pool2d",
"tensorflow.reshape",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.test.main",
"tensorflow.contrib.slim.softmax",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.contrib.slim.conv2d",
"tensorflow.variable_scope",
"tensorflow.contrib.slim.xavier_initializer",
"tensorflow.random_uniform",
"tensorflow.contrib.slim.avg_pool2d"
],
[
"torch.nn.ZeroPad2d",
"torch.nn.functional.dropout",
"torch.cat",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.functional.cross_entropy",
"torch.nonzero",
"torch.arange"
],
[
"torch.npu.set_device",
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.utils.data.DataLoader",
"numpy.cos",
"torch.nn.Linear",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.device_count",
"torch.autograd.profiler.profile",
"torch.nn.parallel.DistributedDataParallel",
"torch.save"
],
[
"tensorflow.distribute.InputContext",
"torch.distributed.is_initialized",
"torch.utils.data.get_worker_info",
"torch.distributed.is_available",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"tensorflow.data.Options",
"tensorflow.config.set_visible_devices"
],
[
"tensorflow.fill",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"tensorflow.cast",
"tensorflow.keras.Model",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.size",
"tensorflow.keras.layers.Input"
],
[
"torch.max",
"torch.zeros_like",
"torch.pow",
"torch.zeros"
],
[
"numpy.reshape",
"numpy.argmax",
"numpy.sum",
"numpy.isin"
],
[
"torch.nn.functional.softmax",
"torch.transpose",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.sum",
"torch.from_numpy",
"torch.nn.Linear",
"numpy.ceil",
"numpy.floor",
"torch.nonzero",
"torch.nn.ConvTranspose1d",
"torch.nn.ReLU"
],
[
"torch.nn.utils.clip_grad_norm_",
"torch.nn.utils.clip_grad_value_"
],
[
"torch.cuda.default_stream",
"torch.cuda.Stream"
],
[
"tensorflow.Graph",
"tensorflow.random.uniform",
"tensorflow.test.main",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"numpy.random.uniform",
"tensorflow.argmax",
"tensorflow.compat.v1.reset_default_graph"
],
[
"tensorflow.variance_scaling_initializer"
],
[
"torch.multiprocessing.set_start_method",
"torch.npu.set_device",
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.distributed.is_initialized",
"torch._utils._flatten_dense_tensors",
"torch.npu.device_count",
"torch.multiprocessing.get_start_method",
"torch.distributed.is_available",
"torch._utils._unflatten_dense_tensors",
"torch.distributed.get_rank",
"torch.cuda.device_count",
"torch.distributed.all_reduce",
"torch.distributed.get_world_size",
"torch._utils._take_tensors"
],
[
"numpy.random.get_state",
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.random.set_state",
"torch.IntTensor"
],
[
"torch.cat",
"torch.sqrt",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.functional.relu",
"torch.where",
"torch.stack"
],
[
"torch.tensor"
],
[
"torch.nn.functional.relu",
"torch.eye",
"torch.cat"
],
[
"torch.nn.functional.interpolate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
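The associative-embedding loss embedded in the record above reduces to a pull term toward per-object mean embeddings plus a margin hinge between the means of different objects. A minimal sketch of the same computation on toy corner embeddings; the names and shapes below are illustrative assumptions, not values taken from the record:

import torch
import torch.nn.functional as F

# Toy setup (illustrative only): N objects, M-dimensional embedding per corner.
N, M = 3, 1
tl = torch.randn(N, M)            # top-left corner embedding, one row per object
br = torch.randn(N, M)            # bottom-right corner embedding
me = (tl + br) / 2.0              # per-object mean embedding

# Pull term: both corners of an object are drawn toward their mean.
pull = ((tl - me).pow(2) + (br - me).pow(2)).sum() / N

# Push term: hinge on |mean_i - mean_j| with a unit margin (CornerNet setting),
# with the diagonal zeroed so an object is not pushed away from itself.
margin = 1.0
diff = me.unsqueeze(0) - me.unsqueeze(1)           # (N, N, M) pairwise differences
off_diag = 1.0 - torch.eye(N)                      # (N, N) mask, 0 on the diagonal
push = F.relu(off_diag * (margin - diff.sum(-1).abs())).sum() / (N * (N - 1))

print(pull.item(), push.item())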
anglixjtu/MeshCNN_ | [
"83826e66d8989ed4967047c2ed6d099568c5781c",
"83826e66d8989ed4967047c2ed6d099568c5781c"
] | [
"src/util/losses.py",
"src/util/visualization.py"
] | [
"import torch\nimport torch.nn as nn\n\n\nclass ChamferLoss(nn.Module):\n\n def __init__(self):\n super(ChamferLoss, self).__init__()\n self.use_cuda = torch.cuda.is_available()\n\n def forward(self, preds, gts, reverse=True, bidirectional=True):\n def compute_loss(preds, gts):\n P = self.batch_pairwise_dist(gts, preds)\n mins, _ = torch.min(P, 1)\n loss_1 = torch.sum(mins)\n mins, _ = torch.min(P, 2)\n loss_2 = torch.sum(mins)\n return loss_1 + loss_2\n\n if bidirectional or reverse:\n backward_loss = compute_loss(gts, preds)\n if reverse:\n return backward_loss\n else:\n forward_loss = compute_loss(preds, gts)\n return forward_loss + backward_loss\n else:\n forward_loss = compute_loss(preds, gts)\n return forward_loss\n\n def batch_pairwise_dist(self, x, y):\n bs, num_points_x, points_dim = x.size()\n _, num_points_y, _ = y.size()\n xx = torch.bmm(x, x.transpose(2, 1))\n yy = torch.bmm(y, y.transpose(2, 1))\n zz = torch.bmm(x, y.transpose(2, 1))\n if self.use_cuda:\n dtype = torch.cuda.LongTensor\n else:\n dtype = torch.LongTensor\n diag_ind_x = torch.arange(0, num_points_x).type(dtype)\n diag_ind_y = torch.arange(0, num_points_y).type(dtype)\n rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(\n zz.transpose(2, 1))\n ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)\n P = rx.transpose(2, 1) + ry - 2 * zz\n return P",
"import numpy as np\nfrom faiss import IndexFlatIP, IndexFlatL2\nimport pyvista as pv\nimport os\nimport time\nfrom torch_geometric.nn import global_mean_pool, global_add_pool, global_max_pool, global_sort_pool\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\nfrom .util import get_labels_from_path\n\n\ndef add_subplot(plotter, coord_y, coord_x,\n mesh, font_size, label=None,\n dissm=None, filename=None,\n show_edges=True):\n plotter.subplot(coord_y, coord_x)\n\n text = ''\n if label is not None:\n text += label + '\\n'\n if dissm is not None:\n text += \"distance: %.3f \\n\" % (dissm)\n if filename is not None:\n text += filename\n if label or dissm or filename:\n plotter.add_text(text, font_size=font_size, color='black')\n plotter.set_background('white')\n plotter.add_mesh(mesh, color=\"tan\", show_edges=show_edges)\n\n\ndef visualize_retrieval(paths_q, paths_retr, dissm=None, show_self=False,\n sub_size=(220, 150), font_size=10, out_path=None,\n camera_pos=[4, 4, 4]):\n num_query = len(paths_q)\n if show_self:\n start_ri = 0\n else:\n start_ri = 1\n num_retr = len(paths_retr[0][start_ri:])\n num_subplot = (num_query, num_retr+1)\n fig_size = ((num_retr+1)*sub_size[1], num_query*sub_size[0])\n\n p = pv.Plotter(shape=num_subplot,\n window_size=fig_size, border_color='gray')\n for qi, path_q in enumerate(paths_q):\n mesh_q = pv.read(path_q)\n _, filename = os.path.split(path_q)\n label = get_labels_from_path(path_q)\n label = 'Query - ' + label\n add_subplot(p, qi, 0, mesh_q, font_size,\n label=label, filename=filename)\n p.set_position(camera_pos)\n\n for ri, path_r in enumerate(paths_retr[qi][start_ri:]):\n mesh_r = pv.read(path_r)\n _, filename = os.path.split(path_r)\n label = get_labels_from_path(path_r)\n dissm_r = dissm[qi, ri+start_ri]\n add_subplot(p, qi, ri+1, mesh_r,\n font_size, dissm=dissm_r,\n label=label, filename=filename)\n p.set_position(camera_pos)\n p.show(screenshot=out_path)\n\n\ndef show_embedding(self, features, idx_list):\n label_list = self.get_labels_from_index(idx_list)\n writer = SummaryWriter('runs/embedding')\n writer.add_embedding(features,\n metadata=label_list)\n writer.close()\n"
] | [
[
"torch.min",
"torch.sum",
"torch.cuda.is_available",
"torch.arange"
],
[
"torch.utils.tensorboard.SummaryWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
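The ChamferLoss in the record above builds batched pairwise squared distances through the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x, y> instead of materialising point differences. A small sketch of that identity and the resulting Chamfer sum, cross-checked against torch.cdist; the shapes are illustrative assumptions:

import torch

# Toy batch (illustrative shapes): bs point clouds with nx and ny points in 3-D.
bs, nx, ny, d = 2, 5, 7, 3
x = torch.randn(bs, nx, d)
y = torch.randn(bs, ny, d)

# Expansion used by batch_pairwise_dist: ||x||^2 + ||y||^2 - 2 x.y^T
xx = (x * x).sum(-1, keepdim=True)                     # (bs, nx, 1)
yy = (y * y).sum(-1).unsqueeze(1)                      # (bs, 1, ny)
P = xx + yy - 2 * torch.bmm(x, y.transpose(2, 1))      # (bs, nx, ny) squared distances

# Same quantity via the built-in pairwise distance (up to float error).
assert torch.allclose(P, torch.cdist(x, y).pow(2), atol=1e-4)

# Chamfer distance: nearest-neighbour terms in both directions.
chamfer = P.min(dim=2).values.sum() + P.min(dim=1).values.sum()
print(chamfer.item())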
morkovka1337/openvino_training_extensions | [
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9",
"846db45c264d6b061505213f51763520b9432ba9"
] | [
"pytorch_toolkit/nncf/examples/object_detection/layers/modules/multibox_loss.py",
"tensorflow_toolkit/text_recognition/text_recognition/model.py",
"pytorch_toolkit/nncf/tests/modules/test_rnn.py",
"tensorflow_toolkit/ssd_detector/vlp/config.py",
"pytorch_toolkit/nncf/nncf/nncf_network.py",
"tensorflow_toolkit/text_recognition/tools/test.py",
"tensorflow_toolkit/vehicle_attributes/cars_100/config.py",
"pytorch_toolkit/nncf/tests/test_models/pnasnet.py",
"pytorch_toolkit/nncf/examples/semantic_segmentation/test.py",
"tensorflow_toolkit/image_retrieval/image_retrieval/common.py",
"tensorflow_toolkit/action_detection/action_detection/postprocessing/metrics.py"
] | [
"\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..box_utils import match, log_sum_exp\n\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, cfg, num_classes, overlap_thresh, prior_for_matching,\n bkg_label, neg_mining, neg_pos, neg_overlap, encode_target, device=None):\n super(MultiBoxLoss, self).__init__()\n self.device = device\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n\n def forward(self, predictions, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,4)\n priors shape: torch.size(num_priors,4)\n\n ground_truth (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,5] (last idx is the label).\n \"\"\"\n loc_data, conf_data, priors = predictions\n batch = loc_data.size(0)\n num_priors = loc_data.size(1)\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.Tensor(batch, num_priors, 4).to(self.device)\n conf_t = torch.LongTensor(batch, num_priors).to(self.device)\n for idx in range(batch):\n truths = targets[idx][:, :-1].data\n labels = targets[idx][:, -1].data\n defaults = priors.data\n match(self.threshold, truths, defaults[0], labels, loc_t, conf_t, idx)\n pos = conf_t > 0\n num_pos = pos.sum(dim=1, keepdim=True)\n\n # Localization Loss (Smooth L1)\n # Shape: [batch,num_priors,4]\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n loc_p = loc_data[pos_idx].view(-1, 4)\n loc_t = loc_t[pos_idx].view(-1, 4)\n loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')\n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes)\n\n loss_c = 
log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))\n\n # Hard Negative Mining\n loss_c = loss_c.view(batch, -1)\n loss_c[pos] = 0 # filter out pos boxes for now\n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)\n neg = idx_rank < num_neg.expand_as(idx_rank)\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos + neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')\n\n # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n\n N = num_pos.data.sum().to(torch.float)\n loss_l /= N\n loss_c /= N\n return loss_l, loss_c\n",
"# Copyright (C) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\n\"\"\" This module contains architecture of Text Recognition model.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport tensorflow.contrib.slim as slim\n\n\nclass TextRecognition:\n \"\"\" Text recognition model definition. \"\"\"\n\n def __init__(self, is_training, num_classes, backbone_dropout=0.0):\n self.is_training = is_training\n self.lstm_dim = 256\n self.num_classes = num_classes\n self.backbone_dropout = backbone_dropout\n\n def __call__(self, inputdata):\n with tf.variable_scope('shadow'):\n features = self.feature_extractor(inputdata=inputdata)\n logits = self.encoder_decoder(inputdata=tf.squeeze(features, axis=1))\n\n return logits\n\n # pylint: disable=too-many-locals\n def feature_extractor(self, inputdata):\n \"\"\" Extracts features from input text image. \"\"\"\n\n with slim.arg_scope([slim.conv2d], padding='SAME',\n weights_initializer=tf.contrib.layers.variance_scaling_initializer(),\n weights_regularizer=slim.l2_regularizer(0.00025),\n biases_initializer=None, activation_fn=None):\n with slim.arg_scope([slim.batch_norm], updates_collections=None):\n bn0 = slim.batch_norm(inputdata, 0.9, scale=True, is_training=self.is_training,\n activation_fn=None)\n\n dropout1 = slim.dropout(bn0, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv1 = slim.conv2d(dropout1, num_outputs=64, kernel_size=3)\n bn1 = slim.batch_norm(conv1, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n pool1 = slim.max_pool2d(bn1, kernel_size=2, stride=2)\n\n dropout2 = slim.dropout(pool1, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv2 = slim.conv2d(dropout2, num_outputs=128, kernel_size=3)\n bn2 = slim.batch_norm(conv2, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n pool2 = slim.max_pool2d(bn2, kernel_size=2, stride=2)\n\n dropout3 = slim.dropout(pool2, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv3 = slim.conv2d(dropout3, num_outputs=256, kernel_size=3)\n bn3 = slim.batch_norm(conv3, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n\n dropout4 = slim.dropout(bn3, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv4 = slim.conv2d(dropout4, num_outputs=256, kernel_size=3)\n bn4 = slim.batch_norm(conv4, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n pool4 = slim.max_pool2d(bn4, kernel_size=[2, 1], stride=[2, 1])\n\n dropout5 = slim.dropout(pool4, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv5 = slim.conv2d(dropout5, num_outputs=512, kernel_size=3)\n bn5 = slim.batch_norm(conv5, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n\n dropout6 = slim.dropout(bn5, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv6 = slim.conv2d(dropout6, num_outputs=512, kernel_size=3)\n bn6 = 
slim.batch_norm(conv6, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n pool6 = slim.max_pool2d(bn6, kernel_size=[2, 1], stride=[2, 1])\n\n dropout7 = slim.dropout(pool6, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv7 = slim.conv2d(dropout7, num_outputs=512, kernel_size=2, stride=[2, 1])\n bn7 = slim.batch_norm(conv7, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n\n return bn7\n\n def encoder_decoder(self, inputdata):\n \"\"\" LSTM-based encoder-decoder module. \"\"\"\n\n with tf.variable_scope('LSTMLayers'):\n [batch_size, width, _] = inputdata.get_shape().as_list()\n\n with tf.variable_scope('encoder'):\n forward_cells = []\n backward_cells = []\n\n for _ in range(2):\n forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))\n backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))\n\n encoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(\n forward_cells, backward_cells, inputdata, dtype=tf.float32)\n\n with tf.variable_scope('decoder'):\n forward_cells = []\n backward_cells = []\n\n for _ in range(2):\n forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))\n backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))\n\n decoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(\n forward_cells, backward_cells, encoder_layer, dtype=tf.float32)\n\n rnn_reshaped = tf.reshape(decoder_layer, [batch_size * width, -1])\n\n logits = slim.fully_connected(rnn_reshaped, self.num_classes, activation_fn=None)\n logits = tf.reshape(logits, [batch_size, width, self.num_classes])\n rnn_out = tf.transpose(logits, (1, 0, 2))\n\n return rnn_out\n",
"\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport logging\nimport sys\nfrom collections import namedtuple\nfrom typing import List, Tuple\n\nimport copy\nimport onnx\nimport os\nimport pytest\nimport torch\nimport torch.nn.functional as F\nfrom functools import partial\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import PackedSequence\n\nfrom nncf.dynamic_graph.context import TracingContext\nfrom nncf.dynamic_graph.transform_graph import replace_modules\nfrom nncf.model_creation import create_compressed_model\nfrom nncf.layers import LSTMCellNNCF, NNCF_RNN, ITERATION_MODULES\nfrom tests.modules.seq2seq.gnmt import GNMT\nfrom tests.test_helpers import get_empty_config, get_grads, create_compressed_model_and_algo_for_test\n\n\ndef replace_lstm(model):\n def replace_fn(module_):\n if not isinstance(module_, nn.LSTM):\n return module_\n device = next(module_.parameters()).device\n custom_lstm = NNCF_RNN('LSTM', input_size=module_.input_size, hidden_size=module_.hidden_size,\n num_layers=module_.num_layers, bidirectional=module_.bidirectional,\n batch_first=module_.batch_first, dropout=module_.dropout,\n bias=module_.bias)\n\n def get_param_names(bias):\n # type: (bool) -> List[str]\n suffixes = ['ih', 'hh']\n names = ['weight_' + suffix for suffix in suffixes]\n if bias:\n names += ['bias_' + suffix for suffix in suffixes]\n return names\n\n for l in range(custom_lstm.num_layers):\n for d in range(custom_lstm.num_directions):\n for name in get_param_names(custom_lstm.bias):\n suffix = '_reverse' if d == 1 else ''\n param_name = name + '_l{}{}'.format(l, suffix)\n param = getattr(module_, param_name)\n getattr(custom_lstm, param_name).data.copy_(param.data)\n custom_lstm.to(device)\n return custom_lstm\n\n if isinstance(model, nn.LSTM):\n return replace_fn(model)\n affected_scopes = []\n return replace_modules(model, replace_fn, affected_scopes)[0]\n\ndef clone_test_data(data_list):\n # type: (LSTMTestData) -> List[torch.Tensor]\n results = []\n x = data_list[0]\n result = x if isinstance(x, PackedSequence) else x.clone()\n results.append(result)\n for tensor_list in data_list[1:]:\n result = ()\n for tensor in tensor_list:\n if isinstance(tensor, Variable):\n sub_result = tensor.data.clone()\n sub_result = Variable(sub_result, requires_grad=True)\n else:\n sub_result = tensor.clone()\n result += (sub_result,)\n results.append(result)\n return results\n\n\nLSTMTestSizes = namedtuple('LSTMTestSizes', ['input_size', 'hidden_size', 'batch', 'seq_length'])\nLSTMTestData = namedtuple('LSTMTestData', ['x', 'h0', 'c0', 'weight_ih', 'weight_hh', 'bias_ih', 'bias_hh'])\n\n\[email protected]('sizes',\n [LSTMTestSizes(512, 768, 128, 50),\n LSTMTestSizes(3, 3, 3, 3),\n LSTMTestSizes(1, 1, 1, 1)], ids=lambda val: '[{}]'.format('-'.join([str(v) for v in val])))\nclass TestLSTMCell:\n @staticmethod\n def generate_lstm_data(p, num_layers=1, num_directions=1, variable_length=False, sorted_=True, batch_first=True,\n 
is_cuda=False, bias=True, empty_initial=False, is_backward=False):\n # type: (LSTMTestSizes, int, int, bool, bool, bool, bool, bool, bool, bool) -> LSTMTestData\n num_chunks = 4\n seq_list = []\n if variable_length:\n seq_lens = torch.IntTensor(p.batch).random_(1, p.seq_length + 1)\n if sorted_:\n seq_lens = torch.sort(seq_lens, descending=True).values\n for seq_size in seq_lens:\n seq_list.append(torch.randn(seq_size.item(), p.input_size))\n padded_seq_batch = torch.nn.utils.rnn.pad_sequence(seq_list, batch_first=batch_first)\n x_data = torch.nn.utils.rnn.pack_padded_sequence(padded_seq_batch, lengths=seq_lens,\n batch_first=batch_first, enforce_sorted=sorted_)\n\n else:\n size = (p.seq_length, p.batch, p.input_size)\n if batch_first:\n size = (p.batch, p.seq_length, p.input_size)\n x_data = torch.randn(*size)\n\n def wrap_tensor(tensor):\n wrapped = tensor\n if is_cuda:\n wrapped = wrapped.cuda()\n if is_backward:\n wrapped = Variable(wrapped, requires_grad=True)\n return wrapped\n\n if is_cuda:\n x_data = x_data.cuda()\n h0, c0, wih, whh, bih, bhh = ([] for _ in range(6))\n for layer_ in range(num_layers):\n for _ in range(num_directions):\n layer_input_size = p.input_size if layer_ == 0 else p.hidden_size * num_directions\n if not empty_initial:\n h0.append(wrap_tensor(torch.randn(p.batch, p.hidden_size)))\n c0.append(wrap_tensor(torch.randn(p.batch, p.hidden_size)))\n wih.append(wrap_tensor(torch.rand(num_chunks * p.hidden_size, layer_input_size)))\n whh.append(wrap_tensor(torch.rand(num_chunks * p.hidden_size, p.hidden_size)))\n if bias:\n bih.append(wrap_tensor(torch.rand(num_chunks * p.hidden_size)))\n bhh.append(wrap_tensor(torch.rand(num_chunks * p.hidden_size)))\n result = LSTMTestData(x_data, h0, c0, wih, whh, bih, bhh)\n return result\n\n @staticmethod\n def set_weights(cell, data):\n # type: (nn.LSTMCell, LSTMTestData) -> None\n for name in TestLSTM.get_param_names(bias=True):\n param = getattr(data, name)\n if param:\n getattr(cell, name).data.copy_(param[0].data)\n\n def test_forward_lstm_cell(self, sizes, _seed):\n p = sizes\n ref_data = TestLSTMCell.generate_lstm_data(p, batch_first=False)\n test_data = LSTMTestData(*clone_test_data(ref_data))\n\n ref_rnn = nn.LSTMCell(p.input_size, p.hidden_size)\n TestLSTMCell.set_weights(ref_rnn, ref_data)\n test_rnn = LSTMCellNNCF(p.input_size, p.hidden_size)\n TestLSTMCell.set_weights(test_rnn, test_data)\n\n for i in range(p.seq_length):\n ref_result = ref_rnn(ref_data.x[i], (ref_data.h0[0], ref_data.c0[0]))\n test_result = test_rnn(test_data.x[i], (test_data.h0[0], test_data.c0[0]))\n for (ref, test) in list(zip(ref_result, test_result)):\n torch.testing.assert_allclose(test, ref)\n\n def test_backward_lstm_cell(self, sizes, _seed):\n p = sizes\n ref_data = TestLSTMCell.generate_lstm_data(p, batch_first=False, is_backward=True)\n with torch.no_grad():\n test_data = LSTMTestData(*clone_test_data(ref_data))\n\n ref_rnn = nn.LSTMCell(p.input_size, p.hidden_size)\n TestLSTMCell.set_weights(ref_rnn, ref_data)\n test_rnn = LSTMCellNNCF(p.input_size, p.hidden_size)\n TestLSTMCell.set_weights(test_rnn, test_data)\n\n for i in range(p.seq_length):\n ref_result = ref_rnn(ref_data.x[i], (ref_data.h0[0], ref_data.c0[0]))\n test_result = test_rnn(test_data.x[i], (test_data.h0[0], test_data.c0[0]))\n ref_result[0].sum().backward()\n test_result[0].sum().backward()\n ref_grads = get_grads([ref_data.h0[0], ref_data.c0[0]])\n ref_grads += get_grads([ref_rnn.weight_ih, ref_rnn.weight_hh, ref_rnn.bias_ih, ref_rnn.bias_hh])\n test_grads = 
get_grads([ref_data.h0[0], ref_data.c0[0]])\n test_grads += get_grads([test_rnn.weight_ih, test_rnn.weight_hh, test_rnn.bias_ih, test_rnn.bias_hh])\n for (ref, test) in list(zip(test_grads, ref_grads)):\n torch.testing.assert_allclose(test, ref)\n\n\ndef test_export_lstm_cell(tmp_path):\n config = get_empty_config(model_size=1, input_sample_size=(1, 1))\n config['compression'] = {'algorithm': 'quantization'}\n\n model, algo = create_compressed_model_and_algo_for_test(LSTMCellNNCF(1, 1), config)\n\n test_path = str(tmp_path.joinpath('test.onnx'))\n algo.export_model(test_path)\n assert os.path.exists(test_path)\n\n onnx_num = 0\n model = onnx.load(test_path)\n # pylint: disable=no-member\n for node in model.graph.node:\n if node.op_type == 'FakeQuantize':\n onnx_num += 1\n assert onnx_num == 12\n\n\[email protected]('sizes',\n [LSTMTestSizes(512, 324, 128, 50),\n LSTMTestSizes(3, 3, 3, 3),\n LSTMTestSizes(1, 1, 1, 1)], ids=lambda val: '[{}]'.format('-'.join([str(v) for v in val])))\[email protected]('bidirectional', (True, False), ids=('bi', 'uni'))\[email protected](\"bias\", [True, False], ids=['bias', 'no_bias'])\[email protected]('num_layers', [1, 2], ids=['single_layer', 'stacked'])\[email protected]('batch_first', [True, False], ids=['batch_first', 'seq_first'])\[email protected](('variable_length', 'sorted_'),\n ([True, True],\n [True, False],\n [False, False]), ids=['packed_sorted', 'packed_unsorted', 'not_packed'])\[email protected]('is_cuda', [True, False], ids=['cuda', 'cpu'])\[email protected]('empty_initial', [True, False], ids=['no_initial', 'with_initial'])\n# TODO: dropout gives different result. Looks like different random seed on CPU\n# @pytest.mark.parametrize('dropout', [0, 0.9], ids=['no_dropout', 'with_dropout'])\[email protected]('dropout', [0], ids=['no_dropout'])\nclass TestLSTM:\n def test_forward_lstm(self, sizes, bidirectional, num_layers, bias, batch_first, variable_length, sorted_, is_cuda,\n empty_initial, dropout, _seed):\n num_directions = 2 if bidirectional else 1\n p = sizes\n\n ref_data = TestLSTMCell.generate_lstm_data(p, num_layers, num_directions, variable_length, sorted_, batch_first,\n is_cuda, bias, empty_initial)\n\n ref_rnn = nn.LSTM(input_size=p.input_size, hidden_size=p.hidden_size, num_layers=num_layers,\n bidirectional=bidirectional, batch_first=batch_first, bias=bias, dropout=dropout)\n self.set_ref_lstm_weights(ref_data, ref_rnn, num_layers, num_directions, bias)\n ref_hidden = None if empty_initial else self.get_ref_lstm_hidden(ref_data)\n\n test_data = LSTMTestData(*clone_test_data(ref_data))\n\n class ModelWrapper(nn.Module):\n def __init__(self, lstm):\n super().__init__()\n self.lstm = lstm\n\n def forward(self, *input_):\n return self.lstm(*input_)\n\n wrapped_ref_rnn = ModelWrapper(ref_rnn)\n wrapped_test_rnn = replace_lstm(copy.deepcopy(wrapped_ref_rnn))\n test_rnn = wrapped_test_rnn.lstm\n test_hidden = None if empty_initial else self.get_test_lstm_hidden(test_data)\n\n if is_cuda:\n ref_rnn.cuda()\n test_rnn.cuda()\n ref_output, (ref_hn, ref_cn) = ref_rnn(ref_data.x, ref_hidden)\n test_output, (test_hn, test_cn) = test_rnn(test_data.x, test_hidden)\n\n torch.testing.assert_allclose(test_hn[0], ref_hn[0], rtol=1e-3, atol=1e-4)\n torch.testing.assert_allclose(test_cn[0], ref_cn[0], rtol=1e-3, atol=1e-4)\n if variable_length:\n torch.testing.assert_allclose(test_output.batch_sizes, ref_output.batch_sizes)\n torch.testing.assert_allclose(test_output.data, ref_output.data, rtol=1e-2, atol=1e-3)\n if not sorted_:\n 
torch.testing.assert_allclose(test_output.sorted_indices, ref_output.sorted_indices)\n torch.testing.assert_allclose(test_output.unsorted_indices, ref_output.unsorted_indices)\n else:\n torch.testing.assert_allclose(test_output, ref_output, rtol=1e-2, atol=1e-3)\n\n def test_backward_lstm(self, sizes, bidirectional, num_layers, bias, batch_first, variable_length, sorted_, is_cuda,\n empty_initial, dropout, _seed):\n\n num_directions = 2 if bidirectional else 1\n\n p = sizes\n\n ref_data = TestLSTMCell.generate_lstm_data(p, num_layers, num_directions, variable_length, sorted_, batch_first,\n is_cuda, bias, empty_initial, True)\n\n ref_rnn = nn.LSTM(input_size=p.input_size, hidden_size=p.hidden_size, num_layers=num_layers,\n bidirectional=bidirectional, batch_first=batch_first, bias=bias, dropout=dropout)\n self.set_ref_lstm_weights(ref_data, ref_rnn, num_layers, num_directions, bias)\n ref_hidden = None if empty_initial else self.get_ref_lstm_hidden(ref_data)\n\n test_data = LSTMTestData(*clone_test_data(ref_data))\n test_rnn = replace_lstm(copy.deepcopy(ref_rnn))\n test_hidden = None if empty_initial else self.get_test_lstm_hidden(test_data)\n\n if is_cuda:\n ref_rnn.cuda()\n test_rnn.cuda()\n\n ref_output, _ = ref_rnn(ref_data.x, ref_hidden)\n test_output, _ = test_rnn(test_data.x, test_hidden)\n\n ref_output[0].sum().backward()\n test_output[0].sum().backward()\n\n ref_grads = get_grads(self.flatten_nested_lists(ref_rnn.all_weights))\n test_grads = get_grads(self.flatten_nested_lists(test_rnn.all_weights))\n if not empty_initial:\n # TODO: compare gradient of all hidden\n ref_grads += get_grads([ref_data.h0[0], ref_data.c0[0]])\n test_grads += get_grads([test_hidden[0][0], test_hidden[1][0]])\n for (ref, test) in list(zip(test_grads, ref_grads)):\n torch.testing.assert_allclose(test, ref, rtol=1e-1, atol=1e-1)\n\n @classmethod\n def flatten_nested_lists(cls, nested_list):\n # type: (List) -> List[torch.Tensor]\n return [tensor for tensor_tuple in nested_list for tensor in tensor_tuple]\n\n @classmethod\n def get_test_lstm_hidden(cls, data):\n # type: (LSTMTestData) -> List[Tuple[torch.Tensor, ...]]\n result = []\n hidden_names = ['h0', 'c0']\n for name in hidden_names:\n hidden_list = getattr(data, name)\n element = ()\n num_hidden = len(hidden_list)\n for i in range(num_hidden):\n element += (hidden_list[i],)\n result.append(element)\n return result\n\n @classmethod\n def get_ref_lstm_hidden(cls, data):\n # type: (LSTMTestData) -> Tuple[torch.Tensor, torch.Tensor]\n hidden = cls.get_test_lstm_hidden(data)\n hidden_states = [torch.unsqueeze(tensor, dim=0) for tensor in hidden[0]]\n cell_states = [torch.unsqueeze(tensor, dim=0) for tensor in hidden[1]]\n return (\n torch.cat(hidden_states, dim=0),\n torch.cat(cell_states, dim=0)\n )\n\n @classmethod\n def set_ref_lstm_weights(cls, data, nn_lstm, num_layers, num_directions, bias):\n # type: (LSTMTestData, nn.LSTM, int, int, bool) -> None\n for l in range(num_layers):\n for d in range(num_directions):\n i = l * num_directions + d\n for name in cls.get_param_names(bias):\n suffix = '_reverse' if d == 1 else ''\n param = getattr(data, name)\n param_name = name + '_l{}{}'.format(l, suffix)\n getattr(nn_lstm, param_name).data.copy_(param[i].data)\n\n @classmethod\n def get_param_names(cls, bias):\n # type: (bool) -> List[str]\n suffixes = ['ih', 'hh']\n names = ['weight_' + suffix for suffix in suffixes]\n if bias:\n names += ['bias_' + suffix for suffix in suffixes]\n return names\n\n\ndef test_export_stacked_bi_lstm(tmp_path):\n p = 
LSTMTestSizes(3, 3, 3, 3)\n config = get_empty_config(input_sample_size=(1, p.hidden_size, p.input_size))\n config['compression'] = {'algorithm': 'quantization'}\n\n # TODO: batch_first=True fails with building graph: ambiguous call to mul or sigmoid\n test_rnn = NNCF_RNN('LSTM', input_size=p.input_size, hidden_size=p.hidden_size, num_layers=2, bidirectional=True,\n batch_first=False)\n model, algo = create_compressed_model_and_algo_for_test(test_rnn, config)\n\n test_path = str(tmp_path.joinpath('test.onnx'))\n algo.export_model(test_path)\n assert os.path.exists(test_path)\n\n onnx_num = 0\n model = onnx.load(test_path)\n # pylint: disable=no-member\n for node in model.graph.node:\n if node.op_type == 'FakeQuantize':\n onnx_num += 1\n assert onnx_num == 50\n\n\nclass TestNumberOfNodes:\n logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\n def test_number_of_calling_fq_for_lstm(self):\n p = LSTMTestSizes(1, 1, 1, 5)\n num_layers = 2\n bidirectional = True\n num_directions = 2 if bidirectional else 1\n bias = True\n batch_first = False\n config = get_empty_config(input_sample_size=(p.seq_length, p.batch, p.input_size))\n config['compression'] = {'algorithm': 'quantization', 'quantize_inputs': True}\n\n test_data = TestLSTMCell.generate_lstm_data(p, num_layers, num_directions, bias=bias, batch_first=batch_first)\n\n test_rnn = NNCF_RNN('LSTM', input_size=p.input_size, hidden_size=p.hidden_size, num_layers=num_layers,\n bidirectional=bidirectional, bias=bias, batch_first=batch_first)\n TestLSTM.set_ref_lstm_weights(test_data, test_rnn, num_layers, num_directions, bias)\n test_hidden = TestLSTM.get_test_lstm_hidden(test_data)\n\n model, algo = create_compressed_model_and_algo_for_test(test_rnn, config)\n\n class Counter:\n def __init__(self):\n self.count = 0\n\n def next(self):\n self.count += 1\n\n def hook(model, input_, counter):\n counter.next()\n\n counters = {}\n for name, quantizer in algo.all_quantizations.items():\n counter = Counter()\n counters[name] = counter\n quantizer.register_forward_pre_hook(partial(hook, counter=counter))\n _ = model(test_data.x, test_hidden)\n assert model.get_graph().get_nodes_count() == 107 # NB: may always fail in debug due to superfluous 'cat' nodes\n assert len(counters) == 50\n for counter in counters.values():\n assert counter.count == p.seq_length\n\n def test_number_of_calling_fq_for_gnmt(self):\n torch.cuda.set_device(0)\n device = torch.device('cuda')\n batch_first = False\n vocab_size = 32000\n model_config = {'hidden_size': 100,\n 'vocab_size': vocab_size,\n 'num_layers': 4,\n 'dropout': 0.2,\n 'batch_first': batch_first,\n 'share_embedding': True,\n }\n batch_size = 128\n sequence_size = 50\n input_sample_size = (batch_size, sequence_size) if batch_first else (sequence_size, batch_size)\n config = get_empty_config(input_sample_size=input_sample_size)\n config['compression'] = \\\n {'algorithm': 'quantization',\n 'quantize_inputs': True,\n 'quantizable_subgraph_patterns': [[\"linear\", \"__add__\"],\n [\"sigmoid\", \"__mul__\", \"__add__\"],\n [\"__add__\", \"tanh\", \"__mul__\"],\n [\"sigmoid\", \"__mul__\"]],\n 'disable_function_quantization_hooks': True}\n config['scopes_without_shape_matching'] = \\\n ['GNMT/ResidualRecurrentDecoder[decoder]/RecurrentAttention[att_rnn]/BahdanauAttention[attn]', ]\n\n model = GNMT(**model_config)\n model = replace_lstm(model)\n model.to(device)\n\n def dummy_forward_fn(model, seq_len=sequence_size):\n def gen_packed_sequence():\n seq_list = []\n seq_lens = torch.LongTensor(batch_size).random_(1, 
seq_len + 1)\n seq_lens = torch.sort(seq_lens, descending=True).values\n for seq_size in seq_lens:\n seq_list.append(torch.LongTensor(seq_size.item()).random_(1, vocab_size).to(device))\n padded_seq_batch = torch.nn.utils.rnn.pad_sequence(seq_list, batch_first=batch_first)\n return padded_seq_batch, seq_lens\n\n x_data, seq_lens = gen_packed_sequence()\n input_encoder = x_data\n input_enc_len = seq_lens.to(device)\n input_decoder = gen_packed_sequence()[0]\n model(input_encoder, input_enc_len, input_decoder)\n\n algo, model = create_compressed_model(model, config, dummy_forward_fn, dump_graphs=False)\n model.to(device)\n\n class Counter:\n def __init__(self):\n self.count = 0\n\n def next(self):\n self.count += 1\n\n def hook(model, input_, counter):\n counter.next()\n\n counters = {}\n for name, quantizer in algo.all_quantizations.items():\n counter = Counter()\n counters[str(name)] = counter\n quantizer.register_forward_pre_hook(partial(hook, counter=counter))\n dummy_forward_fn(model)\n assert model.get_graph().get_nodes_count() == 230 # NB: may always fail in debug due to superfluous 'cat' nodes\n assert len(counters) == 55\n for name, counter in counters.items():\n if 'cell' in name or \"LSTMCellForwardNNCF\" in name:\n assert counter.count == sequence_size, name\n else:\n assert counter.count == 1, name\n new_seq_len = int(sequence_size / 2)\n dummy_forward_fn(model, new_seq_len)\n assert model.get_graph().get_nodes_count() == 230 # NB: may always fail in debug due to superfluous 'cat' nodes\n assert len(counters) == 55\n for name, counter in counters.items():\n if 'cell' in name or \"LSTMCellForwardNNCF\" in name:\n assert counter.count == sequence_size + new_seq_len, name\n else:\n assert counter.count == 2, name\n\n def test_number_of_nodes_for_module_in_loop(self):\n num_iter = 5\n\n class LoopModule(nn.Module):\n @ITERATION_MODULES.register('Inner')\n class Inner(nn.Module):\n def __init__(self):\n super().__init__()\n self.operator1 = torch.sigmoid\n self.operator2 = torch.tanh\n\n def forward(self, x):\n s = self.operator1(x)\n t = self.operator2(x)\n result = t + s\n return result\n\n @staticmethod\n def nodes_number():\n return 3\n\n def __init__(self):\n super().__init__()\n self.inner = self.Inner()\n\n def forward(self, x):\n for _ in range(num_iter):\n x = self.inner(x)\n return x\n\n def nodes_number(self):\n return self.inner.nodes_number()\n\n test_module = LoopModule()\n context = TracingContext()\n with context as ctx:\n _ = test_module(torch.zeros(1))\n assert ctx.graph.get_nodes_count() == test_module.nodes_number()\n\n def test_number_of_nodes_for_module_in_loop__not_input_node(self):\n num_iter = 5\n\n class LoopModule(nn.Module):\n class Inner(nn.Module):\n def forward(self, x):\n s = F.sigmoid(x)\n t = F.tanh(x)\n result = F.sigmoid(x) * t + F.tanh(x) * s\n return result\n\n @staticmethod\n def nodes_number():\n return 7\n\n def __init__(self):\n super().__init__()\n self.inner = self.Inner()\n\n def forward(self, x):\n for _ in range(num_iter):\n x = self.inner(F.relu(x))\n return x\n\n def nodes_number(self):\n return self.inner.nodes_number() + num_iter\n\n test_module = LoopModule()\n context = TracingContext()\n with context as ctx:\n _ = test_module(torch.zeros(1))\n assert ctx.graph.get_nodes_count() == test_module.nodes_number()\n\n def test_number_of_nodes_for_module_with_nested_loops(self):\n num_iter = 5\n\n class TestIterModule(nn.Module):\n @ITERATION_MODULES.register()\n class TestIterModule_ResetPoint(nn.Module):\n def __init__(self, 
loop_module):\n super().__init__()\n self.loop_module = loop_module\n\n def forward(self, x):\n return self.loop_module(F.relu(x))\n\n def __init__(self):\n super().__init__()\n self.loop_module = self.LoopModule2()\n self.reset_point = self.TestIterModule_ResetPoint(self.loop_module)\n\n def forward(self, x):\n for _ in range(num_iter):\n x = self.reset_point(x)\n return x\n\n class LoopModule2(nn.Module):\n\n @ITERATION_MODULES.register()\n class LoopModule2_ResetPoint(nn.Module):\n def __init__(self, inner):\n super().__init__()\n self.inner = inner\n\n def forward(self, x):\n return self.inner(F.relu(x))\n\n def __init__(self):\n super().__init__()\n self.inner = self.Inner()\n self.reset_helper = self.LoopModule2_ResetPoint(self.inner)\n\n def forward(self, x):\n for _ in range(num_iter):\n self.reset_helper(x)\n return x\n\n class Inner(nn.Module):\n def forward(self, x):\n s = F.sigmoid(x)\n t = F.tanh(x)\n result = t + s\n return result\n\n test_module = TestIterModule()\n context = TracingContext()\n with context as ctx:\n _ = test_module(torch.zeros(1))\n assert ctx.graph.get_nodes_count() == num_iter\n\n def test_number_of_nodes_for_repeated_module(self):\n\n class LoopModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.operator = F.relu\n self.layers = nn.ModuleList([\n nn.Conv2d(1, 1, 1),\n nn.Conv2d(1, 1, 1)\n ])\n\n def forward(self, x):\n for layer in self.layers:\n x = F.relu(layer(x))\n return x\n\n test_module = LoopModule()\n context = TracingContext()\n with context as ctx:\n x = test_module(torch.zeros(1, 1, 1, 1))\n assert ctx.graph.get_nodes_count() == 4 # NB: may always fail in debug due to superfluous 'cat' nodes\n _ = test_module(x)\n assert ctx.graph.get_nodes_count() == 8 # NB: may always fail in debug due to superfluous 'cat' nodes\n",
"#!/usr/bin/env python3\n#\n# Copyright (C) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\n# pylint: disable=line-too-long\n\nimport os\nimport matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements\nfrom ssd_detector.readers.object_detector_json import ObjectDetectorJson\n\nCURRENT_DIR = os.path.dirname(os.path.realpath(__file__))\nROOT_DIR = os.path.normpath(os.path.join(CURRENT_DIR, \"../../..\"))\n\n# See more details about parameters in TensorFlow documentation tf.estimator\nclass train:\n annotation_path = os.path.join(ROOT_DIR, \"./data/vlp_test/annotations_train.json\") # Path to the annotation file\n cache_type = \"ENCODED\" # Type of data to save in memory, possible options: 'FULL', 'ENCODED', 'NONE'\n\n batch_size = 32 # Number of images in the batch\n steps = 65000 # Number of steps for which to train model\n max_steps = None # Number of total steps for which to train model\n save_checkpoints_steps = 1000 # Number of training steps when checkpoint should be saved\n keep_checkpoint_every_n_hours = 6 # Checkpoint should be saved forever after every n hours\n save_summary_steps = 100 # Number of steps when the summary information should be saved\n random_seed = 666 # Random seed\n\n fill_with_current_image_mean = True # Parameter of data transformer\n\n class execution:\n CUDA_VISIBLE_DEVICES = \"0\" # Environment variable to control CUDA device used for training\n per_process_gpu_memory_fraction = 0.8 # Fix extra memory allocation issue\n allow_growth = True # Option which attempts to allocate only as much GPU memory based on runtime allocations\n\n intra_op_parallelism_threads = 2\n inter_op_parallelism_threads = 8\n transformer_parallel_calls = 4 # Number of parallel threads in data transformer/augmentation\n transformer_prefetch_size = 8 # Number of batches to prefetch\n\n\nclass eval:\n annotation_path = {\n \"train\": os.path.join(ROOT_DIR, \"./data/vlp_test/annotations_train.json\"),\n \"test\": os.path.join(ROOT_DIR, \"./data/vlp_test/annotations_test.json\")\n } # Dictionary with paths to annotations and its short names which will be displayed in the TensorBoard\n datasets = [\"train\", \"test\"] # List of names from annotation_path dictionary on which evaluation will be launched\n vis_num = 2 # Select random images for visualization in the TensorBoard\n save_images_step = 2 # Save images every 2-th evaluation\n batch_size = 2 # Number of images in the batch\n\n class execution:\n CUDA_VISIBLE_DEVICES = \"0\" # Environment variable to control CUDA device used for evaluation\n per_process_gpu_memory_fraction = 0.5 # Fix extra memory allocation issue\n allow_growth = True # Option which attempts to allocate only as much GPU memory based on runtime allocations\n\n intra_op_parallelism_threads = 1\n inter_op_parallelism_threads = 1\n transformer_parallel_calls = 1 # Number of parallel threads in data transformer/augmentation\n transformer_prefetch_size = 1 # Number of batches to prefetch\n\n\nclass infer:\n out_subdir = 
\"predictions\" # Name of folder in model directory where output json files with detections will be saved\n batch_size = 32 # Number of images in the batch\n\n class execution:\n CUDA_VISIBLE_DEVICES = \"0\" # Environment variable to control cuda device used for training\n per_process_gpu_memory_fraction = 0.5 # Fix extra memory allocation issue\n allow_growth = True # Option which attempts to allocate only as much GPU memory based on runtime allocations\n\n intra_op_parallelism_threads = 2\n inter_op_parallelism_threads = 8\n transformer_parallel_calls = 4 # Number of parallel threads in data transformer/augmentation\n transformer_prefetch_size = 8 # Number of batches to prefetch\n\n\ninput_shape = (256, 256, 3) # Input shape of the model (width, height, channels)\nclasses = ObjectDetectorJson.get_classes_from_coco_annotation(os.path.join(CURRENT_DIR, train.annotation_path))\nMODEL_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'model') # Path to the folder where all training and evaluation artifacts will be located\nif not os.path.exists(MODEL_DIR):\n os.makedirs(MODEL_DIR)\n\n\ndef learning_rate_schedule(): # Function which controls learning rate during training\n import tensorflow as tf\n step = tf.train.get_or_create_global_step()\n lr = tf.case([(tf.less(step, 1000), lambda: tf.constant(0.0004)),\n (tf.less(step, 10000), lambda: tf.constant(0.01)),\n (tf.less(step, 40000), lambda: tf.constant(0.005)),\n (tf.less(step, 55000), lambda: tf.constant(0.0005)),\n (tf.less(step, 65000), lambda: tf.constant(0.00005))])\n return lr\n\n\ndef optimizer(learning_rate):\n import tensorflow as tf\n return tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)\n\n\ndetector_params = {\n \"num_classes\": len(classes), # Number of classes to detect\n \"priors_rule\": \"custom\", # Prior boxes rule for SSD, possible options: 'caffe', 'object_detection_api', 'custom'\n \"priors\": [\n [(0.068, 0.03), (0.052, 0.097)],\n [(0.18, 0.087), (0.11, 0.33), (0.43, 0.1)],\n [(0.26, 0.27), (0.34, 0.4), (0.2, 0.55)],\n [(0.37, 0.52)],\n [(0.48, 0.45)],\n [(0.63, 0.64), (0.77, 0.77), (0.95, 0.95)]\n ],\n \"mobilenet_version\": \"v2\", # Version of mobilenet backbone, possible options: 'v1', 'v2'\n \"initial_weights_path\": \"\", # Path to initial weights\n \"depth_multiplier\": 0.35, # MobileNet channels multiplier\n \"weight_regularization\": 1e-3, # L2 weight regularization\n \"learning_rate\": learning_rate_schedule, # Learning rate\n \"optimizer\": optimizer, # Optimizer\n \"collect_priors_summary\": False, # Option to collect priors summary for further analysis\n}\n",
"\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\n\nfrom collections import OrderedDict\nfrom enum import Enum\nfrom typing import List, Callable, Tuple, Dict, Optional\n\nimport functools\nimport networkx as nx\nimport torch\nfrom copy import deepcopy\nfrom torch import nn\n\nfrom nncf.debug import CombinedDebugInterface, debuggable_forward, is_debug\nfrom nncf.dynamic_graph.context import TracingContext\nfrom nncf.dynamic_graph.graph import NNCFGraph, InputAgnosticOperationExecutionContext, OperationExecutionContext\nfrom nncf.dynamic_graph.graph import ShapeIgnoringTensorMetaComparator\nfrom nncf.dynamic_graph.graph_builder import GraphBuilder, PostGraphBuildActing, create_dummy_forward_fn, ModelInputInfo\nfrom nncf.dynamic_graph.graph_matching import NodeExpression\nfrom nncf.dynamic_graph.patch_pytorch import ignore_scope, nncf_model_input, MODEL_INPUT_OP_NAME\nfrom nncf.dynamic_graph.operator_metatypes import OPERATOR_METATYPES\nfrom nncf.dynamic_graph.transform_graph import replace_modules_by_nncf_modules\nfrom nncf.hw_config import HWConfig\nfrom nncf.layers import NNCF_MODULES\nfrom nncf.quantization.layers import QUANTIZATION_MODULES\nfrom nncf.utils import get_all_modules_by_type, get_state_dict_names_with_modules\nfrom nncf.nncf_logger import logger as nncf_logger\n\nMODEL_WRAPPED_BY_NNCF_ATTR_NAME = 'nncf_module'\n\n\nclass CompressionModuleType(Enum):\n FUNCTION_QUANTIZER = 0\n ACTIVATION_QUANTIZER = 1\n\n\[email protected]_ordering\nclass OperationPriority(Enum):\n DEFAULT_PRIORITY = 0\n SPARSIFICATION_PRIORITY = 2\n QUANTIZATION_PRIORITY = 11\n PRUNING_PRIORITY = 1\n\n def __lt__(self, other):\n # pylint: disable=comparison-with-callable\n return self.value < other.value\n\n\nclass InsertionType(Enum):\n OPERATOR_PRE_HOOK = 0\n OPERATOR_POST_HOOK = 1\n NNCF_MODULE_PRE_OP = 2\n NNCF_MODULE_POST_OP = 3\n\n def __eq__(self, other):\n # pylint: disable=comparison-with-callable\n if isinstance(other, InsertionType):\n return self.value == other.value\n return self.value == other\n\n\nclass InsertionInfo:\n def __init__(self, op_exec_context: OperationExecutionContext,\n is_input=False,\n is_output=False,\n shape_to_operate_on=None):\n self.op_exec_context = op_exec_context # type: OperationExecutionContext\n self.is_input = is_input\n self.is_output = is_output\n self.shape_to_operate_on = shape_to_operate_on\n\n def __eq__(self, other: 'InsertionInfo'):\n return self.op_exec_context == other.op_exec_context\n\n def __hash__(self):\n return self.op_exec_context.__hash__()\n\n\nclass InsertionPoint:\n def __init__(self, ia_op_exec_context: InputAgnosticOperationExecutionContext,\n insertion_type: InsertionType):\n self.ia_op_exec_context = ia_op_exec_context\n self.insertion_type = insertion_type\n\n def __eq__(self, other: 'InsertionPoint'):\n return self.insertion_type == other.insertion_type and self.ia_op_exec_context == other.ia_op_exec_context\n\n def __str__(self):\n return str(self.insertion_type) + \" \" + 
str(self.ia_op_exec_context)\n\n def __hash__(self):\n return hash(str(self))\n\n\nclass InsertionCommand:\n def __init__(self, point: InsertionPoint, fn: Callable,\n priority: OperationPriority = OperationPriority.DEFAULT_PRIORITY):\n self.insertion_point = point # type: InsertionPoint\n self.fn = fn # type: Callable\n self.priority = priority # type: OperationPriority\n\n\nclass LoadStateListener:\n \"\"\"\n Resets the initialization flags (`initialized`) for all quantization modules on `load_state_dict` call.\n These flags are used to update params that were not loaded (from a checkpoint or the model's state)\n at the initialization stage of the algorithm.\n The flags must be reset on each call of `load_state_dict`, because the internal method (`build_graph`)\n restores the model state by calling this method.\n \"\"\"\n\n def __init__(self, model, all_quantizations):\n for prefix, module in all_quantizations.items():\n module.state_dict_name = prefix\n # pylint: disable=protected-access\n self.hook = model._register_load_state_dict_pre_hook(\n functools.partial(self.hook_fn, quantize_modules=all_quantizations.values()))\n\n def hook_fn(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs,\n quantize_modules):\n for module in quantize_modules:\n module.initialized = False\n\n def close(self):\n self.hook.remove()\n\n\nclass InsertionPointGraphNodeType(Enum):\n INSERTION_POINT = 0\n OPERATOR = 1\n\n\nclass InsertionPointGraph(nx.DiGraph):\n \"\"\"\n This graph is built from the NNCFGraph representation of the model control flow graph and adds ephemeral\n \"insertion point nodes\" into the NNCF model graph representation corresponding to operator pre- and\n post-hooks. Module pre-op and post-op insertion points are currently not reflected here, but they are\n probably not required for quantizing activations, for which the quantizer propagation makes sense.\n This \"insertion point graph\" representation is useful for quantizer propagation and for referencing\n the compression algorithm hooks to the model operations to which they are applied.\n \"\"\"\n NODE_TYPE_NODE_ATTR = \"node_type\"\n INSERTION_POINT_DATA_NODE_ATTR = \"insertion_point_data\"\n IS_IN_NNCF_MODULE_NODE_ATTR = \"is_in_nncf_module\"\n REGULAR_NODE_REF_NODE_ATTR = \"regular_node_ref\"\n ASSOCIATED_IP_NODE_KEYS_NODE_ATTR = \"associated_ip_node_keys\"\n OPERATOR_METATYPE_NODE_ATTR = \"op_meta\"\n\n PRE_HOOK_ID_PREFIX = \"PRE HOOK \" # NB: Do not use colon (':') in node keys! 
Causes trouble for .dot file export.\n POST_HOOK_ID_PREFIX = \"POST HOOK \"\n\n def __init__(self, model_nx_graph: nx.DiGraph):\n super().__init__()\n self._base_nx_graph = deepcopy(model_nx_graph)\n\n for node_key, node in self._base_nx_graph.nodes.items():\n attrs = {InsertionPointGraph.REGULAR_NODE_REF_NODE_ATTR: node,\n InsertionPointGraph.NODE_TYPE_NODE_ATTR: InsertionPointGraphNodeType.OPERATOR,\n InsertionPointGraph.ASSOCIATED_IP_NODE_KEYS_NODE_ATTR: set(),\n InsertionPointGraph.OPERATOR_METATYPE_NODE_ATTR: None}\n self.add_node(node_key, **attrs)\n for from_node, to_node in self._base_nx_graph.edges:\n self.add_edge(from_node, to_node)\n\n # TODO: Add insertion points for module pre- and post-ops.\n # Should roughly look so: first, determine subsets of nodes belonging to each\n # separate NNCF module (via scope analysis), then for each subset find input/output\n # edges using a corresponding NNCFGraph function; add a pre-op insertion point node as the\n # sink for input edges and connect it to input edge destinations, then add a post-op\n # insertion point as the source of output edges and connect it to output edge origins.\n\n node_keys_working_set = [deepcopy(node_key) for node_key in self.nodes.keys()]\n for operator_node_key in node_keys_working_set:\n original_node = self.nodes[operator_node_key][InsertionPointGraph.REGULAR_NODE_REF_NODE_ATTR]\n ia_op_exec_context = original_node[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR].input_agnostic\n\n # Pre-hook insertion point nodes\n pre_hook_insertion_point = InsertionPoint(ia_op_exec_context,\n InsertionType.OPERATOR_PRE_HOOK)\n attrs = {\n InsertionPointGraph.NODE_TYPE_NODE_ATTR: InsertionPointGraphNodeType.INSERTION_POINT,\n InsertionPointGraph.INSERTION_POINT_DATA_NODE_ATTR: pre_hook_insertion_point,\n }\n ip_node_key = self.get_pre_hook_node_key(str(operator_node_key))\n self.add_node(ip_node_key, **attrs)\n in_edges = list(self.in_edges(operator_node_key))\n for from_node_key, to_node_key in in_edges:\n self.remove_edge(from_node_key, to_node_key)\n self.add_edge(from_node_key, ip_node_key)\n self.add_edge(ip_node_key, operator_node_key)\n operator_node = self.nodes[operator_node_key]\n operator_node[InsertionPointGraph.ASSOCIATED_IP_NODE_KEYS_NODE_ATTR].add(ip_node_key)\n\n # Post-hook insertion point nodes\n post_hook_insertion_point = InsertionPoint(ia_op_exec_context,\n InsertionType.OPERATOR_POST_HOOK)\n attrs = {\n InsertionPointGraph.NODE_TYPE_NODE_ATTR: InsertionPointGraphNodeType.INSERTION_POINT,\n InsertionPointGraph.INSERTION_POINT_DATA_NODE_ATTR: post_hook_insertion_point\n }\n ip_node_key = self.get_post_hook_node_key(str(operator_node_key))\n self.add_node(ip_node_key, **attrs)\n out_edges = list(self.out_edges(operator_node_key))\n for from_node_key, to_node_key in out_edges:\n self.remove_edge(from_node_key, to_node_key)\n self.add_edge(ip_node_key, to_node_key)\n # TODO: introduce separate insertion points for operator outputs if\n # the outputs are semantically different\n self.add_edge(operator_node_key, ip_node_key)\n operator_node = self.nodes[operator_node_key]\n operator_node[InsertionPointGraph.ASSOCIATED_IP_NODE_KEYS_NODE_ATTR].add(ip_node_key)\n\n def get_ip_graph_with_merged_hw_optimized_operations(self,\n hw_config: Optional[HWConfig] = None) -> 'InsertionPointGraph':\n merged_ip_graph = deepcopy(self)\n pattern = self._get_mergeable_operator_patterns(hw_config)\n from nncf.dynamic_graph.graph_matching import search_all\n matches = search_all(self._base_nx_graph, pattern)\n for match in matches:\n if 
len(match) == 1:\n continue\n\n input_node_key = match[0]\n output_node_key = match[-1]\n in_edges = list(self.in_edges(input_node_key))\n out_edges = list(self.out_edges(output_node_key))\n\n assert len(in_edges) <= 1 # TODO: change to == 1 when input nodes are handled correctly\n\n if in_edges:\n in_edge_key = in_edges[0]\n in_edge_copy = deepcopy(self.edges[in_edge_key])\n out_edge_copies_dict = {}\n for out_edge_key in out_edges:\n out_edge_copies_dict[out_edge_key] = deepcopy(self.edges[out_edge_key])\n\n conserved_edges_list = out_edges\n if in_edges:\n conserved_edges_list.append(in_edge_key)\n\n merged_node_attrs = deepcopy(self.nodes[input_node_key])\n merged_node_attrs[InsertionPointGraph.ASSOCIATED_IP_NODE_KEYS_NODE_ATTR] = set()\n merged_node_key = \"\"\n for node_key in match:\n ip_node_keys = self.nodes[node_key][InsertionPointGraph.ASSOCIATED_IP_NODE_KEYS_NODE_ATTR]\n for ip_node_key in ip_node_keys:\n should_keep_ip_node = False\n for edge_key in conserved_edges_list:\n if ip_node_key in edge_key:\n should_keep_ip_node = True\n break\n if should_keep_ip_node:\n merged_node_attrs[InsertionPointGraph.ASSOCIATED_IP_NODE_KEYS_NODE_ATTR].add(ip_node_key)\n else:\n merged_ip_graph.remove_node(ip_node_key)\n merged_ip_graph.remove_node(node_key)\n merged_node_key += node_key + '\\n'\n\n merged_ip_graph.add_node(merged_node_key, **merged_node_attrs)\n if in_edges:\n merged_ip_graph.add_edge(in_edge_key[0], merged_node_key, **in_edge_copy)\n for out_edge_key, out_edge_attrs in out_edge_copies_dict.items():\n merged_ip_graph.add_edge(merged_node_key, out_edge_key[1], **out_edge_attrs)\n\n return merged_ip_graph\n\n @staticmethod\n def get_pre_hook_node_key(node_key: str):\n return InsertionPointGraph.PRE_HOOK_ID_PREFIX + node_key\n\n @staticmethod\n def get_post_hook_node_key(node_key: str):\n return InsertionPointGraph.POST_HOOK_ID_PREFIX + node_key\n\n def _get_mergeable_operator_patterns(self, hw_config: Optional[HWConfig] = None) -> NodeExpression:\n \"\"\"Resulting pattern should have single input; the operation with inputs to\n quantize should be the input operation; outputs should only be produced by one output node.\"\"\"\n # TODO: Implement \"repeating expressions\" so that any number of \"mergeable\" operations\n # immediately following a linear/convolutional/matrix op are merged into one block\n import nncf.dynamic_graph.patterns as p\n pattern = p.LINEAR_OPS + p.ANY_BN_RELU_COMBO | p.LINEAR_OPS + p.ELTWISE_UNIFORM_OPS\n return pattern\n\n def get_op_nodes_in_scope(self, scope: 'Scope') -> List:\n matching_ip_graph_op_nodes_list = []\n for node in self.nodes().values():\n if node[InsertionPointGraph.NODE_TYPE_NODE_ATTR] == InsertionPointGraphNodeType.OPERATOR:\n nncf_graph_node_ref = node[InsertionPointGraph.REGULAR_NODE_REF_NODE_ATTR]\n op_exec_context = nncf_graph_node_ref[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR]\n op_scope = op_exec_context.input_agnostic.scope_in_model\n if op_scope in scope:\n matching_ip_graph_op_nodes_list.append(node)\n return matching_ip_graph_op_nodes_list\n\n# pylint: disable=too-many-public-methods\n@ignore_scope\nclass NNCFNetwork(nn.Module, PostGraphBuildActing):\n\n def __init__(self, module, input_infos: List[ModelInputInfo] = None,\n dummy_forward_fn=None, scopes_without_shape_matching=None,\n ignored_scopes=None, target_scopes=None):\n super().__init__()\n self.set_nncf_wrapped_model(module)\n self.input_infos = input_infos\n self.ignored_scopes = ignored_scopes\n self.target_scopes = target_scopes\n self._dummy_forward_fn = 
dummy_forward_fn\n self._nncf_module_scopes = [] # type: List[Scope]\n self.scopes_without_shape_matching = scopes_without_shape_matching\n self.debug_interface = CombinedDebugInterface() if is_debug() else None\n self._extra_module_types = [] # type: List[CompressionModuleType]\n # pylint:disable=line-too-long\n self._insertions_into_original_graph = {} # type: Dict[InsertionPoint, List[Tuple[Callable, OperationPriority]]]\n\n device = next(module.parameters()).device\n\n # all modules should be replaced prior to graph building\n self._replace_modules_by_nncf_modules(device)\n\n _orig_context = TracingContext()\n _orig_graph_build_forward_fn = self._get_dummy_forward_fn_for_graph_building(with_input_tracing=True)\n\n self._graph_builder = GraphBuilder(_orig_graph_build_forward_fn)\n\n _orig_context.add_node_comparators([MODEL_INPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())\n if self.scopes_without_shape_matching:\n _orig_context.add_node_comparators(scopes_without_shape_matching,\n ShapeIgnoringTensorMetaComparator())\n\n self._original_graph = self._graph_builder.build_graph(self.get_nncf_wrapped_model(), _orig_context)\n\n self._compressed_context = TracingContext()\n\n self._dummy_forward_fn = self._get_dummy_forward_fn_for_graph_building(with_input_tracing=False)\n\n self._compressed_context.add_node_comparators([MODEL_INPUT_OP_NAME], ShapeIgnoringTensorMetaComparator())\n if self.scopes_without_shape_matching:\n self._compressed_context.add_node_comparators(scopes_without_shape_matching,\n ShapeIgnoringTensorMetaComparator())\n self._load_listener = None\n\n self._builders = [] # type: List['CompressionAlgorithmBuilder']\n\n @debuggable_forward\n def forward(self, *args, **kwargs):\n with self._compressed_context as ctx: # type: TracingContext\n ctx.base_module_thread_local_replica = self\n arglist = list(args)\n for idx, tensor in enumerate(arglist): # TODO: extend to all tensors in args/kwargs hierarchy\n if isinstance(tensor, torch.Tensor):\n arglist[idx] = nncf_model_input(tensor)\n args = tuple(arglist)\n retval = self.get_nncf_wrapped_model()(*args, **kwargs)\n return retval\n\n def register_algorithm(self, builder: 'CompressionAlgorithmBuilder'):\n \"\"\"Should be called during *builder*'s *apply_to* method, otherwise there will be no corresponding\n controller returned by the network at the *commit_compression_changes* stage\"\"\"\n self._builders.append(builder)\n\n # Cannot use property syntax here, otherwise the wrapped module will end up\n # appearing twice in the same checkpoint with different prefixes\n def get_nncf_wrapped_model(self):\n return getattr(self, MODEL_WRAPPED_BY_NNCF_ATTR_NAME)\n\n def set_nncf_wrapped_model(self, value):\n setattr(self, MODEL_WRAPPED_BY_NNCF_ATTR_NAME, value)\n\n def get_modules_in_nncf_modules_by_type(self, types) -> Dict['Scope', nn.Module]:\n nncf_modules = self.get_nncf_modules()\n retval = {}\n for nncf_module_scope, nncf_module in nncf_modules.items():\n nncf_module_scope.pop()\n for relative_scope, target_module in get_all_modules_by_type(nncf_module, types).items():\n retval[nncf_module_scope + relative_scope] = target_module\n return retval\n\n def register_insertion_command(self, command: InsertionCommand):\n point = command.insertion_point\n if point not in self._insertions_into_original_graph:\n self._insertions_into_original_graph[point] = [(command.fn, command.priority)]\n else:\n self._insertions_into_original_graph[point].append((command.fn, command.priority))\n\n def commit_compression_changes(self) -> 
'CompressionAlgorithmController':\n for insertion_point, fn_list_with_priority in self._insertions_into_original_graph.items():\n fn_list_with_priority = sorted(fn_list_with_priority, key=lambda x: x[1])\n self._insertions_into_original_graph[insertion_point] = fn_list_with_priority\n self._insert_at_point(insertion_point, [x[0] for x in fn_list_with_priority])\n\n if self.debug_interface is not None:\n self.debug_interface.init_actual(self)\n\n quantization_types = [class_type.__name__ for class_type in QUANTIZATION_MODULES.registry_dict.values()]\n all_quantizations = get_state_dict_names_with_modules(self, quantization_types)\n self._load_listener = LoadStateListener(self, all_quantizations)\n\n if not self._builders:\n from nncf.algo_selector import NoCompressionAlgorithmController\n return NoCompressionAlgorithmController(self)\n\n if len(self._builders) == 1:\n return self._builders[0].build_controller(self)\n\n from nncf.composite_compression import CompositeCompressionAlgorithmController\n composite_controller = CompositeCompressionAlgorithmController(self)\n for algo_builder in self._builders:\n composite_controller.add(algo_builder.build_controller(self))\n return composite_controller\n\n def _insert_at_point(self, point: InsertionPoint, fn_list: List[Callable]):\n if point.insertion_type == InsertionType.OPERATOR_PRE_HOOK:\n self._compressed_context.register_pre_hooks(fn_list, point.ia_op_exec_context)\n elif point.insertion_type == InsertionType.OPERATOR_POST_HOOK:\n self._compressed_context.register_post_hooks(fn_list, point.ia_op_exec_context)\n else:\n norm_target_scope = self._normalize_variable_recurrent_scope(point.ia_op_exec_context.scope_in_model)\n norm_nncf_scopes = [self._normalize_variable_recurrent_scope(x) for x in self._nncf_module_scopes]\n assert norm_target_scope in norm_nncf_scopes # Required for proper Recurrent/VariableRecurrent addressing\n nncf_module = self.get_module_by_scope(point.ia_op_exec_context.scope_in_model)\n if point.insertion_type == InsertionType.NNCF_MODULE_PRE_OP:\n for fn in fn_list:\n nncf_module.register_pre_forward_operation(fn)\n elif point.insertion_type == InsertionType.NNCF_MODULE_POST_OP:\n for fn in fn_list:\n nncf_module.register_post_forward_operation(fn)\n\n def __getattr__(self, name):\n wrapped_module = super().__getattr__(MODEL_WRAPPED_BY_NNCF_ATTR_NAME)\n if hasattr(wrapped_module, name):\n return getattr(wrapped_module, name)\n return super().__getattr__(name)\n\n def get_graph(self) -> NNCFGraph:\n return self._compressed_context.graph\n\n def get_original_graph(self) -> NNCFGraph:\n return self._original_graph\n\n def get_tracing_context(self) -> TracingContext:\n return self._compressed_context\n\n def _get_dummy_forward_fn_for_graph_building(self, with_input_tracing):\n if self._dummy_forward_fn is None:\n return create_dummy_forward_fn(self.input_infos,\n with_input_tracing=with_input_tracing)\n return self._dummy_forward_fn\n\n def _replace_modules_by_nncf_modules(self, device):\n module, self._nncf_module_scopes = replace_modules_by_nncf_modules(self.get_nncf_wrapped_model(),\n ignored_scopes=self.ignored_scopes,\n target_scopes=self.target_scopes)\n self.set_nncf_wrapped_model(module.to(device))\n\n def get_nncf_module_scopes(self) -> List['Scope']:\n return self._nncf_module_scopes\n\n def get_nncf_modules(self) -> Dict['Scope', torch.nn.Module]:\n return get_all_modules_by_type(self.get_nncf_wrapped_model(), NNCF_MODULES)\n\n def rebuild_graph(self, *input_args):\n self._compressed_context.reset_graph()\n 
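# Note (added descriptive comment): the rebuild below re-traces the compressed model with a fresh\n # dummy forward fn, so the rebuilt graph reflects the compression operations inserted since the original trace.\n 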
dummy_forward_fn = self._get_dummy_forward_fn_for_graph_building(with_input_tracing=False)\n builder = GraphBuilder(dummy_forward_fn)\n _ = builder.build_graph(self, self._compressed_context)\n\n def post_build_graph_actions(self):\n # Reset initialization flags (`initialized`) for all quantization modules\n # after dummy `load_state_dict` call.\n quantization_types = [class_type.__name__ for class_type in QUANTIZATION_MODULES.registry_dict.values()]\n all_quantizations = get_state_dict_names_with_modules(self, quantization_types)\n for module in all_quantizations.values():\n module.initialized = False\n\n def get_post_pattern_insertion_points(self, pattern: 'NNCFNodeExpression',\n omit_nodes_in_nncf_modules=False) -> List[InsertionInfo]:\n io_infos = self._original_graph.get_matching_nncf_graph_pattern_io_list(pattern)\n\n insertion_infos = []\n for io_info in io_infos:\n # The input/output is given in terms of edges, but the post-hooks are currently applied to\n # nodes. Multiple output edges in a pattern I/O info may originate from one and the same\n # node, and we have to ensure that these resolve into just one insertion point - thus the usage of \"set\".\n pattern_insertion_info_set = set()\n if len(io_info.output_edges) > 1:\n nncf_logger.debug(\"WARNING: pattern has more than one activation output\")\n\n for nncf_node in io_info.output_nodes:\n pattern_insertion_info_set.add(InsertionInfo(nncf_node.op_exec_context,\n is_output=True,\n shape_to_operate_on=None))\n # TODO: determine output shapes for output nodes to enable per-channel quantization\n\n # Ignore input nodes in the pattern for now, rely on the _quantize_inputs functions.\n # TODO: handle input quantization here as well\n\n # Since this function is currently only used for activation quantization purposes via operator\n # post-hook mechanism, we may take any edge and it will point from the same node where we will have to\n # insert a quantizer later. 
However, in the future the output edges may refer to activation tensors\n # with different sizes, in which case we have to insert different per-channel quantizers to\n # accommodate different trainable params if there is a difference in the channel dimension.\n # Furthermore, currently there is no distinction for single tensor output to multiple nodes and\n # multiple tensor output to multiple nodes (\"chunk\" operation is an example of the latter).\n # The pattern may also have unexpected outputs from a node in the middle of the pattern (see\n # \"densenet121.dot\" for an example of this) - need to decide what to do with that in terms\n # of quantization.\n # TODO: address the issues above.\n\n for nncf_edge in io_info.output_edges:\n pattern_insertion_info_set.add(InsertionInfo(nncf_edge.from_node.op_exec_context,\n is_output=False,\n shape_to_operate_on=nncf_edge.tensor_shape))\n insertion_infos += list(pattern_insertion_info_set)\n\n insertion_infos = list(\n set(insertion_infos)) # Filter the overlapping insertion points from different matches (happens for GNMT)\n insertion_infos_filtered = []\n\n for info in insertion_infos:\n if omit_nodes_in_nncf_modules and self.is_scope_in_nncf_module_scope(info.op_exec_context.scope_in_model):\n continue\n insertion_infos_filtered.append(info)\n\n return insertion_infos_filtered\n\n def is_scope_in_nncf_module_scope(self, scope: 'Scope'):\n # TODO: optimize\n norm_nncf_scopes = [self._normalize_variable_recurrent_scope(x) for x in self._nncf_module_scopes]\n norm_op_scope = self._normalize_variable_recurrent_scope(scope)\n for nncf_scope in norm_nncf_scopes:\n if norm_op_scope in nncf_scope:\n return True\n return False\n\n def register_compression_module_type(self, compression_module_type: CompressionModuleType):\n attr_name = self._compression_module_type_to_attr_name(compression_module_type)\n if compression_module_type in self._extra_module_types:\n raise RuntimeError(\"Module type {} is already registered\".format(compression_module_type))\n self.__setattr__(attr_name, nn.ModuleDict())\n self._extra_module_types.append(compression_module_type)\n\n def add_compression_module(self, module_key: str, module: nn.Module,\n compression_module_type: CompressionModuleType):\n attr_name = self._compression_module_type_to_attr_name(compression_module_type)\n if compression_module_type not in self._extra_module_types:\n raise RuntimeError(\"Module type {} was not registered\".format(compression_module_type))\n self.__getattr__(attr_name)[module_key] = module\n\n def get_compression_modules_by_type(self, compression_module_type: CompressionModuleType) -> nn.ModuleDict:\n attr_name = self._compression_module_type_to_attr_name(compression_module_type)\n if compression_module_type not in self._extra_module_types:\n raise RuntimeError(\"Module type {} was not registered\".format(compression_module_type))\n return self.__getattr__(attr_name)\n\n @staticmethod\n def _compression_module_type_to_attr_name(compression_module_type: CompressionModuleType):\n \"\"\"Required for backward compatibility with checkpoints that store function and activation\n quantizers directly under corresponding attributes of NNCFNetwork.\"\"\"\n if compression_module_type == CompressionModuleType.FUNCTION_QUANTIZER:\n return \"function_quantizers\"\n if compression_module_type == CompressionModuleType.ACTIVATION_QUANTIZER:\n return \"activation_quantizers\"\n raise RuntimeError(\"Unknown extra module type\")\n\n def sort_compression_modules(self, compression_module_type: 
CompressionModuleType):\n attr_name = self._compression_module_type_to_attr_name(compression_module_type)\n if compression_module_type not in self._extra_module_types:\n raise RuntimeError(\"Module type {} was not registered\".format(compression_module_type))\n module_dict = self.__getattr__(attr_name)\n # pylint: disable=protected-access\n module_dict._modules = OrderedDict(sorted(module_dict._modules.items()))\n self.__setattr__(attr_name, module_dict)\n\n @staticmethod\n def _normalize_variable_recurrent_scope(scope: 'Scope'):\n \"\"\"\n Two scopes pointing to an NNCF module that only differ in a Recurrent/VariableRecurrent/VariableRecurrentReverse\n scope element actually point to one and the same module.\n \"\"\"\n ret_scope = scope.copy()\n for scope_element in ret_scope:\n if scope_element.calling_module_class_name in [\"Recurrent\", \"VariableRecurrent\",\n \"VariableRecurrentReverse\"]:\n scope_element.calling_module_class_name = \"NormalizedName_Recurrent\"\n return ret_scope\n\n def do_dummy_forward(self, force_eval=False):\n \"\"\"Attention: If run with force_eval=False, this may spoil the batchnorm statistics,\n and an eval run of the model will perform much worse than the train run. \"\"\"\n if force_eval:\n train_mode = self.training\n self.eval()\n with torch.no_grad():\n self._dummy_forward_fn(self)\n if force_eval:\n if train_mode:\n self.train()\n\n def get_insertion_point_graph(self) -> InsertionPointGraph:\n ip_graph = InsertionPointGraph(self._original_graph.get_nx_graph_copy())\n\n # Mark IP graph operator nodes with associated op metatypes\n # Determining operator metatypes is more suited to occur at wrap_operator\n # stage, because it might be influenced by specific non-tensor function parameters,\n # but we have to inspect the containing module parameters as well, so the\n # TracingContext in wrap_operator would have to retain a reference to\n # the model that uses it. 
Since currently we do not need to inspect the\n # function arguments to determine the metatype, we can do this here, but\n # once we need to inspect the arguments, the code will have to be moved to\n # wrap_operator.\n\n for node_key in ip_graph.nodes:\n ip_graph_node = ip_graph.nodes[node_key]\n ip_graph_node_type = ip_graph_node[InsertionPointGraph.NODE_TYPE_NODE_ATTR]\n if ip_graph_node_type == InsertionPointGraphNodeType.OPERATOR:\n nncf_graph_node_ref = ip_graph_node[InsertionPointGraph.REGULAR_NODE_REF_NODE_ATTR]\n op_exec_context = nncf_graph_node_ref[NNCFGraph.OP_EXEC_CONTEXT_NODE_ATTR]\n op_name = op_exec_context.operator_name\n scope = op_exec_context.scope_in_model\n op_arch = OPERATOR_METATYPES.get_operator_metatype_by_op_name(op_name)\n module = self.get_module_by_scope(scope)\n if module is not None:\n subtype = op_arch.determine_subtype(containing_module=module)\n if subtype is not None:\n op_arch = subtype\n ip_graph_node[InsertionPointGraph.OPERATOR_METATYPE_NODE_ATTR] = op_arch\n return ip_graph\n\n def get_module_by_scope(self, scope: 'Scope') -> torch.nn.Module:\n curr_module = self.get_nncf_wrapped_model()\n for scope_element in scope[1:]: # omit first scope element which corresponds to base module\n if scope_element.calling_field_name is None:\n # The module used is being created in-place every time and never stored in the model,\n # happens for nn.Softmax in BERT implementations.\n return None\n # pylint: disable=protected-access\n next_module = curr_module._modules.get(scope_element.calling_field_name)\n if next_module is None:\n raise RuntimeError(\"Could not find a {} module member in {} module of scope {} during node search\"\n .format(scope_element.calling_field_name,\n scope_element.calling_module_class_name,\n str(scope)))\n curr_module = next_module\n return curr_module\n",
"#!/usr/bin/env python3\n#\n# Copyright (C) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\n\"\"\" This script allows you to test Text Recognition model. \"\"\"\n\nimport argparse\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nimport cv2\n\nfrom text_recognition.model import TextRecognition\nfrom text_recognition.dataset import Dataset\n\n\ndef parse_args():\n \"\"\" Parases input arguments. \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--annotation_path', required=True, help='Annotation path.')\n parser.add_argument('--weights_path', required=True, help='Model weights path.')\n parser.add_argument('--show', action='store_true', help='Show images.')\n\n return parser.parse_args()\n\n\ndef main():\n \"\"\" Main testing funciton. \"\"\"\n\n args = parse_args()\n\n sequence_length = 30\n image_width = 120\n image_height = 32\n\n dataset = Dataset(args.annotation_path, image_width, image_height, repeat=1)\n next_sample = dataset().make_one_shot_iterator().get_next()\n\n model = TextRecognition(is_training=False, num_classes=dataset.num_classes)\n images_ph = tf.placeholder(tf.float32, [1, image_height, image_width, 1])\n model_out = model(inputdata=images_ph)\n decoded, _ = tf.nn.ctc_beam_search_decoder(model_out, sequence_length * np.ones(1),\n merge_repeated=False)\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess=sess, save_path=args.weights_path)\n\n correct = 0.0\n dataset_len = len(dataset)\n for _ in tqdm(range(dataset_len)):\n images_batch, labels_batch = sess.run(next_sample)\n\n preds, _ = sess.run([decoded, model_out], feed_dict={images_ph: images_batch})\n\n try:\n predicted = Dataset.sparse_tensor_to_str(preds[0], dataset.int_to_char)[0]\n expected = Dataset.sparse_tensor_to_str(labels_batch, dataset.int_to_char)[\n 0].lower()\n except:\n print('Could not find a word')\n continue\n\n correct += 1 if predicted == expected else 0\n\n if args.show and predicted != expected:\n image = np.reshape(images_batch, [image_height, image_width, -1]).astype(np.uint8)\n cv2.imshow('image', image)\n print('pr, gt', predicted, expected)\n k = cv2.waitKey(0)\n if k == 27:\n sess.close()\n return\n\n print('accuracy', correct / dataset_len)\n\n return\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright (C) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\ninput_shape = (72, 72, 3) # (height, width, channels)\nmodel_dir = 'model'\n\nclass train:\n batch_size = 32\n steps = 2000000\n\n random_seed = 666\n\n save_checkpoints_steps = 1000 # Number of training steps when checkpoint should be saved\n keep_checkpoint_every_n_hours = 1 # Checkpoint should be saved forever after every n hours\n save_summary_steps = 100 # Number of steps when the summary information should be saved\n\n num_parallel_calls = 4\n prefetch_size = 4\n\n annotation_path = '../../data/cars_100/cars_100_train.json'\n use_pretrained_weights = True\n pretrained_ckpt = 'vehicle-attributes-barrier-0103/model.ckpt-2000000'\n\n class execution:\n CUDA_VISIBLE_DEVICES = \"0\"\n per_process_gpu_memory_fraction = 0.8 # Fix extra memory allocation issue\n allow_growth = True # Option which attempts to allocate only as much GPU memory based on runtime allocations\n\n intra_op_parallelism_threads = 2\n inter_op_parallelism_threads = 8\n transformer_parallel_calls = 4 # Number of parallel threads in data transformer/augmentation\n transformer_prefetch_size = 8 # Number of batches to prefetch\n\nclass eval:\n batch_size = 32\n\n annotation_path = '../../data/cars_100/cars_100_test.json'\n\n class execution:\n CUDA_VISIBLE_DEVICES = \"0\"\n per_process_gpu_memory_fraction = 0.8 # Fix extra memory allocation issue\n allow_growth = True # Option which attempts to allocate only as much GPU memory based on runtime allocations\n\n intra_op_parallelism_threads = 2\n inter_op_parallelism_threads = 8\n transformer_parallel_calls = 4 # Number of parallel threads in data transformer/augmentation\n transformer_prefetch_size = 8 # Number of batches to prefetch\n\nclass infer:\n annotation_path = '../../data/cars_100/cars_100_test.json'\n\n class execution:\n CUDA_VISIBLE_DEVICES = \"0\"\n intra_op_parallelism_threads = 0\n\ndef optimizer(learning_rate):\n import tensorflow as tf\n return tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.95)\n\nresnet_params = {\n \"learning_rate\": 0.001, # Learning rate\n \"optimizer\": optimizer, # Optimizer\n \"pretrained_ckpt\": train.pretrained_ckpt, # Trained model\n \"use_pretrained_weights\": train.use_pretrained_weights # Use pretrained model weights\n}\n",
"\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SepConv(nn.Module):\n '''Separable Convolution.'''\n\n def __init__(self, in_planes, out_planes, kernel_size, stride):\n super(SepConv, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, out_planes,\n kernel_size, stride,\n padding=(kernel_size - 1) // 2,\n bias=False, groups=in_planes)\n self.bn1 = nn.BatchNorm2d(out_planes)\n\n def forward(self, x):\n return self.bn1(self.conv1(x))\n\n\nclass CellA(nn.Module):\n def __init__(self, in_planes, out_planes, stride=1):\n super(CellA, self).__init__()\n self.stride = stride\n self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)\n if stride == 2:\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(out_planes)\n\n def forward(self, x):\n y1 = self.sep_conv1(x)\n y2 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)\n if self.stride == 2:\n y2 = self.bn1(self.conv1(y2))\n return F.relu(y1 + y2)\n\n\nclass CellB(nn.Module):\n def __init__(self, in_planes, out_planes, stride=1):\n super(CellB, self).__init__()\n self.stride = stride\n # Left branch\n self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)\n self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3, stride=stride)\n # Right branch\n self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5, stride=stride)\n if stride == 2:\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(out_planes)\n # Reduce channels\n self.conv2 = nn.Conv2d(2 * out_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes)\n\n def forward(self, x):\n # Left branch\n y1 = self.sep_conv1(x)\n y2 = self.sep_conv2(x)\n # Right branch\n y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)\n if self.stride == 2:\n y3 = self.bn1(self.conv1(y3))\n y4 = self.sep_conv3(x)\n # Concat & reduce channels\n b1 = F.relu(y1 + y2)\n b2 = F.relu(y3 + y4)\n y = torch.cat([b1, b2], 1)\n return F.relu(self.bn2(self.conv2(y)))\n\n\nclass PNASNet(nn.Module):\n def __init__(self, cell_type, num_cells, num_planes):\n super(PNASNet, self).__init__()\n self.in_planes = num_planes\n self.cell_type = cell_type\n\n self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(num_planes)\n\n self.layer1 = self._make_layer(num_planes, num_cells=6)\n self.layer2 = self._downsample(num_planes * 2)\n self.layer3 = self._make_layer(num_planes * 2, num_cells=6)\n self.layer4 = self._downsample(num_planes * 4)\n self.layer5 = self._make_layer(num_planes * 4, num_cells=6)\n\n self.linear = nn.Linear(num_planes * 4, 10)\n\n def _make_layer(self, planes, num_cells):\n layers = []\n for _ in range(num_cells):\n layers.append(self.cell_type(self.in_planes, planes, 
stride=1))\n self.in_planes = planes\n return nn.Sequential(*layers)\n\n def _downsample(self, planes):\n layer = self.cell_type(self.in_planes, planes, stride=2)\n self.in_planes = planes\n return layer\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n out = F.avg_pool2d(out, 8)\n out = self.linear(out.view(out.size(0), -1))\n return out\n\n\ndef PNASNetA():\n return PNASNet(CellA, num_cells=6, num_planes=44)\n\n\ndef PNASNetB():\n return PNASNet(CellB, num_cells=6, num_planes=32)\n\n\ndef test():\n net = PNASNetB()\n x = torch.randn(1, 3, 32, 32)\n y = net(x)\n print(y)\n\n# test()\n",
"\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom tqdm import tqdm\nimport torch\n\nfrom examples.common.example_logger import logger\nfrom examples.semantic_segmentation.utils.loss_funcs import do_model_specific_postprocessing\n\n\nclass Test:\n \"\"\"Tests the ``model`` on the specified test dataset using the\n data loader, and loss criterion.\n\n Keyword arguments:\n - model (``nn.Module``): the model instance to test.\n - data_loader (``Dataloader``): Provides single or multi-process\n iterators over the dataset.\n - criterion (``Optimizer``): The loss criterion.\n - metric (```Metric``): An instance specifying the metric to return.\n - device (``torch.device``): An object representing the device on which\n tensors are allocated.\n - model_name: Name of the model to be trained - determines model-specific processing\n of the results (i.e. whether center crop should be applied, what outputs should be counted in metrics, etc.)\n\n \"\"\"\n\n def __init__(self, model, data_loader, criterion, metric, device, model_name):\n self.model = model\n self.data_loader = data_loader\n self.criterion = criterion\n self.metric = metric\n self.device = device\n self.model_name = model_name\n\n def run_epoch(self, iteration_loss=False):\n \"\"\"Runs an epoch of validation.\n\n Keyword arguments:\n - iteration_loss (``bool``, optional): Prints loss at every step.\n\n Returns:\n - The epoch loss (float), and the values of the specified metrics\n\n \"\"\"\n self.model.eval()\n epoch_loss = 0.0\n self.metric.reset()\n for step, batch_data in tqdm(enumerate(self.data_loader), total=len(self.data_loader)):\n # Get the inputs and labels\n inputs = batch_data[0].to(self.device)\n labels = batch_data[1].to(self.device)\n\n with torch.no_grad():\n # Forward propagation\n outputs = self.model(inputs)\n\n labels, loss_outputs, metric_outputs = do_model_specific_postprocessing(self.model_name,\n labels,\n outputs)\n\n # Loss computation\n loss = self.criterion(loss_outputs, labels)\n\n # Keep track of loss for current epoch\n epoch_loss += loss.item()\n\n self.metric.add(metric_outputs.detach(), labels.detach())\n\n if iteration_loss:\n logger.info(\"[Step: {}] Iteration loss: {:.4f}\".format(step, loss.item()))\n\n return epoch_loss / len(self.data_loader), self.metric.value()\n",
"\"\"\"\n Copyright (c) 2019 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport os\nimport numpy as np\n\nimport cv2\n\n\ndef max_central_square_crop(image):\n ''' Makes max-sized central squared crop. '''\n\n height, width = image.shape[:2]\n\n if width > height:\n image = image[:, (width - height) // 2:(width - height) // 2 + height]\n else:\n image = image[(height - width) // 2:(height - width) // 2 + width, :]\n\n return image\n\n\ndef preproces_image(image):\n ''' Scales and subtracts mean value from image. '''\n\n image = image / 127.5 - 1.0\n return image\n\n\ndef depreprocess_image(image):\n ''' Makes transform which is inverse to preprocessing. '''\n\n image = (image + 1.0) * 127.5\n image = image.astype(np.uint8)\n return image\n\n\ndef fit_to_max_size(image, max_size):\n ''' Fits input image to max_size. '''\n\n if image.shape[0] > max_size or image.shape[1] > max_size:\n if image.shape[0] > image.shape[1]:\n image = cv2.resize(image, (int(image.shape[1] / (image.shape[0] / max_size)), max_size))\n else:\n image = cv2.resize(image, (max_size, int(image.shape[0] / (image.shape[1] / max_size))))\n\n return image\n\n\ndef crop_resize(image, input_size):\n ''' Makes max-sized central crop, resizes to input_size. '''\n\n image = max_central_square_crop(image)\n image = cv2.resize(image, (input_size, input_size))\n return image\n\n\ndef crop_resize_shift_scale(image, input_size):\n ''' Makes max-sized central crop, resizes to input_size, scales and subtracts mean values. '''\n\n image = crop_resize(image, input_size)\n image = preproces_image(image)\n image = np.expand_dims(image, axis=0)\n return image\n\n\ndef central_crop(image, divide_by, shift):\n ''' Makes central crops dividing input image by number of equal cells. '''\n\n height, width = image.shape[0:2]\n image = image[height // divide_by * shift: height // divide_by * (divide_by - shift),\n width // divide_by * shift: width // divide_by * (divide_by - shift)]\n return image\n\n\ndef from_list(path, multiple_images_per_label=True):\n ''' Loads images list. '''\n\n images_path = []\n labels = []\n is_real = []\n\n text_label_to_class_id = {}\n\n uniques_labels = set()\n\n root = os.path.dirname(os.path.abspath(path))\n\n with open(path) as opened_file:\n for line in opened_file.readlines():\n line = line.strip().split(' ')\n if len(line) == 2:\n image_path, label = line\n real = False\n else:\n image_path, label, real = line\n real = real.lower() == 'r'\n\n text_label_to_class_id[os.path.basename(image_path).split('.')[0]] = int(label)\n\n if not multiple_images_per_label and label in uniques_labels:\n continue\n\n uniques_labels.add(label)\n\n is_real.append(real)\n images_path.append(os.path.join(root, image_path))\n labels.append(int(label))\n\n return images_path, labels, is_real, text_label_to_class_id\n",
"# Copyright (C) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport numpy as np\n\n\ndef iou(box_a, box_b):\n \"\"\"Calculates IoU metric between two specified boxes.\n\n :param box_a: Coordinates of first box\n :param box_b: Coordinates of second box\n :return: Scalar value of UoU metric\n \"\"\"\n\n intersect_ymin = np.maximum(box_a[0], box_b[0])\n intersect_xmin = np.maximum(box_a[1], box_b[1])\n intersect_ymax = np.minimum(box_a[2], box_b[2])\n intersect_xmax = np.minimum(box_a[3], box_b[3])\n\n intersect_height = np.maximum(0.0, intersect_ymax - intersect_ymin)\n intersect_width = np.maximum(0.0, intersect_xmax - intersect_xmin)\n\n intersect_area = intersect_height * intersect_width\n area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])\n area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])\n\n union_area = area_a + area_b - intersect_area\n\n overlap_ratio = intersect_area / union_area if union_area > 0.0 else 0.0\n\n return overlap_ratio\n\n\ndef matrix_iou(set_a, set_b):\n \"\"\"Calculates IoU metric between all pairs of presented sets of boxes.\n\n :param set_a: First set of boxes\n :param set_b: Second set of boxes\n :return: Matrix of IoU metrics\n \"\"\"\n\n intersect_ymin = np.maximum(set_a[:, 0].reshape([-1, 1]), set_b[:, 0].reshape([1, -1]))\n intersect_xmin = np.maximum(set_a[:, 1].reshape([-1, 1]), set_b[:, 1].reshape([1, -1]))\n intersect_ymax = np.minimum(set_a[:, 2].reshape([-1, 1]), set_b[:, 2].reshape([1, -1]))\n intersect_xmax = np.minimum(set_a[:, 3].reshape([-1, 1]), set_b[:, 3].reshape([1, -1]))\n\n intersect_heights = np.maximum(0.0, intersect_ymax - intersect_ymin)\n intersect_widths = np.maximum(0.0, intersect_xmax - intersect_xmin)\n\n intersect_areas = intersect_heights * intersect_widths\n areas_set_a = ((set_a[:, 2] - set_a[:, 0]) * (set_a[:, 3] - set_a[:, 1])).reshape([-1, 1])\n areas_set_b = ((set_b[:, 2] - set_b[:, 0]) * (set_b[:, 3] - set_b[:, 1])).reshape([1, -1])\n\n areas_set_a[np.less(areas_set_a, 0.0)] = 0.0\n areas_set_b[np.less(areas_set_b, 0.0)] = 0.0\n\n union_areas = areas_set_a + areas_set_b - intersect_areas\n\n overlaps = intersect_areas / union_areas\n overlaps[np.less_equal(union_areas, 0.0)] = 0.0\n\n return overlaps\n"
] | [
[
"torch.LongTensor",
"torch.Tensor",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.smooth_l1_loss"
],
[
"tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn",
"tensorflow.transpose",
"tensorflow.contrib.slim.dropout",
"tensorflow.contrib.slim.arg_scope",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.contrib.slim.max_pool2d",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.reshape",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.squeeze",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.contrib.slim.conv2d",
"tensorflow.variable_scope",
"tensorflow.contrib.slim.batch_norm"
],
[
"torch.cat",
"torch.zeros",
"torch.nn.utils.rnn.pad_sequence",
"torch.no_grad",
"torch.device",
"torch.nn.functional.tanh",
"torch.autograd.Variable",
"torch.testing.assert_allclose",
"torch.randn",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.LSTMCell",
"torch.nn.functional.sigmoid",
"torch.nn.functional.relu",
"torch.sort",
"torch.rand",
"torch.LongTensor",
"torch.nn.Conv2d",
"torch.unsqueeze",
"torch.cuda.set_device",
"torch.nn.LSTM",
"torch.IntTensor"
],
[
"tensorflow.constant",
"tensorflow.less",
"matplotlib.use",
"tensorflow.train.get_or_create_global_step",
"tensorflow.train.MomentumOptimizer"
],
[
"torch.nn.ModuleDict",
"torch.no_grad"
],
[
"numpy.reshape",
"tensorflow.placeholder",
"numpy.ones",
"tensorflow.Session",
"tensorflow.train.Saver"
],
[
"tensorflow.train.MomentumOptimizer"
],
[
"torch.nn.Sequential",
"torch.cat",
"torch.randn",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.BatchNorm2d",
"torch.nn.functional.max_pool2d"
],
[
"torch.no_grad"
],
[
"numpy.expand_dims"
],
[
"numpy.less",
"numpy.less_equal",
"numpy.maximum",
"numpy.minimum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CORAL-CMU/kalibr | [
"ebd759286944f156c3ae6202c27fe47667929744"
] | [
"aslam_offline_calibration/kalibr/python/kalibr_camera_calibration/CameraIntializers.py"
] | [
"import sm\nimport aslam_backend as aopt\nimport aslam_cv as cv\nimport numpy as np\n\ndef addPoseDesignVariable(problem, T0=sm.Transformation()):\n q_Dv = aopt.RotationQuaternionDv( T0.q() )\n q_Dv.setActive( True )\n problem.addDesignVariable(q_Dv)\n t_Dv = aopt.EuclideanPointDv( T0.t() )\n t_Dv.setActive( True )\n problem.addDesignVariable(t_Dv)\n return aopt.TransformationBasicDv( q_Dv.toExpression(), t_Dv.toExpression() )\n\ndef stereoCalibrate(camL_geometry, camH_geometry, obslist, distortionActive=False, baseline=None):\n #####################################################\n ## find initial guess as median of all pnp solutions\n #####################################################\n if baseline is None:\n r=[]; t=[]\n for obsL, obsH in obslist:\n #if we have observations for both camss\n if obsL is not None and obsH is not None:\n success, T_L = camL_geometry.geometry.estimateTransformation(obsL)\n success, T_H = camH_geometry.geometry.estimateTransformation(obsH)\n \n baseline = T_H.inverse()*T_L\n t.append(baseline.t())\n rv=sm.RotationVector()\n r.append(rv.rotationMatrixToParameters( baseline.C() ))\n \n r_median = np.median(np.asmatrix(r), axis=0).flatten().T\n R_median = rv.parametersToRotationMatrix(r_median)\n t_median = np.median(np.asmatrix(t), axis=0).flatten().T\n \n baseline_HL = sm.Transformation( sm.rt2Transform(R_median, t_median) )\n else:\n baseline_HL = baseline\n \n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n dL = camL_geometry.geometry.projection().distortion().getParameters().flatten()\n pL = camL_geometry.geometry.projection().getParameters().flatten()\n dH = camH_geometry.geometry.projection().distortion().getParameters().flatten()\n pH = camH_geometry.geometry.projection().getParameters().flatten()\n sm.logDebug(\"initial guess for stereo calib: {0}\".format(baseline_HL.T()))\n sm.logDebug(\"initial guess for intrinsics camL: {0}\".format(pL))\n sm.logDebug(\"initial guess for intrinsics camH: {0}\".format(pH))\n sm.logDebug(\"initial guess for distortion camL: {0}\".format(dL))\n sm.logDebug(\"initial guess for distortion camH: {0}\".format(dH)) \n \n ############################################\n ## solve the bundle adjustment\n ############################################\n problem = aopt.OptimizationProblem()\n\n #baseline design variable \n baseline_dv = addPoseDesignVariable(problem, baseline_HL)\n \n #target pose dv for all target views (=T_camL_w)\n target_pose_dvs = list()\n for obsL, obsH in obslist:\n if obsL is not None: #use camL if we have an obs for this one\n success, T_t_cL = camL_geometry.geometry.estimateTransformation(obsL)\n else:\n success, T_t_cH = camH_geometry.geometry.estimateTransformation(obsH)\n T_t_cL = T_t_cH*baseline_HL #apply baseline for the second camera\n \n target_pose_dv = addPoseDesignVariable(problem, T_t_cL)\n target_pose_dvs.append(target_pose_dv)\n \n #add camera dvs\n camL_geometry.setDvActiveStatus(camL_geometry.projectionActive, distortionActive or camL_geometry.distortionActive, False)\n camH_geometry.setDvActiveStatus(camH_geometry.projectionActive, distortionActive or camH_geometry.distortionActive, False)\n problem.addDesignVariable(camL_geometry.dv.distortionDesignVariable())\n problem.addDesignVariable(camL_geometry.dv.projectionDesignVariable())\n problem.addDesignVariable(camL_geometry.dv.shutterDesignVariable())\n problem.addDesignVariable(camH_geometry.dv.distortionDesignVariable())\n problem.addDesignVariable(camH_geometry.dv.projectionDesignVariable())\n 
problem.addDesignVariable(camH_geometry.dv.shutterDesignVariable())\n \n ############################################\n ## add error terms\n ############################################\n \n #corner uncertainty\n # \\todo pass in the detector uncertainty somehow.\n cornerUncertainty = 1.0\n R = np.eye(2) * cornerUncertainty * cornerUncertainty\n invR = np.linalg.inv(R)\n \n #Add reprojection error terms for both cameras\n reprojectionErrors0 = []; reprojectionErrors1 = []\n \n for cidx, cam in enumerate([camL_geometry, camH_geometry]):\n sm.logDebug(\"stereoCalibrate: adding camera error terms for {0} calibration targets\".format(len(obslist)))\n\n #get the image and target points corresponding to the frame\n target = cam.ctarget.detector.target()\n \n #add error terms for all observations\n for view_id, obstuple in enumerate(obslist):\n \n #add error terms if we have an observation for this cam\n obs=obstuple[cidx]\n if obs is not None:\n T_cam_w = target_pose_dvs[view_id].toExpression().inverse()\n \n #add the baseline for the second camera\n if cidx!=0:\n T_cam_w = baseline_dv.toExpression() * T_cam_w\n \n for i in range(0, target.size()):\n p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));\n valid, y = obs.imagePoint(i)\n if valid:\n # Create an error term.\n rerr = cam.model.reprojectionError(y, invR, T_cam_w * p_target, cam.dv)\n rerr.idx = i\n problem.addErrorTerm(rerr)\n \n if cidx==0:\n reprojectionErrors0.append(rerr)\n else:\n reprojectionErrors1.append(rerr)\n \n sm.logDebug(\"stereoCalibrate: added {0} camera error terms\".format( len(reprojectionErrors0)+len(reprojectionErrors1) ))\n \n ############################################\n ## solve\n ############################################ \n options = aopt.Optimizer2Options()\n options.verbose = True if sm.getLoggingLevel()==sm.LoggingLevel.Debug else False\n options.nThreads = 4\n options.convergenceDeltaX = 1e-3\n options.convergenceDeltaJ = 1\n options.maxIterations = 200\n options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)\n\n optimizer = aopt.Optimizer2(options)\n optimizer.setProblem(problem)\n\n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n sm.logDebug(\"Before optimization:\")\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors0 ])\n sm.logDebug( \" Reprojection error squared (camL): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors1 ])\n sm.logDebug( \" Reprojection error squared (camH): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n \n sm.logDebug(\"baseline={0}\".format(baseline_dv.toTransformationMatrix()))\n \n try: \n retval = optimizer.optimize()\n if retval.linearSolverFailure:\n sm.logError(\"stereoCalibrate: Optimization failed!\")\n success = not retval.linearSolverFailure\n except:\n sm.logError(\"stereoCalibrate: Optimization failed!\")\n success = False\n \n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n sm.logDebug(\"After optimization:\")\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors0 ])\n sm.logDebug( \" Reprojection error squared (camL): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors1 ])\n sm.logDebug( \" Reprojection error squared (camH): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n \n #verbose output\n if 
sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n dL = camL_geometry.geometry.projection().distortion().getParameters().flatten()\n pL = camL_geometry.geometry.projection().getParameters().flatten()\n dH = camH_geometry.geometry.projection().distortion().getParameters().flatten()\n pH = camH_geometry.geometry.projection().getParameters().flatten()\n sm.logDebug(\"guess for intrinsics camL: {0}\".format(pL))\n sm.logDebug(\"guess for intrinsics camH: {0}\".format(pH))\n sm.logDebug(\"guess for distortion camL: {0}\".format(dL))\n sm.logDebug(\"guess for distortion camH: {0}\".format(dH)) \n \n if success:\n baseline_HL = sm.Transformation(baseline_dv.toTransformationMatrix())\n return success, baseline_HL\n else:\n #return the initial guess if we fail\n return success, baseline_HL\n\n\ndef calibrateIntrinsics(cam_geometry, obslist, distortionActive=True, intrinsicsActive=True):\n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n d = cam_geometry.geometry.projection().distortion().getParameters().flatten()\n p = cam_geometry.geometry.projection().getParameters().flatten()\n sm.logDebug(\"calibrateIntrinsics: intrinsics guess: {0}\".format(p))\n sm.logDebug(\"calibrateIntrinsics: distortion guess: {0}\".format(d))\n \n ############################################\n ## solve the bundle adjustment\n ############################################\n problem = aopt.OptimizationProblem()\n \n #add camera dvs\n cam_geometry.setDvActiveStatus(intrinsicsActive, distortionActive, False)\n problem.addDesignVariable(cam_geometry.dv.distortionDesignVariable())\n problem.addDesignVariable(cam_geometry.dv.projectionDesignVariable())\n problem.addDesignVariable(cam_geometry.dv.shutterDesignVariable())\n \n #corner uncertainty\n cornerUncertainty = 1.0\n R = np.eye(2) * cornerUncertainty * cornerUncertainty\n invR = np.linalg.inv(R)\n \n #get the image and target points corresponding to the frame\n target = cam_geometry.ctarget.detector.target()\n \n #target pose dv for all target views (=T_camL_w)\n reprojectionErrors = []; \n sm.logDebug(\"calibrateIntrinsics: adding camera error terms for {0} calibration targets\".format(len(obslist)))\n target_pose_dvs=list()\n for obs in obslist: \n success, T_t_c = cam_geometry.geometry.estimateTransformation(obs)\n target_pose_dv = addPoseDesignVariable(problem, T_t_c)\n target_pose_dvs.append(target_pose_dv)\n \n T_cam_w = target_pose_dv.toExpression().inverse()\n \n ## add error terms\n for i in range(0, target.size()):\n p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));\n valid, y = obs.imagePoint(i)\n if valid:\n rerr = cam_geometry.model.reprojectionError(y, invR, T_cam_w * p_target, cam_geometry.dv)\n problem.addErrorTerm(rerr)\n reprojectionErrors.append(rerr)\n \n sm.logDebug(\"calibrateIntrinsics: added {0} camera error terms\".format(len(reprojectionErrors)))\n \n ############################################\n ## solve\n ############################################ \n options = aopt.Optimizer2Options()\n options.verbose = True if sm.getLoggingLevel()==sm.LoggingLevel.Debug else False\n options.nThreads = 4\n options.convergenceDeltaX = 1e-3\n options.convergenceDeltaJ = 1\n options.maxIterations = 200\n options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)\n\n optimizer = aopt.Optimizer2(options)\n optimizer.setProblem(problem)\n\n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n sm.logDebug(\"Before optimization:\")\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors ])\n 
sm.logDebug( \" Reprojection error squarred (camL): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n \n #run intrinsic calibration\n try: \n retval = optimizer.optimize()\n if retval.linearSolverFailure:\n sm.logError(\"calibrateIntrinsics: Optimization failed!\")\n success = not retval.linearSolverFailure\n\n except:\n sm.logError(\"calibrateIntrinsics: Optimization failed!\")\n success = False\n \n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n d = cam_geometry.geometry.projection().distortion().getParameters().flatten()\n p = cam_geometry.geometry.projection().getParameters().flatten()\n sm.logDebug(\"calibrateIntrinsics: guess for intrinsics cam: {0}\".format(p))\n sm.logDebug(\"calibrateIntrinsics: guess for distortion cam: {0}\".format(d))\n \n return success\n\n\ndef solveFullBatch(cameras, baseline_guesses, graph): \n ############################################\n ## solve the bundle adjustment\n ############################################\n problem = aopt.OptimizationProblem()\n \n #add camera dvs\n for cam in cameras:\n cam.setDvActiveStatus(cam.projectionActive, cam.distortionActive, False)\n problem.addDesignVariable(cam.dv.distortionDesignVariable())\n problem.addDesignVariable(cam.dv.projectionDesignVariable())\n problem.addDesignVariable(cam.dv.shutterDesignVariable())\n \n baseline_dvs = list()\n for baseline_idx in range(0, len(cameras)-1): \n baseline_dv = aopt.TransformationDv(baseline_guesses[baseline_idx])\n \n for i in range(0, baseline_dv.numDesignVariables()):\n problem.addDesignVariable(baseline_dv.getDesignVariable(i))\n \n baseline_dvs.append( baseline_dv )\n \n #corner uncertainty\n cornerUncertainty = 1.0\n R = np.eye(2) * cornerUncertainty * cornerUncertainty\n invR = np.linalg.inv(R)\n \n #get the target\n target = cameras[0].ctarget.detector.target()\n\n #Add calibration target reprojection error terms for all camera in chain\n target_pose_dvs = list()\n \n #shuffle the views\n reprojectionErrors = []; \n timestamps = graph.obs_db.getAllViewTimestamps()\n for view_id, timestamp in enumerate(timestamps):\n \n #get all observations for all cams at this time\n obs_tuple = graph.obs_db.getAllObsAtTimestamp(timestamp)\n\n #create a target pose dv for all target views (= T_cam0_w)\n T0 = graph.getTargetPoseGuess(timestamp, cameras, baseline_guesses)\n target_pose_dv = addPoseDesignVariable(problem, T0)\n target_pose_dvs.append(target_pose_dv)\n \n\n for cidx, obs in obs_tuple:\n cam = cameras[cidx]\n \n #calibration target coords to camera X coords\n T_cam0_calib = target_pose_dv.toExpression().inverse()\n\n #build pose chain (target->cam0->baselines->camN)\n T_camN_calib = T_cam0_calib\n for idx in range(0, cidx):\n T_camN_calib = baseline_dvs[idx].toExpression() * T_camN_calib\n \n \n ## add error terms\n for i in range(0, target.size()):\n p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));\n valid, y = obs.imagePoint(i)\n if valid:\n rerr = cameras[cidx].model.reprojectionError(y, invR, T_camN_calib * p_target, cameras[cidx].dv)\n problem.addErrorTerm(rerr)\n reprojectionErrors.append(rerr)\n \n sm.logDebug(\"solveFullBatch: added {0} camera error terms\".format(len(reprojectionErrors)))\n \n ############################################\n ## solve\n ############################################ \n options = aopt.Optimizer2Options()\n options.verbose = True if sm.getLoggingLevel()==sm.LoggingLevel.Debug else False\n options.nThreads = 4\n options.convergenceDeltaX = 1e-3\n 
options.convergenceDeltaJ = 1\n options.maxIterations = 250\n options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)\n\n optimizer = aopt.Optimizer2(options)\n optimizer.setProblem(problem)\n\n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n sm.logDebug(\"Before optimization:\")\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors ])\n sm.logDebug( \" Reprojection error squared (camL): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n \n #run the full batch calibration\n try:\n retval = optimizer.optimize()\n if retval.linearSolverFailure:\n sm.logError(\"solveFullBatch: Optimization failed!\")\n success = not retval.linearSolverFailure\n\n except:\n sm.logError(\"solveFullBatch: Optimization failed!\")\n success = False\n\n baselines=list()\n for baseline_dv in baseline_dvs:\n baselines.append( sm.Transformation(baseline_dv.T()) )\n \n return success, baselines\n\n"
] | [
[
"numpy.linalg.inv",
"numpy.eye",
"numpy.median",
"numpy.asmatrix",
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ymaxgit/mxnet | [
"01ae629c6593e0352fd30979bccd0196854ef882"
] | [
"tests/python/unittest/test_gluon_rnn.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport mxnet as mx\nfrom mxnet import gluon\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport unittest\nfrom mxnet.test_utils import almost_equal\n\n\ndef test_rnn():\n cell = gluon.rnn.RNNCell(100, prefix='rnn_')\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']\n assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef test_lstm():\n cell = gluon.rnn.LSTMCell(100, prefix='rnn_')\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']\n assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef test_lstm_forget_bias():\n forget_bias = 2.0\n stack = gluon.rnn.SequentialRNNCell()\n stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l0_'))\n stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l1_'))\n\n dshape = (32, 1, 200)\n data = mx.sym.Variable('data')\n\n sym, _ = stack.unroll(1, data, merge_outputs=True)\n mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))\n mod.bind(data_shapes=[('data', dshape)], label_shapes=None)\n\n mod.init_params()\n\n bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))\n expected_bias = np.hstack([np.zeros((100,)),\n forget_bias * np.ones(100, ), np.zeros((2 * 100,))])\n assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias)\n\n\ndef test_gru():\n cell = gluon.rnn.GRUCell(100, prefix='rnn_')\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']\n assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef 
test_residual():\n cell = gluon.rnn.ResidualCell(gluon.rnn.GRUCell(50, prefix='rnn_'))\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]\n outputs, _ = cell.unroll(2, inputs)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == \\\n ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']\n # assert outputs.list_outputs() == \\\n # ['rnn_t0_out_plus_residual_output', 'rnn_t1_out_plus_residual_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))\n assert outs == [(10, 50), (10, 50)]\n outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50)),\n rnn_t1_data=mx.nd.ones((10, 50)),\n rnn_i2h_weight=mx.nd.zeros((150, 50)),\n rnn_i2h_bias=mx.nd.zeros((150,)),\n rnn_h2h_weight=mx.nd.zeros((150, 50)),\n rnn_h2h_bias=mx.nd.zeros((150,)))\n expected_outputs = np.ones((10, 50))\n assert np.array_equal(outputs[0].asnumpy(), expected_outputs)\n assert np.array_equal(outputs[1].asnumpy(), expected_outputs)\n\n\ndef test_residual_bidirectional():\n cell = gluon.rnn.ResidualCell(\n gluon.rnn.BidirectionalCell(\n gluon.rnn.GRUCell(25, prefix='rnn_l_'),\n gluon.rnn.GRUCell(25, prefix='rnn_r_')))\n\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]\n outputs, _ = cell.unroll(2, inputs, merge_outputs=False)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == \\\n ['rnn_l_h2h_bias', 'rnn_l_h2h_weight', 'rnn_l_i2h_bias', 'rnn_l_i2h_weight',\n 'rnn_r_h2h_bias', 'rnn_r_h2h_weight', 'rnn_r_i2h_bias', 'rnn_r_i2h_weight']\n # assert outputs.list_outputs() == \\\n # ['bi_t0_plus_residual_output', 'bi_t1_plus_residual_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))\n assert outs == [(10, 50), (10, 50)]\n outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50))+5,\n rnn_t1_data=mx.nd.ones((10, 50))+5,\n rnn_l_i2h_weight=mx.nd.zeros((75, 50)),\n rnn_l_i2h_bias=mx.nd.zeros((75,)),\n rnn_l_h2h_weight=mx.nd.zeros((75, 25)),\n rnn_l_h2h_bias=mx.nd.zeros((75,)),\n rnn_r_i2h_weight=mx.nd.zeros((75, 50)),\n rnn_r_i2h_bias=mx.nd.zeros((75,)),\n rnn_r_h2h_weight=mx.nd.zeros((75, 25)),\n rnn_r_h2h_bias=mx.nd.zeros((75,)))\n expected_outputs = np.ones((10, 50))+5\n assert np.array_equal(outputs[0].asnumpy(), expected_outputs)\n assert np.array_equal(outputs[1].asnumpy(), expected_outputs)\n\n\ndef test_stack():\n cell = gluon.rnn.SequentialRNNCell()\n for i in range(5):\n if i == 1:\n cell.add(gluon.rnn.ResidualCell(gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_' % i)))\n else:\n cell.add(gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_'%i))\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n keys = sorted(cell.collect_params().keys())\n for i in range(5):\n assert 'rnn_stack%d_h2h_weight'%i in keys\n assert 'rnn_stack%d_h2h_bias'%i in keys\n assert 'rnn_stack%d_i2h_weight'%i in keys\n assert 'rnn_stack%d_i2h_bias'%i in keys\n assert outputs.list_outputs() == ['rnn_stack4_t0_out_output', 'rnn_stack4_t1_out_output', 'rnn_stack4_t2_out_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef test_bidirectional():\n cell = gluon.rnn.BidirectionalCell(\n gluon.rnn.LSTMCell(100, prefix='rnn_l0_'),\n gluon.rnn.LSTMCell(100, prefix='rnn_r0_'),\n output_prefix='rnn_bi_')\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = 
cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n assert outputs.list_outputs() == ['rnn_bi_t0_output', 'rnn_bi_t1_output', 'rnn_bi_t2_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 200), (10, 200), (10, 200)]\n\n\ndef test_zoneout():\n cell = gluon.rnn.ZoneoutCell(gluon.rnn.RNNCell(100, prefix='rnn_'), zoneout_outputs=0.5,\n zoneout_states=0.5)\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef check_rnn_forward(layer, inputs, deterministic=True):\n inputs.attach_grad()\n layer.collect_params().initialize()\n with mx.autograd.record():\n out = layer.unroll(3, inputs, merge_outputs=False)[0]\n mx.autograd.backward(out)\n out = layer.unroll(3, inputs, merge_outputs=True)[0]\n out.backward()\n\n np_out = out.asnumpy()\n np_dx = inputs.grad.asnumpy()\n\n layer.hybridize()\n\n with mx.autograd.record():\n out = layer.unroll(3, inputs, merge_outputs=False)[0]\n mx.autograd.backward(out)\n out = layer.unroll(3, inputs, merge_outputs=True)[0]\n out.backward()\n\n if deterministic:\n mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)\n mx.test_utils.assert_almost_equal(np_dx, inputs.grad.asnumpy(), rtol=1e-3, atol=1e-5)\n\n\ndef test_rnn_cells():\n check_rnn_forward(gluon.rnn.LSTMCell(100, input_size=200), mx.nd.ones((8, 3, 200)))\n check_rnn_forward(gluon.rnn.RNNCell(100, input_size=200), mx.nd.ones((8, 3, 200)))\n check_rnn_forward(gluon.rnn.GRUCell(100, input_size=200), mx.nd.ones((8, 3, 200)))\n\n bilayer = gluon.rnn.BidirectionalCell(gluon.rnn.LSTMCell(100, input_size=200),\n gluon.rnn.LSTMCell(100, input_size=200))\n check_rnn_forward(bilayer, mx.nd.ones((8, 3, 200)))\n\n check_rnn_forward(gluon.rnn.DropoutCell(0.5), mx.nd.ones((8, 3, 200)), False)\n\n check_rnn_forward(gluon.rnn.ZoneoutCell(gluon.rnn.LSTMCell(100, input_size=200),\n 0.5, 0.2),\n mx.nd.ones((8, 3, 200)), False)\n\n net = gluon.rnn.SequentialRNNCell()\n net.add(gluon.rnn.LSTMCell(100, input_size=200))\n net.add(gluon.rnn.RNNCell(100, input_size=100))\n net.add(gluon.rnn.GRUCell(100, input_size=100))\n check_rnn_forward(net, mx.nd.ones((8, 3, 200)))\n\ndef check_rnn_layer_forward(layer, inputs, states=None):\n layer.collect_params().initialize()\n inputs.attach_grad()\n with mx.autograd.record():\n out = layer(inputs, states)\n if states is not None:\n assert isinstance(out, tuple) and len(out) == 2\n out = out[0]\n else:\n assert isinstance(out, mx.nd.NDArray)\n out.backward()\n\n np_out = out.asnumpy()\n np_dx = inputs.grad.asnumpy()\n\n layer.hybridize()\n\n with mx.autograd.record():\n out = layer(inputs, states)\n if states is not None:\n assert isinstance(out, tuple) and len(out) == 2\n out = out[0]\n else:\n assert isinstance(out, mx.nd.NDArray)\n out.backward()\n\n mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)\n mx.test_utils.assert_almost_equal(np_dx, inputs.grad.asnumpy(), rtol=1e-3, atol=1e-5)\n\ndef test_rnn_layers():\n check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)))\n check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)))\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), 
mx.nd.ones((8, 3, 20)), [mx.nd.ones((2, 3, 10)), mx.nd.ones((2, 3, 10))])\n check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)))\n check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))\n\n net = gluon.nn.Sequential()\n net.add(gluon.rnn.LSTM(10, 2, bidirectional=True))\n net.add(gluon.nn.BatchNorm(axis=2))\n net.add(gluon.nn.Flatten())\n net.add(gluon.nn.Dense(3, activation='relu'))\n net.collect_params().initialize()\n with mx.autograd.record():\n net(mx.nd.ones((2, 3, 10))).backward()\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n"
] | [
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lamsoa729/FoXlink | [
"3c061b02968cdab1def752d5c145a6df4615504b"
] | [
"foxlink/me_zrl_bound_evolvers.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"@package docstring\nFile: me_zrl_bound_evolvers.py\nAuthor: Adam Lamson\nEmail: [email protected]\nDescription:\n\"\"\"\n\nimport numpy as np\n# from scipy.integrate import dblquad\nfrom .me_helpers import dr_dt, convert_sol_to_geom\nfrom .me_zrl_odes import (rod_geom_derivs_zrl, calc_moment_derivs_zrl,\n calc_moment_derivs_zrl_B_terms,\n calc_boundary_derivs_zrl)\nfrom .me_zrl_helpers import (avg_force_zrl,\n prep_zrl_bound_evolver,\n get_zrl_moments_and_boundary_terms)\nfrom .rod_steric_forces import calc_wca_force_torque\nfrom .me_zrl_evolvers import prep_zrl_evolver\n\n\ndef evolver_zrl_bound(sol, fric_coeff, params):\n \"\"\"!Calculate all time derivatives necessary to solve the moment expansion\n evolution of the Fokker-Planck equation of zero rest length (zrl) crosslinkers\nbound to moving rods. d<var> is the time derivative of corresponding\nvariable\n\n @param sol: Solution vector to solve_ivp\n @param fric_coeff: friction coefficients of rod\n @param params: Constant parameters of the simulation\n @return: Time-derivatives of all time varying quantities in a flattened\n array\n \"\"\"\n # Define useful parameters for functions\n hL_i, hL_j = (.5 * params['L_i'], .5 * params['L_j'])\n ks = params['ks']\n r_i, r_j, u_i, u_j = convert_sol_to_geom(sol)\n r_ij = r_j - r_i\n\n (scalar_geom, q_arr, Q_arr) = prep_zrl_bound_evolver(sol, params)\n (mu_kl, B_terms) = get_zrl_moments_and_boundary_terms(sol)\n if mu_kl[0] < 0.:\n mu_kl[0] = 0.\n if mu_kl[4] < 0.:\n mu_kl[4] = 0.\n if mu_kl[5] < 0.:\n mu_kl[5] = 0.\n\n # Get average force of crosslinkers on rod2\n f_ij = avg_force_zrl(r_ij, u_i, u_j, mu_kl[0], mu_kl[1], mu_kl[2], ks)\n # Evolution of rod positions\n dgeom = rod_geom_derivs_zrl(f_ij, r_ij, u_i, u_j, scalar_geom,\n mu_kl, fric_coeff, ks)\n\n # Evolution of moments\n dmu_kl = calc_moment_derivs_zrl_B_terms(mu_kl, scalar_geom,\n q_arr, B_terms, params)\n\n # Evolution of boundary condtions\n dB_terms = calc_boundary_derivs_zrl(B_terms, scalar_geom, Q_arr, params)\n dsol = np.concatenate(dgeom, dmu_kl, dB_terms)\n return dsol\n\n##########################################\n"
] | [
[
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shuishoudage/music_generator | [
"7c17ef5bb3a5d872bff5ac8e1664f57f5b4ea08f"
] | [
"data_clean/preprocessing.py"
] | [
"from typing import List, Tuple, Dict, Any\nfrom collections import Counter\nimport pretty_midi\nimport matplotlib.pyplot as plt\nimport librosa.display\nimport os\nfrom os import listdir, walk\nfrom os.path import isfile, isdir, join\nfrom sys import argv\nimport traceback\nimport logging\nimport numpy as np\nfrom shutil import copyfile\nimport shutil\n\n\n# Ideas behind the preprocessing class\n#\n# 1. only use those midi with one tempo and one key, since some midi music\n# have key and tempo changes inside. Which might make some unpredictable result\n#\n# 2. list distribution for all keys contained in the corpus. Only select those\n# most frequent appeared. (different keys may increase training difficulty)\n#\n# 3. only select similar tempo music, based on the mean and std of tempos,\n# simple one will be left boundary = mean - std, right boundary = mean + std\n#\n# 4. find the mean of highest and lowest pitch in the corpus. filter out those not\n# the range. We have pitch range from 0-128, no meaning cover two extreme sides.\nclass FileReport(object):\n \"\"\"\n This class is mainly for generating meta information for our report\n \"\"\"\n\n def __init__(self,\n tempos: List[float],\n freq_key: Dict[int, int],\n min_pitch: List[int],\n max_pitch: List[int]):\n self.tempos = tempos\n self.freq_key = freq_key\n self.min_pitch = min_pitch\n self.max_pitch = max_pitch\n\n def aggregation_report(self):\n \"\"\"\n two important variable are min_pitch and max_pitch,\n since they will be used to decode from pitch to audio\n \"\"\"\n temp_mean = np.array(self.tempos).mean()\n temp_std = np.array(self.tempos).std()\n most_freq_key = self.getMostFreqValue(self.freq_key)\n min_pitch = int(np.array(self.min_pitch).mean())\n max_pitch = int(np.array(self.max_pitch).mean())\n return temp_mean, temp_std, most_freq_key, min_pitch, max_pitch\n\n def plots(self):\n # implement later on\n pass\n\n def getMostFreqValue(self, keys: Dict[int, int], reversed=True) -> int:\n return sorted(keys.items(), key=lambda kv: kv[1], reverse=reversed)[0][0]\n\n\nclass Preprocess(object):\n def __init__(self, path: str):\n self.path = path\n self.fileFilter()\n\n def generateMidiFileReport(self) -> FileReport:\n \"\"\"\n meta information like tempos, keys, pitches will be generated for\n filtering the midi files\n \"\"\"\n tempos = []\n keys = []\n max_pitchs = []\n min_pitchs = []\n for pm in self.pms:\n try:\n tempos.append(pm.estimate_tempo())\n key = pm.key_signature_changes[0].key_number\n keys.append(key)\n min_pitch, max_pitch = self.getMinMaxPitch(pm)\n max_pitchs.append(max_pitch)\n min_pitchs.append(min_pitch)\n except:\n pass\n self.report = FileReport(tempos, dict(\n Counter(keys)), min_pitchs, max_pitchs)\n return self.report\n\n def getMinMaxPitch(self, pm: pretty_midi.PrettyMIDI):\n \"\"\"\n find the min and max pitch inside a midi file\n \"\"\"\n notes = [\n note.pitch for instrument in pm.instruments for note in instrument.notes\n ]\n return min(notes), max(notes)\n\n def SaveFilterMIDIfiles(self):\n \"\"\"\n according generated meta data info to filter out those not in range\n \"\"\"\n report = self.generateMidiFileReport()\n temp_mean, temp_std, key, left_boundary, right_boundary = report.aggregation_report()\n piano_roll_paths = []\n for pm, path in zip(self.pms, self.paths):\n try:\n tempo = pm.estimate_tempo()\n min_pitch, max_pitch = self.getMinMaxPitch(pm)\n if self.isTempoInRange(tempo, temp_mean, temp_std) \\\n and self.isPitchInRange(min_pitch, max_pitch, left_boundary, right_boundary) \\\n and 
self.isKeyMatch(pm.key_signature_changes[0].key_number, key):\n savedPath = os.path.join(os.getcwd(), 'filterData')\n if not os.path.exists(savedPath):\n os.makedirs(savedPath, exist_ok=True)\n shutil.move(\n path, os.path.join(os.getcwd(), 'filterData', os.path.basename(path)))\n except:\n pass\n\n def isTempoInRange(self, tempo: float, mean: float, std: float) -> bool:\n \"\"\"\n a helper function that can be used to check if a midi file's tempo is in range\n \"\"\"\n if tempo > (mean - std) and tempo < (mean + std):\n return True\n return False\n\n def isKeyMatch(self, key: int, grand_truth_key: int) -> bool:\n if key == grand_truth_key:\n return True\n return False\n\n def isPitchInRange(self, low_pitch: int,\n high_pitch: int,\n left_boundary: int,\n right_boundary: int) -> bool:\n if low_pitch >= left_boundary and high_pitch <= right_boundary:\n return True\n return False\n\n def fileFilter(self):\n \"\"\"\n first filtering pass that only allows one tempo and one key inside a midi file\n \"\"\"\n self.pms: List[pretty_midi.PrettyMIDI] = []\n self.paths: List[str] = []\n for (dirPath, _, files) in walk(self.path): # type: ignore\n for file in files:\n # get the absolute path of the file\n path = join(dirPath, file)\n try:\n pm = pretty_midi.PrettyMIDI(path)\n # only handle files that contain one key and one tempo\n if len(pm.key_signature_changes) == 1 \\\n and len(pm.time_signature_changes) == 1:\n self.pms.append(pm)\n self.paths.append(path)\n except: # skip all parsing exceptions\n pass\n\n\ndef cliArgParser(argv) -> Any:\n if len(argv) != 2:\n raise ValueError(f\"path of folder must be provided\")\n if isdir(argv[1]):\n path = os.path.abspath(argv[1])\n return path\n else:\n raise ValueError(f\"provided path is not a folder\")\n\n\nif __name__ == \"__main__\":\n try:\n path = cliArgParser(argv)\n p = Preprocess(path)\n p.SaveFilterMIDIfiles()\n except Exception as err:\n print(traceback.format_exc())\n exit(1)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kushalj001/transformers | [
"0538820737bd8fb9ba1eb3a772412c6bbe2433ab"
] | [
"src/transformers/modeling_t5.py"
] | [
"# coding=utf-8\n# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch T5 model. \"\"\"\n\n\nimport copy\nimport math\nimport os\nimport warnings\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom .configuration_t5 import T5Config\nfrom .file_utils import (\n DUMMY_INPUTS,\n DUMMY_MASK,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput\nfrom .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer\nfrom .utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"T5Config\"\n_TOKENIZER_FOR_DOC = \"T5Tokenizer\"\n\n####################################################\n# This dict contains shortcut names and associated url\n# for the pretrained weights provided with the models\n####################################################\nT5_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n # See all T5 models at https://huggingface.co/models?filter=t5\n]\n\n\n####################################################\n# This is a conversion method from TF 1.0 to PyTorch\n# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28\n####################################################\ndef load_tf_weights_in_t5(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n tf_weights[name] = array\n\n for txt_name in names:\n name = txt_name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n if \"_slot_\" in name[-1]:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n pointer = model\n array = tf_weights[txt_name]\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n # elif scope_names[0] == 'scale':\n # pointer = getattr(pointer, 'weight')\n # elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':\n # pointer = getattr(pointer, 'bias')\n # elif scope_names[0] == 'squad':\n # pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if scope_names[0] not in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n if scope_names[0] != \"embedding\":\n logger.info(\"Transposing numpy weight of shape {} for {}\".format(array.shape, name))\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array.astype(np.float32))\n tf_weights.pop(txt_name, None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(\", \".join(tf_weights.keys())))\n # logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n return model\n\n\n####################################################\n# PyTorch Models are constructed by sub-classing\n# - torch.nn.Module for the layers and\n# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)\n####################################################\n\n\nclass T5LayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-6):\n \"\"\"\n Construct a layernorm module in the T5 style No bias and no subtraction of mean.\n \"\"\"\n super().__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n # layer norm should always be calculated in float32\n variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)\n x = x / torch.sqrt(variance + self.variance_epsilon)\n\n if self.weight.dtype == 
torch.float16:\n x = x.to(torch.float16)\n return self.weight * x\n\n\nclass T5DenseReluDense(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)\n self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states):\n h = self.wi(hidden_states)\n h = F.relu(h)\n h = self.dropout(h)\n h = self.wo(h)\n return h\n\n\nclass T5LayerFF(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.DenseReluDense = T5DenseReluDense(config)\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states):\n norm_x = self.layer_norm(hidden_states)\n y = self.DenseReluDense(norm_x)\n layer_output = hidden_states + self.dropout(y)\n return layer_output\n\n\nclass T5Attention(nn.Module):\n def __init__(self, config: T5Config, has_relative_attention_bias=False, is_bidirectional=False):\n super().__init__()\n self.is_bidirectional = is_bidirectional\n self.is_decoder = config.is_decoder\n self.has_relative_attention_bias = has_relative_attention_bias\n\n self.relative_attention_num_buckets = config.relative_attention_num_buckets\n self.d_model = config.d_model\n self.d_kv = config.d_kv\n self.n_heads = config.num_heads\n self.dropout = config.dropout_rate\n self.inner_dim = self.n_heads * self.d_kv\n\n # Mesh TensorFlow initialization to avoid scaling before softmax\n self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)\n\n if self.has_relative_attention_bias:\n self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.d_kv, self.pruned_heads)\n # Prune linear layers\n self.q = prune_linear_layer(self.q, index)\n self.k = prune_linear_layer(self.k, index)\n self.v = prune_linear_layer(self.v, index)\n self.o = prune_linear_layer(self.o, index, dim=1)\n # Update hyper params\n self.n_heads = self.n_heads - len(heads)\n self.inner_dim = self.d_kv * self.n_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n @staticmethod\n def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):\n \"\"\"\n Adapted from Mesh Tensorflow:\n https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593\n\n Translate relative position to a bucket number for relative attention. The relative position is defined as\n memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to\n position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for\n small absolute relative_position and larger buckets for larger absolute relative_positions. All relative\n positions >=max_distance map to the same bucket. 
All relative positions <=-max_distance map to the same bucket.\n This should allow for more graceful generalization to longer sequences than the model has been trained on\n\n Args:\n relative_position: an int32 Tensor\n bidirectional: a boolean - whether the attention is bidirectional\n num_buckets: an integer\n max_distance: an integer\n\n Returns:\n a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)\n \"\"\"\n ret = 0\n n = -relative_position\n if bidirectional:\n num_buckets //= 2\n ret += (n < 0).to(torch.long) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets\n n = torch.abs(n)\n else:\n n = torch.max(n, torch.zeros_like(n))\n # now n is in the range [0, inf)\n\n # half of the buckets are for exact increments in positions\n max_exact = num_buckets // 2\n is_small = n < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance\n val_if_large = max_exact + (\n torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)\n ).to(torch.long)\n val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))\n\n ret += torch.where(is_small, n, val_if_large)\n return ret\n\n def compute_bias(self, qlen, klen):\n \"\"\" Compute binned relative position bias \"\"\"\n context_position = torch.arange(qlen, dtype=torch.long)[:, None]\n memory_position = torch.arange(klen, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position # shape (qlen, klen)\n rp_bucket = self._relative_position_bucket(\n relative_position, # shape (qlen, klen)\n bidirectional=self.is_bidirectional,\n num_buckets=self.relative_attention_num_buckets,\n )\n rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)\n values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)\n values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen)\n return values\n\n def forward(\n self,\n input,\n mask=None,\n kv=None,\n position_bias=None,\n past_key_value=None,\n head_mask=None,\n query_length=None,\n use_cache=False,\n output_attentions=False,\n ):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n # past_key_value[0] is (bs, n_heads, q_len - 1, dim_per_head)\n bs, qlen, dim = input.size()\n\n if past_key_value is not None:\n assert self.is_decoder is True, \"Encoder cannot cache past key value states\"\n assert (\n len(past_key_value) == 2\n ), \"past_key_value should have 2 past states: keys and values. 
Got {} past states\".format(\n len(past_key_value)\n )\n real_qlen = qlen + past_key_value[0].shape[2] if query_length is None else query_length\n else:\n real_qlen = qlen\n\n if kv is None:\n klen = real_qlen\n else:\n klen = kv.size(1)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)\n\n q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)\n\n if kv is None:\n k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head)\n elif past_key_value is None:\n k = v = kv\n k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if past_key_value is not None:\n if kv is None:\n k_, v_ = past_key_value\n k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)\n v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = past_key_value\n\n if self.is_decoder and use_cache is True:\n present_key_value_state = ((k, v),)\n else:\n present_key_value_state = (None,)\n\n # (bs, n_heads, qlen, klen)\n scores = torch.matmul(\n q, k.transpose(3, 2)\n ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", q, k), compatible with onnx op>9\n\n if position_bias is None:\n if not self.has_relative_attention_bias:\n raise ValueError(\"No position_bias provided and no weights to compute position_bias\")\n position_bias = self.compute_bias(real_qlen, klen)\n\n # if key and values are already calculated\n # we want only the last query position bias\n if past_key_value is not None:\n position_bias = position_bias[:, :, -qlen:, :]\n\n if mask is not None:\n position_bias = position_bias + mask # (bs, n_heads, qlen, klen)\n\n scores += position_bias\n weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)\n weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n\n context = self.o(context)\n\n outputs = (context,) + present_key_value_state\n\n if output_attentions:\n outputs = outputs + (weights,)\n if self.has_relative_attention_bias:\n outputs = outputs + (position_bias,)\n return outputs\n\n\nclass T5LayerSelfAttention(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super().__init__()\n self.SelfAttention = T5Attention(\n config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=not config.is_decoder\n )\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n head_mask=None,\n past_key_value=None,\n use_cache=False,\n output_attentions=False,\n ):\n norm_x = self.layer_norm(hidden_states)\n attention_output = self.SelfAttention(\n norm_x,\n mask=attention_mask,\n position_bias=position_bias,\n head_mask=head_mask,\n past_key_value=past_key_value,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return 
outputs\n\n\nclass T5LayerCrossAttention(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super().__init__()\n self.EncDecAttention = T5Attention(\n config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=True\n )\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(\n self,\n hidden_states,\n kv,\n attention_mask=None,\n position_bias=None,\n head_mask=None,\n past_key_value=None,\n use_cache=False,\n query_length=None,\n output_attentions=False,\n ):\n norm_x = self.layer_norm(hidden_states)\n attention_output = self.EncDecAttention(\n norm_x,\n mask=attention_mask,\n kv=kv,\n position_bias=position_bias,\n head_mask=head_mask,\n past_key_value=past_key_value,\n use_cache=use_cache,\n query_length=query_length,\n output_attentions=output_attentions,\n )\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return outputs\n\n\nclass T5Block(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super().__init__()\n self.is_decoder = config.is_decoder\n self.layer = nn.ModuleList()\n self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))\n if self.is_decoder:\n self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias))\n\n self.layer.append(T5LayerFF(config))\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n encoder_decoder_position_bias=None,\n head_mask=None,\n past_key_value=None,\n use_cache=False,\n output_attentions=False,\n ):\n\n if past_key_value is not None:\n assert self.is_decoder, \"Only decoder can use `past_key_values`\"\n expected_num_past_key_values = 2 if encoder_hidden_states is None else 4\n\n error_message = \"There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states\".format(\n expected_num_past_key_values,\n \"2 (past / key) for cross attention\" if expected_num_past_key_values == 4 else \"\",\n len(past_key_value),\n )\n assert len(past_key_value) == expected_num_past_key_values, error_message\n\n self_attn_past_key_value = past_key_value[:2]\n cross_attn_past_key_value = past_key_value[2:]\n else:\n self_attn_past_key_value, cross_attn_past_key_value = None, None\n\n self_attention_outputs = self.layer[0](\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n head_mask=head_mask,\n past_key_value=self_attn_past_key_value,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n hidden_states, present_key_value_state = self_attention_outputs[:2]\n attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights\n\n if self.is_decoder and encoder_hidden_states is not None:\n # the actual query length is unknown for cross attention\n # if using past key value states. 
Need to inject it here\n if present_key_value_state is not None:\n query_length = present_key_value_state[0].shape[2]\n else:\n query_length = None\n\n cross_attention_outputs = self.layer[1](\n hidden_states,\n kv=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n position_bias=encoder_decoder_position_bias,\n head_mask=head_mask,\n past_key_value=cross_attn_past_key_value,\n query_length=query_length,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n hidden_states = cross_attention_outputs[0]\n # Combine self attn and cross attn key value states\n if present_key_value_state is not None:\n present_key_value_state = present_key_value_state + cross_attention_outputs[1]\n\n # Keep cross-attention outputs and relative position weights\n attention_outputs = attention_outputs + cross_attention_outputs[2:]\n\n # Apply Feed Forward layer\n hidden_states = self.layer[-1](hidden_states)\n outputs = (hidden_states,)\n\n # Add attentions if we output them\n outputs = outputs + (present_key_value_state,) + attention_outputs\n return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n\n\nclass T5PreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = T5Config\n load_tf_weights = load_tf_weights_in_t5\n base_model_prefix = \"transformer\"\n\n @property\n def dummy_inputs(self):\n input_ids = torch.tensor(DUMMY_INPUTS)\n input_mask = torch.tensor(DUMMY_MASK)\n dummy_inputs = {\n \"decoder_input_ids\": input_ids,\n \"input_ids\": input_ids,\n \"decoder_attention_mask\": input_mask,\n }\n return dummy_inputs\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n factor = self.config.initializer_factor # Used for testing weights initialization\n if isinstance(module, T5LayerNorm):\n module.weight.data.fill_(factor * 1.0)\n elif isinstance(module, (T5Model, T5ForConditionalGeneration)):\n # Mesh TensorFlow embeddings initialization\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624\n module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)\n elif isinstance(module, T5DenseReluDense):\n # Mesh TensorFlow FF initialization\n # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56\n # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89\n module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))\n if hasattr(module.wi, \"bias\") and module.wi.bias is not None:\n module.wi.bias.data.zero_()\n module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))\n if hasattr(module.wo, \"bias\") and module.wo.bias is not None:\n module.wo.bias.data.zero_()\n elif isinstance(module, T5Attention):\n # Mesh TensorFlow attention initialization to avoid scaling before softmax\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136\n d_model = self.config.d_model\n d_kv = self.config.d_kv\n n_heads = self.config.num_heads\n module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5))\n module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))\n 
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))\n module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5))\n if module.has_relative_attention_bias:\n module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))\n\n def _shift_right(self, input_ids):\n decoder_start_token_id = self.config.decoder_start_token_id\n pad_token_id = self.config.pad_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information\"\n\n # shift inputs to the right\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()\n shifted_input_ids[..., 0] = decoder_start_token_id\n\n assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids` has only positive values\"\n\n return shifted_input_ids\n\n\nclass T5Stack(T5PreTrainedModel):\n def __init__(self, config, embed_tokens=None):\n super().__init__(config)\n\n self.embed_tokens = embed_tokens\n self.is_decoder = config.is_decoder\n\n self.block = nn.ModuleList(\n [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]\n )\n self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def get_output_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, new_embeddings):\n self.embed_tokens = new_embeddings\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n inputs_embeds=None,\n head_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n err_msg_prefix = \"decoder_\" if self.is_decoder else \"\"\n raise ValueError(\n f\"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n err_msg_prefix = \"decoder_\" if self.is_decoder else \"\"\n raise ValueError(f\"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds\")\n\n if inputs_embeds is None:\n assert self.embed_tokens is not None, \"You have to initialize the model with valid token embeddings\"\n inputs_embeds = self.embed_tokens(input_ids)\n\n batch_size, seq_length = input_shape\n\n # required mask seq length can be calculated via length of past\n mask_seq_length = past_key_values[0][0].shape[2] + 
seq_length if past_key_values is not None else seq_length\n\n if use_cache is True:\n assert self.is_decoder, \":obj:`use_cache` can only be set to `True` if {} is used as a decoder\".format(\n self\n )\n\n if attention_mask is None:\n attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)\n if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:\n encoder_seq_length = encoder_hidden_states.shape[1]\n encoder_attention_mask = torch.ones(\n batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long\n )\n\n # initialize past_key_values with `None` if past does not exist\n if past_key_values is None:\n past_key_values = [None] * len(self.block)\n\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)\n\n if self.is_decoder and encoder_attention_mask is not None:\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n head_mask = self.get_head_mask(head_mask, self.config.num_layers)\n present_key_value_states = () if use_cache else None\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n position_bias = None\n encoder_decoder_position_bias = None\n\n hidden_states = self.dropout(inputs_embeds)\n\n for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask=extended_attention_mask,\n position_bias=position_bias,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n encoder_decoder_position_bias=encoder_decoder_position_bias,\n head_mask=head_mask[i],\n past_key_value=past_key_value,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n # layer_outputs is a tuple with:\n # hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n hidden_states, present_key_value_state = layer_outputs[:2]\n\n if i == 0:\n # We share the position biases between the layers - the first layer store them\n # layer_outputs = hidden-states, key-value-states (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n position_bias = layer_outputs[3 if output_attentions else 2]\n if self.is_decoder and encoder_hidden_states is not None:\n encoder_decoder_position_bias = layer_outputs[5 if output_attentions else 3]\n # append next layer key value states\n if use_cache:\n present_key_value_states = present_key_value_states + (present_key_value_state,)\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[2],) # We keep only self-attention weights for now\n\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions]\n if v is not None\n )\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n 
past_key_values=present_key_value_states,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n )\n\n\nT5_START_DOCSTRING = r\"\"\"\n\n The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer\n <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,\n Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text\n denoising generative setting.\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nT5_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you\n should be able to pad the inputs on both the right and the left.\n\n Indices can be obtained using :class:`~transformers.T5Tokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n detail.\n\n To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training\n <./t5.html#training>`__.\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Provide for sequence to sequence training. T5 uses the :obj:`pad_token_id` as the starting token for\n :obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last\n :obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`).\n\n To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at `T5 Training\n <./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset,\n :obj:`decoder_input_ids` takes the value of :obj:`input_ids`.\n decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):\n Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. 
Causal mask will\n also be used by default.\n encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):\n Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`:\n `attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a\n sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of\n the decoder.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded\n representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`\n have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert\n :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`\n takes the value of :obj:`inputs_embeds`.\n\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare T5 Model transformer outputting raw hidden-states\" \"without any specific head on top.\",\n T5_START_DOCSTRING,\n)\nclass T5Model(T5PreTrainedModel):\n def __init__(self, config: T5Config):\n super().__init__(config)\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\n\n encoder_config = copy.deepcopy(config)\n encoder_config.use_cache = False\n encoder_config.is_encoder_decoder = False\n self.encoder = T5Stack(encoder_config, self.shared)\n\n decoder_config = copy.deepcopy(config)\n decoder_config.is_decoder = True\n decoder_config.is_encoder_decoder = False\n decoder_config.num_layers = config.num_decoder_layers\n self.decoder = T5Stack(decoder_config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared = new_embeddings\n self.encoder.set_input_embeddings(new_embeddings)\n self.decoder.set_input_embeddings(new_embeddings)\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n head_mask=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n Returns:\n\n Example::\n\n >>> from transformers import T5Tokenizer, T5Model\n\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\n >>> model = T5Model.from_pretrained('t5-small')\n\n >>> input_ids = tokenizer(\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\").input_ids # Batch size 1\n >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1\n >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True)\n\n >>> last_hidden_states = outputs.last_hidden_state\n \"\"\"\n if \"decoder_past_key_value_states\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_value_states\")\n if \"decoder_past_key_values\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_values\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # Encode if 
needed (training, first prediction pass)\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_values=past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"T5 Model with a `language modeling` head on top. \"\"\", T5_START_DOCSTRING)\nclass T5ForConditionalGeneration(T5PreTrainedModel):\n authorized_missing_keys = [r\"encoder\\.embed_tokens\\.weight\", r\"decoder\\.embed_tokens\\.weight\", r\"lm_head\\.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model_dim = config.d_model\n\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\n\n encoder_config = copy.deepcopy(config)\n encoder_config.use_cache = False\n encoder_config.is_encoder_decoder = False\n self.encoder = T5Stack(encoder_config, self.shared)\n\n decoder_config = copy.deepcopy(config)\n decoder_config.is_decoder = True\n decoder_config.is_encoder_decoder = False\n decoder_config.num_layers = config.num_decoder_layers\n self.decoder = T5Stack(decoder_config, self.shared)\n\n self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared = new_embeddings\n self.encoder.set_input_embeddings(new_embeddings)\n self.decoder.set_input_embeddings(new_embeddings)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n head_mask=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels 
for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,\n config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for\n labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n\n Examples::\n\n >>> from transformers import T5Tokenizer, T5ForConditionalGeneration\n\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\n >>> model = T5ForConditionalGeneration.from_pretrained('t5-small', return_dict=True)\n\n >>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids\n labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids\n >>> outputs = model(input_ids=input_ids, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\n >>> input_ids = tokenizer(\"summarize: studies have shown that owning a dog is good for you \", return_tensors=\"pt\").input_ids # Batch size 1\n >>> outputs = model.generate(input_ids)\n \"\"\"\n\n if \"lm_labels\" in kwargs:\n warnings.warn(\n \"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"lm_labels\")\n if \"decoder_past_key_value_states\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_value_states\")\n if \"decoder_past_key_values\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_values\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # Encode if needed (training, first prediction pass)\n if encoder_outputs is None:\n # Convert encoder inputs in embeddings if needed\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\n # get decoder inputs from shifting lm labels to the right\n decoder_input_ids = self._shift_right(labels)\n\n # If decoding with past key value states, only the last tokens\n # should be given as an input\n if past_key_values is not None:\n assert labels is None, \"Decoder should not use cached key value states when training.\"\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n if decoder_inputs_embeds is not None:\n decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]\n\n # Decode\n decoder_outputs = 
self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_values=past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = decoder_outputs[0]\n # Rescale output before projecting on vocab\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\n sequence_output = sequence_output * (self.model_dim ** -0.5)\n lm_logits = self.lm_head(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-100)\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))\n # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\n\n if not return_dict:\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=loss,\n logits=lm_logits,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs):\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\n \"decoder_input_ids\": input_ids,\n \"past_key_values\": past,\n \"encoder_outputs\": encoder_outputs,\n \"attention_mask\": attention_mask,\n \"use_cache\": use_cache,\n }\n\n def _reorder_cache(self, past, beam_idx):\n # if decoder past is not included in output\n # speedy decoding is disabled and no need to reorder\n if past is None:\n logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\")\n return past\n\n reordered_decoder_past = ()\n for layer_past_states in past:\n # get the correct batch idx from layer past batch dim\n # batch dim of `past` is at 2nd position\n reordered_layer_past_states = ()\n for layer_past_state in layer_past_states:\n # need to set correct `past` for each of the four key / value states\n reordered_layer_past_states = reordered_layer_past_states + (\n layer_past_state.index_select(0, beam_idx),\n )\n\n assert reordered_layer_past_states[0].shape == layer_past_states[0].shape\n assert len(reordered_layer_past_states) == len(layer_past_states)\n\n reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)\n return reordered_decoder_past\n"
] | [
[
"torch.all",
"torch.abs",
"torch.nn.functional.dropout",
"torch.cat",
"torch.nn.Embedding",
"torch.where",
"torch.full_like",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.sqrt",
"torch.tensor",
"torch.nn.functional.relu",
"torch.arange",
"tensorflow.train.list_variables",
"torch.nn.ModuleList",
"torch.zeros_like",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"numpy.transpose",
"torch.matmul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
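A note on the T5 record above: its `_shift_right` helper implements teacher forcing by feeding the decoder the labels shifted one step to the right. Below is a minimal standalone sketch of that logic, not part of the record itself; it assumes (as in T5's released configs) that `decoder_start_token_id` equals `pad_token_id = 0`.

```python
import torch

# Hedged sketch of T5-style label shifting (teacher-forcing input construction).
def shift_right(labels: torch.Tensor,
                decoder_start_token_id: int = 0,
                pad_token_id: int = 0) -> torch.Tensor:
    shifted = labels.new_zeros(labels.shape)
    shifted[..., 1:] = labels[..., :-1].clone()   # move every label one step right
    shifted[..., 0] = decoder_start_token_id      # prepend the start token
    # -100 is a loss-masking marker, not a vocabulary id; map it back to padding.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[42, 17, -100, -100]])
print(shift_right(labels))  # tensor([[ 0, 42, 17,  0]])
```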
sepam/machine-learning-engineering-for-production-public | [
"cd6053459eee9b7f30bf86da63104b3f1381383a"
] | [
"course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py"
] | [
"import pickle\nimport numpy as np\nfrom typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, conlist\n\n\n\napp = FastAPI(title=\"Predicting Wine Class with batching\")\n\n# Open classifier in global scope\nwith open(\"models/wine-95-fixed.pkl\", \"rb\") as file:\n clf = pickle.load(file)\n\n\nclass Wine(BaseModel):\n batches: List[conlist(item_type=float, min_items=13, max_items=13)]\n\n\n# make predictions on this endpoint\[email protected](\"/predict\")\ndef predict(wine: Wine):\n batches = wine.batches\n np_batches = np.array(batches)\n pred = clf.predict(np_batches).tolist()\n return {\"Prediction\": pred}\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
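The FastAPI record above exposes a `/predict` endpoint and validates each batch row with `conlist(float, min_items=13, max_items=13)`. A hedged client-side sketch follows; the host, port, and feature values are hypothetical, and the server from the record must already be running.

```python
import requests  # assumed available; any HTTP client works

# One wine sample = exactly 13 floats, matching the `conlist` constraint.
batch = {
    "batches": [
        [12.6, 1.34, 1.9, 18.5, 88.0, 1.45, 1.36,
         0.29, 1.35, 2.45, 1.04, 2.77, 562.0]
    ]
}
response = requests.post("http://localhost:8000/predict", json=batch)
print(response.json())  # e.g. {"Prediction": [1]}
```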
GLaDO8/pytorch_playground | [
"3623de18881a37ce413c92d8a63ea9ba1cc401a5"
] | [
"nnwordembed.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\ntorch.manual_seed(1)\n\nword_to_ix = {\"hello\": 0, \"world\": 1}\n#first argument is the size of the embedded matrix. The second argument is the dimension of each word embedding. \nembeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings\nlookup_tensor = torch.tensor([word_to_ix[\"hello\"], word_to_ix[\"world\"]], dtype=torch.long)\nhello_embed = embeds(lookup_tensor)\nprint(hello_embed)"
] | [
[
"torch.manual_seed",
"torch.nn.Embedding",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
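To make the lookup in the record above concrete: `nn.Embedding(2, 5)` is simply a learnable `(2, 5)` weight matrix, and indexing it with a `LongTensor` selects rows. A short shape-only sketch (illustrative, not part of the record):

```python
import torch
import torch.nn as nn

embeds = nn.Embedding(2, 5)                   # weight matrix of shape (2, 5)
idx = torch.tensor([0, 1], dtype=torch.long)  # one index per word
out = embeds(idx)                             # row lookup, differentiable

print(embeds.weight.shape)  # torch.Size([2, 5])
print(out.shape)            # torch.Size([2, 5]) -- one 5-dim vector per index
```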
zactodd/mmdetection | [
"68532eb6f4643ddf0179a4384c8c9e004a2c1d07",
"84fbb2c6ee7346ea722cea3a4fa16d73e11fcafd",
"f8d5f6cafeafeac8beb22d855798327682f65f0a"
] | [
"mmdet/models/dense_heads/pisa_retinanet_head.py",
"mmdet/models/dense_heads/ssd_head.py",
"mmdet/models/roi_heads/mask_heads/maskiou_head.py"
] | [
"import torch\n\nfrom mmdet.core import force_fp32, images_to_levels\nfrom ..builder import HEADS\nfrom ..losses import carl_loss, isr_p\nfrom .retina_head import RetinaHead\n\n\[email protected]_module()\nclass PISARetinaHead(RetinaHead):\n \"\"\"PISA Retinanet Head.\n\n The head owns the same structure with Retinanet Head, but differs in two\n aspects:\n 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to\n change the positive loss weights.\n 2. Classification-aware regression loss is adopted as a third loss.\n \"\"\"\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n def loss(self,\n cls_scores,\n bbox_preds,\n gt_bboxes,\n gt_labels,\n img_metas,\n gt_bboxes_ignore=None):\n \"\"\"Compute losses of the head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W)\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W)\n gt_bboxes (list[Tensor]): Ground truth bboxes of each image\n with shape (num_obj, 4).\n gt_labels (list[Tensor]): Ground truth labels of each image\n with shape (num_obj, 4).\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.\n Default: None.\n\n Returns:\n dict: Loss dict, comprise classification loss, regression loss and\n carl loss.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == self.anchor_generator.num_levels\n\n device = cls_scores[0].device\n\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, img_metas, device=device)\n label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n cls_reg_targets = self.get_targets(\n anchor_list,\n valid_flag_list,\n gt_bboxes,\n img_metas,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n label_channels=label_channels,\n return_sampling_results=True)\n if cls_reg_targets is None:\n return None\n (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets\n num_total_samples = (\n num_total_pos + num_total_neg if self.sampling else num_total_pos)\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n # concat all level anchors and flags to a single tensor\n concat_anchor_list = []\n for i in range(len(anchor_list)):\n concat_anchor_list.append(torch.cat(anchor_list[i]))\n all_anchor_list = images_to_levels(concat_anchor_list,\n num_level_anchors)\n\n num_imgs = len(img_metas)\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)\n for cls_score in cls_scores\n ]\n flatten_cls_scores = torch.cat(\n flatten_cls_scores, dim=1).reshape(-1,\n flatten_cls_scores[0].size(-1))\n flatten_bbox_preds = [\n bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n for bbox_pred in bbox_preds\n ]\n flatten_bbox_preds = torch.cat(\n flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))\n flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)\n flatten_label_weights = torch.cat(\n label_weights_list, dim=1).reshape(-1)\n flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)\n flatten_bbox_targets = torch.cat(\n bbox_targets_list, dim=1).reshape(-1, 4)\n flatten_bbox_weights = torch.cat(\n bbox_weights_list, dim=1).reshape(-1, 4)\n\n # Apply ISR-P\n isr_cfg = 
self.train_cfg.get('isr', None)\n if isr_cfg is not None:\n all_targets = (flatten_labels, flatten_label_weights,\n flatten_bbox_targets, flatten_bbox_weights)\n with torch.no_grad():\n all_targets = isr_p(\n flatten_cls_scores,\n flatten_bbox_preds,\n all_targets,\n flatten_anchors,\n sampling_results_list,\n bbox_coder=self.bbox_coder,\n loss_cls=self.loss_cls,\n num_class=self.num_classes,\n **self.train_cfg.isr)\n (flatten_labels, flatten_label_weights, flatten_bbox_targets,\n flatten_bbox_weights) = all_targets\n\n # For convenience we compute loss once instead separating by fpn level,\n # so that we don't need to separate the weights by level again.\n # The result should be the same\n losses_cls = self.loss_cls(\n flatten_cls_scores,\n flatten_labels,\n flatten_label_weights,\n avg_factor=num_total_samples)\n losses_bbox = self.loss_bbox(\n flatten_bbox_preds,\n flatten_bbox_targets,\n flatten_bbox_weights,\n avg_factor=num_total_samples)\n loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n # CARL Loss\n carl_cfg = self.train_cfg.get('carl', None)\n if carl_cfg is not None:\n loss_carl = carl_loss(\n flatten_cls_scores,\n flatten_labels,\n flatten_bbox_preds,\n flatten_bbox_targets,\n self.loss_bbox,\n **self.train_cfg.carl,\n avg_factor=num_total_pos,\n sigmoid=True,\n num_class=self.num_classes)\n loss_dict.update(loss_carl)\n\n return loss_dict\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import xavier_init\n\nfrom mmdet.core import (build_anchor_generator, build_assigner,\n build_bbox_coder, build_sampler, multi_apply)\nfrom ..builder import HEADS\nfrom ..losses import smooth_l1_loss\nfrom .anchor_head import AnchorHead\n\n\n# TODO: add loss evaluator for SSD\[email protected]_module()\nclass SSDHead(AnchorHead):\n \"\"\"SSD head used in https://arxiv.org/abs/1512.02325.\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n anchor_generator (dict): Config dict for anchor generator\n background_label (int | None): Label ID of background, set as 0 for\n RPN and num_classes for other heads. It will automatically set as\n num_classes if None is given.\n bbox_coder (dict): Config of bounding box coder.\n reg_decoded_bbox (bool): If true, the regression loss would be\n applied on decoded bounding boxes. Default: False\n train_cfg (dict): Training config of anchor head.\n test_cfg (dict): Testing config of anchor head.\n \"\"\" # noqa: W605\n\n def __init__(self,\n num_classes=80,\n in_channels=(512, 1024, 512, 256, 256, 256),\n anchor_generator=dict(\n type='SSDAnchorGenerator',\n scale_major=False,\n input_size=300,\n strides=[8, 16, 32, 64, 100, 300],\n ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),\n basesize_ratio_range=(0.1, 0.9)),\n background_label=None,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[.0, .0, .0, .0],\n target_stds=[1.0, 1.0, 1.0, 1.0],\n ),\n reg_decoded_bbox=False,\n train_cfg=None,\n test_cfg=None):\n super(AnchorHead, self).__init__()\n self.num_classes = num_classes\n self.in_channels = in_channels\n self.cls_out_channels = num_classes + 1 # add background class\n self.anchor_generator = build_anchor_generator(anchor_generator)\n num_anchors = self.anchor_generator.num_base_anchors\n\n reg_convs = []\n cls_convs = []\n for i in range(len(in_channels)):\n reg_convs.append(\n nn.Conv2d(\n in_channels[i],\n num_anchors[i] * 4,\n kernel_size=3,\n padding=1))\n cls_convs.append(\n nn.Conv2d(\n in_channels[i],\n num_anchors[i] * (num_classes + 1),\n kernel_size=3,\n padding=1))\n self.reg_convs = nn.ModuleList(reg_convs)\n self.cls_convs = nn.ModuleList(cls_convs)\n\n self.background_label = (\n num_classes if background_label is None else background_label)\n # background_label should be either 0 or num_classes\n assert (self.background_label == 0\n or self.background_label == num_classes)\n\n self.bbox_coder = build_bbox_coder(bbox_coder)\n self.reg_decoded_bbox = reg_decoded_bbox\n self.use_sigmoid_cls = False\n self.cls_focal_loss = False\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n # set sampling=False for archor_target\n self.sampling = False\n if self.train_cfg:\n self.assigner = build_assigner(self.train_cfg.assigner)\n # SSD sampling=False so use PseudoSampler\n sampler_cfg = dict(type='PseudoSampler')\n self.sampler = build_sampler(sampler_cfg, context=self)\n self.fp16_enabled = False\n\n def init_weights(self):\n \"\"\"Initialize weights of the head.\"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform', bias=0)\n\n def forward(self, feats):\n \"\"\"Forward features from the upstream network.\n\n Args:\n feats (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple:\n cls_scores (list[Tensor]): Classification scores for all scale\n levels, 
each is a 4D-tensor, the channels number is\n num_anchors * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for all scale\n levels, each is a 4D-tensor, the channels number is\n num_anchors * 4.\n \"\"\"\n cls_scores = []\n bbox_preds = []\n for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,\n self.cls_convs):\n cls_scores.append(cls_conv(feat))\n bbox_preds.append(reg_conv(feat))\n return cls_scores, bbox_preds\n\n def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,\n bbox_targets, bbox_weights, num_total_samples):\n \"\"\"Compute loss of a single image.\n\n Args:\n cls_score (Tensor): Box scores for eachimage\n Has shape (num_total_anchors, num_classes).\n bbox_pred (Tensor): Box energies / deltas for each image\n level with shape (num_total_anchors, 4).\n anchors (Tensor): Box reference for each scale level with shape\n (num_total_anchors, 4).\n labels (Tensor): Labels of each anchors with shape\n (num_total_anchors,).\n label_weights (Tensor): Label weights of each anchor with shape\n (num_total_anchors,)\n bbox_targets (Tensor): BBox regression targets of each anchor wight\n shape (num_total_anchors, 4).\n bbox_weights (Tensor): BBox regression loss weights of each anchor\n with shape (num_total_anchors, 4).\n num_total_samples (int): If sampling, num total samples equal to\n the number of total anchors; Otherwise, it is the number of\n positive anchors.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n\n loss_cls_all = F.cross_entropy(\n cls_score, labels, reduction='none') * label_weights\n # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n pos_inds = ((labels >= 0) &\n (labels < self.background_label)).nonzero().reshape(-1)\n neg_inds = (labels == self.background_label).nonzero().view(-1)\n\n num_pos_samples = pos_inds.size(0)\n num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples\n if num_neg_samples > neg_inds.size(0):\n num_neg_samples = neg_inds.size(0)\n topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)\n loss_cls_pos = loss_cls_all[pos_inds].sum()\n loss_cls_neg = topk_loss_cls_neg.sum()\n loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples\n\n if self.reg_decoded_bbox:\n bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)\n\n loss_bbox = smooth_l1_loss(\n bbox_pred,\n bbox_targets,\n bbox_weights,\n beta=self.train_cfg.smoothl1_beta,\n avg_factor=num_total_samples)\n return loss_cls[None], loss_bbox\n\n def loss(self,\n cls_scores,\n bbox_preds,\n gt_bboxes,\n gt_labels,\n img_metas,\n gt_bboxes_ignore=None):\n \"\"\"Compute losses of the head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W)\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W)\n gt_bboxes (list[Tensor]): each item are the truth boxes for each\n image in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == self.anchor_generator.num_levels\n\n device = cls_scores[0].device\n\n anchor_list, valid_flag_list = self.get_anchors(\n 
featmap_sizes, img_metas, device=device)\n cls_reg_targets = self.get_targets(\n anchor_list,\n valid_flag_list,\n gt_bboxes,\n img_metas,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n label_channels=1,\n unmap_outputs=False)\n if cls_reg_targets is None:\n return None\n (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n num_total_pos, num_total_neg) = cls_reg_targets\n\n num_images = len(img_metas)\n all_cls_scores = torch.cat([\n s.permute(0, 2, 3, 1).reshape(\n num_images, -1, self.cls_out_channels) for s in cls_scores\n ], 1)\n all_labels = torch.cat(labels_list, -1).view(num_images, -1)\n all_label_weights = torch.cat(label_weights_list,\n -1).view(num_images, -1)\n all_bbox_preds = torch.cat([\n b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)\n for b in bbox_preds\n ], -2)\n all_bbox_targets = torch.cat(bbox_targets_list,\n -2).view(num_images, -1, 4)\n all_bbox_weights = torch.cat(bbox_weights_list,\n -2).view(num_images, -1, 4)\n\n # concat all level anchors to a single tensor\n all_anchors = []\n for i in range(num_images):\n all_anchors.append(torch.cat(anchor_list[i]))\n\n # check NaN and Inf\n assert torch.isfinite(all_cls_scores).all().item(), \\\n 'classification scores become infinite or NaN!'\n assert torch.isfinite(all_bbox_preds).all().item(), \\\n 'bbox predications become infinite or NaN!'\n\n losses_cls, losses_bbox = multi_apply(\n self.loss_single,\n all_cls_scores,\n all_bbox_preds,\n all_anchors,\n all_labels,\n all_label_weights,\n all_bbox_targets,\n all_bbox_weights,\n num_total_samples=num_total_pos)\n return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n",
"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import kaiming_init, normal_init\nfrom mmcv.ops import Conv2d, Linear, MaxPool2d\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core import force_fp32\nfrom mmdet.models.builder import HEADS, build_loss\n\n\[email protected]_module()\nclass MaskIoUHead(nn.Module):\n \"\"\"Mask IoU Head.\n\n This head predicts the IoU of predicted masks and corresponding gt masks.\n \"\"\"\n\n def __init__(self,\n num_convs=4,\n num_fcs=2,\n roi_feat_size=14,\n in_channels=256,\n conv_out_channels=256,\n fc_out_channels=1024,\n num_classes=80,\n loss_iou=dict(type='MSELoss', loss_weight=0.5)):\n super(MaskIoUHead, self).__init__()\n self.in_channels = in_channels\n self.conv_out_channels = conv_out_channels\n self.fc_out_channels = fc_out_channels\n self.num_classes = num_classes\n self.fp16_enabled = False\n\n self.convs = nn.ModuleList()\n for i in range(num_convs):\n if i == 0:\n # concatenation of mask feature and mask prediction\n in_channels = self.in_channels + 1\n else:\n in_channels = self.conv_out_channels\n stride = 2 if i == num_convs - 1 else 1\n self.convs.append(\n Conv2d(\n in_channels,\n self.conv_out_channels,\n 3,\n stride=stride,\n padding=1))\n\n roi_feat_size = _pair(roi_feat_size)\n pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)\n self.fcs = nn.ModuleList()\n for i in range(num_fcs):\n in_channels = (\n self.conv_out_channels *\n pooled_area if i == 0 else self.fc_out_channels)\n self.fcs.append(Linear(in_channels, self.fc_out_channels))\n\n self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)\n self.relu = nn.ReLU()\n self.max_pool = MaxPool2d(2, 2)\n self.loss_iou = build_loss(loss_iou)\n\n def init_weights(self):\n for conv in self.convs:\n kaiming_init(conv)\n for fc in self.fcs:\n kaiming_init(\n fc,\n a=1,\n mode='fan_in',\n nonlinearity='leaky_relu',\n distribution='uniform')\n normal_init(self.fc_mask_iou, std=0.01)\n\n def forward(self, mask_feat, mask_pred):\n mask_pred = mask_pred.sigmoid()\n mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1))\n\n x = torch.cat((mask_feat, mask_pred_pooled), 1)\n\n for conv in self.convs:\n x = self.relu(conv(x))\n x = x.flatten(1)\n for fc in self.fcs:\n x = self.relu(fc(x))\n mask_iou = self.fc_mask_iou(x)\n return mask_iou\n\n @force_fp32(apply_to=('mask_iou_pred', ))\n def loss(self, mask_iou_pred, mask_iou_targets):\n pos_inds = mask_iou_targets > 0\n if pos_inds.sum() > 0:\n loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],\n mask_iou_targets[pos_inds])\n else:\n loss_mask_iou = mask_iou_pred.sum() * 0\n return dict(loss_mask_iou=loss_mask_iou)\n\n @force_fp32(apply_to=('mask_pred', ))\n def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets,\n rcnn_train_cfg):\n \"\"\"Compute target of mask IoU.\n\n Mask IoU target is the IoU of the predicted mask (inside a bbox) and\n the gt mask of corresponding gt mask (the whole instance).\n The intersection area is computed inside the bbox, and the gt mask area\n is computed with two steps, firstly we compute the gt area inside the\n bbox, then divide it by the area ratio of gt area inside the bbox and\n the gt area of the whole instance.\n\n Args:\n sampling_results (list[:obj:`SamplingResult`]): sampling results.\n gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance)\n of each image, with the same shape of the input image.\n mask_pred (Tensor): Predicted masks of each positive proposal,\n shape (num_pos, h, w).\n mask_targets (Tensor): Gt 
mask of each positive proposal,\n binary map of the shape (num_pos, h, w).\n rcnn_train_cfg (dict): Training config for R-CNN part.\n\n Returns:\n Tensor: mask iou target (length == num positive).\n \"\"\"\n pos_proposals = [res.pos_bboxes for res in sampling_results]\n pos_assigned_gt_inds = [\n res.pos_assigned_gt_inds for res in sampling_results\n ]\n\n # compute the area ratio of gt areas inside the proposals and\n # the whole instance\n area_ratios = map(self._get_area_ratio, pos_proposals,\n pos_assigned_gt_inds, gt_masks)\n area_ratios = torch.cat(list(area_ratios))\n assert mask_targets.size(0) == area_ratios.size(0)\n\n mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()\n mask_pred_areas = mask_pred.sum((-1, -2))\n\n # mask_pred and mask_targets are binary maps\n overlap_areas = (mask_pred * mask_targets).sum((-1, -2))\n\n # compute the mask area of the whole instance\n gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)\n\n mask_iou_targets = overlap_areas / (\n mask_pred_areas + gt_full_areas - overlap_areas)\n return mask_iou_targets\n\n def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):\n \"\"\"Compute area ratio of the gt mask inside the proposal and the gt\n mask of the corresponding instance.\"\"\"\n num_pos = pos_proposals.size(0)\n if num_pos > 0:\n area_ratios = []\n proposals_np = pos_proposals.cpu().numpy()\n pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()\n # compute mask areas of gt instances (batch processing for speedup)\n gt_instance_mask_area = gt_masks.areas\n for i in range(num_pos):\n gt_mask = gt_masks[pos_assigned_gt_inds[i]]\n\n # crop the gt mask inside the proposal\n bbox = proposals_np[i, :].astype(np.int32)\n gt_mask_in_proposal = gt_mask.crop(bbox)\n\n ratio = gt_mask_in_proposal.areas[0] / (\n gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)\n area_ratios.append(ratio)\n area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(\n pos_proposals.device)\n else:\n area_ratios = pos_proposals.new_zeros((0, ))\n return area_ratios\n\n @force_fp32(apply_to=('mask_iou_pred', ))\n def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):\n \"\"\"Get the mask scores.\n\n mask_score = bbox_score * mask_iou\n \"\"\"\n inds = range(det_labels.size(0))\n mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]\n mask_scores = mask_scores.cpu().numpy()\n det_labels = det_labels.cpu().numpy()\n return [mask_scores[det_labels == i] for i in range(self.num_classes)]\n"
] | [
[
"torch.no_grad",
"torch.cat"
],
[
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.functional.cross_entropy",
"torch.nn.Conv2d",
"torch.isfinite"
],
[
"torch.cat",
"torch.nn.ModuleList",
"numpy.stack",
"torch.nn.modules.utils._pair",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
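`SSDHead.loss_single` in the record above performs online hard negative mining: every positive anchor contributes to the classification loss, but only the `neg_pos_ratio` hardest negatives (those with the largest per-anchor losses) do. A minimal standalone sketch of that selection, with illustrative tensor values:

```python
import torch

# Hedged sketch of SSD-style hard negative mining (not the mmdetection API).
def mine_hard_negatives(loss_cls_all, pos_inds, neg_inds, neg_pos_ratio=3):
    num_pos = pos_inds.numel()
    num_neg = min(neg_pos_ratio * num_pos, neg_inds.numel())
    topk_neg_loss, _ = loss_cls_all[neg_inds].topk(num_neg)  # hardest negatives
    return loss_cls_all[pos_inds].sum() + topk_neg_loss.sum()

loss_all = torch.tensor([0.2, 5.0, 0.1, 3.0, 0.05, 2.0])
pos = torch.tensor([0])              # one positive anchor
neg = torch.tensor([1, 2, 3, 4, 5])  # five negatives; keep the 3 hardest
print(mine_hard_negatives(loss_all, pos, neg))  # 0.2 + 5.0 + 3.0 + 2.0 = tensor(10.2000)
```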
microsoft/iclr2019-learning-to-represent-edits | [
"e5777d6aa6cdeda500cf076646177c48d1cb4622",
"e5777d6aa6cdeda500cf076646177c48d1cb4622"
] | [
"diff_representation/model/edit_encoder/bag_of_edits_change_encoder.py",
"diff_representation/model/encdec/sequential_decoder.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\nfrom itertools import chain\n\nimport numpy as np\nimport torch\nfrom torch import nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom tqdm import tqdm\nimport sys\n\nfrom diff_representation.change_entry import ChangeExample\nfrom diff_representation.model import nn_utils\nfrom diff_representation.model.embedder import EmbeddingTable\n\n\nclass BagOfEditsChangeEncoder(nn.Module):\n \"\"\"project a CodeChange instance into distributed vectors\"\"\"\n\n def __init__(self, token_embedder, vocab, **kwargs):\n super(BagOfEditsChangeEncoder, self).__init__()\n\n self.token_embedder = token_embedder\n self.token_embedding_size = self.token_embedder.weight.size(1)\n self.vocab = vocab\n self.change_vector_size = self.token_embedding_size * 2\n\n @property\n def device(self):\n return self.token_embedder.device\n\n def forward(self, code_changes, *args, **kwargs):\n \"\"\"\n given the token encodings of the previous and updated code,\n and the diff information (alignment between the tokens between the\n previous and updated code), generate the diff representation\n \"\"\"\n\n added_tokens = []\n added_token_batch_ids = []\n deled_tokens = []\n deled_token_batch_ids = []\n for e_id, example in enumerate(code_changes):\n for entry in example.change_seq:\n tag, token = entry\n if tag == 'ADD':\n token_id = self.vocab[token]\n added_tokens.append(token_id)\n added_token_batch_ids.append(e_id)\n elif tag == 'DEL':\n token_id = self.vocab[token]\n deled_tokens.append(token_id)\n deled_token_batch_ids.append(e_id)\n elif tag == 'REPLACE':\n added_token_id = self.vocab[token[1]]\n deled_token_id = self.vocab[token[0]]\n\n added_tokens.append(added_token_id)\n deled_tokens.append(deled_token_id)\n\n added_token_batch_ids.append(e_id)\n deled_token_batch_ids.append(e_id)\n\n changed_token_ids = added_tokens + deled_tokens\n changed_token_ids = torch.tensor(changed_token_ids, dtype=torch.long, device=self.device)\n # (token_num, embed_size)\n changed_token_embeds = self.token_embedder.weight[changed_token_ids]\n\n added_token_embeds = changed_token_embeds[:len(added_tokens)]\n deled_token_embeds = changed_token_embeds[len(added_tokens):]\n\n added_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,\n device=self.device)\n if added_token_batch_ids:\n added_change_embeds = added_change_embeds.scatter_add_(0,\n torch.tensor(added_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(added_token_embeds),\n added_token_embeds)\n\n deled_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,\n device=self.device)\n if deled_token_batch_ids:\n deled_change_embeds = deled_change_embeds.scatter_add_(0,\n torch.tensor(deled_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(deled_token_embeds),\n deled_token_embeds)\n\n change_vectors = torch.cat([added_change_embeds, deled_change_embeds], dim=-1)\n\n return change_vectors\n\n def encode_code_change(self, prev_code_tokens, updated_code_tokens, code_encoder):\n example = ChangeExample(prev_code_tokens, updated_code_tokens, context=None)\n\n change_vec = self.forward([example]).data.cpu().numpy()[0]\n\n return change_vec\n\n def encode_code_changes(self, examples, code_encoder, batch_size=32):\n \"\"\"encode each change in the list `code_changes`,\n return a 2D numpy array of shape (len(code_changes), 
code_change_embed_dim)\"\"\"\n\n change_vecs = []\n\n for batch_examples in tqdm(nn_utils.batch_iter(examples, batch_size), file=sys.stdout, total=len(examples)):\n batch_change_vecs = self.forward(batch_examples).data.cpu().numpy()\n change_vecs.append(batch_change_vecs)\n\n change_vecs = np.concatenate(change_vecs, axis=0)\n\n return change_vecs\n",
"# coding=utf-8\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\nfrom collections import OrderedDict, namedtuple\nfrom itertools import chain\nimport re\nfrom typing import Dict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\n\nfrom diff_representation.model import nn_utils\nfrom diff_representation.model.encdec.decoder import Decoder\nfrom diff_representation.model.pointer_net import PointerNet\n\nimport numpy as np\n\nSequentialHypothesis = namedtuple('SequentialHypothesis', ['code', 'score', 'action_log'])\nSequentialHypothesis.__new__.__defaults__ = (None,)\n\n\nclass SequentialDecoder(Decoder):\n \"\"\"\n given the context encoding [List[Tokens]], the previous code [List[Tokens]],\n and the change encoding vector, decode the updated code [List[Tokens]]\n \"\"\"\n\n def __init__(self,\n token_embed_size, token_encoding_size, change_vector_size, hidden_size,\n dropout,\n init_decode_vec_encoder_state_dropout,\n code_token_embedder,\n vocab,\n no_copy=False):\n super(SequentialDecoder, self).__init__()\n\n self.vocab = vocab\n self.hidden_size = hidden_size\n self.no_copy = no_copy\n\n self.code_token_embedder = code_token_embedder\n self.decoder_lstm = nn.LSTMCell(token_embed_size + change_vector_size,\n hidden_size)\n self.pointer_net = PointerNet(src_encoding_size=token_encoding_size,\n query_vec_size=hidden_size)\n\n self.decoder_cell_init = nn.Linear(token_encoding_size + change_vector_size, hidden_size)\n\n self.attention_linear = nn.Linear(token_encoding_size, hidden_size, bias=False)\n\n # transformation of decoder hidden states and context vectors before reading out target words\n # this produces the `attentional vector` in (Luong et al., 2015)\n self.att_vec_linear = nn.Linear(token_encoding_size + hidden_size, hidden_size, bias=False)\n\n # prediction layer of the target vocabulary\n self.code_token_readout = nn.Linear(hidden_size, len(self.vocab), bias=False)\n\n # switch probability between copy and generation\n self.copy_gen_switch = nn.Linear(hidden_size, 3)\n\n self.dropout = nn.Dropout(dropout)\n\n if init_decode_vec_encoder_state_dropout > 0.:\n self.init_decode_vec_encoder_state_dropout = nn.Dropout(init_decode_vec_encoder_state_dropout)\n\n @property\n def device(self):\n return self.copy_gen_switch.weight.device\n\n def forward(self, batch_examples, batched_prev_code, batched_context, change_vectors,\n embedding_cache=None, debug=False):\n \"\"\"\n compute the probability of generating the target code given context,\n previous code and the change vector\n\n batched_context: (batch_size, ctx_len, encode_size)\n batched_prev_code: (batch_size, code_len, encode_size)\n change_vector: (batch_size, change_vec_size)\n \"\"\"\n\n # (batch_size, hidden_size)\n h_tm1 = self.get_init_hidden_state(batched_prev_code, batched_context, change_vectors)\n\n batch_size = h_tm1[0].size(0)\n\n # (batch_size, prev_code_len, encode_size)\n prev_code_att_linear = self.attention_linear(batched_prev_code.encoding)\n\n # (**updated_code_len**, batch_size, embed_size)\n # pad the target code sequence with boundary symbols\n updated_code_list = [['<s>'] + e.updated_data + ['</s>'] for e in batch_examples]\n updated_code_embed = self.code_token_embedder(updated_code_list)\n\n att_vecs = []\n att_tm1 = torch.zeros(batch_size, self.hidden_size, dtype=torch.float, device=self.device)\n\n # assume the updated code 
is properly padded by <s> and </s>\n        for t, y_tm1_embed in list(enumerate(updated_code_embed.split(split_size=1)))[:-1]:\n            y_tm1_embed = y_tm1_embed.squeeze(0)\n\n            x = torch.cat([y_tm1_embed, change_vectors], dim=-1)  # No input feeding\n\n            (h_t, cell_t), att_t = self.step(x,\n                                             h_tm1,\n                                             batched_prev_code.encoding, batched_prev_code.mask,\n                                             prev_code_att_linear)\n\n            att_vecs.append(att_t)\n\n            att_tm1 = att_t\n            h_tm1 = (h_t, cell_t)\n\n        # compute copy probabilities and generation probabilities\n\n        # (updated_code_len - 1, batch_size, hidden_size)\n        att_vecs = torch.stack(att_vecs)\n\n        # (updated_code_len - 1, batch_size, code_vocab_size)\n        gen_code_token_log_prob = F.log_softmax(self.code_token_readout(att_vecs), dim=-1)\n\n        # (updated_code_len - 1, batch_size, ctx_len)\n        copy_ctx_token_prob = self.pointer_net(batched_context.encoding, batched_context.mask, att_vecs)\n\n        # (updated_code_len - 1, batch_size, ctx_len)\n        copy_prev_token_prob = self.pointer_net(batched_prev_code.encoding, batched_prev_code.mask, att_vecs)\n\n        # (updated_code_len - 1, batch_size, [COPY_FROM_PREV, COPY_FROM_CONTEXT, GEN])\n        token_copy_gen_switch = F.log_softmax(self.copy_gen_switch(att_vecs), dim=-1)\n\n        # prepare masks, target indices\n        tgt_gen_token_idx, tgt_gen_token_mask, \\\n        tgt_copy_ctx_token_idx_mask, tgt_copy_ctx_token_mask, \\\n        tgt_copy_prev_token_idx_mask, tgt_copy_prev_token_mask = self.get_gen_and_copy_index_and_mask(batch_examples,\n                                                                                                      batched_prev_code,\n                                                                                                      batched_context)\n\n        # (updated_code_len - 1, batch_size)\n        tgt_gen_token_prob = torch.gather(gen_code_token_log_prob, dim=-1,\n                                          index=tgt_gen_token_idx.unsqueeze(2)).squeeze(2)\n        tgt_gen_selection_prob = token_copy_gen_switch[:, :, 2]\n        tgt_gen_token_prob = tgt_gen_token_prob + tgt_gen_selection_prob\n\n        # (updated_code_len - 1, batch_size)\n        tgt_copy_ctx_token_prob = (torch.sum(copy_ctx_token_prob * tgt_copy_ctx_token_idx_mask,\n                                             dim=-1) + 1.e-15).log()\n\n        tgt_copy_ctx_selection_prob = token_copy_gen_switch[:, :, 1]\n        tgt_copy_ctx_token_prob = tgt_copy_ctx_token_prob + tgt_copy_ctx_selection_prob\n\n        tgt_copy_prev_token_prob = (torch.sum(copy_prev_token_prob * tgt_copy_prev_token_idx_mask,\n                                              dim=-1) + 1.e-15).log()\n\n        tgt_copy_prev_selection_prob = token_copy_gen_switch[:, :, 0]\n        tgt_copy_prev_token_prob = tgt_copy_prev_token_prob + tgt_copy_prev_selection_prob\n\n        tgt_gen_and_copy_token_prob = nn_utils.log_sum_exp(\n            torch.stack([tgt_gen_token_prob, tgt_copy_ctx_token_prob, tgt_copy_prev_token_prob], dim=-1),\n            mask=torch.stack([tgt_gen_token_mask, tgt_copy_ctx_token_mask, tgt_copy_prev_token_mask], dim=-1))\n        tgt_gen_and_copy_token_prob[tgt_gen_and_copy_token_prob == -float('inf')] = 0.\n\n        # (batch_size)\n        tgt_token_prob = tgt_gen_and_copy_token_prob.sum(dim=0)\n\n        return {'log_probs': tgt_token_prob}\n\n    def step(self, x, h_tm1, batched_prev_code_encoding, batched_prev_code_mask, prev_code_att_linear):\n        \"\"\"\n        a single LSTM decoding step\n        \"\"\"\n\n        # h_t: (batch_size, hidden_size)\n        h_t, cell_t = self.decoder_lstm(x, h_tm1)\n\n        ctx_t, alpha_t = nn_utils.dot_prod_attention(h_t,\n                                                     batched_prev_code_encoding, prev_code_att_linear,\n                                                     mask=batched_prev_code_mask)\n\n        att_t = F.tanh(self.att_vec_linear(torch.cat([h_t, ctx_t], 1)))\n        att_t = self.dropout(att_t)\n\n        return (h_t, cell_t), att_t\n\n    def get_init_hidden_state(self, batched_prev_code, batched_context, change_vectors):\n        last_cell = batched_prev_code.last_cell\n\n        if hasattr(self, 'init_decode_vec_encoder_state_dropout'):\n            last_cell = self.init_decode_vec_encoder_state_dropout(last_cell)\n\n        x = 
torch.cat([last_cell, change_vectors], dim=-1)\n dec_init_cell = self.decoder_cell_init(x)\n dec_init_state = F.tanh(dec_init_cell)\n\n return dec_init_state, dec_init_cell\n\n @staticmethod\n def populate_gen_and_copy_index_and_mask(example, vocab, copy_token=True):\n prev_code = example.prev_data\n updated_code = example.updated_data\n context = example.context\n\n seq_len = len(example.updated_data) + 1\n\n tgt_gen_token_idx = torch.zeros(seq_len, dtype=torch.long)\n tgt_gen_token_mask = torch.zeros(seq_len, dtype=torch.float)\n\n tgt_copy_ctx_token_idx_mask = torch.zeros(seq_len, len(context), dtype=torch.float)\n tgt_copy_ctx_token_mask = torch.zeros(seq_len, dtype=torch.float)\n\n tgt_copy_prev_token_idx_mask = torch.zeros(seq_len, len(prev_code), dtype=torch.float)\n tgt_copy_prev_token_mask = torch.zeros(seq_len, dtype=torch.float)\n\n for t, tgt_token in enumerate(updated_code):\n if SequentialDecoder._can_only_generate_this_token(tgt_token):\n tgt_gen_token_mask[t] = 1\n tgt_gen_token_idx[t] = vocab[tgt_token]\n else:\n copied = False\n if copy_token:\n if tgt_token in prev_code:\n token_pos_list = [pos for pos, token in enumerate(prev_code) if token == tgt_token]\n tgt_copy_prev_token_idx_mask[t, token_pos_list] = 1\n tgt_copy_prev_token_mask[t] = 1\n copied = True\n if tgt_token in context:\n token_pos_list = [pos for pos, token in enumerate(context) if token == tgt_token]\n tgt_copy_ctx_token_idx_mask[t, token_pos_list] = 1\n tgt_copy_ctx_token_mask[t] = 1\n copied = True\n\n if not copied or tgt_token in vocab:\n # if the token is not copied, we can only generate this token from the vocabulary,\n # even if it is a <unk>.\n # otherwise, we can still generate it from the vocabulary\n tgt_gen_token_mask[t] = 1\n tgt_gen_token_idx[t] = vocab[tgt_token]\n\n # add the index for ending </s>\n tgt_gen_token_mask[len(updated_code)] = 1\n tgt_gen_token_idx[len(updated_code)] = vocab['</s>']\n\n example.tgt_gen_token_idx = tgt_gen_token_idx\n example.tgt_gen_token_mask = tgt_gen_token_mask\n example.tgt_copy_ctx_token_idx_mask = tgt_copy_ctx_token_idx_mask\n example.tgt_copy_ctx_token_mask = tgt_copy_ctx_token_mask\n example.tgt_copy_prev_token_idx_mask = tgt_copy_prev_token_idx_mask\n example.tgt_copy_prev_token_mask = tgt_copy_prev_token_mask\n\n def get_gen_and_copy_index_and_mask(self, examples, batched_prev_code, batched_context):\n batch_size = len(examples)\n max_seq_len = max([len(e.updated_data) for e in examples]) + 1\n\n tgt_gen_token_idx = torch.zeros(batch_size, max_seq_len, dtype=torch.long)\n tgt_gen_token_mask = torch.zeros(batch_size, max_seq_len, dtype=torch.float)\n\n tgt_copy_ctx_token_idx_mask = torch.zeros(batch_size, max_seq_len, batched_context.encoding.size(1), dtype=torch.float)\n tgt_copy_ctx_token_mask = torch.zeros(batch_size, max_seq_len, dtype=torch.float)\n\n tgt_copy_prev_token_idx_mask = torch.zeros(batch_size, max_seq_len, batched_prev_code.encoding.size(1), dtype=torch.float)\n tgt_copy_prev_token_mask = torch.zeros(batch_size, max_seq_len, dtype=torch.float)\n\n for batch_id, example in enumerate(examples):\n tgt_gen_token_mask[batch_id, :example.tgt_gen_token_mask.size(0)] = example.tgt_gen_token_mask\n tgt_gen_token_idx[batch_id, :example.tgt_gen_token_idx.size(0)] = example.tgt_gen_token_idx\n\n tgt_copy_prev_token_idx_mask[batch_id, :example.tgt_copy_prev_token_idx_mask.size(0), :example.tgt_copy_prev_token_idx_mask.size(1)] = example.tgt_copy_prev_token_idx_mask\n tgt_copy_prev_token_mask[batch_id, :example.tgt_copy_prev_token_mask.size(0)] = 
example.tgt_copy_prev_token_mask\n\n tgt_copy_ctx_token_idx_mask[batch_id, :example.tgt_copy_ctx_token_idx_mask.size(0), :example.tgt_copy_ctx_token_idx_mask.size(1)] = example.tgt_copy_ctx_token_idx_mask\n tgt_copy_ctx_token_mask[batch_id, :example.tgt_copy_ctx_token_mask.size(0)] = example.tgt_copy_ctx_token_mask\n\n return tgt_gen_token_idx.permute(1, 0).to(self.device), tgt_gen_token_mask.permute(1, 0).to(self.device), \\\n tgt_copy_ctx_token_idx_mask.permute(1, 0, 2).to(self.device), tgt_copy_ctx_token_mask.permute(1, 0).to(self.device), \\\n tgt_copy_prev_token_idx_mask.permute(1, 0, 2).to(self.device), tgt_copy_prev_token_mask.permute(1, 0).to(self.device)\n\n def beam_search_with_source_encodings(self, prev_code, prev_code_encoding, context, context_encoding, change_vector,\n beam_size=5, max_decoding_time_step=70, debug=False):\n dec_init_vec = self.get_init_hidden_state(prev_code_encoding, context_encoding, change_vector)\n\n aggregated_prev_code_tokens = OrderedDict()\n for token_pos, token in enumerate(prev_code):\n aggregated_prev_code_tokens.setdefault(token, []).append(token_pos)\n\n aggregated_context_tokens = OrderedDict()\n for token_pos, token in enumerate(context):\n aggregated_context_tokens.setdefault(token, []).append(token_pos)\n\n # (1, prev_code_len, encode_size)\n prev_code_att_linear = self.attention_linear(prev_code_encoding.encoding)\n\n h_tm1 = dec_init_vec\n att_tm1 = torch.zeros(1, self.hidden_size, dtype=torch.float, device=self.device)\n\n t = 0\n hypotheses = [['<s>']]\n action_logs = [[]]\n hyp_scores = torch.zeros(len(hypotheses), dtype=torch.float, device=self.device)\n completed_hypotheses = []\n\n while len(completed_hypotheses) < beam_size and t < max_decoding_time_step:\n t += 1\n hyp_num = len(hypotheses)\n\n exp_src_encodings = prev_code_encoding.encoding.expand(hyp_num, prev_code_encoding.encoding.size(1),\n prev_code_encoding.encoding.size(2))\n exp_src_encodings_att_linear = prev_code_att_linear.expand(hyp_num, prev_code_att_linear.size(1),\n prev_code_att_linear.size(2))\n # (hyp_num, change_vec_size)\n exp_change_vector = change_vector.expand(hyp_num, change_vector.size(1))\n\n y_tm1 = torch.tensor([self.vocab[hyp[-1]] for hyp in hypotheses], dtype=torch.long, device=self.device)\n y_tm1_embed = self.code_token_embedder(y_tm1)\n\n x = torch.cat([y_tm1_embed, exp_change_vector], dim=-1)\n\n (h_t, cell_t), att_t = self.step(x, h_tm1,\n exp_src_encodings, batched_prev_code_mask=None,\n prev_code_att_linear=exp_src_encodings_att_linear)\n\n # (batch_size, code_vocab_size)\n gen_terminal_token_prob = F.softmax(self.code_token_readout(att_t), dim=-1)\n\n # (batch_size, ctx_len)\n copy_ctx_token_prob = self.pointer_net(context_encoding.encoding, None, att_t.unsqueeze(0)).squeeze(0)\n\n # (batch_size, ctx_len)\n copy_prev_token_prob = self.pointer_net(prev_code_encoding.encoding, None, att_t.unsqueeze(0)).squeeze(0)\n\n # (batch_size, [COPY_FROM_PREV, COPY_FROM_CONTEXT, GEN])\n token_copy_gen_switch = F.softmax(self.copy_gen_switch(att_t), dim=-1)\n\n # (batch_size, code_vocab_size)\n terminal_token_prob = token_copy_gen_switch[:, 2].unsqueeze(1) * gen_terminal_token_prob\n\n hyp_unk_copy_score_dict: Dict[str, torch.tensor] = OrderedDict() # Dict[token] = Tensor[hyp_num]\n if self.no_copy is False:\n for token, token_pos_list in aggregated_prev_code_tokens.items():\n # (hyp_num)\n sum_copy_prob = copy_prev_token_prob[:, token_pos_list].sum(dim=-1)\n # (hyp_num)\n gated_copy_prob = token_copy_gen_switch[:, 0] * sum_copy_prob\n\n if token in 
self.vocab:\n token_id = self.vocab[token]\n terminal_token_prob[:, token_id] = terminal_token_prob[:, token_id] + gated_copy_prob\n else:\n if token in hyp_unk_copy_score_dict:\n hyp_unk_copy_score_dict[token] = hyp_unk_copy_score_dict[token] + gated_copy_prob\n else:\n hyp_unk_copy_score_dict[token] = gated_copy_prob\n\n for token, token_pos_list in aggregated_context_tokens.items():\n # (hyp_num)\n sum_copy_prob = copy_ctx_token_prob[:, token_pos_list].sum(dim=-1)\n # (hyp_num)\n gated_copy_prob = token_copy_gen_switch[:, 1] * sum_copy_prob\n\n if token in self.vocab:\n token_id = self.vocab[token]\n terminal_token_prob[:, token_id] = terminal_token_prob[:, token_id] + gated_copy_prob\n else:\n if token in hyp_unk_copy_score_dict:\n hyp_unk_copy_score_dict[token] = hyp_unk_copy_score_dict[token] + gated_copy_prob\n else:\n hyp_unk_copy_score_dict[token] = gated_copy_prob\n\n terminal_token_prob = terminal_token_prob.log()\n candidate_hyp_scores = (hyp_scores.unsqueeze(1) + terminal_token_prob).view(-1)\n if len(hyp_unk_copy_score_dict) > 0:\n # (unk_num, hyp_num)\n unk_copy_hyp_scores = torch.cat(\n [copy_scores.log() + hyp_scores for copy_scores in hyp_unk_copy_score_dict.values()], dim=0)\n # (unk_num * hyp_num)\n unk_copy_hyp_scores = unk_copy_hyp_scores.view(-1)\n candidate_hyp_scores = torch.cat([candidate_hyp_scores, unk_copy_hyp_scores], dim=0)\n\n top_new_hyp_scores, top_new_hyp_pos = torch.topk(candidate_hyp_scores,\n k=min(candidate_hyp_scores.size(0),\n beam_size - len(completed_hypotheses)))\n\n live_hyp_ids = []\n new_hypotheses = []\n new_hypotheses_scores = []\n new_action_logs = []\n id2unk = list(hyp_unk_copy_score_dict.keys())\n vocab_size = terminal_token_prob.size(1)\n vocab_boundary = hyp_num * vocab_size # hyp_num * vocab_num\n for new_hyp_score, new_hyp_flattened_pos in zip(top_new_hyp_scores, top_new_hyp_pos):\n new_hyp_flattened_pos = new_hyp_flattened_pos.cpu().item()\n new_hyp_score = new_hyp_score.cpu().item()\n\n if new_hyp_flattened_pos < vocab_boundary:\n hyp_token_id = new_hyp_flattened_pos % vocab_size\n tgt_token = self.vocab.id2word[hyp_token_id]\n prev_hyp_id = new_hyp_flattened_pos // vocab_size\n else:\n k = new_hyp_flattened_pos - vocab_boundary\n unk_token_id = k // hyp_num\n tgt_token = id2unk[unk_token_id]\n prev_hyp_id = k % hyp_num\n\n if debug:\n action_log_entry = {'t': t,\n 'token': tgt_token,\n 'token_copy_gen_switch': token_copy_gen_switch[prev_hyp_id,\n :].log().cpu().numpy(),\n 'in_vocab': tgt_token in self.vocab,\n 'tgt_gen_token_prob': gen_terminal_token_prob[prev_hyp_id, self.vocab[\n tgt_token]].log().item() if tgt_token in self.vocab else 'n/a',\n 'tgt_copy_prev_token_prob': copy_prev_token_prob[\n prev_hyp_id, aggregated_prev_code_tokens[\n tgt_token]].sum().log().item() if tgt_token in aggregated_prev_code_tokens else 'n/a',\n 'tgt_copy_ctx_token_prob': copy_ctx_token_prob[\n prev_hyp_id, aggregated_context_tokens[\n tgt_token]].sum().log().item() if tgt_token in aggregated_context_tokens else 'n/a',\n 'p_t': (new_hyp_score - hyp_scores[prev_hyp_id]).item()\n }\n action_log_list = list(action_logs[prev_hyp_id]) + [action_log_entry]\n\n if tgt_token == '</s>':\n hyp_tgt_tokens = hypotheses[prev_hyp_id][1:]\n completed_hypotheses.append(SequentialHypothesis(hyp_tgt_tokens, new_hyp_score,\n action_log=action_log_list if debug else None))\n else:\n hyp_tgt_tokens = hypotheses[prev_hyp_id] + [tgt_token]\n new_hypotheses.append(hyp_tgt_tokens)\n\n live_hyp_ids.append(prev_hyp_id)\n new_hypotheses_scores.append(new_hyp_score)\n\n if 
debug:\n new_action_logs.append(action_log_list)\n\n if len(completed_hypotheses) >= beam_size:\n break\n\n live_hyp_ids = torch.tensor(live_hyp_ids, dtype=torch.long, device=self.device)\n h_tm1 = (h_t[live_hyp_ids], cell_t[live_hyp_ids])\n att_tm1 = att_t[live_hyp_ids]\n\n hypotheses = new_hypotheses\n hyp_scores = torch.tensor(new_hypotheses_scores, dtype=torch.float, device=self.device)\n if debug:\n action_logs = new_action_logs\n\n completed_hypotheses.sort(key=lambda hyp: -hyp.score)\n\n return completed_hypotheses\n\n\nclass SequentialDecoderWithTreeEncoder(SequentialDecoder):\n \"\"\"\n given the context encoding [List[Tokens]], the previous code [List[Tokens]],\n and the change encoding vector, decode the updated code [List[Tokens]]\n \"\"\"\n\n def __init__(self,\n token_embed_size, token_encoding_size, change_vector_size, hidden_size,\n dropout,\n init_decode_vec_encoder_state_dropout,\n code_token_embedder,\n vocab,\n decoder_init_method='avg_pooling'):\n super(SequentialDecoderWithTreeEncoder, self).__init__(token_embed_size, token_encoding_size, change_vector_size, hidden_size,\n dropout,\n init_decode_vec_encoder_state_dropout,\n code_token_embedder,\n vocab)\n\n self.decoder_init_method = decoder_init_method\n\n\n @property\n def device(self):\n return self.copy_gen_switch.weight.device\n\n def forward(self, batch_examples, batched_prev_code, batched_context, change_vectors,\n embedding_cache=None, debug=False):\n \"\"\"\n compute the probability of generating the target code given context,\n previous code and the change vector\n\n batched_context: (batch_size, ctx_len, encode_size)\n batched_prev_code: (batch_size, code_len, encode_size)\n change_vector: (batch_size, change_vec_size)\n \"\"\"\n\n # (batch_size, hidden_size)\n h_tm1 = self.get_init_hidden_state(batched_prev_code, batched_context, change_vectors)\n\n batch_size = h_tm1[0].size(0)\n\n # (batch_size, prev_code_len, encode_size)\n prev_code_att_linear = self.attention_linear(batched_prev_code.encoding)\n\n # (**updated_code_len**, batch_size, embed_size)\n # pad the target code sequence with boundary symbols\n updated_code_list = [['<s>'] + e.updated_code_chunk + ['</s>'] for e in batch_examples]\n updated_code_embed = self.code_token_embedder.get_embed_for_token_sequences(updated_code_list)\n\n att_vecs = []\n att_tm1 = torch.zeros(batch_size, self.hidden_size, dtype=torch.float, device=self.device)\n\n # assume the updated code is properly padded by <s> and </s>\n for t, y_tm1_embed in list(enumerate(updated_code_embed.split(split_size=1)))[:-1]:\n y_tm1_embed = y_tm1_embed.squeeze(0)\n\n x = torch.cat([y_tm1_embed, change_vectors], dim=-1) # No input feeding\n\n (h_t, cell_t), att_t = self.step(x,\n h_tm1,\n batched_prev_code.encoding, batched_prev_code.mask,\n prev_code_att_linear)\n\n att_vecs.append(att_t)\n\n att_tm1 = att_t\n h_tm1 = (h_t, cell_t)\n\n # compute copy probabilities and generation probabilities\n\n # (updated_code_len - 1, batch_size, hidden_size)\n att_vecs = torch.stack(att_vecs)\n\n # (updated_code_len - 1, batch_size, code_vocab_size)\n gen_code_token_log_prob = F.log_softmax(self.code_token_readout(att_vecs), dim=-1)\n\n # (updated_code_len - 1, batch_size, ctx_len)\n copy_ctx_token_prob = self.pointer_net(batched_context.encoding, batched_context.mask, att_vecs)\n\n # (updated_code_len - 1, batch_size, ctx_len)\n copy_prev_token_prob = self.pointer_net(batched_prev_code.encoding, batched_prev_code.syntax_token_mask, att_vecs)\n\n # (updated_code_len - 1, batch_size, [COPY_FROM_PREV, 
COPY_FROM_CONTEXT, GEN])\n token_copy_gen_switch = F.log_softmax(self.copy_gen_switch(att_vecs), dim=-1)\n\n # prepare masks, target indices\n tgt_gen_token_idx, tgt_gen_token_mask, \\\n tgt_copy_ctx_token_idx_mask, tgt_copy_ctx_token_mask, \\\n tgt_copy_prev_token_idx_mask, tgt_copy_prev_token_mask = self.get_gen_and_copy_index_and_mask(batch_examples,\n batched_prev_code,\n batched_context)\n\n # (updated_code_len - 1, batch_size)\n tgt_gen_token_prob = torch.gather(gen_code_token_log_prob, dim=-1,\n index=tgt_gen_token_idx.unsqueeze(2)).squeeze(2)\n tgt_gen_selection_prob = token_copy_gen_switch[:, :, 2]\n gated_tgt_gen_token_prob = tgt_gen_token_prob + tgt_gen_selection_prob\n\n # (updated_code_len - 1, batch_size)\n tgt_copy_ctx_token_prob = (torch.sum(copy_ctx_token_prob * tgt_copy_ctx_token_idx_mask,\n dim=-1) + 1.e-15).log()\n\n tgt_copy_ctx_selection_prob = token_copy_gen_switch[:, :, 1]\n gated_tgt_copy_ctx_token_prob = tgt_copy_ctx_token_prob + tgt_copy_ctx_selection_prob\n\n tgt_copy_prev_token_prob = (torch.sum(copy_prev_token_prob * tgt_copy_prev_token_idx_mask,\n dim=-1) + 1.e-15).log()\n\n tgt_copy_prev_selection_prob = token_copy_gen_switch[:, :, 0]\n gated_tgt_copy_prev_token_prob = tgt_copy_prev_token_prob + tgt_copy_prev_selection_prob\n\n tgt_gen_and_copy_token_prob = nn_utils.log_sum_exp(\n torch.stack([gated_tgt_gen_token_prob, gated_tgt_copy_ctx_token_prob, gated_tgt_copy_prev_token_prob], dim=-1),\n mask=torch.stack([tgt_gen_token_mask, tgt_copy_ctx_token_mask, tgt_copy_prev_token_mask], dim=-1))\n tgt_gen_and_copy_token_prob[tgt_gen_and_copy_token_prob == -float('inf')] = 0.\n\n # (batch_size)\n tgt_token_prob = tgt_gen_and_copy_token_prob.sum(dim=0)\n\n if debug:\n debug_info = OrderedDict()\n\n for batch_id, example in enumerate(batch_examples):\n action_trace = []\n log_p = 0.0\n for t in range(len(example.updated_code_chunk) + 1):\n p_t = tgt_gen_and_copy_token_prob[t, batch_id].item()\n entry = {'t': t,\n 'token': updated_code_list[batch_id][t + 1],\n 'copy_gen_switch': token_copy_gen_switch[t, batch_id].cpu().numpy(),\n 'tgt_gen_token_prob': tgt_gen_token_prob[t, batch_id].item() if tgt_gen_token_mask[t, batch_id].item() else 'n/a',\n 'tgt_copy_prev_token_prob': tgt_copy_prev_token_prob[t, batch_id].item() if tgt_copy_prev_token_mask[t, batch_id].item() else 'n/a',\n 'tgt_copy_ctx_token_prob': tgt_copy_ctx_token_prob[t, batch_id].item() if tgt_copy_ctx_token_mask[t, batch_id].item() else 'n/a',\n 'p_t': p_t}\n\n # entry.update({k: v[batch_id] for k, v in log_entries[t].items()})\n\n log_p += p_t\n action_trace.append(entry)\n\n debug_info[example.id] = dict(action_trace=action_trace, log_p=log_p)\n\n if debug:\n return tgt_token_prob, debug_info\n return tgt_token_prob\n\n def get_init_hidden_state(self, batched_prev_code, batched_context, change_vectors):\n if self.decoder_init_method == 'avg_pooling':\n node_state = batched_prev_code.encoding.sum(dim=1) / ((1. 
- batched_prev_code.mask.float()).sum(dim=-1, keepdim=True))\n elif self.decoder_init_method == 'root_node':\n node_state = batched_prev_code.encoding[:, 0]\n\n if hasattr(self, 'init_decode_vec_encoder_state_dropout'):\n node_state = self.init_decode_vec_encoder_state_dropout(node_state)\n\n x = torch.cat([node_state, change_vectors], dim=-1)\n dec_init_cell = self.decoder_cell_init(x)\n dec_init_state = F.tanh(dec_init_cell)\n\n return dec_init_state, dec_init_cell\n\n def get_gen_and_copy_index_and_mask(self, examples, batched_prev_code, batched_context):\n batch_size = len(examples)\n max_seq_len = max([len(e.updated_code_chunk) for e in examples]) + 1\n\n tgt_gen_token_idx = torch.zeros(max_seq_len, batch_size, dtype=torch.long, device=self.device)\n tgt_gen_token_mask = torch.zeros(max_seq_len, batch_size, dtype=torch.float, device=self.device)\n\n tgt_copy_ctx_token_idx_mask = torch.zeros(max_seq_len, batch_size, batched_context.encoding.size(1),\n dtype=torch.float, device=self.device)\n tgt_copy_ctx_token_mask = torch.zeros(max_seq_len, batch_size, dtype=torch.float, device=self.device)\n\n tgt_copy_prev_token_idx_mask = torch.zeros(max_seq_len, batch_size, batched_prev_code.encoding.size(1),\n dtype=torch.float, device=self.device)\n tgt_copy_prev_token_mask = torch.zeros(max_seq_len, batch_size, dtype=torch.float, device=self.device)\n\n for batch_id in range(batch_size):\n updated_code = examples[batch_id].updated_data\n prev_ast = examples[batch_id].prev_data\n context = batched_context.data[batch_id]\n for t, tgt_token in enumerate(updated_code):\n if SequentialDecoder._can_only_generate_this_token(tgt_token):\n tgt_gen_token_mask[t, batch_id] = 1\n tgt_gen_token_idx[t, batch_id] = self.vocab[tgt_token]\n else:\n copied = False\n if tgt_token in prev_ast.syntax_token_value2ids:\n token_pos_list = prev_ast.syntax_token_value2ids[tgt_token]\n tgt_copy_prev_token_idx_mask[t, batch_id, token_pos_list] = 1\n tgt_copy_prev_token_mask[t, batch_id] = 1\n copied = True\n if tgt_token in context:\n token_pos_list = [pos for pos, token in enumerate(context) if token == tgt_token]\n tgt_copy_ctx_token_idx_mask[t, batch_id, token_pos_list] = 1\n tgt_copy_ctx_token_mask[t, batch_id] = 1\n copied = True\n\n if not copied or tgt_token in self.vocab:\n # if the token is not copied, we can only generate this token from the vocabulary,\n # even if it is a <unk>.\n # otherwise, we can still generate it from the vocabulary\n tgt_gen_token_mask[t, batch_id] = 1\n tgt_gen_token_idx[t, batch_id] = self.vocab[tgt_token]\n\n # add the index for ending </s>\n tgt_gen_token_mask[len(updated_code), batch_id] = 1\n tgt_gen_token_idx[len(updated_code), batch_id] = self.vocab['</s>']\n\n return tgt_gen_token_idx, tgt_gen_token_mask, \\\n tgt_copy_ctx_token_idx_mask, tgt_copy_ctx_token_mask, \\\n tgt_copy_prev_token_idx_mask, tgt_copy_prev_token_mask\n\n def beam_search_with_source_encodings(self, prev_code, prev_code_encoding, context, context_encoding, change_vector,\n beam_size=5, max_decoding_time_step=70, debug=False, **kwargs):\n dec_init_vec = self.get_init_hidden_state(prev_code_encoding, context_encoding, change_vector)\n\n aggregated_prev_code_tokens = OrderedDict()\n for token_pos, token in prev_code.syntax_tokens_and_ids:\n aggregated_prev_code_tokens.setdefault(token.value, []).append(token_pos)\n\n aggregated_context_tokens = OrderedDict()\n for token_pos, token in enumerate(context):\n aggregated_context_tokens.setdefault(token, []).append(token_pos)\n\n # (1, prev_code_len, encode_size)\n 
prev_code_att_linear = self.attention_linear(prev_code_encoding.encoding)\n\n h_tm1 = dec_init_vec\n att_tm1 = torch.zeros(1, self.hidden_size, dtype=torch.float, device=self.device)\n\n t = 0\n hypotheses = [['<s>']]\n action_logs = [[]]\n hyp_scores = torch.zeros(len(hypotheses), dtype=torch.float, device=self.device)\n completed_hypotheses = []\n\n while len(completed_hypotheses) < beam_size and t < max_decoding_time_step:\n t += 1\n hyp_num = len(hypotheses)\n\n exp_src_encodings = prev_code_encoding.encoding.expand(hyp_num, prev_code_encoding.encoding.size(1),\n prev_code_encoding.encoding.size(2))\n exp_src_encodings_att_linear = prev_code_att_linear.expand(hyp_num, prev_code_att_linear.size(1),\n prev_code_att_linear.size(2))\n # (hyp_num, change_vec_size)\n exp_change_vector = change_vector.expand(hyp_num, change_vector.size(1))\n\n y_tm1 = torch.tensor([self.vocab[hyp[-1]] for hyp in hypotheses], dtype=torch.long, device=self.device)\n y_tm1_embed = self.code_token_embedder(y_tm1)\n\n x = torch.cat([y_tm1_embed, exp_change_vector], dim=-1)\n\n (h_t, cell_t), att_t = self.step(x, h_tm1,\n exp_src_encodings, batched_prev_code_mask=None,\n prev_code_att_linear=exp_src_encodings_att_linear)\n\n # (batch_size, code_vocab_size)\n gen_terminal_token_prob = F.softmax(self.code_token_readout(att_t), dim=-1)\n\n # (batch_size, ctx_len)\n copy_ctx_token_prob = self.pointer_net(context_encoding.encoding, None, att_t.unsqueeze(0)).squeeze(0)\n\n # (batch_size, ctx_len)\n copy_prev_token_prob = self.pointer_net(prev_code_encoding.encoding, prev_code_encoding.syntax_token_mask, att_t.unsqueeze(0)).squeeze(0)\n\n # (batch_size, [COPY_FROM_PREV, COPY_FROM_CONTEXT, GEN])\n token_copy_gen_switch = F.softmax(self.copy_gen_switch(att_t), dim=-1)\n\n # (batch_size, code_vocab_size)\n terminal_token_prob = token_copy_gen_switch[:, 2].unsqueeze(1) * gen_terminal_token_prob\n\n hyp_unk_copy_score_dict: Dict[str, torch.tensor] = OrderedDict() # Dict[token] = Tensor[hyp_num]\n for token, token_pos_list in aggregated_prev_code_tokens.items():\n # (hyp_num)\n sum_copy_prob = copy_prev_token_prob[:, token_pos_list].sum(dim=-1)\n # (hyp_num)\n gated_copy_prob = token_copy_gen_switch[:, 0] * sum_copy_prob\n\n if token in self.vocab:\n token_id = self.vocab[token]\n terminal_token_prob[:, token_id] = terminal_token_prob[:, token_id] + gated_copy_prob\n else:\n if token in hyp_unk_copy_score_dict:\n hyp_unk_copy_score_dict[token] = hyp_unk_copy_score_dict[token] + gated_copy_prob\n else:\n hyp_unk_copy_score_dict[token] = gated_copy_prob\n\n for token, token_pos_list in aggregated_context_tokens.items():\n # (hyp_num)\n sum_copy_prob = copy_ctx_token_prob[:, token_pos_list].sum(dim=-1)\n # (hyp_num)\n gated_copy_prob = token_copy_gen_switch[:, 1] * sum_copy_prob\n\n if token in self.vocab:\n token_id = self.vocab[token]\n terminal_token_prob[:, token_id] = terminal_token_prob[:, token_id] + gated_copy_prob\n else:\n if token in hyp_unk_copy_score_dict:\n hyp_unk_copy_score_dict[token] = hyp_unk_copy_score_dict[token] + gated_copy_prob\n else:\n hyp_unk_copy_score_dict[token] = gated_copy_prob\n\n terminal_token_prob = terminal_token_prob.log()\n candidate_hyp_scores = (hyp_scores.unsqueeze(1) + terminal_token_prob).view(-1)\n if len(hyp_unk_copy_score_dict) > 0:\n # (unk_num, hyp_num)\n unk_copy_hyp_scores = torch.cat(\n [copy_scores.log() + hyp_scores for copy_scores in hyp_unk_copy_score_dict.values()], dim=0)\n # (unk_num * hyp_num)\n unk_copy_hyp_scores = unk_copy_hyp_scores.view(-1)\n candidate_hyp_scores = 
torch.cat([candidate_hyp_scores, unk_copy_hyp_scores], dim=0)\n\n top_new_hyp_scores, top_new_hyp_pos = torch.topk(candidate_hyp_scores,\n k=min(candidate_hyp_scores.size(0),\n beam_size - len(completed_hypotheses)))\n\n live_hyp_ids = []\n new_hypotheses = []\n new_hypotheses_scores = []\n new_action_logs = []\n id2unk = list(hyp_unk_copy_score_dict.keys())\n vocab_size = terminal_token_prob.size(1)\n vocab_boundary = hyp_num * vocab_size # hyp_num * vocab_num\n for new_hyp_score, new_hyp_flattened_pos in zip(top_new_hyp_scores, top_new_hyp_pos):\n new_hyp_flattened_pos = new_hyp_flattened_pos.cpu().item()\n new_hyp_score = new_hyp_score.cpu().item()\n\n if new_hyp_flattened_pos < vocab_boundary:\n hyp_token_id = new_hyp_flattened_pos % vocab_size\n tgt_token = self.vocab.id2word[hyp_token_id]\n prev_hyp_id = new_hyp_flattened_pos // vocab_size\n else:\n k = new_hyp_flattened_pos - vocab_boundary\n unk_token_id = k // hyp_num\n tgt_token = id2unk[unk_token_id]\n prev_hyp_id = k % hyp_num\n\n if debug:\n action_log_entry = {'t': t,\n 'token': tgt_token,\n 'token_copy_gen_switch': token_copy_gen_switch[prev_hyp_id,\n :].log().cpu().numpy(),\n 'in_vocab': tgt_token in self.vocab,\n 'tgt_gen_token_prob': gen_terminal_token_prob[prev_hyp_id, self.vocab[\n tgt_token]].log().item() if tgt_token in self.vocab else 'n/a',\n 'tgt_copy_prev_token_prob': copy_prev_token_prob[\n prev_hyp_id, aggregated_prev_code_tokens[\n tgt_token]].sum().log().item() if tgt_token in aggregated_prev_code_tokens else 'n/a',\n 'tgt_copy_ctx_token_prob': copy_ctx_token_prob[\n prev_hyp_id, aggregated_context_tokens[\n tgt_token]].sum().log().item() if tgt_token in aggregated_context_tokens else 'n/a',\n 'p_t': (new_hyp_score - hyp_scores[prev_hyp_id]).item()\n }\n action_log_list = list(action_logs[prev_hyp_id]) + [action_log_entry]\n\n if tgt_token == '</s>':\n hyp_tgt_tokens = hypotheses[prev_hyp_id][1:]\n completed_hypotheses.append(SequentialHypothesis(hyp_tgt_tokens, new_hyp_score,\n action_log=action_log_list if debug else None))\n else:\n hyp_tgt_tokens = hypotheses[prev_hyp_id] + [tgt_token]\n new_hypotheses.append(hyp_tgt_tokens)\n\n live_hyp_ids.append(prev_hyp_id)\n new_hypotheses_scores.append(new_hyp_score)\n\n if debug:\n new_action_logs.append(action_log_list)\n\n if len(completed_hypotheses) >= beam_size:\n break\n\n live_hyp_ids = torch.tensor(live_hyp_ids, dtype=torch.long, device=self.device)\n h_tm1 = (h_t[live_hyp_ids], cell_t[live_hyp_ids])\n att_tm1 = att_t[live_hyp_ids]\n\n hypotheses = new_hypotheses\n hyp_scores = torch.tensor(new_hypotheses_scores, dtype=torch.float, device=self.device)\n if debug:\n action_logs = new_action_logs\n\n completed_hypotheses.sort(key=lambda hyp: -hyp.score)\n\n return completed_hypotheses\n"
] | [
[
"numpy.concatenate",
"torch.cat",
"torch.tensor"
],
[
"torch.nn.Dropout",
"torch.zeros",
"torch.cat",
"torch.sum",
"torch.tensor",
"torch.nn.LSTMCell",
"torch.nn.Linear",
"torch.stack",
"torch.nn.functional.tanh"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
junarwohn/tvm | [
"96c2e06cd063a695b3b485f2bdf8875df55fff1a",
"96c2e06cd063a695b3b485f2bdf8875df55fff1a",
"96c2e06cd063a695b3b485f2bdf8875df55fff1a"
] | [
"tvm_test/run_simple_mod_op2_pth.py",
"tvm_test/run_op2_pth.py",
"tvm_test/run_simple_mod_op3_pth.py"
] | [
"import tvm\nfrom tvm import relay\nfrom tvm import relay\nfrom tvm.runtime.vm import VirtualMachine\nfrom tvm.contrib.download import download_testdata\nfrom SimpleModel import Net\nimport numpy as np\nimport cv2\n\n# PyTorch imports\nimport torch\nimport torchvision\n\n# Time library for speed check\nimport time\n\nin_size = 32\n\ninput_shape = (1, 3, in_size, in_size)\n\n\ndef do_trace(model, inp):\n model_trace = torch.jit.trace(model, inp)\n model_trace.eval()\n return model_trace\n\n\n# model_func = torchvision.models.detection.maskrcnn_resnet50_fpn\n# model = TraceWrapper(model_func(pretrained=True))\n\nmodel = Net()\nmodel.load_state_dict(torch.load('./simple_mod.pth'))\n\nmodel.eval()\ninp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))\n\nwith torch.no_grad():\n out = model(inp)\n script_module = do_trace(model, inp)\n \n\nimg_url = (\n \"https://raw.githubusercontent.com/dmlc/web-data/\" \"master/gluoncv/detection/street_small.jpg\"\n)\nimg_path = download_testdata(img_url, \"test_street_small.jpg\", module=\"data\")\n\nimg = cv2.imread(img_path).astype(\"float32\")\nimg = cv2.resize(img, (in_size, in_size))\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nimg = np.transpose(img / 255.0, [2, 0, 1])\nimg = np.expand_dims(img, axis=0)\n\ninput_name = \"input0\"\nshape_list = [(input_name, input_shape)]\nmod, params = relay.frontend.from_pytorch(script_module, shape_list)\n\ntarget = \"llvm\"\n\nwith tvm.transform.PassContext(opt_level=2, disabled_pass=[\"FoldScaleAxis\"]):\n vm_exec = relay.vm.compile(mod, target=target, params=params)\n\n# dev = tvm.cuda()\ndev = tvm.cpu()\nvm = VirtualMachine(vm_exec, dev)\nvm.set_input(\"main\", **{input_name: img})\ninference_start = time.time()\ntvm_res = vm.run()\ninference_end = time.time()\ninference_time_tvm = inference_end - inference_start\nprint(\"Infernece Time : {}\".format(inference_time_tvm))\n\n\n",
"import tvm\nfrom tvm import relay\nfrom tvm import relay\nfrom tvm.runtime.vm import VirtualMachine\nfrom tvm.contrib.download import download_testdata\n\nimport numpy as np\nimport cv2\n\n# PyTorch imports\nimport torch\nimport torchvision\n\n# Time library for speed check\nimport time\n\nin_size = 300\n\ninput_shape = (1, 3, in_size, in_size)\n\n\ndef do_trace(model, inp):\n model_trace = torch.jit.trace(model, inp)\n model_trace.eval()\n return model_trace\n\n\ndef dict_to_tuple(out_dict):\n if \"masks\" in out_dict.keys():\n return out_dict[\"boxes\"], out_dict[\"scores\"], out_dict[\"labels\"], out_dict[\"masks\"]\n return out_dict[\"boxes\"], out_dict[\"scores\"], out_dict[\"labels\"]\n\n\nclass TraceWrapper(torch.nn.Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def forward(self, inp):\n out = self.model(inp)\n return dict_to_tuple(out[0])\n\n\nmodel_func = torchvision.models.detection.maskrcnn_resnet50_fpn\nmodel = TraceWrapper(model_func(pretrained=True))\n\nmodel.eval()\ninp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))\n\nwith torch.no_grad():\n out = model(inp)\n script_module = do_trace(model, inp)\n \n\nimg_url = (\n \"https://raw.githubusercontent.com/dmlc/web-data/\" \"master/gluoncv/detection/street_small.jpg\"\n)\nimg_path = download_testdata(img_url, \"test_street_small.jpg\", module=\"data\")\n\nimg = cv2.imread(img_path).astype(\"float32\")\nimg = cv2.resize(img, (in_size, in_size))\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nimg = np.transpose(img / 255.0, [2, 0, 1])\nimg = np.expand_dims(img, axis=0)\n\ninput_name = \"input0\"\nshape_list = [(input_name, input_shape)]\nmod, params = relay.frontend.from_pytorch(script_module, shape_list)\n\ntarget = \"llvm\"\n\nwith tvm.transform.PassContext(opt_level=2, disabled_pass=[\"FoldScaleAxis\"]):\n vm_exec = relay.vm.compile(mod, target=target, params=params)\n\n# dev = tvm.cuda()\ndev = tvm.cpu()\nvm = VirtualMachine(vm_exec, dev)\nvm.set_input(\"main\", **{input_name: img})\n\ntest_trial = 10\ntotal_time = 0\n\nfor i in range(test_trial):\n inference_start = time.time()\n tvm_res = vm.run()\n inference_end = time.time()\n inference_time_tvm = inference_end - inference_start\n total_time += inference_time_tvm\n print(\"Infernece Time : {}\".format(inference_time_tvm), end=' ')\n\nprint(\"Avg time : {}\".format(total_time / test_trial))\n\n\nscore_threshold = 0.9\nboxes = tvm_res[0].numpy().tolist()\nvalid_boxes = []\nfor i, score in enumerate(tvm_res[1].numpy().tolist()):\n if score > score_threshold:\n valid_boxes.append(boxes[i])\n else:\n break\n\nprint(\"Get {} valid boxes\".format(len(valid_boxes)))\n\n",
"import tvm\nfrom tvm import relay\nfrom tvm import relay\nfrom tvm.runtime.vm import VirtualMachine\nfrom tvm.contrib.download import download_testdata\nfrom SimpleModel import Net\nimport numpy as np\nimport cv2\n\n# PyTorch imports\nimport torch\nimport torchvision\n\n# Time library for speed check\nimport time\n\nin_size = 32\n\ninput_shape = (1, 3, in_size, in_size)\n\n\ndef do_trace(model, inp):\n model_trace = torch.jit.trace(model, inp)\n model_trace.eval()\n return model_trace\n\n\n# model_func = torchvision.models.detection.maskrcnn_resnet50_fpn\n# model = TraceWrapper(model_func(pretrained=True))\n\nmodel = Net()\nmodel.load_state_dict(torch.load('./simple_mod.pth'))\n\nmodel.eval()\ninp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))\n\nwith torch.no_grad():\n out = model(inp)\n script_module = do_trace(model, inp)\n \n\nimg_url = (\n \"https://raw.githubusercontent.com/dmlc/web-data/\" \"master/gluoncv/detection/street_small.jpg\"\n)\nimg_path = download_testdata(img_url, \"test_street_small.jpg\", module=\"data\")\n\nimg = cv2.imread(img_path).astype(\"float32\")\nimg = cv2.resize(img, (in_size, in_size))\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nimg = np.transpose(img / 255.0, [2, 0, 1])\nimg = np.expand_dims(img, axis=0)\n\ninput_name = \"input0\"\nshape_list = [(input_name, input_shape)]\nmod, params = relay.frontend.from_pytorch(script_module, shape_list)\n\ntarget = \"llvm\"\n\nwith tvm.transform.PassContext(opt_level=3, disabled_pass=[\"FoldScaleAxis\"]):\n vm_exec = relay.vm.compile(mod, target=target, params=params)\n\n# dev = tvm.cuda()\ndev = tvm.cpu()\nvm = VirtualMachine(vm_exec, dev)\nvm.set_input(\"main\", **{input_name: img})\ninference_start = time.time()\ntvm_res = vm.run()\ninference_end = time.time()\ninference_time_tvm = inference_end - inference_start\nprint(\"Infernece Time : {}\".format(inference_time_tvm))\n\n\n"
] | [
[
"numpy.expand_dims",
"torch.jit.trace",
"torch.load",
"torch.no_grad",
"numpy.transpose",
"numpy.random.uniform"
],
[
"numpy.expand_dims",
"torch.jit.trace",
"torch.no_grad",
"numpy.transpose",
"numpy.random.uniform"
],
[
"numpy.expand_dims",
"torch.jit.trace",
"torch.load",
"torch.no_grad",
"numpy.transpose",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rogerfitz/tutorials | [
"dae6470bad63b71e755caaff0b69893f5c9a1d63",
"dae6470bad63b71e755caaff0b69893f5c9a1d63"
] | [
"travel_time_visualization/server.py",
"draft_kings_contests_scrape/main.py"
] | [
"from flask import Flask, jsonify,render_template,request\nfrom config import API_KEY\nimport datetime\nfrom collections import defaultdict\nimport requests\nimport pandas as pd\nimport sys\nimport logging\nfrom itertools import repeat\n\napp = Flask(__name__)\ngunicorn_error_logger = logging.getLogger('gunicorn.error')\napp.logger.handlers.extend(gunicorn_error_logger.handlers)\napp.logger.setLevel(logging.DEBUG)\n\nfrom multiprocessing.dummy import Pool as ThreadPool \npool = ThreadPool(20) \nBASE_URL=\"https://maps.googleapis.com/maps/api/\"\napp.logger.debug(datetime.datetime.fromtimestamp(1498924020))\n\nclass GAPIError(Exception):\n status_code = 31337\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\ndef makeRequest(url, API_KEY):\n url+=\"&key=%s\"%API_KEY\n return requests.get(url).json()['rows'][0]['elements'][0]['duration_in_traffic']['value']\ndef getDistanceMatrix(origin,destination,mode,departure_time,traffic_model, API_KEY):\n #UTC Time\n url=BASE_URL+\"distancematrix/json?\"\n params=\"origins=%s&destinations=%s&mode=%s&departure_time=%s&traffic_model=%s\"%(origin,destination,mode,departure_time,traffic_model)\n return makeRequest(url+params, API_KEY)\n\ndef getNearest(dt,offset):\n return dt + (datetime.datetime.min - dt) % datetime.timedelta(minutes=offset)\n\ndef getChartData(starting_address,destination_address, leave_after, hours_to_grab,API_KEY,OFFSET=15):\n start_date=getNearest(leave_after,15)\n request_times=defaultdict(dict)\n dts=[int(leave_after.timestamp())]\n \n for dt in (start_date + datetime.timedelta(minutes=offset) for offset in range(0,60*hours_to_grab,OFFSET)):\n dts.append(int(dt.timestamp()))\n \n request_times={}\n for traffic_model in [\"best_guess\",\"pessimistic\",\"optimistic\"]:\n results=pool.starmap(\n getDistanceMatrix, zip(repeat(starting_address),repeat(destination_address),repeat(\"car\"),dts,repeat(traffic_model), repeat(API_KEY))\n )\n request_times[traffic_model]=results\n request_times[\"index\"]=dts\n travel_times=pd.DataFrame.from_dict(request_times).set_index(\"index\")/60\n viz_df=travel_times.reset_index()\n viz_df['x']=viz_df['index']*1000#Add milliseconds for JS datetime\n del viz_df['index']\n viz_json=viz_df.to_dict(orient=\"list\")\n #to c3 Columns\n columns=[]\n for col,vals in viz_json.items():\n if col!=\"x\":\n vals=[round(x) for x in vals]\n columns.append([col]+vals)\n return columns\n\[email protected](\"/\")\ndef index():\n return render_template('index.html', API_KEY=API_KEY)\n \[email protected]('/data')\ndef data():\n app.logger.debug(request.args) \n leaveAfter=request.args.get(\"leaveAfter\")\n leaveAfter=datetime.datetime.fromtimestamp(int(leaveAfter)/1000)\n USERS_API_KEY=request.args.get(\"API_KEY\",default=API_KEY)\n now=datetime.datetime.now()\n if leaveAfter<now:\n leaveAfter=now\n try:\n response=getChartData(request.args.get(\"startingAddress\"),request.args.get(\"destinationAddress\"),leaveAfter,8, USERS_API_KEY)\n return jsonify(response)\n except:\n raise GAPIError(\"API Key no longer valid\", status_code=31337)\n \n \[email protected](GAPIError)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n \nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n",
"import os\nfrom time import sleep\nimport datetime\nimport random\nfrom selenium import webdriver\nfrom config import DK_PASSWORD, DK_USERNAME\nchromeOptions = webdriver.ChromeOptions()\nprefs = {\"download.default_directory\" : os.getcwd()+\"/data\"}\nchromeOptions.add_experimental_option(\"prefs\",prefs)\ndriver = webdriver.Chrome(chrome_options=chromeOptions) # Optional argument, if not specified will search path.\n\ndef randomSleep(scale_factor=1):\n\tsleep_dur=.5*scale_factor*random.randint(5,10)*random.randint(1,3)\n\tprint(\"Sleeping for:\", sleep_dur, flush=True)#flush needed to make sure output gets written in timely matter\n\tsleep(sleep_dur)\n\ndriver.get(\"http://www.draftkings.com\")\nrandomSleep()\nsign_in = driver.find_element_by_link_text('SIGN IN')\nsign_in.click()\nrandomSleep(.5)\ndriver.find_element_by_name('username').send_keys(DK_USERNAME)\nrandomSleep(.001)\ndriver.find_element_by_name('username').send_keys(\"\\t\"+DK_PASSWORD)\nrandomSleep(.01)\ndriver.find_elements_by_xpath(\"//*[contains(text(), 'LOG IN')]\")[0].click()\nrandomSleep(.1)\n#driver.get(\"https://www.draftkings.com/lobby#/NFL\")\ndriver.find_element_by_link_text(\"NFL\").click()\nrandomSleep(.1)\ndriver.find_elements_by_xpath(\"//*[contains(text(), 'View Live Contests')]\")[0].click()\nrandomSleep(.5)\nprize_pool=driver.find_elements_by_css_selector(\"div.ui-state-default.slick-header-column.grid-text-with-icon\")[2]\nprize_pool.click()\nprize_pool.click()\n\n\nfrom bs4 import BeautifulSoup as soup\nimport pandas as pd\ntable=soup(driver.find_element_by_id(\"lobby-grid\").get_attribute(\"innerHTML\"),\"lxml\")\nrows=[]\nfor row in table.find(\"div\",\"grid-canvas\").find_all(\"div\",\"slick-row\"):\n row_data={}\n for cell in row.find_all(\"div\"):\n if cell.find(\"a\"):\n a=cell.find(\"a\")\n if a.get(\"data-tracking\"):\n row_data[a.get(\"data-tracking\")]=cell.text\n if cell.text==\"Watch\":\n row_data['link']=a.get(\"href\")\n rows.append(row_data)\nlive_contests=pd.DataFrame(rows)#missing some elements that you need to scroll for\n\ndef get_lineups(link):\n randomSleep(4)\n BASE_URL=\"https://www.draftkings.com\"\n driver.get(BASE_URL+link)\n driver.find_element_by_id(\"export-lineups-csv\").click()\n\nlive_contests['time_pulled']=datetime.datetime.now().strftime(\"%Y-%m-%d %I\")\ncontests['link'].apply(get_lineups)\nfile_name=\"NFL_contests.csv\"\ntry:\n\told_contests=pd.read_csv(file_name)\n\tfull_contests=pd.concat([old_contests,live_contests]).drop_duplicates(subset=[\"link\"])\nexcept:\n\tprint(\"No file %s found. Creating new.\"%file_name)\n\tfull_contests=live_contests\n\nfull_contests.to_csv(file_name)\n\n#run every sunday at 8PM CST"
] | [
[
"pandas.DataFrame.from_dict"
],
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mommy79/AuDi-GIT-turtlebot3_autorace | [
"fd1382246f1ee74ee70857006563184d672a6666"
] | [
"src/mission_node/src/intersection_detector.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\nimport math\n\n\nclass IntersectionDetector:\n def __init__(self):\n self.lower_blue = np.array([85, 90, 120], np.uint8)\n self.upper_blue = np.array([115, 255, 255], np.uint8)\n\n def fn_find_intersection_line(self, img_trans):\n # ROI 영역에 맞게 자른 이미지\n pers_height, pers_width = img_trans.shape[:2] # shape is w384 x h240\n img_gray = cv2.cvtColor(img_trans[:int(pers_height * 1/ 2), :].copy(), cv2.COLOR_RGB2GRAY)\n _, img_intersection = cv2.threshold(img_gray, 180, 255, 0)\n img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))\n img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))\n img_debug = cv2.merge((img_intersection, img_intersection, img_intersection)).copy()\n\n _, list_intersection_contour, _ = cv2.findContours(img_intersection, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n intersection_check = False\n\n for intersection_contour in list_intersection_contour:\n cv2.drawContours(img_debug, [intersection_contour], 0, (0, 0, 255), 2)\n x_stop, y_stop, w_stop, h_stop = cv2.boundingRect(intersection_contour)\n cv2.putText(img_debug, 'w: {}, h: {}'.format(w_stop, h_stop), (intersection_contour[0][0][0]+10, intersection_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))\n if 330 < w_stop:\n cv2.drawContours(img_debug, [intersection_contour], 0, (0, 255, 0), 2)\n intersection_check = True\n\n return intersection_check, img_debug\n\n def fn_find_exit_line(self, img_trans, direction='left'):\n # ROI 영역에 맞게 자른 이미지\n pers_height, pers_width = img_trans.shape[:2] # shape is w384 x h240\n if direction == 'left':\n img_gray = cv2.cvtColor(img_trans[:, int(pers_width * 1/ 2):].copy(), cv2.COLOR_RGB2GRAY)\n else:\n img_gray = cv2.cvtColor(img_trans[:, :int(pers_width * 1/ 2)].copy(), cv2.COLOR_RGB2GRAY)\n _, img_exit = cv2.threshold(img_gray, 190, 255, 0)\n img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))\n img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))\n img_debug = cv2.merge((img_exit, img_exit, img_exit)).copy()\n\n _, list_exit_contour, _ = cv2.findContours(img_exit, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n exit_check = False\n exit_pos = (0, 0)\n\n for exit_contour in list_exit_contour:\n cv2.drawContours(img_debug, [exit_contour], 0, (0, 0, 255), 2)\n x_exit, y_exit, w_exit, h_exit = cv2.boundingRect(exit_contour)\n bottom_most_pos = tuple(exit_contour[exit_contour[:, :, 1].argmax()][0])\n val_height = h_exit\n for pos_y in range(pers_height-1, 0, -1):\n if img_gray[pos_y, bottom_most_pos[0]] != 0:\n val_height = pos_y\n break\n\n cv2.putText(img_debug, 'w: {}, h: {}, length: {}'.format(w_exit, h_exit, val_height), (exit_contour[0][0][0]+10, exit_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))\n\n if h_exit > val_height * 4/5 and h_exit > pers_height/2:\n cv2.drawContours(img_debug, [exit_contour], 0, (0, 255, 0), 2)\n exit_pos = exit_contour[0][0]\n exit_check = True\n\n return exit_check, exit_pos, img_debug\n\n def fn_find_direction_sign(self, img_ori):\n left_sign_detect = False\n right_sign_detect = False\n\n img_height, img_width = img_ori.shape[:2]\n img_roi = img_ori[:int(img_height*1 / 2), :].copy()\n img_hsv = cv2.cvtColor(img_roi, cv2.COLOR_BGR2HSV)\n\n # Hsv fillter - Blue color\n img_mask_b = cv2.inRange(img_hsv, self.lower_blue, self.upper_blue)\n img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_OPEN, 
np.ones((7, 7), np.uint8))\n img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))\n #_, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n _, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n img_blue = cv2.bitwise_and(img_roi, img_roi, mask=img_mask_b)\n img_debug = img_roi.copy()\n\n list_obj = []\n\n for obj_contour in list_obj_contour:\n #cv2.drawContours(img_blue, [contour], 0, (0, 0, 255), 2)\n x, y, w, h = cv2.boundingRect(obj_contour)\n area = cv2.contourArea(obj_contour)\n aspect_ratio = float(w) / h\n area_ratio = float(area) / (w*h)\n cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 0, 255), 2)\n cv2.putText(img_debug, 'w: {}, h: {}, aspect_ratio: {:.2f}, area_ratio: {:.2f}'.format(w, h, aspect_ratio, area_ratio), (x+10, y+10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 127, 0))\n\n if (50 < w < 150) and (50 < h < 150) and (0.8 < aspect_ratio < 2.5) and (area_ratio > 0.5):\n cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 255, 255), 2)\n list_obj.append((img_roi[y:y+h, x:x+w].copy(), (x, y, w, h)))\n\n for (img_obj, (obj_x, obj_y, obj_w, obj_h)) in list_obj:\n img_obj_gray = cv2.cvtColor(img_obj, cv2.COLOR_BGR2GRAY)\n _, img_obj_binary = cv2.threshold(img_obj_gray, 180, 255, cv2.THRESH_BINARY)\n img_obj_binary = cv2.morphologyEx(img_obj_binary, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n _, list_arrow_contour, _ = cv2.findContours(img_obj_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n obj_x_mid = int(obj_w / 2)\n obj_y_mid = int(obj_h / 2)\n\n min_val_dis = 30\n bottom_most_pos = None\n\n for arrow_contour in list_arrow_contour:\n mask_arrow = np.zeros(img_obj_gray.shape, np.uint8)\n cv2.drawContours(mask_arrow, [arrow_contour], 0, 255, -1)\n arrow_x, arrow_y, arrow_w, arrow_h = cv2.boundingRect(arrow_contour)\n cv2.rectangle(img_debug, (obj_x + arrow_x, obj_y + arrow_y), (obj_x + arrow_x + arrow_w, arrow_y + obj_y + arrow_h), (255, 255, 0), 1)\n arrow_area = cv2.contourArea(arrow_contour)\n arrow_aspect_ratio = float(arrow_w) / arrow_h\n arrow_area_ratio = float(arrow_area) / (arrow_w * arrow_h)\n\n arrow_x_mid = int(arrow_x + arrow_w / 2)\n arrow_y_mid = int(arrow_y + arrow_h / 2)\n\n if (0.4 * obj_w < arrow_w) and (0.4 * obj_h < arrow_h) and (0.5 < arrow_aspect_ratio < 2) and (arrow_area_ratio > 0.3):\n val_dis = math.sqrt((arrow_x_mid - obj_x_mid) ** 2 + (arrow_y_mid - obj_y_mid) ** 2)\n if val_dis < min_val_dis:\n min_val_dis = val_dis\n\n #left_most_pos = tuple(obj_contour[obj_contour[:, :, 0].argmin()][0])\n #right_most_pos = tuple(obj_contour[obj_contour[:, :, 0].argmax()][0])\n #top_most_pos = tuple(obj_contour[obj_contour[:, :, 1].argmin()][0])\n bottom_most_pos = tuple(arrow_contour[arrow_contour[:, :, 1].argmax()][0])\n\n if bottom_most_pos is not None:\n cv2.circle(img_debug, (obj_x + bottom_most_pos[0], obj_y + bottom_most_pos[1]), 4, (0, 0, 255), -1)\n cv2.line(img_debug, (obj_x + obj_x_mid, obj_y), (obj_x + obj_x_mid, obj_y + obj_h), (255, 0, 255), 2)\n if bottom_most_pos[0] > obj_x_mid:\n left_sign_detect = True\n cv2.putText(img_debug, 'LEFT', (obj_x+10, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))\n cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (255, 0, 0), 2)\n else:\n right_sign_detect = True\n cv2.putText(img_debug, 'RIGHT', (obj_x+3, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))\n cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (0, 255, 0), 2)\n\n return 
left_sign_detect, right_sign_detect, np.vstack((img_debug, img_blue))\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.vstack",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ahoho/numpyro | [
"64e94e346c51a6c0c1ba51aa7b608e73513f158f",
"64e94e346c51a6c0c1ba51aa7b608e73513f158f"
] | [
"numpyro/distributions/transforms.py",
"numpyro/distributions/util.py"
] | [
"# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom jax import lax, ops, tree_flatten, tree_map, vmap\nfrom jax.flatten_util import ravel_pytree\nfrom jax.nn import softplus\nimport jax.numpy as jnp\nfrom jax.scipy.linalg import solve_triangular\nfrom jax.scipy.special import expit, logit\n\nfrom numpyro.distributions import constraints\nfrom numpyro.distributions.util import matrix_to_tril_vec, signed_stick_breaking_tril, sum_rightmost, vec_to_tril_matrix\nfrom numpyro.util import not_jax_tracer\n\n__all__ = [\n 'biject_to',\n 'AbsTransform',\n 'AffineTransform',\n 'CholeskyTransform',\n 'ComposeTransform',\n 'CorrCholeskyTransform',\n 'CorrMatrixCholeskyTransform',\n 'ExpTransform',\n 'SoftplusTransform',\n 'IdentityTransform',\n 'InvCholeskyTransform',\n 'LowerCholeskyTransform',\n 'LowerCholeskyAffine',\n 'PermuteTransform',\n 'PowerTransform',\n 'SigmoidTransform',\n 'SoftplusTransform',\n 'SoftplusLowerCholeskyTransform',\n 'StickBreakingTransform',\n 'Transform',\n 'UnpackTransform',\n]\n\n\ndef _clipped_expit(x):\n finfo = jnp.finfo(jnp.result_type(x))\n return jnp.clip(expit(x), a_min=finfo.tiny, a_max=1. - finfo.eps)\n\n\nclass Transform(object):\n domain = constraints.real\n codomain = constraints.real\n _inv = None\n\n @property\n def event_dim(self):\n warnings.warn(\"transform.event_dim is deprecated. Please use Transform.domain.event_dim to \"\n \"get input event dim or Transform.codomain.event_dim to get output event dim.\",\n FutureWarning)\n return self.domain.event_dim\n\n @property\n def inv(self):\n inv = None\n if self._inv is not None:\n inv = self._inv()\n if inv is None:\n inv = _InverseTransform(self)\n self._inv = weakref.ref(inv)\n return inv\n\n def __call__(self, x):\n return NotImplementedError\n\n def _inverse(self, y):\n raise NotImplementedError\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n raise NotImplementedError\n\n def call_with_intermediates(self, x):\n return self(x), None\n\n def forward_shape(self, shape):\n \"\"\"\n Infers the shape of the forward computation, given the input shape.\n Defaults to preserving shape.\n \"\"\"\n return shape\n\n def inverse_shape(self, shape):\n \"\"\"\n Infers the shapes of the inverse computation, given the output shape.\n Defaults to preserving shape.\n \"\"\"\n return shape\n\n\nclass _InverseTransform(Transform):\n def __init__(self, transform):\n super().__init__()\n self._inv = transform\n\n @property\n def domain(self):\n return self._inv.codomain\n\n @property\n def codomain(self):\n return self._inv.domain\n\n @property\n def inv(self):\n return self._inv\n\n def __call__(self, x):\n return self._inv._inverse(x)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # NB: we don't use intermediates for inverse transform\n return -self._inv.log_abs_det_jacobian(y, x, None)\n\n def forward_shape(self, shape):\n return self._inv.inverse_shape(shape)\n\n def inverse_shape(self, shape):\n return self._inv.forward_shape(shape)\n\n\nclass AbsTransform(Transform):\n domain = constraints.real\n codomain = constraints.positive\n\n def __eq__(self, other):\n return isinstance(other, AbsTransform)\n\n def __call__(self, x):\n return jnp.abs(x)\n\n def _inverse(self, y):\n return y\n\n\nclass AffineTransform(Transform):\n \"\"\"\n .. 
note:: When `scale` is a JAX tracer, we always assume that `scale > 0`\n when calculating `codomain`.\n \"\"\"\n def __init__(self, loc, scale, domain=constraints.real):\n self.loc = loc\n self.scale = scale\n self.domain = domain\n\n @property\n def codomain(self):\n if self.domain is constraints.real:\n return constraints.real\n elif isinstance(self.domain, constraints.greater_than):\n if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):\n return constraints.less_than(self(self.domain.lower_bound))\n # we suppose scale > 0 for any tracer\n else:\n return constraints.greater_than(self(self.domain.lower_bound))\n elif isinstance(self.domain, constraints.less_than):\n if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):\n return constraints.greater_than(self(self.domain.upper_bound))\n # we suppose scale > 0 for any tracer\n else:\n return constraints.less_than(self(self.domain.upper_bound))\n elif isinstance(self.domain, constraints.interval):\n if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):\n return constraints.interval(self(self.domain.upper_bound),\n self(self.domain.lower_bound))\n else:\n return constraints.interval(self(self.domain.lower_bound),\n self(self.domain.upper_bound))\n else:\n raise NotImplementedError\n\n def __call__(self, x):\n return self.loc + self.scale * x\n\n def _inverse(self, y):\n return (y - self.loc) / self.scale\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.broadcast_to(jnp.log(jnp.abs(self.scale)), jnp.shape(x))\n\n def forward_shape(self, shape):\n return lax.broadcast_shapes(shape,\n getattr(self.loc, \"shape\", ()),\n getattr(self.scale, \"shape\", ()))\n\n def inverse_shape(self, shape):\n return lax.broadcast_shapes(shape,\n getattr(self.loc, \"shape\", ()),\n getattr(self.scale, \"shape\", ()))\n\n\ndef _get_compose_transform_input_event_dim(parts):\n input_event_dim = parts[-1].domain.event_dim\n for part in parts[len(parts) - 1::-1]:\n input_event_dim = part.domain.event_dim + max(input_event_dim - part.codomain.event_dim, 0)\n return input_event_dim\n\n\ndef _get_compose_transform_output_event_dim(parts):\n output_event_dim = parts[0].codomain.event_dim\n for part in parts[1:]:\n output_event_dim = part.codomain.event_dim + max(output_event_dim - part.domain.event_dim, 0)\n return output_event_dim\n\n\nclass ComposeTransform(Transform):\n def __init__(self, parts):\n self.parts = parts\n\n @property\n def domain(self):\n input_event_dim = _get_compose_transform_input_event_dim(self.parts)\n first_input_event_dim = self.parts[0].domain.event_dim\n assert input_event_dim >= first_input_event_dim\n if input_event_dim == first_input_event_dim:\n return self.parts[0].domain\n else:\n return constraints.independent(self.parts[0].domain, input_event_dim - first_input_event_dim)\n\n @property\n def codomain(self):\n output_event_dim = _get_compose_transform_output_event_dim(self.parts)\n last_output_event_dim = self.parts[-1].codomain.event_dim\n assert output_event_dim >= last_output_event_dim\n if output_event_dim == last_output_event_dim:\n return self.parts[-1].codomain\n else:\n return constraints.independent(self.parts[-1].codomain, output_event_dim - last_output_event_dim)\n\n def __call__(self, x):\n for part in self.parts:\n x = part(x)\n return x\n\n def _inverse(self, y):\n for part in self.parts[::-1]:\n y = part.inv(y)\n return y\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is not None:\n if len(intermediates) != len(self.parts):\n 
raise ValueError('Intermediates array has length = {}. Expected = {}.'\n .format(len(intermediates), len(self.parts)))\n\n result = 0.\n input_event_dim = self.domain.event_dim\n for i, part in enumerate(self.parts[:-1]):\n y_tmp = part(x) if intermediates is None else intermediates[i][0]\n inter = None if intermediates is None else intermediates[i][1]\n logdet = part.log_abs_det_jacobian(x, y_tmp, intermediates=inter)\n batch_ndim = input_event_dim - part.domain.event_dim\n result = result + sum_rightmost(logdet, batch_ndim)\n input_event_dim = part.codomain.event_dim + batch_ndim\n x = y_tmp\n # account the the last transform, where y is available\n inter = None if intermediates is None else intermediates[-1]\n part = self.parts[-1]\n logdet = part.log_abs_det_jacobian(x, y, intermediates=inter)\n result = result + sum_rightmost(logdet, input_event_dim - part.domain.event_dim)\n return result\n\n def call_with_intermediates(self, x):\n intermediates = []\n for part in self.parts[:-1]:\n x, inter = part.call_with_intermediates(x)\n intermediates.append([x, inter])\n # NB: we don't need to hold the last output value in `intermediates`\n x, inter = self.parts[-1].call_with_intermediates(x)\n intermediates.append(inter)\n return x, intermediates\n\n def forward_shape(self, shape):\n for part in self.parts:\n shape = part.forward_shape(shape)\n return shape\n\n def inverse_shape(self, shape):\n for part in reversed(self.parts):\n shape = part.inverse_shape(shape)\n return shape\n\n\ndef _matrix_forward_shape(shape, offset=0):\n # Reshape from (..., N) to (..., D, D).\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions in input\")\n N = shape[-1]\n D = round((0.25 + 2 * N) ** 0.5 - 0.5)\n if D * (D + 1) // 2 != N:\n raise ValueError(\"Input is not a flattend lower-diagonal number\")\n D = D - offset\n return shape[:-1] + (D, D)\n\n\ndef _matrix_inverse_shape(shape, offset=0):\n # Reshape from (..., D, D) to (..., N).\n if len(shape) < 2:\n raise ValueError(\"Too few dimensions on input\")\n if shape[-2] != shape[-1]:\n raise ValueError(\"Input is not square\")\n D = shape[-1] + offset\n N = D * (D + 1) // 2\n return shape[:-2] + (N,)\n\n\nclass CholeskyTransform(Transform):\n r\"\"\"\n Transform via the mapping :math:`y = cholesky(x)`, where `x` is a\n positive definite matrix.\n \"\"\"\n domain = constraints.positive_definite\n codomain = constraints.lower_cholesky\n\n def __call__(self, x):\n return jnp.linalg.cholesky(x)\n\n def _inverse(self, y):\n return jnp.matmul(y, jnp.swapaxes(y, -2, -1))\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13\n n = jnp.shape(x)[-1]\n order = -jnp.arange(n, 0, -1)\n return -n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)\n\n\nclass CorrCholeskyTransform(Transform):\n r\"\"\"\n Transforms a uncontrained real vector :math:`x` with length :math:`D*(D-1)/2` into the\n Cholesky factor of a D-dimension correlation matrix. This Cholesky factor is a lower\n triangular matrix with positive diagonals and unit Euclidean norm for each row.\n The transform is processed as follows:\n\n 1. First we convert :math:`x` into a lower triangular matrix with the following order:\n\n .. math::\n \\begin{bmatrix}\n 1 & 0 & 0 & 0 \\\\\n x_0 & 1 & 0 & 0 \\\\\n x_1 & x_2 & 1 & 0 \\\\\n x_3 & x_4 & x_5 & 1\n \\end{bmatrix}\n\n 2. 
For each row :math:`X_i` of the lower triangular part, we apply a *signed* version of\n class :class:`StickBreakingTransform` to transform :math:`X_i` into a\n unit Euclidean length vector using the following steps:\n\n a. Scales into the interval :math:`(-1, 1)` domain: :math:`r_i = \\tanh(X_i)`.\n b. Transforms into an unsigned domain: :math:`z_i = r_i^2`.\n c. Applies :math:`s_i = StickBreakingTransform(z_i)`.\n d. Transforms back into signed domain: :math:`y_i = (sign(r_i), 1) * \\sqrt{s_i}`.\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.corr_cholesky\n\n def __call__(self, x):\n # we interchange step 1 and step 2.a for a better performance\n t = jnp.tanh(x)\n return signed_stick_breaking_tril(t)\n\n def _inverse(self, y):\n # inverse stick-breaking\n z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)\n pad_width = [(0, 0)] * y.ndim\n pad_width[-1] = (1, 0)\n z1m_cumprod_shifted = jnp.pad(z1m_cumprod[..., :-1], pad_width,\n mode=\"constant\", constant_values=1.)\n t = matrix_to_tril_vec(y, diagonal=-1) / jnp.sqrt(\n matrix_to_tril_vec(z1m_cumprod_shifted, diagonal=-1))\n # inverse of tanh\n x = jnp.log((1 + t) / (1 - t)) / 2\n return x\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # NB: because domain and codomain are two spaces with different dimensions, determinant of\n # Jacobian is not well-defined. Here we return `log_abs_det_jacobian` of `x` and the\n # flatten lower triangular part of `y`.\n\n # stick_breaking_logdet = log(y / r) = log(z_cumprod) (modulo right shifted)\n z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)\n # by taking diagonal=-2, we don't need to shift z_cumprod to the right\n # NB: diagonal=-2 works fine for (2 x 2) matrix, where we get an empty array\n z1m_cumprod_tril = matrix_to_tril_vec(z1m_cumprod, diagonal=-2)\n stick_breaking_logdet = 0.5 * jnp.sum(jnp.log(z1m_cumprod_tril), axis=-1)\n\n tanh_logdet = -2 * jnp.sum(x + softplus(-2 * x) - jnp.log(2.), axis=-1)\n return stick_breaking_logdet + tanh_logdet\n\n def forward_shape(self, shape):\n return _matrix_forward_shape(shape, offset=-1)\n\n def inverse_shape(self, shape):\n return _matrix_inverse_shape(shape, offset=-1)\n\n\nclass CorrMatrixCholeskyTransform(CholeskyTransform):\n r\"\"\"\n Transform via the mapping :math:`y = cholesky(x)`, where `x` is a\n correlation matrix.\n \"\"\"\n domain = constraints.corr_matrix\n codomain = constraints.corr_cholesky\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # NB: see derivation in LKJCholesky implementation\n n = jnp.shape(x)[-1]\n order = -jnp.arange(n - 1, -1, -1)\n return jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)\n\n\nclass ExpTransform(Transform):\n # TODO: refine domain/codomain logic through setters, especially when\n # transforms for inverses are supported\n def __init__(self, domain=constraints.real):\n self.domain = domain\n\n @property\n def codomain(self):\n if self.domain is constraints.real:\n return constraints.positive\n elif isinstance(self.domain, constraints.greater_than):\n return constraints.greater_than(self.__call__(self.domain.lower_bound))\n elif isinstance(self.domain, constraints.interval):\n return constraints.interval(self.__call__(self.domain.lower_bound),\n self.__call__(self.domain.upper_bound))\n else:\n raise NotImplementedError\n\n def __call__(self, x):\n # XXX consider to clamp from below for stability if necessary\n return jnp.exp(x)\n\n def _inverse(self, y):\n return jnp.log(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n 
return x\n\n\nclass IdentityTransform(Transform):\n\n def __call__(self, x):\n return x\n\n def _inverse(self, y):\n return y\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.zeros_like(x)\n\n\nclass IndependentTransform(Transform):\n \"\"\"\n Wraps a transform by aggregating over ``reinterpreted_batch_ndims``-many\n dims in :meth:`check`, so that an event is valid only if all its\n independent entries are valid.\n \"\"\"\n def __init__(self, base_transform, reinterpreted_batch_ndims):\n assert isinstance(base_transform, Transform)\n assert isinstance(reinterpreted_batch_ndims, int)\n assert reinterpreted_batch_ndims >= 0\n self.base_transform = base_transform\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n super().__init__()\n\n @property\n def domain(self):\n return constraints.independent(self.base_transform.domain, self.reinterpreted_batch_ndims)\n\n @property\n def codomain(self):\n return constraints.independent(self.base_transform.codomain, self.reinterpreted_batch_ndims)\n\n def __call__(self, x):\n return self.base_transform(x)\n\n def _inverse(self, y):\n return self.base_transform._inverse(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n result = self.base_transform.log_abs_det_jacobian(x, y, intermediates=intermediates)\n if jnp.ndim(result) < self.reinterpreted_batch_ndims:\n expected = self.domain.event_dim\n raise ValueError(f\"Expected x.dim() >= {expected} but got {jnp.ndim(x)}\")\n return sum_rightmost(result, self.reinterpreted_batch_ndims)\n\n def call_with_intermediates(self, x):\n return self.base_transform.call_with_intermediates(x)\n\n def forward_shape(self, shape):\n return self.base_transform.forward_shape(shape)\n\n def inverse_shape(self, shape):\n return self.base_transform.inverse_shape(shape)\n\n\nclass InvCholeskyTransform(Transform):\n r\"\"\"\n Transform via the mapping :math:`y = x @ x.T`, where `x` is a lower\n triangular matrix with positive diagonal.\n \"\"\"\n\n def __init__(self, domain=constraints.lower_cholesky):\n warnings.warn(\"InvCholeskyTransform is deprecated. 
Please use CholeskyTransform\"\n \" or CorrMatrixCholeskyTransform instead.\", FutureWarning)\n assert domain in [constraints.lower_cholesky, constraints.corr_cholesky]\n self.domain = domain\n\n @property\n def codomain(self):\n if self.domain is constraints.lower_cholesky:\n return constraints.positive_definite\n elif self.domain is constraints.corr_cholesky:\n return constraints.corr_matrix\n\n def __call__(self, x):\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n\n def _inverse(self, y):\n return jnp.linalg.cholesky(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n if self.domain is constraints.lower_cholesky:\n # Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13\n n = jnp.shape(x)[-1]\n order = jnp.arange(n, 0, -1)\n return n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)\n else:\n # NB: see derivation in LKJCholesky implementation\n n = jnp.shape(x)[-1]\n order = jnp.arange(n - 1, -1, -1)\n return jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)\n\n\nclass LowerCholeskyAffine(Transform):\n r\"\"\"\n Transform via the mapping :math:`y = loc + scale\\_tril\\ @\\ x`.\n\n :param loc: a real vector.\n :param scale_tril: a lower triangular matrix with positive diagonal.\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.real_vector\n\n def __init__(self, loc, scale_tril):\n if jnp.ndim(scale_tril) != 2:\n raise ValueError(\"Only support 2-dimensional scale_tril matrix. \"\n \"Please make a feature request if you need to \"\n \"use this transform with batched scale_tril.\")\n self.loc = loc\n self.scale_tril = scale_tril\n\n def __call__(self, x):\n return self.loc + jnp.squeeze(jnp.matmul(self.scale_tril, x[..., jnp.newaxis]), axis=-1)\n\n def _inverse(self, y):\n y = y - self.loc\n original_shape = jnp.shape(y)\n yt = jnp.reshape(y, (-1, original_shape[-1])).T\n xt = solve_triangular(self.scale_tril, yt, lower=True)\n return jnp.reshape(xt.T, original_shape)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.broadcast_to(jnp.log(jnp.diagonal(self.scale_tril, axis1=-2, axis2=-1)).sum(-1),\n jnp.shape(x)[:-1])\n\n def forward_shape(self, shape):\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions on input\")\n return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])\n\n def inverse_shape(self, shape):\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions on input\")\n return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])\n\n\nclass LowerCholeskyTransform(Transform):\n domain = constraints.real_vector\n codomain = constraints.lower_cholesky\n\n def __call__(self, x):\n n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)\n z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)\n diag = jnp.exp(x[..., -n:])\n return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)\n\n def _inverse(self, y):\n z = matrix_to_tril_vec(y, diagonal=-1)\n return jnp.concatenate([z, jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1))], axis=-1)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # the jacobian is diagonal, so logdet is the sum of diagonal `exp` transform\n n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)\n return x[..., -n:].sum(-1)\n\n def forward_shape(self, shape):\n return _matrix_forward_shape(shape)\n\n def inverse_shape(self, shape):\n return _matrix_inverse_shape(shape)\n\n\nclass OrderedTransform(Transform):\n \"\"\"\n Transform a real vector to an ordered vector.\n\n **References:**\n\n 
1. *Stan Reference Manual v2.20, section 10.6*,\n Stan Development Team\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.ordered_vector\n\n def __call__(self, x):\n z = jnp.concatenate([x[..., :1], jnp.exp(x[..., 1:])], axis=-1)\n return jnp.cumsum(z, axis=-1)\n\n def _inverse(self, y):\n x = jnp.log(y[..., 1:] - y[..., :-1])\n return jnp.concatenate([y[..., :1], x], axis=-1)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.sum(x[..., 1:], -1)\n\n\nclass PermuteTransform(Transform):\n domain = constraints.real_vector\n codomain = constraints.real_vector\n\n def __init__(self, permutation):\n self.permutation = permutation\n\n def __call__(self, x):\n return x[..., self.permutation]\n\n def _inverse(self, y):\n size = self.permutation.size\n permutation_inv = ops.index_update(jnp.zeros(size, dtype=jnp.result_type(int)),\n self.permutation,\n jnp.arange(size))\n return y[..., permutation_inv]\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.full(jnp.shape(x)[:-1], 0.)\n\n\nclass PowerTransform(Transform):\n domain = constraints.positive\n codomain = constraints.positive\n\n def __init__(self, exponent):\n self.exponent = exponent\n\n def __call__(self, x):\n return jnp.power(x, self.exponent)\n\n def _inverse(self, y):\n return jnp.power(y, 1 / self.exponent)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.log(jnp.abs(self.exponent * y / x))\n\n def forward_shape(self, shape):\n return lax.broadcast_shapes(shape, getattr(self.exponent, \"shape\", ()))\n\n def inverse_shape(self, shape):\n return lax.broadcast_shapes(shape, getattr(self.exponent, \"shape\", ()))\n\n\nclass SigmoidTransform(Transform):\n codomain = constraints.unit_interval\n\n def __call__(self, x):\n return _clipped_expit(x)\n\n def _inverse(self, y):\n return logit(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n x_abs = jnp.abs(x)\n return -x_abs - 2 * jnp.log1p(jnp.exp(-x_abs))\n\n\ndef _softplus_inv(y):\n return jnp.log(-jnp.expm1(-y)) + y\n\n\nclass SoftplusTransform(Transform):\n r\"\"\"\n Transform from unconstrained space to positive domain via softplus :math:`y = \\log(1 + \\exp(x))`.\n The inverse is computed as :math:`x = \\log(\\exp(y) - 1)`.\n \"\"\"\n domain = constraints.real\n codomain = constraints.softplus_positive\n\n def __call__(self, x):\n return softplus(x)\n\n def _inverse(self, y):\n return _softplus_inv(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return -softplus(-x)\n\n\nclass SoftplusLowerCholeskyTransform(Transform):\n \"\"\"\n Transform from unconstrained vector to lower-triangular matrices with\n nonnegative diagonal entries. 
This is useful for parameterizing positive\n definite matrices in terms of their Cholesky factorization.\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.softplus_lower_cholesky\n\n def __call__(self, x):\n n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)\n z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)\n diag = softplus(x[..., -n:])\n return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)\n\n def _inverse(self, y):\n z = matrix_to_tril_vec(y, diagonal=-1)\n diag = _softplus_inv(jnp.diagonal(y, axis1=-2, axis2=-1))\n return jnp.concatenate([z, diag], axis=-1)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # the jacobian is diagonal, so logdet is the sum of diagonal `exp` transform\n n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)\n return -softplus(-x[..., -n:]).sum(-1)\n\n def forward_shape(self, shape):\n return _matrix_forward_shape(shape)\n\n def inverse_shape(self, shape):\n return _matrix_inverse_shape(shape)\n\n\nclass StickBreakingTransform(Transform):\n domain = constraints.real_vector\n codomain = constraints.simplex\n\n def __call__(self, x):\n # we shift x to obtain a balanced mapping (0, 0, ..., 0) -> (1/K, 1/K, ..., 1/K)\n x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))\n # convert to probabilities (relative to the remaining) of each fraction of the stick\n z = _clipped_expit(x)\n z1m_cumprod = jnp.cumprod(1 - z, axis=-1)\n pad_width = [(0, 0)] * x.ndim\n pad_width[-1] = (0, 1)\n z_padded = jnp.pad(z, pad_width, mode=\"constant\", constant_values=1.)\n pad_width = [(0, 0)] * x.ndim\n pad_width[-1] = (1, 0)\n z1m_cumprod_shifted = jnp.pad(z1m_cumprod, pad_width, mode=\"constant\", constant_values=1.)\n return z_padded * z1m_cumprod_shifted\n\n def _inverse(self, y):\n y_crop = y[..., :-1]\n z1m_cumprod = jnp.clip(1 - jnp.cumsum(y_crop, axis=-1), a_min=jnp.finfo(y.dtype).tiny)\n # hence x = logit(z) = log(z / (1 - z)) = y[::-1] / z1m_cumprod\n x = jnp.log(y_crop / z1m_cumprod)\n return x + jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # Ref: https://mc-stan.org/docs/2_19/reference-manual/simplex-transform-section.html\n # |det|(J) = Product(y * (1 - z))\n x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))\n z = jnp.clip(expit(x), a_min=jnp.finfo(x.dtype).tiny)\n # XXX we use the identity 1 - z = z * exp(-x) to not worry about\n # the case z ~ 1\n return jnp.sum(jnp.log(y[..., :-1] * z) - x, axis=-1)\n\n def forward_shape(self, shape):\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions on input\")\n return shape[:-1] + (shape[-1] + 1,)\n\n def inverse_shape(self, shape):\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions on input\")\n return shape[:-1] + (shape[-1] - 1,)\n\n\nclass UnpackTransform(Transform):\n \"\"\"\n Transforms a contiguous array to a pytree of subarrays.\n\n :param unpack_fn: callable used to unpack a contiguous array.\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.dependent\n\n def __init__(self, unpack_fn):\n self.unpack_fn = unpack_fn\n\n def __call__(self, x):\n batch_shape = x.shape[:-1]\n if batch_shape:\n unpacked = vmap(self.unpack_fn)(x.reshape((-1,) + x.shape[-1:]))\n return tree_map(lambda z: jnp.reshape(z, batch_shape + z.shape[1:]), unpacked)\n else:\n return self.unpack_fn(x)\n\n def _inverse(self, y):\n leading_dims = [v.shape[0] if jnp.ndim(v) > 0 else 0\n for v in tree_flatten(y)[0]]\n d0 = leading_dims[0]\n not_scalar = d0 > 0 or len(leading_dims) > 1\n if not_scalar and 
all(d == d0 for d in leading_dims[1:]):\n warnings.warn(\"UnpackTransform.inv might lead to an unexpected behavior because it\"\n \" cannot transform a batch of unpacked arrays.\")\n return ravel_pytree(y)[0]\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.zeros(jnp.shape(x)[:-1])\n\n def forward_shape(self, shape):\n raise NotImplementedError\n\n def inverse_shape(self, shape):\n raise NotImplementedError\n\n\n##########################################################\n# CONSTRAINT_REGISTRY\n##########################################################\n\nclass ConstraintRegistry(object):\n def __init__(self):\n self._registry = {}\n\n def register(self, constraint, factory=None):\n if factory is None:\n return lambda factory: self.register(constraint, factory)\n\n if isinstance(constraint, constraints.Constraint):\n constraint = type(constraint)\n\n self._registry[constraint] = factory\n\n def __call__(self, constraint):\n try:\n factory = self._registry[type(constraint)]\n except KeyError as e:\n raise NotImplementedError from e\n\n return factory(constraint)\n\n\nbiject_to = ConstraintRegistry()\n\n\n@biject_to.register(constraints.corr_cholesky)\ndef _transform_to_corr_cholesky(constraint):\n return CorrCholeskyTransform()\n\n\n@biject_to.register(constraints.corr_matrix)\ndef _transform_to_corr_matrix(constraint):\n return ComposeTransform([CorrCholeskyTransform(),\n CorrMatrixCholeskyTransform().inv])\n\n\n@biject_to.register(constraints.greater_than)\ndef _transform_to_greater_than(constraint):\n if constraint is constraints.positive:\n return ExpTransform()\n return ComposeTransform([ExpTransform(),\n AffineTransform(constraint.lower_bound, 1,\n domain=constraints.positive)])\n\n\n@biject_to.register(constraints.less_than)\ndef _transform_to_less_than(constraint):\n return ComposeTransform([ExpTransform(),\n AffineTransform(constraint.upper_bound, -1,\n domain=constraints.positive)])\n\n\n@biject_to.register(constraints.independent)\ndef _biject_to_independent(constraint):\n return IndependentTransform(biject_to(constraint.base_constraint),\n constraint.reinterpreted_batch_ndims)\n\n\n@biject_to.register(constraints.interval)\ndef _transform_to_interval(constraint):\n if constraint is constraints.unit_interval:\n return SigmoidTransform()\n scale = constraint.upper_bound - constraint.lower_bound\n return ComposeTransform([SigmoidTransform(),\n AffineTransform(constraint.lower_bound, scale,\n domain=constraints.unit_interval)])\n\n\n@biject_to.register(constraints.lower_cholesky)\ndef _transform_to_lower_cholesky(constraint):\n return LowerCholeskyTransform()\n\n\n@biject_to.register(constraints.ordered_vector)\ndef _transform_to_ordered_vector(constraint):\n return OrderedTransform()\n\n\n@biject_to.register(constraints.positive_definite)\ndef _transform_to_positive_definite(constraint):\n return ComposeTransform([LowerCholeskyTransform(), CholeskyTransform().inv])\n\n\n@biject_to.register(constraints.positive_ordered_vector)\ndef _transform_to_positive_ordered_vector(constraint):\n return ComposeTransform([OrderedTransform(), ExpTransform()])\n\n\n@biject_to.register(constraints.real)\ndef _transform_to_real(constraint):\n return IdentityTransform()\n\n\n@biject_to.register(constraints.softplus_positive)\ndef _transform_to_softplus_positive(constraint):\n return SoftplusTransform()\n\n\n@biject_to.register(constraints.softplus_lower_cholesky)\ndef _transform_to_softplus_lower_cholesky(constraint):\n return 
SoftplusLowerCholeskyTransform()\n\n\n@biject_to.register(constraints.simplex)\ndef _transform_to_simplex(constraint):\n return StickBreakingTransform()\n",
"# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import namedtuple\nfrom functools import update_wrapper\nimport math\n\nimport numpy as np\n\nfrom jax import jit, lax, random, vmap\nfrom jax.lib import xla_bridge\nimport jax.numpy as jnp\nfrom jax.scipy.linalg import solve_triangular\nfrom jax.util import partial\n\n# Parameters for Transformed Rejection with Squeeze (TRS) algorithm - page 3.\n_tr_params = namedtuple('tr_params', ['c', 'b', 'a', 'alpha', 'u_r', 'v_r', 'm', 'log_p', 'log1_p', 'log_h'])\n\n\ndef _get_tr_params(n, p):\n # See Table 1. Additionally, we pre-compute log(p), log1(-p) and the\n # constant terms, that depend only on (n, p, m) in log(f(k)) (bottom of page 5).\n mu = n * p\n spq = jnp.sqrt(mu * (1 - p))\n c = mu + 0.5\n b = 1.15 + 2.53 * spq\n a = -0.0873 + 0.0248 * b + 0.01 * p\n alpha = (2.83 + 5.1 / b) * spq\n u_r = 0.43\n v_r = 0.92 - 4.2 / b\n m = jnp.floor((n + 1) * p).astype(n.dtype)\n log_p = jnp.log(p)\n log1_p = jnp.log1p(-p)\n log_h = (m + 0.5) * (jnp.log((m + 1.) / (n - m + 1.)) + log1_p - log_p) + \\\n (stirling_approx_tail(m) + stirling_approx_tail(n - m))\n return _tr_params(c, b, a, alpha, u_r, v_r, m, log_p, log1_p, log_h)\n\n\ndef stirling_approx_tail(k):\n precomputed = jnp.array([\n 0.08106146679532726,\n 0.04134069595540929,\n 0.02767792568499834,\n 0.02079067210376509,\n 0.01664469118982119,\n 0.01387612882307075,\n 0.01189670994589177,\n 0.01041126526197209,\n 0.009255462182712733,\n 0.008330563433362871,\n ])\n kp1 = k + 1\n kp1sq = (k + 1) ** 2\n return jnp.where(k < 10,\n precomputed[k],\n (1. / 12 - (1. / 360 - (1. / 1260) / kp1sq) / kp1sq) / kp1)\n\n\ndef _binomial_btrs(key, p, n):\n \"\"\"\n Based on the transformed rejection sampling algorithm (BTRS) from the\n following reference:\n\n Hormann, \"The Generation of Binonmial Random Variates\"\n (https://core.ac.uk/download/pdf/11007254.pdf)\n \"\"\"\n\n def _btrs_body_fn(val):\n _, key, _, _ = val\n key, key_u, key_v = random.split(key, 3)\n u = random.uniform(key_u)\n v = random.uniform(key_v)\n u = u - 0.5\n k = jnp.floor((2 * tr_params.a / (0.5 - jnp.abs(u)) + tr_params.b) * u + tr_params.c).astype(n.dtype)\n return k, key, u, v\n\n def _btrs_cond_fn(val):\n def accept_fn(k, u, v):\n # See acceptance condition in Step 3. (Page 3) of TRS algorithm\n # v <= f(k) * g_grad(u) / alpha\n\n m = tr_params.m\n log_p = tr_params.log_p\n log1_p = tr_params.log1_p\n # See: formula for log(f(k)) at bottom of Page 5.\n log_f = (n + 1.) * jnp.log((n - m + 1.) / (n - k + 1.)) + \\\n (k + 0.5) * (jnp.log((n - k + 1.) 
/ (k + 1.)) + log_p - log1_p) + \\\n (stirling_approx_tail(k) - stirling_approx_tail(n - k)) + tr_params.log_h\n g = (tr_params.a / (0.5 - jnp.abs(u)) ** 2) + tr_params.b\n return jnp.log((v * tr_params.alpha) / g) <= log_f\n\n k, key, u, v = val\n early_accept = (jnp.abs(u) <= tr_params.u_r) & (v <= tr_params.v_r)\n early_reject = (k < 0) | (k > n)\n return lax.cond(early_accept | early_reject,\n (),\n lambda _: ~early_accept,\n (k, u, v),\n lambda x: ~accept_fn(*x))\n\n tr_params = _get_tr_params(n, p)\n ret = lax.while_loop(_btrs_cond_fn, _btrs_body_fn,\n (-1, key, 1., 1.)) # use k=-1 initially so that cond_fn returns True\n return ret[0]\n\n\ndef _binomial_inversion(key, p, n):\n def _binom_inv_body_fn(val):\n i, key, geom_acc = val\n key, key_u = random.split(key)\n u = random.uniform(key_u)\n geom = jnp.floor(jnp.log1p(-u) / log1_p) + 1\n geom_acc = geom_acc + geom\n return i + 1, key, geom_acc\n\n def _binom_inv_cond_fn(val):\n i, _, geom_acc = val\n return geom_acc <= n\n\n log1_p = jnp.log1p(-p)\n ret = lax.while_loop(_binom_inv_cond_fn, _binom_inv_body_fn,\n (-1, key, 0.))\n return ret[0]\n\n\ndef _binomial_dispatch(key, p, n):\n def dispatch(key, p, n):\n is_le_mid = p <= 0.5\n pq = jnp.where(is_le_mid, p, 1 - p)\n mu = n * pq\n k = lax.cond(mu < 10,\n (key, pq, n),\n lambda x: _binomial_inversion(*x),\n (key, pq, n),\n lambda x: _binomial_btrs(*x))\n return jnp.where(is_le_mid, k, n - k)\n\n # Return 0 for nan `p` or negative `n`, since nan values are not allowed for integer types\n cond0 = jnp.isfinite(p) & (n > 0) & (p > 0)\n return lax.cond(cond0 & (p < 1),\n (key, p, n),\n lambda x: dispatch(*x),\n (),\n lambda _: jnp.where(cond0, n, 0))\n\n\n@partial(jit, static_argnums=(3,))\ndef _binomial(key, p, n, shape):\n shape = shape or lax.broadcast_shapes(jnp.shape(p), jnp.shape(n))\n # reshape to map over axis 0\n p = jnp.reshape(jnp.broadcast_to(p, shape), -1)\n n = jnp.reshape(jnp.broadcast_to(n, shape), -1)\n key = random.split(key, jnp.size(p))\n if xla_bridge.get_backend().platform == 'cpu':\n ret = lax.map(lambda x: _binomial_dispatch(*x),\n (key, p, n))\n else:\n ret = vmap(lambda *x: _binomial_dispatch(*x))(key, p, n)\n return jnp.reshape(ret, shape)\n\n\ndef binomial(key, p, n=1, shape=()):\n return _binomial(key, p, n, shape)\n\n\n@partial(jit, static_argnums=(2,))\ndef _categorical(key, p, shape):\n # this implementation is fast when event shape is small, and slow otherwise\n # Ref: https://stackoverflow.com/a/34190035\n shape = shape or p.shape[:-1]\n s = jnp.cumsum(p, axis=-1)\n r = random.uniform(key, shape=shape + (1,))\n # FIXME: replace this computation by using binary search as suggested in the above\n # reference. 
A while_loop + vmap for a reshaped 2D array would be enough.\n return jnp.sum(s < r, axis=-1)\n\n\ndef categorical(key, p, shape=()):\n return _categorical(key, p, shape)\n\n\ndef _scatter_add_one(operand, indices, updates):\n return lax.scatter_add(operand, indices, updates,\n lax.ScatterDimensionNumbers(update_window_dims=(),\n inserted_window_dims=(0,),\n scatter_dims_to_operand_dims=(0,)))\n\n\n@partial(jit, static_argnums=(3, 4))\ndef _multinomial(key, p, n, n_max, shape=()):\n if jnp.shape(n) != jnp.shape(p)[:-1]:\n broadcast_shape = lax.broadcast_shapes(jnp.shape(n), jnp.shape(p)[:-1])\n n = jnp.broadcast_to(n, broadcast_shape)\n p = jnp.broadcast_to(p, broadcast_shape + jnp.shape(p)[-1:])\n shape = shape or p.shape[:-1]\n # get indices from categorical distribution then gather the result\n indices = categorical(key, p, (n_max,) + shape)\n # mask out values when counts is heterogeneous\n if jnp.ndim(n) > 0:\n mask = promote_shapes(jnp.arange(n_max) < jnp.expand_dims(n, -1), shape=shape + (n_max,))[0]\n mask = jnp.moveaxis(mask, -1, 0).astype(indices.dtype)\n excess = jnp.concatenate([jnp.expand_dims(n_max - n, -1), jnp.zeros(jnp.shape(n) + (p.shape[-1] - 1,))], -1)\n else:\n mask = 1\n excess = 0\n # NB: we transpose to move batch shape to the front\n indices_2D = (jnp.reshape(indices * mask, (n_max, -1,))).T\n samples_2D = vmap(_scatter_add_one, (0, 0, 0))(jnp.zeros((indices_2D.shape[0], p.shape[-1]),\n dtype=indices.dtype),\n jnp.expand_dims(indices_2D, axis=-1),\n jnp.ones(indices_2D.shape, dtype=indices.dtype))\n return jnp.reshape(samples_2D, shape + p.shape[-1:]) - excess\n\n\ndef multinomial(key, p, n, shape=()):\n n_max = int(jnp.max(n))\n return _multinomial(key, p, n, n_max, shape)\n\n\ndef cholesky_of_inverse(matrix):\n # This formulation only takes the inverse of a triangular matrix\n # which is more numerically stable.\n # Refer to:\n # https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril\n tril_inv = jnp.swapaxes(jnp.linalg.cholesky(matrix[..., ::-1, ::-1])[..., ::-1, ::-1], -2, -1)\n identity = jnp.broadcast_to(jnp.identity(matrix.shape[-1]), tril_inv.shape)\n return solve_triangular(tril_inv, identity, lower=True)\n\n\n# TODO: move upstream to jax.nn\ndef binary_cross_entropy_with_logits(x, y):\n # compute -y * log(sigmoid(x)) - (1 - y) * log(1 - sigmoid(x))\n # Ref: https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n return jnp.clip(x, 0) + jnp.log1p(jnp.exp(-jnp.abs(x))) - x * y\n\n\ndef _reshape(x, shape):\n if isinstance(x, (int, float, np.ndarray, np.generic)):\n return np.reshape(x, shape)\n else:\n return jnp.reshape(x, shape)\n\n\ndef promote_shapes(*args, shape=()):\n # adapted from lax.lax_numpy\n if len(args) < 2 and not shape:\n return args\n else:\n shapes = [jnp.shape(arg) for arg in args]\n num_dims = len(lax.broadcast_shapes(shape, *shapes))\n return [_reshape(arg, (1,) * (num_dims - len(s)) + s)\n if len(s) < num_dims else arg for arg, s in zip(args, shapes)]\n\n\ndef sum_rightmost(x, dim):\n \"\"\"\n Sum out ``dim`` many rightmost dimensions of a given tensor.\n \"\"\"\n out_dim = jnp.ndim(x) - dim\n x = jnp.reshape(jnp.expand_dims(x, -1), jnp.shape(x)[:out_dim] + (-1,))\n return jnp.sum(x, axis=-1)\n\n\ndef matrix_to_tril_vec(x, diagonal=0):\n idxs = jnp.tril_indices(x.shape[-1], diagonal)\n return x[..., idxs[0], idxs[1]]\n\n\ndef vec_to_tril_matrix(t, diagonal=0):\n # NB: the following formula only works for diagonal <= 0\n n = round((math.sqrt(1 + 8 * t.shape[-1]) - 
1) / 2) - diagonal\n n2 = n * n\n idx = jnp.reshape(jnp.arange(n2), (n, n))[jnp.tril_indices(n, diagonal)]\n x = lax.scatter_add(jnp.zeros(t.shape[:-1] + (n2,)), jnp.expand_dims(idx, axis=-1), t,\n lax.ScatterDimensionNumbers(update_window_dims=range(t.ndim - 1),\n inserted_window_dims=(t.ndim - 1,),\n scatter_dims_to_operand_dims=(t.ndim - 1,)))\n return jnp.reshape(x, x.shape[:-1] + (n, n))\n\n\ndef cholesky_update(L, x, coef=1):\n \"\"\"\n Finds cholesky of L @ L.T + coef * x @ x.T.\n\n **References;**\n\n 1. A more efficient rank-one covariance matrix update for evolution strategies,\n Oswin Krause and Christian Igel\n \"\"\"\n batch_shape = lax.broadcast_shapes(L.shape[:-2], x.shape[:-1])\n L = jnp.broadcast_to(L, batch_shape + L.shape[-2:])\n x = jnp.broadcast_to(x, batch_shape + x.shape[-1:])\n diag = jnp.diagonal(L, axis1=-2, axis2=-1)\n # convert to unit diagonal triangular matrix: L @ D @ T.t\n L = L / diag[..., None, :]\n D = jnp.square(diag)\n\n def scan_fn(carry, val):\n b, w = carry\n j, Dj, L_j = val\n wj = w[..., j]\n gamma = b * Dj + coef * jnp.square(wj)\n Dj_new = gamma / b\n b = gamma / Dj_new\n\n # update vectors w and L_j\n w = w - wj[..., None] * L_j\n L_j = L_j + (coef * wj / gamma)[..., None] * w\n return (b, w), (Dj_new, L_j)\n\n D, L = jnp.moveaxis(D, -1, 0), jnp.moveaxis(L, -1, 0) # move scan dim to front\n _, (D, L) = lax.scan(scan_fn, (jnp.ones(batch_shape), x), (jnp.arange(D.shape[0]), D, L))\n D, L = jnp.moveaxis(D, 0, -1), jnp.moveaxis(L, 0, -1) # move scan dim back\n return L * jnp.sqrt(D)[..., None, :]\n\n\ndef signed_stick_breaking_tril(t):\n # make sure that t in (-1, 1)\n eps = jnp.finfo(t.dtype).eps\n t = jnp.clip(t, a_min=(-1 + eps), a_max=(1 - eps))\n # transform t to tril matrix with identity diagonal\n r = vec_to_tril_matrix(t, diagonal=-1)\n\n # apply stick-breaking on the squared values;\n # we omit the step of computing s = z * z_cumprod by using the fact:\n # y = sign(r) * s = sign(r) * sqrt(z * z_cumprod) = r * sqrt(z_cumprod)\n z = r ** 2\n z1m_cumprod_sqrt = jnp.cumprod(jnp.sqrt(1 - z), axis=-1)\n\n pad_width = [(0, 0)] * z.ndim\n pad_width[-1] = (1, 0)\n z1m_cumprod_sqrt_shifted = jnp.pad(z1m_cumprod_sqrt[..., :-1], pad_width,\n mode=\"constant\", constant_values=1.)\n y = (r + jnp.identity(r.shape[-1])) * z1m_cumprod_sqrt_shifted\n return y\n\n\ndef logmatmulexp(x, y):\n \"\"\"\n Numerically stable version of ``(x.log() @ y.log()).exp()``.\n \"\"\"\n x_shift = lax.stop_gradient(jnp.amax(x, -1, keepdims=True))\n y_shift = lax.stop_gradient(jnp.amax(y, -2, keepdims=True))\n xy = jnp.log(jnp.matmul(jnp.exp(x - x_shift), jnp.exp(y - y_shift)))\n return xy + x_shift + y_shift\n\n\ndef clamp_probs(probs):\n finfo = jnp.finfo(jnp.result_type(probs))\n return jnp.clip(probs, a_min=finfo.tiny, a_max=1. - finfo.eps)\n\n\ndef is_identically_zero(x):\n \"\"\"\n Check if argument is exactly the number zero. True for the number zero;\n false for other numbers; false for ndarrays.\n \"\"\"\n if isinstance(x, (int, float)):\n return x == 0\n else:\n return False\n\n\ndef is_identically_one(x):\n \"\"\"\n Check if argument is exactly the number one. 
True for the number one;\n false for other numbers; false for ndarrays.\n \"\"\"\n if isinstance(x, (int, float)):\n return x == 1\n else:\n return False\n\n\ndef von_mises_centered(key, concentration, shape=(), dtype=jnp.float64):\n \"\"\" Compute centered von Mises samples using rejection sampling from [1] with wrapped Cauchy proposal.\n\n *** References ***\n [1] Luc Devroye \"Non-Uniform Random Variate Generation\", Springer-Verlag, 1986;\n Chapter 9, p. 473-476. http://www.nrbook.com/devroye/Devroye_files/chapter_nine.pdf\n\n\n :param key: random number generator key\n :param concentration: concentration of distribution\n :param shape: shape of samples\n :param dtype: float precesions for choosing correct s cutfoff\n :return: centered samples from von Mises\n \"\"\"\n shape = shape or jnp.shape(concentration)\n dtype = jnp.result_type(dtype)\n concentration = lax.convert_element_type(concentration, dtype)\n concentration = jnp.broadcast_to(concentration, shape)\n return _von_mises_centered(key, concentration, shape, dtype)\n\n\n@partial(jit, static_argnums=(2, 3))\ndef _von_mises_centered(key, concentration, shape, dtype):\n # Cutoff from TensorFlow probability\n # (https://github.com/tensorflow/probability/blob/f051e03dd3cc847d31061803c2b31c564562a993/tensorflow_probability/python/distributions/von_mises.py#L567-L570)\n s_cutoff_map = {jnp.dtype(jnp.float16): 1.8e-1,\n jnp.dtype(jnp.float32): 2e-2,\n jnp.dtype(jnp.float64): 1.2e-4}\n s_cutoff = s_cutoff_map.get(dtype)\n\n r = 1. + jnp.sqrt(1. + 4. * concentration ** 2)\n rho = (r - jnp.sqrt(2. * r)) / (2. * concentration)\n s_exact = (1. + rho ** 2) / (2. * rho)\n\n s_approximate = 1. / concentration\n\n s = jnp.where(concentration > s_cutoff, s_exact, s_approximate)\n\n def cond_fn(*args):\n \"\"\" check if all are done or reached max number of iterations \"\"\"\n i, _, done, _, _ = args[0]\n return jnp.bitwise_and(i < 100, jnp.logical_not(jnp.all(done)))\n\n def body_fn(*args):\n i, key, done, _, w = args[0]\n uni_ukey, uni_vkey, key = random.split(key, 3)\n\n u = random.uniform(key=uni_ukey, shape=shape, dtype=concentration.dtype, minval=-1., maxval=1.)\n z = jnp.cos(jnp.pi * u)\n w = jnp.where(done, w, (1. + s * z) / (s + z)) # Update where not done\n\n y = concentration * (s - w)\n v = random.uniform(key=uni_vkey, shape=shape, dtype=concentration.dtype)\n\n accept = (y * (2. - y) >= v) | (jnp.log(y / v) + 1. 
>= y)\n\n return i+1, key, accept | done, u, w\n\n init_done = jnp.zeros(shape, dtype=bool)\n init_u = jnp.zeros(shape)\n init_w = jnp.zeros(shape)\n\n _, _, done, u, w = lax.while_loop(\n cond_fun=cond_fn,\n body_fun=body_fn,\n init_val=(jnp.array(0), key, init_done, init_u, init_w)\n )\n\n return jnp.sign(u) * jnp.arccos(w)\n\n\ndef scale_and_mask(x, scale=None, mask=None):\n \"\"\"\n Scale and mask a tensor, broadcasting and avoiding unnecessary ops.\n \"\"\"\n if is_identically_zero(x):\n return x\n if not (scale is None or is_identically_one(scale)):\n x = x * scale\n if mask is None:\n return x\n else:\n return jnp.where(mask, x, 0.)\n\n\n# TODO: use funsor implementation\ndef periodic_repeat(x, size, dim):\n \"\"\"\n Repeat a ``period``-sized array up to given ``size``.\n \"\"\"\n assert isinstance(size, int) and size >= 0\n assert isinstance(dim, int)\n if dim >= 0:\n dim -= jnp.ndim(x)\n\n period = jnp.shape(x)[dim]\n repeats = (size + period - 1) // period\n result = jnp.repeat(x, repeats, axis=dim)\n result = result[(Ellipsis, slice(None, size)) + (slice(None),) * (-1 - dim)]\n return result\n\n\ndef safe_normalize(x, *, p=2):\n \"\"\"\n Safely project a vector onto the sphere wrt the ``p``-norm. This avoids the\n singularity at zero by mapping zero to the uniform unit vector proportional\n to ``[1, 1, ..., 1]``.\n\n :param numpy.ndarray x: A vector\n :param float p: The norm exponent, defaults to 2 i.e. the Euclidean norm.\n :returns: A normalized version ``x / ||x||_p``.\n :rtype: numpy.ndarray\n \"\"\"\n assert isinstance(p, (float, int))\n assert p >= 0\n norm = jnp.linalg.norm(x, p, axis=-1, keepdims=True)\n x = x / jnp.clip(norm, a_min=jnp.finfo(x).tiny)\n # Avoid the singularity.\n mask = jnp.all(x == 0, axis=-1, keepdims=True)\n x = jnp.where(mask, x.shape[-1] ** (-1/p), x)\n return x\n\n\n# src: https://github.com/google/jax/blob/5a41779fbe12ba7213cd3aa1169d3b0ffb02a094/jax/_src/random.py#L95\ndef is_prng_key(key):\n try:\n return key.shape == (2,) and key.dtype == np.uint32\n except AttributeError:\n return False\n\n\n# The is sourced from: torch.distributions.util.py\n#\n# Copyright (c) 2016- Facebook, Inc (Adam Paszke)\n# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)\n# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\n# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\n# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\n# Copyright (c) 2011-2013 NYU (Clement Farabet)\n# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\n# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)\n# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\nclass lazy_property(object):\n r\"\"\"\n Used as a decorator for lazy loading of class attributes. This uses a\n non-data descriptor that calls the wrapped method to compute the property on\n first call; thereafter replacing the wrapped method into an instance\n attribute.\n \"\"\"\n\n def __init__(self, wrapped):\n self.wrapped = wrapped\n update_wrapper(self, wrapped)\n\n # This is to prevent warnings from sphinx\n def __call__(self, *args, **kwargs):\n return self.wrapped(*args, **kwargs)\n\n def __get__(self, instance, obj_type=None):\n if instance is None:\n return self\n value = self.wrapped(instance)\n setattr(instance, self.wrapped.__name__, value)\n return value\n\n\ndef validate_sample(log_prob_fn):\n def wrapper(self, *args, **kwargs):\n log_prob = log_prob_fn(self, *args, *kwargs)\n if self._validate_args:\n value = kwargs['value'] if 'value' in kwargs else args[0]\n mask = self._validate_sample(value)\n log_prob = jnp.where(mask, log_prob, -jnp.inf)\n return log_prob\n\n return wrapper\n"
] | [
[
"numpy.less"
],
[
"numpy.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
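# --- Illustrative usage sketch (editor's addition, not part of the captured
# dataset row above). Assumes numpyro and jax are installed; it exercises the
# `biject_to` constraint registry defined in the transforms.py source in this
# row: a constraint is looked up to obtain a bijective Transform, whose
# forward map, inverse, and log|det J| can then be evaluated on plain arrays.
import jax.numpy as jnp
from numpyro.distributions import constraints
from numpyro.distributions.transforms import biject_to

x = jnp.array([-1.0, 0.0, 2.0])        # unconstrained inputs
t = biject_to(constraints.positive)    # registry returns ExpTransform here
y = t(x)                               # forward: exp(x), always positive
x_back = t.inv(y)                      # inverse transform: log(y)
ldj = t.log_abs_det_jacobian(x, y)     # for ExpTransform this is just x

assert jnp.allclose(x_back, x)
assert jnp.allclose(ldj, x)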
tvieijra/netket | [
"ef3ff32b242f25b6a6ae0f08db1aada85775a2ea"
] | [
"Test/Machine/rbm.py"
] | [
"# Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport netket\nimport numpy as _np\n\n__all__ = [\"PyRbm\"]\n\n\nclass PyRbm(netket.machine.CxxMachine):\n \"\"\"\n __Do not use me in production code!__\n\n A proof of concept implementation of a complex-valued RBM in pure Python.\n This is an example of how to subclass `CxxMachine` so that the machine will\n be usable with NetKet's C++ core.\n\n This class can be used as a drop-in replacement for `RbmSpin`.\n \"\"\"\n\n def __init__(\n self, hilbert, alpha=None, use_visible_bias=True, use_hidden_bias=True\n ):\n r\"\"\"Constructs a new RBM.\n\n Args:\n hilbert: Hilbert space.\n alpha: `alpha * hilbert.size` is the number of hidden spins.\n use_visible_bias: specifies whether to use a bias for visible\n spins.\n use_hidden_bias: specifies whether to use a bias for hidden spins.\n \"\"\"\n # NOTE: The following call to __init__ is important!\n super(PyRbm, self).__init__(hilbert)\n n = hilbert.size\n if alpha < 0:\n raise ValueError(\"`alpha` should be non-negative\")\n m = int(round(alpha * n))\n self._w = _np.empty([m, n], dtype=_np.complex128)\n self._a = _np.empty(n, dtype=_np.complex128) if use_visible_bias else None\n self._b = _np.empty(m, dtype=_np.complex128) if use_hidden_bias else None\n\n def _number_parameters(self):\n r\"\"\"Returns the number of parameters in the machine. 
We just sum the\n sizes of all the tensors we hold.\n \"\"\"\n return (\n self._w.size\n + (self._a.size if self._a is not None else 0)\n + (self._b.size if self._b is not None else 0)\n )\n\n def _number_visible(self):\n r\"\"\"Returns the number of visible units.\n \"\"\"\n return self._w.shape[1]\n\n def _get_parameters(self):\n r\"\"\"Returns the parameters as a 1D tensor.\n\n This function tries to order parameters in the exact same way as\n ``RbmSpin`` does so that we can do stuff like\n\n >>> import netket\n >>> import numpy\n >>> hilbert = netket.hilbert.Spin(\n graph=netket.graph.Hypercube(length=100, n_dim=1),\n s=1/2.\n )\n >>> cxx_rbm = netket.machine.RbmSpin(hilbert, alpha=3)\n >>> py_rbm = netket.machine.PyRbm(hilbert, alpha=3)\n >>> cxx_rbm.init_random_parameters()\n >>> # Order of parameters is the same, so we can assign one to the\n >>> # other\n >>> py_rbm.parameters = cxx_rbm.parameters\n >>> x = np.array(hilbert.local_states, size=hilbert.size)\n >>> assert numpy.isclose(py_rbm.log_val(x), cxx_rbm.log_val(x))\n \"\"\"\n params = tuple()\n if self._a is not None:\n params += (self._a,)\n if self._b is not None:\n params += (self._b,)\n params += (self._w.reshape(-1, order=\"C\"),)\n return _np.concatenate(params)\n\n def _set_parameters(self, p):\n r\"\"\"Sets parameters from a 1D tensor.\n\n ``self._set_parameters(self._get_parameters())`` is an identity.\n \"\"\"\n i = 0\n if self._a is not None:\n self._a[:] = p[i : i + self._a.size]\n i += self._a.size\n if self._b is not None:\n self._b[:] = p[i : i + self._b.size]\n i += self._b.size\n\n self._w[:] = p[i : i + self._w.size].reshape(self._w.shape, order=\"C\")\n\n def log_val(self, x):\n r\"\"\"Computes the logarithm of the wave function given a spin\n configuration ``x``.\n \"\"\"\n r = _np.dot(self._w, x)\n if self._b is not None:\n r += self._b\n r = _np.sum(PyRbm._log_cosh(r))\n if self._a is not None:\n r += _np.dot(self._a, x)\n # Officially, we should return\n # self._w.shape[0] * 0.6931471805599453 + r\n # but the C++ implementation ignores the \"constant factor\"\n return r\n\n def der_log(self, x):\n r\"\"\"Computes the gradient of the logarithm of the wave function\n given a spin configuration ``x``.\n \"\"\"\n grad = _np.empty(self.n_par, dtype=_np.complex128)\n i = 0\n\n if self._a is not None:\n grad[i : i + self._a.size] = x\n i += self._a.size\n\n tanh_stuff = _np.dot(self._w, x)\n if self._b is not None:\n tanh_stuff += self._b\n tanh_stuff = _np.tanh(tanh_stuff, out=tanh_stuff)\n\n if self._b is not None:\n grad[i : i + self._b.size] = tanh_stuff\n i += self._b.size\n\n out = grad[i : i + self._w.size]\n out.shape = (tanh_stuff.size, x.size)\n _np.outer(tanh_stuff, x, out=out)\n\n return grad\n\n def _is_holomorphic(self):\n r\"\"\"Complex valued RBM a holomorphic function.\n \"\"\"\n return True\n\n def save(self, filename):\n r\"\"\"Saves machine weights to ``filename`` using ``pickle``.\n \"\"\"\n import pickle\n\n with open(filename, \"wb\") as output_file:\n pickle.dump((self._w, self._a, self._b), output_file)\n\n def load(self, filename):\n r\"\"\"Loads machine weights from ``filename`` using ``pickle``.\n \"\"\"\n import pickle\n\n with open(filename, \"rb\") as input_file:\n self._w, self._a, self._b = pickle.load(input_file)\n\n @staticmethod\n def _log_cosh(x):\n # TODO: Handle big numbers properly\n return _np.log(_np.cosh(x))\n"
] | [
[
"numpy.dot",
"numpy.cosh",
"numpy.concatenate",
"numpy.outer",
"numpy.tanh",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
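# --- Illustrative sketch (editor's addition, not part of the captured test
# file above). A plain-numpy reconstruction of the quantity PyRbm.log_val
# computes: log psi(x) = a.x + sum_j log cosh((W x + b)_j), with complex
# parameters laid out like the class's self._w / self._a / self._b. The
# sizes below are arbitrary, chosen only for the demonstration.
import numpy as np

rng = np.random.default_rng(0)
n, m = 4, 8                                   # visible / hidden units
W = rng.normal(size=(m, n)) + 1j * rng.normal(size=(m, n))
a = rng.normal(size=n) + 1j * rng.normal(size=n)
b = rng.normal(size=m) + 1j * rng.normal(size=m)
x = rng.choice([-1.0, 1.0], size=n)           # one spin configuration

log_psi = a @ x + np.sum(np.log(np.cosh(W @ x + b)))
print(log_psi)                                # complex log-amplitude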
jameszhou-gl/CBRE | [
"53c952e0afc74518fc4223f0f20881336df20f95",
"53c952e0afc74518fc4223f0f20881336df20f95"
] | [
"cbre/cbre_net.py",
"cbre/loader.py"
] | [
"import tensorflow as tf\nimport numpy as np\nfrom cbre.util import *\n\n\nclass CBRENet(object):\n \"\"\"\n cbre_net implements the cycly-balanced representation learning for counterfactual inference\n\n The network is implemented as a tensorflow graph. The class constructor\n creates an object containing relevant TF nodes as member variables.\n \"\"\"\n\n def __init__(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):\n \"\"\"\n x The varibales of data\n t The treatment applied to x, t.shape[1]==1\n y_ The true outcome\n p_t The treatment probability in all observations\n z_norm todo unknown\n flags The arg params\n r_alpha The coefficient of reconstruction and cycle loss\n r_lambda The coefficient of regularization of prediction network\n r_beta The coefficient of gradient penalty in GAN\n do_in The val of dropout_in\n do_out The val of dropout_out\n data_x_dim The dim of varibale x\n \"\"\"\n self.variables = {}\n # wd_loss: regularization l2 loss\n self.wd_loss = 0\n\n if flags.nonlin.lower() == 'elu':\n self.nonlin = tf.nn.elu\n else:\n self.nonlin = tf.nn.relu\n\n self._build_graph(x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim)\n\n def _add_variable(self, var, name):\n \"\"\"\n Adds variables to the internal track-keeper\n \"\"\"\n basename = name\n i = 0\n while name in self.variables:\n name = '%s_%d' % (basename, i) # @TODO: not consistent with TF internally if changed\n i += 1\n\n self.variables[name] = var\n\n def _create_variable(self, var, name):\n \"\"\" Create and adds variables to the internal track-keeper \"\"\"\n # tf.get_variable(name=name, initializer=var)\n var = tf.Variable(var, name=name)\n self._add_variable(var, name)\n return var\n\n def _create_variable_with_weight_decay(self, initializer, name, wd):\n \"\"\" Create and adds variables to the internal track-keeper\n and adds it to the list of weight decayed variables \"\"\"\n var = self._create_variable(initializer, name)\n self.wd_loss += wd * tf.nn.l2_loss(var)\n return var\n\n def _build_graph(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):\n \"\"\"\n Constructs a TensorFlow subgraph for causal effect inference.\n Sets the following member variables (to TF nodes):\n\n self.output The output prediction \"y\"\n self.tot_loss The total objective to minimize\n self.pred_loss The prediction term of the objective\n self.weights_in The input/representation layer weights\n self.weights_out The output/post-representation layer weights\n self.weights_pred The (linear) prediction layer weights\n self.h_rep The layer of the penalized representation\n \"\"\"\n self.x = x\n self.t = t\n self.y_ = y_\n self.p_t = p_t\n self.r_alpha = r_alpha\n self.r_lambda = r_lambda\n self.r_beta = r_beta\n self.do_in = do_in\n self.do_out = do_out\n self.z_norm = z_norm\n\n self.encoder_dim = flags.encoder_dim\n encoder_dim = flags.encoder_dim\n self.decoder_dim = flags.decoder_dim\n self.predictor_dim = flags.predictor_dim\n predictor_dim = flags.predictor_dim\n mi_estimator_dim = flags.mi_estimator_dim\n self.discriminator_dim = flags.discriminator_dim\n discriminator_dim = flags.discriminator_dim\n\n \"\"\"\n Network Components\n \"\"\"\n '''\n 1. Encoder Network\n '''\n # Construct Encoder network layers, four layers with size 200\n h_rep, h_rep_norm, weights_in = self._build_encoder(x, data_x_dim, flags)\n\n '''\n 2. 
GAN\n '''\n d0, d1, dp, weights_dis, weights_discore = self._build_adversarial_graph(h_rep_norm, t, encoder_dim,\n discriminator_dim, do_out,\n flags)\n # discriminator\n # with sigmoid\n # discriminator_loss = tf.reduce_mean(tf.nn.softplus(-d0)) + tf.reduce_mean(tf.nn.softplus(-d1) + d1) + dp\n # without sigmoid\n discriminator_loss = -tf.reduce_mean(d0) + tf.reduce_mean(d1) + r_beta * dp\n # encoder\n # with sigmoid\n # rep_loss = tf.reduce_mean(tf.nn.softplus(-d1))\n # without sigmoid\n # todo rep_loss in paper: rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)\n rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)\n # rep_loss = -tf.reduce_mean(d1)\n\n '''\n 3. Reconstruction \n '''\n # graph for reconstruction loss\n x0, recons_x_0, x1, recons_x_1 = self._build_reconstruct_graph(x, t, data_x_dim, flags)\n recons_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - recons_x_0)) + 1.0e-12) + tf.sqrt(\n tf.reduce_mean(tf.square(x1 - recons_x_1)) + 1.0e-12)\n\n '''\n 4. Cycle \n '''\n x0, cycle_x0, x1, cycle_x1 = self._build_cycle_graph(x, t, data_x_dim, flags)\n cycle_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - cycle_x0)) + 1.0e-12) + tf.sqrt(\n tf.reduce_mean(tf.square(x1 - cycle_x1)) + 1.0e-12)\n\n '''\n Predict Networks\n '''\n y, weights_out, weights_pred = self._build_output_graph(h_rep_norm, t, encoder_dim, predictor_dim, do_out,\n flags)\n\n \"\"\" Compute sample reweighting \"\"\"\n if flags.reweight_sample:\n w_t = t / (2 * p_t)\n w_c = (1 - t) / (2 * 1 - p_t)\n sample_weight = w_t + w_c\n else:\n sample_weight = 1.0\n\n self.sample_weight = sample_weight\n\n risk = tf.reduce_mean(sample_weight * tf.square(y_ - y))\n pred_error = tf.sqrt(tf.reduce_mean(tf.square(y_ - y)) + 1.0e-12)\n\n \"\"\" Regularization \"\"\"\n if flags.p_lambda > 0 and flags.rep_weight_decay:\n for i in range(0, flags.layer_num_encoder):\n if not (flags.varsel and i == 0): # No penalty on W in variable selection\n self.wd_loss += tf.nn.l2_loss(weights_in[i])\n\n \"\"\" Total error \"\"\"\n tot_error = risk\n\n if flags.p_lambda > 0:\n tot_error = tot_error + r_lambda * self.wd_loss + recons_loss + cycle_loss\n if flags.coef_recons > 0:\n tot_error += flags.coef_recons * recons_loss\n if flags.coef_cycle:\n tot_error += flags.coef_cycle * cycle_loss\n if flags.coef_d:\n tot_error += flags.coef_d * discriminator_loss\n\n if flags.varsel:\n self.w_proj = tf.placeholder(\"float\", shape=[data_x_dim], name='w_proj')\n self.projection = weights_in[0].assign(self.w_proj)\n\n self.output = y\n self.tot_loss = tot_error\n self.discriminator_loss = discriminator_loss\n self.rep_loss = rep_loss\n self.rec_loss = recons_loss\n self.cycle_loss = cycle_loss\n self.recons_cycle_loss = recons_loss + cycle_loss\n self.pred_loss = pred_error\n self.weights_in = weights_in\n self.weights_out = weights_out\n self.weights_dis = weights_dis\n self.weights_discore = weights_discore\n self.weights_pred = weights_pred\n self.h_rep = h_rep\n self.h_rep_norm = h_rep_norm\n self.dp = dp\n\n def _build_output_0(self, h_input, encoder_dim, predictor_dim, do_out, flags):\n h_out = [h_input]\n dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)\n with tf.variable_scope('pred_0') as scope:\n weights_out = []\n biases_out = []\n\n for i in range(0, flags.layer_num_predictor):\n wo = tf.get_variable(name='w_{}'.format(i),\n initializer=tf.random_normal([dims[i], dims[i + 1]],\n stddev=flags.weight_init / np.sqrt(dims[i])))\n\n weights_out.append(wo)\n\n # biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))\n 
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))\n z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]\n\n h_out.append(self.nonlin(z))\n h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)\n\n weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],\n stddev=flags.weight_init / np.sqrt(predictor_dim)),\n 'w_pred')\n weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],\n stddev=flags.weight_init / np.sqrt(\n predictor_dim)))\n bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')\n\n if flags.varsel or flags.layer_num_predictor == 0:\n self.wd_loss += tf.nn.l2_loss(\n tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient\n else:\n self.wd_loss += tf.nn.l2_loss(weights_pred)\n\n \"\"\" Construct linear classifier \"\"\"\n h_pred = h_out[-1]\n y = tf.matmul(h_pred, weights_pred) + bias_pred\n\n return y, weights_out, weights_pred\n\n def _build_output_1(self, h_input, encoder_dim, predictor_dim, do_out, flags):\n h_out = [h_input]\n dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)\n with tf.variable_scope('pred_1') as scope:\n weights_out = []\n biases_out = []\n\n for i in range(0, flags.layer_num_predictor):\n wo = tf.get_variable(name='w_{}'.format(i),\n initializer=tf.random_normal([dims[i], dims[i + 1]],\n stddev=flags.weight_init / np.sqrt(dims[i])))\n\n weights_out.append(wo)\n\n # biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))\n biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))\n z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]\n\n h_out.append(self.nonlin(z))\n h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)\n\n weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],\n stddev=flags.weight_init / np.sqrt(predictor_dim)),\n 'w_pred')\n weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],\n stddev=flags.weight_init / np.sqrt(\n predictor_dim)))\n bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')\n\n if flags.varsel or flags.layer_num_predictor == 0:\n self.wd_loss += tf.nn.l2_loss(\n tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient\n else:\n self.wd_loss += tf.nn.l2_loss(weights_pred)\n\n \"\"\" Construct linear classifier \"\"\"\n h_pred = h_out[-1]\n y = tf.matmul(h_pred, weights_pred) + bias_pred\n\n return y, weights_out, weights_pred\n\n def _build_output_graph(self, rep, t, encoder_dim, predictor_dim, do_out, flags):\n \"\"\" Construct output/regression layers \"\"\"\n\n if flags.split_output:\n\n i0 = tf.to_int32(tf.where(t < 1)[:, 0])\n i1 = tf.to_int32(tf.where(t > 0)[:, 0])\n\n rep0 = tf.gather(rep, i0)\n rep1 = tf.gather(rep, i1)\n\n y0, weights_out0, weights_pred0 = self._build_output_0(rep0, encoder_dim, predictor_dim, do_out, flags)\n y1, weights_out1, weights_pred1 = self._build_output_1(rep1, encoder_dim, predictor_dim, do_out, flags)\n\n y = tf.dynamic_stitch([i0, i1], [y0, y1])\n weights_out = weights_out0 + weights_out1\n weights_pred = weights_pred0 + weights_pred1\n else:\n h_input = tf.concat(1, [rep, t])\n # y, weights_out, weights_pred = self._build_output(h_input, encoder_dim + 1, predictor_dim, do_out, flags)\n y, weights_out, weights_pred = None, None, None\n\n return y, weights_out, weights_pred\n\n def _build_encoder(self, x, data_x_dim, flags):\n with 
tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope:\n weights_in = []\n biases_in = []\n\n if flags.batch_norm:\n bn_biases = []\n bn_scales = []\n\n h_in = [x]\n\n for i in range(0, flags.layer_num_encoder):\n if i == 0:\n \"\"\" If using variable selection, first layer is just rescaling\"\"\"\n if flags.varsel:\n weights_in.append(tf.get_variable(name='wg_{}'.format(i),\n initializer=1.0 / data_x_dim * tf.ones([data_x_dim])))\n else:\n wg = tf.get_variable(name='wg_{}'.format(i),\n initializer=tf.random_normal([data_x_dim, self.encoder_dim],\n stddev=flags.weight_init / np.sqrt(\n data_x_dim)))\n weights_in.append(wg)\n else:\n wg = tf.get_variable(name='wg_{}'.format(i),\n initializer=tf.random_normal([self.encoder_dim, self.encoder_dim],\n stddev=flags.weight_init / np.sqrt(\n self.encoder_dim)))\n weights_in.append(wg)\n\n biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, self.encoder_dim])))\n # z equals outcome of each layer in Encoder Network.\n z = tf.matmul(h_in[i], weights_in[i]) + biases_in[i]\n\n if flags.batch_norm:\n batch_mean, batch_var = tf.nn.moments(z, [0])\n\n if flags.normalization == 'bn_fixed':\n z = tf.nn.batch_normalization(z, batch_mean, batch_var, 0, 1, 1e-3)\n else:\n # bn_biases.append(tf.Variable(tf.zeros([self.encoder_dim])))\n bn_biases.append(\n tf.get_variable(name='bn_b_{}'.format(i), initializer=tf.zeros([self.encoder_dim])))\n # bn_scales.append(tf.Variable(tf.ones([self.encoder_dim])))\n bn_scales.append(\n tf.get_variable(name='bn_s_{}'.format(i), initializer=tf.ones([self.encoder_dim])))\n z = tf.nn.batch_normalization(z, batch_mean, batch_var, bn_biases[-1], bn_scales[-1], 1e-3)\n\n h_in.append(self.nonlin(z))\n h_in[i + 1] = tf.nn.dropout(h_in[i + 1], self.do_in)\n\n h_rep = h_in[-1]\n\n # todo normalization meaning?\n if flags.normalization == 'divide':\n h_rep_norm = h_rep / safe_sqrt(tf.reduce_sum(tf.square(h_rep), axis=1, keep_dims=True) + 1.0e-12)\n else:\n h_rep_norm = 1.0 * h_rep\n return h_rep, h_rep_norm, weights_in\n\n def _build_decoder(self, h_rep, data_x_dim, flags, suffix='0'):\n with tf.variable_scope('decoder_' + suffix, reuse=tf.AUTO_REUSE) as scope:\n weights_in = []\n biases_in = []\n recons_x = [h_rep]\n decoder_dim = flags.decoder_dim\n for i in range(0, flags.layer_num_decoder):\n if i == 0:\n weights_in.append(tf.get_variable(name='wg_{}'.format(i),\n initializer=tf.random_normal([flags.encoder_dim, decoder_dim],\n stddev=flags.weight_init / np.sqrt(\n flags.encoder_dim))))\n biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))\n elif i == flags.layer_num_decoder - 1:\n weights_in.append(\n tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, data_x_dim],\n stddev=flags.weight_init / np.sqrt(\n decoder_dim))))\n biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, data_x_dim])))\n\n else:\n weights_in.append(\n tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, decoder_dim],\n stddev=flags.weight_init / np.sqrt(\n decoder_dim))))\n biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))\n\n # z equals outcome of each layer in Encoder Network.\n z = tf.matmul(recons_x[i], weights_in[i]) + biases_in[i]\n\n recons_x.append(self.nonlin(z))\n recons_x[i + 1] = tf.nn.dropout(recons_x[i + 1], self.do_in)\n\n recons_x = recons_x[-1]\n return recons_x, weights_in\n\n def _build_discriminator_graph_mine(self, x, hrep, 
data_x_dim, encoder_dim, mi_estimator_dim, flags):\n \"\"\" Construct MI estimation layers \"\"\"\n # two layers with size 200\n with tf.variable_scope('gmi') as scope:\n input_num = tf.shape(x)[0]\n x_shuffle = tf.random_shuffle(x)\n x_conc = tf.concat([x, x_shuffle], axis=0)\n y_conc = tf.concat([hrep, hrep], axis=0)\n\n # forward\n # [25, 200]\n weights_mi_x = self._create_variable(tf.random_normal([data_x_dim, mi_estimator_dim],\n stddev=flags.weight_init / np.sqrt(data_x_dim)),\n 'weights_mi_x')\n biases_mi_x = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_x')\n # [, 200]\n lin_x = tf.matmul(x_conc, weights_mi_x) + biases_mi_x\n # [200, 200]\n weights_mi_y = self._create_variable(tf.random_normal([encoder_dim, mi_estimator_dim],\n stddev=flags.weight_init / np.sqrt(encoder_dim)),\n 'weights_mi_y')\n biases_mi_y = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_y')\n # [, 200]\n lin_y = tf.matmul(y_conc, weights_mi_y) + biases_mi_y\n\n # lin_conc = tf.nn.relu(lin_x + lin_y)\n lin_conc = self.nonlin(lin_x + lin_y)\n\n weights_mi_pred = self._create_variable(tf.random_normal([mi_estimator_dim, 1],\n stddev=flags.weight_init / np.sqrt(\n mi_estimator_dim)),\n 'gmi_p')\n biases_mi_pred = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_pred')\n gmi_output = tf.matmul(lin_conc, weights_mi_pred) + biases_mi_pred\n # real estimator outcome: shape=[input_num, 1]\n real_estimate = gmi_output[:input_num]\n # fake estimator outcome: shape=[input_num, 1]\n fake_estimate = gmi_output[input_num:]\n\n return real_estimate, fake_estimate, weights_mi_x, weights_mi_y, weights_mi_pred\n\n def _build_discriminator_adversarial(self, hrep, encoder_dim, discriminator_dim, do_out, flags):\n \"\"\" Construct adversarial discriminator layers \"\"\"\n with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope:\n h_dis = [hrep]\n\n weights_dis = []\n biases_dis = []\n for i in range(0, flags.layer_num_discriminator):\n\n if i == 0:\n weights_dis.append(tf.get_variable(name='wg_{}'.format(i),\n initializer=tf.random_normal([encoder_dim, discriminator_dim],\n stddev=flags.weight_init / np.sqrt(\n encoder_dim))))\n else:\n weights_dis.append(tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal(\n [discriminator_dim, discriminator_dim],\n stddev=flags.weight_init / np.sqrt(\n discriminator_dim))))\n biases_dis.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, discriminator_dim])))\n z = tf.matmul(h_dis[i], weights_dis[i]) + biases_dis[i]\n h_dis.append(self.nonlin(z))\n h_dis[i + 1] = tf.nn.dropout(h_dis[i + 1], do_out)\n\n weights_discore = tf.get_variable(initializer=tf.random_normal([discriminator_dim, 1],\n stddev=flags.weight_init / np.sqrt(\n discriminator_dim)), name='dc_p')\n bias_dc = tf.get_variable(initializer=tf.zeros([1]), name='dc_b_p')\n\n h_score = h_dis[-1]\n dis_score = tf.matmul(h_score, weights_discore) + bias_dc\n\n return dis_score, weights_dis, weights_discore\n\n def _build_adversarial_graph(self, rep, t, encoder_dim, discriminator_dim, do_out, flags):\n \"\"\"\n Construct adversarial discriminator\n \"\"\"\n # three layers with size 200\n\n i0 = tf.to_int32(tf.where(t < 1)[:, 0])\n i1 = tf.to_int32(tf.where(t > 0)[:, 0])\n\n rep0 = tf.gather(rep, i0)\n rep1 = tf.gather(rep, i1)\n\n z_rep0 = tf.reduce_max(rep0, axis=0, keep_dims=True)\n z_rep1 = tf.reduce_max(rep1, axis=0, keep_dims=True)\n\n z_rep0_conc = tf.concat([z_rep0, self.z_norm], axis=1)\n z_rep1_conc = tf.concat([z_rep1, 
self.z_norm], axis=1)\n\n d0, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep0_conc, encoder_dim + encoder_dim,\n discriminator_dim,\n do_out, flags)\n d1, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep1_conc, encoder_dim + encoder_dim,\n discriminator_dim,\n do_out, flags)\n\n # gradient penalty\n alpha_dist = tf.contrib.distributions.Uniform(low=0., high=1.)\n alpha = alpha_dist.sample((1, 1))\n interpolated = z_rep1 + alpha * (z_rep0 - z_rep1)\n interpolated_conc = tf.concat([interpolated, self.z_norm], axis=1)\n inte_logit, weights_dis, weights_discore = self._build_discriminator_adversarial(interpolated_conc,\n encoder_dim + encoder_dim,\n discriminator_dim, do_out,\n flags)\n gradients = tf.gradients(inte_logit, [interpolated])[0]\n grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]) + 1.0e-12)\n gradient_penalty = tf.reduce_mean(tf.square(grad_l2 - 1.0))\n\n return d0, d1, gradient_penalty, weights_dis, weights_discore\n\n def _build_reconstruct_graph(self, x, t, data_x_dim, flags):\n \"\"\" construct graph for later computing reconstruction loss easily\n\n Parameters:\n x The varibales of data\n t The treatment applied to x\n\n Returns:\n x0 x[t=0]\n reconstruct_x reconstruct x when pass encoder and decoder networks\n \"\"\"\n i0 = tf.to_int32(tf.where(t < 1)[:, 0])\n i1 = tf.to_int32(tf.where(t > 0)[:, 0])\n\n x0 = tf.gather(x, i0)\n x1 = tf.gather(x, i1)\n h_rep_0, h_rep_norm_0, weights_in_0 = self._build_encoder(x0, data_x_dim, flags)\n h_rep_1, h_rep_norm_1, weights_in_1 = self._build_encoder(x1, data_x_dim, flags)\n\n recons_x_0, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='0')\n recons_x_1, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='1')\n return x0, recons_x_0, x1, recons_x_1\n\n def _build_cycle_graph(self, x, t, data_x_dim, flags):\n \"\"\" construct graph for later computing cycle loss easily\n\n Parameters:\n x The varibales of data\n t The treatment applied to x\n\n Returns:\n x0 x[t=0]\n reconstruct_x reconstruct x when pass encoder and decoder networks\n \"\"\"\n i0 = tf.to_int32(tf.where(t < 1)[:, 0])\n i1 = tf.to_int32(tf.where(t > 0)[:, 0])\n\n x0 = tf.gather(x, i0)\n x1 = tf.gather(x, i1)\n # cycle x0-x1'-x0\n _, h_rep_norm_0, _ = self._build_encoder(x0, data_x_dim, flags)\n temp_x_0_in_1, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='1')\n _, cyc_h_rep_norm_0, _ = self._build_encoder(temp_x_0_in_1, data_x_dim, flags)\n cycle_x0, _ = self._build_decoder(cyc_h_rep_norm_0, data_x_dim, flags, suffix='0')\n\n # cycle x1-x0'-x1\n _, h_rep_norm_1, _ = self._build_encoder(x1, data_x_dim, flags)\n temp_x_1_in_0, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='0')\n _, cyc_h_rep_norm_1, _ = self._build_encoder(temp_x_1_in_0, data_x_dim, flags)\n cycle_x1, _ = self._build_decoder(cyc_h_rep_norm_1, data_x_dim, flags, suffix='1')\n\n return x0, cycle_x0, x1, cycle_x1\n",
"import os\nimport numpy as np\n\nfrom rbci.logger import Logger as Log\n\n\ndef load_result_file(file):\n arr = np.load(file)\n\n D = dict([(k, arr[k]) for k in arr.keys()])\n\n return D\n\n\ndef load_config(cfgfile):\n \"\"\" Parses a configuration file \"\"\"\n\n cfgf = open(cfgfile, 'r')\n cfg = {}\n for l in cfgf:\n ps = [p.strip() for p in l.split(':')]\n if len(ps) == 2:\n try:\n cfg[ps[0]] = float(ps[1])\n except ValueError:\n cfg[ps[0]] = ps[1]\n if cfg[ps[0]] == 'False':\n cfg[ps[0]] = False\n elif cfg[ps[0]] == 'True':\n cfg[ps[0]] = True\n cfgf.close()\n return cfg\n\n\ndef load_single_result(result_dir):\n if Log.VERBOSE:\n print('Loading %s...' % result_dir)\n\n config_path = '%s/config.txt' % result_dir\n has_config = os.path.isfile(config_path)\n if not has_config:\n print('WARNING: Could not find config.txt for %s. Skipping.' % os.path.basename(result_dir))\n config = None\n else:\n config = load_config(config_path)\n\n train_path = '%s/result.npz' % result_dir\n test_path = '%s/result.test.npz' % result_dir\n\n has_test = os.path.isfile(test_path)\n\n try:\n train_results = load_result_file(train_path)\n except:\n 'WARNING: Couldnt load result file. Skipping'\n return None\n\n n_rep = np.max([config['repetitions'], config['experiments']])\n\n # if len(train_results['pred'].shape) < 4 or train_results['pred'].shape[2] < n_rep:\n # print('WARNING: Experiment %s appears not to have finished. Skipping.' % result_dir)\n # return None\n\n if has_test:\n test_results = load_result_file(test_path)\n else:\n test_results = None\n\n return {'train': train_results, 'test': test_results, 'config': config}\n\n\ndef load_results(output_dir):\n if Log.VERBOSE:\n print('Loading results from %s...' % output_dir)\n\n ''' Detect results structure '''\n # Single result\n if os.path.isfile('%s/results.npz' % output_dir):\n # @TODO: Implement\n pass\n\n # Multiple results\n files = ['%s/%s' % (output_dir, f) for f in os.listdir(output_dir)]\n exp_dirs = [f for f in files if os.path.isdir(f)\n if os.path.isfile('%s/result.npz' % f)]\n\n if Log.VERBOSE:\n print('Found %d experiment configurations.' % len(exp_dirs))\n\n # Load each result folder\n results = []\n for dir in exp_dirs:\n dir_result = load_single_result(dir)\n if dir_result is not None:\n results.append(dir_result)\n\n return results\n\n\ndef load_data(datapath):\n \"\"\" Load dataset \"\"\"\n arr = np.load(datapath)\n xs = arr['x']\n\n HAVE_TRUTH = False\n SPARSE = False\n\n if len(xs.shape) == 1:\n SPARSE = True\n\n ts = arr['t']\n yfs = arr['yf']\n try:\n es = arr['e']\n except:\n es = None\n try:\n ate = np.mean(arr['ate'])\n except:\n ate = None\n try:\n ymul = arr['ymul'][0, 0]\n yadd = arr['yadd'][0, 0]\n except:\n ymul = 1\n yadd = 0\n try:\n ycfs = arr['ycf']\n mu0s = arr['mu0']\n mu1s = arr['mu1']\n HAVE_TRUTH = True\n except:\n print('Couldn\\'t find ground truth. Proceeding...')\n ycfs = None;\n mu0s = None;\n mu1s = None\n\n data = {'x': xs, 't': ts, 'e': es, 'yf': yfs, 'ycf': ycfs, \\\n 'mu0': mu0s, 'mu1': mu1s, 'ate': ate, 'YMUL': ymul, \\\n 'YADD': yadd, 'HAVE_TRUTH': HAVE_TRUTH, \\\n 'SPARSE': SPARSE}\n\n return data\n"
] | [
[
"tensorflow.concat",
"numpy.sqrt",
"tensorflow.zeros",
"tensorflow.nn.l2_loss",
"tensorflow.where",
"tensorflow.random_shuffle",
"tensorflow.Variable",
"tensorflow.nn.moments",
"tensorflow.gradients",
"tensorflow.gather",
"tensorflow.dynamic_stitch",
"tensorflow.square",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.nn.batch_normalization",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.contrib.distributions.Uniform",
"tensorflow.reduce_max",
"tensorflow.reduce_mean",
"tensorflow.slice",
"tensorflow.ones",
"tensorflow.variable_scope"
],
[
"numpy.load",
"numpy.max",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
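Note on the CBRENet record above: in `_build_graph`, the control-group weight is written as `w_c = (1 - t) / (2 * 1 - p_t)`, which Python's operator precedence evaluates as `(1 - t) / (2 - p_t)`. The CFR-style balancing weight this appears to intend is `(1 - t) / (2 * (1 - p_t))`. Below is a minimal NumPy sketch of the intended reweighting; the input arrays are illustrative and not taken from the record.

import numpy as np

# Treatment indicators and empirical treatment probability (illustrative data).
t = np.array([1.0, 0.0, 1.0, 0.0])
p_t = t.mean()  # here 0.5

# CFR-style balancing weights: treated samples weighted by 1/(2*p_t),
# controls by 1/(2*(1-p_t)); note the parentheses around (1 - p_t).
w_t = t / (2 * p_t)
w_c = (1 - t) / (2 * (1 - p_t))
sample_weight = w_t + w_c
print(sample_weight)  # [1. 1. 1. 1.] for a balanced sample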
caltech-netlab/gym-acnportal | [
"cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46",
"cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46"
] | [
"gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py",
"tutorials/lessonx_training_running_rl_agent.py"
] | [
"# coding=utf-8\n\"\"\" Tests for SimAction and action space functions. \"\"\"\nimport unittest\nfrom typing import Callable, Dict, List, Any\nfrom unittest.mock import create_autospec\n\nimport numpy as np\nfrom gym import Space\n\nfrom ..action_spaces import (\n SimAction,\n single_charging_schedule,\n zero_centered_single_charging_schedule,\n)\nfrom ...interfaces import GymTrainedInterface\n\n\nclass TestSimAction(unittest.TestCase):\n # noinspection PyMissingOrEmptyDocstring\n @classmethod\n def setUpClass(cls) -> None:\n # The type here is Any as space_function is actually a Mock\n # object, but there's no Mock type in the typing library.\n cls.space_function: Any = create_autospec(lambda interface: Space())\n cls.to_schedule: Callable[\n [GymTrainedInterface, np.ndarray], Dict[str, List[float]]\n ] = lambda interface, array: {\"a\": [0]}\n cls.name: str = \"stub_action\"\n cls.sim_action: SimAction = SimAction(\n cls.space_function, cls.to_schedule, cls.name\n )\n cls.interface: GymTrainedInterface = create_autospec(GymTrainedInterface)\n\n def test_correct_on_init_sim_action_name(self) -> None:\n self.assertEqual(self.sim_action.name, self.name)\n\n def test_get_space(self) -> None:\n self.sim_action.get_space(self.interface)\n self.space_function.assert_called_once()\n\n def test_get_schedule(self) -> None:\n array: np.ndarray = np.array([[1, 0], [0, 1]])\n self.assertEqual(\n self.sim_action.get_schedule(self.interface, array), {\"a\": [0]}\n )\n\n\nclass TestSingleChargingSchedule(unittest.TestCase):\n # Some class variables are defined outside of setUpClass so that\n # the code inspector knows that inherited classes have these\n # attributes.\n max_rate: float = 16.0\n min_rate: float = 0.0\n negative_rate: float = -4.0\n deadband_rate: float = 6.0\n\n # noinspection PyMissingOrEmptyDocstring\n @classmethod\n def setUpClass(cls) -> None:\n cls.sim_action: SimAction = single_charging_schedule()\n cls.station_ids: List[str] = [\"T1\", \"T2\"]\n cls.offset: float = 0.5\n\n def _interface_builder(interface: Any, min_rate: float) -> Any:\n interface.station_ids = cls.station_ids\n interface.max_pilot_signal = lambda station_id: cls.max_rate\n interface.min_pilot_signal = lambda station_id: (\n min_rate if station_id == cls.station_ids[1] else cls.min_rate\n )\n return interface\n\n cls.interface: Any = _interface_builder(\n create_autospec(GymTrainedInterface), cls.min_rate\n )\n cls.interface_negative_min: Any = _interface_builder(\n create_autospec(GymTrainedInterface), cls.negative_rate\n )\n cls.interface_deadband_min: Any = _interface_builder(\n create_autospec(GymTrainedInterface), cls.deadband_rate\n )\n\n def test_correct_on_init_single_name(self) -> None:\n self.assertEqual(self.sim_action.name, \"single schedule\")\n\n def _test_space_function_helper(\n self, interface: GymTrainedInterface, min_rate: float, max_rate: float\n ) -> None:\n out_space: Space = self.sim_action.get_space(interface)\n self.assertEqual(out_space.shape, (len(self.station_ids),))\n np.testing.assert_equal(out_space.low, 2 * [min_rate])\n np.testing.assert_equal(out_space.high, 2 * [max_rate])\n self.assertEqual(out_space.dtype, \"float\")\n\n def test_single_space_function(self) -> None:\n self._test_space_function_helper(self.interface, self.min_rate, self.max_rate)\n\n def test_single_space_function_negative_min(self) -> None:\n self._test_space_function_helper(\n self.interface_negative_min, self.negative_rate, self.max_rate\n )\n\n def test_single_space_function_deadband_min(self) -> None:\n 
self._test_space_function_helper(\n self.interface_deadband_min, self.min_rate, self.max_rate\n )\n\n def test_single_to_schedule(self) -> None:\n good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(\n self.interface,\n np.array(\n [self.min_rate + self.offset, (self.max_rate - self.min_rate) / 2]\n ),\n )\n self.assertEqual(\n good_schedule,\n {\n self.station_ids[0]: [self.min_rate + self.offset],\n self.station_ids[1]: [(self.max_rate - self.min_rate) / 2],\n },\n )\n\n def test_single_to_bad_schedule(self) -> None:\n # The get_schedule function does not test if the input schedule\n # array is within the action space.\n bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(\n self.interface,\n np.array([self.min_rate - self.offset, self.max_rate + self.offset]),\n )\n self.assertEqual(\n bad_schedule,\n {\n self.station_ids[0]: [self.min_rate - self.offset],\n self.station_ids[1]: [self.max_rate + self.offset],\n },\n )\n\n def test_single_error_schedule(self) -> None:\n with self.assertRaises(TypeError):\n _ = self.sim_action.get_schedule(\n self.interface,\n np.array(\n [[self.min_rate - self.offset], [self.max_rate + self.offset]]\n ),\n )\n\n\nclass TestZeroCenteredSingleChargingSchedule(TestSingleChargingSchedule):\n # noinspection PyMissingOrEmptyDocstring\n @classmethod\n def setUpClass(cls) -> None:\n super().setUpClass()\n cls.sim_action: SimAction = zero_centered_single_charging_schedule()\n cls.shifted_max = cls.max_rate - (cls.max_rate + cls.min_rate) / 2\n cls.shifted_minimums = [\n cls.min_rate - (cls.max_rate + cls.min_rate) / 2,\n cls.negative_rate - (cls.max_rate + cls.negative_rate) / 2,\n cls.min_rate - (cls.max_rate + cls.deadband_rate) / 2,\n ]\n cls.negative_max_shift = cls.max_rate - (cls.max_rate + cls.negative_rate) / 2\n\n def test_correct_on_init_single_name(self) -> None:\n self.assertEqual(self.sim_action.name, \"zero-centered single schedule\")\n\n def test_single_space_function(self) -> None:\n self._test_space_function_helper(\n self.interface, self.shifted_minimums[0], self.shifted_max\n )\n\n def test_single_space_function_negative_min(self) -> None:\n self._test_space_function_helper(\n self.interface_negative_min,\n self.shifted_minimums[1],\n self.negative_max_shift,\n )\n\n def test_single_space_function_deadband_min(self) -> None:\n self._test_space_function_helper(\n self.interface_deadband_min, self.shifted_minimums[2], self.shifted_max\n )\n\n def test_single_to_bad_schedule(self) -> None:\n # The get_schedule function does not test if the input schedule\n # array is within the action space.\n bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(\n self.interface,\n np.array([self.min_rate - self.offset, self.max_rate + self.offset]),\n )\n self.assertEqual(\n bad_schedule,\n {\n self.station_ids[0]: [\n self.min_rate - self.offset + (self.max_rate + self.min_rate) / 2\n ],\n self.station_ids[1]: [\n self.max_rate + self.offset + (self.max_rate + self.min_rate) / 2\n ],\n },\n )\n\n def test_single_to_schedule(self) -> None:\n good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(\n self.interface,\n np.array(\n [\n self.min_rate - (self.max_rate + self.min_rate) / 2,\n self.max_rate - (self.max_rate + self.min_rate) / 2,\n ]\n ),\n )\n self.assertEqual(\n good_schedule,\n {\n self.station_ids[0]: [self.min_rate],\n self.station_ids[1]: [self.max_rate],\n },\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# coding=utf-8\n\"\"\"\nACN-Sim Tutorial: Lesson X\nTraining and Running a Reinforcement Learning Agent on ACN-Sim\nby Sunash Sharma\nLast updated: 02/27/2020\n\nIt is strongly suggested that this tutorial is run in its own\nenvironment (e.g. conda or pyenv), as it will require dependencies\nnot required by the rest of gym-acnportal.\n\nIn this lesson we will learn how to train a reinforcement learning (\nRL) agent and run it using OpenAI Gym environments that wrap ACN-Sim.\nFor this example we will be using the stable-baselines proximal\npolicy optimization (PPO2) algorithm. As such, running this tutorial\nrequires the stable-baselines package.\n\nNote: This tutorial uses stable_baselines: https://github.com/hill-a/stable-baselines\nfor baseline algorithms. As of this writing, stable_baselines requires Tensorflow and\nTensorflow gpu <2.0.0, >=1.8.0, so you may need to install in a new environment to\nrun this tutorial.\n\n\"\"\"\n\nimport os\nimport random\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom typing import List, Callable, Optional, Dict, Any\nimport numpy as np\nimport gym\nimport pytz\nfrom gym.wrappers import FlattenObservation\nfrom gym_acnportal import GymTrainedInterface, GymTrainingInterface\nfrom matplotlib import pyplot as plt\nfrom stable_baselines import PPO2\nfrom stable_baselines.common import BaseRLModel\nfrom stable_baselines.common.vec_env import DummyVecEnv\n\nfrom acnportal import acnsim\nfrom acnportal.acnsim import events, models, Simulator\n\nfrom gym_acnportal.algorithms import SimRLModelWrapper\nfrom gym_acnportal.gym_acnsim.envs.action_spaces import SimAction\nfrom gym_acnportal.gym_acnsim.envs import (\n BaseSimEnv,\n reward_functions,\n CustomSimEnv,\n default_action_object,\n default_observation_objects,\n)\nfrom gym_acnportal.gym_acnsim.envs.observation import SimObservation\nfrom acnportal.algorithms import (\n BaseAlgorithm,\n Interface,\n SortedSchedulingAlgo,\n earliest_deadline_first,\n first_come_first_served,\n RoundRobin,\n)\n\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"True\"\n\n\n# For this lesson, we will use a simple example. Imagine we have a\n# single charger, with EV's plugging in over a set duration. Each EV\n# has a random arrival and departure time, requesting an amount of\n# energy that equates to a laxity of $d/2$, where $d$ is staying\n# duration. (i.e. we may charge at half the maximum rate for the entire\n# staying time and deliver all the energy requested). First, let's make\n# some functions to generate Simulation instances that simulate this\n# scenario. We'll start by defining a function which generates random\n# plugins for a single EVSE.\ndef random_plugin(\n num, time_limit, evse, laxity_ratio=1 / 2, max_rate=32, voltage=208, period=1\n) -> List[events.Event]:\n \"\"\" Returns a list of num random plugin events occurring anytime\n from time 0 to time_limit. Each plugin has a random arrival and\n departure under the time limit, and a satisfiable requested\n energy assuming no other cars plugged in. 
Each EV has initial\n laxity equal to half the staying duration unless otherwise\n specified.\n \n The plugins occur for a single EVSE, whose maximal rate and\n voltage are assumed to be 32 A and 208 V, respectively, unless\n otherwise specified.\n\n Args:\n num (int): Number of random plugin\n time_limit (int):\n evse (str):\n laxity_ratio (float):\n max_rate (float):\n voltage (float):\n period (int):\n \"\"\"\n out_event_lst: List[events.Event] = []\n times = []\n i = 0\n while i < 2 * num:\n random_timestep = random.randint(0, time_limit)\n if random_timestep not in times:\n times.append(random_timestep)\n i += 1\n times = sorted(times)\n battery = models.Battery(100, 0, 100)\n for i in range(num):\n arrival_time = times[2 * i]\n departure_time = times[2 * i + 1]\n requested_energy = (\n (departure_time - arrival_time)\n / (60 / period)\n * max_rate\n * voltage\n / (1 / laxity_ratio)\n )\n ev = models.EV(\n arrival_time,\n departure_time,\n requested_energy,\n evse,\n f\"rs-{evse}-{i}\",\n battery,\n )\n out_event_lst.append(events.PluginEvent(arrival_time, ev))\n return out_event_lst\n\n\n# Since the above event generation is stochastic, we'll want to\n# completely rebuild the simulation each time the environment is\n# reset, so that the next simulation has a new event queue. As such,\n# we will define a simulation generating function.\ndef _random_sim_builder(\n algorithm: Optional[BaseAlgorithm], interface_type: type\n) -> Simulator:\n timezone = pytz.timezone(\"America/Los_Angeles\")\n start = timezone.localize(datetime(2018, 9, 5))\n period = 1\n\n # Make random event queue\n cn = acnsim.sites.simple_acn(\n [\"EVSE-001\", \"EVSE-002\"], aggregate_cap=32 * 208 / 1000\n )\n event_list = []\n for station_id in cn.station_ids:\n event_list.extend(random_plugin(10, 100, station_id))\n event_queue = events.EventQueue(event_list)\n\n # Simulation to be wrapped\n return acnsim.Simulator(\n deepcopy(cn),\n algorithm,\n deepcopy(event_queue),\n start,\n period=period,\n verbose=False,\n interface_type=interface_type,\n )\n\n\ndef interface_generating_function() -> Interface:\n \"\"\"\n Initializes a simulation with random events on a 1 phase, 1\n constraint ACN (simple_acn), with 1 EVSE\n \"\"\"\n schedule_rl = None\n # Simulation to be wrapped\n sim = _random_sim_builder(schedule_rl, GymTrainingInterface)\n return sim.generate_interface(GymTrainingInterface)\n\n\n# ACN-Sim gym environments wrap an interface to an ACN-Sim\n# Simulation. These environments allow for customizable observations,\n# reward functions, and actions through the CustomSimEnv class,\n# and for rebuilding through the RebuildingSimEnv class (the\n# RebuildingSimEnv class extends the CustomSimEnv class, and so has\n# all the customization features of the latter). As an example,\n# let's make a rebuilding simulation environment with the following\n# characteristics:\n#\n# - Observations:\n# - Arrival times of all currently plugged-in EVs.\n# - Departure times of all currently plugged-in EVs.\n# - Remaining demand of all currently plugged-in EVs.\n# - Constraint matrix of the network.\n# - Limiting constraint magnitudes of the network.\n# - Current timestep of the simulation\n# - Action:\n# - A zero-centered array of pilot signals. 
A 0 entry in the array\n# corresponds to a charging rate of 16 A.\n# - Rewards:\n# - A negative reward for each amp of violation of individual EVSE\n# constraints.\n# - A negative reward for each amp of pilot signal delivered to an\n# EVSE with no EV plugged in.\n# - A negative reward for each amp of network constraint violation.\n# - A positive charging reward for each amp of charge delivered if\n# the above penalties are all 0.\n#\n# The observations, actions, and rewards listed here are all already\n# encoded in the `gym_acnsim` package; see the package documentation\n# for more details. Broadly, each observation object has space and\n# observation generating functions. Each action is an object with\n# space and schedule generating functions. Each reward is a function\n# of the environment, outputting a number. The environment described\n# here is generated by the make_rebuilding_default_env function from\n# the gym_acnsim object; see the code there for more details. The\n# `gym_acnsim` package provides `'default-rebuilding-acnsim-v0'`,\n# a registered gym environment that provides this functionality. To\n# make this environment, we need to input as a `kwarg` the\n# `sim_gen_func` we defined earlier.\nvec_env = DummyVecEnv(\n [\n lambda: FlattenObservation(\n gym.make(\n \"default-rebuilding-acnsim-v0\",\n interface_generating_function=interface_generating_function,\n )\n )\n ]\n)\nmodel = PPO2(\"MlpPolicy\", vec_env, verbose=2)\nnum_iterations: int = int(1e6)\nmodel_name: str = f\"PPO2_{num_iterations}_test_{'default_rebuilding-1e6'}.zip\"\n# model.learn(num_iterations)\n# model.save(model_name)\n\n# We've trained the above model for 10000 iterations. Packaged with this\n# library is the same model trained for 1000000 iterations, which we\n# will now load\nmodel.load(model_name)\n#\n#\n# This is a stable_baselines PPO2 model. PPO2 requires vectorized\n# environments to run, so the model wrapper should convert between\n# vectorized and non-vectorized environments.\nclass StableBaselinesRLModel(SimRLModelWrapper):\n \"\"\" An RL model wrapper that wraps stable_baselines style models.\n \"\"\"\n\n model: BaseRLModel\n\n def predict(\n self,\n observation: object,\n reward: float,\n done: bool,\n info: Dict[Any, Any] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\" See SimRLModelWrapper.predict(). 
\"\"\"\n return self.model.predict(observation, **kwargs)\n\n\nclass GymTrainedAlgorithmVectorized(BaseAlgorithm):\n \"\"\" Abstract algorithm class for Simulations using a\n reinforcement learning agent that operates in an Open AI Gym\n environment that is vectorized via stable-baselines VecEnv style\n constructions.\n\n Implements abstract class BaseAlgorithm.\n\n Vectorized environments in stable-baselines do not inherit from\n gym Env, so we must define a new algorithm class that handles\n models that use these environments.\n\n Args:\n max_recompute (int): See BaseAlgorithm.\n \"\"\"\n\n _env: Optional[DummyVecEnv]\n max_recompute: Optional[int]\n _model: Optional[SimRLModelWrapper]\n\n def __init__(self, max_recompute: int = 1) -> None:\n super().__init__()\n self._env = None\n self.max_recompute = max_recompute\n self._model = None\n\n def __deepcopy__(\n self, memodict: Optional[Dict] = None\n ) -> \"GymTrainedAlgorithmVectorized\":\n return type(self)(max_recompute=self.max_recompute)\n\n def register_interface(self, interface: Interface) -> None:\n \"\"\" NOTE: Registering an interface sets the environment's\n interface to GymTrainedInterface.\n \"\"\"\n if not isinstance(interface, GymTrainedInterface):\n gym_interface: GymTrainedInterface = GymTrainedInterface.from_interface(\n interface\n )\n else:\n gym_interface: GymTrainedInterface = interface\n super().register_interface(gym_interface)\n if self._env is not None:\n self.env.interface = interface\n\n @property\n def env(self) -> DummyVecEnv:\n \"\"\" Return the algorithm's gym environment.\n\n Returns:\n DummyVecEnv: A gym environment that wraps a simulation.\n\n Raises:\n ValueError: Exception raised if vec_env is accessed prior to\n an vec_env being registered.\n \"\"\"\n if self._env is not None:\n return self._env\n else:\n raise ValueError(\n \"No vec_env has been registered yet. Please call \"\n \"register_env with an appropriate environment before \"\n \"attempting to call vec_env or schedule.\"\n )\n\n def register_env(self, env: DummyVecEnv) -> None:\n \"\"\" Register a model that outputs schedules for the simulation.\n\n Args:\n env (DummyVecEnv): An vec_env wrapping a simulation.\n\n Returns:\n None\n \"\"\"\n self._env = env\n\n @property\n def model(self) -> SimRLModelWrapper:\n \"\"\" Return the algorithm's predictive model.\n\n Returns:\n SimRLModelWrapper: A predictive model that returns an array\n of actions given an environment wrapping a simulation.\n\n Raises:\n ValueError: Exception raised if model is accessed prior to\n a model being registered.\n \"\"\"\n if self._model is not None:\n return self._model\n else:\n raise ValueError(\n \"No model has been registered yet. Please call \"\n \"register_model with an appropriate model before \"\n \"attempting to call model or schedule.\"\n )\n\n def register_model(self, new_model: SimRLModelWrapper) -> None:\n \"\"\" Register a model that outputs schedules for the simulation.\n\n Args:\n new_model (SimRLModelWrapper): A model that can be used for\n predictions in ACN-Sim.\n\n Returns:\n None\n \"\"\"\n self._model = new_model\n\n def schedule(self, active_evs) -> Dict[str, List[float]]:\n \"\"\" Creates a schedule of charging rates for each EVSE in the\n network. 
This only works if a model and environment have been\n registered.\n\n Overrides BaseAlgorithm.schedule().\n\n The environment is assumed to be vectorized.\n \"\"\"\n if self._model is None or self._env is None:\n raise TypeError(\n f\"A model and environment must be set to call the \"\n f\"schedule function for GymAlgorithm.\"\n )\n env: BaseSimEnv = self._env.envs[0].env\n if not isinstance(env.interface, GymTrainedInterface):\n raise TypeError(\n \"GymAlgorithm environment must have an interface of \"\n \"type GymTrainedInterface to call schedule(). \"\n )\n env.update_state()\n env.store_previous_state()\n env.action = self.model.predict(\n self._env.env_method(\"observation\", env.observation)[0],\n env.reward,\n env.done,\n env.info,\n )[0]\n env.schedule = env.action_to_schedule()\n return env.schedule\n\n\nevaluation_algorithm = GymTrainedAlgorithmVectorized()\nevaluation_simulation = _random_sim_builder(evaluation_algorithm, GymTrainedInterface)\nedf_simulation = deepcopy(evaluation_simulation)\nrr_simulation = deepcopy(evaluation_simulation)\nedf_simulation.update_scheduler(SortedSchedulingAlgo(earliest_deadline_first))\nrr_simulation.update_scheduler(RoundRobin(first_come_first_served))\n\n# Make a new, single-use environment with only charging rewards. One can do this by\n# explicitly defining which rewards, observations, and actions to include.\nobservation_objects: List[SimObservation] = default_observation_objects\naction_object: SimAction = default_action_object\nreward_functions: List[Callable[[BaseSimEnv], float]] = [\n reward_functions.hard_charging_reward\n]\neval_env: DummyVecEnv = DummyVecEnv(\n [\n lambda: FlattenObservation(\n CustomSimEnv(\n evaluation_algorithm.interface,\n observation_objects,\n action_object,\n reward_functions,\n )\n )\n ]\n)\nevaluation_algorithm.register_env(eval_env)\nevaluation_algorithm.register_model(StableBaselinesRLModel(model))\n\nevaluation_simulation.run()\nedf_simulation.run()\nrr_simulation.run()\n\nfig, axs = plt.subplots(3)\nrl = axs[0].plot(evaluation_simulation.charging_rates[0], label=\"RL Agent\")\nedf = axs[0].plot(rr_simulation.charging_rates[0], label=\"EDF\")\naxs[1].plot(evaluation_simulation.charging_rates[1])\naxs[1].plot(rr_simulation.charging_rates[1])\naxs[2].plot(acnsim.aggregate_current(evaluation_simulation))\naxs[2].plot(acnsim.aggregate_current(rr_simulation))\n\naxs[0].title.set_text(\"Current, Line 1\")\naxs[1].title.set_text(\"Current, Line 2\")\naxs[2].title.set_text(\"Total Current\")\n\nplt.show()\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.array"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
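The zero-centered action convention exercised by `TestZeroCenteredSingleChargingSchedule` above maps an action of 0 to the midpoint of the allowed pilot range, so `get_schedule()` shifts each action back by `(max_rate + min_rate) / 2`. A small sketch of that shift follows; the helper and variable names are hypothetical, not part of the gym_acnportal API.

from typing import Dict, List
import numpy as np

def zero_centered_to_pilots(
    action: np.ndarray, station_ids: List[str], min_rate: float, max_rate: float
) -> Dict[str, List[float]]:
    """Map a zero-centered action array back to per-station pilot signals."""
    midpoint = (max_rate + min_rate) / 2
    return {sid: [float(a) + midpoint] for sid, a in zip(station_ids, action)}

# With min_rate=0 and max_rate=16, an all-zero action yields 8 A pilots:
print(zero_centered_to_pilots(np.zeros(2), ["T1", "T2"], 0.0, 16.0))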
ckxy/part-of-hitogata | [
"76402d48a336fcd964d0e64bb01d959e8f07f296",
"76402d48a336fcd964d0e64bb01d959e8f07f296",
"76402d48a336fcd964d0e64bb01d959e8f07f296",
"76402d48a336fcd964d0e64bb01d959e8f07f296"
] | [
"datasets/readers/ccpd.py",
"utils/mask_tools.py",
"datasetsnx/readers/wflw.py",
"datasets/readers/lvis.py"
] | [
"import os\nimport numpy as np\nfrom addict import Dict\nfrom PIL import Image\nfrom .reader import Reader\nfrom .builder import READER\n\n\n__all__ = ['CCPD2019FolderReader']\n\n\[email protected]_module()\nclass CCPD2019FolderReader(Reader):\n def __init__(self, root, **kwargs):\n super(CCPD2019FolderReader, self).__init__(**kwargs)\n\n self.root = root\n self.chars = ('京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',\n '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',\n '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',\n '新', \n '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',\n 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'I', 'O', '-')\n self.img_paths = sorted(os.listdir(kwargs['root']))\n assert len(self.img_paths) > 0\n\n def get_dataset_info(self):\n return range(len(self.img_paths)), Dict({'chars': self.chars})\n\n def get_data_info(self, index):\n img = Image.open(self.img_paths[index][0])\n w, h = img.size\n return dict(h=h, w=w)\n\n def __call__(self, index):\n # index = data_dict\n # img = Image.open(os.path.join(self.root, self.img_paths[index])).convert('RGB')\n img = self.read_image(os.path.join(self.root, self.img_paths[index]))\n w, h = img.size\n path = os.path.join(self.root, self.img_paths[index])\n\n base_name = os.path.basename(self.img_paths[index])\n img_name, suffix = os.path.splitext(base_name)\n img_name = img_name.split(\"-\")[0].split(\"_\")[0]\n\n # if len(img_name) == 8:\n # print(path, 'a')\n # if img_name[2] != 'D' and img_name[2] != 'F' and img_name[-1] != 'D' and img_name[-1] != 'F':\n # print(path)\n # raise ValueError\n\n words = []\n for c in img_name:\n words.append(self.chars.index(c))\n\n # return {'image': img, 'ori_size': np.array([h, w]).astype(np.float32), 'path': path, 'seq': words, 'seq_length': len(words)}\n return dict(\n image=img,\n ori_size=np.array([h, w]).astype(np.float32),\n path=path,\n seq=words,\n seq_length=len(words)\n )\n\n def __repr__(self):\n return 'CCPD2019FolderReader(root={}, {})'.format(self.root, super(CCPD2019FolderReader, self).__repr__())\n",
"import math\nimport torch\nimport colorsys\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\n\n\ndef draw_mask(img, mask, classes, colorbar=True, erase_contour=False, have_background=True):\n uni = np.unique(mask).tolist()\n have_contour = 255 in uni\n\n if have_contour and not erase_contour:\n num_classes = len(classes)\n else:\n num_classes = len(classes) - 1\n \n hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))\n colors = [(0, 0, 0)] + colors\n\n if have_contour and not erase_contour:\n tmp = colors.pop(-1)\n for i in range(255 - len(colors)):\n colors.append((0, 0, 0))\n colors.append(tmp)\n elif have_contour and erase_contour:\n mask = np.array(mask).astype(np.int)\n mask[mask == 255] = 0\n mask = Image.fromarray(mask.astype(np.uint8))\n\n mask.putpalette(np.array(colors).flatten().tolist())\n mask = mask.convert(\"RGB\")\n\n img = Image.blend(img, mask, 0.5)\n\n if colorbar:\n if have_background and 0 in uni:\n uni.pop(0)\n if len(uni) > 0:\n colors = [colors[u] for u in uni]\n classes = [classes[u] if u != 255 else 'boundary' for u in uni]\n\n w, h = img.size\n w = w // 4\n\n bar = Image.new('RGB', (w, h), (255, 255, 255))\n l = math.sqrt(h * h + w * w)\n draw = ImageDraw.Draw(bar)\n font = ImageFont.truetype(\"fonts/arial.ttf\", int(l * 5e-2))\n\n pw = w\n ph = h // len(colors)\n\n x1, y1 = 0, (h - ph * len(colors)) // 2\n for i in range(len(colors)):\n draw.rectangle((x1, y1, x1 + pw, y1 + ph), fill=colors[i], outline=(0, 0, 0))\n draw.text((x1, y1), classes[i], fill=(0, 0, 0), font=font)\n y1 += ph\n else:\n w, h = img.size\n w = w // 4\n\n bar = Image.new('RGB', (w, h), (255, 255, 255))\n l = math.sqrt(h * h + w * w)\n draw = ImageDraw.Draw(bar)\n font = ImageFont.truetype(\"fonts/arial.ttf\", int(l * 5e-2))\n draw.text((0, 0), 'no_label', fill=(0, 0, 0), font=font)\n\n return img, bar\n else:\n return img, None\n\n\ndef pixel_accuracy(imPred, imLab):\n \"\"\"\n This function takes the prediction and label of a single image, returns pixel-wise accuracy\n To compute over many images do:\n for i = range(Nimages):\n (pixel_accuracy[i], pixel_correct[i], pixel_labeled[i]) = \\\n pixelAccuracy(imPred[i], imLab[i])\n mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled))\n \"\"\"\n # Remove classes from unlabeled pixels in gt image.\n # We should not penalize detections in unlabeled portions of the image.\n pixel_labeled = np.sum(imLab >= 0)\n pixel_correct = np.sum((imPred == imLab) * (imLab >= 0))\n pixel_accuracy = 1.0 * pixel_correct / pixel_labeled\n return pixel_accuracy, pixel_correct, pixel_labeled\n\n\ndef intersection_union(imPred, imLab, numClass):\n \"\"\"\n This function takes the prediction and label of a single image,\n returns intersection and union areas for each class\n To compute over many images do:\n for i in range(Nimages):\n (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i])\n IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1)\n \"\"\"\n # Remove classes from unlabeled pixels in gt image.\n # We should not penalize detections in unlabeled portions of the image.\n imPred = imPred * (imLab >= 0)\n\n # Compute area intersection:\n intersection = imPred * (imPred == imLab)\n (area_intersection, _) = np.histogram(intersection, bins=numClass, range=(1, 
numClass))\n\n # Compute area union:\n (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass))\n (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass))\n area_union = area_pred + area_lab - area_intersection\n return area_intersection, area_union\n\n\n# pytorch version\ndef batch_pix_accuracy(output, target):\n \"\"\"PixAcc\"\"\"\n # inputs are numpy array, output 4D, target 3D\n # predict = torch.argmax(output.long(), 1) + 1\n predict = output.long() + 1\n target = target.long() + 1\n\n pixel_labeled = torch.sum(target > 0).item()\n pixel_correct = torch.sum((predict == target) * (target > 0)).item()\n assert pixel_correct <= pixel_labeled, \"Correct area should be smaller than Labeled\"\n return pixel_correct, pixel_labeled\n\n\ndef batch_intersection_union(output, target, nclass):\n \"\"\"mIoU\"\"\"\n # inputs are numpy array, output 4D, target 3D\n mini = 1\n maxi = nclass\n nbins = nclass\n # predict = torch.argmax(output, 1) + 1\n predict = output + 1\n target = target.float() + 1\n\n predict = predict.float() * (target > 0).float()\n intersection = predict * (predict == target).float()\n # areas of intersection and union\n # element 0 in intersection occur the main difference from np.bincount. set boundary to -1 is necessary.\n area_inter = torch.histc(intersection.cpu(), bins=nbins, min=mini, max=maxi)\n area_pred = torch.histc(predict.cpu(), bins=nbins, min=mini, max=maxi)\n area_lab = torch.histc(target.cpu(), bins=nbins, min=mini, max=maxi)\n area_union = area_pred + area_lab - area_inter\n assert torch.sum(area_inter > area_union).item() == 0, \"Intersection area should be smaller than Union area\"\n return area_inter.float(), area_union.float()\n",
"import os\nimport numpy as np\nfrom addict import Dict\nfrom PIL import Image\nfrom .reader import Reader\n\n\n__all__ = ['WFLWReader', 'WFLWPPReader']\n\n\nclass WFLWReader(Reader):\n def __init__(self, root, txt_path, **kwargs):\n super(WFLWReader, self).__init__(**kwargs)\n\n self.root = root\n self.txt = txt_path\n self.img_root = os.path.join(self.root, 'WFLW_images')\n assert os.path.exists(self.img_root)\n\n with open(os.path.join(self.root, self.txt), 'r') as f:\n self.data_lines = f.readlines()\n\n assert len(self.data_lines) > 0\n\n def get_dataset_info(self):\n return range(len(self.data_lines)), Dict({})\n\n def get_data_info(self, index):\n return\n\n def __call__(self, index):\n # index = data_dict\n\n line = self.data_lines[index].strip().split()\n landmark = np.asarray(list(map(float, line[:196])), dtype=np.float32).reshape(-1, 2)\n box = np.asarray(list(map(int, line[196:200])))\n attribute = np.asarray(list(map(int, line[200:206])), dtype=np.int)\n name = line[206]\n\n # img = Image.open(os.path.join(self.img_root, name)).convert('RGB')\n img = self.read_image(os.path.join(self.img_root, name))\n\n res = dict()\n res['image'] = img\n res['point'] = landmark\n res['path'] = os.path.join(self.img_root, name)\n res['attribute'] = attribute\n res['bbox'] = box[np.newaxis, ...].astype(np.float)\n w, h = img.size\n res['ori_size'] = np.array([h, w]).astype(np.float32)\n return res\n\n def __repr__(self):\n return 'WFLWReader(txt={}, {})'.format(self.txt, super(WFLWReader, self).__repr__())\n\n\nclass WFLWPPReader(Reader):\n def __init__(self, txt_path, **kwargs):\n super(WFLWPPReader, self).__init__(**kwargs)\n\n self.txt = txt_path\n with open(txt_path, 'r') as f:\n self.data_lines = f.readlines()\n assert len(self.data_lines) > 0\n\n def get_dataset_info(self):\n return range(len(self.data_lines)), Dict({})\n\n def get_data_info(self, index):\n return\n\n def __call__(self, index):\n # index = data_dict\n\n line = self.data_lines[index].strip().split()\n path = line[0]\n landmark = np.asarray(line[1:197], dtype=np.float32).reshape(-1, 2)\n # landmark = landmark.reshape(-1, 2)\n attribute = np.asarray(line[197:203], dtype=np.int32)\n euler_angle = np.asarray(line[203:206], dtype=np.float32)\n\n # img = Image.open(path).convert('RGB')\n img = self.read_image(path)\n\n res = dict()\n res['image'] = img\n res['point'] = landmark\n res['path'] = path\n res['attribute'] = attribute\n res['euler_angle'] = euler_angle\n w, h = img.size\n res['ori_size'] = np.array([h, w]).astype(np.float32)\n return res\n\n def __repr__(self):\n return 'WFLWPPReader(txt={}, {})'.format(self.txt, super(WFLWPPReader, self).__repr__())\n",
"import os\nimport numpy as np\nfrom PIL import Image\nfrom addict import Dict\nfrom copy import deepcopy\nfrom .reader import Reader\nfrom .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES\nfrom .builder import READER\n\n\n__all__ = ['LVISAPIReader']\n\n\[email protected]_module()\nclass LVISAPIReader(Reader):\n def __init__(self, set_path, img_root, **kwargs):\n super(LVISAPIReader, self).__init__(**kwargs)\n\n self.set = set_path\n self.img_root = img_root\n\n assert os.path.exists(self.set)\n assert os.path.exists(self.img_root)\n\n assert len(LVIS_V1_CATEGORIES) == 1203\n cat_ids = [k[\"id\"] for k in LVIS_V1_CATEGORIES]\n assert min(cat_ids) == 1 and max(cat_ids) == len(\n cat_ids\n ), \"Category ids are not in [1, #categories], as expected\"\n # Ensure that the category list is sorted by id\n lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x[\"id\"])\n self.thing_classes = [k[\"synonyms\"][0] for k in lvis_categories]\n self.meta = {\"thing_classes\": self.thing_classes}\n\n self.data_lines = self.load_lvis_json()\n\n def get_dataset_info(self):\n return range(len(self.data_lines)), Dict({'classes': self.thing_classes})\n\n def get_data_info(self, index):\n data_line = self.data_lines[index]\n return dict(h=data_line['ori_size'][0], w=data_line['ori_size'][1], bbox=data_line['bbox'])\n\n def load_lvis_json(self):\n \"\"\"\n Load a json file in LVIS's annotation format.\n Args:\n extra_annotation_keys (list[str]): list of per-annotation keys that should also be\n loaded into the dataset dict (besides \"bbox\", \"bbox_mode\", \"category_id\",\n \"segmentation\"). The values for these keys will be returned as-is.\n Returns:\n list[dict]: a list of dicts in Detectron2 standard format. (See\n `Using Custom Datasets </tutorials/datasets.html>`_ )\n Notes:\n 1. This function does not read the image files.\n The results do not have the \"image\" field.\n \"\"\"\n from lvis import LVIS\n\n lvis_api = LVIS(self.set)\n\n # sort indices for reproducible results\n img_ids = sorted(lvis_api.imgs.keys())\n # imgs is a list of dicts, each looks something like:\n # {'license': 4,\n # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',\n # 'file_name': 'COCO_val2014_000000001268.jpg',\n # 'height': 427,\n # 'width': 640,\n # 'date_captured': '2013-11-17 05:57:24',\n # 'id': 1268}\n imgs = lvis_api.load_imgs(img_ids)\n # anns is a list[list[dict]], where each dict is an annotation\n # record for an object. The inner list enumerates the objects in an image\n # and the outer list enumerates over images. Example of anns[0]:\n # [{'segmentation': [[192.81,\n # 247.09,\n # ...\n # 219.03,\n # 249.06]],\n # 'area': 1035.749,\n # 'image_id': 1268,\n # 'bbox': [192.81, 224.8, 74.73, 33.43],\n # 'category_id': 16,\n # 'id': 42986},\n # ...]\n anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]\n\n # Sanity check that each annotation has a unique id\n ann_ids = [ann[\"id\"] for anns_per_image in anns for ann in anns_per_image]\n assert len(set(ann_ids)) == len(ann_ids), \"Annotation ids in '{}' are not unique\".format(\n json_file\n )\n\n imgs_anns = list(zip(imgs, anns))\n\n extra_annotation_keys = []\n\n def get_file_name(img_root, img_dict):\n # Determine the path including the split folder (\"train2017\", \"val2017\", \"test2017\") from\n # the coco_url field. 
Example:\n # 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'\n split_folder, file_name = img_dict[\"coco_url\"].split(\"/\")[-2:]\n return os.path.join(img_root, file_name)\n\n dataset_dicts = []\n\n for (img_dict, anno_dict_list) in imgs_anns:\n record = dict()\n record[\"path\"] = get_file_name(self.img_root, img_dict)\n record['ori_size'] = np.array([img_dict[\"height\"], img_dict[\"width\"]]).astype(np.float32)\n record[\"not_exhaustive_category_ids\"] = img_dict.get(\"not_exhaustive_category_ids\", [])\n record[\"not_exhaustive_category_ids\"] = [i - 1 for i in record[\"not_exhaustive_category_ids\"]]\n record[\"neg_category_ids\"] = img_dict.get(\"neg_category_ids\", [])\n record[\"neg_category_ids\"] = [i - 1 for i in record[\"neg_category_ids\"]]\n image_id = record[\"id\"] = img_dict[\"id\"]\n\n boxes = []\n for anno in anno_dict_list:\n # Check that the image_id in this annotation is the same as\n # the image_id we're looking at.\n # This fails only when the data parsing logic or the annotation file is buggy.\n assert anno[\"image_id\"] == image_id\n x1, y1, w, h = anno[\"bbox\"]\n boxes.append([x1, y1, x1 + w, y1 + h, anno[\"category_id\"] - 1]) # Convert 1-indexed to 0-indexed\n\n # segm = anno[\"segmentation\"] # list[list[float]]\n # # filter out invalid polygons (< 3 points)\n # valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]\n # assert len(segm) == len(\n # valid_segm\n # ), \"Annotation contains an invalid polygon with < 3 points\"\n # assert len(segm) > 0\n # obj[\"segmentation\"] = segm\n\n if len(boxes) == 0:\n record[\"bbox\"] = np.zeros((0, 5)).astype(np.float32)\n else:\n record[\"bbox\"] = np.array(boxes)\n dataset_dicts.append(record)\n\n return dataset_dicts\n\n def __call__(self, index):\n data_line = deepcopy(self.data_lines[index])\n data_line['image'] = self.read_image(data_line['path'])\n data_line['bbox'] = np.concatenate([data_line['bbox'], np.full((len(data_line['bbox']), 1), 1).astype(np.float32)], axis=1)\n return data_line\n\n def __repr__(self):\n return 'LVISAPIReader(set_path={}, img_root={}, {})'.format(self.set, self.img_root, super(LVISAPIReader, self).__repr__())\n"
] | [
[
"numpy.array"
],
[
"numpy.unique",
"torch.sum",
"numpy.array",
"numpy.histogram",
"numpy.sum"
],
[
"numpy.asarray",
"numpy.array"
],
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
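`intersection_union()` in the `utils/mask_tools.py` record above accumulates per-class intersection and union areas with `np.histogram` over the range `(1, numClass)`, so class 0 is treated as background and ignored. A short usage sketch on a toy 2x2 prediction/label pair; the function body is condensed from the record and the inputs are illustrative.

import numpy as np

def intersection_union(imPred, imLab, numClass):
    # Mask out unlabeled pixels, then histogram matching pixels per class.
    imPred = imPred * (imLab >= 0)
    intersection = imPred * (imPred == imLab)
    area_intersection, _ = np.histogram(intersection, bins=numClass, range=(1, numClass))
    area_pred, _ = np.histogram(imPred, bins=numClass, range=(1, numClass))
    area_lab, _ = np.histogram(imLab, bins=numClass, range=(1, numClass))
    return area_intersection, area_pred + area_lab - area_intersection

pred = np.array([[1, 1], [2, 2]])
label = np.array([[1, 2], [2, 2]])
inter, union = intersection_union(pred, label, numClass=2)
print(inter / (np.spacing(1) + union))  # per-class IoU: [0.5, 0.667]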
YeeCY/PASF | [
"95e548d365ea5da482c56408539d9a1514ef246b"
] | [
"rlkit/samplers/data_collector/path_collector.py"
] | [
"from collections import deque, OrderedDict\nfrom functools import partial\n\nimport numpy as np\n\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.samplers.data_collector.base import PathCollector\nfrom rlkit.samplers.rollout_functions import rollout\n\n\nclass ActionAgent():\n def __init__(self):\n\n self._actions = None\n self._step = 0\n\n def reset(self):\n self._step = 0\n\n def set_action(self, actions):\n self._actions = actions\n\n def get_action(self, *args, **kwargs):\n action = self._actions[self._step]\n self._step += 1\n return action, []\n\n\nclass MdpPathCollector(PathCollector):\n def __init__(\n self,\n env,\n policy,\n max_num_epoch_paths_saved=None,\n render=False,\n render_kwargs=None,\n rollout_fn=rollout,\n save_env_in_snapshot=True,\n ):\n if render_kwargs is None:\n render_kwargs = {}\n self._env = env\n self._policy = policy\n self._max_num_epoch_paths_saved = max_num_epoch_paths_saved\n self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)\n self._render = render\n self._render_kwargs = render_kwargs\n self._rollout_fn = rollout_fn\n self._action_agent = ActionAgent()\n\n self._num_steps_total = 0\n self._num_paths_total = 0\n\n self._save_env_in_snapshot = save_env_in_snapshot\n\n def collect_new_paths(\n self,\n max_path_length,\n num_steps,\n discard_incomplete_paths,\n ):\n paths = []\n num_steps_collected = 0\n while num_steps_collected < num_steps:\n max_path_length_this_loop = min( # Do not go over num_steps\n max_path_length,\n num_steps - num_steps_collected,\n )\n path = self._rollout_fn(\n self._env,\n self._policy,\n max_path_length=max_path_length_this_loop,\n render=self._render,\n render_kwargs=self._render_kwargs,\n )\n path_len = len(path['actions'])\n if (\n path_len != max_path_length\n and not path['terminals'][-1]\n and discard_incomplete_paths\n ):\n break\n num_steps_collected += path_len\n paths.append(path)\n self._num_paths_total += len(paths)\n self._num_steps_total += num_steps_collected\n self._epoch_paths.extend(paths)\n return paths\n\n def collect_aligned_paths(self, path_actions, discard_incomplete_paths=True):\n paths = []\n num_steps_collected = 0\n for p in path_actions:\n max_path_length = len(p)\n self._action_agent.set_action(p)\n path = self._rollout_fn(\n self._env,\n self._action_agent,\n max_path_length=max_path_length,\n render=self._render,\n render_kwargs=self._render_kwargs,\n )\n path_len = len(path['actions'])\n if (\n path_len != max_path_length\n and not path['terminals'][-1]\n and discard_incomplete_paths\n ):\n break\n num_steps_collected += path_len\n paths.append(path)\n self._num_paths_total += len(paths)\n self._num_steps_total += num_steps_collected\n return paths\n\n def get_epoch_paths(self):\n return self._epoch_paths\n\n def end_epoch(self, epoch):\n self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)\n\n def get_diagnostics(self):\n path_lens = [len(path['actions']) for path in self._epoch_paths]\n stats = OrderedDict([\n ('num steps total', self._num_steps_total),\n ('num paths total', self._num_paths_total),\n ])\n stats.update(create_stats_ordered_dict(\n \"path length\",\n path_lens,\n always_show_all_stats=True,\n ))\n return stats\n\n def get_snapshot(self):\n snapshot_dict = dict(\n policy=self._policy,\n num_steps_total=self._num_steps_total,\n num_paths_total=self._num_paths_total,\n )\n if self._save_env_in_snapshot:\n snapshot_dict['env'] = self._env\n return snapshot_dict\n\n def load_from_snapshot(self, snapshot):\n self._policy = 
snapshot['policy']\n self._num_steps_total = snapshot['num_steps_total']\n self._num_paths_total = snapshot['num_paths_total']\n if self._save_env_in_snapshot:\n assert 'env' in snapshot\n if hasattr(self._env, '_custom_goal_sampler'):\n snapshot['env']._custom_goal_sampler = self._env._custom_goal_sampler\n self._env = snapshot['env']\n\n\nclass GoalConditionedPathCollector(MdpPathCollector):\n def __init__(\n self,\n *args,\n observation_key='observation',\n desired_goal_key='desired_goal',\n goal_sampling_mode=None,\n **kwargs\n ):\n def obs_processor(o):\n return np.hstack((o[observation_key], o[desired_goal_key]))\n\n rollout_fn = partial(\n rollout,\n preprocess_obs_for_policy_fn=obs_processor,\n )\n super().__init__(*args, rollout_fn=rollout_fn, **kwargs)\n self._observation_key = observation_key\n self._desired_goal_key = desired_goal_key\n self._goal_sampling_mode = goal_sampling_mode\n\n def collect_new_paths(self, *args, **kwargs):\n self._env.goal_sampling_mode = self._goal_sampling_mode\n return super().collect_new_paths(*args, **kwargs)\n\n def get_snapshot(self):\n snapshot = super().get_snapshot()\n snapshot.update(\n observation_key=self._observation_key,\n desired_goal_key=self._desired_goal_key,\n )\n return snapshot\n\n def load_from_snapshot(self, snapshot):\n super().load_from_snapshot(snapshot)\n self._observation_key = snapshot['observation_key']\n self._desired_goal_key = snapshot['desired_goal_key']\n\n\nclass ObsDictPathCollector(MdpPathCollector):\n def __init__(\n self,\n *args,\n observation_key='observation',\n **kwargs\n ):\n def obs_processor(obs):\n return obs[observation_key]\n\n rollout_fn = partial(\n rollout,\n preprocess_obs_for_policy_fn=obs_processor,\n )\n super().__init__(*args, rollout_fn=rollout_fn, **kwargs)\n self._observation_key = observation_key\n\n def get_snapshot(self):\n snapshot = super().get_snapshot()\n snapshot.update(\n observation_key=self._observation_key,\n )\n return snapshot\n\n\nclass VAEWrappedEnvPathCollector(GoalConditionedPathCollector):\n def __init__(\n self,\n env,\n policy,\n decode_goals=False,\n **kwargs\n ):\n \"\"\"Expects env is VAEWrappedEnv\"\"\"\n super().__init__(env, policy, **kwargs)\n self._decode_goals = decode_goals\n\n def collect_new_paths(self, *args, **kwargs):\n self._env.decode_goals = self._decode_goals\n return super().collect_new_paths(*args, **kwargs)\n"
] | [
[
"numpy.hstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
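Editor's note: the row above embeds rlkit's MdpPathCollector. The sketch below is a minimal, self-contained re-statement of its step-capping loop, with a stubbed fake_rollout standing in for the real environment rollout; the stub and its fixed 70-step episodes are assumptions for the demo, not rlkit code. It shows why collect_new_paths can never gather more than num_steps transitions.

def fake_rollout(max_path_length, episode_len=70):
    # Episodes naturally end after episode_len steps unless cut short by the cap.
    path_len = min(max_path_length, episode_len)
    terminals = [False] * (path_len - 1) + [path_len == episode_len]
    return {"actions": [0] * path_len, "terminals": terminals}

def collect(num_steps, max_path_length=100, discard_incomplete_paths=False):
    paths, collected = [], 0
    while collected < num_steps:
        # Same capping as the collector: never overshoot the step budget.
        cap = min(max_path_length, num_steps - collected)
        path = fake_rollout(max_path_length=cap)
        path_len = len(path["actions"])
        if path_len != max_path_length and not path["terminals"][-1] and discard_incomplete_paths:
            break
        collected += path_len
        paths.append(path)
    return paths, collected

paths, n = collect(num_steps=150)
print(len(paths), n)  # 3 paths: 70 + 70 + 10 == 150 steps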
charliec443/plaid-rl | [
"2e8fbf389af9efecd41361df80e40e0bf932056d",
"2e8fbf389af9efecd41361df80e40e0bf932056d",
"2e8fbf389af9efecd41361df80e40e0bf932056d",
"2e8fbf389af9efecd41361df80e40e0bf932056d"
] | [
"plaidrl/exploration_strategies/ou_strategy.py",
"plaidrl/torch/data_management/normalizer.py",
"plaidrl/torch/vae/vae_trainer.py",
"plaidrl/torch/data.py"
] | [
"import numpy as np\nimport numpy.random as nr\n\nfrom plaidrl.exploration_strategies.base import RawExplorationStrategy\n\n\nclass OUStrategy(RawExplorationStrategy):\n \"\"\"\n This strategy implements the Ornstein-Uhlenbeck process, which adds\n time-correlated noise to the actions taken by the deterministic policy.\n The OU process satisfies the following stochastic differential equation:\n dxt = theta*(mu - xt)*dt + sigma*dWt\n where Wt denotes the Wiener process\n\n Based on the rllab implementation.\n \"\"\"\n\n def __init__(\n self,\n action_space,\n mu=0,\n theta=0.15,\n max_sigma=0.3,\n min_sigma=None,\n decay_period=100000,\n ):\n if min_sigma is None:\n min_sigma = max_sigma\n self.mu = mu\n self.theta = theta\n self.sigma = max_sigma\n self._max_sigma = max_sigma\n if min_sigma is None:\n min_sigma = max_sigma\n self._min_sigma = min_sigma\n self._decay_period = decay_period\n self.dim = np.prod(action_space.low.shape)\n self.low = action_space.low\n self.high = action_space.high\n self.state = np.ones(self.dim) * self.mu\n self.reset()\n\n def reset(self):\n self.state = np.ones(self.dim) * self.mu\n\n def evolve_state(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))\n self.state = x + dx\n return self.state\n\n def get_action_from_raw_action(self, action, t=0, **kwargs):\n ou_state = self.evolve_state()\n self.sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(\n 1.0, t * 1.0 / self._decay_period\n )\n return np.clip(action + ou_state, self.low, self.high)\n",
"import numpy as np\nimport torch\n\nimport plaidrl.torch.pytorch_util as ptu\nfrom plaidrl.data_management.normalizer import FixedNormalizer, Normalizer\n\n\nclass TorchNormalizer(Normalizer):\n \"\"\"\n Update with np array, but de/normalize pytorch Tensors.\n \"\"\"\n\n def normalize(self, v, clip_range=None):\n if not self.synchronized:\n self.synchronize()\n if clip_range is None:\n clip_range = self.default_clip_range\n mean = ptu.from_numpy(self.mean)\n std = ptu.from_numpy(self.std)\n if v.dim() == 2:\n # Unsqueeze along the batch use automatic broadcasting\n mean = mean.unsqueeze(0)\n std = std.unsqueeze(0)\n return torch.clamp((v - mean) / std, -clip_range, clip_range)\n\n def denormalize(self, v):\n if not self.synchronized:\n self.synchronize()\n mean = ptu.from_numpy(self.mean)\n std = ptu.from_numpy(self.std)\n if v.dim() == 2:\n mean = mean.unsqueeze(0)\n std = std.unsqueeze(0)\n return mean + v * std\n\n\nclass TorchFixedNormalizer(FixedNormalizer):\n def normalize(self, v, clip_range=None):\n if clip_range is None:\n clip_range = self.default_clip_range\n mean = ptu.from_numpy(self.mean)\n std = ptu.from_numpy(self.std)\n if v.dim() == 2:\n # Unsqueeze along the batch use automatic broadcasting\n mean = mean.unsqueeze(0)\n std = std.unsqueeze(0)\n return torch.clamp((v - mean) / std, -clip_range, clip_range)\n\n def normalize_scale(self, v):\n \"\"\"\n Only normalize the scale. Do not subtract the mean.\n \"\"\"\n std = ptu.from_numpy(self.std)\n if v.dim() == 2:\n std = std.unsqueeze(0)\n return v / std\n\n def denormalize(self, v):\n mean = ptu.from_numpy(self.mean)\n std = ptu.from_numpy(self.std)\n if v.dim() == 2:\n mean = mean.unsqueeze(0)\n std = std.unsqueeze(0)\n return mean + v * std\n\n def denormalize_scale(self, v):\n \"\"\"\n Only denormalize the scale. Do not add the mean.\n \"\"\"\n std = ptu.from_numpy(self.std)\n if v.dim() == 2:\n std = std.unsqueeze(0)\n return v * std\n",
"from collections import OrderedDict\nfrom os import path as osp\n\nimport numpy as np\nimport torch\nfrom multiworld.core.image_env import normalize_image\nfrom torch import optim\nfrom torch.distributions import Normal\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\n\nfrom plaidrl.core import logger\nfrom plaidrl.core.eval_util import create_stats_ordered_dict\nfrom plaidrl.torch import pytorch_util as ptu\nfrom plaidrl.torch.data import (\n ImageDataset,\n InfiniteRandomSampler,\n InfiniteWeightedRandomSampler,\n)\nfrom plaidrl.util.ml_util import ConstantSchedule\n\n\ndef relative_probs_from_log_probs(log_probs):\n \"\"\"\n Returns relative probability from the log probabilities. They're not exactly\n equal to the probability, but relative scalings between them are all maintained.\n\n For correctness, all log_probs must be passed in at the same time.\n \"\"\"\n probs = np.exp(log_probs - log_probs.mean())\n assert not np.any(probs <= 0), \"choose a smaller power\"\n return probs\n\n\ndef compute_log_p_log_q_log_d(\n model,\n data,\n decoder_distribution=\"bernoulli\",\n num_latents_to_sample=1,\n sampling_method=\"importance_sampling\",\n):\n assert data.dtype == np.float64, \"images should be normalized\"\n imgs = ptu.from_numpy(data)\n latent_distribution_params = model.encode(imgs)\n batch_size = data.shape[0]\n representation_size = model.representation_size\n log_p, log_q, log_d = (\n ptu.zeros((batch_size, num_latents_to_sample)),\n ptu.zeros((batch_size, num_latents_to_sample)),\n ptu.zeros((batch_size, num_latents_to_sample)),\n )\n true_prior = Normal(\n ptu.zeros((batch_size, representation_size)),\n ptu.ones((batch_size, representation_size)),\n )\n mus, logvars = latent_distribution_params\n for i in range(num_latents_to_sample):\n if sampling_method == \"importance_sampling\":\n latents = model.rsample(latent_distribution_params)\n elif sampling_method == \"biased_sampling\":\n latents = model.rsample(latent_distribution_params)\n elif sampling_method == \"true_prior_sampling\":\n latents = true_prior.rsample()\n else:\n raise EnvironmentError(\"Invalid Sampling Method Provided\")\n\n stds = logvars.exp().pow(0.5)\n vae_dist = Normal(mus, stds)\n log_p_z = true_prior.log_prob(latents).sum(dim=1)\n log_q_z_given_x = vae_dist.log_prob(latents).sum(dim=1)\n if decoder_distribution == \"bernoulli\":\n decoded = model.decode(latents)[0]\n log_d_x_given_z = torch.log(\n imgs * decoded + (1 - imgs) * (1 - decoded) + 1e-8\n ).sum(dim=1)\n elif decoder_distribution == \"gaussian_identity_variance\":\n _, obs_distribution_params = model.decode(latents)\n dec_mu, dec_logvar = obs_distribution_params\n dec_var = dec_logvar.exp()\n decoder_dist = Normal(dec_mu, dec_var.pow(0.5))\n log_d_x_given_z = decoder_dist.log_prob(imgs).sum(dim=1)\n else:\n raise EnvironmentError(\"Invalid Decoder Distribution Provided\")\n\n log_p[:, i] = log_p_z\n log_q[:, i] = log_q_z_given_x\n log_d[:, i] = log_d_x_given_z\n return log_p, log_q, log_d\n\n\ndef compute_p_x_np_to_np(\n model,\n data,\n power,\n decoder_distribution=\"bernoulli\",\n num_latents_to_sample=1,\n sampling_method=\"importance_sampling\",\n):\n assert data.dtype == np.float64, \"images should be normalized\"\n assert power >= -1 and power <= 0, \"power for skew-fit should belong to [-1, 0]\"\n\n log_p, log_q, log_d = compute_log_p_log_q_log_d(\n model, data, decoder_distribution, num_latents_to_sample, sampling_method\n )\n\n if sampling_method == \"importance_sampling\":\n log_p_x = (log_p - log_q 
+ log_d).mean(dim=1)\n elif (\n sampling_method == \"biased_sampling\" or sampling_method == \"true_prior_sampling\"\n ):\n log_p_x = log_d.mean(dim=1)\n else:\n raise EnvironmentError(\"Invalid Sampling Method Provided\")\n log_p_x_skewed = power * log_p_x\n return ptu.get_numpy(log_p_x_skewed)\n\n\nclass ConvVAETrainer(object):\n def __init__(\n self,\n train_dataset,\n test_dataset,\n model,\n batch_size=128,\n log_interval=0,\n beta=0.5,\n beta_schedule=None,\n lr=None,\n do_scatterplot=False,\n normalize=False,\n mse_weight=0.1,\n is_auto_encoder=False,\n background_subtract=False,\n use_parallel_dataloading=True,\n train_data_workers=2,\n skew_dataset=False,\n skew_config=None,\n priority_function_kwargs=None,\n start_skew_epoch=0,\n weight_decay=0,\n ):\n if skew_config is None:\n skew_config = {}\n self.log_interval = log_interval\n self.batch_size = batch_size\n self.beta = beta\n if is_auto_encoder:\n self.beta = 0\n if lr is None:\n if is_auto_encoder:\n lr = 1e-2\n else:\n lr = 1e-3\n self.beta_schedule = beta_schedule\n if self.beta_schedule is None or is_auto_encoder:\n self.beta_schedule = ConstantSchedule(self.beta)\n self.imsize = model.imsize\n self.do_scatterplot = do_scatterplot\n\n model.to(ptu.device)\n\n self.model = model\n self.representation_size = model.representation_size\n self.input_channels = model.input_channels\n self.imlength = model.imlength\n\n self.lr = lr\n params = list(self.model.parameters())\n self.optimizer = optim.Adam(\n params,\n lr=self.lr,\n weight_decay=weight_decay,\n )\n self.train_dataset, self.test_dataset = train_dataset, test_dataset\n assert self.train_dataset.dtype == np.uint8\n assert self.test_dataset.dtype == np.uint8\n self.train_dataset = train_dataset\n self.test_dataset = test_dataset\n\n self.batch_size = batch_size\n self.use_parallel_dataloading = use_parallel_dataloading\n self.train_data_workers = train_data_workers\n self.skew_dataset = skew_dataset\n self.skew_config = skew_config\n self.start_skew_epoch = start_skew_epoch\n if priority_function_kwargs is None:\n self.priority_function_kwargs = dict()\n else:\n self.priority_function_kwargs = priority_function_kwargs\n\n if self.skew_dataset:\n self._train_weights = self._compute_train_weights()\n else:\n self._train_weights = None\n\n if use_parallel_dataloading:\n self.train_dataset_pt = ImageDataset(train_dataset, should_normalize=True)\n self.test_dataset_pt = ImageDataset(test_dataset, should_normalize=True)\n\n if self.skew_dataset:\n base_sampler = InfiniteWeightedRandomSampler(\n self.train_dataset, self._train_weights\n )\n else:\n base_sampler = InfiniteRandomSampler(self.train_dataset)\n self.train_dataloader = DataLoader(\n self.train_dataset_pt,\n sampler=InfiniteRandomSampler(self.train_dataset),\n batch_size=batch_size,\n drop_last=False,\n num_workers=train_data_workers,\n pin_memory=True,\n )\n self.test_dataloader = DataLoader(\n self.test_dataset_pt,\n sampler=InfiniteRandomSampler(self.test_dataset),\n batch_size=batch_size,\n drop_last=False,\n num_workers=0,\n pin_memory=True,\n )\n self.train_dataloader = iter(self.train_dataloader)\n self.test_dataloader = iter(self.test_dataloader)\n\n self.normalize = normalize\n self.mse_weight = mse_weight\n self.background_subtract = background_subtract\n\n if self.normalize or self.background_subtract:\n self.train_data_mean = np.mean(self.train_dataset, axis=0)\n self.train_data_mean = normalize_image(np.uint8(self.train_data_mean))\n self.eval_statistics = OrderedDict()\n self._extra_stats_to_log = 
None\n\n def get_dataset_stats(self, data):\n torch_input = ptu.from_numpy(normalize_image(data))\n mus, log_vars = self.model.encode(torch_input)\n mus = ptu.get_numpy(mus)\n mean = np.mean(mus, axis=0)\n std = np.std(mus, axis=0)\n return mus, mean, std\n\n def update_train_weights(self):\n if self.skew_dataset:\n self._train_weights = self._compute_train_weights()\n if self.use_parallel_dataloading:\n self.train_dataloader = DataLoader(\n self.train_dataset_pt,\n sampler=InfiniteWeightedRandomSampler(\n self.train_dataset, self._train_weights\n ),\n batch_size=self.batch_size,\n drop_last=False,\n num_workers=self.train_data_workers,\n pin_memory=True,\n )\n self.train_dataloader = iter(self.train_dataloader)\n\n def _compute_train_weights(self):\n method = self.skew_config.get(\"method\", \"squared_error\")\n power = self.skew_config.get(\"power\", 1)\n batch_size = 512\n size = self.train_dataset.shape[0]\n next_idx = min(batch_size, size)\n cur_idx = 0\n weights = np.zeros(size)\n while cur_idx < self.train_dataset.shape[0]:\n idxs = np.arange(cur_idx, next_idx)\n data = self.train_dataset[idxs, :]\n if method == \"vae_prob\":\n data = normalize_image(data)\n weights[idxs] = compute_p_x_np_to_np(\n self.model, data, power=power, **self.priority_function_kwargs\n )\n else:\n raise NotImplementedError(\"Method {} not supported\".format(method))\n cur_idx = next_idx\n next_idx += batch_size\n next_idx = min(next_idx, size)\n\n if method == \"vae_prob\":\n weights = relative_probs_from_log_probs(weights)\n return weights\n\n def set_vae(self, vae):\n self.model = vae\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)\n\n def get_batch(self, train=True, epoch=None):\n if self.use_parallel_dataloading:\n if not train:\n dataloader = self.test_dataloader\n else:\n dataloader = self.train_dataloader\n samples = next(dataloader).to(ptu.device)\n return samples\n\n dataset = self.train_dataset if train else self.test_dataset\n skew = False\n if epoch is not None:\n skew = self.start_skew_epoch < epoch\n if train and self.skew_dataset and skew:\n probs = self._train_weights / np.sum(self._train_weights)\n ind = np.random.choice(\n len(probs),\n self.batch_size,\n p=probs,\n )\n else:\n ind = np.random.randint(0, len(dataset), self.batch_size)\n samples = normalize_image(dataset[ind, :])\n if self.normalize:\n samples = ((samples - self.train_data_mean) + 1) / 2\n if self.background_subtract:\n samples = samples - self.train_data_mean\n return ptu.from_numpy(samples)\n\n def get_debug_batch(self, train=True):\n dataset = self.train_dataset if train else self.test_dataset\n X, Y = dataset\n ind = np.random.randint(0, Y.shape[0], self.batch_size)\n X = X[ind, :]\n Y = Y[ind, :]\n return ptu.from_numpy(X), ptu.from_numpy(Y)\n\n def train_epoch(self, epoch, sample_batch=None, batches=100, from_rl=False):\n self.model.train()\n losses = []\n log_probs = []\n kles = []\n zs = []\n beta = float(self.beta_schedule.get_value(epoch))\n for batch_idx in range(batches):\n if sample_batch is not None:\n data = sample_batch(self.batch_size, epoch)\n # obs = data['obs']\n next_obs = data[\"next_obs\"]\n # actions = data['actions']\n else:\n next_obs = self.get_batch(epoch=epoch)\n obs = None\n actions = None\n self.optimizer.zero_grad()\n (\n reconstructions,\n obs_distribution_params,\n latent_distribution_params,\n ) = self.model(next_obs)\n log_prob = self.model.logprob(next_obs, obs_distribution_params)\n kle = self.model.kl_divergence(latent_distribution_params)\n\n encoder_mean = 
self.model.get_encoding_from_latent_distribution_params(\n latent_distribution_params\n )\n z_data = ptu.get_numpy(encoder_mean.cpu())\n for i in range(len(z_data)):\n zs.append(z_data[i, :])\n\n loss = -1 * log_prob + beta * kle\n\n self.optimizer.zero_grad()\n loss.backward()\n losses.append(loss.item())\n log_probs.append(log_prob.item())\n kles.append(kle.item())\n\n self.optimizer.step()\n if self.log_interval and batch_idx % self.log_interval == 0:\n print(\n \"Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\".format(\n epoch,\n batch_idx * len(data),\n len(self.train_loader.dataset),\n 100.0 * batch_idx / len(self.train_loader),\n loss.item() / len(next_obs),\n )\n )\n if not from_rl:\n zs = np.array(zs)\n self.model.dist_mu = zs.mean(axis=0)\n self.model.dist_std = zs.std(axis=0)\n\n self.eval_statistics[\"train/log prob\"] = np.mean(log_probs)\n self.eval_statistics[\"train/KL\"] = np.mean(kles)\n self.eval_statistics[\"train/loss\"] = np.mean(losses)\n\n def get_diagnostics(self):\n return self.eval_statistics\n\n def test_epoch(\n self,\n epoch,\n save_reconstruction=True,\n save_vae=True,\n from_rl=False,\n ):\n self.model.eval()\n losses = []\n log_probs = []\n kles = []\n zs = []\n beta = float(self.beta_schedule.get_value(epoch))\n for batch_idx in range(10):\n next_obs = self.get_batch(train=False)\n (\n reconstructions,\n obs_distribution_params,\n latent_distribution_params,\n ) = self.model(next_obs)\n log_prob = self.model.logprob(next_obs, obs_distribution_params)\n kle = self.model.kl_divergence(latent_distribution_params)\n loss = -1 * log_prob + beta * kle\n\n encoder_mean = latent_distribution_params[0]\n z_data = ptu.get_numpy(encoder_mean.cpu())\n for i in range(len(z_data)):\n zs.append(z_data[i, :])\n losses.append(loss.item())\n log_probs.append(log_prob.item())\n kles.append(kle.item())\n\n if batch_idx == 0 and save_reconstruction:\n n = min(next_obs.size(0), 8)\n comparison = torch.cat(\n [\n next_obs[:n]\n .narrow(start=0, length=self.imlength, dim=1)\n .contiguous()\n .view(-1, self.input_channels, self.imsize, self.imsize)\n .transpose(2, 3),\n reconstructions.view(\n self.batch_size,\n self.input_channels,\n self.imsize,\n self.imsize,\n )[:n].transpose(2, 3),\n ]\n )\n save_dir = osp.join(logger.get_snapshot_dir(), \"r%d.png\" % epoch)\n save_image(comparison.data.cpu(), save_dir, nrow=n)\n\n zs = np.array(zs)\n\n self.eval_statistics[\"epoch\"] = epoch\n self.eval_statistics[\"test/log prob\"] = np.mean(log_probs)\n self.eval_statistics[\"test/KL\"] = np.mean(kles)\n self.eval_statistics[\"test/loss\"] = np.mean(losses)\n self.eval_statistics[\"beta\"] = beta\n if not from_rl:\n for k, v in self.eval_statistics.items():\n logger.record_tabular(k, v)\n logger.dump_tabular()\n if save_vae:\n logger.save_itr_params(epoch, self.model)\n\n def debug_statistics(self):\n \"\"\"\n Given an image $$x$$, samples a bunch of latents from the prior\n $$z_i$$ and decode them $$\\hat x_i$$.\n Compare this to $$\\hat x$$, the reconstruction of $$x$$.\n Ideally\n - All the $$\\hat x_i$$s do worse than $$\\hat x$$ (makes sure VAE\n isn’t ignoring the latent)\n - Some $$\\hat x_i$$ do better than other $$\\hat x_i$$ (tests for\n coverage)\n \"\"\"\n debug_batch_size = 64\n data = self.get_batch(train=False)\n reconstructions, _, _ = self.model(data)\n img = data[0]\n recon_mse = ((reconstructions[0] - img) ** 2).mean().view(-1)\n img_repeated = img.expand((debug_batch_size, img.shape[0]))\n\n samples = ptu.randn(debug_batch_size, self.representation_size)\n 
random_imgs, _ = self.model.decode(samples)\n random_mses = (random_imgs - img_repeated) ** 2\n mse_improvement = ptu.get_numpy(random_mses.mean(dim=1) - recon_mse)\n stats = create_stats_ordered_dict(\n \"debug/MSE improvement over random\",\n mse_improvement,\n )\n stats.update(\n create_stats_ordered_dict(\n \"debug/MSE of random decoding\",\n ptu.get_numpy(random_mses),\n )\n )\n stats[\"debug/MSE of reconstruction\"] = ptu.get_numpy(recon_mse)[0]\n if self.skew_dataset:\n stats.update(create_stats_ordered_dict(\"train weight\", self._train_weights))\n return stats\n\n def dump_samples(self, epoch):\n self.model.eval()\n sample = ptu.randn(64, self.representation_size)\n sample = self.model.decode(sample)[0].cpu()\n save_dir = osp.join(logger.get_snapshot_dir(), \"s%d.png\" % epoch)\n save_image(\n sample.data.view(\n 64, self.input_channels, self.imsize, self.imsize\n ).transpose(2, 3),\n save_dir,\n )\n\n def _dump_imgs_and_reconstructions(self, idxs, filename):\n imgs = []\n recons = []\n for i in idxs:\n img_np = self.train_dataset[i]\n img_torch = ptu.from_numpy(normalize_image(img_np))\n recon, *_ = self.model(img_torch.view(1, -1))\n\n img = img_torch.view(\n self.input_channels, self.imsize, self.imsize\n ).transpose(1, 2)\n rimg = recon.view(self.input_channels, self.imsize, self.imsize).transpose(\n 1, 2\n )\n imgs.append(img)\n recons.append(rimg)\n all_imgs = torch.stack(imgs + recons)\n save_file = osp.join(logger.get_snapshot_dir(), filename)\n save_image(\n all_imgs.data,\n save_file,\n nrow=len(idxs),\n )\n\n def log_loss_under_uniform(self, model, data, priority_function_kwargs):\n import torch.nn.functional as F\n\n log_probs_prior = []\n log_probs_biased = []\n log_probs_importance = []\n kles = []\n mses = []\n for i in range(0, data.shape[0], self.batch_size):\n img = normalize_image(data[i : min(data.shape[0], i + self.batch_size), :])\n torch_img = ptu.from_numpy(img)\n (\n reconstructions,\n obs_distribution_params,\n latent_distribution_params,\n ) = self.model(torch_img)\n\n priority_function_kwargs[\"sampling_method\"] = \"true_prior_sampling\"\n log_p, log_q, log_d = compute_log_p_log_q_log_d(\n model, img, **priority_function_kwargs\n )\n log_prob_prior = log_d.mean()\n\n priority_function_kwargs[\"sampling_method\"] = \"biased_sampling\"\n log_p, log_q, log_d = compute_log_p_log_q_log_d(\n model, img, **priority_function_kwargs\n )\n log_prob_biased = log_d.mean()\n\n priority_function_kwargs[\"sampling_method\"] = \"importance_sampling\"\n log_p, log_q, log_d = compute_log_p_log_q_log_d(\n model, img, **priority_function_kwargs\n )\n log_prob_importance = (log_p - log_q + log_d).mean()\n\n kle = model.kl_divergence(latent_distribution_params)\n mse = F.mse_loss(torch_img, reconstructions, reduction=\"elementwise_mean\")\n mses.append(mse.item())\n kles.append(kle.item())\n log_probs_prior.append(log_prob_prior.item())\n log_probs_biased.append(log_prob_biased.item())\n log_probs_importance.append(log_prob_importance.item())\n\n logger.record_tabular(\n \"Uniform Data Log Prob (True Prior)\", np.mean(log_probs_prior)\n )\n logger.record_tabular(\n \"Uniform Data Log Prob (Biased)\", np.mean(log_probs_biased)\n )\n logger.record_tabular(\n \"Uniform Data Log Prob (Importance)\", np.mean(log_probs_importance)\n )\n logger.record_tabular(\"Uniform Data KL\", np.mean(kles))\n logger.record_tabular(\"Uniform Data MSE\", np.mean(mses))\n\n def dump_uniform_imgs_and_reconstructions(self, dataset, epoch):\n idxs = np.random.choice(range(dataset.shape[0]), 4)\n 
filename = \"uniform{}.png\".format(epoch)\n imgs = []\n recons = []\n for i in idxs:\n img_np = dataset[i]\n img_torch = ptu.from_numpy(normalize_image(img_np))\n recon, *_ = self.model(img_torch.view(1, -1))\n\n img = img_torch.view(\n self.input_channels, self.imsize, self.imsize\n ).transpose(1, 2)\n rimg = recon.view(self.input_channels, self.imsize, self.imsize).transpose(\n 1, 2\n )\n imgs.append(img)\n recons.append(rimg)\n all_imgs = torch.stack(imgs + recons)\n save_file = osp.join(logger.get_snapshot_dir(), filename)\n save_image(\n all_imgs.data,\n save_file,\n nrow=4,\n )\n",
"import numpy as np\nimport torch\nfrom torch.utils.data import Dataset, Sampler\n\n# TODO: move this to more reasonable place\nfrom plaidrl.data_management.obs_dict_replay_buffer import normalize_image\n\n\nclass ImageDataset(Dataset):\n def __init__(self, images, should_normalize=True):\n super().__init__()\n self.dataset = images\n self.dataset_len = len(self.dataset)\n assert should_normalize == (images.dtype == np.uint8)\n self.should_normalize = should_normalize\n\n def __len__(self):\n return self.dataset_len\n\n def __getitem__(self, idxs):\n samples = self.dataset[idxs, :]\n if self.should_normalize:\n samples = normalize_image(samples)\n return np.float32(samples)\n\n\nclass InfiniteRandomSampler(Sampler):\n def __init__(self, data_source):\n self.data_source = data_source\n self.iter = iter(torch.randperm(len(self.data_source)).tolist())\n\n def __iter__(self):\n return self\n\n def __next__(self):\n try:\n idx = next(self.iter)\n except StopIteration:\n self.iter = iter(torch.randperm(len(self.data_source)).tolist())\n idx = next(self.iter)\n return idx\n\n def __len__(self):\n return 2 ** 62\n\n\nclass InfiniteWeightedRandomSampler(Sampler):\n def __init__(self, data_source, weights):\n assert len(data_source) == len(weights)\n assert len(weights.shape) == 1\n self.data_source = data_source\n # Always use CPU\n self._weights = torch.from_numpy(weights)\n self.iter = self._create_iterator()\n\n def update_weights(self, weights):\n self._weights = weights\n self.iter = self._create_iterator()\n\n def _create_iterator(self):\n return iter(\n torch.multinomial(\n self._weights, len(self._weights), replacement=True\n ).tolist()\n )\n\n def __iter__(self):\n return self\n\n def __next__(self):\n try:\n idx = next(self.iter)\n except StopIteration:\n self.iter = self._create_iterator()\n idx = next(self.iter)\n return idx\n\n def __len__(self):\n return 2 ** 62\n"
] | [
[
"numpy.ones",
"numpy.prod",
"numpy.clip"
],
[
"torch.clamp"
],
[
"torch.optim.Adam",
"numpy.arange",
"numpy.uint8",
"torch.nn.functional.mse_loss",
"numpy.std",
"numpy.mean",
"numpy.any",
"torch.log",
"torch.distributions.Normal",
"torch.stack",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
],
[
"torch.from_numpy",
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
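Editor's note: the OUStrategy docstring in the row above gives the SDE dxt = theta*(mu - xt)*dt + sigma*dWt. Below is a plain-NumPy Euler step (dt = 1) of that process using the class's default parameters; it is an illustration of the technique, not code from the repo, and the zero "policy actions" are stand-ins.

import numpy as np

rng = np.random.default_rng(0)
theta, mu, sigma = 0.15, 0.0, 0.3
state = np.zeros(2)  # one OU state per action dimension

def ou_step(x):
    # x_{t+1} = x_t + theta*(mu - x_t) + sigma*N(0, 1), i.e. Euler with dt = 1
    return x + theta * (mu - x) + sigma * rng.standard_normal(x.shape)

noisy = []
for action in np.zeros((5, 2)):  # pretend the policy outputs zeros
    state = ou_step(state)
    noisy.append(np.clip(action + state, -1.0, 1.0))
print(np.round(noisy, 3))  # time-correlated exploration noise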
sdwivedi/LightGBM | [
"f5ec54fbaca8bd5f72cdecbf755216c6278aafe3"
] | [
"examples/python-guide/advanced_example.py"
] | [
"# coding: utf-8\nimport json\nimport lightgbm as lgb\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\n\ntry:\n import cPickle as pickle\nexcept BaseException:\n import pickle\n\nprint('Loading data...')\n# load or create your dataset\ndf_train = pd.read_csv('../binary_classification/binary.train', header=None, sep='\\t')\ndf_test = pd.read_csv('../binary_classification/binary.test', header=None, sep='\\t')\nW_train = pd.read_csv('../binary_classification/binary.train.weight', header=None)[0]\nW_test = pd.read_csv('../binary_classification/binary.test.weight', header=None)[0]\n\ny_train = df_train[0]\ny_test = df_test[0]\nX_train = df_train.drop(0, axis=1)\nX_test = df_test.drop(0, axis=1)\n\nnum_train, num_feature = X_train.shape\n\n# create dataset for lightgbm\n# if you want to re-use data, remember to set free_raw_data=False\nlgb_train = lgb.Dataset(X_train, y_train,\n weight=W_train, free_raw_data=False)\nlgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,\n weight=W_test, free_raw_data=False)\n\n# specify your configurations as a dict\nparams = {\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'num_leaves': 31,\n 'learning_rate': 0.05,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'verbose': 0\n}\n\n# generate feature names\nfeature_name = ['feature_' + str(col) for col in range(num_feature)]\n\nprint('Starting training...')\n# feature_name and categorical_feature\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n valid_sets=lgb_train, # eval training data\n feature_name=feature_name,\n categorical_feature=[21])\n\nprint('Finished first 10 rounds...')\n# check feature name\nprint('7th feature name is:', lgb_train.feature_name[6])\n\nprint('Saving model...')\n# save model to file\ngbm.save_model('model.txt')\n\nprint('Dumping model to JSON...')\n# dump model to JSON (and save to file)\nmodel_json = gbm.dump_model()\n\nwith open('model.json', 'w+') as f:\n json.dump(model_json, f, indent=4)\n\n# feature names\nprint('Feature names:', gbm.feature_name())\n\n# feature importances\nprint('Feature importances:', list(gbm.feature_importance()))\n\nprint('Loading model to predict...')\n# load model to predict\nbst = lgb.Booster(model_file='model.txt')\n# can only predict with the best iteration (or the saving iteration)\ny_pred = bst.predict(X_test)\n# eval with loaded model\nprint(\"The rmse of loaded model's prediction is:\", mean_squared_error(y_test, y_pred) ** 0.5)\n\nprint('Dumping and loading model with pickle...')\n# dump model with pickle\nwith open('model.pkl', 'wb') as fout:\n pickle.dump(gbm, fout)\n# load model with pickle to predict\nwith open('model.pkl', 'rb') as fin:\n pkl_bst = pickle.load(fin)\n# can predict with any iteration when loaded in pickle way\ny_pred = pkl_bst.predict(X_test, num_iteration=7)\n# eval with loaded model\nprint(\"The rmse of pickled model's prediction is:\", mean_squared_error(y_test, y_pred) ** 0.5)\n\n# continue training\n# init_model accepts:\n# 1. model file name\n# 2. Booster()\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model='model.txt',\n valid_sets=lgb_eval)\n\nprint('Finished 10 - 20 rounds with model file...')\n\n# decay learning rates\n# learning_rates accepts:\n# 1. list/tuple with length = num_boost_round\n# 2. 
function(curr_iter)\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model=gbm,\n learning_rates=lambda iter: 0.05 * (0.99 ** iter),\n valid_sets=lgb_eval)\n\nprint('Finished 20 - 30 rounds with decay learning rates...')\n\n# change other parameters during training\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model=gbm,\n valid_sets=lgb_eval,\n callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)])\n\nprint('Finished 30 - 40 rounds with changing bagging_fraction...')\n\n\n# self-defined objective function\n# f(preds: array, train_data: Dataset) -> grad: array, hess: array\n# log likelihood loss\ndef loglikelihood(preds, train_data):\n labels = train_data.get_label()\n preds = 1. / (1. + np.exp(-preds))\n grad = preds - labels\n hess = preds * (1. - preds)\n return grad, hess\n\n\n# self-defined eval metric\n# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool\n# binary error\n# NOTE: when you do customized loss function, the default prediction value is margin\n# This may make built-in evalution metric calculate wrong results\n# For example, we are doing log likelihood loss, the prediction is score before logistic transformation\n# Keep this in mind when you use the customization\ndef binary_error(preds, train_data):\n labels = train_data.get_label()\n preds = 1. / (1. + np.exp(-preds))\n return 'error', np.mean(labels != (preds > 0.5)), False\n\n\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model=gbm,\n fobj=loglikelihood,\n feval=binary_error,\n valid_sets=lgb_eval)\n\nprint('Finished 40 - 50 rounds with self-defined objective function and eval metric...')\n\n\n# another self-defined eval metric\n# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool\n# accuracy\n# NOTE: when you do customized loss function, the default prediction value is margin\n# This may make built-in evalution metric calculate wrong results\n# For example, we are doing log likelihood loss, the prediction is score before logistic transformation\n# Keep this in mind when you use the customization\ndef accuracy(preds, train_data):\n labels = train_data.get_label()\n preds = 1. / (1. + np.exp(-preds))\n return 'accuracy', np.mean(labels == (preds > 0.5)), True\n\n\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model=gbm,\n fobj=loglikelihood,\n feval=lambda preds, train_data: [binary_error(preds, train_data),\n accuracy(preds, train_data)],\n valid_sets=lgb_eval)\n\nprint('Finished 50 - 60 rounds with self-defined objective function '\n 'and multiple self-defined eval metrics...')\n\nprint('Starting a new training job...')\n\n\n# callback\ndef reset_metrics():\n def callback(env):\n lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train)\n if env.iteration - env.begin_iteration == 5:\n print('Add a new valid dataset at iteration 5...')\n env.model.add_valid(lgb_eval_new, 'new_valid')\n callback.before_iteration = True\n callback.order = 0\n return callback\n\n\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n valid_sets=lgb_train,\n callbacks=[reset_metrics()])\n\nprint('Finished first 10 rounds with callback function...')\n"
] | [
[
"numpy.exp",
"pandas.read_csv",
"numpy.mean",
"sklearn.metrics.mean_squared_error"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
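Editor's note: the advanced_example.py row above demonstrates LightGBM's custom objective/metric hooks. The sketch below is a self-contained variant on synthetic data; it assumes the pre-4.0 lgb.train(fobj=..., feval=...) signature used in that example (LightGBM 4.x moved custom objectives into params) and is not part of the repo.

import numpy as np
import lightgbm as lgb

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 5))
y = (X[:, 0] + 0.5 * rng.normal(size=500) > 0).astype(int)
train_set = lgb.Dataset(X, y)

def loglikelihood(preds, data):
    # Gradient/Hessian of binary log loss w.r.t. the raw margin scores.
    labels = data.get_label()
    p = 1.0 / (1.0 + np.exp(-preds))
    return p - labels, p * (1.0 - p)

def binary_error(preds, data):
    # Custom metric: preds are margins, so apply the sigmoid first.
    labels = data.get_label()
    p = 1.0 / (1.0 + np.exp(-preds))
    return "error", float(np.mean(labels != (p > 0.5))), False

booster = lgb.train({"verbose": -1}, train_set, num_boost_round=20,
                    fobj=loglikelihood, feval=binary_error,
                    valid_sets=[train_set])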
afonchikk/Audio-Classification | [
"6acc7015ec847a64338f6300dca608a0752ba554"
] | [
"predict.py"
] | [
"from tensorflow.keras.models import load_model\nfrom clean import downsample_mono, envelope\nfrom kapre.time_frequency import STFT, Magnitude, ApplyFilterbank, MagnitudeToDecibel\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\nfrom glob import glob\nimport argparse\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef make_prediction(args):\n # load the model\n model = load_model(args.model_fn,\n custom_objects={'STFT': STFT,\n 'Magnitude': Magnitude,\n 'ApplyFilterbank': ApplyFilterbank,\n 'MagnitudeToDecibel': MagnitudeToDecibel})\n\n # find the sound data\n wav_paths = glob('{}/**'.format(args.src_dir), recursive=True)\n wav_paths = sorted([x.replace(os.sep, '/') for x in wav_paths if '.wav' in x])\n classes = sorted(os.listdir(args.src_dir))\n labels = [os.path.split(x)[0].split('/')[-1] for x in wav_paths]\n le = LabelEncoder()\n y_true = le.fit_transform(labels)\n results = []\n\n for z, wav_fn in tqdm(enumerate(wav_paths), total=len(wav_paths)):\n rate, wav = downsample_mono(wav_fn, args.sr)\n mask, env = envelope(wav, rate, threshold=args.threshold)\n clean_wav = wav[mask]\n step = int(args.sr * args.dt)\n batch = []\n\n for i in range(0, clean_wav.shape[0], step):\n sample = clean_wav[i:i + step]\n sample = sample.reshape(-1, 1)\n if sample.shape[0] < step:\n tmp = np.zeros(shape=(step, 1), dtype=np.float32)\n tmp[:sample.shape[0], :] = sample.flatten().reshape(-1, 1)\n sample = tmp\n batch.append(sample)\n X_batch = np.array(batch, dtype=np.float32)\n y_pred = model.predict(X_batch)\n y_mean = np.mean(y_pred, axis=0)\n y_pred = np.argmax(y_mean)\n real_class = os.path.dirname(wav_fn).split('/')[-1]\n print('Actual class: {}, Predicted class: {}'.format(real_class, classes[y_pred]))\n results.append(y_mean)\n\n np.save(os.path.join('logs', args.pred_fn), np.array(results))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Audio Classification Training')\n parser.add_argument('--model_fn', type=str, default='models/lstm.h5',\n help='model file to make predictions')\n parser.add_argument('--pred_fn', type=str, default='y_pred',\n help='fn to write predictions in logs dir')\n parser.add_argument('--src_dir', type=str, default='wavfiles',\n help='directory containing wavfiles to predict')\n parser.add_argument('--dt', type=float, default=1.0,\n help='time in seconds to sample audio')\n parser.add_argument('--sr', type=int, default=16000,\n help='sample rate of clean audio')\n parser.add_argument('--threshold', type=str, default=20,\n help='threshold magnitude for np.int16 dtype')\n args, _ = parser.parse_known_args()\n\n make_prediction(args)\n"
] | [
[
"tensorflow.keras.models.load_model",
"sklearn.preprocessing.LabelEncoder",
"numpy.argmax",
"numpy.mean",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
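Editor's note: the predict.py row above chunks a cleaned waveform into fixed-length windows and zero-pads the trailing short window before batching. The sketch below isolates that step in pure NumPy (no model or audio I/O); the synthetic all-ones signal is an assumption for the demo.

import numpy as np

def make_batch(signal, sr=16000, dt=1.0):
    step = int(sr * dt)
    batch = []
    for i in range(0, signal.shape[0], step):
        sample = signal[i:i + step].reshape(-1, 1)
        if sample.shape[0] < step:  # pad the trailing chunk with zeros
            padded = np.zeros((step, 1), dtype=np.float32)
            padded[:sample.shape[0], :] = sample
            sample = padded
        batch.append(sample)
    return np.array(batch, dtype=np.float32)

print(make_batch(np.ones(40000)).shape)  # (3, 16000, 1): two full windows + one padded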
electr0de/APControllerProjectGit | [
"141ac08e716d6ac8cebe7b144b744744024d8939",
"141ac08e716d6ac8cebe7b144b744744024d8939"
] | [
"simglucose/controller/PaperController.py",
"simglucose/simulation/theta_init.py"
] | [
"from functools import partial\nfrom pprint import pprint\nimport matplotlib.pyplot as plt\n\n# import test2\nfrom simglucose.controller.base import Controller\n#from datetime import datetime, timedelta, time\nimport numpy as np\nimport math\n\npercent_value = 0.05\n\nsign = lambda x: math.copysign(1, x)\n\nnormalize_f = lambda x: (x - 39) / (600 - 39)\n\n\nclass PaperRLController(Controller):\n\n def __init__(self, a_hyper=1, a_hypo=10, current_breakfast_bolus=0.0, current_lunch_bolus=0.0,\n current_dinner_bolus=0.0, current_basal_rate=0.0, current_snack_bolus=0.0, init_state=None):\n super().__init__(init_state)\n np.random.seed(1)\n\n self.a_hyper = a_hyper\n self.hypo = a_hypo\n self.GL = normalize_f(90)\n self.GH = normalize_f(150)\n self.current_basal_rate = current_basal_rate\n self.current_breakfast_bolus = current_breakfast_bolus # bolus means IC ratio\n self.current_lunch_bolus = current_lunch_bolus\n self.current_dinner_bolus = current_dinner_bolus\n # self.current_snack_bolus = current_snack_bolus\n self.basal_theta = []\n self.bolus_theta = []\n # np.random.seed(2)\n # self.bolus_theta = np.random.rand(2).tolist()\n self.h = 0.5\n self.c_sigma = 0.05\n self.m = 0.5\n self.previous_basal_rate = 0.0\n np.random.seed(55)\n self.w = (np.random.rand(2) * 2 - 1).tolist()\n self._lambda = 0.5\n self.gamma = 0.9\n self.z = [0.0, 0.0]\n self.a = 0.5\n self.beta = 0.5\n self.beta_basal = 0.5\n self.value_factor = 10\n # self.time_array = []\n # self.theta_array_1 = []\n # self.theta_array_2 = []\n # self.bolus_time_array = []\n # self.F_1_array = []\n # self.F_2_array = []\n # plt.figure(200)\n # self.fig, self.axis = plt.subplots(4)\n # plt.show()\n # self.axis[0].set_title(\" Hyper feature for basal\")\n # self.axis[1].set_title(\" Hypo feature for basal\")\n # self.axis[2].set_title(\"Hyper theta for basal\")\n # self.axis[3].set_title(\" Hypo theta for basal\")\n\n self.previous_state_basal = None\n self.previous_state_breakfast = None\n self.previous_state_lunch = None\n self.previous_state_dinner = None\n\n def extract_features(self, array):\n M_hyper = []\n M_hypo = []\n\n for element in array:\n if element > 150:\n M_hyper.append(normalize_f(element))\n elif element < 90:\n M_hypo.append(normalize_f(element))\n\n F_hyper = sum([element - self.GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0\n\n F_hypo = sum([self.GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0\n\n return (F_hyper, F_hypo)\n\n def calculate_basal(self, previous_state, basal_array, time):\n F_hyper, F_hypo = self.extract_features(basal_array)\n F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)\n\n #\n # self.F_1_array.append(F_hyper)\n # self.F_2_array.append(F_hypo)\n # self.time_array.append(time)\n #\n # self.axis[0].plot(self.time_array, self.F_1_array)\n #\n # self.axis[1].plot(self.time_array, self.F_2_array)\n #\n # plt.pause(0.001)\n\n Ps = None\n if F_hypo == 0.0:\n Ps = 0\n elif F_hypo > 0.0 and F_hyper == 0.0:\n Ps = -0.1 * F_hypo\n elif F_hypo > 0.0 and F_hyper > 0.0:\n Ps = -0.05 * F_hypo\n\n assert Ps is not None, \"No conditions matched\"\n\n P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), True)\n\n self.previous_basal_rate = self.current_basal_rate\n\n br_change = self.m * P * self.current_basal_rate\n\n # uncomment to enable 5 % change\n # percent_value = 0\n if abs(br_change / self.current_basal_rate) > percent_value:\n self.current_basal_rate += self.current_basal_rate * percent_value * sign(br_change)\n print(\" 
used % changed\")\n else:\n self.current_basal_rate += br_change\n print(\" didn't use % changed\")\n return self.current_basal_rate\n\n def calculate_bolus(self, previous_state, next_state, food_counter, time):\n F_hyper, F_hypo = self.extract_features(next_state)\n\n F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)\n\n #\n # self.F_1_array.append(F_hyper)\n # self.F_2_array.append(F_hypo)\n # self.bolus_time_array.append(time)\n #\n # self.axis[0].plot(self.bolus_time_array, self.F_1_array)\n # self.axis[1].plot(self.bolus_time_array, self.F_2_array)\n\n Ps = None\n if F_hypo == 0.0:\n Ps = 0\n elif F_hypo > 0.0 and F_hyper == 0.0:\n Ps = +0.1 * F_hypo\n elif F_hypo > 0.0 and F_hyper > 0.0:\n Ps = +0.05 * F_hypo\n\n assert Ps is not None, \"No conditions matched\"\n\n P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), False, food_counter)\n\n if food_counter == 0:\n self.current_breakfast_bolus = self.update_bolus(self.current_breakfast_bolus, P)\n return self.current_breakfast_bolus\n\n if food_counter == 1:\n self.current_lunch_bolus = self.update_bolus(self.current_lunch_bolus, P)\n return self.current_lunch_bolus\n\n if food_counter == 2:\n self.current_dinner_bolus = self.update_bolus(self.current_dinner_bolus, P)\n return self.current_dinner_bolus\n # if food_counter == 3:\n # self.current_snack_bolus = self.update_bolus(self.current_snack_bolus, P)\n # return self.current_snack_bolus\n return 0.0\n\n def perform_update(self, Ps, F_old, F, coming_from, food_counter=None):\n\n if coming_from:\n theta = self.basal_theta\n previous_state = self.previous_state_basal\n else:\n theta = self.bolus_theta\n if food_counter == 0:\n previous_state = self.previous_state_breakfast\n elif food_counter == 1:\n previous_state = self.previous_state_lunch\n elif food_counter == 2:\n previous_state = self.previous_state_dinner\n else:\n return 0\n\n # theta = self.theta\n\n print(f\"theta: {theta}\")\n\n Pa = sum([element1 * element2 for element1, element2 in zip(F, theta)])\n\n Pd = self.h * Pa + (1 - self.h) * Ps\n\n sigma = self.c_sigma * (F[0] ** 2 + F[1] ** 2)\n\n Pe = Pd + np.random.normal(0, sigma)\n\n cost = 1 * F[0] + self.value_factor * F[1]\n\n if not previous_state:\n previous_state = sum(\n [((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F_old, self.w)])\n\n next_value = sum(\n [((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F, self.w)])\n d = cost + self.gamma * next_value - previous_state\n\n self.w = [element1 + self.a * d * element2 for element1, element2 in zip(self.w, self.z)]\n\n self.z = [self._lambda * element1 + element2 for element1, element2 in zip(self.z, F)]\n\n if coming_from:\n self.basal_theta = [element1 - self.beta_basal * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for\n element1, element2 in zip(self.basal_theta, F)]\n self.previous_state_basal = next_value\n else:\n self.bolus_theta = [element1 - self.beta * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for\n element1, element2 in zip(self.bolus_theta, F)]\n\n if food_counter == 0:\n self.previous_state_breakfast = next_value\n elif food_counter == 1:\n self.previous_state_lunch = next_value\n else:\n self.previous_state_dinner = next_value\n\n assert sigma > 0.0000001, \"sigma is too low\"\n # self.theta_array_1.append(self.theta[0])\n # self.theta_array_2.append(self.theta[1])\n # self.axis[2].plot(self.time_array, self.theta_array_1)\n # self.axis[3].plot(self.time_array, 
self.theta_array_2)\n\n return Pe\n\n def update_bolus(self, old_bolus, P):\n fusion_rate = old_bolus + self.m * P * old_bolus\n\n l = 1 if (self.current_basal_rate > self.previous_basal_rate and fusion_rate < old_bolus) or (\n self.current_basal_rate < self.previous_basal_rate and fusion_rate > old_bolus) else 0\n\n # fusion_rate = l * old_bolus + (1 - l) * fusion_rate\n bl_change = fusion_rate - old_bolus\n\n if abs(bl_change / old_bolus) > percent_value:\n old_bolus += sign(bl_change) * old_bolus * percent_value\n print(\" used % changed\")\n else:\n old_bolus += bl_change\n print(\" didn't use % changed\")\n return old_bolus\n\n# if __name__ == '__main__':\n#\n# GL = normalize_f(90)\n# GH = normalize_f(150)\n#\n# def extract_features(array):\n# M_hyper = []\n# M_hypo = []\n#\n# for element in array:\n# if element > 150:\n# M_hyper.append(normalize_f(element))\n# elif element < 90:\n# M_hypo.append(normalize_f(element))\n#\n# F_hyper = sum([element - GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0\n#\n# F_hypo = sum([GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0\n#\n# return (F_hyper, F_hypo)\n#\n# array = test2.array\n# print(extract_features(array))\n",
"from dataclasses import dataclass\nfrom pprint import pprint\n\nimport numpy as np\nfrom PyIF import te_compute as te\n\n\n@dataclass\nclass ThetaInit:\n def __init__(self, u2ss, BW, TDI):\n # constants\n self.u2ss = u2ss\n self.BW = BW\n self.TDI = TDI[0]\n self.aIOB = 5.0\n\n self.d = int(21 / 3)\n\n self.Wh = 0.1\n self.Wl = -0.2\n\n # signals\n self.g_sig = []\n\n def send_glucose(self, g):\n self.g_sig.append(g)\n\n def calculate_theta(self):\n g_sig = np.array(self.g_sig)\n\n basal = self._calc_basal()\n u0 = self._calc_u0(basal)\n IOBbasal = self._calc_IOBbasal(u0)\n IOB_TDI = self._calc_IOB_TDI()\n\n dg_sig = self._calc_dg()\n d2g_sig = self._calc_d2g(dg_sig)\n IOB_max = self._calc_IOBmax(IOBbasal, IOB_TDI, g_sig, dg_sig, d2g_sig)\n IA = IOB_max + basal\n\n g_sig = g_sig[self.d:]\n IA = IA[:len(g_sig)]\n\n TE = te.te_compute(IA, g_sig, k=1, embedding=1, safetyCheck=False, GPU=False)\n return self.Wh / TE, self.Wl / TE\n\n def _calc_basal(self):\n return self.u2ss * self.BW / 6000 * 60\n\n def _calc_u0(self, basal):\n if basal >= 1.25:\n return 0.85 * basal\n if self.g_sig[0] >= 100:\n return 1 * basal\n if self.g_sig[0] < 100:\n return 0.75 * basal\n\n raise Exception(\"no conditions matched\")\n\n def _calc_IOBbasal(self, u0):\n return self.aIOB * u0\n\n def _calc_IOB_TDI(self):\n if self.TDI <= 25:\n return 0.11 * self.TDI\n if 25 < self.TDI <= 35:\n return 0.125 * self.TDI\n if 35 < self.TDI <= 45:\n return 0.12 * self.TDI\n if 45 < self.TDI <= 55:\n return 0.175 * self.TDI\n if 55 < self.TDI:\n return 0.2 * self.TDI\n\n raise Exception(\"no conditions matched\")\n\n def _calc_dg(self):\n return np.diff(self.g_sig) / 3\n\n def _calc_d2g(self, dg_sig):\n return np.diff(dg_sig) / 3\n\n def _calc_IOBmax(self, IOBbasal, IOB_TDI, g_sig, dg_sig, d2g_sig):\n g_sig = g_sig[2:]\n dg_sig = dg_sig[1:]\n\n def calc(g, dg, ddg):\n if g < 125:\n return 1.10 * IOBbasal\n\n if 150 <= g and dg > 0.25 and ddg > 0.035:\n return max(IOB_TDI, 2.5 * IOBbasal)\n\n if 175 <= g and dg > 0.35 and ddg > 0.035:\n return max(IOB_TDI, 3.5 * IOBbasal)\n\n if 200 <= g and dg > -0.05:\n return max(IOB_TDI, 3.5 * IOBbasal)\n\n if 200 <= g and dg > 0.15:\n return max(IOB_TDI, 4.5 * IOBbasal)\n\n if 200 <= g and dg > 0.3:\n return max(IOB_TDI, 6.5 * IOBbasal)\n\n if self.TDI < 30:\n return 0.95 * IOBbasal\n\n if 125 <= g:\n return 1.35 * IOBbasal\n\n raise Exception(\"no conditions matched\")\n\n return np.array([calc(g, dg, ddg) for g, dg, ddg in zip(g_sig, dg_sig, d2g_sig)])\n"
] | [
[
"numpy.random.normal",
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.array",
"numpy.diff"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
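Editor's note: the PaperController row above derives its RL state from two glucose features. The sketch below restates that extraction on its own: readings are normalised over the 39-600 mg/dL range, the hyper feature averages the excess above the normalised 150 mg/dL bound, and the hypo feature averages the deficit below 90 mg/dL. Thresholds and the normalisation match the source; the sample readings are illustrative.

normalize = lambda g: (g - 39) / (600 - 39)
GL, GH = normalize(90), normalize(150)

def extract_features(readings):
    hyper = [normalize(g) - GH for g in readings if g > 150]
    hypo = [GL - normalize(g) for g in readings if g < 90]
    f_hyper = sum(hyper) / len(hyper) if hyper else 0.0
    f_hypo = sum(hypo) / len(hypo) if hypo else 0.0
    return f_hyper, f_hypo

print(extract_features([80, 120, 200, 250]))  # small hypo deficit, larger hyper excess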
0Miquel/LIIF-temporal | [
"b992cb87cb9bdeba6d4c9bc3960b36ba52a1ba75"
] | [
"models/rdn.py"
] | [
"# Residual Dense Network for Image Super-Resolution\r\n# https://arxiv.org/abs/1802.08797\r\n# modified from: https://github.com/thstkdgus35/EDSR-PyTorch\r\n\r\nfrom argparse import Namespace\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom models import register\r\n\r\n\r\nclass RDB_Conv(nn.Module):\r\n def __init__(self, inChannels, growRate, kSize=3):\r\n super(RDB_Conv, self).__init__()\r\n Cin = inChannels\r\n G = growRate\r\n self.conv = nn.Sequential(*[\r\n #nn.Conv2d(Cin, G, kSize, padding=(kSize-1)//2, stride=1),\r\n nn.Conv3d(Cin, G, kSize, padding=(kSize - 1) // 2, stride=1),\r\n nn.ReLU()\r\n ])\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n return torch.cat((x, out), 1)\r\n\r\nclass RDB(nn.Module):\r\n def __init__(self, growRate0, growRate, nConvLayers, kSize=3):\r\n super(RDB, self).__init__()\r\n G0 = growRate0\r\n G = growRate\r\n C = nConvLayers\r\n\r\n convs = []\r\n for c in range(C):\r\n convs.append(RDB_Conv(G0 + c*G, G))\r\n self.convs = nn.Sequential(*convs)\r\n\r\n # Local Feature Fusion\r\n self.LFF = nn.Conv3d(G0 + C * G, G0, 1, padding=0, stride=1)\r\n #self.LFF = nn.Conv2d(G0 + C*G, G0, 1, padding=0, stride=1)\r\n\r\n def forward(self, x):\r\n return self.LFF(self.convs(x)) + x\r\n\r\nclass RDN(nn.Module):\r\n def __init__(self, args):\r\n super(RDN, self).__init__()\r\n self.args = args\r\n r = args.scale[0]\r\n G0 = args.G0\r\n kSize = args.RDNkSize\r\n\r\n # number of RDB blocks, conv layers, out channels\r\n self.D, C, G = {\r\n 'A': (20, 6, 32),\r\n 'B': (16, 8, 64),\r\n }[args.RDNconfig]\r\n\r\n # Shallow feature extraction net\r\n #self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n #self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n self.SFENet1 = nn.Conv3d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n self.SFENet2 = nn.Conv3d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n\r\n # Redidual dense blocks and dense feature fusion\r\n self.RDBs = nn.ModuleList()\r\n for i in range(self.D):\r\n self.RDBs.append(\r\n RDB(growRate0 = G0, growRate = G, nConvLayers = C)\r\n )\r\n\r\n # Global Feature Fusion\r\n self.GFF = nn.Sequential(*[\r\n #nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),\r\n #nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n nn.Conv3d(self.D * G0, G0, 1, padding=0, stride=1),\r\n nn.Conv3d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)\r\n ])\r\n\r\n if args.no_upsampling:\r\n self.out_dim = G0\r\n else:\r\n self.out_dim = args.n_colors\r\n # Up-sampling net\r\n if r == 2 or r == 3:\r\n self.UPNet = nn.Sequential(*[\r\n nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),\r\n nn.PixelShuffle(r),\r\n nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)\r\n ])\r\n elif r == 4:\r\n self.UPNet = nn.Sequential(*[\r\n nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),\r\n nn.PixelShuffle(2),\r\n nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),\r\n nn.PixelShuffle(2),\r\n nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)\r\n ])\r\n else:\r\n raise ValueError(\"scale must be 2 or 3 or 4.\")\r\n\r\n def forward(self, x):\r\n f__1 = self.SFENet1(x)\r\n x = self.SFENet2(f__1)\r\n\r\n RDBs_out = []\r\n for i in range(self.D):\r\n x = self.RDBs[i](x)\r\n RDBs_out.append(x)\r\n\r\n x = self.GFF(torch.cat(RDBs_out,1))\r\n x += f__1\r\n\r\n if self.args.no_upsampling:\r\n return x\r\n else:\r\n return self.UPNet(x)\r\n\r\n\r\n@register('rdn')\r\ndef make_rdn(G0=64, RDNkSize=3, 
RDNconfig='B',\r\n scale=2, no_upsampling=False):\r\n args = Namespace()\r\n args.G0 = G0\r\n args.RDNkSize = RDNkSize\r\n args.RDNconfig = RDNconfig\r\n\r\n args.scale = [scale]\r\n args.no_upsampling = no_upsampling\r\n\r\n args.n_colors = 3\r\n return RDN(args)\r\n"
] | [
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.PixelShuffle",
"torch.nn.Conv3d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
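Editor's note: the rdn.py row above adapts RDN's residual dense blocks from Conv2d to Conv3d. The toy-scale sketch below shows the same pattern: each conv sees the concatenation of all previous feature maps, and a 1x1x1 local-fusion conv projects back to G0 channels before the residual add. Channel sizes here are illustrative, not the paper's A/B configurations.

import torch
import torch.nn as nn

class TinyRDB(nn.Module):
    def __init__(self, G0=8, G=4, C=3, k=3):
        super().__init__()
        self.convs = nn.ModuleList([
            nn.Sequential(nn.Conv3d(G0 + c * G, G, k, padding=k // 2), nn.ReLU())
            for c in range(C)
        ])
        self.lff = nn.Conv3d(G0 + C * G, G0, 1)  # local feature fusion

    def forward(self, x):
        feats = x
        for conv in self.convs:
            feats = torch.cat((feats, conv(feats)), dim=1)  # dense connectivity
        return self.lff(feats) + x                          # local residual learning

x = torch.randn(1, 8, 4, 16, 16)  # (batch, channels, depth, H, W)
print(TinyRDB()(x).shape)         # torch.Size([1, 8, 4, 16, 16])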
danagi/tianshou | [
"c97aa4065ee8464bd5897bb86f1f81abd8e2cff9",
"c97aa4065ee8464bd5897bb86f1f81abd8e2cff9"
] | [
"tianshou/policy/modelfree/discrete_sac.py",
"examples/mujoco/point_maze_td3.py"
] | [
"import torch\nimport numpy as np\nfrom torch.distributions import Categorical\nfrom typing import Any, Dict, Tuple, Union, Optional\n\nfrom tianshou.policy import SACPolicy\nfrom tianshou.data import Batch, ReplayBuffer, to_torch\n\n\nclass DiscreteSACPolicy(SACPolicy):\n \"\"\"Implementation of SAC for Discrete Action Settings. arXiv:1910.07207.\n\n :param torch.nn.Module actor: the actor network following the rules in\n :class:`~tianshou.policy.BasePolicy`. (s -> logits)\n :param torch.optim.Optimizer actor_optim: the optimizer for actor network.\n :param torch.nn.Module critic1: the first critic network. (s -> Q(s))\n :param torch.optim.Optimizer critic1_optim: the optimizer for the first\n critic network.\n :param torch.nn.Module critic2: the second critic network. (s -> Q(s))\n :param torch.optim.Optimizer critic2_optim: the optimizer for the second\n critic network.\n :param float tau: param for soft update of the target network, defaults to\n 0.005.\n :param float gamma: discount factor, in [0, 1], defaults to 0.99.\n :param (float, torch.Tensor, torch.optim.Optimizer) or float alpha: entropy\n regularization coefficient, default to 0.2.\n If a tuple (target_entropy, log_alpha, alpha_optim) is provided, then\n alpha is automatatically tuned.\n :param bool reward_normalization: normalize the reward to Normal(0, 1),\n defaults to ``False``.\n :param bool ignore_done: ignore the done flag while training the policy,\n defaults to ``False``.\n\n .. seealso::\n\n Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed\n explanation.\n \"\"\"\n\n def __init__(\n self,\n actor: torch.nn.Module,\n actor_optim: torch.optim.Optimizer,\n critic1: torch.nn.Module,\n critic1_optim: torch.optim.Optimizer,\n critic2: torch.nn.Module,\n critic2_optim: torch.optim.Optimizer,\n tau: float = 0.005,\n gamma: float = 0.99,\n alpha: Union[\n float, Tuple[float, torch.Tensor, torch.optim.Optimizer]\n ] = 0.2,\n reward_normalization: bool = False,\n ignore_done: bool = False,\n estimation_step: int = 1,\n **kwargs: Any,\n ) -> None:\n super().__init__(actor, actor_optim, critic1, critic1_optim, critic2,\n critic2_optim, (-np.inf, np.inf), tau, gamma, alpha,\n reward_normalization, ignore_done, estimation_step,\n **kwargs)\n self._alpha: Union[float, torch.Tensor]\n\n def forward( # type: ignore\n self,\n batch: Batch,\n state: Optional[Union[dict, Batch, np.ndarray]] = None,\n input: str = \"obs\",\n **kwargs: Any,\n ) -> Batch:\n obs = batch[input]\n logits, h = self.actor(obs, state=state, info=batch.info)\n dist = Categorical(logits=logits)\n act = dist.sample()\n return Batch(logits=logits, act=act, state=h, dist=dist)\n\n def _target_q(\n self, buffer: ReplayBuffer, indice: np.ndarray\n ) -> torch.Tensor:\n batch = buffer[indice] # batch.obs: s_{t+n}\n with torch.no_grad():\n obs_next_result = self(batch, input=\"obs_next\")\n dist = obs_next_result.dist\n target_q = dist.probs * torch.min(\n self.critic1_old(batch.obs_next),\n self.critic2_old(batch.obs_next),\n )\n target_q = target_q.sum(dim=-1) + self._alpha * dist.entropy()\n return target_q\n\n def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:\n weight = batch.pop(\"weight\", 1.0)\n target_q = batch.returns.flatten()\n act = to_torch(\n batch.act[:, np.newaxis], device=target_q.device, dtype=torch.long)\n\n # critic 1\n current_q1 = self.critic1(batch.obs).gather(1, act).flatten()\n td1 = current_q1 - target_q\n critic1_loss = (td1.pow(2) * weight).mean()\n\n self.critic1_optim.zero_grad()\n 
critic1_loss.backward()\n self.critic1_optim.step()\n\n # critic 2\n current_q2 = self.critic2(batch.obs).gather(1, act).flatten()\n td2 = current_q2 - target_q\n critic2_loss = (td2.pow(2) * weight).mean()\n\n self.critic2_optim.zero_grad()\n critic2_loss.backward()\n self.critic2_optim.step()\n batch.weight = (td1 + td2) / 2.0 # prio-buffer\n\n # actor\n dist = self(batch).dist\n entropy = dist.entropy()\n with torch.no_grad():\n current_q1a = self.critic1(batch.obs)\n current_q2a = self.critic2(batch.obs)\n q = torch.min(current_q1a, current_q2a)\n actor_loss = -(self._alpha * entropy\n + (dist.probs * q).sum(dim=-1)).mean()\n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n\n if self._is_auto_alpha:\n log_prob = -entropy.detach() + self._target_entropy\n alpha_loss = -(self._log_alpha * log_prob).mean()\n self._alpha_optim.zero_grad()\n alpha_loss.backward()\n self._alpha_optim.step()\n self._alpha = self._log_alpha.detach().exp()\n\n self.sync_weight()\n\n result = {\n \"loss/actor\": actor_loss.item(),\n \"loss/critic1\": critic1_loss.item(),\n \"loss/critic2\": critic2_loss.item(),\n }\n if self._is_auto_alpha:\n result[\"loss/alpha\"] = alpha_loss.item()\n result[\"alpha\"] = self._alpha.item() # type: ignore\n\n return result\n",
"import gym\nimport torch\nimport pprint\nimport argparse\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom tianshou.policy import TD3Policy\nfrom tianshou.utils.net.common import Net\nfrom tianshou.env import SubprocVectorEnv\nfrom tianshou.exploration import GaussianNoise\nfrom tianshou.trainer import offpolicy_trainer\nfrom tianshou.data import Collector, ReplayBuffer\nfrom tianshou.utils.net.continuous import Actor, Critic\n\nfrom mujoco.register import reg\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--task', type=str, default='PointMaze-v1')\n parser.add_argument('--seed', type=int, default=1626)\n parser.add_argument('--buffer-size', type=int, default=20000)\n parser.add_argument('--actor-lr', type=float, default=3e-5)\n parser.add_argument('--critic-lr', type=float, default=1e-4)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--tau', type=float, default=0.005)\n parser.add_argument('--exploration-noise', type=float, default=0.1)\n parser.add_argument('--policy-noise', type=float, default=0.2)\n parser.add_argument('--noise-clip', type=float, default=0.5)\n parser.add_argument('--update-actor-freq', type=int, default=2)\n parser.add_argument('--epoch', type=int, default=100)\n parser.add_argument('--step-per-epoch', type=int, default=2400)\n parser.add_argument('--collect-per-step', type=int, default=10)\n parser.add_argument('--batch-size', type=int, default=128)\n parser.add_argument('--layer-num', type=int, default=1)\n parser.add_argument('--training-num', type=int, default=8)\n parser.add_argument('--test-num', type=int, default=100)\n parser.add_argument('--logdir', type=str, default='log')\n parser.add_argument('--render', type=float, default=0.)\n parser.add_argument(\n '--device', type=str,\n default='cuda' if torch.cuda.is_available() else 'cpu')\n return parser.parse_args()\n\n\ndef test_td3(args=get_args()):\n reg()\n env = gym.make(args.task)\n args.state_shape = env.observation_space.shape or env.observation_space.n\n args.action_shape = env.action_space.shape or env.action_space.n\n args.max_action = env.action_space.high[0]\n # train_envs = gym.make(args.task)\n train_envs = SubprocVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.training_num)])\n # test_envs = gym.make(args.task)\n test_envs = SubprocVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.test_num)])\n # seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n train_envs.seed(args.seed)\n test_envs.seed(args.seed)\n # model\n net = Net(args.layer_num, args.state_shape, device=args.device)\n actor = Actor(\n net, args.action_shape,\n args.max_action, args.device\n ).to(args.device)\n actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr)\n net = Net(args.layer_num, args.state_shape,\n args.action_shape, concat=True, device=args.device)\n critic1 = Critic(net, args.device).to(args.device)\n critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr)\n critic2 = Critic(net, args.device).to(args.device)\n critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr)\n policy = TD3Policy(\n actor, actor_optim, critic1, critic1_optim, critic2, critic2_optim,\n action_range=[env.action_space.low[0], env.action_space.high[0]],\n tau=args.tau, gamma=args.gamma,\n exploration_noise=GaussianNoise(sigma=args.exploration_noise),\n policy_noise=args.policy_noise,\n update_actor_freq=args.update_actor_freq,\n noise_clip=args.noise_clip,\n 
reward_normalization=True, ignore_done=True)\n # collector\n train_collector = Collector(\n policy, train_envs, ReplayBuffer(args.buffer_size))\n test_collector = Collector(policy, test_envs)\n # train_collector.collect(n_step=args.buffer_size)\n # log\n writer = SummaryWriter(args.logdir + '/' + 'td3')\n\n def stop_fn(mean_rewards):\n if env.spec.reward_threshold:\n return mean_rewards >= env.spec.reward_threshold\n else:\n return False\n\n # trainer\n result = offpolicy_trainer(\n policy, train_collector, test_collector, args.epoch,\n args.step_per_epoch, args.collect_per_step, args.test_num,\n args.batch_size, stop_fn=stop_fn, writer=writer)\n assert stop_fn(result['best_reward'])\n if __name__ == '__main__':\n pprint.pprint(result)\n # Let's watch its performance!\n policy.eval()\n test_envs.seed(args.seed)\n test_collector.reset()\n result = test_collector.collect(n_episode=[1] * args.test_num,\n render=args.render)\n print(f'Final reward: {result[\"rew\"]}, length: {result[\"len\"]}')\n\n\nif __name__ == '__main__':\n test_td3()\n"
] | [
[
"torch.min",
"torch.distributions.Categorical",
"torch.no_grad"
],
[
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
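The row above pairs tianshou's `DiscreteSACPolicy` with a TD3 training script. The distinctive step in the discrete SAC variant (arXiv:1910.07207) is that the soft state value is an exact expectation over the categorical policy rather than a sampled log-probability. Below is a minimal standalone sketch of that computation; it is an annotation, not part of the dataset row, and the toy tensors `logits`, `q1`, `q2`, and `alpha` are illustrative values.

```python
import torch
from torch.distributions import Categorical

# Minimal sketch (not part of the dataset row): the discrete-SAC soft state
# value, computed as an exact expectation over the categorical policy.
# `logits`, `q1`, `q2`, `alpha` are illustrative toy values.
batch, n_actions = 4, 6
logits = torch.randn(batch, n_actions)   # actor output (s -> logits)
q1 = torch.randn(batch, n_actions)       # critic1_old(s) -> Q(s, .)
q2 = torch.randn(batch, n_actions)       # critic2_old(s) -> Q(s, .)
alpha = 0.2                              # entropy coefficient

dist = Categorical(logits=logits)
# V(s) = E_a[min(Q1, Q2)(s, a)] + alpha * H(pi(.|s)), mirroring the
# `_target_q` computation in the row's DiscreteSACPolicy:
v = (dist.probs * torch.min(q1, q2)).sum(dim=-1) + alpha * dist.entropy()
print(v.shape)  # torch.Size([4])
```

The same expectation trick appears in the row's actor loss, `-(alpha * entropy + (dist.probs * q).sum(dim=-1)).mean()`, which avoids the reparameterization step continuous SAC needs.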
mayanks888/second.pytorch | [
"02d37885a543ee46516648dcab7db8f5d677a179",
"02d37885a543ee46516648dcab7db8f5d677a179",
"02d37885a543ee46516648dcab7db8f5d677a179"
] | [
"second/mayank_scripts/infer_ros_melodic_pretained_same_frame.py",
"second/mayank_scripts/infer_ros2.py",
"second/pytorch/models/voxelnet_mayank.py"
] | [
"#!/usr/bin/env python\n# ROS node libs\n\nimport time\n\nimport numpy as np\nimport rospy\nimport torch\n# from geometry_msgs.msg import Quaternion, Pose, Point, Vector3\nfrom pyquaternion import Quaternion\nfrom google.protobuf import text_format\nfrom sensor_msgs.msg import PointCloud2\nfrom std_msgs.msg import Header, ColorRGBA\n# from cv_bridge import CvBridge, CvBridgeError\nfrom visualization_msgs.msg import Marker, MarkerArray\n\nfrom second.protos import pipeline_pb2\n# from second.utils import simplevis\nfrom second.pytorch.train import build_network\nfrom second.utils import config_tool\nfrom std_msgs.msg import Int16, Float32MultiArray\nfrom jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray\n# import ros_numpy\n\n\n# GPU settings: Select GPUs to use. Coment it to let the system decide\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nclass ros_tensorflow_obj():\n def __init__(self):\n # ## Initial msg\n rospy.loginfo(' ## Starting ROS interface ##')\n # ## Load a (frozen) Tensorflow model into memory.\n print(\"ready to process----------------------------------------------------------\")\n ####################################################################################333\n # config_path = \"../configs/nuscenes/all.pp.largea.config\"\n # config_path = \"/home/mayank_sati/codebase/python/lidar/second.pytorch/second/configs/pointpillars/car/xyres_28.config\"\n config_path = \"/home/mayank_sati/codebase/python/lidar/second.pytorch/second/configs/pointpillars/car/xyres_24.config\"\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n # config_tool.change_detection_range(model_cfg, [-50, -50, 50, 50])\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # ckpt_path = \"../checkpoint/voxelnet-140670.tckpt\"\n ckpt_path=\"/home/mayank_sati/Downloads/pretrained_models_v1.5/pp_model_for_nuscenes_pretrain/voxelnet-296960.tckpt\"\n net = build_network(model_cfg).to(device).eval()\n net.load_state_dict(torch.load(ckpt_path))\n target_assigner = net.target_assigner\n self.voxel_generator = net.voxel_generator\n\n class_names = target_assigner.classes\n\n grid_size = self.voxel_generator.grid_size\n feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(model_cfg)\n feature_map_size = [*feature_map_size, 1][::-1]\n anchors = target_assigner.generate_anchors(feature_map_size)[\"anchors\"]\n anchors = torch.tensor(anchors, dtype=torch.float32, device=device)\n anchors = anchors.view(1, -1, 7)\n # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n feature_map_size = [1, 50, 50]\n ret = target_assigner.generate_anchors(feature_map_size)\n class_names = target_assigner.classes\n anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)\n anchors_list = []\n for k, v in anchors_dict.items():\n anchors_list.append(v[\"anchors\"])\n\n # anchors = ret[\"anchors\"]\n anchors = np.concatenate(anchors_list, axis=0)\n anchors = anchors.reshape([-1, target_assigner.box_ndim])\n assert np.allclose(anchors, ret[\"anchors\"].reshape(-1, target_assigner.box_ndim))\n matched_thresholds = ret[\"matched_thresholds\"]\n unmatched_thresholds = ret[\"unmatched_thresholds\"]\n # anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])\n anchors_bv = 2\n anchor_cache = {\n \"anchors\": anchors,\n \"anchors_bv\": anchors_bv,\n 
\"matched_thresholds\": matched_thresholds,\n \"unmatched_thresholds\": unmatched_thresholds,\n \"anchors_dict\": anchors_dict,\n }\n anchors = torch.tensor(anchors, dtype=torch.float32, device=device)\n self.anchors = anchors.view(1, -1, 7)\n self.net = net\n self.device = device\n ##########################################################################################\n # self.marker_publisher = rospy.Publisher('visualization_marker', MarkerArray, queue_size=5)\n self.pcl_publisher = rospy.Publisher('result_pcl', PointCloud2, queue_size=1)\n ############\n # [print(n.name) for n in tf.get_default_graph().as_graph_def().node]\n # ROS environment setup\n # ## Define subscribers\n self.subscribers_def()\n # ## Define publishers\n self.publishers_def()\n self.now = rospy.Time.now()\n\n # Define subscribers\n def subscribers_def(self):\n # subs_topic = '/kitti/velo/pointcloud'\n #subs_topic = '/apollo/sensor/velodyne64/compensator/PointCloud2'\n # subs_topic = '/velodyne64_points'\n\n # subs_topic = '/apollo/sensor/velodyne64/PointCloud2'\n # subs_topic = '/points_raw'\n # subs_topic = '/livox/lidar'\n # subs_topic = '/apollo/sensor/velodyne32C/compensator/PointCloud2'\n subs_topic = '/lidar_top'\n self._sub = rospy.Subscriber(subs_topic, PointCloud2, self.lidar_callback, queue_size=10, buff_size=2 ** 24)\n # mydata = rospy.Subscriber( subs_topic , PointCloud2, self.lidar_callback, queue_size=1, buff_size=2**24)\n # print(mydata)\n\n # self._sub = rospy.Subscriber( subs_topic , Image, self.lidar_callback, queue_size=1, buff_size=100)\n\n # Define publishers\n def publishers_def(self):\n self._pub = rospy.Publisher('pc_bbox_topic', Float32MultiArray, queue_size=1)\n self.pub_arr_bbox = rospy.Publisher(\"Detections\", BoundingBoxArray, queue_size=1)\n\n # Camera image callback\n def lidar_callback(self, point_cl_msg):\n arr_bbox = BoundingBoxArray()\n ############################################################################3\n # lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)\n # points = lidar.reshape(-1, 4)\n # print('gotit\"')\n # pc = ros_numpy.numpify(point_cl_msg)\n # points = np.zeros((pc.shape[0], 4))\n # points[:, 0] = pc['x']\n # points[:, 1] = pc['y']\n # points[:, 2] = pc['z']\n # points[:, 3] = pc['intensity']\n # points[:, 3] /= 255\n #########################################################333\n lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)\n points = lidar.reshape(-1, 4)\n points[:, 3] /= 255\n\n #######################################################################\n res = self.voxel_generator.generate(points, max_voxels=30000)\n voxels = res[\"voxels\"]\n coords = res[\"coordinates\"]\n num_points = res[\"num_points_per_voxel\"]\n num_voxels = np.array([voxels.shape[0]], dtype=np.int64)\n # print(\"voxel_generator_time\",(time.time() - t)*1000)\n ###############################################################\n # print(voxels.shape)\n # add batch idx to coords\n coords = np.pad(coords, ((0, 0), (1, 0)), mode='constant', constant_values=0)\n voxels = torch.tensor(voxels, dtype=torch.float32, device=self.device)\n coords = torch.tensor(coords, dtype=torch.int32, device=self.device)\n num_points = torch.tensor(num_points, dtype=torch.int32, device=self.device)\n # print(\"conversion time\",(time.time() - t)*1000)\n example = {\"anchors\": self.anchors, \"voxels\": voxels, \"num_points\": num_points, \"coordinates\": coords, }\n t2 = time.time()\n pred = self.net(example)[0]\n # print(pred)\n # print(\"prediction\",(time.time() - t2)*1000)\n # 
print(\"total_time\",(time.time() - t)*1000)\n boxes_lidar = pred[\"box3d_lidar\"].detach().cpu().numpy()\n scores_lidar = pred[\"scores\"].detach().cpu().numpy()\n labels_lidar = pred[\"label_preds\"].detach().cpu().numpy()\n ##############################3333\n threshold = 0.2\n keep = np.where((scores_lidar >= threshold))[0]\n scores_lidar = scores_lidar[keep]\n print(scores_lidar)\n boxes_lidar = boxes_lidar[keep]\n labels_lidar = labels_lidar[keep]\n # sco\n # print(scores_lidar)\n ################################################################################\n # self.show_text_in_rviz_mullti_cube(boxes_lidar,point_cl_msg)\n # self.show_text_in_rviz_mullti_sphere(boxes_lidar,point_cl_msg)\n ##################################################################################\n # apollo integration\n # numboxes = np.squeeze(scores_lidar)\n numboxes = len(scores_lidar)\n tl_bbox = Float32MultiArray()\n iLen = boxes_lidar.shape[0]\n lidar_bbox = Float32MultiArray()\n print('Processing no of object:', iLen)\n\n if (numboxes) >= 1:\n tmp = -np.ones(10 * (numboxes) + 1)\n for i in range(0, int(numboxes)):\n try:\n score = float((scores_lidar)[i])\n if (boxes_lidar.shape[0]) == 1:\n bboxes = [float(v) for v in (boxes_lidar)[i]]\n else:\n bboxes = [float(v) for v in np.squeeze(boxes_lidar)[i]]\n tmp[0] = numboxes\n tmp[10 * i + 1] = score\n tmp[10 * i + 2] = bboxes[0]\n tmp[10 * i + 3] = bboxes[1]\n tmp[10 * i + 4] = bboxes[2]\n tmp[10 * i + 5] = bboxes[3]\n tmp[10 * i + 6] = bboxes[4]\n tmp[10 * i + 7] = bboxes[5]\n tmp[10 * i + 8] = bboxes[6]\n tmp[10 * i + 9] = 0\n tmp[10 * i + 10] = 0\n bbox = BoundingBox()\n # bbox.header.frame_id = point_cl_msg.header.frame_id\n # bbox.header.frame_id = 'livox_frame'\n bbox.header.frame_id = 'lidar_top'\n q = Quaternion(axis=(0, 0, 1), radians=-1.0 * float(boxes_lidar[i][6]))\n bbox.pose.orientation.x = q.x\n bbox.pose.orientation.y = q.y\n bbox.pose.orientation.z = q.z\n bbox.pose.orientation.w = q.w\n bbox.pose.position.x = float(boxes_lidar[i][0])\n bbox.pose.position.y = float(boxes_lidar[i][1])\n bbox.pose.position.z = float(boxes_lidar[i][2])\n bbox.dimensions.x = float(boxes_lidar[i][3])\n bbox.dimensions.y = float(boxes_lidar[i][4])\n bbox.dimensions.z = float(boxes_lidar[i][5])\n arr_bbox.boxes.append(bbox)\n\n except:\n print(\"I am here\")\n # here data for publishing\n tl_bbox.data = tmp\n self._pub.publish(tl_bbox)\n arr_bbox.header.frame_id = point_cl_msg.header.frame_id\n self.pub_arr_bbox.publish(arr_bbox)\n\n point_cl_msg.header.frame_id = point_cl_msg.header.frame_id\n self.pcl_publisher.publish(point_cl_msg)\n arr_bbox.boxes.clear()\n\n\ndef spin(self):\n rospy.spin()\n\n\ndef main():\n rospy.init_node('LIDAR_NODE', anonymous=True)\n tf_ob = ros_tensorflow_obj()\n\n # tf_ob.subscribers_def\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python\n# ROS node libs\n\nimport time\n\nimport numpy as np\nimport rospy\nimport torch\n# from geometry_msgs.msg import Quaternion, Pose, Point, Vector3\nfrom pyquaternion import Quaternion\nfrom google.protobuf import text_format\nfrom sensor_msgs.msg import PointCloud2\nfrom std_msgs.msg import Header, ColorRGBA\n# from cv_bridge import CvBridge, CvBridgeError\nfrom visualization_msgs.msg import Marker, MarkerArray\n\nfrom second.protos import pipeline_pb2\n# from second.utils import simplevis\nfrom second.pytorch.train import build_network\nfrom second.utils import config_tool\nfrom std_msgs.msg import Int16, Float32MultiArray\nfrom jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray\n# import ros_numpy\n\n\n# GPU settings: Select GPUs to use. Coment it to let the system decide\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nclass ros_tensorflow_obj():\n def __init__(self):\n # ## Initial msg\n rospy.loginfo(' ## Starting ROS interface ##')\n # ## Load a (frozen) Tensorflow model into memory.\n print(\"ready to process----------------------------------------------------------\")\n ####################################################################################333\n config_path = \"../configs/nuscenes/all.pp.largea.config\"\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n # config_tool.change_detection_range(model_cfg, [-50, -50, 50, 50])\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n ckpt_path = \"../checkpoint/voxelnet-140670.tckpt\"\n net = build_network(model_cfg).to(device).eval()\n net.load_state_dict(torch.load(ckpt_path))\n target_assigner = net.target_assigner\n self.voxel_generator = net.voxel_generator\n\n class_names = target_assigner.classes\n\n grid_size = self.voxel_generator.grid_size\n feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(model_cfg)\n feature_map_size = [*feature_map_size, 1][::-1]\n anchors = target_assigner.generate_anchors(feature_map_size)[\"anchors\"]\n anchors = torch.tensor(anchors, dtype=torch.float32, device=device)\n anchors = anchors.view(1, -1, 7)\n # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n feature_map_size = [1, 50, 50]\n ret = target_assigner.generate_anchors(feature_map_size)\n class_names = target_assigner.classes\n anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)\n anchors_list = []\n for k, v in anchors_dict.items():\n anchors_list.append(v[\"anchors\"])\n\n # anchors = ret[\"anchors\"]\n anchors = np.concatenate(anchors_list, axis=0)\n anchors = anchors.reshape([-1, target_assigner.box_ndim])\n assert np.allclose(anchors, ret[\"anchors\"].reshape(-1, target_assigner.box_ndim))\n matched_thresholds = ret[\"matched_thresholds\"]\n unmatched_thresholds = ret[\"unmatched_thresholds\"]\n # anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])\n anchors_bv = 2\n anchor_cache = {\n \"anchors\": anchors,\n \"anchors_bv\": anchors_bv,\n \"matched_thresholds\": matched_thresholds,\n \"unmatched_thresholds\": unmatched_thresholds,\n \"anchors_dict\": anchors_dict,\n }\n anchors = torch.tensor(anchors, dtype=torch.float32, device=device)\n self.anchors = anchors.view(1, -1, 7)\n self.net = net\n self.device = device\n ##########################################################################################\n # 
self.marker_publisher = rospy.Publisher('visualization_marker', MarkerArray, queue_size=5)\n # self.pcl_publisher = rospy.Publisher('result_pcl', PointCloud2, queue_size=1)\n ############\n # [print(n.name) for n in tf.get_default_graph().as_graph_def().node]\n # ROS environment setup\n # ## Define subscribers\n self.subscribers_def()\n # ## Define publishers\n self.publishers_def()\n self.now = rospy.Time.now()\n\n # Define subscribers\n def subscribers_def(self):\n # subs_topic = '/kitti/velo/pointcloud'\n #subs_topic = '/apollo/sensor/velodyne64/compensator/PointCloud2'\n # subs_topic = '/velodyne64_points'\n\n # subs_topic = '/apollo/sensor/velodyne64/PointCloud2'\n # subs_topic = '/points_raw'\n # subs_topic = '/livox/lidar'\n # subs_topic = '/apollo/sensor/velodyne32C/compensator/PointCloud2'\n subs_topic = '/lidar_top'\n self._sub = rospy.Subscriber(subs_topic, PointCloud2, self.lidar_callback, queue_size=10, buff_size=2 ** 24)\n # mydata = rospy.Subscriber( subs_topic , PointCloud2, self.lidar_callback, queue_size=1, buff_size=2**24)\n # print(mydata)\n\n # self._sub = rospy.Subscriber( subs_topic , Image, self.lidar_callback, queue_size=1, buff_size=100)\n\n # Define publishers\n def publishers_def(self):\n self._pub = rospy.Publisher('pc_bbox_topic', Float32MultiArray, queue_size=1)\n self.pub_arr_bbox = rospy.Publisher(\"Detections\", BoundingBoxArray, queue_size=1)\n\n # Camera image callback\n def lidar_callback(self, point_cl_msg):\n arr_bbox = BoundingBoxArray()\n ############################################################################3\n # lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)\n # points = lidar.reshape(-1, 4)\n # print('gotit\"')\n # pc = ros_numpy.numpify(point_cl_msg)\n # points = np.zeros((pc.shape[0], 4))\n # points[:, 0] = pc['x']\n # points[:, 1] = pc['y']\n # points[:, 2] = pc['z']\n # points[:, 3] = pc['intensity']\n # points[:, 3] /= 255\n #########################################################333\n lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)\n points = lidar.reshape(-1, 4)\n points[:, 3] /= 255\n\n #######################################################################\n res = self.voxel_generator.generate(points, max_voxels=30000)\n voxels = res[\"voxels\"]\n coords = res[\"coordinates\"]\n num_points = res[\"num_points_per_voxel\"]\n num_voxels = np.array([voxels.shape[0]], dtype=np.int64)\n # print(\"voxel_generator_time\",(time.time() - t)*1000)\n ###############################################################\n # print(voxels.shape)\n # add batch idx to coords\n coords = np.pad(coords, ((0, 0), (1, 0)), mode='constant', constant_values=0)\n voxels = torch.tensor(voxels, dtype=torch.float32, device=self.device)\n coords = torch.tensor(coords, dtype=torch.int32, device=self.device)\n num_points = torch.tensor(num_points, dtype=torch.int32, device=self.device)\n # print(\"conversion time\",(time.time() - t)*1000)\n example = {\"anchors\": self.anchors, \"voxels\": voxels, \"num_points\": num_points, \"coordinates\": coords, }\n t2 = time.time()\n pred = self.net(example)[0]\n # print(pred)\n # print(\"prediction\",(time.time() - t2)*1000)\n # print(\"total_time\",(time.time() - t)*1000)\n boxes_lidar = pred[\"box3d_lidar\"].detach().cpu().numpy()\n scores_lidar = pred[\"scores\"].detach().cpu().numpy()\n labels_lidar = pred[\"label_preds\"].detach().cpu().numpy()\n ##############################3333\n threshold = .3\n keep = np.where((scores_lidar >= threshold))[0]\n scores_lidar = scores_lidar[keep]\n 
print(scores_lidar)\n boxes_lidar = boxes_lidar[keep]\n labels_lidar = labels_lidar[keep]\n # sco\n # print(scores_lidar)\n ################################################################################\n # self.show_text_in_rviz_mullti_cube(boxes_lidar,point_cl_msg)\n # self.show_text_in_rviz_mullti_sphere(boxes_lidar,point_cl_msg)\n ##################################################################################\n # apollo integration\n # numboxes = np.squeeze(scores_lidar)\n numboxes = len(scores_lidar)\n tl_bbox = Float32MultiArray()\n iLen = boxes_lidar.shape[0]\n lidar_bbox = Float32MultiArray()\n print('Processing no of object:', iLen)\n\n if (numboxes) >= 1:\n tmp = -np.ones(10 * (numboxes) + 1)\n for i in range(0, int(numboxes)):\n try:\n score = float((scores_lidar)[i])\n if (boxes_lidar.shape[0]) == 1:\n bboxes = [float(v) for v in (boxes_lidar)[i]]\n else:\n bboxes = [float(v) for v in np.squeeze(boxes_lidar)[i]]\n tmp[0] = numboxes\n tmp[10 * i + 1] = score\n tmp[10 * i + 2] = bboxes[0]\n tmp[10 * i + 3] = bboxes[1]\n tmp[10 * i + 4] = bboxes[2]\n tmp[10 * i + 5] = bboxes[3]\n tmp[10 * i + 6] = bboxes[4]\n tmp[10 * i + 7] = bboxes[5]\n tmp[10 * i + 8] = bboxes[6]\n tmp[10 * i + 9] = 0\n tmp[10 * i + 10] = 0\n bbox = BoundingBox()\n # bbox.header.frame_id = point_cl_msg.header.frame_id\n # bbox.header.frame_id = 'livox_frame'\n bbox.header.frame_id = 'lidar_top'\n q = Quaternion(axis=(0, 0, 1), radians=-1.0 * float(boxes_lidar[i][6]))\n bbox.pose.orientation.x = q.x\n bbox.pose.orientation.y = q.y\n bbox.pose.orientation.z = q.z\n bbox.pose.orientation.w = q.w\n bbox.pose.position.x = float(boxes_lidar[i][0])\n bbox.pose.position.y = float(boxes_lidar[i][1])\n bbox.pose.position.z = float(boxes_lidar[i][2])\n bbox.dimensions.x = float(boxes_lidar[i][3])\n bbox.dimensions.y = float(boxes_lidar[i][4])\n bbox.dimensions.z = float(boxes_lidar[i][5])\n arr_bbox.boxes.append(bbox)\n\n except:\n print(\"I am here\")\n # here data for publishing\n tl_bbox.data = tmp\n self._pub.publish(tl_bbox)\n arr_bbox.header.frame_id = point_cl_msg.header.frame_id\n self.pub_arr_bbox.publish(arr_bbox)\n arr_bbox.boxes.clear()\n\n\ndef spin(self):\n rospy.spin()\n\n\ndef main():\n rospy.init_node('LIDAR_NODE', anonymous=True)\n tf_ob = ros_tensorflow_obj()\n\n # tf_ob.subscribers_def\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n\n\nif __name__ == '__main__':\n main()\n",
"import time\nfrom enum import Enum\nfrom functools import reduce\nimport contextlib\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport datetime\nimport torchplus\nfrom second.pytorch.core import box_torch_ops\nfrom second.pytorch.core.losses import (WeightedSigmoidClassificationLoss,\n WeightedSmoothL1LocalizationLoss,\n WeightedSoftmaxClassificationLoss)\nfrom second.pytorch.models import middle, pointpillars, rpn, voxel_encoder\nfrom torchplus import metrics\nfrom second.pytorch.utils import torch_timer\nimport mayavi.mlab as mlab\n\ndef draw_lidar(pc, color=None, fig=None, bgcolor=(0, 0, 0), pts_scale=1, pts_mode='point', pts_color=None):\n if fig is None: fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(1600, 1000))\n if color is None: color = pc[:, 2]\n mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], color, color=pts_color, mode=pts_mode, colormap='gnuplot',\n scale_factor=pts_scale, figure=fig)\n\n # draw origin\n mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)\n\n # draw axis\n axes = np.array([\n [2., 0., 0., 0.],\n [0., 2., 0., 0.],\n [0., 0., 2., 0.],\n ], dtype=np.float64)\n mlab.plot3d([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0, 0), tube_radius=None, figure=fig)\n mlab.plot3d([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1, 0), tube_radius=None, figure=fig)\n mlab.plot3d([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0, 1), tube_radius=None, figure=fig)\n\n # draw fov (todo: update to real sensor spec.)\n fov = np.array([ # 45 degree\n [20., 20., 0., 0.],\n [20., -20., 0., 0.],\n ], dtype=np.float64)\n\n mlab.plot3d([0, fov[0, 0]], [0, fov[0, 1]], [0, fov[0, 2]], color=(1, 1, 1), tube_radius=None, line_width=1,\n figure=fig)\n mlab.plot3d([0, fov[1, 0]], [0, fov[1, 1]], [0, fov[1, 2]], color=(1, 1, 1), tube_radius=None, line_width=1,\n figure=fig)\n\n # draw square region\n TOP_Y_MIN = -20\n TOP_Y_MAX = 20\n TOP_X_MIN = 0\n TOP_X_MAX = 40\n TOP_Z_MIN = -2.0\n TOP_Z_MAX = 0.4\n\n x1 = TOP_X_MIN\n x2 = TOP_X_MAX\n y1 = TOP_Y_MIN\n y2 = TOP_Y_MAX\n mlab.plot3d([x1, x1], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)\n mlab.plot3d([x2, x2], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)\n mlab.plot3d([x1, x2], [y1, y1], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)\n mlab.plot3d([x1, x2], [y2, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)\n\n # mlab.orientation_axes()\n mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance=62.0, figure=fig)\n # mlab.show()\n return fig\n\ndef _get_pos_neg_loss(cls_loss, labels):\n # cls_loss: [N, num_anchors, num_class]\n # labels: [N, num_anchors]\n batch_size = cls_loss.shape[0]\n if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:\n cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_neg_loss = (labels == 0).type_as(cls_loss) * cls_loss.view(\n batch_size, -1)\n cls_pos_loss = cls_pos_loss.sum() / batch_size\n cls_neg_loss = cls_neg_loss.sum() / batch_size\n else:\n cls_pos_loss = cls_loss[..., 1:].sum() / batch_size\n cls_neg_loss = cls_loss[..., 0].sum() / batch_size\n return cls_pos_loss, cls_neg_loss\n\nREGISTERED_NETWORK_CLASSES = {}\n\ndef register_voxelnet(cls, name=None):\n global REGISTERED_NETWORK_CLASSES\n if name is None:\n name = cls.__name__\n assert name not in 
REGISTERED_NETWORK_CLASSES, f\"exist class: {REGISTERED_NETWORK_CLASSES}\"\n REGISTERED_NETWORK_CLASSES[name] = cls\n return cls\n\ndef get_voxelnet_class(name):\n global REGISTERED_NETWORK_CLASSES\n assert name in REGISTERED_NETWORK_CLASSES, f\"available class: {REGISTERED_NETWORK_CLASSES}\"\n return REGISTERED_NETWORK_CLASSES[name]\n\nclass LossNormType(Enum):\n NormByNumPositives = \"norm_by_num_positives\"\n NormByNumExamples = \"norm_by_num_examples\"\n NormByNumPosNeg = \"norm_by_num_pos_neg\"\n DontNorm = \"dont_norm\"\n\n@register_voxelnet\nclass VoxelNet(nn.Module):\n def __init__(self,\n output_shape,\n num_class=2,\n num_input_features=4,\n vfe_class_name=\"VoxelFeatureExtractor\",\n vfe_num_filters=[32, 128],\n with_distance=False,\n middle_class_name=\"SparseMiddleExtractor\",\n middle_num_input_features=-1,\n middle_num_filters_d1=[64],\n middle_num_filters_d2=[64, 64],\n rpn_class_name=\"RPN\",\n rpn_num_input_features=-1,\n rpn_layer_nums=[3, 5, 5],\n rpn_layer_strides=[2, 2, 2],\n rpn_num_filters=[128, 128, 256],\n rpn_upsample_strides=[1, 2, 4],\n rpn_num_upsample_filters=[256, 256, 256],\n use_norm=True,\n use_groupnorm=False,\n num_groups=32,\n use_direction_classifier=True,\n use_sigmoid_score=False,\n encode_background_as_zeros=True,\n use_rotate_nms=True,\n multiclass_nms=False,\n nms_score_thresholds=None,\n nms_pre_max_sizes=None,\n nms_post_max_sizes=None,\n nms_iou_thresholds=None,\n target_assigner=None,\n cls_loss_weight=1.0,\n loc_loss_weight=1.0,\n pos_cls_weight=1.0,\n neg_cls_weight=1.0,\n direction_loss_weight=1.0,\n loss_norm_type=LossNormType.NormByNumPositives,\n encode_rad_error_by_sin=False,\n loc_loss_ftor=None,\n cls_loss_ftor=None,\n measure_time=False,\n voxel_generator=None,\n post_center_range=None,\n dir_offset=0.0,\n sin_error_factor=1.0,\n nms_class_agnostic=False,\n num_direction_bins=2,\n direction_limit_offset=0,\n name='voxelnet'):\n super().__init__()\n self.name = name\n self._sin_error_factor = sin_error_factor\n self._num_class = num_class\n self._use_rotate_nms = use_rotate_nms\n self._multiclass_nms = multiclass_nms\n self._nms_score_thresholds = nms_score_thresholds\n self._nms_pre_max_sizes = nms_pre_max_sizes\n self._nms_post_max_sizes = nms_post_max_sizes\n self._nms_iou_thresholds = nms_iou_thresholds\n self._use_sigmoid_score = use_sigmoid_score\n self._encode_background_as_zeros = encode_background_as_zeros\n self._use_direction_classifier = use_direction_classifier\n self._num_input_features = num_input_features\n self._box_coder = target_assigner.box_coder\n self.target_assigner = target_assigner\n self.voxel_generator = voxel_generator\n self._pos_cls_weight = pos_cls_weight\n self._neg_cls_weight = neg_cls_weight\n self._encode_rad_error_by_sin = encode_rad_error_by_sin\n self._loss_norm_type = loss_norm_type\n self._dir_loss_ftor = WeightedSoftmaxClassificationLoss()\n self._diff_loc_loss_ftor = WeightedSmoothL1LocalizationLoss()\n self._dir_offset = dir_offset\n self._loc_loss_ftor = loc_loss_ftor\n self._cls_loss_ftor = cls_loss_ftor\n self._direction_loss_weight = direction_loss_weight\n self._cls_loss_weight = cls_loss_weight\n self._loc_loss_weight = loc_loss_weight\n self._post_center_range = post_center_range or []\n self.measure_time = measure_time\n self._nms_class_agnostic = nms_class_agnostic\n self._num_direction_bins = num_direction_bins\n self._dir_limit_offset = direction_limit_offset\n self.voxel_feature_extractor = voxel_encoder.get_vfe_class(vfe_class_name)(\n num_input_features,\n use_norm,\n 
num_filters=vfe_num_filters,\n with_distance=with_distance,\n voxel_size=self.voxel_generator.voxel_size,\n pc_range=self.voxel_generator.point_cloud_range,\n )\n self.middle_feature_extractor = middle.get_middle_class(middle_class_name)(\n output_shape,\n use_norm,\n num_input_features=middle_num_input_features,\n num_filters_down1=middle_num_filters_d1,\n num_filters_down2=middle_num_filters_d2)\n self.rpn = rpn.get_rpn_class(rpn_class_name)(\n use_norm=True,\n num_class=num_class,\n layer_nums=rpn_layer_nums,\n layer_strides=rpn_layer_strides,\n num_filters=rpn_num_filters,\n upsample_strides=rpn_upsample_strides,\n num_upsample_filters=rpn_num_upsample_filters,\n num_input_features=rpn_num_input_features,\n num_anchor_per_loc=target_assigner.num_anchors_per_location,\n encode_background_as_zeros=encode_background_as_zeros,\n use_direction_classifier=use_direction_classifier,\n use_groupnorm=use_groupnorm,\n num_groups=num_groups,\n box_code_size=target_assigner.box_coder.code_size,\n num_direction_bins=self._num_direction_bins)\n self.rpn_acc = metrics.Accuracy(dim=-1, encode_background_as_zeros=encode_background_as_zeros)\n self.rpn_precision = metrics.Precision(dim=-1)\n self.rpn_recall = metrics.Recall(dim=-1)\n self.rpn_metrics = metrics.PrecisionRecall(\n dim=-1,\n thresholds=[0.1, 0.3, 0.5, 0.7, 0.8, 0.9, 0.95],\n use_sigmoid_score=use_sigmoid_score,\n encode_background_as_zeros=encode_background_as_zeros)\n\n self.rpn_cls_loss = metrics.Scalar()\n self.rpn_loc_loss = metrics.Scalar()\n self.rpn_total_loss = metrics.Scalar()\n self.register_buffer(\"global_step\", torch.LongTensor(1).zero_())\n\n self._time_dict = {}\n self._time_total_dict = {}\n self._time_count_dict = {}\n\n def start_timer(self, *names):\n # if not self.measure_time:\n # return\n torch.cuda.synchronize()\n for name in names:\n self._time_dict[name] = time.time()\n\n def end_timer(self, name):\n # if not self.measure_time:\n # return\n torch.cuda.synchronize()\n time_elapsed = time.time() - self._time_dict[name]\n if name not in self._time_count_dict:\n self._time_count_dict[name] = 1\n self._time_total_dict[name] = time_elapsed\n else:\n self._time_count_dict[name] += 1\n self._time_total_dict[name] += time_elapsed\n self._time_dict[name] = 0\n print(name ,\":\" ,time_elapsed*1000)\n\n def clear_timer(self):\n self._time_count_dict.clear()\n self._time_dict.clear()\n self._time_total_dict.clear()\n\n @contextlib.contextmanager\n def profiler(self):\n old_measure_time = self.measure_time\n self.measure_time = True\n yield\n self.measure_time = old_measure_time\n\n def get_avg_time_dict(self):\n ret = {}\n for name, val in self._time_total_dict.items():\n count = self._time_count_dict[name]\n ret[name] = val / max(1, count)\n return ret\n\n def update_global_step(self):\n self.global_step += 1\n\n def get_global_step(self):\n return int(self.global_step.cpu().numpy()[0])\n\n def clear_global_step(self):\n self.global_step.zero_()\n\n def loss(self, example, preds_dict):\n box_preds = preds_dict[\"box_preds\"]\n cls_preds = preds_dict[\"cls_preds\"]\n batch_size_dev = cls_preds.shape[0]\n self.start_timer(\"loss forward\")\n labels = example['labels']\n reg_targets = example['reg_targets']\n importance = example['importance']\n self.start_timer(\"prepare weight forward\")\n cls_weights, reg_weights, cared = prepare_loss_weights(\n labels,\n pos_cls_weight=self._pos_cls_weight,\n neg_cls_weight=self._neg_cls_weight,\n loss_norm_type=self._loss_norm_type,\n dtype=box_preds.dtype)\n\n cls_targets = labels * 
cared.type_as(labels)\n cls_targets = cls_targets.unsqueeze(-1)\n self.end_timer(\"prepare weight forward\")\n self.start_timer(\"create_loss forward\")\n loc_loss, cls_loss = create_loss(\n self._loc_loss_ftor,\n self._cls_loss_ftor,\n box_preds=box_preds,\n cls_preds=cls_preds,\n cls_targets=cls_targets,\n cls_weights=cls_weights * importance,\n reg_targets=reg_targets,\n reg_weights=reg_weights * importance,\n num_class=self._num_class,\n encode_rad_error_by_sin=self._encode_rad_error_by_sin,\n encode_background_as_zeros=self._encode_background_as_zeros,\n box_code_size=self._box_coder.code_size,\n sin_error_factor=self._sin_error_factor,\n num_direction_bins=self._num_direction_bins,\n )\n loc_loss_reduced = loc_loss.sum() / batch_size_dev\n loc_loss_reduced *= self._loc_loss_weight\n cls_pos_loss, cls_neg_loss = _get_pos_neg_loss(cls_loss, labels)\n cls_pos_loss /= self._pos_cls_weight\n cls_neg_loss /= self._neg_cls_weight\n cls_loss_reduced = cls_loss.sum() / batch_size_dev\n cls_loss_reduced *= self._cls_loss_weight\n loss = loc_loss_reduced + cls_loss_reduced\n self.end_timer(\"create_loss forward\")\n if self._use_direction_classifier:\n dir_targets = get_direction_target(\n example['anchors'],\n reg_targets,\n dir_offset=self._dir_offset,\n num_bins=self._num_direction_bins)\n dir_logits = preds_dict[\"dir_cls_preds\"].view(\n batch_size_dev, -1, self._num_direction_bins)\n weights = (labels > 0).type_as(dir_logits) * importance\n weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)\n dir_loss = self._dir_loss_ftor(\n dir_logits, dir_targets, weights=weights)\n dir_loss = dir_loss.sum() / batch_size_dev\n loss += dir_loss * self._direction_loss_weight\n self.end_timer(\"loss forward\")\n res = {\n \"loss\": loss,\n \"cls_loss\": cls_loss,\n \"loc_loss\": loc_loss,\n \"cls_pos_loss\": cls_pos_loss,\n \"cls_neg_loss\": cls_neg_loss,\n \"cls_preds\": cls_preds,\n \"cls_loss_reduced\": cls_loss_reduced,\n \"loc_loss_reduced\": loc_loss_reduced,\n \"cared\": cared,\n }\n if self._use_direction_classifier:\n res[\"dir_loss_reduced\"] = dir_loss\n return res\n\n def network_forward(self, voxels, num_points, coors, batch_size):\n \"\"\"this function is used for subclass.\n you can add custom network architecture by subclass VoxelNet class\n and override this function.\n Returns:\n preds_dict: {\n box_preds: ...\n cls_preds: ...\n dir_cls_preds: ...\n }\n \"\"\"\n #######################33\n # the actual model start from here\n self.start_timer(\"voxel_feature_extractor\")\n voxel_features = self.voxel_feature_extractor(voxels, num_points, coors)\n # print(voxel_features.shape)\n # mydat=[voxels, num_points, coors]\n # voxel_features = self.voxel_feature_extractor(mydat)\n # here in term of point pillar i have converted each 4 dimension of point cloud into 64 dimesnion feature across all point cloud in pillar\n # torch.onnx.export(self.voxel_feature_extractor, (voxels, num_points, coors), \"mayank_pfe_new.onnx\", verbose=False,opset_version=11)\n ########################################################################33\n # fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1000, 500))\n # pcd_data = voxel_features.detach().cpu().numpy()\n # draw_lidar(pcd_data, fig=fig)\n # mlab.show()\n ####################################################################################\n self.end_timer(\"voxel_feature_extractor\")\n\n self.start_timer(\"middle forward\")\n spatial_features = self.middle_feature_extractor(voxel_features, coors, batch_size)\n # 
torch.onnx.export( self.middle_feature_extractor, (voxel_features, coors, batch_size), \"mayank_spacial.onnx\")\n\n self.end_timer(\"middle forward\")\n self.start_timer(\"rpn forward\")\n # mytime=time.time()\n\n\n preds_dict = self.rpn(spatial_features)\n ###############################################3333\n # print(\"rpn calculated\", (time.time() - mytime) * 1000)\n #####################################333\n # numpy_conv=spatial_features.cpu().detach().numpy()\n # ts = time.time()\n # st = datetime.fromtimestamp(ts).strftime('%d_%m_%Y_%H_%M_%S_%f')\n # # color_path = image_save_path + filename.split(\"/\")[-3] + filename.split(\"/\")[-1]\n # img_name = rpn + str(st)\n # np.save(img_name, numpy_conv)\n ##########################################\n # torch.onnx.export(self.rpn, spatial_features, \"mayank_rpn_new.onnx\")\n # in preds_dicts : box shape is [1,12,50,50,7]: 1 :batch, 12: no of anchor per point, 50*50:channel, 7 is dimension\n # class shape : [1,12,50,50,10], 1 :batch, 12: no of anchor per point, 50*50:channel, 10 no of class\n self.end_timer(\"rpn forward\")\n return preds_dict\n\n\n def forward(self, example):\n \"\"\"module's forward should always accept dict and return loss.\n \"\"\"\n voxels = example[\"voxels\"]\n\n num_points = example[\"num_points\"]\n coors = example[\"coordinates\"]\n if len(num_points.shape) == 2: # multi-gpu\n num_voxel_per_batch = example[\"num_voxels\"].cpu().numpy().reshape(\n -1)\n voxel_list = []\n num_points_list = []\n coors_list = []\n for i, num_voxel in enumerate(num_voxel_per_batch):\n voxel_list.append(voxels[i, :num_voxel])\n num_points_list.append(num_points[i, :num_voxel])\n coors_list.append(coors[i, :num_voxel])\n voxels = torch.cat(voxel_list, dim=0)\n num_points = torch.cat(num_points_list, dim=0)\n coors = torch.cat(coors_list, dim=0)\n batch_anchors = example[\"anchors\"]\n batch_size_dev = batch_anchors.shape[0]\n # features: [num_voxels, max_num_points_per_voxel, 7]\n # num_points: [num_voxels]\n # coors: [num_voxels, 4]\n preds_dict = self.network_forward(voxels, num_points, coors, batch_size_dev)\n # need to check size.\n box_preds = preds_dict[\"box_preds\"].view(batch_size_dev, -1, self._box_coder.code_size)\n err_msg = f\"num_anchors={batch_anchors.shape[1]}, but num_output={box_preds.shape[1]}. 
please check size\"\n assert batch_anchors.shape[1] == box_preds.shape[1], err_msg\n if self.training:\n return self.loss(example, preds_dict)\n else:\n self.start_timer(\"predict\")\n with torch.no_grad():\n res = self.predict(example, preds_dict)\n self.end_timer(\"predict\")\n return res\n\n def predict(self, example, preds_dict):\n \"\"\"start with v1.6.0, this function don't contain any kitti-specific code.\n Returns:\n predict: list of pred_dict.\n pred_dict: {\n box3d_lidar: [N, 7] 3d box.\n scores: [N]\n label_preds: [N]\n metadata: meta-data which contains dataset-specific information.\n for kitti, it contains image idx (label idx), \n for nuscenes, sample_token is saved in it.\n }\n \"\"\"\n batch_size = example['anchors'].shape[0]\n if \"metadata\" not in example or len(example[\"metadata\"]) == 0:\n meta_list = [None] * batch_size\n else:\n meta_list = example[\"metadata\"]\n batch_anchors = example[\"anchors\"].view(batch_size, -1,\n example[\"anchors\"].shape[-1])\n if \"anchors_mask\" not in example:\n batch_anchors_mask = [None] * batch_size\n else:\n batch_anchors_mask = example[\"anchors_mask\"].view(batch_size, -1)\n\n t = time.time()\n batch_box_preds = preds_dict[\"box_preds\"]\n batch_cls_preds = preds_dict[\"cls_preds\"]\n batch_box_preds = batch_box_preds.view(batch_size, -1,\n self._box_coder.code_size)\n num_class_with_bg = self._num_class\n if not self._encode_background_as_zeros:\n num_class_with_bg = self._num_class + 1\n\n batch_cls_preds = batch_cls_preds.view(batch_size, -1,\n num_class_with_bg)\n batch_box_preds = self._box_coder.decode_torch(batch_box_preds,\n batch_anchors)\n if self._use_direction_classifier:\n batch_dir_preds = preds_dict[\"dir_cls_preds\"]\n batch_dir_preds = batch_dir_preds.view(batch_size, -1,\n self._num_direction_bins)\n else:\n batch_dir_preds = [None] * batch_size\n\n predictions_dicts = []\n post_center_range = None\n if len(self._post_center_range) > 0:\n post_center_range = torch.tensor(self._post_center_range, dtype=batch_box_preds.dtype, device=batch_box_preds.device).float()\n for box_preds, cls_preds, dir_preds, a_mask, meta in zip(batch_box_preds, batch_cls_preds, batch_dir_preds, batch_anchors_mask, meta_list):\n if a_mask is not None:\n box_preds = box_preds[a_mask]\n cls_preds = cls_preds[a_mask]\n box_preds = box_preds.float()\n cls_preds = cls_preds.float()\n if self._use_direction_classifier:\n if a_mask is not None:\n dir_preds = dir_preds[a_mask]\n dir_labels = torch.max(dir_preds, dim=-1)[1]\n if self._encode_background_as_zeros:\n # this don't support softmax\n assert self._use_sigmoid_score is True\n total_scores = torch.sigmoid(cls_preds)\n else:\n # encode background as first element in one-hot vector\n if self._use_sigmoid_score:\n total_scores = torch.sigmoid(cls_preds)[..., 1:]\n else:\n total_scores = F.softmax(cls_preds, dim=-1)[..., 1:]\n # Apply NMS in birdeye view\n if self._use_rotate_nms:\n nms_func = box_torch_ops.rotate_nms\n else:\n nms_func = box_torch_ops.nms\n feature_map_size_prod = batch_box_preds.shape[1] // self.target_assigner.num_anchors_per_location\n if self._multiclass_nms:\n assert self._encode_background_as_zeros is True\n boxes_for_nms = box_preds[:, [0, 1, 3, 4, 6]]\n if not self._use_rotate_nms:\n box_preds_corners = box_torch_ops.center_to_corner_box2d(\n boxes_for_nms[:, :2], boxes_for_nms[:, 2:4],\n boxes_for_nms[:, 4])\n boxes_for_nms = box_torch_ops.corner_to_standup_nd(\n box_preds_corners)\n\n selected_boxes, selected_labels, selected_scores = [], [], []\n 
selected_dir_labels = []\n\n scores = total_scores\n boxes = boxes_for_nms\n selected_per_class = []\n score_threshs = self._nms_score_thresholds\n pre_max_sizes = self._nms_pre_max_sizes\n post_max_sizes = self._nms_post_max_sizes\n iou_thresholds = self._nms_iou_thresholds\n for class_idx, score_thresh, pre_ms, post_ms, iou_th in zip(\n range(self._num_class),\n score_threshs,\n pre_max_sizes, post_max_sizes, iou_thresholds):\n if self._nms_class_agnostic:\n class_scores = total_scores.view(\n feature_map_size_prod, -1,\n self._num_class)[..., class_idx]\n class_scores = class_scores.contiguous().view(-1)\n class_boxes_nms = boxes.view(-1,\n boxes_for_nms.shape[-1])\n class_boxes = box_preds\n class_dir_labels = dir_labels\n else:\n anchors_range = self.target_assigner.anchors_range(class_idx)\n class_scores = total_scores.view(\n -1,\n self._num_class)[anchors_range[0]:anchors_range[1], class_idx]\n class_boxes_nms = boxes.view(-1,\n boxes_for_nms.shape[-1])[anchors_range[0]:anchors_range[1], :]\n class_scores = class_scores.contiguous().view(-1)\n class_boxes_nms = class_boxes_nms.contiguous().view(\n -1, boxes_for_nms.shape[-1])\n class_boxes = box_preds.view(-1,\n box_preds.shape[-1])[anchors_range[0]:anchors_range[1], :]\n class_boxes = class_boxes.contiguous().view(\n -1, box_preds.shape[-1])\n if self._use_direction_classifier:\n class_dir_labels = dir_labels.view(-1)[anchors_range[0]:anchors_range[1]]\n class_dir_labels = class_dir_labels.contiguous(\n ).view(-1)\n if score_thresh > 0.0:\n class_scores_keep = class_scores >= score_thresh\n if class_scores_keep.shape[0] == 0:\n selected_per_class.append(None)\n continue\n class_scores = class_scores[class_scores_keep]\n if class_scores.shape[0] != 0:\n if score_thresh > 0.0:\n class_boxes_nms = class_boxes_nms[\n class_scores_keep]\n class_boxes = class_boxes[class_scores_keep]\n class_dir_labels = class_dir_labels[\n class_scores_keep]\n keep = nms_func(class_boxes_nms, class_scores, pre_ms,\n post_ms, iou_th)\n if keep.shape[0] != 0:\n selected_per_class.append(keep)\n else:\n selected_per_class.append(None)\n else:\n selected_per_class.append(None)\n selected = selected_per_class[-1]\n\n if selected is not None:\n selected_boxes.append(class_boxes[selected])\n selected_labels.append(\n torch.full([class_boxes[selected].shape[0]],\n class_idx,\n dtype=torch.int64,\n device=box_preds.device))\n if self._use_direction_classifier:\n selected_dir_labels.append(\n class_dir_labels[selected])\n selected_scores.append(class_scores[selected])\n selected_boxes = torch.cat(selected_boxes, dim=0)\n selected_labels = torch.cat(selected_labels, dim=0)\n selected_scores = torch.cat(selected_scores, dim=0)\n if self._use_direction_classifier:\n selected_dir_labels = torch.cat(selected_dir_labels, dim=0)\n else:\n # get highest score per prediction, than apply nms\n # to remove overlapped box.\n if num_class_with_bg == 1:\n top_scores = total_scores.squeeze(-1)\n top_labels = torch.zeros(\n total_scores.shape[0],\n device=total_scores.device,\n dtype=torch.long)\n else:\n top_scores, top_labels = torch.max(\n total_scores, dim=-1)\n if self._nms_score_thresholds[0] > 0.0:\n top_scores_keep = top_scores >= self._nms_score_thresholds[0]\n top_scores = top_scores.masked_select(top_scores_keep)\n\n if top_scores.shape[0] != 0:\n if self._nms_score_thresholds[0] > 0.0:\n box_preds = box_preds[top_scores_keep]\n if self._use_direction_classifier:\n dir_labels = dir_labels[top_scores_keep]\n top_labels = top_labels[top_scores_keep]\n boxes_for_nms = 
box_preds[:, [0, 1, 3, 4, 6]]\n if not self._use_rotate_nms:\n box_preds_corners = box_torch_ops.center_to_corner_box2d(boxes_for_nms[:, :2], boxes_for_nms[:, 2:4], boxes_for_nms[:, 4])\n boxes_for_nms = box_torch_ops.corner_to_standup_nd(box_preds_corners)\n # the nms in 3d detection just remove overlap boxes.\n selected = nms_func(boxes_for_nms, top_scores, pre_max_size=self._nms_pre_max_sizes[0], post_max_size=self._nms_post_max_sizes[0], iou_threshold=self._nms_iou_thresholds[0],)\n else:\n selected = []\n # if selected is not None:\n selected_boxes = box_preds[selected]\n if self._use_direction_classifier:\n selected_dir_labels = dir_labels[selected]\n selected_labels = top_labels[selected]\n selected_scores = top_scores[selected]\n # finally generate predictions.\n if selected_boxes.shape[0] != 0:\n box_preds = selected_boxes\n scores = selected_scores\n label_preds = selected_labels\n if self._use_direction_classifier:\n dir_labels = selected_dir_labels\n period = (2 * np.pi / self._num_direction_bins)\n dir_rot = box_torch_ops.limit_period(box_preds[..., 6] - self._dir_offset, self._dir_limit_offset, period)\n box_preds[..., 6] = dir_rot + self._dir_offset + period * dir_labels.to(box_preds.dtype)\n final_box_preds = box_preds\n final_scores = scores\n final_labels = label_preds\n if post_center_range is not None:\n mask = (final_box_preds[:, :3] >=\n post_center_range[:3]).all(1)\n mask &= (final_box_preds[:, :3] <=\n post_center_range[3:]).all(1)\n predictions_dict = {\n \"box3d_lidar\": final_box_preds[mask],\n \"scores\": final_scores[mask],\n \"label_preds\": label_preds[mask],\n \"metadata\": meta,\n }\n else:\n predictions_dict = {\n \"box3d_lidar\": final_box_preds,\n \"scores\": final_scores,\n \"label_preds\": label_preds,\n \"metadata\": meta,\n }\n else:\n dtype = batch_box_preds.dtype\n device = batch_box_preds.device\n predictions_dict = {\n \"box3d_lidar\":\n torch.zeros([0, box_preds.shape[-1]],\n dtype=dtype,\n device=device),\n \"scores\":\n torch.zeros([0], dtype=dtype, device=device),\n \"label_preds\":\n torch.zeros([0], dtype=top_labels.dtype, device=device),\n \"metadata\":\n meta,\n }\n predictions_dicts.append(predictions_dict)\n return predictions_dicts\n\n def metrics_to_float(self):\n self.rpn_acc.float()\n self.rpn_metrics.float()\n self.rpn_cls_loss.float()\n self.rpn_loc_loss.float()\n self.rpn_total_loss.float()\n\n def update_metrics(self, cls_loss, loc_loss, cls_preds, labels, sampled):\n batch_size = cls_preds.shape[0]\n num_class = self._num_class\n if not self._encode_background_as_zeros:\n num_class += 1\n cls_preds = cls_preds.view(batch_size, -1, num_class)\n rpn_acc = self.rpn_acc(labels, cls_preds, sampled).numpy()[0]\n prec, recall = self.rpn_metrics(labels, cls_preds, sampled)\n precall = self.rpn_metrics(labels, cls_preds, sampled)\n prec = prec.numpy()\n recall = recall.numpy()\n rpn_cls_loss = self.rpn_cls_loss(cls_loss).numpy()[0]\n rpn_loc_loss = self.rpn_loc_loss(loc_loss).numpy()[0]\n ret = {\n \"loss\": {\n \"cls_loss\": float(rpn_cls_loss),\n \"cls_loss_rt\": float(cls_loss.data.cpu().numpy()),\n 'loc_loss': float(rpn_loc_loss),\n \"loc_loss_rt\": float(loc_loss.data.cpu().numpy()),\n },\n \"rpn_acc\": float(rpn_acc),\n \"pr\": {},\n }\n for i, thresh in enumerate(self.rpn_metrics.thresholds):\n ret[\"pr\"][f\"prec@{int(thresh*100)}\"] = float(prec[i])\n ret[\"pr\"][f\"rec@{int(thresh*100)}\"] = float(recall[i])\n return ret\n\n def clear_metrics(self):\n self.rpn_acc.clear()\n self.rpn_metrics.clear()\n 
self.rpn_cls_loss.clear()\n self.rpn_loc_loss.clear()\n self.rpn_total_loss.clear()\n\n @staticmethod\n def convert_norm_to_float(net):\n '''\n BatchNorm layers to have parameters in single precision.\n Find all layers and convert them back to float. This can't\n be done with built in .apply as that function will apply\n fn to all modules, parameters, and buffers. Thus we wouldn't\n be able to guard the float conversion based on the module type.\n '''\n if isinstance(net, torch.nn.modules.batchnorm._BatchNorm):\n net.float()\n for child in net.children():\n VoxelNet.convert_norm_to_float(child)\n return net\n\n\ndef add_sin_difference(boxes1, boxes2, boxes1_rot, boxes2_rot, factor=1.0):\n if factor != 1.0:\n boxes1_rot = factor * boxes1_rot\n boxes2_rot = factor * boxes2_rot\n rad_pred_encoding = torch.sin(boxes1_rot) * torch.cos(boxes2_rot)\n rad_tg_encoding = torch.cos(boxes1_rot) * torch.sin(boxes2_rot)\n boxes1 = torch.cat([boxes1[..., :6], rad_pred_encoding, boxes1[..., 7:]],\n dim=-1)\n boxes2 = torch.cat([boxes2[..., :6], rad_tg_encoding, boxes2[..., 7:]],\n dim=-1)\n return boxes1, boxes2\n\n\ndef create_loss(loc_loss_ftor,\n cls_loss_ftor,\n box_preds,\n cls_preds,\n cls_targets,\n cls_weights,\n reg_targets,\n reg_weights,\n num_class,\n encode_background_as_zeros=True,\n encode_rad_error_by_sin=True,\n sin_error_factor=1.0,\n box_code_size=7,\n num_direction_bins=2):\n batch_size = int(box_preds.shape[0])\n box_preds = box_preds.view(batch_size, -1, box_code_size)\n if encode_background_as_zeros:\n cls_preds = cls_preds.view(batch_size, -1, num_class)\n else:\n cls_preds = cls_preds.view(batch_size, -1, num_class + 1)\n cls_targets = cls_targets.squeeze(-1)\n one_hot_targets = torchplus.nn.one_hot(\n cls_targets, depth=num_class + 1, dtype=box_preds.dtype)\n if encode_background_as_zeros:\n one_hot_targets = one_hot_targets[..., 1:]\n if encode_rad_error_by_sin:\n # sin(a - b) = sinacosb-cosasinb\n # reg_tg_rot = box_torch_ops.limit_period(\n # reg_targets[..., 6:7], 0.5, 2 * np.pi / num_direction_bins)\n box_preds, reg_targets = add_sin_difference(box_preds, reg_targets, box_preds[..., 6:7], reg_targets[..., 6:7],\n sin_error_factor)\n\n loc_losses = loc_loss_ftor(\n box_preds, reg_targets, weights=reg_weights) # [N, M]\n cls_losses = cls_loss_ftor(\n cls_preds, one_hot_targets, weights=cls_weights) # [N, M]\n return loc_losses, cls_losses\n\n\ndef prepare_loss_weights(labels,\n pos_cls_weight=1.0,\n neg_cls_weight=1.0,\n loss_norm_type=LossNormType.NormByNumPositives,\n dtype=torch.float32):\n \"\"\"get cls_weights and reg_weights from labels.\n \"\"\"\n cared = labels >= 0\n # cared: [N, num_anchors]\n positives = labels > 0\n negatives = labels == 0\n negative_cls_weights = negatives.type(dtype) * neg_cls_weight\n cls_weights = negative_cls_weights + pos_cls_weight * positives.type(dtype)\n reg_weights = positives.type(dtype)\n if loss_norm_type == LossNormType.NormByNumExamples:\n num_examples = cared.type(dtype).sum(1, keepdim=True)\n num_examples = torch.clamp(num_examples, min=1.0)\n cls_weights /= num_examples\n bbox_normalizer = positives.sum(1, keepdim=True).type(dtype)\n reg_weights /= torch.clamp(bbox_normalizer, min=1.0)\n elif loss_norm_type == LossNormType.NormByNumPositives: # for focal loss\n pos_normalizer = positives.sum(1, keepdim=True).type(dtype)\n reg_weights /= torch.clamp(pos_normalizer, min=1.0)\n cls_weights /= torch.clamp(pos_normalizer, min=1.0)\n elif loss_norm_type == LossNormType.NormByNumPosNeg:\n pos_neg = torch.stack([positives, negatives], 
dim=-1).type(dtype)\n normalizer = pos_neg.sum(1, keepdim=True) # [N, 1, 2]\n cls_normalizer = (pos_neg * normalizer).sum(-1) # [N, M]\n cls_normalizer = torch.clamp(cls_normalizer, min=1.0)\n # cls_normalizer will be pos_or_neg_weight/num_pos_or_neg\n normalizer = torch.clamp(normalizer, min=1.0)\n reg_weights /= normalizer[:, 0:1, 0]\n cls_weights /= cls_normalizer\n elif loss_norm_type == LossNormType.DontNorm: # support ghm loss\n pos_normalizer = positives.sum(1, keepdim=True).type(dtype)\n reg_weights /= torch.clamp(pos_normalizer, min=1.0)\n else:\n raise ValueError(\n f\"unknown loss norm type. available: {list(LossNormType)}\")\n return cls_weights, reg_weights, cared\n\n\ndef assign_weight_to_each_class(labels,\n weight_per_class,\n norm_by_num=True,\n dtype=torch.float32):\n weights = torch.zeros(labels.shape, dtype=dtype, device=labels.device)\n for label, weight in weight_per_class:\n positives = (labels == label).type(dtype)\n weight_class = weight * positives\n if norm_by_num:\n normalizer = positives.sum()\n normalizer = torch.clamp(normalizer, min=1.0)\n weight_class /= normalizer\n weights += weight_class\n return weights\n\n\ndef get_direction_target(anchors,\n reg_targets,\n one_hot=True,\n dir_offset=0,\n num_bins=2):\n batch_size = reg_targets.shape[0]\n anchors = anchors.view(batch_size, -1, anchors.shape[-1])\n rot_gt = reg_targets[..., 6] + anchors[..., 6]\n offset_rot = box_torch_ops.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)\n dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()\n dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)\n if one_hot:\n dir_cls_targets = torchplus.nn.one_hot(\n dir_cls_targets, num_bins, dtype=anchors.dtype)\n return dir_cls_targets\n"
] | [
[
"numpy.pad",
"torch.load",
"numpy.squeeze",
"torch.tensor",
"numpy.concatenate",
"numpy.ones",
"numpy.fromstring",
"torch.cuda.is_available",
"numpy.array",
"numpy.where"
],
[
"numpy.pad",
"torch.load",
"numpy.squeeze",
"torch.tensor",
"numpy.concatenate",
"numpy.ones",
"numpy.fromstring",
"torch.cuda.is_available",
"numpy.array",
"numpy.where"
],
[
"torch.cuda.synchronize",
"torch.sigmoid",
"torch.LongTensor",
"torch.floor",
"torch.max",
"torch.zeros",
"torch.cat",
"torch.sin",
"torch.nn.functional.softmax",
"torch.full",
"torch.tensor",
"torch.no_grad",
"torch.stack",
"torch.clamp",
"numpy.array",
"torch.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
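The code field of the row above ends with the detection-loss helpers (`add_sin_difference`, `create_loss`, `prepare_loss_weights`, `get_direction_target`). A minimal editor-added sketch of the sin-difference trick those helpers rely on, not part of the recorded repository: since sin(a - b) = sin(a)cos(b) - cos(a)sin(b), regressing sin(pred)cos(target) against cos(pred)sin(target) makes the rotation term of the box loss insensitive to 2*pi wrap-around without any branching. Assumes only PyTorch; the function name `sin_difference` is illustrative.

import math
import torch

def sin_difference(pred_rot: torch.Tensor, target_rot: torch.Tensor):
    # Encoded pair whose difference equals sin(pred_rot - target_rot).
    rad_pred = torch.sin(pred_rot) * torch.cos(target_rot)
    rad_target = torch.cos(pred_rot) * torch.sin(target_rot)
    return rad_pred, rad_target

pred = torch.tensor([0.1, 3.2])
target = torch.tensor([0.1 + 2 * math.pi, -3.1])  # angles wrapped by ~2*pi
p, t = sin_difference(pred, target)
print(p - t)  # ~[0.0, 0.017]: small residuals despite the raw 2*pi offsets

Note that the sine encoding conflates angles that differ by pi, which is exactly why the code above pairs it with a direction classifier: `get_direction_target` bins the ground-truth rotation into `num_bins` classes so the pi ambiguity is resolved by classification rather than regression.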
MaxSchambach/colour | [
"3f3685d616fda4be58cec20bc1e16194805d7e2d",
"3f3685d616fda4be58cec20bc1e16194805d7e2d",
"3f3685d616fda4be58cec20bc1e16194805d7e2d",
"3f3685d616fda4be58cec20bc1e16194805d7e2d",
"3f3685d616fda4be58cec20bc1e16194805d7e2d",
"3f3685d616fda4be58cec20bc1e16194805d7e2d",
"3f3685d616fda4be58cec20bc1e16194805d7e2d"
] | [
"colour/corresponding/datasets/breneman1987.py",
"colour/models/rgb/transfer_functions/itur_bt_1886.py",
"colour/volume/tests/test_mesh.py",
"colour/utilities/common.py",
"colour/appearance/llab.py",
"colour/plotting/quality.py",
"colour/models/rgb/transfer_functions/tests/test_viper_log.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nBreneman Corresponding Chromaticities Dataset\n=============================================\n\nDefines *Breneman (1987)* results for corresponding chromaticities experiments.\n\nSee Also\n--------\n`Corresponding Chromaticities Prediction Jupyter Notebook\n<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\\\nblob/master/notebooks/corresponding/prediction.ipynb>`_\n\nReferences\n----------\n- :cite:`Breneman1987b` : Breneman, E. J. (1987). Corresponding\n chromaticities for different states of adaptation to complex visual fields.\n Journal of the Optical Society of America A, 4(6), 1115.\n doi:10.1364/JOSAA.4.001115\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\n\nfrom collections import namedtuple\n\nfrom colour.utilities.documentation import DocstringDict\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'BrenemanExperimentResult', 'PrimariesChromaticityCoordinates',\n 'BRENEMAN_EXPERIMENT_1_RESULTS', 'BRENEMAN_EXPERIMENT_2_RESULTS',\n 'BRENEMAN_EXPERIMENT_3_RESULTS', 'BRENEMAN_EXPERIMENT_4_RESULTS',\n 'BRENEMAN_EXPERIMENT_5_RESULTS', 'BRENEMAN_EXPERIMENT_6_RESULTS',\n 'BRENEMAN_EXPERIMENT_7_RESULTS', 'BRENEMAN_EXPERIMENT_10_RESULTS',\n 'BRENEMAN_EXPERIMENT_8_RESULTS', 'BRENEMAN_EXPERIMENT_9_RESULTS',\n 'BRENEMAN_EXPERIMENT_11_RESULTS', 'BRENEMAN_EXPERIMENT_12_RESULTS',\n 'BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES', 'BRENEMAN_EXPERIMENTS'\n]\n\n\nclass BrenemanExperimentResult(\n namedtuple('BrenemanExperimentResult',\n ('name', 'uv_t', 'uv_m', 's_uv', 'd_uv_i', 'd_uv_g'))):\n \"\"\"\n Experiment result.\n\n Parameters\n ----------\n name : unicode\n Test colour name.\n uv_t : numeric\n Chromaticity coordinates :math:`uv_t^p` of test colour.\n uv_m : array_like, (2,)\n Chromaticity coordinates :math:`uv_m^p` of matching colour.\n s_uv : array_like, (2,), optional\n Interobserver variation (:math:`x10^3`) :math:`\\\\sigma_uv^p`.\n d_uv_i : array_like, (2,), optional\n Deviation of individual linear transformation (:math:`x10^3`)\n :math:`\\\\delta_uv_i^p`.\n d_uv_g : array_like, (2,), optional\n Deviation of individual linear transformation (:math:`x10^3`)\n :math:`\\\\delta_uv_g^p`.\n \"\"\"\n\n def __new__(cls, name, uv_t, uv_m, s_uv=None, d_uv_i=None, d_uv_g=None):\n \"\"\"\n Returns a new instance of the\n :class:`colour.corresponding.datasets.corresponding_chromaticities.\\\nBrenemanExperimentResult` class.\n \"\"\"\n\n return super(BrenemanExperimentResult, cls).__new__(\n cls, name, np.array(uv_t), np.array(uv_m), np.array(s_uv),\n np.array(d_uv_i), np.array(d_uv_g))\n\n\nclass PrimariesChromaticityCoordinates(\n namedtuple(\n 'PrimariesChromaticityCoordinates',\n ('experiment', 'illuminants', 'Y', 'P_uvp', 'D_uvp', 'T_uvp'))):\n \"\"\"\n Chromaticity coordinates of primaries.\n\n Parameters\n ----------\n experiment : integer\n Experiment.\n illuminants : array_like, (2,)\n Chromaticity coordinates :math:`uv_t^p` of test colour.\n Y : numeric\n White luminance :math:`Y` in :math:`cd/m^2`.\n P_uvp : numeric\n Chromaticity coordinates :math:`uv^p` of primary :math:`P`.\n D_uvp : numeric\n Chromaticity coordinates :math:`uv^p` of primary :math:`D`.\n T_uvp : numeric\n Chromaticity coordinates :math:`uv^p` of primary :math:`T`.\n \"\"\"\n\n def 
__new__(cls,\n experiment,\n illuminants,\n Y,\n P_uvp=None,\n D_uvp=None,\n T_uvp=None):\n \"\"\"\n Returns a new instance of the\n :class:`colour.corresponding.datasets.corresponding_chromaticities.\\\nPrimariesChromaticityCoordinates` class.\n \"\"\"\n\n return super(PrimariesChromaticityCoordinates, cls).__new__(\n cls, experiment, np.array(illuminants), np.array(Y),\n np.array(P_uvp), np.array(D_uvp), np.array(T_uvp))\n\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_1_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.259, 0.526), (0.200, 0.475)),\n BrenemanExperimentResult(\n 'Gray',\n (0.259, 0.524), (0.199, 0.487), (4, 4), (2, 3), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.459, 0.522), (0.420, 0.509), (19, 4), (-10, -7), (-19, -3)),\n BrenemanExperimentResult(\n 'Skin',\n (0.307, 0.526), (0.249, 0.497), (7, 4), (-1, 1), (-6, -1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.360, 0.544), (0.302, 0.548), (12, 1), (1, -2), (-7, -6)),\n BrenemanExperimentResult(\n 'Brown',\n (0.350, 0.541), (0.290, 0.537), (11, 4), (3, 0), (-5, -3)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.318, 0.550), (0.257, 0.554), (8, 2), (0, 2), (-5, -5)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.258, 0.542), (0.192, 0.529), (4, 6), (3, 2), (3, -6)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.542), (0.129, 0.521), (7, 5), (3, 2), (9, -7)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.180, 0.516), (0.133, 0.469), (4, 6), (-3, -2), (2, -5)),\n BrenemanExperimentResult(\n 'Blue',\n (0.186, 0.445), (0.158, 0.340), (13, 33), (2, 7), (1, 13)),\n BrenemanExperimentResult(\n 'Sky',\n (0.226, 0.491), (0.178, 0.426), (3, 14), (1, -3), (0, -1)),\n BrenemanExperimentResult(\n 'Purple',\n (0.278, 0.456), (0.231, 0.365), (4, 25), (0, 2), (-5, 7)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 1 results.\n\nBRENEMAN_EXPERIMENT_1_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D65*\n- White Luminance : 1500 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_2_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.222, 0.521), (0.204, 0.479)),\n BrenemanExperimentResult(\n 'Gray',\n (0.227, 0.517), (0.207, 0.486), (2, 5), (-1, 0), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.464, 0.520), (0.449, 0.511), (22, 3), (-8, -8), (-7, -2)),\n BrenemanExperimentResult(\n 'Skin',\n (0.286, 0.526), (0.263, 0.505), (7, 2), (0, -1), (0, -1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.348, 0.546), (0.322, 0.545), (13, 3), (3, -1), (3, -2)),\n BrenemanExperimentResult(\n 'Brown',\n (0.340, 0.543), (0.316, 0.537), (11, 3), (1, 1), (0, 0)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.288, 0.554), (0.265, 0.553), (5, 2), (-2, 2), (-1, -2)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.244, 0.547), (0.221, 0.538), (4, 3), (-2, 1), (0, -3)),\n BrenemanExperimentResult(\n 'Green',\n (0.156, 0.548), (0.135, 0.532), (4, 3), (-1, 3), (3, -4)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.159, 0.511), (0.145, 0.472), (9, 7), (-1, 2), (2, 1)),\n BrenemanExperimentResult(\n 'Blue',\n (0.160, 0.406), (0.163, 0.331), (23, 31), (2, -3), (-1, 3)),\n BrenemanExperimentResult(\n 'Sky',\n (0.190, 0.481), (0.176, 0.431), (5, 24), (2, -2), (2, 0)),\n BrenemanExperimentResult(\n 'Purple',\n (0.258, 0.431), (0.244, 0.349), (4, 19), (-3, 13), (-4, 19)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 2 results.\n\nBRENEMAN_EXPERIMENT_2_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *Projector*, *D55*\n- White Luminance : 1500 :math:`cd/m^2`\n- Observers 
Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_3_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.223, 0.521), (0.206, 0.478)),\n BrenemanExperimentResult(\n 'Gray',\n (0.228, 0.517), (0.211, 0.494), (1, 3), (0, 2), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.462, 0.519), (0.448, 0.505), (11, 4), (-3, 6), (-4, 6)),\n BrenemanExperimentResult(\n 'Skin',\n (0.285, 0.524), (0.267, 0.507), (6, 3), (-1, 1), (-2, 1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.346, 0.546), (0.325, 0.541), (11, 3), (1, -2), (2, 3)),\n BrenemanExperimentResult(\n 'Brown',\n (0.338, 0.543), (0.321, 0.532), (9, 6), (-3, 2), (-3, 7)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.287, 0.554), (0.267, 0.548), (4, 5), (1, -2), (0, 5)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.244, 0.547), (0.226, 0.531), (3, 6), (-1, 3), (-2, 8)),\n BrenemanExperimentResult(\n 'Green',\n (0.157, 0.548), (0.141, 0.528), (9, 6), (2, 2), (0, 6)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.160, 0.510), (0.151, 0.486), (8, 5), (-2, -1), (-2, -5)),\n BrenemanExperimentResult(\n 'Blue',\n (0.162, 0.407), (0.158, 0.375), (6, 7), (1, -6), (4, -23)),\n BrenemanExperimentResult(\n 'Sky',\n (0.191, 0.482), (0.179, 0.452), (4, 5), (0, 1), (1, -7)),\n BrenemanExperimentResult(\n 'Purple',\n (0.258, 0.432), (0.238, 0.396), (4, 8), (5, 3), (4, -11)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 3 results.\n\nBRENEMAN_EXPERIMENT_3_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *Projector*, *D55*\n- White Luminance : 75 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_4_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.258, 0.523), (0.199, 0.467)),\n BrenemanExperimentResult(\n 'Gray',\n (0.257, 0.524), (0.205, 0.495), (2, 2), (0, 4), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.460, 0.521), (0.416, 0.501), (11, 6), (-6, 4), (-6, 9)),\n BrenemanExperimentResult(\n 'Skin',\n (0.308, 0.526), (0.253, 0.503), (7, 3), (-1, 1), (-1, 0)),\n BrenemanExperimentResult(\n 'Orange',\n (0.360, 0.544), (0.303, 0.541), (14, 5), (1, -4), (1, 2)),\n BrenemanExperimentResult(\n 'Brown',\n (0.350, 0.541), (0.296, 0.527), (11, 7), (-2, 4), (-3, 9)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.317, 0.550), (0.260, 0.547), (9, 5), (1, -3), (0, 3)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.258, 0.543), (0.203, 0.520), (4, 6), (0, 8), (0, 9)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.543), (0.142, 0.516), (6, 9), (3, 8), (2, 6)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.180, 0.516), (0.140, 0.484), (9, 5), (-2, -1), (-1, -9)),\n BrenemanExperimentResult(\n 'Blue',\n (0.185, 0.445), (0.151, 0.394), (8, 10), (2, -8), (8, -24)),\n BrenemanExperimentResult(\n 'Sky',\n (0.225, 0.490), (0.180, 0.448), (4, 8), (1, -1), (3, -11)),\n BrenemanExperimentResult(\n 'Purple',\n (0.278, 0.455), (0.229, 0.388), (6, 14), (1, 12), (3, 0)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 4 results.\n\nBRENEMAN_EXPERIMENT_4_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D65*\n- White Luminance : 75 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_5_RESULTS = (\n BrenemanExperimentResult(\n 'Gray',\n (0.028, 0.480), (0.212, 0.491), (2, 2)),\n BrenemanExperimentResult(\n 'Red',\n (0.449, 0.512), (0.408, 0.514), (11, 5)),\n BrenemanExperimentResult(\n 'Skin',\n (0.269, 0.505), (0.262, 0.511), (4, 2)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.548), (0.303, 0.545), (4, 3)),\n BrenemanExperimentResult(\n 
'Brown',\n (0.322, 0.541), (0.303, 0.538), (4, 4)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.268, 0.555), (0.264, 0.550), (3, 2)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.224, 0.538), (0.227, 0.535), (3, 3)),\n BrenemanExperimentResult(\n 'Green',\n (0.134, 0.531), (0.159, 0.530), (9, 3)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.145, 0.474), (0.165, 0.490), (8, 3)),\n BrenemanExperimentResult(\n 'Blue',\n (0.163, 0.329), (0.173, 0.378), (7, 12)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.438), (0.189, 0.462), (5, 4)),\n BrenemanExperimentResult(\n 'Purple',\n (0.245, 0.364), (0.239, 0.401), (4, 16)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 5 results.\n\nBRENEMAN_EXPERIMENT_5_RESULTS : tuple\n\nNotes\n-----\n- Effective White Levels : 130 and 2120 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_6_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.257, 0.525), (0.201, 0.482)),\n BrenemanExperimentResult(\n 'Gray',\n (0.267, 0.521), (0.207, 0.485), (5, 3), (-1, 0), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.457, 0.521), (0.398, 0.516), (9, 4), (-2, -5), (1, -9)),\n BrenemanExperimentResult(\n 'Skin',\n (0.316, 0.526), (0.253, 0.503), (5, 3), (-3, -2), (-1, -3)),\n BrenemanExperimentResult(\n 'Orange',\n (0.358, 0.545), (0.287, 0.550), (7, 3), (3, 0), (7, -6)),\n BrenemanExperimentResult(\n 'Brown',\n (0.350, 0.541), (0.282, 0.540), (6, 3), (-1, 0), (2, -5)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.318, 0.551), (0.249, 0.556), (7, 2), (-1, 1), (2, -5)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.256, 0.547), (0.188, 0.537), (5, 4), (3, 1), (4, -2)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.542), (0.133, 0.520), (13, 3), (5, -2), (5, -4)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.180, 0.516), (0.137, 0.466), (12, 10), (0, 0), (-2, 2)),\n BrenemanExperimentResult(\n 'Blue',\n (0.186, 0.445), (0.156, 0.353), (12, 45), (6, 1), (2, 6)),\n BrenemanExperimentResult(\n 'Sky',\n (0.225, 0.492), (0.178, 0.428), (6, 14), (1, -1), (-1, 3)),\n BrenemanExperimentResult(\n 'Purple',\n (0.276, 0.456), (0.227, 0.369), (6, 27), (-2, 4), (-3, 9)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 6 results.\n\nBRENEMAN_EXPERIMENT_6_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D55*\n- White Luminance : 11100 :math:`cd/m^2`\n- Observers Count : 8\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_7_RESULTS = (\n BrenemanExperimentResult(\n 'Gray',\n (0.208, 0.481), (0.211, 0.486), (2, 3)),\n BrenemanExperimentResult(\n 'Red',\n (0.448, 0.512), (0.409, 0.516), (9, 2)),\n BrenemanExperimentResult(\n 'Skin',\n (0.269, 0.505), (0.256, 0.506), (4, 3)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.549), (0.305, 0.547), (5, 4)),\n BrenemanExperimentResult(\n 'Brown',\n (0.322, 0.541), (0.301, 0.539), (5, 2)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.268, 0.555), (0.257, 0.552), (3, 4)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.225, 0.538), (0.222, 0.536), (3, 2)),\n BrenemanExperimentResult(\n 'Green',\n (0.135, 0.531), (0.153, 0.529), (8, 2)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.145, 0.475), (0.160, 0.484), (3, 5)),\n BrenemanExperimentResult(\n 'Blue',\n (0.163, 0.331), (0.171, 0.379), (4, 11)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.438), (0.187, 0.452), (4, 7)),\n BrenemanExperimentResult(\n 'Purple',\n (0.245, 0.365), (0.240, 0.398), (4, 10)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 7 results.\n\nBRENEMAN_EXPERIMENT_7_RESULTS : 
tuple\n\nNotes\n-----\n- Effective White Levels : 850 and 11100 :math:`cd/m^2`\n- Observers Count : 8\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_8_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.258, 0.524), (0.195, 0.469)),\n BrenemanExperimentResult(\n 'Gray',\n (0.257, 0.525), (0.200, 0.494), (2, 3), (1, 2), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.458, 0.522), (0.410, 0.508), (12, 4), (-3, 5), (-7, 2)),\n BrenemanExperimentResult(\n 'Skin',\n (0.308, 0.526), (0.249, 0.502), (6, 2), (-1, 1), (-3, -1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.359, 0.545), (0.299, 0.545), (12, 4), (0, -2), (-3, 0)),\n BrenemanExperimentResult(\n 'Brown',\n (0.349, 0.540), (0.289, 0.532), (10, 4), (0, 1), (-2, 2)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.317, 0.550), (0.256, 0.549), (9, 5), (0, -3), (-3, 1)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.260, 0.545), (0.198, 0.529), (5, 5), (3, 1), (0, 3)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.543), (0.137, 0.520), (9, 5), (3, 0), (2, 1)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.182, 0.516), (0.139, 0.477), (9, 4), (-3, 0), (-2, -4)),\n BrenemanExperimentResult(\n 'Blue',\n (0.184, 0.444), (0.150, 0.387), (5, 11), (3, -10), (6, -22)),\n BrenemanExperimentResult(\n 'Sky',\n (0.224, 0.489), (0.177, 0.439), (5, 6), (1, 1), (1, -7)),\n BrenemanExperimentResult(\n 'Purple',\n (0.277, 0.454), (0.226, 0.389), (4, 10), (1, 4), (1, -8)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 8 results.\n\nBRENEMAN_EXPERIMENT_8_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D65*\n- White Luminance : 350 :math:`cd/m^2`\n- Observers Count : 8\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_9_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.254, 0.525), (0.195, 0.465)),\n BrenemanExperimentResult(\n 'Gray',\n (0.256, 0.524), (0.207, 0.496), (4, 6), (3, 2), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.459, 0.521), (0.415, 0.489), (20, 14), (2, 12), (-2, 21)),\n BrenemanExperimentResult(\n 'Skin',\n (0.307, 0.525), (0.261, 0.500), (7, 7), (0, 1), (-5, 2)),\n BrenemanExperimentResult(\n 'Orange',\n (0.359, 0.545), (0.313, 0.532), (7, 5), (-2, -3), (-6, 13)),\n BrenemanExperimentResult(\n 'Brown',\n (0.349, 0.540), (0.302, 0.510), (11, 15), (0, 12), (-5, 24)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.317, 0.550), (0.268, 0.538), (7, 10), (1, -4), (-4, 12)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.259, 0.544), (0.212, 0.510), (10, 11), (0, 14), (-4, 22)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.542), (0.150, 0.506), (6, 10), (-1, 13), (-2, 15)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.181, 0.517), (0.144, 0.487), (9, 6), (-3, 0), (-1, -9)),\n BrenemanExperimentResult(\n 'Blue',\n (0.184, 0.444), (0.155, 0.407), (4, 11), (-2, -6), (6, -36)),\n BrenemanExperimentResult(\n 'Sky',\n (0.225, 0.490), (0.183, 0.458), (5, 8), (1, -3), (2, -19)),\n BrenemanExperimentResult(\n 'Purple',\n (0.276, 0.454), (0.233, 0.404), (7, 12), (2, 9), (0, -16)),\n BrenemanExperimentResult(\n '(Gray)h',\n (0.256, 0.525), (0.208, 0.498)),\n BrenemanExperimentResult(\n '(Red)h',\n (0.456, 0.521), (0.416, 0.501), (15, 7), None, (-6, -9)),\n BrenemanExperimentResult(\n '(Brown)h',\n (0.349, 0.539), (0.306, 0.526), (11, 8), None, (-8, 7)),\n BrenemanExperimentResult(\n '(Foliage)h',\n (0.260, 0.545), (0.213, 0.528), (7, 9), None, (-4, 5)),\n BrenemanExperimentResult(\n '(Green)h',\n (0.193, 0.543), (0.149, 0.525), (10, 8), None, (-1, -1)),\n BrenemanExperimentResult(\n '(Blue)h',\n (0.184, 
0.444), (0.156, 0.419), (7, 8), None, (4, -45)),\n BrenemanExperimentResult(\n '(Purple)h',\n (0.277, 0.456), (0.236, 0.422), (6, 11), None, (-2, -29)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 9 results.\n\nBRENEMAN_EXPERIMENT_9_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D65*\n- White Luminance : 15 :math:`cd/m^2`\n- Observers Count : 8\n- The colors indicated by (.)h are the darker colors presented at the higher\n luminescence level of the lighter colors.\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_10_RESULTS = (\n BrenemanExperimentResult(\n 'Gray',\n (0.208, 0.482), (0.213, 0.494), (3, 3)),\n BrenemanExperimentResult(\n 'Red',\n (0.447, 0.512), (0.411, 0.506), (15, 7)),\n BrenemanExperimentResult(\n 'Skin',\n (0.269, 0.505), (0.269, 0.511), (4, 3)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.549), (0.315, 0.536), (7, 8)),\n BrenemanExperimentResult(\n 'Brown',\n (0.323, 0.542), (0.310, 0.526), (6, 8)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.268, 0.556), (0.268, 0.541), (3, 6)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.226, 0.538), (0.230, 0.525), (4, 8)),\n BrenemanExperimentResult(\n 'Green',\n (0.135, 0.531), (0.158, 0.524), (6, 3)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.145, 0.476), (0.161, 0.491), (4, 4)),\n BrenemanExperimentResult(\n 'Blue',\n (0.163, 0.330), (0.171, 0.377), (6, 19)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.439), (0.187, 0.465), (5, 5)),\n BrenemanExperimentResult(\n 'Purple',\n (0.245, 0.366), (0.240, 0.402), (3, 12)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 10 results.\n\nBRENEMAN_EXPERIMENT_10_RESULTS : tuple\n\nNotes\n-----\n- Effective White Levels : 15 and 270 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_11_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.208, 0.482), (0.174, 0.520)),\n BrenemanExperimentResult(\n 'Gray',\n (0.209, 0.483), (0.176, 0.513), (3, 4), (2, 2), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.450, 0.512), (0.419, 0.524), (10, 2), (3, 2), (8, -1)),\n BrenemanExperimentResult(\n 'Skin',\n (0.268, 0.506), (0.240, 0.528), (6, 2), (-4, 0), (-3, 0)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.547), (0.293, 0.553), (6, 2), (3, -1), (5, 1)),\n BrenemanExperimentResult(\n 'Brown',\n (0.323, 0.542), (0.290, 0.552), (5, 2), (-1, -3), (0, -1)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.266, 0.549), (0.236, 0.557), (4, 2), (-3, -2), (-4, 2)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.227, 0.538), (0.194, 0.552), (4, 2), (2, -3), (-1, 1)),\n BrenemanExperimentResult(\n 'Green',\n (0.146, 0.534), (0.118, 0.551), (8, 3), (4, -2), (-6, 3)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.160, 0.475), (0.130, 0.513), (9, 4), (1, -1), (-4, -3)),\n BrenemanExperimentResult(\n 'Blue',\n (0.177, 0.340), (0.133, 0.427), (6, 14), (4, -17), (11, -29)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.438), (0.146, 0.482), (6, 10), (1, 4), (0, -1)),\n BrenemanExperimentResult(\n 'Purple',\n (0.245, 0.366), (0.216, 0.419), (4, 13), (-3, 8), (4, -2)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 1 results.\n\nBRENEMAN_EXPERIMENT_11_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *green*, *D65*\n- White Luminance : 1560 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_12_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.205, 0.482), (0.174, 0.519)),\n BrenemanExperimentResult(\n 'Gray',\n (0.208, 0.482), (0.181, 0.507), (4, 3), (0, 1), (0, 0)),\n 
BrenemanExperimentResult(\n 'Red',\n (0.451, 0.512), (0.422, 0.526), (20, 3), (0, -5), (10, -5)),\n BrenemanExperimentResult(\n 'Skin',\n (0.268, 0.506), (0.244, 0.525), (5, 2), (-6, 0), (-2, -1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.548), (0.292, 0.553), (10, 2), (5, 2), (11, 1)),\n BrenemanExperimentResult(\n 'Brown',\n (0.324, 0.542), (0.286, 0.554), (8, 1), (5, -3), (10, -4)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.266, 0.548), (0.238, 0.558), (6, 2), (-3, -1), (-1, -2)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.227, 0.538), (0.196, 0.555), (6, 3), (3, -4), (2, -5)),\n BrenemanExperimentResult(\n 'Green',\n (0.145, 0.534), (0.124, 0.551), (8, 6), (1, -1), (-8, -1)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.160, 0.474), (0.135, 0.505), (5, 2), (1, -1), (-4, -3)),\n BrenemanExperimentResult(\n 'Blue',\n (0.178, 0.339), (0.149, 0.392), (4, 20), (-1, -5), (3, -7)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.440), (0.150, 0.473), (4, 8), (3, 2), (2, 0)),\n BrenemanExperimentResult(\n 'Purple',\n (0.246, 0.366), (0.222, 0.404), (5, 15), (-4, 2), (4, 2)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 12 results.\n\nBRENEMAN_EXPERIMENT_12_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *D55*, *green*\n- White Luminance : 75 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES = DocstringDict({\n 1: PrimariesChromaticityCoordinates(\n 1, ('A', 'D65'), 1500,\n (0.671, 0.519), (-0.586, 0.627), (0.253, 0.016)),\n 2: PrimariesChromaticityCoordinates(\n 2, ('Projector', 'D55'), 1500,\n (0.675, 0.523), (-0.466, 0.617), (0.255, 0.018)),\n 3: PrimariesChromaticityCoordinates(\n 3, ('Projector', 'D55'), 75,\n (0.664, 0.510), (-0.256, 0.729), (0.244, 0.003)),\n 4: PrimariesChromaticityCoordinates(\n 4, ('A', 'D65'), 75,\n (0.674, 0.524), (-0.172, 0.628), (0.218, -0.026)),\n 6: PrimariesChromaticityCoordinates(\n 6, ('A', 'D55'), 11100,\n (0.659, 0.506), (-0.141, 0.615), (0.249, 0.009)),\n 8: PrimariesChromaticityCoordinates(\n 8, ('A', 'D65'), 350,\n (0.659, 0.505), (-0.246, 0.672), (0.235, -0.006)),\n 9: PrimariesChromaticityCoordinates(\n 9, ('A', 'D65'), 15,\n (0.693, 0.546), (-0.446, 0.773), (0.221, -0.023)),\n 11: PrimariesChromaticityCoordinates(\n 11, ('D55', 'green'), 1560,\n (0.680, 0.529), (0.018, 0.576), (0.307, 0.080)),\n 12: PrimariesChromaticityCoordinates(\n 12, ('D55', 'green'), 75,\n (0.661, 0.505), (0.039, 0.598), (0.345, 0.127))})\n# yapf: enable\nBRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES.__doc__ = \"\"\"\n*Breneman (1987)* experiments primaries chromaticities.\n\nReferences\n----------\n:cite:`Breneman1987b`\n\nBRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES : dict\n\"\"\"\n\nBRENEMAN_EXPERIMENTS = DocstringDict({\n 1: BRENEMAN_EXPERIMENT_1_RESULTS,\n 2: BRENEMAN_EXPERIMENT_2_RESULTS,\n 3: BRENEMAN_EXPERIMENT_3_RESULTS,\n 4: BRENEMAN_EXPERIMENT_4_RESULTS,\n 5: BRENEMAN_EXPERIMENT_5_RESULTS,\n 6: BRENEMAN_EXPERIMENT_6_RESULTS,\n 7: BRENEMAN_EXPERIMENT_7_RESULTS,\n 8: BRENEMAN_EXPERIMENT_8_RESULTS,\n 9: BRENEMAN_EXPERIMENT_9_RESULTS,\n 10: BRENEMAN_EXPERIMENT_10_RESULTS,\n 11: BRENEMAN_EXPERIMENT_11_RESULTS,\n 12: BRENEMAN_EXPERIMENT_12_RESULTS\n})\nBRENEMAN_EXPERIMENTS.__doc__ = \"\"\"\n*Breneman (1987)* experiments.\n\nReferences\n----------\n:cite:`Breneman1987b`\n\nBRENEMAN_EXPERIMENTS : dict\n\"\"\"\n",
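The record above is a pure dataset module: per-experiment tuples of `BrenemanExperimentResult` namedtuples keyed by experiment number in `BRENEMAN_EXPERIMENTS`. An illustrative access pattern, assuming the `colour` package at this commit is importable and exposes the module under the file path shown in this row:

from colour.corresponding.datasets.breneman1987 import BRENEMAN_EXPERIMENTS

result = BRENEMAN_EXPERIMENTS[1][2]   # experiment 1, third record ('Red')
print(result.name)                    # 'Red'
print(result.uv_t, result.uv_m)       # test vs. matching u'v' chromaticities
print(result.s_uv)                    # interobserver variation (x10^3)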
"# -*- coding: utf-8 -*-\n\"\"\"\nITU-R BT.1886\n=============\n\nDefines *Recommendation ITU-R BT.1886* electro-optical transfer function\n(EOTF / EOCF) and its inverse:\n\n- :func:`colour.models.eotf_inverse_BT1886`\n- :func:`colour.models.eotf_BT1886`\n\nSee Also\n--------\n`RGB Colourspaces Jupyter Notebook\n<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\\\nblob/master/notebooks/models/rgb.ipynb>`_\n\nReferences\n----------\n- :cite:`InternationalTelecommunicationUnion2011h` : International\n Telecommunication Union. (2011). Recommendation ITU-R BT.1886 - Reference\n electro-optical transfer function for flat panel displays used in HDTV\n studio production BT Series Broadcasting service. Retrieved from\n https://www.itu.int/dms_pubrec/itu-r/rec/bt/\\\nR-REC-BT.1886-0-201103-I!!PDF-E.pdf\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\n\nfrom colour.utilities import from_range_1, to_domain_1\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['eotf_inverse_BT1886', 'eotf_BT1886']\n\n\ndef eotf_inverse_BT1886(L, L_B=0, L_W=1):\n \"\"\"\n Defines *Recommendation ITU-R BT.1886* inverse electro-optical transfer\n function (EOTF / EOCF).\n\n Parameters\n ----------\n L : numeric or array_like\n Screen luminance in :math:`cd/m^2`.\n L_B : numeric, optional\n Screen luminance for black.\n L_W : numeric, optional\n Screen luminance for white.\n\n Returns\n -------\n numeric or ndarray\n Input video signal level (normalised, black at :math:`V = 0`, to white\n at :math:`V = 1`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``L`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``V`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`InternationalTelecommunicationUnion2011h`\n\n Examples\n --------\n >>> eotf_inverse_BT1886(0.11699185725296059) # doctest: +ELLIPSIS\n 0.4090077...\n \"\"\"\n\n L = to_domain_1(L)\n\n gamma = 2.40\n gamma_d = 1 / gamma\n\n n = L_W ** gamma_d - L_B ** gamma_d\n a = n ** gamma\n b = L_B ** gamma_d / n\n\n V = (L / a) ** gamma_d - b\n\n return from_range_1(V)\n\n\ndef eotf_BT1886(V, L_B=0, L_W=1):\n \"\"\"\n Defines *Recommendation ITU-R BT.1886* electro-optical transfer function\n (EOTF / EOCF).\n\n Parameters\n ----------\n V : numeric or array_like\n Input video signal level (normalised, black at :math:`V = 0`, to white\n at :math:`V = 1`. 
For content mastered per\n *Recommendation ITU-R BT.709*, 10-bit digital code values :math:`D` map\n into values of :math:`V` per the following equation:\n :math:`V = (D-64)/876`\n L_B : numeric, optional\n Screen luminance for black.\n L_W : numeric, optional\n Screen luminance for white.\n\n Returns\n -------\n numeric or ndarray\n Screen luminance in :math:`cd/m^2`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``V`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``L`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`InternationalTelecommunicationUnion2011h`\n\n Examples\n --------\n >>> eotf_BT1886(0.409007728864150) # doctest: +ELLIPSIS\n 0.1169918...\n \"\"\"\n\n V = to_domain_1(V)\n\n gamma = 2.40\n gamma_d = 1 / gamma\n\n n = L_W ** gamma_d - L_B ** gamma_d\n a = n ** gamma\n b = L_B ** gamma_d / n\n L = a * np.maximum(V + b, 0) ** gamma\n\n return from_range_1(L)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.volume.mesh` module.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\nfrom itertools import permutations\n\nfrom colour.volume import is_within_mesh_volume\nfrom colour.utilities import ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestIsWithinMeshVolume']\n\n\nclass TestIsWithinMeshVolume(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.volume.mesh.is_within_mesh_volume` definition unit\n tests methods.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialises common tests attributes.\n \"\"\"\n\n self._mesh = np.array([\n [-1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [0.0, 1.0, 0.0],\n ])\n\n def test_is_within_mesh_volume(self):\n \"\"\"\n Tests :func:`colour.volume.mesh.is_within_mesh_volume` definition.\n \"\"\"\n\n self.assertTrue(\n is_within_mesh_volume(\n np.array([0.0005, 0.0031, 0.0010]), self._mesh))\n\n self.assertFalse(\n is_within_mesh_volume(\n np.array([0.3205, 0.4131, 0.5100]), self._mesh))\n\n self.assertTrue(\n is_within_mesh_volume(\n np.array([0.0025, 0.0088, 0.0340]), self._mesh))\n\n self.assertFalse(\n is_within_mesh_volume(\n np.array([0.4325, 0.3788, 0.1034]), self._mesh))\n\n def test_n_dimensional_is_within_mesh_volume(self):\n \"\"\"\n Tests :func:`colour.volume.mesh.is_within_mesh_volume` definition\n n-dimensional arrays support.\n \"\"\"\n\n a = np.array([0.0005, 0.0031, 0.0010])\n b = is_within_mesh_volume(a, self._mesh)\n\n a = np.tile(a, (6, 1))\n b = np.tile(b, 6)\n np.testing.assert_almost_equal(is_within_mesh_volume(a, self._mesh), b)\n\n a = np.reshape(a, (2, 3, 3))\n b = np.reshape(b, (2, 3))\n np.testing.assert_almost_equal(is_within_mesh_volume(a, self._mesh), b)\n\n @ignore_numpy_errors\n def test_nan_is_within_mesh_volume(self):\n \"\"\"\n Tests :func:`colour.volume.mesh.is_within_mesh_volume` definition nan\n support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n is_within_mesh_volume(case, self._mesh)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCommon Utilities\n================\n\nDefines common utilities objects that don't fall in any specific category.\n\nReferences\n----------\n- :cite:`Kienzle2011a` : Kienzle, P., Patel, N., & Krycka, J. (2011).\n refl1d.numpyerrors - Refl1D v0.6.19 documentation. Retrieved January 30,\n 2015, from http://www.reflectometry.org/danse/docs/refl1d/_modules/\\\nrefl1d/numpyerrors.html\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport inspect\nimport multiprocessing\nimport multiprocessing.pool\nimport functools\nimport numpy as np\nimport re\nimport six\nimport warnings\nfrom contextlib import contextmanager\nfrom collections import OrderedDict\nfrom copy import copy\nfrom six import integer_types, string_types\n\nfrom colour.constants import INTEGER_THRESHOLD, DEFAULT_FLOAT_DTYPE\nfrom colour.utilities import Lookup\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'handle_numpy_errors', 'ignore_numpy_errors', 'raise_numpy_errors',\n 'print_numpy_errors', 'warn_numpy_errors', 'ignore_python_warnings',\n 'batch', 'disable_multiprocessing', 'multiprocessing_pool',\n 'is_networkx_installed', 'is_openimageio_installed', 'is_pandas_installed',\n 'is_iterable', 'is_string', 'is_numeric', 'is_integer', 'is_sibling',\n 'filter_kwargs', 'filter_mapping', 'first_item', 'get_domain_range_scale',\n 'set_domain_range_scale', 'domain_range_scale', 'to_domain_1',\n 'to_domain_10', 'to_domain_100', 'to_domain_degrees', 'to_domain_int',\n 'from_range_1', 'from_range_10', 'from_range_100', 'from_range_degrees',\n 'from_range_int'\n]\n\n\ndef handle_numpy_errors(**kwargs):\n \"\"\"\n Decorator for handling *Numpy* errors.\n\n Other Parameters\n ----------------\n \\\\**kwargs : dict, optional\n Keywords arguments.\n\n Returns\n -------\n object\n\n References\n ----------\n :cite:`Kienzle2011a`\n\n Examples\n --------\n >>> import numpy\n >>> @handle_numpy_errors(all='ignore')\n ... def f():\n ... 1 / numpy.zeros(3)\n >>> f()\n \"\"\"\n\n context = np.errstate(**kwargs)\n\n def wrapper(function):\n \"\"\"\n Wrapper for given function.\n \"\"\"\n\n @functools.wraps(function)\n def wrapped(*args, **kwargs):\n \"\"\"\n Wrapped function.\n \"\"\"\n\n with context:\n return function(*args, **kwargs)\n\n return wrapped\n\n return wrapper\n\n\nignore_numpy_errors = handle_numpy_errors(all='ignore')\nraise_numpy_errors = handle_numpy_errors(all='raise')\nprint_numpy_errors = handle_numpy_errors(all='print')\nwarn_numpy_errors = handle_numpy_errors(all='warn')\n\n\ndef ignore_python_warnings(function):\n \"\"\"\n Decorator for ignoring *Python* warnings.\n\n Parameters\n ----------\n function : object\n Function to decorate.\n\n Returns\n -------\n object\n\n Examples\n --------\n >>> @ignore_python_warnings\n ... def f():\n ... 
warnings.warn('This is an ignored warning!')\n >>> f()\n \"\"\"\n\n @functools.wraps(function)\n def wrapped(*args, **kwargs):\n \"\"\"\n Wrapped function.\n \"\"\"\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n\n return function(*args, **kwargs)\n\n return wrapped\n\n\ndef batch(iterable, k=3):\n \"\"\"\n Returns a batch generator from given iterable.\n\n Parameters\n ----------\n iterable : iterable\n Iterable to create batches from.\n k : integer\n Batches size.\n\n Returns\n -------\n bool\n Is *string_like* variable.\n\n Examples\n --------\n >>> batch(tuple(range(10))) # doctest: +ELLIPSIS\n <generator object batch at 0x...>\n \"\"\"\n\n for i in range(0, len(iterable), k):\n yield iterable[i:i + k]\n\n\n_MULTIPROCESSING_ENABLED = True\n\"\"\"\nWhether *Colour* multiprocessing is enabled.\n\n_MULTIPROCESSING_ENABLED : bool\n\"\"\"\n\n\nclass disable_multiprocessing(object):\n \"\"\"\n A context manager and decorator temporarily disabling *Colour*\n multiprocessing.\n \"\"\"\n\n def __enter__(self):\n \"\"\"\n Called upon entering the context manager and decorator.\n \"\"\"\n\n global _MULTIPROCESSING_ENABLED\n\n _MULTIPROCESSING_ENABLED = False\n\n return self\n\n def __exit__(self, *args):\n \"\"\"\n Called upon exiting the context manager and decorator.\n \"\"\"\n\n global _MULTIPROCESSING_ENABLED\n\n _MULTIPROCESSING_ENABLED = True\n\n def __call__(self, function):\n \"\"\"\n Calls the wrapped definition.\n \"\"\"\n\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n with self:\n return function(*args, **kwargs)\n\n return wrapper\n\n\ndef _initializer(kwargs):\n \"\"\"\n Initializer for the multiprocessing pool. It is mainly use to ensure that\n processes on *Windows* correctly inherit from the current domain-range\n scale.\n\n Parameters\n ----------\n kwargs : dict\n Initialisation arguments.\n \"\"\"\n\n global _DOMAIN_RANGE_SCALE\n\n # NOTE: No coverage information is available as this code is executed in\n # sub-processes.\n _DOMAIN_RANGE_SCALE = kwargs.get('scale', 'reference') # pragma: no cover\n\n\n@contextmanager\ndef multiprocessing_pool(*args, **kwargs):\n \"\"\"\n A context manager providing a multiprocessing pool.\n\n Other Parameters\n ----------------\n \\\\*args : list, optional\n Arguments.\n \\\\**kwargs : dict, optional\n Keywords arguments.\n\n Examples\n --------\n >>> from functools import partial\n >>> def _add(a, b):\n ... return a + b\n >>> with multiprocessing_pool() as pool:\n ... pool.map(partial(_add, b=2), range(10))\n ... 
# doctest: +SKIP\n [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n \"\"\"\n\n class _DummyPool(object):\n \"\"\"\n A dummy multiprocessing pool that does not perform multiprocessing.\n\n Other Parameters\n ----------------\n \\\\*args : list, optional\n Arguments.\n \\\\**kwargs : dict, optional\n Keywords arguments.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n pass\n\n def map(self, func, iterable, chunksize=None):\n \"\"\"\n Applies given function to each element of given iterable.\n \"\"\"\n\n return [func(a) for a in iterable]\n\n def terminate(self):\n \"\"\"\n Terminate the process.\n \"\"\"\n\n pass\n\n kwargs['initializer'] = _initializer\n kwargs['initargs'] = ({'scale': get_domain_range_scale()}, )\n\n if _MULTIPROCESSING_ENABLED:\n pool_factory = multiprocessing.Pool\n else:\n pool_factory = _DummyPool\n\n pool = pool_factory(*args, **kwargs)\n\n try:\n yield pool\n finally:\n pool.terminate()\n\n\ndef is_networkx_installed(raise_exception=False):\n \"\"\"\n Returns if *NetworkX* is installed and available.\n\n Parameters\n ----------\n raise_exception : bool\n Raise exception if *NetworkX* is unavailable.\n\n Returns\n -------\n bool\n Is *NetworkX* installed.\n\n Raises\n ------\n ImportError\n If *NetworkX* is not installed.\n \"\"\"\n\n try: # pragma: no cover\n import networkx # noqa\n\n return True\n except ImportError as error: # pragma: no cover\n if raise_exception:\n raise ImportError(('\"NetworkX\" related API features, e.g. '\n 'the automatic colour conversion graph, '\n 'are not available: \"{0}\".').format(error))\n return False\n\n\ndef is_openimageio_installed(raise_exception=False):\n \"\"\"\n Returns if *OpenImageIO* is installed and available.\n\n Parameters\n ----------\n raise_exception : bool\n Raise exception if *OpenImageIO* is unavailable.\n\n Returns\n -------\n bool\n Is *OpenImageIO* installed.\n\n Raises\n ------\n ImportError\n If *OpenImageIO* is not installed.\n \"\"\"\n\n try: # pragma: no cover\n import OpenImageIO # noqa\n\n return True\n except ImportError as error: # pragma: no cover\n if raise_exception:\n raise ImportError(('\"OpenImageIO\" related API features '\n 'are not available: \"{0}\".').format(error))\n return False\n\n\ndef is_pandas_installed(raise_exception=False):\n \"\"\"\n Returns if *Pandas* is installed and available.\n\n Parameters\n ----------\n raise_exception : bool\n Raise exception if *Pandas* is unavailable.\n\n Returns\n -------\n bool\n Is *Pandas* installed.\n\n Raises\n ------\n ImportError\n If *Pandas* is not installed.\n \"\"\"\n\n try: # pragma: no cover\n import pandas # noqa\n\n return True\n except ImportError as error: # pragma: no cover\n if raise_exception:\n raise ImportError(('\"Pandas\" related API features '\n 'are not available: \"{0}\".').format(error))\n return False\n\n\ndef is_iterable(a):\n \"\"\"\n Returns if given :math:`a` variable is iterable.\n\n Parameters\n ----------\n a : object\n Variable to check the iterability.\n\n Returns\n -------\n bool\n :math:`a` variable iterability.\n\n Examples\n --------\n >>> is_iterable([1, 2, 3])\n True\n >>> is_iterable(1)\n False\n \"\"\"\n\n return is_string(a) or (True if getattr(a, '__iter__', False) else False)\n\n\ndef is_string(a):\n \"\"\"\n Returns if given :math:`a` variable is a *string* like variable.\n\n Parameters\n ----------\n a : object\n Data to test.\n\n Returns\n -------\n bool\n Is :math:`a` variable a *string* like variable.\n\n Examples\n --------\n >>> is_string(\"I'm a string!\")\n True\n >>> is_string([\"I'm a string!\"])\n False\n 
\"\"\"\n\n return True if isinstance(a, string_types) else False\n\n\ndef is_numeric(a):\n \"\"\"\n Returns if given :math:`a` variable is a number.\n\n Parameters\n ----------\n a : object\n Variable to check.\n\n Returns\n -------\n bool\n Is :math:`a` variable a number.\n\n Examples\n --------\n >>> is_numeric(1)\n True\n >>> is_numeric((1,))\n False\n \"\"\"\n\n return isinstance(\n a,\n tuple(\n list(integer_types) +\n [float, complex, np.integer, np.floating, np.complex]))\n\n\ndef is_integer(a):\n \"\"\"\n Returns if given :math:`a` variable is an integer under given threshold.\n\n Parameters\n ----------\n a : object\n Variable to check.\n\n Returns\n -------\n bool\n Is :math:`a` variable an integer.\n\n Notes\n -----\n - The determination threshold is defined by the\n :attr:`colour.algebra.common.INTEGER_THRESHOLD` attribute.\n\n Examples\n --------\n >>> is_integer(1)\n True\n >>> is_integer(1.01)\n False\n \"\"\"\n\n return abs(a - round(a)) <= INTEGER_THRESHOLD\n\n\ndef is_sibling(element, mapping):\n \"\"\"\n Returns whether given element type is present in given mapping types.\n\n Parameters\n ----------\n element : object\n Element to check if its type is present in the mapping types.\n mapping : dict\n Mapping.\n\n Returns\n -------\n bool\n Whether given element type is present in given mapping types.\n \"\"\"\n\n return isinstance(\n element, tuple(set(type(element) for element in mapping.values())))\n\n\ndef filter_kwargs(function, **kwargs):\n \"\"\"\n Filters keyword arguments incompatible with the given function signature.\n\n Parameters\n ----------\n function : callable\n Callable to filter the incompatible keyword arguments.\n\n Other Parameters\n ----------------\n \\\\**kwargs : dict, optional\n Keywords arguments.\n\n Returns\n -------\n dict\n Filtered keyword arguments.\n\n Warnings\n --------\n Python 2.7 does not support inspecting the signature of *partial*\n functions, this could cause unexpected behaviour when using this\n definition.\n\n Examples\n --------\n >>> def fn_a(a):\n ... return a\n >>> def fn_b(a, b=0):\n ... return a, b\n >>> def fn_c(a, b=0, c=0):\n ... return a, b, c\n >>> fn_a(1, **filter_kwargs(fn_a, b=2, c=3))\n 1\n >>> fn_b(1, **filter_kwargs(fn_b, b=2, c=3))\n (1, 2)\n >>> fn_c(1, **filter_kwargs(fn_c, b=2, c=3))\n (1, 2, 3)\n \"\"\"\n\n kwargs = copy(kwargs)\n\n # TODO: Remove when dropping Python 2.7.\n if six.PY2: # pragma: no cover\n try:\n args, _varargs, _keywords, _defaults = inspect.getargspec(function)\n except (TypeError, ValueError):\n return {}\n else: # pragma: no cover\n try:\n args = list(inspect.signature(function).parameters.keys())\n except ValueError:\n return {}\n\n args = set(kwargs.keys()) - set(args)\n for key in args:\n kwargs.pop(key)\n\n return kwargs\n\n\ndef filter_mapping(mapping, filterers, anchors=True, flags=re.IGNORECASE):\n \"\"\"\n Filters given mapping with given filterers.\n\n Parameters\n ----------\n mapping : dict_like\n Mapping to filter.\n filterers : unicode or object or array_like\n Filterer pattern for given mapping elements or a list of filterers.\n anchors : bool, optional\n Whether to use Regex line anchors, i.e. *^* and *$* are added,\n surrounding the filterer pattern.\n flags : int, optional\n Regex flags.\n\n Returns\n -------\n OrderedDict\n Filtered mapping elements.\n\n Notes\n -----\n - To honour the filterers ordering, the return value is an\n :class:`OrderedDict` class instance.\n\n Examples\n --------\n >>> class Element(object):\n ... pass\n >>> mapping = {\n ... 
'Element A': Element(),\n ... 'Element B': Element(),\n ... 'Element C': Element(),\n ... 'Not Element C': Element(),\n ... }\n >>> # Doctests skip for Python 2.x compatibility.\n >>> filter_mapping(mapping, '\\\\w+\\\\s+A') # doctest: +SKIP\n {u'Element A': <colour.utilities.common.Element object at 0x...>}\n >>> # Doctests skip for Python 2.x compatibility.\n >>> sorted(filter_mapping(mapping, 'Element.*')) # doctest: +SKIP\n [u'Element A', u'Element B', u'Element C']\n \"\"\"\n\n def filter_mapping_with_filter(mapping, filterer, anchors, flags):\n \"\"\"\n Filters given mapping with given filterer.\n\n Parameters\n ----------\n mapping : dict_like\n Mapping to filter.\n filterer : unicode or object\n Filterer pattern for given mapping elements.\n anchors : bool, optional\n Whether to use Regex line anchors, i.e. *^* and *$* are added,\n surrounding the filterer pattern.\n flags : int, optional\n Regex flags.\n\n Returns\n -------\n OrderedDict\n Filtered mapping elements.\n \"\"\"\n\n if anchors:\n filterer = '^{0}$'.format(filterer)\n filterer = filterer.replace('^^', '^').replace('$$', '$')\n\n elements = [\n mapping[element] for element in mapping\n if re.match(filterer, element, flags)\n ]\n\n lookup = Lookup(mapping)\n\n return OrderedDict((lookup.first_key_from_value(element), element)\n for element in elements)\n\n if is_string(filterers):\n filterers = [filterers]\n\n filtered_mapping = OrderedDict()\n\n for filterer in filterers:\n filtered_mapping.update(\n filter_mapping_with_filter(mapping, filterer, anchors, flags))\n\n return filtered_mapping\n\n\ndef first_item(a):\n \"\"\"\n Return the first item of an iterable.\n\n Parameters\n ----------\n a : object\n Iterable to get the first item from.\n\n Returns\n -------\n object\n\n Raises\n ------\n StopIteration\n If the iterable is empty.\n\n Examples\n --------\n >>> a = range(10)\n >>> first_item(a)\n 0\n \"\"\"\n\n return next(iter(a))\n\n\n_DOMAIN_RANGE_SCALE = 'reference'\n\"\"\"\nGlobal variable storing the current *Colour* domain-range scale.\n\n_DOMAIN_RANGE_SCALE : unicode\n\"\"\"\n\n\ndef get_domain_range_scale():\n \"\"\"\n Returns the current *Colour* domain-range scale. The following scales are\n available:\n\n - **'Reference'**, the default *Colour* domain-range scale which varies\n depending on the referenced algorithm, e.g. [0, 1], [0, 10], [0, 100],\n [0, 255], etc...\n - **'1'**, a domain-range scale normalised to [0, 1], it is important to\n acknowledge that this is a soft normalisation and it is possible to\n use negative out of gamut values or high dynamic range data exceeding\n 1.\n\n Returns\n -------\n unicode\n *Colour* domain-range scale.\n \"\"\"\n\n return _DOMAIN_RANGE_SCALE\n\n\ndef set_domain_range_scale(scale='Reference'):\n \"\"\"\n Sets the current *Colour* domain-range scale. The following scales are\n available:\n\n - **'Reference'**, the default *Colour* domain-range scale which varies\n depending on the referenced algorithm, e.g. 
[0, 1], [0, 10], [0, 100],\n [0, 255], etc...\n - **'1'**, a domain-range scale normalised to [0, 1], it is important to\n acknowledge that this is a soft normalisation and it is possible to\n use negative out of gamut values or high dynamic range data exceeding\n 1.\n\n Parameters\n ----------\n scale : unicode or int\n **{'Reference', '1'}**,\n *Colour* domain-range scale to set.\n \"\"\"\n\n global _DOMAIN_RANGE_SCALE\n\n scale = str(scale).lower()\n valid = ('1', '100', 'reference', 'ignore')\n assert scale in valid, 'Scale must be one of \"{0}\".'.format(valid)\n\n _DOMAIN_RANGE_SCALE = scale\n\n\nclass domain_range_scale(object):\n \"\"\"\n A context manager and decorator temporarily setting *Colour* domain-range\n scale. The following scales are available:\n\n - **'Reference'**, the default *Colour* domain-range scale which varies\n depending on the referenced algorithm, e.g. [0, 1], [0, 10], [0, 100],\n [0, 255], etc...\n - **'1'**, a domain-range scale normalised to [0, 1], it is important to\n acknowledge that this is a soft normalisation and it is possible to\n use negative out of gamut values or high dynamic range data exceeding\n 1.\n\n Parameters\n ----------\n scale : unicode\n **{'Reference', '1'}**,\n *Colour* domain-range scale to set.\n \"\"\"\n\n def __init__(self, scale):\n self._scale = scale\n self._previous_scale = get_domain_range_scale()\n\n def __enter__(self):\n \"\"\"\n Called upon entering the context manager and decorator.\n \"\"\"\n\n set_domain_range_scale(self._scale)\n\n return self\n\n def __exit__(self, *args):\n \"\"\"\n Called upon exiting the context manager and decorator.\n \"\"\"\n\n set_domain_range_scale(self._previous_scale)\n\n def __call__(self, function):\n \"\"\"\n Calls the wrapped definition.\n \"\"\"\n\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n with self:\n return function(*args, **kwargs)\n\n return wrapper\n\n\ndef to_domain_1(a, scale_factor=100, dtype=DEFAULT_FLOAT_DTYPE):\n \"\"\"\n Scales given array :math:`a` to domain **'1'**. The behaviour is as\n follows:\n\n - If *Colour* domain-range scale is **'Reference'** or **'1'**, the\n definition is almost entirely by-passed and will just conveniently\n convert array :math:`a` to :class:`np.ndarray`.\n - If *Colour* domain-range scale is **'100'** (currently unsupported\n private value only used for unit tests), array :math:`a` is divided by\n ``scale_factor``, typically 100.\n\n Parameters\n ----------\n a : array_like\n :math:`a` to scale to domain **'1'**.\n scale_factor : numeric or array_like, optional\n Scale factor, usually *numeric* but can be an *array_like* if some\n axis need different scaling to be brought to domain **'1'**.\n dtype : object, optional\n Data type used for the conversion to :class:`np.ndarray`.\n\n Returns\n -------\n ndarray\n :math:`a` scaled to domain **'1'**.\n\n Examples\n --------\n With *Colour* domain-range scale set to **'Reference'**:\n\n >>> with domain_range_scale('Reference'):\n ... to_domain_1(1)\n array(1.0)\n\n With *Colour* domain-range scale set to **'1'**:\n\n >>> with domain_range_scale('1'):\n ... to_domain_1(1)\n array(1.0)\n\n With *Colour* domain-range scale set to **'100'** (unsupported):\n\n >>> with domain_range_scale('100'):\n ... 
to_domain_1(1)\n array(0.01)\n \"\"\"\n\n a = np.asarray(a, dtype).copy()\n\n if _DOMAIN_RANGE_SCALE == '100':\n a /= scale_factor\n\n return a\n\n\ndef to_domain_10(a, scale_factor=10, dtype=DEFAULT_FLOAT_DTYPE):\n \"\"\"\n Scales given array :math:`a` to domain **'10'**, used by\n *Munsell Renotation System*. The behaviour is as follows:\n\n - If *Colour* domain-range scale is **'Reference'**, the\n definition is almost entirely by-passed and will just conveniently\n convert array :math:`a` to :class:`np.ndarray`.\n - If *Colour* domain-range scale is **'1'**, array :math:`a` is\n multiplied by ``scale_factor``, typically 10.\n - If *Colour* domain-range scale is **'100'** (currently unsupported\n private value only used for unit tests), array :math:`a` is\n divided by ``scale_factor``, typically 10.\n\n Parameters\n ----------\n a : array_like\n :math:`a` to scale to domain **'10'**.\n scale_factor : numeric or array_like, optional\n Scale factor, usually *numeric* but can be an *array_like* if some\n axis need different scaling to be brought to domain **'10'**.\n dtype : object, optional\n Data type used for the conversion to :class:`np.ndarray`.\n\n Returns\n -------\n ndarray\n :math:`a` scaled to domain **'10'**.\n\n Examples\n --------\n With *Colour* domain-range scale set to **'Reference'**:\n\n >>> with domain_range_scale('Reference'):\n ... to_domain_10(1)\n array(1.0)\n\n With *Colour* domain-range scale set to **'1'**:\n\n >>> with domain_range_scale('1'):\n ... to_domain_10(1)\n array(10.0)\n\n With *Colour* domain-range scale set to **'100'** (unsupported):\n\n >>> with domain_range_scale('100'):\n ... to_domain_10(1)\n array(0.1)\n \"\"\"\n\n a = np.asarray(a, dtype).copy()\n\n if _DOMAIN_RANGE_SCALE == '1':\n a *= scale_factor\n\n if _DOMAIN_RANGE_SCALE == '100':\n a /= scale_factor\n\n return a\n\n\ndef to_domain_100(a, scale_factor=100, dtype=DEFAULT_FLOAT_DTYPE):\n \"\"\"\n Scales given array :math:`a` to domain **'100'**. The behaviour is as\n follows:\n\n - If *Colour* domain-range scale is **'Reference'** or **'100'**\n (currently unsupported private value only used for unit tests), the\n definition is almost entirely by-passed and will just conveniently\n convert array :math:`a` to :class:`np.ndarray`.\n - If *Colour* domain-range scale is **'1'**, array :math:`a` is\n multiplied by ``scale_factor``, typically 100.\n\n Parameters\n ----------\n a : array_like\n :math:`a` to scale to domain **'100'**.\n scale_factor : numeric or array_like, optional\n Scale factor, usually *numeric* but can be an *array_like* if some\n axis need different scaling to be brought to domain **'100'**.\n dtype : object, optional\n Data type used for the conversion to :class:`np.ndarray`.\n\n Returns\n -------\n ndarray\n :math:`a` scaled to domain **'100'**.\n\n Examples\n --------\n With *Colour* domain-range scale set to **'Reference'**:\n\n >>> with domain_range_scale('Reference'):\n ... to_domain_100(1)\n array(1.0)\n\n With *Colour* domain-range scale set to **'1'**:\n\n >>> with domain_range_scale('1'):\n ... to_domain_100(1)\n array(100.0)\n\n With *Colour* domain-range scale set to **'100'** (unsupported):\n\n >>> with domain_range_scale('100'):\n ... to_domain_100(1)\n array(1.0)\n \"\"\"\n\n a = np.asarray(a, dtype).copy()\n\n if _DOMAIN_RANGE_SCALE == '1':\n a *= scale_factor\n\n return a\n\n\ndef to_domain_degrees(a, scale_factor=360, dtype=DEFAULT_FLOAT_DTYPE):\n \"\"\"\n Scales given array :math:`a` to degrees domain. 
The behaviour is as\n follows:\n\n - If *Colour* domain-range scale is **'Reference'**, the\n definition is almost entirely by-passed and will just conveniently\n convert array :math:`a` to :class:`np.ndarray`.\n - If *Colour* domain-range scale is **'1'**, array :math:`a` is\n multiplied by ``scale_factor``, typically 360.\n - If *Colour* domain-range scale is **'100'** (currently unsupported\n private value only used for unit tests), array :math:`a` is\n multiplied by ``scale_factor`` / 100, typically 360 / 100.\n\n Parameters\n ----------\n a : array_like\n :math:`a` to scale to degrees domain.\n scale_factor : numeric or array_like, optional\n Scale factor, usually *numeric* but can be an *array_like* if some\n axis need different scaling to be brought to degrees domain.\n dtype : object, optional\n Data type used for the conversion to :class:`np.ndarray`.\n\n Returns\n -------\n ndarray\n :math:`a` scaled to degrees domain.\n\n Examples\n --------\n With *Colour* domain-range scale set to **'Reference'**:\n\n >>> with domain_range_scale('Reference'):\n ... to_domain_degrees(1)\n array(1.0)\n\n With *Colour* domain-range scale set to **'1'**:\n\n >>> with domain_range_scale('1'):\n ... to_domain_degrees(1)\n array(360.0)\n\n With *Colour* domain-range scale set to **'100'** (unsupported):\n\n >>> with domain_range_scale('100'):\n ... to_domain_degrees(1)\n array(3.6)\n \"\"\"\n\n a = np.asarray(a, dtype).copy()\n\n if _DOMAIN_RANGE_SCALE == '1':\n a *= scale_factor\n\n if _DOMAIN_RANGE_SCALE == '100':\n a *= scale_factor / 100\n\n return a\n\n\ndef to_domain_int(a, bit_depth=8, dtype=DEFAULT_FLOAT_DTYPE):\n \"\"\"\n Scales given array :math:`a` to int domain. The behaviour is as follows:\n\n - If *Colour* domain-range scale is **'Reference'**, the\n definition is almost entirely by-passed and will just conveniently\n convert array :math:`a` to :class:`np.ndarray`.\n - If *Colour* domain-range scale is **'1'**, array :math:`a` is\n multiplied by :math:`2^{bit\\\\_depth} - 1`.\n - If *Colour* domain-range scale is **'100'** (currently unsupported\n private value only used for unit tests), array :math:`a` is\n multiplied by :math:`2^{bit\\\\_depth} - 1`.\n\n Parameters\n ----------\n a : array_like\n :math:`a` to scale to int domain.\n bit_depth : numeric or array_like, optional\n Bit depth, usually *int* but can be an *array_like* if some axis need\n different scaling to be brought to int domain.\n dtype : object, optional\n Data type used for the conversion to :class:`np.ndarray`.\n\n Returns\n -------\n ndarray\n :math:`a` scaled to int domain.\n\n Notes\n -----\n - To avoid precision issues and rounding, the scaling is performed on\n floating-point numbers.\n\n Examples\n --------\n With *Colour* domain-range scale set to **'Reference'**:\n\n >>> with domain_range_scale('Reference'):\n ... to_domain_int(1)\n array(1.0)\n\n With *Colour* domain-range scale set to **'1'**:\n\n >>> with domain_range_scale('1'):\n ... to_domain_int(1)\n array(255.0)\n\n With *Colour* domain-range scale set to **'100'** (unsupported):\n\n >>> with domain_range_scale('100'):\n ... to_domain_int(1)\n array(2.55)\n \"\"\"\n\n a = np.asarray(a, dtype).copy()\n\n maximum_code_value = 2 ** bit_depth - 1\n if _DOMAIN_RANGE_SCALE == '1':\n a *= maximum_code_value\n\n if _DOMAIN_RANGE_SCALE == '100':\n a *= maximum_code_value / 100\n\n return a\n\n\ndef from_range_1(a, scale_factor=100):\n \"\"\"\n Scales given array :math:`a` from range **'1'**. 
The behaviour is as\n follows:\n\n - If *Colour* domain-range scale is **'Reference'** or **'1'**, the\n definition is entirely by-passed.\n - If *Colour* domain-range scale is **'100'** (currently unsupported\n private value only used for unit tests), array :math:`a` is multiplied\n by ``scale_factor``, typically 100.\n\n Parameters\n ----------\n a : array_like\n :math:`a` to scale from range **'1'**.\n scale_factor : numeric or array_like, optional\n Scale factor, usually *numeric* but can be an *array_like* if some\n axis need different scaling to be brought from range **'1'**.\n\n Returns\n -------\n ndarray\n :math:`a` scaled from range **'1'**.\n\n Examples\n --------\n With *Colour* domain-range scale set to **'Reference'**:\n\n >>> with domain_range_scale('Reference'):\n ... from_range_1(1)\n 1\n\n With *Colour* domain-range scale set to **'1'**:\n\n >>> with domain_range_scale('1'):\n ... from_range_1(1)\n 1\n\n With *Colour* domain-range scale set to **'100'** (unsupported):\n\n >>> with domain_range_scale('100'):\n ... from_range_1(1)\n 100\n \"\"\"\n\n if _DOMAIN_RANGE_SCALE == '100':\n a *= scale_factor\n\n return a\n\n\ndef from_range_10(a, scale_factor=10):\n \"\"\"\n Scales given array :math:`a` from range **'10'**, used by\n *Munsell Renotation System*. The behaviour is as follows:\n\n - If *Colour* domain-range scale is **'Reference'**, the\n definition is entirely by-passed.\n - If *Colour* domain-range scale is **'1'**, array :math:`a` is\n divided by ``scale_factor``, typically 10.\n - If *Colour* domain-range scale is **'100'** (currently unsupported\n private value only used for unit tests), array :math:`a` is\n multiplied by ``scale_factor``, typically 10.\n\n Parameters\n ----------\n a : array_like\n :math:`a` to scale from range **'10'**.\n scale_factor : numeric or array_like, optional\n Scale factor, usually *numeric* but can be an *array_like* if some\n axis need different scaling to be brought from range **'10'**.\n\n Returns\n -------\n ndarray\n :math:`a` scaled from range **'10'**.\n\n Examples\n --------\n With *Colour* domain-range scale set to **'Reference'**:\n\n >>> with domain_range_scale('Reference'):\n ... from_range_10(1)\n 1\n\n With *Colour* domain-range scale set to **'1'**:\n\n >>> with domain_range_scale('1'):\n ... from_range_10(1)\n 0.1\n\n With *Colour* domain-range scale set to **'100'** (unsupported):\n\n >>> with domain_range_scale('100'):\n ... from_range_10(1)\n 10\n \"\"\"\n\n if _DOMAIN_RANGE_SCALE == '1':\n a /= scale_factor\n\n if _DOMAIN_RANGE_SCALE == '100':\n a *= scale_factor\n\n return a\n\n\ndef from_range_100(a, scale_factor=100):\n \"\"\"\n Scales given array :math:`a` from range **'100'**. The behaviour is as\n follows:\n\n - If *Colour* domain-range scale is **'Reference'** or **'100'**\n (currently unsupported private value only used for unit tests), the\n definition is entirely by-passed.\n - If *Colour* domain-range scale is **'1'**, array :math:`a` is\n divided by ``scale_factor``, typically 100.\n\n Parameters\n ----------\n a : array_like\n :math:`a` to scale from range **'100'**.\n scale_factor : numeric or array_like, optional\n Scale factor, usually *numeric* but can be an *array_like* if some\n axis need different scaling to be brought from range **'100'**.\n\n Returns\n -------\n ndarray\n :math:`a` scaled from range **'100'**.\n\n Examples\n --------\n With *Colour* domain-range scale set to **'Reference'**:\n\n >>> with domain_range_scale('Reference'):\n ... 
from_range_100(1)\n    1\n\n    With *Colour* domain-range scale set to **'1'**:\n\n    >>> with domain_range_scale('1'):\n    ...     from_range_100(1)\n    0.01\n\n    With *Colour* domain-range scale set to **'100'** (unsupported):\n\n    >>> with domain_range_scale('100'):\n    ...     from_range_100(1)\n    1\n    \"\"\"\n\n    if _DOMAIN_RANGE_SCALE == '1':\n        a /= scale_factor\n\n    return a\n\n\ndef from_range_degrees(a, scale_factor=360):\n    \"\"\"\n    Scales given array :math:`a` from degrees range. The behaviour is as\n    follows:\n\n    -   If *Colour* domain-range scale is **'Reference'**, the\n        definition is entirely by-passed.\n    -   If *Colour* domain-range scale is **'1'**, array :math:`a` is\n        divided by ``scale_factor``, typically 360.\n    -   If *Colour* domain-range scale is **'100'** (currently unsupported\n        private value only used for unit tests), array :math:`a` is\n        divided by ``scale_factor`` / 100, typically 360 / 100.\n\n    Parameters\n    ----------\n    a : array_like\n        :math:`a` to scale from degrees range.\n    scale_factor : numeric or array_like, optional\n        Scale factor, usually *numeric* but can be an *array_like* if some\n        axis need different scaling to be brought from degrees range.\n\n    Returns\n    -------\n    ndarray\n        :math:`a` scaled from degrees range.\n\n    Examples\n    --------\n    With *Colour* domain-range scale set to **'Reference'**:\n\n    >>> with domain_range_scale('Reference'):\n    ...     from_range_degrees(1)\n    1\n\n    With *Colour* domain-range scale set to **'1'**:\n\n    >>> with domain_range_scale('1'):\n    ...     from_range_degrees(1)  # doctest: +ELLIPSIS\n    0.0027777...\n\n    With *Colour* domain-range scale set to **'100'** (unsupported):\n\n    >>> with domain_range_scale('100'):\n    ...     from_range_degrees(1)  # doctest: +ELLIPSIS\n    0.2777777...\n    \"\"\"\n\n    if _DOMAIN_RANGE_SCALE == '1':\n        a /= scale_factor\n\n    if _DOMAIN_RANGE_SCALE == '100':\n        a /= scale_factor / 100\n\n    return a\n\n\ndef from_range_int(a, bit_depth=8, dtype=DEFAULT_FLOAT_DTYPE):\n    \"\"\"\n    Scales given array :math:`a` from int range. The behaviour is as follows:\n\n    -   If *Colour* domain-range scale is **'Reference'**, the\n        definition is entirely by-passed.\n    -   If *Colour* domain-range scale is **'1'**, array :math:`a` is converted\n        to :class:`np.ndarray` and divided by :math:`2^{bit\\\\_depth} - 1`.\n    -   If *Colour* domain-range scale is **'100'** (currently unsupported\n        private value only used for unit tests), array :math:`a` is converted\n        to :class:`np.ndarray` and divided by :math:`(2^{bit\\\\_depth} - 1) / 100`.\n\n    Parameters\n    ----------\n    a : array_like\n        :math:`a` to scale from int range.\n    bit_depth : numeric or array_like, optional\n        Bit depth, usually *int* but can be an *array_like* if some axis need\n        different scaling to be brought from int range.\n    dtype : object, optional\n        Data type used for the conversion to :class:`np.ndarray`.\n\n    Returns\n    -------\n    ndarray\n        :math:`a` scaled from int range.\n\n    Notes\n    -----\n    -   To avoid precision issues and rounding, the scaling is performed on\n        floating-point numbers.\n\n    Examples\n    --------\n    With *Colour* domain-range scale set to **'Reference'**:\n\n    >>> with domain_range_scale('Reference'):\n    ...     from_range_int(1)\n    1\n\n    With *Colour* domain-range scale set to **'1'**:\n\n    >>> with domain_range_scale('1'):\n    ...     from_range_int(1)  # doctest: +ELLIPSIS\n    array(0.0039215...)\n\n    With *Colour* domain-range scale set to **'100'** (unsupported):\n\n    >>> with domain_range_scale('100'):\n    ...     
from_range_int(1) # doctest: +ELLIPSIS\n array(0.3921568...)\n \"\"\"\n\n maximum_code_value = 2 ** bit_depth - 1\n if _DOMAIN_RANGE_SCALE == '1':\n a = np.asarray(a, dtype)\n a /= maximum_code_value\n\n if _DOMAIN_RANGE_SCALE == '100':\n a = np.asarray(a, dtype)\n a /= maximum_code_value / 100\n\n return a\n",
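The to_domain_* / from_range_* helpers above all follow one pattern: a definition scales its inputs into its reference working domain on entry and scales its outputs back on exit, driven by the active domain-range scale. A minimal sketch of that pattern, assuming the helpers are importable from `colour.utilities`; `halve_reflectance` is a hypothetical definition used only for illustration:

from colour.utilities import domain_range_scale, from_range_100, to_domain_100

def halve_reflectance(Y):
    # Scale the input into the reference [0, 100] working domain,
    # compute there, then scale the result back out.
    Y = to_domain_100(Y)
    return from_range_100(Y / 2)

print(halve_reflectance(50))       # Reference scale: 25.0
with domain_range_scale('1'):
    print(halve_reflectance(0.5))  # '1' scale: 0.25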
"# -*- coding: utf-8 -*-\n\"\"\"\n:math:`LLAB(l:c)` Colour Appearance Model\n=========================================\n\nDefines *:math:`LLAB(l:c)`* colour appearance model objects:\n\n- :class:`colour.appearance.LLAB_InductionFactors`\n- :attr:`colour.LLAB_VIEWING_CONDITIONS`\n- :class:`colour.LLAB_Specification`\n- :func:`colour.XYZ_to_LLAB`\n\nSee Also\n--------\n`LLAB(l:c) Colour Appearance Model Jupyter Notebook\n<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\\\nblob/master/notebooks/appearance/llab.ipynb>`_\n\nReferences\n----------\n- :cite:`Fairchild2013x` : Fairchild, M. D. (2013). LLAB Model. In Color\n Appearance Models (3rd ed., pp. 6025-6178). Wiley. ISBN:B00DAYO8E2\n- :cite:`Luo1996b` : Luo, M. R., Lo, M.-C., & Kuo, W.-G. (1996). The LLAB\n (l:c) colour model. Color Research & Application, 21(6), 412-429.\n doi:10.1002/(SICI)1520-6378(199612)21:6<412::AID-COL4>3.0.CO;2-Z\n- :cite:`Luo1996c` : Luo, M. R., & Morovic, J. (1996). Two Unsolved Issues in\n Colour Management - Colour Appearance and Gamut Mapping. In Conference:\n 5th International Conference on High Technology: Imaging Science and\n Technology - Evolution & Promise (pp. 136-147). Retrieved from\n http://www.researchgate.net/publication/\\236348295_Two_Unsolved_Issues_in\\\n_Colour_Management__Colour_Appearance_and_Gamut_Mapping\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nfrom collections import namedtuple\n\nfrom colour.algebra import polar_to_cartesian, spow\nfrom colour.utilities import (CaseInsensitiveMapping, as_float_array,\n dot_vector, from_range_degrees, to_domain_100,\n tsplit, tstack)\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'LLAB_InductionFactors', 'LLAB_VIEWING_CONDITIONS',\n 'LLAB_XYZ_TO_RGB_MATRIX', 'LLAB_RGB_TO_XYZ_MATRIX',\n 'LLAB_ReferenceSpecification', 'LLAB_Specification', 'XYZ_to_LLAB',\n 'XYZ_to_RGB_LLAB', 'chromatic_adaptation', 'f',\n 'opponent_colour_dimensions', 'hue_angle', 'chroma_correlate',\n 'colourfulness_correlate', 'saturation_correlate', 'final_opponent_signals'\n]\n\n\nclass LLAB_InductionFactors(\n namedtuple('LLAB_InductionFactors', ('D', 'F_S', 'F_L', 'F_C'))):\n \"\"\"\n *:math:`LLAB(l:c)`* colour appearance model induction factors.\n\n Parameters\n ----------\n D : numeric or array_like\n *Discounting-the-Illuminant* factor :math:`D`.\n F_S : numeric or array_like\n Surround induction factor :math:`F_S`.\n F_L : numeric or array_like\n *Lightness* induction factor :math:`F_L`.\n F_C : numeric or array_like\n *Chroma* induction factor :math:`F_C`.\n\n References\n ----------\n :cite:`Fairchild2013x`, :cite:`Luo1996b`, :cite:`Luo1996c`\n \"\"\"\n\n\nLLAB_VIEWING_CONDITIONS = CaseInsensitiveMapping({\n 'Reference Samples & Images, Average Surround, Subtending > 4': (\n LLAB_InductionFactors(1, 3, 0, 1)),\n 'Reference Samples & Images, Average Surround, Subtending < 4': (\n LLAB_InductionFactors(1, 3, 1, 1)),\n 'Television & VDU Displays, Dim Surround': (LLAB_InductionFactors(\n 0.7, 3.5, 1, 1)),\n 'Cut Sheet Transparency, Dim Surround': (LLAB_InductionFactors(\n 1, 5, 1, 1.1)),\n '35mm Projection Transparency, Dark Surround': (LLAB_InductionFactors(\n 0.7, 4, 1, 1))\n})\nLLAB_VIEWING_CONDITIONS.__doc__ = \"\"\"\nReference :math:`LLAB(l:c)` colour appearance model 
viewing conditions.\n\nReferences\n----------\n:cite:`Fairchild2013x`, :cite:`Luo1996b`, :cite:`Luo1996c`\n\nLLAB_VIEWING_CONDITIONS : CaseInsensitiveMapping\n    **{'Reference Samples & Images, Average Surround, Subtending > 4',\n    'Reference Samples & Images, Average Surround, Subtending < 4',\n    'Television & VDU Displays, Dim Surround',\n    'Cut Sheet Transparency, Dim Surround',\n    '35mm Projection Transparency, Dark Surround'}**\n\nAliases:\n\n-   'ref_average_4_plus':\n    'Reference Samples & Images, Average Surround, Subtending > 4'\n-   'ref_average_4_minus':\n    'Reference Samples & Images, Average Surround, Subtending < 4'\n-   'tv_dim': 'Television & VDU Displays, Dim Surround'\n-   'sheet_dim': 'Cut Sheet Transparency, Dim Surround'\n-   'projected_dark': '35mm Projection Transparency, Dark Surround'\n\"\"\"\nLLAB_VIEWING_CONDITIONS['ref_average_4_plus'] = (  # yapf: disable\n    LLAB_VIEWING_CONDITIONS['Reference Samples & Images, '\n                            'Average Surround, Subtending > 4'])\nLLAB_VIEWING_CONDITIONS['ref_average_4_minus'] = (  # yapf: disable\n    LLAB_VIEWING_CONDITIONS['Reference Samples & Images, '\n                            'Average Surround, Subtending < 4'])\nLLAB_VIEWING_CONDITIONS['tv_dim'] = (\n    LLAB_VIEWING_CONDITIONS['Television & VDU Displays, Dim Surround'])\nLLAB_VIEWING_CONDITIONS['sheet_dim'] = (\n    LLAB_VIEWING_CONDITIONS['Cut Sheet Transparency, Dim Surround'])\nLLAB_VIEWING_CONDITIONS['projected_dark'] = (\n    LLAB_VIEWING_CONDITIONS['35mm Projection Transparency, Dark Surround'])\n\nLLAB_XYZ_TO_RGB_MATRIX = np.array([\n    [0.8951, 0.2664, -0.1614],\n    [-0.7502, 1.7135, 0.0367],\n    [0.0389, -0.0685, 1.0296],\n])\n\"\"\"\nLLAB(l:c) colour appearance model *CIE XYZ* tristimulus values to normalised\ncone responses matrix.\n\nLLAB_XYZ_TO_RGB_MATRIX : array_like, (3, 3)\n\"\"\"\n\nLLAB_RGB_TO_XYZ_MATRIX = np.linalg.inv(LLAB_XYZ_TO_RGB_MATRIX)\n\"\"\"\nLLAB(l:c) colour appearance model normalised cone responses to *CIE XYZ*\ntristimulus values matrix.\n\nLLAB_RGB_TO_XYZ_MATRIX : array_like, (3, 3)\n\"\"\"\n\n\nclass LLAB_ReferenceSpecification(\n        namedtuple('LLAB_ReferenceSpecification',\n                   ('L_L', 'Ch_L', 'h_L', 's_L', 'C_L', 'HC', 'A_L', 'B_L'))):\n    \"\"\"\n    Defines the *:math:`LLAB(l:c)`* colour appearance model reference\n    specification.\n\n    This specification has field names consistent with *Fairchild (2013)*\n    reference.\n\n    Parameters\n    ----------\n    L_L : numeric or array_like\n        Correlate of *Lightness* :math:`L_L`.\n    Ch_L : numeric or array_like\n        Correlate of *chroma* :math:`Ch_L`.\n    h_L : numeric or array_like\n        *Hue* angle :math:`h_L` in degrees.\n    s_L : numeric or array_like\n        Correlate of *saturation* :math:`s_L`.\n    C_L : numeric or array_like\n        Correlate of *colourfulness* :math:`C_L`.\n    HC : numeric or array_like\n        *Hue* :math:`h` composition :math:`H^C`.\n    A_L : numeric or array_like\n        Opponent signal :math:`A_L`.\n    B_L : numeric or array_like\n        Opponent signal :math:`B_L`.\n\n    References\n    ----------\n    :cite:`Fairchild2013x`, :cite:`Luo1996b`, :cite:`Luo1996c`\n    \"\"\"\n\n\nclass LLAB_Specification(\n        namedtuple('LLAB_Specification',\n                   ('J', 'C', 'h', 's', 'M', 'HC', 'a', 'b'))):\n    \"\"\"\n    Defines the *:math:`LLAB(l:c)`* colour appearance model specification.\n\n    This specification has field names consistent with the remaining colour\n    appearance models in :mod:`colour.appearance` but diverge from\n    *Fairchild (2013)* reference.\n\n    Parameters\n    ----------\n    J : numeric or array_like\n        Correlate of *Lightness* :math:`L_L`.\n    C : numeric or array_like\n        Correlate of *chroma* :math:`Ch_L`.\n    h : numeric or 
array_like\n *Hue* angle :math:`h_L` in degrees.\n s : numeric or array_like\n Correlate of *saturation* :math:`s_L`.\n M : numeric or array_like\n Correlate of *colourfulness* :math:`C_L`.\n HC : numeric or array_like\n *Hue* :math:`h` composition :math:`H^C`.\n a : numeric or array_like\n Opponent signal :math:`A_L`.\n b : numeric or array_like\n Opponent signal :math:`B_L`.\n\n Notes\n -----\n - This specification is the one used in the current model implementation.\n\n References\n ----------\n :cite:`Fairchild2013x`, :cite:`Luo1996b`, :cite:`Luo1996c`\n \"\"\"\n\n\ndef XYZ_to_LLAB(\n XYZ,\n XYZ_0,\n Y_b,\n L,\n surround=LLAB_VIEWING_CONDITIONS[\n 'Reference Samples & Images, Average Surround, Subtending < 4']):\n \"\"\"\n Computes the *:math:`LLAB(l:c)`* colour appearance model correlates.\n\n Parameters\n ----------\n XYZ : array_like\n *CIE XYZ* tristimulus values of test sample / stimulus.\n XYZ_0 : array_like\n *CIE XYZ* tristimulus values of reference white.\n Y_b : numeric or array_like\n Luminance factor of the background in :math:`cd/m^2`.\n L : numeric or array_like\n Absolute luminance :math:`L` of reference white in :math:`cd/m^2`.\n surround : LLAB_InductionFactors, optional\n Surround viewing conditions induction factors.\n\n Returns\n -------\n LLAB_Specification\n *:math:`LLAB(l:c)`* colour appearance model specification.\n\n Notes\n -----\n\n +--------------------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +==========================+=======================+===============+\n | ``XYZ`` | [0, 100] | [0, 1] |\n +--------------------------+-----------------------+---------------+\n | ``XYZ_0`` | [0, 100] | [0, 1] |\n +--------------------------+-----------------------+---------------+\n\n +--------------------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +==========================+=======================+===============+\n | ``LLAB_Specification.h`` | [0, 360] | [0, 1] |\n +--------------------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Fairchild2013x`, :cite:`Luo1996b`, :cite:`Luo1996c`\n\n Examples\n --------\n >>> XYZ = np.array([19.01, 20.00, 21.78])\n >>> XYZ_0 = np.array([95.05, 100.00, 108.88])\n >>> Y_b = 20.0\n >>> L = 318.31\n >>> surround = LLAB_VIEWING_CONDITIONS['ref_average_4_minus']\n >>> XYZ_to_LLAB(XYZ, XYZ_0, Y_b, L, surround) # doctest: +ELLIPSIS\n LLAB_Specification(J=37.3668650..., C=0.0089496..., h=270..., \\\ns=0.0002395..., M=0.0190185..., HC=None, a=..., b=-0.0190185...)\n \"\"\"\n\n _X, Y, _Z = tsplit(to_domain_100(XYZ))\n RGB = XYZ_to_RGB_LLAB(to_domain_100(XYZ))\n RGB_0 = XYZ_to_RGB_LLAB(to_domain_100(XYZ_0))\n\n # Reference illuminant *CIE Standard Illuminant D Series* *D65*.\n XYZ_0r = np.array([95.05, 100.00, 108.88])\n RGB_0r = XYZ_to_RGB_LLAB(XYZ_0r)\n\n # Computing chromatic adaptation.\n XYZ_r = chromatic_adaptation(RGB, RGB_0, RGB_0r, Y, surround.D)\n\n # -------------------------------------------------------------------------\n # Computing the correlate of *Lightness* :math:`L_L`.\n # -------------------------------------------------------------------------\n # Computing opponent colour dimensions.\n L_L, a, b = tsplit(\n opponent_colour_dimensions(XYZ_r, Y_b, surround.F_S, surround.F_L))\n\n # Computing perceptual correlates.\n # -------------------------------------------------------------------------\n # Computing the correlate of *chroma* :math:`Ch_L`.\n # 
-------------------------------------------------------------------------\n Ch_L = chroma_correlate(a, b)\n\n # -------------------------------------------------------------------------\n # Computing the correlate of *colourfulness* :math:`C_L`.\n # -------------------------------------------------------------------------\n C_L = colourfulness_correlate(L, L_L, Ch_L, surround.F_C)\n\n # -------------------------------------------------------------------------\n # Computing the correlate of *saturation* :math:`s_L`.\n # -------------------------------------------------------------------------\n s_L = saturation_correlate(Ch_L, L_L)\n\n # -------------------------------------------------------------------------\n # Computing the *hue* angle :math:`h_L`.\n # -------------------------------------------------------------------------\n h_L = hue_angle(a, b)\n # TODO: Implement hue composition computation.\n\n # -------------------------------------------------------------------------\n # Computing final opponent signals.\n # -------------------------------------------------------------------------\n A_L, B_L = tsplit(final_opponent_signals(C_L, h_L))\n\n return LLAB_Specification(L_L, Ch_L, from_range_degrees(h_L), s_L, C_L,\n None, A_L, B_L)\n\n\ndef XYZ_to_RGB_LLAB(XYZ):\n \"\"\"\n Converts from *CIE XYZ* tristimulus values to normalised cone responses.\n\n Parameters\n ----------\n XYZ : array_like\n *CIE XYZ* tristimulus values.\n\n Returns\n -------\n ndarray\n Normalised cone responses.\n\n Examples\n --------\n >>> XYZ = np.array([19.01, 20.00, 21.78])\n >>> XYZ_to_RGB_LLAB(XYZ) # doctest: +ELLIPSIS\n array([ 0.9414279..., 1.0404012..., 1.0897088...])\n \"\"\"\n\n _X, Y, _Z = tsplit(XYZ)\n\n Y = tstack([Y, Y, Y])\n XYZ_n = XYZ / Y\n\n return dot_vector(LLAB_XYZ_TO_RGB_MATRIX, XYZ_n)\n\n\ndef chromatic_adaptation(RGB, RGB_0, RGB_0r, Y, D=1):\n \"\"\"\n Applies chromatic adaptation to given *RGB* normalised cone responses\n array.\n\n Parameters\n ----------\n RGB : array_like\n *RGB* normalised cone responses array of test sample / stimulus.\n RGB_0 : array_like\n *RGB* normalised cone responses array of reference white.\n RGB_0r : array_like\n *RGB* normalised cone responses array of reference illuminant\n *CIE Standard Illuminant D Series* *D65*.\n Y : numeric or array_like\n Tristimulus values :math:`Y` of the stimulus.\n D : numeric or array_like, optional\n *Discounting-the-Illuminant* factor normalised to domain [0, 1].\n\n Returns\n -------\n ndarray\n Adapted *CIE XYZ* tristimulus values.\n\n Examples\n --------\n >>> RGB = np.array([0.94142795, 1.04040120, 1.08970885])\n >>> RGB_0 = np.array([0.94146023, 1.04039386, 1.08950293])\n >>> RGB_0r = np.array([0.94146023, 1.04039386, 1.08950293])\n >>> Y = 20.0\n >>> chromatic_adaptation(RGB, RGB_0, RGB_0r, Y) # doctest: +ELLIPSIS\n array([ 19.01, 20. 
, 21.78])\n    \"\"\"\n\n    R, G, B = tsplit(RGB)\n    R_0, G_0, B_0 = tsplit(RGB_0)\n    R_0r, G_0r, B_0r = tsplit(RGB_0r)\n    Y = as_float_array(Y)\n\n    beta = spow(B_0 / B_0r, 0.0834)\n\n    R_r = (D * (R_0r / R_0) + 1 - D) * R\n    G_r = (D * (G_0r / G_0) + 1 - D) * G\n    B_r = (D * (B_0r / spow(B_0, beta)) + 1 - D) * spow(B, beta)\n\n    RGB_r = tstack([R_r, G_r, B_r])\n\n    Y = tstack([Y, Y, Y])\n\n    XYZ_r = dot_vector(LLAB_RGB_TO_XYZ_MATRIX, RGB_r * Y)\n\n    return XYZ_r\n\n\ndef f(x, F_S):\n    \"\"\"\n    Defines the nonlinear response function of the *:math:`LLAB(l:c)`* colour\n    appearance model used to model the nonlinear behaviour of various visual\n    responses.\n\n    Parameters\n    ----------\n    x : numeric or array_like\n        Visual response variable :math:`x`.\n    F_S : numeric or array_like\n        Surround induction factor :math:`F_S`.\n\n    Returns\n    -------\n    numeric or array_like\n        Modeled visual response variable :math:`x`.\n\n    Examples\n    --------\n    >>> x = np.array([0.23350512, 0.23351103, 0.23355179])\n    >>> f(0.200009186234000, 3)  # doctest: +ELLIPSIS\n    array(0.5848125...)\n    \"\"\"\n\n    x = as_float_array(x)\n    F_S = as_float_array(F_S)\n\n    x_m = np.where(\n        x > 0.008856,\n        spow(x, 1 / F_S),\n        ((spow(0.008856, 1 / F_S) - (16 / 116)) / 0.008856) * x + (16 / 116),\n    )\n\n    return x_m\n\n\ndef opponent_colour_dimensions(XYZ, Y_b, F_S, F_L):\n    \"\"\"\n    Returns opponent colour dimensions from given adapted *CIE XYZ* tristimulus\n    values.\n\n    The opponent colour dimensions are based on a modified *CIE L\\\\*a\\\\*b\\\\**\n    colourspace formulae.\n\n    Parameters\n    ----------\n    XYZ : array_like\n        Adapted *CIE XYZ* tristimulus values.\n    Y_b : numeric or array_like\n        Luminance factor of the background in :math:`cd/m^2`.\n    F_S : numeric or array_like\n        Surround induction factor :math:`F_S`.\n    F_L : numeric or array_like\n        Lightness induction factor :math:`F_L`.\n\n    Returns\n    -------\n    ndarray\n        Opponent colour dimensions.\n\n    Examples\n    --------\n    >>> XYZ = np.array([19.00999572, 20.00091862, 21.77993863])\n    >>> Y_b = 20.0\n    >>> F_S = 3.0\n    >>> F_L = 1.0\n    >>> opponent_colour_dimensions(XYZ, Y_b, F_S, F_L)  # doctest: +ELLIPSIS\n    array([  3.7368047...e+01,  -4.4986443...e-03,  -5.2604647...e-03])\n    \"\"\"\n\n    X, Y, Z = tsplit(XYZ)\n    Y_b = as_float_array(Y_b)\n    F_S = as_float_array(F_S)\n    F_L = as_float_array(F_L)\n\n    # Account for background lightness contrast.\n    z = 1 + F_L * spow(Y_b / 100, 0.5)\n\n    # Computing modified *CIE L\\\\*a\\\\*b\\\\** colourspace array.\n    L = 116 * spow(f(Y / 100, F_S), z) - 16\n    a = 500 * (f(X / 95.05, F_S) - f(Y / 100, F_S))\n    b = 200 * (f(Y / 100, F_S) - f(Z / 108.88, F_S))\n\n    Lab = tstack([L, a, b])\n\n    return Lab\n\n\ndef hue_angle(a, b):\n    \"\"\"\n    Returns the *hue* angle :math:`h_L` in degrees.\n\n    Parameters\n    ----------\n    a : numeric or array_like\n        Opponent colour dimension :math:`a`.\n    b : numeric or array_like\n        Opponent colour dimension :math:`b`.\n\n    Returns\n    -------\n    numeric or ndarray\n        *Hue* angle :math:`h_L` in degrees.\n\n    Examples\n    --------\n    >>> hue_angle(-4.49864756e-03, -5.26046353e-03)  # doctest: +ELLIPSIS\n    229.4635727...\n    \"\"\"\n\n    a = as_float_array(a)\n    b = as_float_array(b)\n\n    h_L = np.degrees(np.arctan2(b, a)) % 360\n\n    return h_L\n\n\ndef chroma_correlate(a, b):\n    \"\"\"\n    Returns the correlate of *chroma* :math:`Ch_L`.\n\n    Parameters\n    ----------\n    a : numeric or array_like\n        Opponent colour dimension :math:`a`.\n    b : numeric or array_like\n        Opponent colour dimension :math:`b`.\n\n    Returns\n    -------\n    numeric or ndarray\n        Correlate of *chroma* :math:`Ch_L`.\n\n    
Examples\n --------\n >>> a = -4.49864756e-03\n >>> b = -5.26046353e-03\n >>> chroma_correlate(a, b) # doctest: +ELLIPSIS\n 0.0086506...\n \"\"\"\n\n a = as_float_array(a)\n b = as_float_array(b)\n\n c = spow(a ** 2 + b ** 2, 0.5)\n Ch_L = 25 * np.log(1 + 0.05 * c)\n\n return Ch_L\n\n\ndef colourfulness_correlate(L, L_L, Ch_L, F_C):\n \"\"\"\n Returns the correlate of *colourfulness* :math:`C_L`.\n\n Parameters\n ----------\n L : numeric or array_like\n Absolute luminance :math:`L` of reference white in :math:`cd/m^2`.\n L_L : numeric or array_like\n Correlate of *Lightness* :math:`L_L`.\n Ch_L : numeric or array_like\n Correlate of *chroma* :math:`Ch_L`.\n F_C : numeric or array_like\n Chroma induction factor :math:`F_C`.\n\n Returns\n -------\n numeric or ndarray\n Correlate of *colourfulness* :math:`C_L`.\n\n Examples\n --------\n >>> L = 318.31\n >>> L_L = 37.368047493928195\n >>> Ch_L = 0.008650662051714\n >>> F_C = 1.0\n >>> colourfulness_correlate(L, L_L, Ch_L, F_C) # doctest: +ELLIPSIS\n 0.0183832...\n \"\"\"\n\n L = as_float_array(L)\n L_L = as_float_array(L_L)\n Ch_L = as_float_array(Ch_L)\n F_C = as_float_array(F_C)\n\n S_C = 1 + 0.47 * np.log10(L) - 0.057 * np.log10(L) ** 2\n S_M = 0.7 + 0.02 * L_L - 0.0002 * L_L ** 2\n C_L = Ch_L * S_M * S_C * F_C\n\n return C_L\n\n\ndef saturation_correlate(Ch_L, L_L):\n \"\"\"\n Returns the correlate of *saturation* :math:`S_L`.\n\n Parameters\n ----------\n Ch_L : numeric or array_like\n Correlate of *chroma* :math:`Ch_L`.\n L_L : numeric or array_like\n Correlate of *Lightness* :math:`L_L`.\n\n Returns\n -------\n numeric or ndarray\n Correlate of *saturation* :math:`S_L`.\n\n Examples\n --------\n >>> Ch_L = 0.008650662051714\n >>> L_L = 37.368047493928195\n >>> saturation_correlate(Ch_L, L_L) # doctest: +ELLIPSIS\n 0.0002314...\n \"\"\"\n\n Ch_L = as_float_array(Ch_L)\n L_L = as_float_array(L_L)\n\n S_L = Ch_L / L_L\n\n return S_L\n\n\ndef final_opponent_signals(C_L, h_L):\n \"\"\"\n Returns the final opponent signals :math:`A_L` and :math:`B_L`.\n\n Parameters\n ----------\n C_L : numeric or array_like\n Correlate of *colourfulness* :math:`C_L`.\n h_L : numeric or array_like\n Correlate of *hue* :math:`h_L` in degrees.\n\n Returns\n -------\n ndarray\n Final opponent signals :math:`A_L` and :math:`B_L`.\n\n Examples\n --------\n >>> C_L = 0.0183832899143\n >>> h_L = 229.46357270858391\n >>> final_opponent_signals(C_L, h_L) # doctest: +ELLIPSIS\n array([-0.0119478..., -0.0139711...])\n \"\"\"\n\n AB_L = polar_to_cartesian(tstack([C_L, np.radians(h_L)]))\n\n return AB_L\n",
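For reference, a minimal usage sketch of the LLAB(l:c) model defined above, reusing the values from the `XYZ_to_LLAB` doctest and assuming the public `colour.appearance` namespace re-exports these objects:

import numpy as np
from colour.appearance import LLAB_VIEWING_CONDITIONS, XYZ_to_LLAB

XYZ = np.array([19.01, 20.00, 21.78])      # test sample
XYZ_0 = np.array([95.05, 100.00, 108.88])  # reference white
surround = LLAB_VIEWING_CONDITIONS['ref_average_4_minus']

# Y_b: background luminance factor; L: absolute luminance of reference white.
specification = XYZ_to_LLAB(XYZ, XYZ_0, Y_b=20.0, L=318.31, surround=surround)
print(specification.J, specification.C, specification.h)  # ~37.37, ~0.0089, ~270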
"# -*- coding: utf-8 -*-\n\"\"\"\nColour Quality Plotting\n=======================\n\nDefines the colour quality plotting objects:\n\n- :func:`colour.plotting.plot_single_sd_colour_rendering_index_bars`\n- :func:`colour.plotting.plot_multi_sds_colour_rendering_indexes_bars`\n- :func:`colour.plotting.plot_single_sd_colour_quality_scale_bars`\n- :func:`colour.plotting.plot_multi_sds_colour_quality_scales_bars`\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\nfrom itertools import cycle\n\nfrom colour.constants import DEFAULT_FLOAT_DTYPE\nfrom colour.colorimetry import sds_and_multi_sds_to_sds\nfrom colour.plotting import (COLOUR_STYLE_CONSTANTS,\n XYZ_to_plotting_colourspace, artist,\n label_rectangles, override_style, render)\nfrom colour.quality import (colour_quality_scale, colour_rendering_index)\nfrom colour.quality.cri import TCS_ColorimetryData\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'plot_colour_quality_bars', 'plot_single_sd_colour_rendering_index_bars',\n 'plot_multi_sds_colour_rendering_indexes_bars',\n 'plot_single_sd_colour_quality_scale_bars',\n 'plot_multi_sds_colour_quality_scales_bars'\n]\n\n\n@override_style()\ndef plot_colour_quality_bars(specifications,\n labels=True,\n hatching=None,\n hatching_repeat=2,\n **kwargs):\n \"\"\"\n Plots the colour quality data of given illuminants or light sources colour\n quality specifications.\n\n Parameters\n ----------\n specifications : array_like\n Array of illuminants or light sources colour quality specifications.\n labels : bool, optional\n Add labels above bars.\n hatching : bool or None, optional\n Use hatching for the bars.\n hatching_repeat : int, optional\n Hatching pattern repeat.\n\n Other Parameters\n ----------------\n \\\\**kwargs : dict, optional\n {:func:`colour.plotting.artist`,\n :func:`colour.plotting.quality.plot_colour_quality_bars`,\n :func:`colour.plotting.render`},\n Please refer to the documentation of the previously listed definitions.\n\n Returns\n -------\n tuple\n Current figure and axes.\n\n Examples\n --------\n >>> from colour import (ILLUMINANTS_SDS,\n ... LIGHT_SOURCES_SDS, SpectralShape)\n >>> illuminant = ILLUMINANTS_SDS['FL2']\n >>> light_source = LIGHT_SOURCES_SDS['Kinoton 75P']\n >>> light_source = light_source.copy().align(SpectralShape(360, 830, 1))\n >>> cqs_i = colour_quality_scale(illuminant, additional_data=True)\n >>> cqs_l = colour_quality_scale(light_source, additional_data=True)\n >>> plot_colour_quality_bars([cqs_i, cqs_l]) # doctest: +ELLIPSIS\n (<Figure size ... with 1 Axes>, \\\n<matplotlib.axes._subplots.AxesSubplot object at 0x...>)\n\n .. 
image:: ../_static/Plotting_Plot_Colour_Quality_Bars.png\n :align: center\n :alt: plot_colour_quality_bars\n \"\"\"\n\n settings = {'uniform': True}\n settings.update(kwargs)\n\n _figure, axes = artist(**settings)\n\n bar_width = 0.5\n y_ticks_interval = 10\n count_s, count_Q_as = len(specifications), 0\n patterns = cycle(COLOUR_STYLE_CONSTANTS.hatch.patterns)\n if hatching is None:\n hatching = False if count_s == 1 else True\n for i, specification in enumerate(specifications):\n Q_a, Q_as, colorimetry_data = (specification.Q_a, specification.Q_as,\n specification.colorimetry_data)\n\n count_Q_as = len(Q_as)\n colours = ([[1] * 3] + [\n np.clip(XYZ_to_plotting_colourspace(x.XYZ), 0, 1)\n for x in colorimetry_data[0]\n ])\n\n x = (i + np.arange(\n 0, (count_Q_as + 1) * (count_s + 1), (count_s + 1),\n dtype=DEFAULT_FLOAT_DTYPE)) * bar_width\n y = [s[1].Q_a for s in sorted(Q_as.items(), key=lambda s: s[0])]\n y = np.array([Q_a] + list(y))\n\n bars = axes.bar(\n x,\n np.abs(y),\n color=colours,\n width=bar_width,\n edgecolor=COLOUR_STYLE_CONSTANTS.colour.dark,\n label=specification.name)\n\n hatches = ([next(patterns) * hatching_repeat] * (count_Q_as + 1)\n if hatching else np.where(y < 0, next(patterns),\n None).tolist())\n\n for j, bar in enumerate(bars.patches):\n bar.set_hatch(hatches[j])\n\n if labels:\n label_rectangles(\n ['{0:.1f}'.format(y_v) for y_v in y],\n bars,\n rotation='horizontal' if count_s == 1 else 'vertical',\n offset=(0 if count_s == 1 else 3 / 100 * count_s + 65 / 1000,\n 0.025),\n text_size=-5 / 7 * count_s + 12.5,\n axes=axes)\n\n axes.axhline(\n y=100, color=COLOUR_STYLE_CONSTANTS.colour.dark, linestyle='--')\n\n axes.set_xticks((np.arange(\n 0, (count_Q_as + 1) * (count_s + 1), (count_s + 1),\n dtype=DEFAULT_FLOAT_DTYPE) * bar_width + (count_s * bar_width / 2)),\n ['Qa'] + [\n 'Q{0}'.format(index + 1)\n for index in range(0, count_Q_as + 1, 1)\n ])\n axes.set_yticks(range(0, 100 + y_ticks_interval, y_ticks_interval))\n\n aspect = 1 / (120 / (bar_width + len(Q_as) + bar_width * 2))\n bounding_box = (-bar_width, ((count_Q_as + 1) * (count_s + 1)) / 2, 0, 120)\n\n settings = {\n 'axes': axes,\n 'aspect': aspect,\n 'bounding_box': bounding_box,\n 'legend': hatching,\n 'title': 'Colour Quality',\n }\n settings.update(kwargs)\n\n return render(**settings)\n\n\n@override_style()\ndef plot_single_sd_colour_rendering_index_bars(sd, **kwargs):\n \"\"\"\n Plots the *Colour Rendering Index* (CRI) of given illuminant or light\n source spectral distribution.\n\n Parameters\n ----------\n sd : SpectralDistribution\n Illuminant or light source spectral distribution to plot the\n *Colour Rendering Index* (CRI).\n\n Other Parameters\n ----------------\n \\\\**kwargs : dict, optional\n {:func:`colour.plotting.artist`,\n :func:`colour.plotting.quality.plot_colour_quality_bars`,\n :func:`colour.plotting.render`},\n Please refer to the documentation of the previously listed definitions.\n labels : bool, optional\n {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n Add labels above bars.\n hatching : bool or None, optional\n {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n Use hatching for the bars.\n hatching_repeat : int, optional\n {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n Hatching pattern repeat.\n\n Returns\n -------\n tuple\n Current figure and axes.\n\n Examples\n --------\n >>> from colour import ILLUMINANTS_SDS\n >>> illuminant = ILLUMINANTS_SDS['FL2']\n >>> plot_single_sd_colour_rendering_index_bars(illuminant)\n ... 
# doctest: +ELLIPSIS\n (<Figure size ... with 1 Axes>, \\\n<matplotlib.axes._subplots.AxesSubplot object at 0x...>)\n\n .. image:: ../_static/Plotting_\\\nPlot_Single_SD_Colour_Rendering_Index_Bars.png\n :align: center\n :alt: plot_single_sd_colour_rendering_index_bars\n \"\"\"\n\n return plot_multi_sds_colour_rendering_indexes_bars([sd], **kwargs)\n\n\n@override_style()\ndef plot_multi_sds_colour_rendering_indexes_bars(sds, **kwargs):\n \"\"\"\n Plots the *Colour Rendering Index* (CRI) of given illuminants or light\n sources spectral distributions.\n\n Parameters\n ----------\n sds : array_like or MultiSpectralDistributions\n Spectral distributions or multi-spectral distributions to\n plot. `sds` can be a single\n :class:`colour.MultiSpectralDistributions` class instance, a list\n of :class:`colour.MultiSpectralDistributions` class instances or a\n list of :class:`colour.SpectralDistribution` class instances.\n\n Other Parameters\n ----------------\n \\\\**kwargs : dict, optional\n {:func:`colour.plotting.artist`,\n :func:`colour.plotting.quality.plot_colour_quality_bars`,\n :func:`colour.plotting.render`},\n Please refer to the documentation of the previously listed definitions.\n labels : bool, optional\n {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n Add labels above bars.\n hatching : bool or None, optional\n {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n Use hatching for the bars.\n hatching_repeat : int, optional\n {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n Hatching pattern repeat.\n\n Returns\n -------\n tuple\n Current figure and axes.\n\n Examples\n --------\n >>> from colour import (ILLUMINANTS_SDS,\n ... LIGHT_SOURCES_SDS)\n >>> illuminant = ILLUMINANTS_SDS['FL2']\n >>> light_source = LIGHT_SOURCES_SDS['Kinoton 75P']\n >>> plot_multi_sds_colour_rendering_indexes_bars(\n ... [illuminant, light_source]) # doctest: +ELLIPSIS\n (<Figure size ... with 1 Axes>, \\\n<matplotlib.axes._subplots.AxesSubplot object at 0x...>)\n\n .. image:: ../_static/Plotting_\\\nPlot_Multi_SDS_Colour_Rendering_Indexes_Bars.png\n :align: center\n :alt: plot_multi_sds_colour_rendering_indexes_bars\n \"\"\"\n\n sds = sds_and_multi_sds_to_sds(sds)\n\n settings = dict(kwargs)\n settings.update({'standalone': False})\n\n specifications = [\n colour_rendering_index(sd, additional_data=True) for sd in sds\n ]\n\n # *colour rendering index* colorimetry data tristimulus values are\n # computed in [0, 100] domain however `plot_colour_quality_bars` expects\n # [0, 1] domain. 
As we want to keep `plot_colour_quality_bars` definition\n    # agnostic of the colour quality data, we update the test sd\n    # colorimetry data tristimulus values domain.\n    for specification in specifications:\n        colorimetry_data = specification.colorimetry_data\n        for i, c_d in enumerate(colorimetry_data[0]):\n            colorimetry_data[0][i] = TCS_ColorimetryData(\n                c_d.name, c_d.XYZ / 100, c_d.uv, c_d.UVW)\n\n    _figure, axes = plot_colour_quality_bars(specifications, **settings)\n\n    title = 'Colour Rendering Index - {0}'.format(', '.join(\n        [sd.strict_name for sd in sds]))\n\n    settings = {'axes': axes, 'title': title}\n    settings.update(kwargs)\n\n    return render(**settings)\n\n\n@override_style()\ndef plot_single_sd_colour_quality_scale_bars(sd,\n                                             method='NIST CQS 9.0',\n                                             **kwargs):\n    \"\"\"\n    Plots the *Colour Quality Scale* (CQS) of given illuminant or light source\n    spectral distribution.\n\n    Parameters\n    ----------\n    sd : SpectralDistribution\n        Illuminant or light source spectral distribution to plot the\n        *Colour Quality Scale* (CQS).\n    method : unicode, optional\n        **{'NIST CQS 9.0', 'NIST CQS 7.4'}**,\n        *Colour Quality Scale* (CQS) computation method.\n\n    Other Parameters\n    ----------------\n    \\\\**kwargs : dict, optional\n        {:func:`colour.plotting.artist`,\n        :func:`colour.plotting.quality.plot_colour_quality_bars`,\n        :func:`colour.plotting.render`},\n        Please refer to the documentation of the previously listed definitions.\n    labels : bool, optional\n        {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n        Add labels above bars.\n    hatching : bool or None, optional\n        {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n        Use hatching for the bars.\n    hatching_repeat : int, optional\n        {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n        Hatching pattern repeat.\n\n    Returns\n    -------\n    tuple\n        Current figure and axes.\n\n    Examples\n    --------\n    >>> from colour import ILLUMINANTS_SDS\n    >>> illuminant = ILLUMINANTS_SDS['FL2']\n    >>> plot_single_sd_colour_quality_scale_bars(illuminant)\n    ... # doctest: +ELLIPSIS\n    (<Figure size ... with 1 Axes>, \\\n<matplotlib.axes._subplots.AxesSubplot object at 0x...>)\n\n    .. image:: ../_static/Plotting_\\\nPlot_Single_SD_Colour_Quality_Scale_Bars.png\n        :align: center\n        :alt: plot_single_sd_colour_quality_scale_bars\n    \"\"\"\n\n    return plot_multi_sds_colour_quality_scales_bars([sd], method, **kwargs)\n\n\n@override_style()\ndef plot_multi_sds_colour_quality_scales_bars(sds,\n                                              method='NIST CQS 9.0',\n                                              **kwargs):\n    \"\"\"\n    Plots the *Colour Quality Scale* (CQS) of given illuminants or light\n    sources spectral distributions.\n\n    Parameters\n    ----------\n    sds : array_like or MultiSpectralDistributions\n        Spectral distributions or multi-spectral distributions to\n        plot. 
`sds` can be a single\n        :class:`colour.MultiSpectralDistributions` class instance, a list\n        of :class:`colour.MultiSpectralDistributions` class instances or a\n        list of :class:`colour.SpectralDistribution` class instances.\n    method : unicode, optional\n        **{'NIST CQS 9.0', 'NIST CQS 7.4'}**,\n        *Colour Quality Scale* (CQS) computation method.\n\n    Other Parameters\n    ----------------\n    \\\\**kwargs : dict, optional\n        {:func:`colour.plotting.artist`,\n        :func:`colour.plotting.quality.plot_colour_quality_bars`,\n        :func:`colour.plotting.render`},\n        Please refer to the documentation of the previously listed definitions.\n    labels : bool, optional\n        {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n        Add labels above bars.\n    hatching : bool or None, optional\n        {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n        Use hatching for the bars.\n    hatching_repeat : int, optional\n        {:func:`colour.plotting.quality.plot_colour_quality_bars`},\n        Hatching pattern repeat.\n\n    Returns\n    -------\n    tuple\n        Current figure and axes.\n\n    Examples\n    --------\n    >>> from colour import (ILLUMINANTS_SDS,\n    ...                     LIGHT_SOURCES_SDS)\n    >>> illuminant = ILLUMINANTS_SDS['FL2']\n    >>> light_source = LIGHT_SOURCES_SDS['Kinoton 75P']\n    >>> plot_multi_sds_colour_quality_scales_bars([illuminant, light_source])\n    ... # doctest: +ELLIPSIS\n    (<Figure size ... with 1 Axes>, \\\n<matplotlib.axes._subplots.AxesSubplot object at 0x...>)\n\n    .. image:: ../_static/Plotting_\\\nPlot_Multi_SDS_Colour_Quality_Scales_Bars.png\n        :align: center\n        :alt: plot_multi_sds_colour_quality_scales_bars\n    \"\"\"\n\n    sds = sds_and_multi_sds_to_sds(sds)\n\n    settings = dict(kwargs)\n    settings.update({'standalone': False})\n\n    specifications = [colour_quality_scale(sd, True, method) for sd in sds]\n\n    _figure, axes = plot_colour_quality_bars(specifications, **settings)\n\n    title = 'Colour Quality Scale - {0}'.format(', '.join(\n        [sd.strict_name for sd in sds]))\n\n    settings = {'axes': axes, 'title': title}\n    settings.update(kwargs)\n\n    return render(**settings)\n",
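A short sketch of how the plotting definitions above chain together, following the module's own doctest examples (requires matplotlib and the colour datasets; the single-SD variants simply delegate to the multi-SDS ones):

from colour import ILLUMINANTS_SDS, LIGHT_SOURCES_SDS
from colour.plotting import (plot_multi_sds_colour_quality_scales_bars,
                             plot_single_sd_colour_rendering_index_bars)

illuminant = ILLUMINANTS_SDS['FL2']
light_source = LIGHT_SOURCES_SDS['Kinoton 75P']

# Single-SD CRI bars; wraps plot_multi_sds_colour_rendering_indexes_bars.
figure, axes = plot_single_sd_colour_rendering_index_bars(illuminant)

# Multi-SDS CQS bars using the default 'NIST CQS 9.0' method.
figure, axes = plot_multi_sds_colour_quality_scales_bars(
    [illuminant, light_source])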
"# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.models.rgb.transfer_functions.viper_log`\nmodule.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\n\nfrom colour.models.rgb.transfer_functions import (log_encoding_ViperLog,\n log_decoding_ViperLog)\nfrom colour.utilities import domain_range_scale, ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestLogEncoding_ViperLog', 'TestLogDecoding_ViperLog']\n\n\nclass TestLogEncoding_ViperLog(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_encoding_ViperLog` definition unit tests methods.\n \"\"\"\n\n def test_log_encoding_ViperLog(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_encoding_ViperLog` definition.\n \"\"\"\n\n self.assertAlmostEqual(log_encoding_ViperLog(0.0), -np.inf, places=7)\n\n self.assertAlmostEqual(\n log_encoding_ViperLog(0.18), 0.636008067010413, places=7)\n\n self.assertAlmostEqual(log_encoding_ViperLog(1.0), 1.0, places=7)\n\n def test_n_dimensional_log_encoding_ViperLog(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_encoding_ViperLog` definition n-dimensional arrays support.\n \"\"\"\n\n x = 0.18\n y = log_encoding_ViperLog(x)\n\n x = np.tile(x, 6)\n y = np.tile(y, 6)\n np.testing.assert_almost_equal(log_encoding_ViperLog(x), y, decimal=7)\n\n x = np.reshape(x, (2, 3))\n y = np.reshape(y, (2, 3))\n np.testing.assert_almost_equal(log_encoding_ViperLog(x), y, decimal=7)\n\n x = np.reshape(x, (2, 3, 1))\n y = np.reshape(y, (2, 3, 1))\n np.testing.assert_almost_equal(log_encoding_ViperLog(x), y, decimal=7)\n\n def test_domain_range_scale_log_encoding_ViperLog(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_encoding_ViperLog` definition domain and range scale support.\n \"\"\"\n\n x = 0.18\n y = log_encoding_ViperLog(x)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n log_encoding_ViperLog(x * factor), y * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_log_encoding_ViperLog(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_encoding_ViperLog` definition nan support.\n \"\"\"\n\n log_encoding_ViperLog(\n np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestLogDecoding_ViperLog(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_decoding_ViperLog` definition unit tests methods.\n \"\"\"\n\n def test_log_decoding_ViperLog(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_decoding_ViperLog` definition.\n \"\"\"\n\n self.assertAlmostEqual(log_decoding_ViperLog(-np.inf), 0.0, places=7)\n\n self.assertAlmostEqual(\n log_decoding_ViperLog(0.636008067010413), 0.18, places=7)\n\n self.assertAlmostEqual(log_decoding_ViperLog(1.0), 1.0, places=7)\n\n def test_n_dimensional_log_decoding_ViperLog(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_decoding_ViperLog` definition n-dimensional arrays support.\n \"\"\"\n\n y = 0.636008067010413\n x = log_decoding_ViperLog(y)\n\n 
y = np.tile(y, 6)\n x = np.tile(x, 6)\n np.testing.assert_almost_equal(log_decoding_ViperLog(y), x, decimal=7)\n\n y = np.reshape(y, (2, 3))\n x = np.reshape(x, (2, 3))\n np.testing.assert_almost_equal(log_decoding_ViperLog(y), x, decimal=7)\n\n y = np.reshape(y, (2, 3, 1))\n x = np.reshape(x, (2, 3, 1))\n np.testing.assert_almost_equal(log_decoding_ViperLog(y), x, decimal=7)\n\n def test_domain_range_scale_log_decoding_ViperLog(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_decoding_ViperLog` definition domain and range scale support.\n \"\"\"\n\n y = 0.636008067010413\n x = log_decoding_ViperLog(y)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n log_decoding_ViperLog(y * factor), x * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_log_decoding_ViperLog(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.viper_log.\\\nlog_decoding_ViperLog` definition nan support.\n \"\"\"\n\n log_decoding_ViperLog(\n np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
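The `d_r = (('reference', 1), (1, 1), (100, 100))` loops in the tests above encode a single idiom: under a given domain-range scale, scaling the input by the paired factor must scale the output by the same factor, which holds here because Viper Log maps [0, 1] onto [0, 1]. A condensed sketch of the round trip and the scale check:

import numpy as np
from colour.models.rgb.transfer_functions import (log_decoding_ViperLog,
                                                  log_encoding_ViperLog)
from colour.utilities import domain_range_scale

x = 0.18
y = log_encoding_ViperLog(x)

# Encoding and decoding are inverses over the tested range.
np.testing.assert_almost_equal(log_decoding_ViperLog(y), x, decimal=7)

# The same input/output relationship must hold under every scale.
for scale, factor in (('reference', 1), (1, 1), (100, 100)):
    with domain_range_scale(scale):
        np.testing.assert_almost_equal(
            log_encoding_ViperLog(x * factor), y * factor, decimal=7)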
] | [
[
"numpy.array"
],
[
"numpy.maximum"
],
[
"numpy.reshape",
"numpy.array",
"numpy.tile"
],
[
"numpy.errstate",
"numpy.asarray"
],
[
"numpy.log",
"numpy.radians",
"numpy.linalg.inv",
"numpy.arctan2",
"numpy.log10",
"numpy.array"
],
[
"numpy.arange",
"numpy.abs"
],
[
"numpy.reshape",
"numpy.array",
"numpy.tile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NestLakerJasonLIN/pipedream | [
"cad624f79a71f44ba79099f0c38321347b13e5c2",
"cad624f79a71f44ba79099f0c38321347b13e5c2",
"cad624f79a71f44ba79099f0c38321347b13e5c2",
"cad624f79a71f44ba79099f0c38321347b13e5c2",
"cad624f79a71f44ba79099f0c38321347b13e5c2",
"cad624f79a71f44ba79099f0c38321347b13e5c2",
"cad624f79a71f44ba79099f0c38321347b13e5c2"
] | [
"profiler/torchmodules/torchlogger/activation_gradient_logger.py",
"runtime/image_classification/models/vgg16/gpus=16_straight/stage5.py",
"profiler/translation/seq2seq/train/smoothing.py",
"runtime/translation/main_with_runtime.py",
"runtime/translation/seq2seq/utils.py",
"runtime/image_classification/models/vgg16/gpus=16/stage0.py",
"runtime/image_classification/models/vgg16/gpus=16_straight/stage8.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport os\nimport pickle\nimport torch\n\n\nclass ActivationAndGradientLogger:\n def __init__(self, directory):\n self.directory = directory\n try:\n os.mkdir(self.directory)\n except:\n pass\n self.iteration = 0\n self.forward_counter = 0\n self.backward_counter = 0\n\n def reset_counters(self):\n self.forward_counter = 0\n self.backward_counter = 0\n\n def hook_modules(self, module, iteration):\n self.iteration = iteration\n sub_directory = os.path.join(self.directory, str(iteration))\n try:\n os.mkdir(sub_directory)\n except:\n pass\n self.hook_modules_helper(module, sub_directory)\n\n def hook_modules_helper(self, module, sub_directory):\n sub_modules = module.__dict__['_modules']\n\n for name, sub_module in sub_modules.items():\n if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:\n break\n\n sub_sub_modules = sub_module.__dict__['_modules']\n if len(sub_sub_modules) > 0:\n # Recursively visit this module's descendants.\n self.hook_modules_helper(sub_module, sub_directory)\n else:\n def forward_hook(*args):\n activation = args[2]\n filename = os.path.join(sub_directory, 'activations.%d.pkl' % self.forward_counter)\n with open(filename, 'wb') as f:\n torch.save(activation, f)\n self.forward_counter += 1\n\n def backward_hook(*args):\n gradient = args[2]\n filename = os.path.join(sub_directory, 'gradients.%d.pkl' % self.backward_counter)\n with open(filename, 'wb') as f:\n torch.save(gradient, f)\n self.backward_counter += 1\n\n sub_module.register_forward_hook(forward_hook)\n sub_module.register_backward_hook(backward_hook)\n\n def unhook_modules(self, module):\n self.unhook_modules_helper(module)\n self.reset_counters()\n\n def unhook_modules_helper(self, module):\n sub_modules = module.__dict__['_modules']\n\n for name, sub_module in sub_modules.items():\n if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:\n break\n\n sub_sub_modules = sub_module.__dict__['_modules']\n if len(sub_sub_modules) > 0:\n # Recursively visit this module's descendants.\n self.unhook_modules_helper(sub_module)\n else:\n sub_module.reset_hooks()\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport torch\n\n\nclass Stage5(torch.nn.Module):\n def __init__(self):\n super(Stage5, self).__init__()\n self.layer1 = torch.nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self._initialize_weights()\n\n def forward(self, input0):\n out0 = input0.clone()\n out1 = self.layer1(out0)\n return out1\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, torch.nn.BatchNorm2d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, torch.nn.Linear):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n torch.nn.init.constant_(m.bias, 0)\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport torch\nimport torch.nn as nn\n\nclass LabelSmoothing(nn.Module):\n \"\"\"\n NLL loss with label smoothing.\n \"\"\"\n def __init__(self, padding_idx, smoothing=0.0):\n \"\"\"\n Constructor for the LabelSmoothing module.\n\n :param padding_idx: index of the PAD token\n :param smoothing: label smoothing factor\n \"\"\"\n super(LabelSmoothing, self).__init__()\n self.padding_idx = padding_idx\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n\n def forward(self, x, target):\n logprobs = torch.nn.functional.log_softmax(x, dim=-1, dtype=torch.float32)\n\n non_pad_mask = (target != self.padding_idx)\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)[non_pad_mask]\n smooth_loss = -logprobs.mean(dim=-1)[non_pad_mask]\n loss = self.confidence * nll_loss + self.smoothing * smooth_loss\n return loss.sum()\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport argparse\nfrom collections import OrderedDict\nimport importlib\nimport json\nimport os\nimport shutil\nimport sys\nimport time\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\n\nsys.path.append(\"..\")\nimport runtime\nimport sgd\nimport adam\n\nfrom seq2seq.utils import l2_promote\nfrom seq2seq.utils import build_gnmt_criterion\nfrom seq2seq.data.tokenizer import Tokenizer\nimport seq2seq.data.config as config\nfrom seq2seq.data.dataset import LazyParallelDataset\nfrom seq2seq.data.dataset import ParallelDataset\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('--data_dir', type=str,\n help='path to dataset')\nparser.add_argument('--distributed_backend', type=str,\n help='distributed backend to use (gloo|nccl)')\nparser.add_argument('--module', '-m', required=True,\n help='name of module that contains model and tensor_shapes definition')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=8, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=128, type=int,\n metavar='N', help='mini-batch size (default: 128)')\nparser.add_argument('--grad-clip', default=5.0, type=float,\n help='enabled gradient clipping and sets maximum gradient norm value')\nparser.add_argument('--eval-batch-size', default=100, type=int,\n help='eval mini-batch size (default: 100)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--lr_policy', default='step', type=str,\n help='policy for controlling learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--fp16', action='store_true',\n help='train model in fp16 precision')\nparser.add_argument('--loss_scale', type=float, default=1,\n help='static loss scale, positive power of 2 to improve fp16 convergence')\nparser.add_argument('--master_addr', default=None, type=str,\n help=\"IP address of master (machine with rank 0)\")\nparser.add_argument('--config_path', default=None, type=str,\n help=\"Path of configuration file\")\nparser.add_argument('--no_input_pipelining', action='store_true',\n help=\"No pipelining of inputs\")\nparser.add_argument('--rank', default=None, type=int,\n help=\"Rank of worker\")\nparser.add_argument('--local_rank', default=0, type=int,\n help=\"Local rank of worker\")\nparser.add_argument('--forward_only', action='store_true',\n help=\"Run forward pass only\")\nparser.add_argument('--num_minibatches', default=None, type=int,\n help=\"Number of minibatches to run\")\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint 
(default: none)')\nparser.add_argument('--checkpoint_dir', default='', type=str, metavar='PATH',\n help='path to directory to save checkpoints')\nparser.add_argument('--checkpoint_dir_not_nfs', action='store_true',\n help='checkpoint dir is not on a shared NFS server')\nparser.add_argument('-s', '--synthetic_data', action='store_true',\n help=\"Use synthetic data\")\nparser.add_argument('-v', '--verbose_frequency', default=0, type=int, metavar='N',\n help=\"Log verbose information\")\nparser.add_argument('--num_ranks_in_server', default=1, type=int,\n help=\"number of gpus per machine\")\n\nparser.add_argument('--max-length-train', default=50, type=int,\n help='maximum sequence length for training')\nparser.add_argument('--min-length-train', default=0, type=int,\n help='minimum sequence length for training')\nparser.add_argument('--no-bucketing', action='store_true',\n help='enables bucketing')\n\n# Recompute tensors from forward pass, instead of saving them.\nparser.add_argument('--recompute', action='store_true',\n help='Recompute tensors in backward pass')\n# Macrobatching reduces the number of weight versions to save,\n# by not applying updates every minibatch.\nparser.add_argument('--macrobatch', action='store_true',\n help='Macrobatch updates to save memory')\n\nbest_prec1 = 0\n\n\n# Helper methods.\ndef is_first_stage():\n return args.stage is None or (args.stage == 0)\n\ndef is_last_stage():\n return args.stage is None or (args.stage == (args.num_stages-1))\n\n# Synthetic Dataset class.\nclass SyntheticDataset(torch.utils.data.dataset.Dataset):\n def __init__(self, input_size, length, num_classes=1000):\n self.tensor = Variable(torch.rand(*input_size)).type(torch.FloatTensor)\n self.target = torch.Tensor(1).random_(0, num_classes)[0].type(torch.LongTensor)\n self.length = length\n\n def __getitem__(self, index):\n return self.tensor, self.target\n\n def __len__(self):\n return self.length\n\ndef main():\n global args, best_prec1\n args = parser.parse_args()\n\n # Special case handling for GNMT model\n l2_promote()\n\n torch.cuda.set_device(args.local_rank)\n\n # build tokenizer\n tokenizer = Tokenizer(os.path.join(args.data_dir, config.VOCAB_FNAME))\n\n # define loss function\n criterion = build_gnmt_criterion(\n vocab_size=tokenizer.vocab_size, padding_idx=config.PAD, smoothing=0.1)\n\n # create stages of the model\n module = importlib.import_module(args.module)\n args.arch = module.arch()\n model = module.model(criterion)\n\n input_size = [args.max_length_train, args.batch_size]\n training_tensor_shapes = {\"input0\": input_size, \"input1\": [args.batch_size],\n \"input2\": input_size, \"target\": [args.max_length_train * args.batch_size],\n \"target_length\": [args.batch_size]}\n dtypes = {\"input0\": torch.int64, \"input1\": torch.int64, \"input2\": torch.int64,\n \"target\": torch.int64, \"target_length\": torch.int32}\n inputs_module_destinations = {\"input0\": 0, \"input1\": 0, \"input2\": 0}\n target_tensor_names = {\"target\", \"target_length\"}\n for module_id, (stage, inputs, outputs) in enumerate(model[:-1]): # Skip last layer (loss).\n input_tensors = []\n for module_input in inputs:\n if module_input in inputs_module_destinations:\n inputs_module_destinations[module_input] = module_id\n\n input_tensor = torch.ones(tuple(training_tensor_shapes[module_input]),\n dtype=dtypes[module_input]).cuda()\n input_tensors.append(input_tensor)\n stage.cuda()\n # PyTorch should not maintain metadata for a backward pass on\n # synthetic inputs. 
Without the following line, the runtime is\n # as much as 1.5x slower in a full DP configuration.\n with torch.no_grad():\n output_tensors = stage(*tuple(input_tensors))\n if not type(output_tensors) is tuple:\n output_tensors = [output_tensors]\n for output, output_tensor in zip(outputs,\n list(output_tensors)):\n training_tensor_shapes[output] = list(output_tensor.size())\n dtypes[output] = output_tensor.dtype\n\n eval_tensor_shapes = {}\n for key in training_tensor_shapes:\n eval_tensor_shapes[key] = tuple(\n training_tensor_shapes[key])\n training_tensor_shapes[key] = tuple(\n training_tensor_shapes[key])\n\n configuration_maps = {\n 'module_to_stage_map': None,\n 'stage_to_rank_map': None,\n 'stage_to_depth_map': None\n }\n if args.config_path is not None:\n json_config_file = json.load(open(args.config_path, 'r'))\n configuration_maps['module_to_stage_map'] = json_config_file.get(\"module_to_stage_map\", None)\n configuration_maps['stage_to_rank_map'] = json_config_file.get(\"stage_to_rank_map\", None)\n configuration_maps['stage_to_rank_map'] = {\n int(k): v for (k, v) in configuration_maps['stage_to_rank_map'].items()}\n configuration_maps['stage_to_depth_map'] = json_config_file.get(\"stage_to_depth_map\", None)\n\n r = runtime.StageRuntime(\n model=model, distributed_backend=args.distributed_backend,\n fp16=args.fp16, loss_scale=args.loss_scale,\n training_tensor_shapes=training_tensor_shapes,\n eval_tensor_shapes=eval_tensor_shapes,\n training_tensor_dtypes=dtypes,\n inputs_module_destinations=inputs_module_destinations,\n target_tensor_names=target_tensor_names,\n configuration_maps=configuration_maps,\n master_addr=args.master_addr,\n rank=args.rank, local_rank=args.local_rank,\n num_ranks_in_server=args.num_ranks_in_server,\n verbose_freq=args.verbose_frequency,\n model_type=runtime.TRANSLATION,\n enable_recompute=args.recompute)\n\n # stage needed to determine if current stage is the first stage\n # num_stages needed to determine if current stage is the last stage\n # num_ranks needed to determine number of warmup_minibatches in case of pipelining\n args.stage = r.stage\n args.num_stages = r.num_stages\n args.num_ranks = r.num_ranks\n if not is_first_stage():\n args.synthetic_data = True\n\n # define optimizer\n if args.no_input_pipelining:\n num_versions = 1\n else:\n # number of versions is the total number of machines following the current\n # stage, shared amongst all replicas in this stage\n num_versions = r.num_warmup_minibatches + 1\n\n # if specified, resume from checkpoint\n if args.resume:\n checkpoint_file_path = \"%s.%d.pth.tar\" % (args.resume, r.stage)\n assert os.path.isfile(checkpoint_file_path)\n print(\"=> loading checkpoint '{}'\".format(checkpoint_file_path))\n checkpoint = torch.load(checkpoint_file_path)\n args.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n r.load_state_dict(checkpoint['state_dict'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(checkpoint_file_path, checkpoint['epoch']))\n\n # TODO: make this configurable by args\n use_adam_optimizer = True\n if use_adam_optimizer:\n optimizer = adam.AdamWithWeightStashing(\n modules=r.modules(), master_parameters=r.master_parameters,\n model_parameters=r.model_parameters, loss_scale=args.loss_scale,\n num_versions=num_versions, lr=args.lr, betas=(0.9,0.999),\n weight_decay=args.weight_decay, verbose_freq=args.verbose_frequency,\n macrobatch=args.macrobatch)\n else:\n optimizer = sgd.SGDWithWeightStashing(\n modules=r.modules(), 
master_parameters=r.master_parameters,\n model_parameters=r.model_parameters, loss_scale=args.loss_scale,\n num_versions=num_versions, lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay, verbose_freq=args.verbose_frequency)\n\n if args.resume:\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n cudnn.benchmark = True\n\n train_dataset = LazyParallelDataset(\n src_fname=os.path.join(args.data_dir, config.SRC_TRAIN_FNAME),\n tgt_fname=os.path.join(args.data_dir, config.TGT_TRAIN_FNAME),\n tokenizer=tokenizer,\n min_len=args.min_length_train,\n max_len=args.max_length_train,\n sort=False,\n max_size=None)\n\n val_dataset = ParallelDataset(\n src_fname=os.path.join(args.data_dir, config.SRC_VAL_FNAME),\n tgt_fname=os.path.join(args.data_dir, config.TGT_VAL_FNAME),\n tokenizer=tokenizer,\n min_len=args.min_length_train,\n max_len=args.max_length_train,\n sort=True)\n\n distributed_sampler = False\n if configuration_maps['stage_to_rank_map'] is not None:\n num_ranks_in_first_stage = len(configuration_maps['stage_to_rank_map'][0])\n if num_ranks_in_first_stage > 1:\n distributed_sampler = True\n\n # TODO: fix random seeds\n train_loader = train_dataset.get_loader(\n batch_size=args.batch_size, seeds=range(args.epochs),\n batch_first=False, shuffle=True,\n bucketing=not args.no_bucketing, num_workers=args.workers,\n world_size=r.num_ranks_in_first_stage,\n rank=r.rank_in_stage if r.stage == 0 else 0\n )\n\n val_loader = val_dataset.get_loader(\n batch_size=args.batch_size, batch_first=False,\n shuffle=True, num_workers=args.workers,\n world_size=r.num_ranks_in_first_stage,\n seeds=range(args.epochs),\n rank=r.rank_in_stage if r.stage == 0 else 0\n )\n\n # if checkpoint is loaded, start by running validation\n if args.resume:\n assert args.start_epoch > 0\n validate(val_loader, r, args.start_epoch-1)\n\n for epoch in range(args.start_epoch, args.epochs):\n if distributed_sampler:\n train_loader.sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args.epochs, r, args.lr_policy)\n\n # train or run forward pass only for one epoch\n if args.forward_only:\n validate(val_loader, r, epoch)\n else:\n train(train_loader, r, optimizer, epoch)\n\n # evaluate on validation set\n prec1 = validate(val_loader, r, epoch)\n if r.stage != r.num_stages: prec1 = 0\n\n # remember best prec@1 and save checkpoint\n best_prec1 = max(prec1, best_prec1)\n\n should_save_checkpoint = args.checkpoint_dir_not_nfs or r.rank_in_stage == 0\n if args.checkpoint_dir and should_save_checkpoint:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': r.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n 'tokenizer': tokenizer.get_state()\n }, args.checkpoint_dir, r.stage, epoch)\n\n\ndef train(train_loader, r, optimizer, epoch):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n n = r.num_iterations(loader_size=len(train_loader))\n if args.num_minibatches is not None:\n n = min(n, args.num_minibatches)\n r.train(n)\n if not is_first_stage(): train_loader = None\n r.set_loader(train_loader)\n\n end = time.time()\n epoch_start_time = time.time()\n\n if args.no_input_pipelining:\n num_warmup_minibatches = 0\n else:\n num_warmup_minibatches = r.num_warmup_minibatches\n\n if args.verbose_frequency > 0:\n print(\"Letting in %d warm-up minibatches\" % num_warmup_minibatches)\n print(\"Running training for %d minibatches\" % n)\n\n # start num_warmup_minibatches forward 
passes\n for i in range(num_warmup_minibatches):\n r.run_forward()\n\n for i in range(n - num_warmup_minibatches):\n # perform forward pass\n r.run_forward()\n\n if is_last_stage():\n # measure accuracy and record loss\n output, target, loss, num_tokens = r.output, r.target, r.loss.item(), r.num_tokens()\n losses.update(loss, num_tokens)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n epoch_time = (end - epoch_start_time) / 3600.0\n full_epoch_time = (epoch_time / float(i+1)) * float(n)\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Epoch time [hr]: {epoch_time:.3f} ({full_epoch_time:.3f})\\t'\n 'Memory: {memory:.3f} ({cached_memory:.3f})\\t'\n 'Loss: {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n epoch, i, n, batch_time=batch_time,\n epoch_time=epoch_time, full_epoch_time=full_epoch_time,\n loss=losses, # top1=top1, top5=top5,\n memory=(float(torch.cuda.memory_allocated()) / 10**9),\n cached_memory=(float(torch.cuda.memory_cached()) / 10**9)))\n import sys; sys.stdout.flush()\n else:\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\tMemory: {memory:.3f} ({cached_memory:.3f})'.format(\n epoch, i, n, memory=(float(torch.cuda.memory_allocated()) / 10**9),\n cached_memory=(float(torch.cuda.memory_cached()) / 10**9)))\n import sys; sys.stdout.flush()\n\n # perform backward pass\n if args.fp16:\n r.zero_grad()\n else:\n optimizer.zero_grad()\n optimizer.load_old_params()\n\n r.run_backward()\n optimizer.load_new_params()\n optimizer.step()\n\n # finish remaining backward passes\n for i in range(num_warmup_minibatches):\n optimizer.zero_grad()\n optimizer.load_old_params()\n r.run_backward()\n optimizer.load_new_params()\n optimizer.step()\n\n # wait for all helper threads to complete\n r.wait()\n\n print(\"Epoch %d: %.3f seconds\" % (epoch, time.time() - epoch_start_time))\n print(\"Epoch start time: %.3f, epoch end time: %.3f\" % (epoch_start_time, time.time()))\n\n\ndef validate(val_loader, r, epoch):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n n = r.num_iterations(loader_size=len(val_loader))\n if args.num_minibatches is not None:\n n = min(n, args.num_minibatches)\n r.eval(n)\n if not is_first_stage(): val_loader = None\n r.set_loader(val_loader)\n\n end = time.time()\n epoch_start_time = time.time()\n\n if args.no_input_pipelining:\n num_warmup_minibatches = 0\n else:\n num_warmup_minibatches = r.num_warmup_minibatches\n\n if args.verbose_frequency > 0:\n print(\"Letting in %d warm-up minibatches\" % num_warmup_minibatches)\n print(\"Running validation for %d minibatches\" % n)\n\n with torch.no_grad():\n for i in range(num_warmup_minibatches):\n r.run_forward()\n\n for i in range(n - num_warmup_minibatches):\n # perform forward pass\n r.run_forward()\n r.run_ack()\n\n if is_last_stage():\n output, target, loss, num_tokens = r.output, r.target, r.loss.item(), r.num_tokens()\n\n # measure accuracy and record loss\n # prec1, prec5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss, output.size(0))\n # top1.update(prec1[0], output.size(0))\n # top5.update(prec5[0], output.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Test: [{0}][{1}/{2}]\\t'\n 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Memory: {memory:.3f} ({cached_memory:.3f})\\t'\n 'Loss: {loss.val:.4f} 
({loss.avg:.4f})\\t'.format(\n epoch, i, n, batch_time=batch_time, loss=losses,\n memory=(float(torch.cuda.memory_allocated()) / 10**9),\n cached_memory=(float(torch.cuda.memory_cached()) / 10**9)))\n import sys; sys.stdout.flush()\n\n if is_last_stage():\n print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n for i in range(num_warmup_minibatches):\n r.run_ack()\n\n # wait for all helper threads to complete\n r.wait()\n\n print('Epoch %d: %.3f seconds' % (epoch, time.time() - epoch_start_time))\n print(\"Epoch start time: %.3f, epoch end time: %.3f\" % (epoch_start_time, time.time()))\n\n return top1.avg\n\n\n# TODO: Verify that checkpointing works correctly for GNMT\ndef save_checkpoint(state, checkpoint_dir, stage, epoch):\n assert os.path.isdir(checkpoint_dir)\n checkpoint_file_path = os.path.join(checkpoint_dir, \"checkpoint.%d.pth.tar.epoch.%d\" % (stage, epoch))\n torch.save(state, checkpoint_file_path)\n print(\"Saved checkpoint to %s\" % checkpoint_file_path)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef adjust_learning_rate(optimizer, epoch, total_epochs, r, lr_policy):\n \"\"\" Adjusts learning rate based on stage, epoch, and policy.\n\n Gets learning rate for stage from runtime and adjusts based on policy.\n\n Supported LR policies:\n - step\n - polynomial decay\n - exponential decay\n \"\"\"\n stage_base_lr = r.get_adjusted_learning_rate(base_lr=args.lr)\n if lr_policy == \"step\":\n lr = stage_base_lr * (0.1 ** (epoch // 30))\n elif lr_policy == \"polynomial\":\n power = 2.0\n lr = stage_base_lr * ((1.0 - (float(epoch) / float(total_epochs))) ** power)\n elif lr_policy == \"exponential_decay\":\n decay_rate = 0.97\n lr = stage_base_lr * (decay_rate ** (float(epoch) / float(total_epochs)))\n else:\n raise NotImplementedError\n print(\"Epoch: %d\\tLearning rate: %f\" % (epoch, lr))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport ctypes\nimport torch\n\n\nfrom seq2seq.train.smoothing import LabelSmoothing\nfrom seq2seq.train.smoothing import CrossEntropyWrapper\n\n\ndef l2_promote():\n # Check what's the device limit for current device, should be 64 by default\n pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))\n result = torch.cuda.cudart().cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))\n\n # Set device limit on the current device\n # cudaLimitMaxL2FetchGranularity = 0x05\n result = torch.cuda.cudart().cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))\n\n # Get the device limit again, should be 128\n result = torch.cuda.cudart().cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))\n\n\ndef build_gnmt_criterion(vocab_size, padding_idx, smoothing):\n if smoothing == 0.:\n loss_weight = torch.ones(vocab_size)\n loss_weight[padding_idx] = 0\n criterion = CrossEntropyWrapper(weight=loss_weight, size_average=False)\n else:\n criterion = LabelSmoothing(padding_idx, smoothing)\n\n return criterion\n\n\ndef get_rank():\n \"\"\"\n Gets distributed rank or returns zero if distributed is not initialized.\n \"\"\"\n if torch.distributed.is_initialized():\n rank = torch.distributed.get_rank()\n else:\n rank = 0\n return rank\n\n\ndef get_world_size():\n \"\"\"\n Gets total number of distributed workers or returns one if distributed is\n not initialized.\n \"\"\"\n if torch.distributed.is_initialized():\n world_size = torch.distributed.get_world_size()\n else:\n world_size = 1\n return world_size\n\n\ndef barrier():\n \"\"\"\n Works as a temporary distributed barrier, currently pytorch\n doesn't implement barrier for NCCL backend.\n Calls all_reduce on dummy tensor and synchronizes with GPU.\n \"\"\"\n if torch.distributed.is_initialized():\n torch.distributed.all_reduce(torch.cuda.FloatTensor(1))\n torch.cuda.synchronize()\n\n\nclass AverageMeter:\n \"\"\"\n Computes and stores the average and current value\n \"\"\"\n def __init__(self, skip_first=True):\n self.reset()\n self.skip = skip_first\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n\n if self.skip:\n self.skip = False\n else:\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def reduce(self, op):\n \"\"\"\n Reduces average value over all workers.\n\n :param op: 'sum' or 'mean', reduction operator\n \"\"\"\n if op not in ('sum', 'mean'):\n raise NotImplementedError\n\n distributed = (get_world_size() > 1)\n if distributed:\n if(hasattr(dist, \"get_backend\")):\n backend = dist.get_backend()\n else:\n backend = dist._backend\n\n cuda = (backend == dist.dist_backend.NCCL)\n\n if cuda:\n avg = torch.cuda.FloatTensor([self.avg])\n _sum = torch.cuda.FloatTensor([self.sum])\n else:\n avg = torch.FloatTensor([self.avg])\n _sum = torch.FloatTensor([self.sum])\n dist.all_reduce(avg, op=dist.reduce_op.SUM)\n dist.all_reduce(_sum, op=dist.reduce_op.SUM)\n self.avg = avg.item()\n self.sum = _sum.item()\n\n if op == 'mean':\n self.avg /= get_world_size()\n self.sum /= get_world_size()\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport torch\n\n\nclass Stage0(torch.nn.Module):\n def __init__(self):\n super(Stage0, self).__init__()\n self.layer2 = torch.nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer3 = torch.nn.ReLU(inplace=True)\n self.layer4 = torch.nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer5 = torch.nn.ReLU(inplace=True)\n self.layer6 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n self.layer7 = torch.nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer8 = torch.nn.ReLU(inplace=True)\n self.layer9 = torch.nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer10 = torch.nn.ReLU(inplace=True)\n self.layer11 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n self.layer12 = torch.nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer13 = torch.nn.ReLU(inplace=True)\n self.layer14 = torch.nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer15 = torch.nn.ReLU(inplace=True)\n self.layer16 = torch.nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer17 = torch.nn.ReLU(inplace=True)\n self.layer18 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n self.layer19 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer20 = torch.nn.ReLU(inplace=True)\n self.layer21 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer22 = torch.nn.ReLU(inplace=True)\n self.layer23 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer24 = torch.nn.ReLU(inplace=True)\n self.layer25 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n self.layer26 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer27 = torch.nn.ReLU(inplace=True)\n self.layer28 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer29 = torch.nn.ReLU(inplace=True)\n self.layer30 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer31 = torch.nn.ReLU(inplace=True)\n self.layer32 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n self._initialize_weights()\n\n def forward(self, input0):\n out0 = input0.clone()\n out2 = self.layer2(out0)\n out3 = self.layer3(out2)\n out4 = self.layer4(out3)\n out5 = self.layer5(out4)\n out6 = self.layer6(out5)\n out7 = self.layer7(out6)\n out8 = self.layer8(out7)\n out9 = self.layer9(out8)\n out10 = self.layer10(out9)\n out11 = self.layer11(out10)\n out12 = self.layer12(out11)\n out13 = self.layer13(out12)\n out14 = self.layer14(out13)\n out15 = self.layer15(out14)\n out16 = self.layer16(out15)\n out17 = self.layer17(out16)\n out18 = self.layer18(out17)\n out19 = self.layer19(out18)\n out20 = self.layer20(out19)\n out21 = self.layer21(out20)\n out22 = self.layer22(out21)\n out23 = self.layer23(out22)\n out24 = self.layer24(out23)\n out25 = self.layer25(out24)\n out26 = self.layer26(out25)\n out27 = self.layer27(out26)\n out28 = self.layer28(out27)\n out29 = self.layer29(out28)\n out30 = self.layer30(out29)\n out31 = self.layer31(out30)\n out32 = self.layer32(out31)\n out33 = out32.size(0)\n out34 = out32.view(out33, -1)\n return out34\n\n def 
_initialize_weights(self):\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, torch.nn.BatchNorm2d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, torch.nn.Linear):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n torch.nn.init.constant_(m.bias, 0)\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport torch\n\n\nclass Stage8(torch.nn.Module):\n def __init__(self):\n super(Stage8, self).__init__()\n self.layer1 = torch.nn.ReLU(inplace=True)\n self.layer2 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n self.layer3 = torch.nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.layer4 = torch.nn.ReLU(inplace=True)\n self._initialize_weights()\n\n def forward(self, input0):\n out0 = input0.clone()\n out1 = self.layer1(out0)\n out2 = self.layer2(out1)\n out3 = self.layer3(out2)\n out4 = self.layer4(out3)\n return out4\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, torch.nn.BatchNorm2d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, torch.nn.Linear):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n torch.nn.init.constant_(m.bias, 0)\n"
] | [
[
"torch.save"
],
[
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.nn.init.kaiming_normal_"
],
[
"torch.nn.functional.log_softmax"
],
[
"torch.cuda.set_device",
"torch.load",
"torch.Tensor",
"torch.cuda.memory_cached",
"torch.no_grad",
"torch.rand",
"torch.cuda.memory_allocated",
"torch.save"
],
[
"torch.cuda.synchronize",
"torch.ones",
"torch.distributed.is_initialized",
"torch.cuda.FloatTensor",
"torch.FloatTensor",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.cuda.cudart"
],
[
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.init.normal_",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
],
[
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.init.normal_",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
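
The train() and validate() loops in the GNMT runtime entry above both follow PipeDream's warmup/steady-state/drain schedule: they first admit num_warmup_minibatches forward passes to fill the pipeline, then alternate one forward and one backward pass per minibatch, and finally drain the backward passes still in flight. A minimal sketch of that control flow (the rt object and run_pipelined_epoch name are hypothetical stand-ins for runtime.StageRuntime and its callers; only the loop shape is taken from the code above):

def run_pipelined_epoch(rt, n, num_warmup_minibatches):
    # Warmup: fill the pipeline with forward passes.
    for _ in range(num_warmup_minibatches):
        rt.run_forward()
    # Steady state: one forward followed by one backward per minibatch (1F1B).
    for _ in range(n - num_warmup_minibatches):
        rt.run_forward()
        rt.run_backward()
    # Drain: complete the backward passes of the warmup minibatches.
    for _ in range(num_warmup_minibatches):
        rt.run_backward()

This loop shape is also why the weight-stashing optimizers above are built with num_versions = r.num_warmup_minibatches + 1: every minibatch still in flight must run its backward pass against the same weight version its forward pass used.
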
PriyamvadaKumar/AWS_BioActive_Classification | [
"b6a4413618586712ca4dc196f2dfaa3ceca804fb"
] | [
"bioactive_lab.py"
] | [
"import os, sys\ndirpath = os.getcwd()\nsys.path.insert(0, dirpath + '/goal_tether_functions')\nsys.path.insert(0, dirpath + '/predictive_modelers')\nsys.path.insert(0, dirpath + '/predictive_modelers/assessment_resources')\nsys.path.insert(0, dirpath + '/active_learners')\nsys.path.insert(0, dirpath + '/data_acquisition')\nsys.path.insert(0, dirpath + '/diagnostics')\nfrom createCampaign_battleship import main as createCampaign\n# from createImageCampaign_Bria import main as createCampaign\nfrom runCampaign2 import main as runCampaign\nfrom database import *\nimport outputManager\nimport time\nimport boto3\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.cluster import KMeans\n\n\n# Part 1 Plotting Function\ndef plot_simulation_accuracy(acc, title, mul_accuracy=False):\n fig, ax = plt.subplots()\n ax.set_ylabel(\"Accuracy (%)\")\n ax.set_xlabel(\"Iterations\")\n ax.set_title(title)\n if mul_accuracy:\n ax.plot(np.arange(len(acc[0])), acc[0], label=\"Full Space\")\n ax.plot(np.arange(len(acc[1])), acc[1], label=\"Forward Modeling\")\n ax.plot(np.arange(len(acc[2])), acc[2], label=\"Prediction Only\")\n else:\n ax.plot(np.arange(len(acc)), acc)\n ax.legend()\n plt.show()\n\n\ndef average_arrays(mat):\n array = []\n for i in range(25):\n avg = 0\n for m in range(len(mat)):\n if len(mat[m]) < i:\n continue\n avg += mat[m][i]\n avg = avg/len(mat)\n array.append(avg)\n return array\n\nwd =os.getcwd()\nprint(\"Current Working Directory: \", wd)\nprint()\n\nif path.exists(\"data/data.csv\") is False:\n print(\"Retrieving Data from S3\")\n\n# read data from S3\ns3 = boto3.resource('s3')\ns3.Bucket('whatyouknowaboutmybucket').download_file('data.csv', wd + '/data/data.csv')\n\nif path.exists(\"data/data.csv\") is False:\n print(\"Retrieving Data from S3\")\n time.sleep(5)\n\ndata = pd.read_csv(\"data/data.csv\").dropna().to_numpy()\nfeatures = data[:, 4:]\nlabels = data[:, 2]\n\nl = LabelEncoder()\nlabels = l.fit_transform(labels)\nprint(l.classes_)\n\ns = KMeans(n_clusters=5)\n# s.decision_function(features[:1000])\ns.fit_transform(features[:1500])\nprint(s.score(features[1500:]))\n\nd = np.zeros((20,20))\n\n# create groundTruth\nfor i in range(len(data)):\n if data[i][0] - 1 >= len(d) or data[i][1] >= len(d[0]):\n continue\n d[data[i][0]-1][data[i][1]-1] = s.predict(features[i].reshape(1,-1))\n\nprint(d)\n\n\nnp.savetxt('data_acquisition/project.txt', d)\n\n\nprint(labels)\n\n\n\n\n\n\n# exit()\n'''\ncampaign = createCampaign()\nrunCampaign(campaign)\nacc = [np.array(campaign.accuracy_full), np.array(campaign.accuracy_forwardModeling),\n np.array(campaign.accuracy_onlyPredictions)]\n\nplot_simulation_accuracy(acc, \"Model Accuracies for a Single Simulation\", mul_accuracy=True)\n'''\n\n# Part 2 of Assignment - 2 independent variables (0-20) and 1 dependent variable (0-10) for 20 simulations\n\nacc = []\nfor i in range(1):\n campaign = createCampaign()\n campaign.randoseed = 2\n # campaign.ESS.iVars = [('int', 0, 9), ('int', 0, 9)]\n # campaign.ESS.dVars = [('int', 0, 2)]\n campaign.groundtruthData = 'data_acquisition/project.txt'\n campaign.simsFlag = True\n runCampaign(campaign)\n acc = [campaign.accuracy_full, campaign.accuracy_forwardModeling, campaign.accuracy_onlyPredictions]\n# acc = average_arrays(acc)\nplot_simulation_accuracy(acc, \"Three Accuracies for the Experimental Space\", mul_accuracy=True)\n\n\n# Part 3 of Assignment -\n# acc1, acc2, acc3, acc4 = [], [], [], []\n# for i in range(5):\n# campaign 
= createCampaign()\n# campaign.ESS.high_homogeneity = True\n# campaign.ESS.h_num = 2\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 2)]\n# campaign.ESS.dimarr = [20,20]\n# runCampaign(campaign)\n# acc = campaign.accuracy_onlyPredictions\n# acc1.append(acc)\n#\n# for i in range(5):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.h_num = 2\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 2)]\n# runCampaign(campaign)\n# acc = campaign.accuracy_onlyPredictions\n# acc2.append(acc)\n#\n# for i in range(5):\n# campaign = createCampaign()\n# campaign.ESS.high_homogeneity = True\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20,20]\n# runCampaign(campaign)\n# acc = campaign.accuracy_onlyPredictions\n# acc3.append(acc)\n#\n# for i in range(5):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20,20]\n# runCampaign(campaign)\n# acc = campaign.accuracy_onlyPredictions\n# acc4.append(acc)\n#\n# acc1, acc2, acc3, acc4 = average_arrays(acc1), average_arrays(acc2), average_arrays(acc3), average_arrays(acc4)\n#\n# plt.plot([i+1 for i in range(len(acc1))], acc1, label=\"H-2\", color=\"blue\")\n# plt.plot([i+1 for i in range(len(acc2))], acc2, label=\"L-2\", color=\"green\")\n# plt.plot([i+1 for i in range(len(acc3))], acc3, label=\"H-10\", color=\"red\")\n# plt.plot([i+1 for i in range(len(acc4))], acc4, label=\"L-10\", color=\"black\")\n# plt.ylabel(\"Accuracy (%)\")\n# plt.xlabel(\"Iterations\")\n# plt.title(\"Different Homogeneity within Experimental Spaces\")\n# plt.legend()\n# plt.show()\n\n\n# Part 4 of Assignment -\n\n# acc1, acc2, acc3, acc4 = [], [], [], []\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0\n# campaign.randoseed= 45\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc = campaign.accuracy_onlyPredictions\n# acc1.append(acc)\n#\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.randoseed = 1\n# campaign.ESS.error = 0.1\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc = campaign.accuracy_onlyPredictions\n# acc2.append(acc)\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.5\n# campaign.randoseed = 2\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc = campaign.accuracy_onlyPredictions\n# acc3.append(acc)\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 1.0\n# campaign.randoseed=3\n# campaign.ESS.h_num = 10\n# 
campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc = campaign.accuracy_onlyPredictions\n# acc4.append(acc)\n#\n# acc1, acc2, acc3, acc4 = average_arrays(acc1), average_arrays(acc2), average_arrays(acc3), average_arrays(acc4)\n#\n# plt.plot([i+1 for i in range(len(acc1))], acc1, label=\"0.0\", color=\"blue\")\n# plt.plot([i+1 for i in range(len(acc2))], acc2, label=\"0.1\", color=\"green\")\n# plt.plot([i+1 for i in range(len(acc3))], acc3, label=\"0.5\", color=\"red\")\n# plt.plot([i+1 for i in range(len(acc4))], acc4, label=\"1.0\", color=\"black\")\n# plt.ylabel(\"Accuracy (%)\")\n# plt.xlabel(\"Iterations\")\n# plt.title(\"Different Error Rates within Experimental Spaces\")\n# plt.legend()\n# plt.show()\n\n\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0\n# campaign.randoseed = 53\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc1 = campaign.accuracy_onlyPredictions\n#\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0\n# campaign.randoseed = 39\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc2 = campaign.accuracy_onlyPredictions\n#\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.1\n# campaign.randoseed = 32\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc3 = campaign.accuracy_onlyPredictions\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.1\n# campaign.randoseed = 17\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc4 = campaign.accuracy_onlyPredictions\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.5\n# campaign.randoseed = 3\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc5 = campaign.accuracy_onlyPredictions\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.5\n# campaign.randoseed = 15\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc6 = campaign.accuracy_onlyPredictions\n#\n#\n# plt.plot([i+1 for i in range(len(acc1))], acc1, label=\"0.0 - 
B\", color=\"blue\")\n# plt.plot([i+1 for i in range(len(acc2))], acc2, label=\"0.0 - N\", color=\"green\")\n# plt.plot([i+1 for i in range(len(acc3))], acc3, label=\"0.1 - B\", color=\"red\")\n# plt.plot([i+1 for i in range(len(acc4))], acc4, label=\"0.1 - N\", color=\"black\")\n# plt.plot([i+1 for i in range(len(acc5))], acc5, label=\"0.5 - B\", color=\"yellow\")\n# plt.plot([i+1 for i in range(len(acc6))], acc6, label=\"0.5 - N\", color=\"cyan\")\n# plt.ylabel(\"Accuracy (%)\")\n# plt.xlabel(\"Iterations\")\n# plt.title(\"Different Categorical Models within Experimental Spaces\")\n# plt.legend()\n# plt.show()\n"
] | [
[
"pandas.read_csv",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.subplots",
"numpy.savetxt",
"sklearn.preprocessing.LabelEncoder",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
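
The data-preparation block in bioactive_lab.py above fits KMeans on the first 1500 feature rows and then paints each sample's predicted cluster id onto a 20x20 ground-truth grid indexed by the first two data columns. A condensed, self-contained sketch of that step, assuming synthetic stand-ins for data[:, 0:2] and data[:, 4:] (the coords/features arrays and the 'project.txt' output name here are illustrative):

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
coords = rng.integers(1, 21, size=(200, 2))   # stand-in for the 1-based grid coordinates
features = rng.normal(size=(200, 8))          # stand-in for the feature columns

km = KMeans(n_clusters=5, n_init=10).fit(features[:150])  # fit on a prefix, as the script does

grid = np.zeros((20, 20))
for (row, col), feat in zip(coords, features):
    # convert 1-based coordinates to 0-based grid indices, as in the script
    grid[row - 1][col - 1] = km.predict(feat.reshape(1, -1))[0]
np.savetxt('project.txt', grid)  # mirrors the script's np.savetxt call
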
polewczakp/pyAudioAnalysis | [
"7dc2d8e18da1ca2f2485a402bb7399b43bbb2b24"
] | [
"pyAudioAnalysis/audioSegmentation.py"
] | [
"from __future__ import print_function\nimport os\nimport csv\nimport glob\nimport scipy\nimport sklearn\nimport numpy as np\nimport hmmlearn.hmm\nimport sklearn.cluster\nimport pickle as cpickle\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\nimport sklearn.discriminant_analysis\nfrom pyAudioAnalysis import audioBasicIO\nfrom pyAudioAnalysis import audioTrainTest as at\nfrom pyAudioAnalysis import MidTermFeatures as mtf\nfrom pyAudioAnalysis import ShortTermFeatures as stf\n\n\"\"\" General utility functions \"\"\"\n\n\ndef smooth_moving_avg(signal, window=11):\n window = int(window)\n if signal.ndim != 1:\n raise ValueError(\"\")\n if signal.size < window:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n if window < 3:\n return signal\n s = np.r_[2 * signal[0] - signal[window - 1::-1],\n signal, 2 * signal[-1] - signal[-1:-window:-1]]\n w = np.ones(window, 'd')\n y = np.convolve(w/w.sum(), s, mode='same')\n return y[window:-window + 1]\n\n\ndef self_similarity_matrix(feature_vectors):\n \"\"\"\n This function computes the self-similarity matrix for a sequence\n of feature vectors.\n ARGUMENTS:\n - feature_vectors: a np matrix (nDims x nVectors) whose i-th column\n corresponds to the i-th feature vector\n\n RETURNS:\n - sim_matrix: the self-similarity matrix (nVectors x nVectors)\n \"\"\"\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix\n\n\ndef labels_to_segments(labels, window):\n \"\"\"\n ARGUMENTS:\n - labels: a sequence of class labels (per time window)\n - window: window duration (in seconds)\n\n RETURNS:\n - segments: a sequence of segment's limits: segs[i, 0] is start and\n segs[i, 1] are start and end point of segment i\n - classes: a sequence of class flags: class[i] is the class ID of\n the i-th segment\n \"\"\"\n\n if len(labels)==1:\n segs = [0, window]\n classes = labels\n return segs, classes\n\n\n num_segs = 0\n index = 0\n classes = []\n segment_list = []\n cur_label = labels[index]\n while index < len(labels) - 1:\n previous_value = cur_label\n while True:\n index += 1\n compare_flag = labels[index]\n if (compare_flag != cur_label) | (index == len(labels) - 1):\n num_segs += 1\n cur_label = labels[index]\n segment_list.append((index * window))\n classes.append(previous_value)\n break\n segments = np.zeros((len(segment_list), 2))\n\n for i in range(len(segment_list)):\n if i > 0:\n segments[i, 0] = segment_list[i-1]\n segments[i, 1] = segment_list[i]\n return segments, classes\n\n\ndef segments_to_labels(start_times, end_times, labels, window):\n \"\"\"\n This function converts segment endpoints and respective segment\n labels to fix-sized class labels.\n ARGUMENTS:\n - start_times: segment start points (in seconds)\n - end_times: segment endpoints (in seconds)\n - labels: segment labels\n - window: fix-sized window (in seconds)\n RETURNS:\n - flags: np array of class indices\n - class_names: list of classnames (strings)\n \"\"\"\n flags = []\n class_names = list(set(labels))\n index = window / 2.0\n while index < end_times[-1]:\n for i in range(len(start_times)):\n if start_times[i] < index <= end_times[i]:\n break\n flags.append(class_names.index(labels[i]))\n index += window\n return np.array(flags), class_names\n\n\ndef compute_metrics(confusion_matrix, class_names):\n \"\"\"\n This function computes the precision, recall and f1 
measures,\n given a confusion matrix\n \"\"\"\n f1 = []\n recall = []\n precision = []\n n_classes = confusion_matrix.shape[0]\n if len(class_names) != n_classes:\n print(\"Error in computePreRec! Confusion matrix and class_names \"\n \"list must be of the same size!\")\n else:\n for i, c in enumerate(class_names):\n precision.append(confusion_matrix[i, i] /\n np.sum(confusion_matrix[:, i]))\n recall.append(confusion_matrix[i, i] /\n np.sum(confusion_matrix[i, :]))\n f1.append(2 * precision[-1] * recall[-1] /\n (precision[-1] + recall[-1]))\n return recall, precision, f1\n\n\ndef read_segmentation_gt(gt_file):\n \"\"\"\n This function reads a segmentation ground truth file,\n following a simple CSV format with the following columns:\n <segment start>,<segment end>,<class label>\n\n ARGUMENTS:\n - gt_file: the path of the CSV segment file\n RETURNS:\n - seg_start: a np array of segments' start positions\n - seg_end: a np array of segments' ending positions\n - seg_label: a list of respective class labels (strings)\n \"\"\"\n with open(gt_file, 'rt') as f_handle:\n reader = csv.reader(f_handle, delimiter='\\t')\n start_times = []\n end_times = []\n labels = []\n for row in reader:\n if len(row) == 3:\n start_times.append(float(row[0]))\n end_times.append(float(row[1]))\n labels.append((row[2]))\n return np.array(start_times), np.array(end_times), labels\n\n\ndef plot_segmentation_results(flags_ind, flags_ind_gt, class_names, mt_step,\n evaluate_only=False):\n \"\"\"\n This function plots statistics on the classification-segmentation results \n produced either by the fix-sized supervised method or the HMM method.\n It also computes the overall accuracy achieved by the respective method \n if ground-truth is available.\n \"\"\"\n \n flags = [class_names[int(f)] for f in flags_ind]\n segments, classes = labels_to_segments(flags, mt_step)\n min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0]) \n if min_len > 0:\n accuracy = np.sum(flags_ind[0:min_len] ==\n flags_ind_gt[0:min_len]) / float(min_len)\n else:\n accuracy = -1\n\n if not evaluate_only:\n duration = segments[-1, 1]\n s_percentages = np.zeros((len(class_names), ))\n percentages = np.zeros((len(class_names), ))\n av_durations = np.zeros((len(class_names), ))\n\n for i_seg in range(segments.shape[0]):\n s_percentages[class_names.index(classes[i_seg])] += \\\n (segments[i_seg, 1]-segments[i_seg, 0])\n\n for i in range(s_percentages.shape[0]):\n percentages[i] = 100.0 * s_percentages[i] / duration\n class_sum = sum(1 for c in classes if c == class_names[i])\n if class_sum > 0:\n av_durations[i] = s_percentages[i] / class_sum\n else:\n av_durations[i] = 0.0\n\n for i in range(percentages.shape[0]):\n print(class_names[i], percentages[i], av_durations[i])\n\n font = {'size': 10}\n plt.rc('font', **font)\n\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n ax1.set_yticks(np.array(range(len(class_names))))\n ax1.axis((0, duration, -1, len(class_names)))\n ax1.set_yticklabels(class_names)\n ax1.plot(np.array(range(len(flags_ind))) * mt_step +\n mt_step / 2.0, flags_ind)\n if flags_ind_gt.shape[0] > 0:\n ax1.plot(np.array(range(len(flags_ind_gt))) * mt_step +\n mt_step / 2.0, flags_ind_gt + 0.05, '--r')\n plt.xlabel(\"time (seconds)\")\n if accuracy >= 0:\n plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))\n\n ax2 = fig.add_subplot(223)\n plt.title(\"Classes percentage durations\")\n ax2.axis((0, len(class_names) + 1, 0, 100))\n ax2.set_xticks(np.array(range(len(class_names) + 1)))\n ax2.set_xticklabels([\" \"] + class_names)\n 
print(np.array(range(len(class_names))), percentages)\n ax2.bar(np.array(range(len(class_names))) + 0.5, percentages)\n\n ax3 = fig.add_subplot(224)\n plt.title(\"Segment average duration per class\")\n ax3.axis((0, len(class_names)+1, 0, av_durations.max()))\n ax3.set_xticks(np.array(range(len(class_names) + 1)))\n ax3.set_xticklabels([\" \"] + class_names)\n ax3.bar(np.array(range(len(class_names))) + 0.5, av_durations)\n fig.tight_layout()\n plt.show()\n return accuracy\n\n\ndef evaluate_speaker_diarization(labels, labels_gt):\n\n min_len = min(labels.shape[0], labels_gt.shape[0])\n labels = labels[0:min_len]\n labels_gt = labels_gt[0:min_len]\n\n unique_flags = np.unique(labels)\n unique_flags_gt = np.unique(labels_gt)\n\n # compute contigency table:\n contigency_matrix = np.zeros((unique_flags.shape[0],\n unique_flags_gt.shape[0]))\n for i in range(min_len):\n contigency_matrix[int(np.nonzero(unique_flags == labels[i])[0]),\n int(np.nonzero(unique_flags_gt == labels_gt[i])[0])] += 1.0\n\n columns, rows = contigency_matrix.shape\n row_sum = np.sum(contigency_matrix, axis=0)\n column_sum = np.sum(contigency_matrix, axis=1)\n matrix_sum = np.sum(contigency_matrix)\n\n purity_clust = np.zeros((columns, ))\n purity_speak = np.zeros((rows, ))\n # compute cluster purity:\n for i in range(columns):\n purity_clust[i] = np.max((contigency_matrix[i, :])) / (column_sum[i])\n\n for j in range(rows):\n purity_speak[j] = np.max((contigency_matrix[:, j])) / (row_sum[j])\n\n purity_cluster_m = np.sum(purity_clust * column_sum) / matrix_sum\n purity_speaker_m = np.sum(purity_speak * row_sum) / matrix_sum\n\n return purity_cluster_m, purity_speaker_m\n\n\ndef train_hmm_compute_statistics(features, labels):\n \"\"\"\n This function computes the statistics used to train\n an HMM joint segmentation-classification model\n using a sequence of sequential features and respective labels\n\n ARGUMENTS:\n - features: a np matrix of feature vectors (numOfDimensions x n_wins)\n - labels: a np array of class indices (n_wins x 1)\n RETURNS:\n - class_priors: matrix of prior class probabilities\n (n_classes x 1)\n - transmutation_matrix: transition matrix (n_classes x n_classes)\n - means: means matrix (numOfDimensions x 1)\n - cov: deviation matrix (numOfDimensions x 1)\n \"\"\"\n unique_labels = np.unique(labels)\n n_comps = len(unique_labels)\n\n n_feats = features.shape[0]\n\n if features.shape[1] < labels.shape[0]:\n print(\"trainHMM warning: number of short-term feature vectors \"\n \"must be greater or equal to the labels length!\")\n labels = labels[0:features.shape[1]]\n\n # compute prior probabilities:\n class_priors = np.zeros((n_comps,))\n for i, u_label in enumerate(unique_labels):\n class_priors[i] = np.count_nonzero(labels == u_label)\n # normalize prior probabilities\n class_priors = class_priors / class_priors.sum()\n\n # compute transition matrix:\n transmutation_matrix = np.zeros((n_comps, n_comps))\n for i in range(labels.shape[0]-1):\n transmutation_matrix[int(labels[i]), int(labels[i + 1])] += 1\n # normalize rows of transition matrix:\n for i in range(n_comps):\n transmutation_matrix[i, :] /= transmutation_matrix[i, :].sum()\n\n means = np.zeros((n_comps, n_feats))\n for i in range(n_comps):\n means[i, :] = \\\n np.array(features[:,\n np.nonzero(labels == unique_labels[i])[0]].mean(axis=1))\n\n cov = np.zeros((n_comps, n_feats))\n for i in range(n_comps):\n \"\"\"\n cov[i, :, :] = np.cov(features[:, np.nonzero(labels == u_labels[i])[0]])\n \"\"\"\n # use line above if HMM using full gaussian 
distributions are to be used\n cov[i, :] = np.std(features[:,\n np.nonzero(labels == unique_labels[i])[0]],\n axis=1)\n\n return class_priors, transmutation_matrix, means, cov\n\n\ndef train_hmm_from_file(wav_file, gt_file, hmm_model_name, mid_window, mid_step):\n \"\"\"\n This function trains a HMM model for segmentation-classification\n using a single annotated audio file\n ARGUMENTS:\n - wav_file: the path of the audio filename\n - gt_file: the path of the ground truth filename\n (a csv file of the form <segment start in seconds>,\n <segment end in seconds>,<segment label> in each row\n - hmm_model_name: the name of the HMM model to be stored\n - mt_win: mid-term window size\n - mt_step: mid-term window step\n RETURNS:\n - hmm: an object to the resulting HMM\n - class_names: a list of class_names\n\n After training, hmm, class_names, along with the mt_win and mt_step\n values are stored in the hmm_model_name file\n \"\"\"\n\n seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)\n flags, class_names = segments_to_labels(seg_start, seg_end, seg_labs, mid_step)\n sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)\n features, _, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mid_window * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * 0.050),\n round(sampling_rate * 0.050))\n class_priors, transumation_matrix, means, cov = \\\n train_hmm_compute_statistics(features, flags)\n hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], \"diag\")\n\n hmm.covars_ = cov\n hmm.means_ = means\n hmm.startprob_ = class_priors\n hmm.transmat_ = transumation_matrix\n\n save_hmm(hmm_model_name, hmm, class_names, mid_window, mid_step)\n\n return hmm, class_names\n\n\ndef train_hmm_from_directory(folder_path, hmm_model_name, mid_window, mid_step):\n \"\"\"\n This function trains a HMM model for segmentation-classification using\n a where WAV files and .segment (ground-truth files) are stored\n ARGUMENTS:\n - folder_path: the path of the data diretory\n - hmm_model_name: the name of the HMM model to be stored\n - mt_win: mid-term window size\n - mt_step: mid-term window step\n RETURNS:\n - hmm: an object to the resulting HMM\n - class_names: a list of class_names\n\n After training, hmm, class_names, along with the mt_win\n and mt_step values are stored in the hmm_model_name file\n \"\"\"\n\n flags_all = np.array([])\n class_names_all = []\n for i, f in enumerate(glob.glob(folder_path + os.sep + '*.wav')):\n # for each WAV file\n wav_file = f\n gt_file = f.replace('.wav', '.segments')\n if os.path.isfile(gt_file):\n seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)\n flags, class_names = \\\n segments_to_labels(seg_start, seg_end, seg_labs, mid_step)\n for c in class_names:\n # update class names:\n if c not in class_names_all:\n class_names_all.append(c)\n sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)\n feature_vector, _, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mid_window * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * 0.050),\n round(sampling_rate * 0.050))\n\n flag_len = len(flags)\n feat_cols = feature_vector.shape[1]\n min_sm = min(feat_cols, flag_len)\n feature_vector = feature_vector[:, 0:min_sm]\n flags = flags[0:min_sm]\n\n flags_new = []\n # append features and labels\n for j, fl in enumerate(flags):\n flags_new.append(class_names_all.index(class_names_all[flags[j]]))\n\n flags_all = np.append(flags_all, np.array(flags_new))\n\n if i == 0:\n f_all = feature_vector\n else:\n f_all 
= np.concatenate((f_all, feature_vector), axis=1)\n\n # compute HMM statistics\n class_priors, transmutation_matrix, means, cov = \\\n train_hmm_compute_statistics(f_all, flags_all)\n # train the HMM\n hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], \"diag\")\n hmm.covars_ = cov\n hmm.means_ = means\n hmm.startprob_ = class_priors\n hmm.transmat_ = transmutation_matrix\n\n save_hmm(hmm_model_name, hmm, class_names_all, mid_window, mid_step)\n\n return hmm, class_names_all\n\n\ndef save_hmm(hmm_model_name, model, classes, mid_window, mid_step):\n \"\"\"Save HMM model\"\"\"\n with open(hmm_model_name, \"wb\") as f_handle:\n cpickle.dump(model, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)\n cpickle.dump(classes, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)\n cpickle.dump(mid_window, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)\n cpickle.dump(mid_step, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)\n\n\ndef hmm_segmentation(audio_file, hmm_model_name, plot_results=False,\n gt_file=\"\"):\n sampling_rate, signal = audioBasicIO.read_audio_file(audio_file)\n\n with open(hmm_model_name, \"rb\") as f_handle:\n hmm = cpickle.load(f_handle)\n class_names = cpickle.load(f_handle)\n mid_window = cpickle.load(f_handle)\n mid_step = cpickle.load(f_handle)\n\n features, _, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mid_window * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * 0.050),\n round(sampling_rate * 0.050))\n\n # apply model\n labels = hmm.predict(features.T)\n labels_gt, class_names_gt, accuracy, cm = \\\n load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)\n return labels, class_names, accuracy, cm\n\n\ndef load_ground_truth_segments(gt_file, mt_step):\n seg_start, seg_end, seg_labels = read_segmentation_gt(gt_file)\n labels, class_names = segments_to_labels(seg_start, seg_end, seg_labels,\n mt_step)\n labels_temp = []\n for index, label in enumerate(labels):\n # \"align\" labels with GT\n if class_names[labels[index]] in class_names:\n labels_temp.append(class_names.index(class_names[\n labels[index]]))\n else:\n labels_temp.append(-1)\n labels = np.array(labels_temp)\n return labels, class_names\n\n\ndef calculate_confusion_matrix(predictions, ground_truth, classes):\n cm = np.zeros((len(classes), len(classes)))\n for index in range(min(predictions.shape[0], ground_truth.shape[0])):\n cm[int(ground_truth[index]), int(predictions[index])] += 1\n return cm\n\n\ndef mid_term_file_classification(input_file, model_name, model_type,\n plot_results=False, gt_file=\"\"):\n \"\"\"\n This function performs mid-term classification of an audio stream.\n Towards this end, supervised knowledge is used,\n i.e. 
a pre-trained classifier.\n ARGUMENTS:\n - input_file: path of the input WAV file\n - model_name: name of the classification model\n - model_type: svm or knn depending on the classifier type\n - plot_results: True if results are to be plotted using\n matplotlib along with a set of statistics\n\n RETURNS:\n - segs: a sequence of segment's endpoints: segs[i] is the\n endpoint of the i-th segment (in seconds)\n - classes: a sequence of class flags: class[i] is the\n class ID of the i-th segment\n \"\"\"\n labels = []\n accuracy = 0.0\n class_names = []\n cm = np.array([])\n if not os.path.isfile(model_name):\n print(\"mtFileClassificationError: input model_type not found!\")\n return labels, class_names, accuracy, cm\n\n # Load classifier:\n if model_type == \"knn\":\n classifier, mean, std, class_names, mt_win, mid_step, st_win, \\\n st_step, compute_beat = at.load_model_knn(model_name)\n else:\n classifier, mean, std, class_names, mt_win, mid_step, st_win, \\\n st_step, compute_beat = at.load_model(model_name)\n if compute_beat:\n print(\"Model \" + model_name + \" contains long-term music features \"\n \"(beat etc) and cannot be used in \"\n \"segmentation\")\n return labels, class_names, accuracy, cm\n # load input file\n sampling_rate, signal = audioBasicIO.read_audio_file(input_file)\n\n # could not read file\n if sampling_rate == 0:\n return labels, class_names, accuracy, cm\n\n # convert stereo (if) to mono\n signal = audioBasicIO.stereo_to_mono(signal)\n\n # mid-term feature extraction:\n mt_feats, _, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mt_win * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * st_win),\n round(sampling_rate * st_step))\n posterior_matrix = []\n\n # for each feature vector (i.e. for each fix-sized segment):\n for col_index in range(mt_feats.shape[1]):\n # normalize current feature v\n feature_vector = (mt_feats[:, col_index] - mean) / std\n\n # classify vector:\n label_predicted, posterior = \\\n at.classifier_wrapper(classifier, model_type, feature_vector)\n labels.append(label_predicted)\n\n # update probability matrix\n posterior_matrix.append(np.max(posterior))\n labels = np.array(labels)\n\n # convert fix-sized flags to segments and classes\n segs, classes = labels_to_segments(labels, mid_step)\n segs[-1] = len(signal) / float(sampling_rate)\n # Load grount-truth:\n labels_gt, class_names_gt, accuracy, cm = \\\n load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)\n\n return labels, class_names, accuracy, cm\n\n\ndef load_ground_truth(gt_file, labels, class_names, mid_step, plot_results):\n accuracy = 0\n cm = np.array([])\n labels_gt = np.array([])\n if os.path.isfile(gt_file):\n # load ground truth and class names\n labels_gt, class_names_gt = load_ground_truth_segments(gt_file,\n mid_step)\n # map predicted labels to ground truth class names\n # Note: if a predicted label does not belong to the ground truth\n # classes --> -1\n labels_new = []\n for il, l in enumerate(labels):\n if class_names[int(l)] in class_names_gt:\n labels_new.append(class_names_gt.index(class_names[int(l)]))\n else:\n labels_new.append(-1)\n labels_new = np.array(labels_new)\n cm = calculate_confusion_matrix(labels_new, labels_gt, class_names_gt)\n\n accuracy = plot_segmentation_results(labels_new, labels_gt,\n class_names, mid_step, not plot_results)\n if accuracy >= 0:\n print(\"Overall Accuracy: {0:.2f}\".format(accuracy))\n\n return labels_gt, class_names, accuracy, cm\n\n\ndef 
evaluate_segmentation_classification_dir(dir_name, model_name, method_name):\n\n accuracies = []\n class_names = []\n cm_total = np.array([])\n for index, wav_file in enumerate(glob.glob(dir_name + os.sep + '*.wav')):\n print(wav_file)\n\n gt_file = wav_file.replace('.wav', '.segments')\n\n if method_name.lower() in [\"svm\", \"svm_rbf\", \"knn\", \"randomforest\",\n \"gradientboosting\", \"extratrees\"]:\n flags_ind, class_names, accuracy, cm_temp = \\\n mid_term_file_classification(wav_file, model_name, method_name,\n False, gt_file)\n else:\n flags_ind, class_names, accuracy, cm_temp = \\\n hmm_segmentation(wav_file, model_name, False, gt_file)\n if accuracy > 0:\n if not index:\n cm_total = np.copy(cm_temp)\n else:\n cm_total = cm_total + cm_temp\n accuracies.append(accuracy)\n print(cm_temp, class_names)\n print(cm_total)\n\n if len(cm_total.shape) > 1:\n cm_total = cm_total / np.sum(cm_total)\n rec, pre, f1 = compute_metrics(cm_total, class_names)\n\n print(\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \")\n print(\"Average Accuracy: {0:.1f}\".\n format(100.0*np.array(accuracies).mean()))\n print(\"Average recall: {0:.1f}\".format(100.0*np.array(rec).mean()))\n print(\"Average precision: {0:.1f}\".format(100.0*np.array(pre).mean()))\n print(\"Average f1: {0:.1f}\".format(100.0*np.array(f1).mean()))\n print(\"Median Accuracy: {0:.1f}\".\n format(100.0*np.median(np.array(accuracies))))\n print(\"Min Accuracy: {0:.1f}\".format(100.0*np.array(accuracies).min()))\n print(\"Max Accuracy: {0:.1f}\".format(100.0*np.array(accuracies).max()))\n else:\n print(\"Confusion matrix was empty, accuracy for every file was 0\")\n\n\ndef silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,\n weight=0.5, plot=False):\n \"\"\"\n Event Detection (silence removal)\n ARGUMENTS:\n - signal: the input audio signal\n - sampling_rate: sampling freq\n - st_win, st_step: window size and step in seconds\n - smoothWindow: (optinal) smooth window (in seconds)\n - weight: (optinal) weight factor (0 < weight < 1)\n the higher, the more strict\n - plot: (optinal) True if results are to be plotted\n RETURNS:\n - seg_limits: list of segment limits in seconds (e.g [[0.1, 0.9],\n [1.4, 3.0]] means that\n the resulting segments are (0.1 - 0.9) seconds\n and (1.4, 3.0) seconds\n \"\"\"\n\n if weight >= 1:\n weight = 0.99\n if weight <= 0:\n weight = 0.01\n\n # Step 1: feature extraction\n signal = audioBasicIO.stereo_to_mono(signal)\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n st_win * sampling_rate,\n st_step * sampling_rate)\n\n # Step 2: train binary svm classifier of low vs high energy frames\n # keep only the energy short-term sequence (2nd feature)\n st_energy = st_feats[1, :]\n en = np.sort(st_energy)\n # number of 10% of the total short-term windows\n st_windows_fraction = int(len(en) / 10)\n\n # compute \"lower\" 10% energy threshold\n low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15\n\n # compute \"higher\" 10% energy threshold\n high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15\n\n # get all features that correspond to low energy\n low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]\n\n # get all features that correspond to high energy\n high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]\n\n # form the binary classification task and ...\n features = [low_energy.T, high_energy.T]\n # normalize and train the respective svm probabilistic model\n\n # (ONSET vs SILENCE)\n features_norm, mean, std = 
at.normalize_features(features)\n svm = at.train_svm(features_norm, 1.0)\n\n # Step 3: compute onset probability based on the trained svm\n prob_on_set = []\n for index in range(st_feats.shape[1]):\n # for each frame\n cur_fv = (st_feats[:, index] - mean) / std\n # get svm probability (that it belongs to the ONSET class)\n prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])\n prob_on_set = np.array(prob_on_set)\n\n # smooth probability:\n prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)\n\n # Step 4A: detect onset frame indices:\n prog_on_set_sort = np.sort(prob_on_set)\n\n # find probability Threshold as a weighted average\n # of top 10% and lower 10% of the values\n nt = int(prog_on_set_sort.shape[0] / 10)\n threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +\n weight * np.mean(prog_on_set_sort[-nt::]))\n\n max_indices = np.where(prob_on_set > threshold)[0]\n # get the indices of the frames that satisfy the thresholding\n index = 0\n seg_limits = []\n time_clusters = []\n\n # Step 4B: group frame indices to onset segments\n while index < len(max_indices):\n # for each of the detected onset indices\n cur_cluster = [max_indices[index]]\n if index == len(max_indices)-1:\n break\n while max_indices[index+1] - cur_cluster[-1] <= 2:\n cur_cluster.append(max_indices[index+1])\n index += 1\n if index == len(max_indices)-1:\n break\n index += 1\n time_clusters.append(cur_cluster)\n seg_limits.append([cur_cluster[0] * st_step,\n cur_cluster[-1] * st_step])\n\n # Step 5: Post process: remove very small segments:\n min_duration = 0.2\n seg_limits_2 = []\n for s_lim in seg_limits:\n if s_lim[1] - s_lim[0] > min_duration:\n seg_limits_2.append(s_lim)\n seg_limits = seg_limits_2\n\n if plot:\n time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /\n sampling_rate)\n\n plt.subplot(2, 1, 1)\n plt.plot(time_x, signal)\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step), \n prob_on_set)\n plt.title('Signal')\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.title('svm Probability')\n plt.show()\n\n return seg_limits\n\n\ndef speaker_diarization(filename, n_speakers, mid_window=2.0, mid_step=0.2,\n short_window=0.05, lda_dim=35, plot_res=False):\n \"\"\"\n ARGUMENTS:\n - filename: the name of the WAV file to be analyzed\n - n_speakers the number of speakers (clusters) in\n the recording (<=0 for unknown)\n - mid_window (opt) mid-term window size\n - mid_step (opt) mid-term window step\n - short_window (opt) short-term window size\n - lda_dim (opt LDA dimension (0 for no LDA)\n - plot_res (opt) 0 for not plotting the results 1 for plotting\n \"\"\"\n sampling_rate, signal = audioBasicIO.read_audio_file(filename)\n signal = audioBasicIO.stereo_to_mono(signal)\n duration = len(signal) / sampling_rate\n\n base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"data/models\")\n\n classifier_all, mean_all, std_all, class_names_all, _, _, _, _, _ = \\\n at.load_model_knn(os.path.join(base_dir, \"knn_speaker_10\"))\n classifier_fm, mean_fm, std_fm, class_names_fm, _, _, _, _, _ = \\\n at.load_model_knn(os.path.join(base_dir, \"knn_speaker_male_female\"))\n\n mid_feats, st_feats, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mid_window * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * short_window),\n 
round(sampling_rate * short_window * 0.5))\n\n mid_term_features = np.zeros((mid_feats.shape[0] + len(class_names_all) +\n len(class_names_fm), mid_feats.shape[1]))\n\n for index in range(mid_feats.shape[1]):\n feature_norm_all = (mid_feats[:, index] - mean_all) / std_all\n feature_norm_fm = (mid_feats[:, index] - mean_fm) / std_fm\n _, p1 = at.classifier_wrapper(classifier_all, \"knn\", feature_norm_all)\n _, p2 = at.classifier_wrapper(classifier_fm, \"knn\", feature_norm_fm)\n start = mid_feats.shape[0]\n end = mid_feats.shape[0] + len(class_names_all)\n mid_term_features[0:mid_feats.shape[0], index] = mid_feats[:, index]\n mid_term_features[start:end, index] = p1 + 1e-4\n mid_term_features[end::, index] = p2 + 1e-4\n\n mid_feats = mid_term_features # TODO\n feature_selected = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,\n 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]\n\n mid_feats = mid_feats[feature_selected, :]\n\n mid_feats_norm, mean, std = at.normalize_features([mid_feats.T])\n mid_feats_norm = mid_feats_norm[0].T\n n_wins = mid_feats.shape[1]\n\n # remove outliers:\n dist_all = np.sum(distance.squareform(distance.pdist(mid_feats_norm.T)),\n axis=0)\n m_dist_all = np.mean(dist_all)\n i_non_outliers = np.nonzero(dist_all < 1.2 * m_dist_all)[0]\n\n # TODO: Combine energy threshold for outlier removal:\n # EnergyMin = np.min(mt_feats[1,:])\n # EnergyMean = np.mean(mt_feats[1,:])\n # Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0\n # i_non_outliers = np.nonzero(mt_feats[1,:] > Thres)[0]\n # print i_non_outliers\n\n mt_feats_norm_or = mid_feats_norm\n mid_feats_norm = mid_feats_norm[:, i_non_outliers]\n\n # LDA dimensionality reduction:\n if lda_dim > 0:\n\n # extract mid-term features with minimum step:\n window_ratio = int(round(mid_window / short_window))\n step_ratio = int(round(short_window / short_window))\n mt_feats_to_red = []\n num_of_features = len(st_feats)\n num_of_stats = 2\n for index in range(num_of_stats * num_of_features):\n mt_feats_to_red.append([])\n\n # for each of the short-term features:\n for index in range(num_of_features):\n cur_pos = 0\n feat_len = len(st_feats[index])\n while cur_pos < feat_len:\n n1 = cur_pos\n n2 = cur_pos + window_ratio\n if n2 > feat_len:\n n2 = feat_len\n short_features = st_feats[index][n1:n2]\n mt_feats_to_red[index].append(np.mean(short_features))\n mt_feats_to_red[index + num_of_features].\\\n append(np.std(short_features))\n cur_pos += step_ratio\n mt_feats_to_red = np.array(mt_feats_to_red)\n mt_feats_to_red_2 = np.zeros((mt_feats_to_red.shape[0] +\n len(class_names_all) +\n len(class_names_fm),\n mt_feats_to_red.shape[1]))\n limit = mt_feats_to_red.shape[0] + len(class_names_all)\n for index in range(mt_feats_to_red.shape[1]):\n feature_norm_all = (mt_feats_to_red[:, index] - mean_all) / std_all\n feature_norm_fm = (mt_feats_to_red[:, index] - mean_fm) / std_fm\n _, p1 = at.classifier_wrapper(classifier_all, \"knn\",\n feature_norm_all)\n _, p2 = at.classifier_wrapper(classifier_fm, \"knn\", feature_norm_fm)\n mt_feats_to_red_2[0:mt_feats_to_red.shape[0], index] = \\\n mt_feats_to_red[:, index]\n mt_feats_to_red_2[mt_feats_to_red.shape[0]:limit, index] = p1 + 1e-4\n mt_feats_to_red_2[limit::, index] = p2 + 1e-4\n mt_feats_to_red = mt_feats_to_red_2\n mt_feats_to_red = mt_feats_to_red[feature_selected, :]\n mt_feats_to_red, mean, std = at.normalize_features([mt_feats_to_red.T])\n mt_feats_to_red = mt_feats_to_red[0].T\n labels = np.zeros((mt_feats_to_red.shape[1], ))\n lda_step = 1.0\n lda_step_ratio = lda_step / 
short_window\n for index in range(labels.shape[0]):\n labels[index] = int(index * short_window / lda_step_ratio)\n clf = sklearn.discriminant_analysis.\\\n LinearDiscriminantAnalysis(n_components=lda_dim)\n clf.fit(mt_feats_to_red.T, labels)\n mid_feats_norm = (clf.transform(mid_feats_norm.T)).T\n\n if n_speakers <= 0:\n s_range = range(2, 10)\n else:\n s_range = [n_speakers]\n cluster_labels = []\n sil_all = []\n cluster_centers = []\n \n for speakers in s_range:\n k_means = sklearn.cluster.KMeans(n_clusters=speakers)\n k_means.fit(mid_feats_norm.T)\n cls = k_means.labels_ \n means = k_means.cluster_centers_\n\n cluster_labels.append(cls)\n cluster_centers.append(means)\n sil_1, sil_2 = [], []\n for c in range(speakers):\n # for each speaker (i.e. for each extracted cluster)\n clust_per_cent = np.nonzero(cls == c)[0].shape[0] / float(len(cls))\n if clust_per_cent < 0.020:\n sil_1.append(0.0)\n sil_2.append(0.0)\n else:\n # get subset of feature vectors\n mt_feats_norm_temp = mid_feats_norm[:, cls == c]\n # compute average distance between samples\n # that belong to the cluster (a values)\n dist = distance.pdist(mt_feats_norm_temp.T)\n sil_1.append(np.mean(dist)*clust_per_cent)\n sil_temp = []\n for c2 in range(speakers):\n # compute distances from samples of other clusters\n if c2 != c:\n clust_per_cent_2 = np.nonzero(cls == c2)[0].shape[0] /\\\n float(len(cls))\n mid_features_temp = mid_feats_norm[:, cls == c2]\n dist = distance.cdist(mt_feats_norm_temp.T,\n mid_features_temp.T)\n sil_temp.append(np.mean(dist)*(clust_per_cent\n + clust_per_cent_2)/2.0)\n sil_temp = np.array(sil_temp)\n # ... and keep the minimum value (i.e.\n # the distance from the \"nearest\" cluster)\n sil_2.append(min(sil_temp))\n sil_1 = np.array(sil_1)\n sil_2 = np.array(sil_2)\n sil = []\n for c in range(speakers):\n # for each cluster (speaker) compute silhouette\n sil.append((sil_2[c] - sil_1[c]) / (max(sil_2[c], sil_1[c]) + 1e-5))\n # keep the AVERAGE SILLOUETTE\n sil_all.append(np.mean(sil))\n\n imax = int(np.argmax(sil_all))\n # optimal number of clusters\n num_speakers = s_range[imax]\n\n # generate the final set of cluster labels\n # (important: need to retrieve the outlier windows:\n # this is achieved by giving them the value of their\n # nearest non-outlier window)\n cls = np.zeros((n_wins,))\n for index in range(n_wins):\n j = np.argmin(np.abs(index-i_non_outliers))\n cls[index] = cluster_labels[imax][j]\n \n # Post-process method 1: hmm smoothing\n for index in range(1):\n # hmm training\n start_prob, transmat, means, cov = \\\n train_hmm_compute_statistics(mt_feats_norm_or, cls)\n hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], \"diag\")\n hmm.startprob_ = start_prob\n hmm.transmat_ = transmat \n hmm.means_ = means\n hmm.covars_ = cov\n cls = hmm.predict(mt_feats_norm_or.T) \n \n # Post-process method 2: median filtering:\n cls = scipy.signal.medfilt(cls, 13)\n cls = scipy.signal.medfilt(cls, 11)\n\n class_names = [\"speaker{0:d}\".format(c) for c in range(num_speakers)]\n\n # load ground-truth if available\n gt_file = filename.replace('.wav', '.segments')\n # if groundtruth exists\n if os.path.isfile(gt_file):\n seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)\n flags_gt, class_names_gt = segments_to_labels(seg_start, seg_end,\n seg_labs, mid_step)\n\n if plot_res:\n fig = plt.figure() \n if n_speakers > 0:\n ax1 = fig.add_subplot(111)\n else:\n ax1 = fig.add_subplot(211)\n ax1.set_yticks(np.array(range(len(class_names))))\n ax1.axis((0, duration, -1, len(class_names)))\n 
ax1.set_yticklabels(class_names)\n ax1.plot(np.array(range(len(cls))) * mid_step + mid_step / 2.0, cls)\n\n if os.path.isfile(gt_file):\n if plot_res:\n ax1.plot(np.array(range(len(flags_gt))) *\n mid_step + mid_step / 2.0, flags_gt, 'r')\n purity_cluster_m, purity_speaker_m = \\\n evaluate_speaker_diarization(cls, flags_gt)\n print(\"{0:.1f}\\t{1:.1f}\".format(100 * purity_cluster_m,\n 100 * purity_speaker_m))\n if plot_res:\n plt.title(\"Cluster purity: {0:.1f}% - \"\n \"Speaker purity: {1:.1f}%\".format(100 * purity_cluster_m,\n 100 * purity_speaker_m))\n if plot_res:\n plt.xlabel(\"time (seconds)\")\n if n_speakers <= 0:\n plt.subplot(212)\n plt.plot(s_range, sil_all)\n plt.xlabel(\"number of clusters\")\n plt.ylabel(\"average clustering's sillouette\")\n plt.show()\n return cls\n\n\ndef speaker_diarization_evaluation(folder_name, lda_dimensions):\n \"\"\"\n This function prints the cluster purity and speaker purity for\n each WAV file stored in a provided directory (.SEGMENT files\n are needed as ground-truth)\n ARGUMENTS:\n - folder_name: the full path of the folder where the WAV and\n segment (ground-truth) files are stored\n - lda_dimensions: a list of LDA dimensions (0 for no LDA)\n \"\"\"\n types = ('*.wav', )\n wav_files = []\n for files in types:\n wav_files.extend(glob.glob(os.path.join(folder_name, files)))\n \n wav_files = sorted(wav_files)\n\n # get number of unique speakers per file (from ground-truth) \n num_speakers = []\n for wav_file in wav_files:\n gt_file = wav_file.replace('.wav', '.segments')\n if os.path.isfile(gt_file):\n _, _, seg_labs = read_segmentation_gt(gt_file)\n num_speakers.append(len(list(set(seg_labs))))\n else:\n num_speakers.append(-1)\n \n for dim in lda_dimensions:\n print(\"LDA = {0:d}\".format(dim))\n for i, wav_file in enumerate(wav_files):\n speaker_diarization(wav_file, num_speakers[i], 2.0, 0.2, 0.05, dim,\n plot_res=False)\n\n\ndef music_thumbnailing(signal, sampling_rate, short_window=1.0, short_step=0.5,\n thumb_size=10.0, limit_1=0, limit_2=1):\n \"\"\"\n This function detects instances of the most representative part of a\n music recording, also called \"music thumbnails\".\n A technique similar to the one proposed in [1], however a wider set of\n audio features is used instead of chroma features.\n In particular the following steps are followed:\n - Extract short-term audio features. Typical short-term window size: 1\n second\n - Compute the self-similarity matrix, i.e. all pairwise similarities\n between feature vectors\n - Apply a diagonal mask is as a moving average filter on the values of the\n self-similarty matrix.\n The size of the mask is equal to the desirable thumbnail length.\n - Find the position of the maximum value of the new (filtered)\n self-similarity matrix. The audio segments that correspond to the\n diagonial around that position are the selected thumbnails\n \n\n ARGUMENTS:\n - signal: input signal\n - sampling_rate: sampling frequency\n - short_window: window size (in seconds)\n - short_step: window step (in seconds)\n - thumb_size: desider thumbnail size (in seconds)\n \n RETURNS:\n - A1: beginning of 1st thumbnail (in seconds)\n - A2: ending of 1st thumbnail (in seconds)\n - B1: beginning of 2nd thumbnail (in seconds)\n - B2: ending of 2nd thumbnail (in seconds)\n\n USAGE EXAMPLE:\n import audioFeatureExtraction as aF\n [fs, x] = basicIO.readAudioFile(input_file)\n [A1, A2, B1, B2] = musicThumbnailing(x, fs)\n\n [1] Bartsch, M. A., & Wakefield, G. H. (2005). 
Audio thumbnailing\n of popular music using chroma-based representations.\n Multimedia, IEEE Transactions on, 7(1), 96-104.\n \"\"\"\n signal = audioBasicIO.stereo_to_mono(signal)\n # feature extraction:\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n sampling_rate * short_window,\n sampling_rate * short_step)\n\n # self-similarity matrix\n sim_matrix = self_similarity_matrix(st_feats)\n\n # moving filter:\n m_filter = int(round(thumb_size / short_step))\n diagonal = np.eye(m_filter, m_filter)\n sim_matrix = scipy.signal.convolve2d(sim_matrix, diagonal, 'valid')\n\n # post-processing (remove main diagonal elements)\n min_sm = np.min(sim_matrix)\n for i in range(sim_matrix.shape[0]):\n for j in range(sim_matrix.shape[1]):\n if abs(i-j) < 5.0 / short_step or i > j:\n sim_matrix[i, j] = min_sm\n\n # find max position:\n sim_matrix[0:int(limit_1 * sim_matrix.shape[0]), :] = min_sm\n sim_matrix[:, 0:int(limit_1 * sim_matrix.shape[0])] = min_sm\n sim_matrix[int(limit_2 * sim_matrix.shape[0])::, :] = min_sm\n sim_matrix[:, int(limit_2 * sim_matrix.shape[0])::] = min_sm\n\n rows, cols = np.unravel_index(sim_matrix.argmax(), sim_matrix.shape)\n i1 = rows\n i2 = rows\n j1 = cols\n j2 = cols\n\n while i2-i1 < m_filter:\n if i1 <= 0 or j1 <= 0 or i2 >= sim_matrix.shape[0]-2 or \\\n j2 >= sim_matrix.shape[1]-2:\n break\n if sim_matrix[i1-1, j1-1] > sim_matrix[i2 + 1, j2 + 1]:\n i1 -= 1\n j1 -= 1 \n else: \n i2 += 1\n j2 += 1 \n\n return short_step * i1, short_step * i2, short_step * j1, short_step * j2, \\\n sim_matrix\n\n\n\n"
] | [
[
"sklearn.cluster.KMeans",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.concatenate",
"numpy.mean",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"numpy.where",
"numpy.unique",
"scipy.signal.medfilt",
"numpy.arange",
"numpy.eye",
"numpy.copy",
"matplotlib.pyplot.subplot",
"numpy.argmax",
"numpy.std",
"numpy.count_nonzero",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.nonzero",
"scipy.spatial.distance.cdist",
"scipy.signal.convolve2d",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"numpy.abs",
"numpy.sort",
"numpy.ones",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
BwCai/DCAA-UDA | [
"359c2122060aebfbe4384c918768c261fe2dc9c7"
] | [
"models/adaptation_model_stage1.py"
] | [
"from models.base_model import BaseModel\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport os, sys\nimport torch\nimport numpy as np\nimport itertools\n\nfrom torch.autograd import Variable\nfrom optimizers import get_optimizer\nfrom schedulers import get_scheduler\nfrom models.sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback\nfrom models.deeplab_multimodal import DeepLab\nfrom models.decoder import Decoder\nfrom models.aspp import ASPP\nfrom models.discriminator import FCDiscriminator, FCDiscriminator_low, FCDiscriminator_out, FCDiscriminator_class\nfrom loss import get_loss_function\nfrom .utils import freeze_bn, GradReverse, normalisation_pooling\nfrom metrics import runningScore\nimport pdb\n\ndef multimodal_merger(multi_modal_data, is_upsample=False, up_size=None):\n \"\"\"\n [Func Handler] multimodal_merger:\n @Input Params:\n multi_modal_data: dict.\n examples: {\n \"feat_cls\": feat_cls,\n \"output\": output,\n }\n @Reture:\n merge_out: dict.\n examples: {\n \"feat_cls\": feat_cls,\n \"output_comb\": output_comb,\n \"output\": output,\n }\n \"\"\"\n feat_cls = multi_modal_data['feat_cls']\n # merge class features\n feat_cls_cat = torch.cat(feat_cls, 1) # concat \n # merge output pred\n output = multi_modal_data['output']\n output_comb = 0\n for _i in range(len(output)):\n if is_upsample:\n output[_i] = F.interpolate(output[_i], size=up_size, mode='bilinear', align_corners=True)\n output_comb += output[_i]\n\n merge_out = {\n 'feat_cls': feat_cls,\n 'feat_cls_cat': feat_cls_cat,\n 'output_comb': output_comb,\n 'output': output,\n }\n return merge_out\n\nclass CustomMetricsMultimodalMerger():\n \"\"\"\n [Func Handler] objective_vectors_multimodal_merger:\n @Input Params:\n multi_modal_data: dict.\n examples: {\n \"class_threshold_group\": [model.class_threshold_group[modal_idx][i], ...]\n \"objective_vectors_group\": [model.objective_vectors_group[modal_idx][i], ...],\n }\n cate_idx: int. 
0 ~ 18\n modal_ids: list.\n examples: [0, 1] or [0,]\n @Reture:\n merge_out: dict.\n examples: {\n \"class_threshold\": class_threshold,\n \"objective_vectors\": objective_vectors,\n }\n \"\"\"\n\n def __init__(self, modal_num, category_num, model):\n self.modal_num = modal_num\n self.category_num = category_num\n self._model = model\n\n def initialize_model(model):\n self._model = model\n\n def merge_class_threshold(self, modal_ids=[]):\n assert self._model is not None, \"[ERROR] Deeplab Model not initialize before using!\"\n _class_threshold_group = self._model.class_threshold_group[modal_ids]\n return torch.mean(_class_threshold_group, dim=0) # modal_num x 19 --> 19\n\n def merge_clu_threshold(self, clu_threshold, modal_ids=[]):\n _clu_threshold_group = clu_threshold[modal_ids]\n return torch.mean(_clu_threshold_group, dim=0)\n\n def merge_objective_vectors(self, modal_ids=[]):\n assert self._model is not None, \"[ERROR] Deeplab Model not initialize before using!\"\n _modal_num, _cate_num, _feat_dim = self._model.objective_vectors_group.size()\n _objective_vectors = self._model.objective_vectors_group[modal_ids]\n # modal_num x 19 x 256 --> 19 x modal_num x 256 --> 19 x (modal_num x 256)\n assert _objective_vectors.dim() == 4, \"objective_vector dimension != 4\"\n _objective_vectors = _objective_vectors.permute(1, 0, 2).contiguous()\n\n return _objective_vectors.view(_cate_num, -1)\n\nclass CustomMetrics():\n def __init__(self, numbers=19, modal_num=3, model=None):\n self.class_numbers = numbers\n self.classes_recall_thr = np.zeros([19, 3])\n self.classes_recall_thr_num = np.zeros([19])\n self.classes_recall_clu = np.zeros([19, 3])\n self.classes_recall_clu_num = np.zeros([19])\n self.running_metrics_val_threshold = runningScore(self.class_numbers)\n self.running_metrics_val_clusters = runningScore(self.class_numbers)\n self.clu_threshold = torch.full((modal_num + 1, 19), 2.5).cuda()\n self.multimodal_merger = CustomMetricsMultimodalMerger(\n modal_num=modal_num + 1, category_num=numbers, model=model\n )\n \n def update(self, feat_cls, outputs, labels, modal_ids=[0,]): \n '''calculate accuracy. 
caring about recall but not IoU'''\n batch, width, height = labels.shape\n labels = labels.reshape([batch, 1, width, height]).float()\n labels = F.interpolate(labels, size=feat_cls.size()[2:], mode='nearest')\n outputs_threshold = outputs.clone()\n outputs_threshold = F.softmax(outputs_threshold, dim=1)\n #self.running_metrics_val_threshold.update(labels.cpu().numpy(), outputs_threshold.argmax(1).cpu().numpy())\n self.running_metrics_val_threshold.update(labels, outputs_threshold.argmax(1))\n\n _class_threshold_set = self.multimodal_merger.merge_class_threshold(modal_ids=modal_idx)\n for i in range(19):\n outputs_threshold[:, i, :, :] = torch.where(outputs_threshold[:, i, :, :] > _class_threshold_set[i], torch.Tensor([1]).cuda(), torch.Tensor([0]).cuda())\n\n _batch, _channel, _w, _h = outputs_threshold.shape\n _tmp = torch.full([_batch, 1, _w, _h], 0.2,).cuda()\n _tmp = torch.cat((outputs_threshold, _tmp), 1)\n threshold_arg = _tmp.argmax(1, keepdim=True)\n threshold_arg[threshold_arg == 19] = 250 #ignore index\n truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), threshold_arg.cpu().int().numpy())\n self.classes_recall_thr[:, 0] += truth\n self.classes_recall_thr[:, 2] += pred_all\n self.classes_recall_thr[:, 1] += truth_all\n\n outputs_cluster = outputs.clone()\n _objective_vectors_set = self.multimodal_merger.merge_objective_vectors(modal_ids=modal_idx)\n\n for i in range(19):\n outputs_cluster[:, i, :, :] = torch.norm( _objective_vectors_set[i].reshape(-1,1,1).expand(-1,128,256) - feat_cls, 2, dim=1,)\n\n outputs_cluster_min, outputs_cluster_arg = outputs_cluster.min(dim=1, keepdim=True)\n outputs_cluster_second = outputs_cluster.scatter_(1, outputs_cluster_arg, 100)\n if torch.unique(outputs_cluster_second.argmax(1) - outputs_cluster_arg.squeeze()).squeeze().item() != 0:\n raise NotImplementedError('wrong when computing L2 norm!!')\n outputs_cluster_secondmin, outputs_cluster_secondarg = outputs_cluster_second.min(dim=1, keepdim=True)\n #self.running_metrics_val_clusters.update(labels.cpu().numpy(), outputs_cluster_arg.cpu().numpy())\n self.running_metrics_val_clusters.update(labels, outputs_cluster_arg)\n \n tmp_arg = outputs_cluster_arg.clone()\n pdb.set_trace()\n _clu_thresholds = self.multimodal_merger.merge_clu_threshold(self.clu_threshold, modal_ids=modal_ids)\n\n outputs_cluster_arg[(outputs_cluster_secondmin - outputs_cluster_min) < _clu_thresholds] = 250\n truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), outputs_cluster_arg.cpu().int().numpy())\n self.classes_recall_clu[:, 0] += truth\n self.classes_recall_clu[:, 2] += pred_all\n self.classes_recall_clu[:, 1] += truth_all\n return threshold_arg, outputs_cluster_arg\n\n def calc_recall(self, gt, argmax):\n truth = np.zeros([self.class_numbers])\n pred_all = np.zeros([self.class_numbers])\n truth_all = np.zeros([self.class_numbers])\n for i in range(self.class_numbers):\n truth[i] = (gt == i)[argmax == i].sum()\n pred_all[i] = (argmax == i).sum()\n truth_all[i] = (gt == i).sum()\n pass\n return truth, pred_all, truth_all\n \n def calc_mean_Clu_recall(self, ):\n return np.mean(self.classes_recall_clu[:, 0] / self.classes_recall_clu[:, 1])\n \n def calc_mean_Thr_recall(self, ):\n return np.mean(self.classes_recall_thr[:, 0] / self.classes_recall_thr[:, 1])\n\n def reset(self, ):\n self.running_metrics_val_clusters.reset()\n self.running_metrics_val_threshold.reset()\n self.classes_recall_clu = np.zeros([19, 3])\n self.classes_recall_thr = np.zeros([19, 3])\n\nclass CustomModel():\n 
def __init__(self, cfg, writer, logger, use_pseudo_label=False, modal_num=3):\n self.cfg = cfg\n self.writer = writer\n self.class_numbers = 19\n self.logger = logger\n cfg_model = cfg['model']\n self.cfg_model = cfg_model\n self.best_iou = -100\n self.iter = 0\n self.nets = []\n self.split_gpu = 0\n self.default_gpu = cfg['model']['default_gpu']\n self.PredNet_Dir = None\n self.valid_classes = cfg['training']['valid_classes']\n self.G_train = True\n self.cls_feature_weight = cfg['training']['cls_feature_weight']\n self.use_pseudo_label = use_pseudo_label\n self.modal_num = modal_num\n\n # cluster vectors & cuda initialization\n self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()\n self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()\n self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()\n self.class_threshold_group = torch.full([self.modal_num + 1, 19], 0.95).cuda()\n\n #self.metrics = CustomMetrics(self.class_numbers)\n self.metrics = CustomMetrics(self.class_numbers, modal_num=self.modal_num, model=self)\n\n bn = cfg_model['bn']\n if bn == 'sync_bn':\n BatchNorm = SynchronizedBatchNorm2d\n elif bn == 'bn':\n BatchNorm = nn.BatchNorm2d\n elif bn == 'gn':\n BatchNorm = nn.GroupNorm\n else:\n raise NotImplementedError('batch norm choice {} is not implemented'.format(bn))\n if use_pseudo_label:\n self.PredNet = DeepLab(\n num_classes=19,\n backbone=cfg_model['basenet']['version'],\n output_stride=16,\n bn=cfg_model['bn'],\n freeze_bn=True,\n modal_num=self.modal_num\n ).cuda()\n self.load_PredNet(cfg, writer, logger, dir=None, net=self.PredNet)\n self.PredNet_DP = self.init_device(self.PredNet, gpu_id=self.default_gpu, whether_DP=True) \n self.PredNet.eval()\n self.PredNet_num = 0\n\n self.BaseNet = DeepLab(\n num_classes=19,\n backbone=cfg_model['basenet']['version'],\n output_stride=16,\n bn=cfg_model['bn'],\n freeze_bn=True,\n modal_num=self.modal_num\n )\n\n logger.info('the backbone is {}'.format(cfg_model['basenet']['version']))\n\n self.BaseNet_DP = self.init_device(self.BaseNet, gpu_id=self.default_gpu, whether_DP=True)\n self.nets.extend([self.BaseNet])\n self.nets_DP = [self.BaseNet_DP]\n\n # Discriminator\n self.SOURCE_LABEL = 0\n self.TARGET_LABEL = 1\n self.DNets = []\n self.DNets_DP = []\n for _ in range(self.modal_num+1):\n _net_d = FCDiscriminator(inplanes=19)\n self.DNets.append(_net_d)\n _net_d_DP = self.init_device(_net_d, gpu_id=self.default_gpu, whether_DP=True)\n self.DNets_DP.append(_net_d_DP)\n\n self.nets.extend(self.DNets)\n self.nets_DP.extend(self.DNets_DP)\n\n self.optimizers = []\n self.schedulers = [] \n optimizer_cls = torch.optim.SGD\n optimizer_params = {k:v for k, v in cfg['training']['optimizer'].items() \n if k != 'name'}\n\n optimizer_cls_D = torch.optim.Adam\n optimizer_params_D = {k:v for k, v in cfg['training']['optimizer_D'].items() \n if k != 'name'}\n\n if self.use_pseudo_label:\n self.BaseOpti = optimizer_cls(self.BaseNet.parameters(), **optimizer_params)\n else:\n self.BaseOpti = optimizer_cls(self.BaseNet.optim_parameters(cfg['training']['optimizer']['lr']), **optimizer_params)\n self.optimizers.extend([self.BaseOpti])\n\n self.DiscOptis = []\n for _d_net in self.DNets: \n self.DiscOptis.append(\n optimizer_cls_D(_d_net.parameters(), **optimizer_params_D)\n )\n self.optimizers.extend(self.DiscOptis)\n\n self.schedulers = [] \n if self.use_pseudo_label:\n self.BaseSchedule = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])\n 
self.schedulers.extend([self.BaseSchedule])\n else:\n \"\"\"BaseSchedule detail see FUNC: scheduler_step()\"\"\"\n self.learning_rate = cfg['training']['optimizer']['lr']\n self.gamma = cfg['training']['lr_schedule']['gamma']\n self.num_steps = cfg['training']['lr_schedule']['max_iter']\n self._BaseSchedule_nouse = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])\n self.schedulers.extend([self._BaseSchedule_nouse])\n\n self.DiscSchedules = []\n for _disc_opt in self.DiscOptis:\n self.DiscSchedules.append(\n get_scheduler(_disc_opt, cfg['training']['lr_schedule'])\n )\n self.schedulers.extend(self.DiscSchedules)\n\n self.setup(cfg, writer, logger)\n\n self.adv_source_label = 0\n self.adv_target_label = 1\n self.bceloss = nn.BCEWithLogitsLoss(reduce=False)\n self.loss_fn = get_loss_function(cfg)\n self.mseloss = nn.MSELoss()\n self.l1loss = nn.L1Loss()\n self.smoothloss = nn.SmoothL1Loss()\n self.triplet_loss = nn.TripletMarginLoss()\n\n def create_PredNet(self,):\n ss = DeepLab(\n num_classes=19,\n backbone=self.cfg_model['basenet']['version'],\n output_stride=16,\n bn=self.cfg_model['bn'],\n freeze_bn=True,\n modal_num=self.modal_num,\n ).cuda()\n ss.eval()\n return ss\n\n def setup(self, cfg, writer, logger):\n '''\n set optimizer and load pretrained model\n '''\n for net in self.nets:\n # name = net.__class__.__name__\n self.init_weights(cfg['model']['init'], logger, net)\n print(\"Initializition completed\")\n if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:\n print(\"loading pretrained model for {}\".format(net.__class__.__name__))\n net._load_pretrained_model()\n '''load pretrained model\n '''\n if cfg['training']['resume_flag']:\n self.load_nets(cfg, writer, logger)\n pass\n\n def lr_poly(self):\n return self.learning_rate * ((1 - float(self.iter) / self.num_steps) ** (self.gamma))\n\n def adjust_basenet_learning_rate(self):\n lr = self.lr_poly()\n self.BaseOpti.param_groups[0]['lr'] = lr\n if len(self.BaseOpti.param_groups) > 1:\n self.BaseOpti.param_groups[1]['lr'] = lr * 10\n\n def forward(self, input):\n feat, feat_low, att_mask, feat_cls, output = self.BaseNet_DP(input)\n\n return feat, feat_low, feat_cls, output\n\n def forward_Up(self, input):\n feat, feat_low, feat_cls, outputs = self.forward(input)\n output = F.interpolate(outputs[-1], size=input.size()[2:], mode='bilinear', align_corners=True)\n return feat, feat_low, feat_cls, output\n\n def PredNet_Forward(self, input):\n with torch.no_grad():\n _, _, att_mask, feat_cls, output_result = self.PredNet_DP(input)\n return _, _, feat_cls, output_result\n\n def calculate_mean_vector(self, feat_cls, outputs, labels, ):\n outputs_softmax = F.softmax(outputs, dim=1)\n outputs_argmax = outputs_softmax.argmax(dim=1, keepdim=True)\n outputs_argmax = self.process_label(outputs_argmax.float())\n labels_expanded = self.process_label(labels)\n outputs_pred = labels_expanded * outputs_argmax\n scale_factor = F.adaptive_avg_pool2d(outputs_pred, 1)\n vectors = []\n ids = []\n for n in range(feat_cls.size()[0]):\n for t in range(self.class_numbers):\n if scale_factor[n][t].item()==0:\n continue\n if (outputs_pred[n][t] > 0).sum() < 10:\n continue\n s = feat_cls[n] * outputs_pred[n][t]\n scale = torch.sum(outputs_pred[n][t]) / labels.shape[2] / labels.shape[3] * 2\n s = normalisation_pooling()(s, scale)\n s = F.adaptive_avg_pool2d(s, 1) / scale_factor[n][t]\n vectors.append(s)\n ids.append(t)\n return vectors, ids\n\n def step(self, source_x, source_label, source_modal_ids, target_x, target_label, 
target_modal_ids, use_pseudo_loss=False):\n assert len(source_modal_ids) == source_x.size(0), \"modal_ids' batchsize != source_x's batchsize\"\n _, _, source_feat_cls, source_output = self.forward(input=source_x) \n\n \"\"\"source_output: [B x 19 x W x H, ...]\n select modal-branch output in each batchsize\n Specific-modal output\n \"\"\"\n source_output_modal_k = torch.stack(\n [\n source_output[_modal_i][_batch_i]\n for _batch_i, _modal_i in enumerate(source_modal_ids)\n ], \n dim=0,\n )\n # attention output & specific-modal output\n source_output_comb = torch.cat([source_output_modal_k, source_output[-1]], dim=0)\n\n source_label_comb = torch.cat([source_label, source_label.clone()], dim=0)\n\n source_outputUp = F.interpolate(source_output_comb, size=source_x.size()[-2:], mode='bilinear', align_corners=True)\n\n loss_GTA = self.loss_fn(input=source_outputUp, target=source_label_comb)\n #self.PredNet.eval()\n\n # adversarial loss\n # -----------------------------\n \"\"\"Generator (segmentation)\"\"\"\n # -----------------------------\n # On Source Domain \n loss_adv = torch.Tensor([0]).cuda()\n _batch_size = 0\n \n _, _, _, target_output = self.forward(target_x)\n\n target_modal_ids_tensor = torch.Tensor(target_modal_ids).cuda()\n for t_out, _d_net_DP, _d_net, modal_idx in zip(target_output, self.DNets_DP, self.DNets, range(len(target_output))):\n # set grad false\n self.set_requires_grad(self.logger, _d_net, requires_grad = False)\n # true/false discriminator\n t_D_out = _d_net_DP(F.softmax(t_out))\n #source_modal_ids\n loss_temp = torch.mean(self.bceloss(\n t_D_out,\n torch.FloatTensor(t_D_out.data.size()).fill_(1.0).cuda()\n ), [1,2,3])\n\n if modal_idx >= self.modal_num:\n loss_adv += torch.mean(loss_temp)\n elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:\n loss_adv += 0.0\n else:\n loss_adv += torch.mean(torch.masked_select(loss_temp, target_modal_ids_tensor==modal_idx))\n\n _batch_size += t_out.size(0)\n\n #loss_adv /= _batch_size\n loss_adv *= self.cfg['training']['loss_adv_lambda']\n\n loss_G = torch.Tensor([0]).cuda()\n loss_G = loss_G + loss_GTA + loss_adv\n\n self.BaseOpti.zero_grad()\n if loss_G.item() != 0:\n loss_G.backward()\n self.BaseOpti.step()\n\n # -----------------------------\n \"\"\"Discriminator \"\"\"\n # -----------------------------\n \n _batch_size = 0\n loss_D_comb = torch.Tensor([0]).cuda()\n source_modal_ids_tensor = torch.Tensor(source_modal_ids).cuda()\n for s_out, t_out, _d_net_DP, _d_net, _disc_opt, modal_idx in zip(source_output, target_output, self.DNets_DP, self.DNets, self.DiscOptis, range(len(source_output))):\n self.set_requires_grad(self.logger, _d_net, requires_grad = True)\n\n _batch_size = 0\n loss_D = torch.Tensor([0]).cuda()\n # source domain\n s_D_out = _d_net_DP(F.softmax(s_out.detach()))\n\n loss_temp_s = torch.mean(self.bceloss(\n s_D_out,\n torch.FloatTensor(s_D_out.data.size()).fill_(1.0).cuda()\n ), [1,2,3])\n\n if modal_idx >= self.modal_num:\n loss_D += torch.mean(loss_temp_s)\n elif torch.mean(torch.as_tensor((modal_idx==source_modal_ids_tensor), dtype=torch.float32)) == 0:\n loss_D += 0.0\n else:\n loss_D += torch.mean(torch.masked_select(loss_temp_s, source_modal_ids_tensor==modal_idx))\n\n # target domain\n _batch_size += (s_out.size(0) + t_out.size(0))\n \n t_D_out = _d_net_DP(F.softmax(t_out.detach()))\n loss_temp_t = torch.mean(self.bceloss(\n t_D_out,\n torch.FloatTensor(t_D_out.data.size()).fill_(0.0).cuda()\n ), [1,2,3])\n\n if modal_idx >= self.modal_num:\n loss_D += 
torch.mean(loss_temp_t)\n elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:\n loss_D += 0.0\n else:\n loss_D += torch.mean(torch.masked_select(loss_temp_t, target_modal_ids_tensor==modal_idx))\n\n loss_D *= self.cfg['training']['loss_adv_lambda']*0.5\n\n loss_D_comb += loss_D\n \n _disc_opt.zero_grad()\n if loss_D_comb.item() != 0:\n loss_D_comb.backward()\n _disc_opt.step()\n\n return loss_GTA, loss_adv, loss_D_comb\n\n\n def process_label(self, label):\n batch, channel, w, h = label.size()\n pred1 = torch.zeros(batch, 20, w, h).cuda()\n id = torch.where(label < 19, label, torch.Tensor([19]).cuda())\n pred1 = pred1.scatter_(1, id.long(), 1)\n return pred1\n\n def class_vectors_alignment(self, ids, vectors, modal_ids=[0,]):\n #loss = torch.Tensor([0]).cuda(self.default_gpu)\n loss = torch.Tensor([0]).cuda()\n\n \"\"\"construct category objective vectors\"\"\"\n # objective_vectors_group 2 x 19 x 256 --> 19 x 512\n _objective_vectors_set = self.metrics.multimodal_merger.merge_objective_vectors(modal_ids=modal_idx)\n\n for i in range(len(ids)):\n if ids[i] not in self.valid_classes:\n continue\n new_loss = self.smoothloss(vectors[i].squeeze().cuda(), _objective_vectors[ids[i]])\n while (new_loss.item() > 5):\n new_loss = new_loss / 10\n loss = loss + new_loss\n loss = loss / len(ids) * 10\n return loss\n\n def freeze_bn_apply(self):\n for net in self.nets:\n net.apply(freeze_bn)\n for net in self.nets_DP:\n net.apply(freeze_bn)\n\n def scheduler_step(self):\n if self.use_pseudo_label:\n for scheduler in self.schedulers:\n scheduler.step()\n else:\n \"\"\"skipped _BaseScheduler_nouse\"\"\"\n for scheduler in self.schedulers[1:]:\n scheduler.step()\n # baseNet scheduler\n self.adjust_basenet_learning_rate()\n \n def optimizer_zerograd(self):\n for optimizer in self.optimizers:\n optimizer.zero_grad()\n \n def optimizer_step(self):\n for opt in self.optimizers:\n opt.step()\n\n def init_device(self, net, gpu_id=None, whether_DP=False):\n gpu_id = gpu_id or self.default_gpu\n device = torch.device(\"cuda:{}\".format(gpu_id) if torch.cuda.is_available() else 'cpu')\n net = net.to(device)\n # if torch.cuda.is_available():\n if whether_DP:\n net = DataParallelWithCallback(net, device_ids=range(torch.cuda.device_count()))\n return net\n \n def eval(self, net=None, logger=None):\n \"\"\"Make specific models eval mode during test time\"\"\"\n if net == None:\n for net in self.nets:\n net.eval()\n for net in self.nets_DP:\n net.eval()\n if logger!=None: \n logger.info(\"Successfully set the model eval mode\") \n else:\n net.eval()\n if logger!=None: \n logger(\"Successfully set {} eval mode\".format(net.__class__.__name__))\n return\n\n def train(self, net=None, logger=None):\n if net==None:\n for net in self.nets:\n net.train()\n for net in self.nets_DP:\n net.train()\n else:\n net.train()\n return\n\n def set_requires_grad(self, logger, net, requires_grad = False):\n \"\"\"Set requires_grad=Fasle for all the networks to avoid unnecessary computations\n Parameters:\n net (BaseModel) -- the network which will be operated on\n requires_grad (bool) -- whether the networks require gradients or not\n \"\"\"\n for parameter in net.parameters():\n parameter.requires_grad = requires_grad\n \n def set_requires_grad_layer(self, logger, net, layer_type='batchnorm', requires_grad=False): \n ''' set specific type of layers whether needing grad\n '''\n\n # print('Warning: all the BatchNorm params are fixed!')\n # logger.info('Warning: all the BatchNorm params are 
fixed!')\n for net in self.nets:\n for _i in net.modules():\n if _i.__class__.__name__.lower().find(layer_type.lower()) != -1:\n _i.weight.requires_grad = requires_grad\n return\n\n def init_weights(self, cfg, logger, net, init_type='normal', init_gain=0.02):\n \"\"\"Initialize network weights.\n\n Parameters:\n net (network) -- network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n\n We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\n work better for some applications. Feel free to try yourself.\n \"\"\"\n init_type = cfg.get('init_type', init_type)\n init_gain = cfg.get('init_gain', init_gain)\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n nn.init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n nn.init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n nn.init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias.data, 0.0)\n elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 \\\n or isinstance(m, nn.GroupNorm):\n m.weight.data.fill_(1)\n m.bias.data.zero_() # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n\n\n print('initialize {} with {}'.format(init_type, net.__class__.__name__))\n logger.info('initialize {} with {}'.format(init_type, net.__class__.__name__))\n net.apply(init_func) # apply the initialization function <init_func>\n pass\n\n def adaptive_load_nets(self, net, model_weight):\n model_dict = net.state_dict()\n pretrained_dict = {k : v for k, v in model_weight.items() if k in model_dict}\n \n # print(\"[INFO] Pretrained dict:\", pretrained_dict.keys())\n model_dict.update(pretrained_dict)\n net.load_state_dict(model_dict)\n\n def load_nets(self, cfg, writer, logger): # load pretrained weights on the net\n if os.path.isfile(cfg['training']['resume']):\n logger.info(\n \"Loading model and optimizer from checkpoint '{}'\".format(cfg['training']['resume'])\n )\n checkpoint = torch.load(cfg['training']['resume'])\n _k = -1\n net_state_no = {}\n for net in self.nets:\n name = net.__class__.__name__\n if name not in net_state_no:\n net_state_no[name] = 0\n else:\n net_state_no[name] += 1\n _k += 1\n if checkpoint.get(name) == None:\n continue\n if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:\n continue\n if isinstance(checkpoint[name], list):\n self.adaptive_load_nets(net, checkpoint[name][net_state_no[name]][\"model_state\"])\n else:\n print(\"*****************************************\")\n print(\"[WARNING] Using depreciated load version! 
Model {}\".format(name))\n print(\"*****************************************\")\n self.adaptive_load_nets(net, checkpoint[name][\"model_state\"])\n if cfg['training']['optimizer_resume']:\n if isinstance(checkpoint[name], list):\n self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][net_state_no[name]][\"optimizer_state\"])\n self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][net_state_no[name]][\"scheduler_state\"])\n else:\n self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][\"optimizer_state\"])\n self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][\"scheduler_state\"])\n self.iter = checkpoint[\"iter\"]\n #self.best_iou = checkpoint['best_iou']\n logger.info(\n \"Loaded checkpoint '{}' (iter {})\".format(\n cfg['training']['resume'], checkpoint[\"iter\"]\n )\n )\n else:\n raise Exception(\"No checkpoint found at '{}'\".format(cfg['training']['resume']))\n\n\n def load_PredNet(self, cfg, writer, logger, dir=None, net=None): # load pretrained weights on the net\n dir = dir or cfg['training']['Pred_resume']\n best_iou = 0\n if os.path.isfile(dir):\n logger.info(\n \"Loading model and optimizer from checkpoint '{}'\".format(dir)\n )\n checkpoint = torch.load(dir)\n name = net.__class__.__name__\n if checkpoint.get(name) == None:\n return\n if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:\n return\n if isinstance(checkpoint[name], list):\n self.adaptive_load_nets(net, checkpoint[name][0][\"model_state\"])\n else:\n self.adaptive_load_nets(net, checkpoint[name][\"model_state\"])\n iter = checkpoint[\"iter\"]\n best_iou = checkpoint['best_iou']\n logger.info(\n \"Loaded checkpoint '{}' (iter {}) (best iou {}) for PredNet\".format(\n dir, checkpoint[\"iter\"], best_iou\n )\n )\n else:\n raise Exception(\"No checkpoint found at '{}'\".format(dir))\n if hasattr(net, 'best_iou'):\n #net.best_iou = best_iou\n pass\n return best_iou\n\n\n def set_optimizer(self, optimizer): #set optimizer to all nets\n pass\n\n def reset_objective_SingleVector(self,):\n self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()\n self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()\n self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()\n\n def update_objective_SingleVector(self, vectors, vectors_num, name='moving_average'):\n #vector = vector.squeeze().detach()\n if torch.sum(vectors) == 0:\n return\n if name == 'moving_average':\n self.objective_vectors_group = self.objective_vectors_group * 0.9999 + 0.0001 * vectors\n self.objective_vectors_num_group += vectors_num\n self.objective_vectors_num_group = min(self.objective_vectors_num_group, 3000)\n elif name == 'mean':\n self.objective_vectors_group = self.objective_vectors_group * self.objective_vectors_num_group + vectors\n self.objective_vectors_num_group += vectors_num\n self.objective_vectors_group = self.objective_vectors_group / self.objective_vectors_num_group\n self.objective_vectors_num_group = min(self.objective_vectors_num_group, 3000)\n else:\n raise NotImplementedError('no such updating way of objective vectors {}'.format(name))\n\ndef grad_reverse(x):\n return GradReverse()(x)\n\n"
] | [
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.cat",
"torch.load",
"torch.zeros",
"torch.sum",
"torch.nn.BCEWithLogitsLoss",
"numpy.mean",
"torch.no_grad",
"torch.nn.functional.interpolate",
"torch.cuda.is_available",
"torch.nn.L1Loss",
"torch.masked_select",
"numpy.zeros",
"torch.nn.SmoothL1Loss",
"torch.full",
"torch.nn.init.constant_",
"torch.nn.init.xavier_normal_",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.init.normal_",
"torch.cuda.device_count",
"torch.as_tensor",
"torch.Tensor",
"torch.nn.init.orthogonal_",
"torch.nn.TripletMarginLoss",
"torch.nn.MSELoss",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OSUrobotics/KinovaGrasping | [
"f22af60d3683fdc4ffecf49ccff179fbc6750748"
] | [
"gym-kinova-gripper/plotting_code/other_plots.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\n\n## Extra plotting functions that can be called for quick analysis\n\ndef plot_timestep_distribution(success_timesteps=None, fail_timesteps=None, all_timesteps=None, expert_saving_dir=None):\n \"\"\" Plot the distribution of time steps over successful and failed episodes \"\"\"\n if all_timesteps is None:\n success_timesteps = np.load(expert_saving_dir + \"/success_timesteps.npy\")\n fail_timesteps = np.load(expert_saving_dir + \"/fail_timesteps.npy\")\n all_timesteps = np.load(expert_saving_dir + \"/all_timesteps.npy\")\n\n n_bins = 40\n # We can set the number of bins with the `bins` kwarg\n plt.hist(all_timesteps, bins=n_bins, color=\"g\")\n plt.title(\"Total time steps distribution for all episodes (3x speed)\", weight='bold')\n plt.xlabel('# of time steps per episode')\n plt.ylabel('# of episodes with the time step count')\n plt.xlim(0, 800)\n plt.savefig(expert_saving_dir + \"/total_timestep_distribution\")\n plt.clf()\n\n plt.hist(success_timesteps, bins=n_bins, color=\"b\")\n plt.title(\"Time steps distribution for Successful episodes (3x speed)\", weight='bold')\n plt.xlabel('# of time steps per episode')\n plt.ylabel('# of episodes with the time step count')\n plt.savefig(expert_saving_dir + \"/success_timestep_distribution\")\n plt.clf()\n\n plt.hist(fail_timesteps, bins=n_bins, color=\"r\")\n plt.title(\"Time steps distribution for Failed episodes (3x speed)\", weight='bold')\n plt.xlabel('# of time steps per episode')\n plt.ylabel('# of episodes with the time step count')\n plt.savefig(expert_saving_dir + \"/fail_timestep_distribution\")\n plt.clf()\n\n\n'''\n# Plot the average velocity over an episode\ndef plot_average_velocity(replay_buffer,num_timesteps):\n \"\"\" Plot the average velocity over a certain number of episodes \"\"\"\n velocity_dir = \"./expert_average_velocity\"\n if not os.path.isdir(velocity_dir):\n os.mkdir(velocity_dir)\n\n #num_episodes = len(f1_vels)\n\n #plt.plot(np.arrange(len(f1_vels)), f1_vels)\n\n max_timesteps = 30\n timestep_vel_count = np.zeros(max_timesteps)\n wrist_avg_vels = np.zeros(max_timesteps)\n f1_avg_vels = np.zeros(max_timesteps)\n f2_avg_vels = np.zeros(max_timesteps)\n f3_avg_vels = np.zeros(max_timesteps)\n\n for episode_actions in replay_buffer.action:\n for timestep_idx in range(len(episode_actions)):\n timestep_vel_count[timestep_idx] += 1\n wrist_avg_vels[timestep_idx] = (wrist_avg_vels[timestep_idx] + episode_actions[timestep_idx][0]) / timestep_vel_count[timestep_idx]\n f1_avg_vels[timestep_idx] = (f1_avg_vels[timestep_idx] + episode_actions[timestep_idx][1]) / \\\n timestep_vel_count[timestep_idx]\n f2_avg_vels[timestep_idx] = (f2_avg_vels[timestep_idx] + episode_actions[timestep_idx][2]) / \\\n timestep_vel_count[timestep_idx]\n f3_avg_vels[timestep_idx] = (f3_avg_vels[timestep_idx] + episode_actions[timestep_idx][3]) / \\\n timestep_vel_count[timestep_idx]\n\n num_episodes = len(replay_buffer.action)\n print(\"replay_buffer.action: \",replay_buffer.action)\n print(\"f1_avg_vels: \",f1_avg_vels)\n plt.plot(np.arange(num_timesteps), f1_avg_vels, color=\"r\", label=\"Finger1\")\n plt.plot(np.arange(num_timesteps), f2_avg_vels, color=\"b\", label=\"Finger2\")\n plt.plot(np.arange(num_timesteps), f3_avg_vels, color=\"g\", label=\"Finger3\")\n plt.plot(np.arange(num_timesteps), wrist_avg_vels, color=\"y\", label=\"Wrist\")\n plt.legend()\n\n plt.title(\"Average velocity over \"+str(num_episodes)+\" episodes\", weight='bold')\n plt.xlabel('Timestep within an episode')\n 
plt.ylabel('Average Velocity at Timestep')\n #plt.savefig(velocity_dir + \"/velocity_plot\")\n #plt.clf()\n plt.show()\n'''"
] | [
[
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wenyuC94/LogConcComp | [
"b17d6ba6a102ba83a8415774b0e6da27a362bd5d"
] | [
"src/utils.py"
] | [
"import os\nimport numpy as np\nimport numba as nb\n\ndef create_folder(storage_path):\n if not os.path.isdir(storage_path):\n os.makedirs(storage_path,exist_ok=True)\n lsdir = os.listdir(storage_path)\n for item in [\"info\",\"hist\",\"soln\",\"figs\"]:\n if item not in lsdir:\n os.makedirs(storage_path+item+\"/\",exist_ok=True)\n if item == \"figs\":\n lsdir_figs = os.listdir(storage_path+item+\"/\")\n for item1 in [\"crop\",\"raw\"]:\n if item1 not in lsdir_figs:\n os.makedirs(storage_path+item+\"/\"+item1+\"/\",exist_ok=True)\n \n \ndef time_to_string(runtime):\n seconds = runtime%60\n runmins = (runtime-seconds)/60\n mins = int(runmins%60)\n runhrs = (runmins-mins)/60\n hrs = int(runhrs)\n return \"%.2d:%.2d:%05.2f\"%(hrs,mins,seconds)\n\ndef multivariate_laplace(n,d,rng=None, random_state=None):\n rng = rng if rng is not None else np.random.RandomState(random_state)\n X = rng.randn(n,d)\n Z = rng.exponential(size=(n,1))\n return X*np.sqrt(Z)\n\n\[email protected](cache=True)\ndef np_apply_along_axis(func1d, axis, arr):\n assert arr.ndim == 2\n assert axis in [0, 1]\n if axis == 0:\n result = np.empty(arr.shape[1])\n for i in range(len(result)):\n result[i] = func1d(arr[:, i])\n else:\n result = np.empty(arr.shape[0])\n for i in range(len(result)):\n result[i] = func1d(arr[i, :])\n return result\n\n\[email protected](cache=True)\ndef np_apply_along_axis_kd(funckd, axis, arr, k = -1):\n assert arr.ndim == 2\n assert axis in [0, 1]\n if axis == 0:\n k = k if k > 0 else arr.shape[0]\n result = np.empty((k,arr.shape[1]))\n for i in range(arr.shape[1]):\n result[:, i] = funckd(arr[:, i])\n else:\n k = k if k > 0 else arr.shape[1]\n result = np.empty((arr.shape[0],k))\n for i in range(arr.shape[0]):\n result[i, :] = funckd(arr[i, :])\n return result\n\[email protected](cache=True)\ndef split(n, B):\n sep = n//B\n rem = n%B\n indices = []\n last = 0\n cur = 0\n for i in range(B):\n cur = last + sep + (i < rem)\n indices.append(cur)\n last = cur\n return indices\n\n"
] | [
[
"numpy.random.RandomState",
"numpy.sqrt",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
1suancaiyu/STEP | [
"54195112990feaee137f5137775c736d07c2d26f",
"54195112990feaee137f5137775c736d07c2d26f"
] | [
"classifier_stgcn_real_only/utils/temp.py",
"classifier_hybrid/main.py"
] | [
"import h5py\nimport os\nimport numpy as np\n\nbase_path = os.path.dirname(os.path.realpath(__file__))\nfeature_file = '/media/uttaran/FCE1-7BF3/Gamma/Gait/classifier_stgcn/model_classifier_stgcn/featuresCombineddeep_features.txt'\nf = np.loadtxt(feature_file)\nfCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/featuresCombined.h5', 'r')\nfkeys = fCombined.keys()\ndfCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/deepFeaturesCombined.h5', 'w')\nfor i, fkey in enumerate(fkeys):\n fname = [fkey][0]\n feature = f[i, :]\n dfCombined.create_dataset(fname, data=feature)\ndfCombined.close()\n",
"import argparse\nimport os\nimport numpy as np\nfrom utils import loader, processor\n\nimport torch\nimport torchlight\n\n\nbase_path = os.path.dirname(os.path.realpath(__file__))\ndata_path = os.path.join(base_path, '../data')\nftype = 'Combined'\ncoords = 3\njoints = 16\ncycles = 1\nmodel_path = os.path.join(base_path, 'model_classifier_combined2/features'+ftype)\n\n\nparser = argparse.ArgumentParser(description='Gait Gen')\nparser.add_argument('--batch-size', type=int, default=8, metavar='B',\n help='input batch size for training (default: 8)')\nparser.add_argument('--num-worker', type=int, default=4, metavar='W',\n help='input batch size for training (default: 4)')\nparser.add_argument('--start_epoch', type=int, default=0, metavar='SE',\n help='starting epoch of training (default: 0)')\nparser.add_argument('--num_epoch', type=int, default=1000, metavar='NE',\n help='number of epochs to train (default: 500)')\nparser.add_argument('--optimizer', type=str, default='Adam', metavar='O',\n help='optimizer (default: SGD)')\nparser.add_argument('--base-lr', type=float, default=0.001, metavar='L',\n help='base learning rate (default: 0.1)')\nparser.add_argument('--step', type=list, default=[0.5, 0.75, 0.875], metavar='[S]',\n help='fraction of steps when learning rate will be decreased (default: [0.5, 0.75, 0.875])')\nparser.add_argument('--nesterov', action='store_true', default=True,\n help='use nesterov')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='momentum (default: 0.9)')\nparser.add_argument('--weight-decay', type=float, default=1e-4, metavar='D',\n help='Weight decay (default: 1e-4)')\nparser.add_argument('--eval-interval', type=int, default=1, metavar='EI',\n help='interval after which model is evaluated (default: 1)')\nparser.add_argument('--log-interval', type=int, default=100, metavar='LI',\n help='interval after which log is printed (default: 100)')\nparser.add_argument('--show-topk', type=list, default=[1], metavar='[K]',\n help='top K accuracy to show (default: [1])')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--pavi-log', action='store_true', default=False,\n help='pavi log')\nparser.add_argument('--print-log', action='store_true', default=True,\n help='print log')\nparser.add_argument('--save-log', action='store_true', default=True,\n help='save log')\nparser.add_argument('--work-dir', type=str, default=model_path, metavar='WD',\n help='path to save')\n# TO ADD: save_result\n\nargs = parser.parse_args()\ndevice = 'cuda:0'\n\ndata_train, data_test, labels_train, labels_test = loader.load_data(data_path, ftype, joints, coords, cycles=cycles)\naff_features = len(data_train[0][0])\nnum_classes = np.unique(labels_train).shape[0]\ndata_loader = {\n 'train': torch.utils.data.DataLoader(\n dataset=loader.TrainTestLoader(data_train, labels_train, joints, coords, num_classes),\n batch_size=args.batch_size,\n shuffle=True,\n drop_last=True),\n 'test': torch.utils.data.DataLoader(\n dataset=loader.TrainTestLoader(data_test, labels_test, joints, coords, num_classes),\n batch_size=args.batch_size,\n shuffle=True,\n drop_last=True)}\ngraph_dict = {'strategy': 'spatial'}\npr = processor.Processor(args, data_loader, coords, aff_features, num_classes, graph_dict, device=device)\npr.train()\n"
] | [
[
"numpy.loadtxt"
],
[
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |