repo_name: string (lengths 8–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
THUMNLab/AutoGL
[ "7b551961e90f5042d9b91d92c083f3f09dd9dbdd", "7b551961e90f5042d9b91d92c083f3f09dd9dbdd" ]
[ "autogl/module/nas/estimator/one_shot.py", "autogl/solver/classifier/graph_classifier.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom . import register_nas_estimator\nfrom ..space import BaseSpace\nfrom .base import BaseEstimator\n\n\n@register_nas_estimator(\"oneshot\")\nclass OneShotEstimator(BaseEstimator):\n \"\"\"\n One shot estimator.\n\n Use model directly to get estimations.\n \"\"\"\n\n def infer(self, model: BaseSpace, dataset, mask=\"train\"):\n device = next(model.parameters()).device\n dset = dataset[0].to(device)\n pred = model(dset)[getattr(dset, f\"{mask}_mask\")]\n y = dset.y[getattr(dset, f\"{mask}_mask\")]\n loss = getattr(F, self.loss_f)(pred, y)\n # acc=sum(pred.max(1)[1]==y).item()/y.size(0)\n probs = F.softmax(pred, dim=1).detach().cpu().numpy()\n y = y.cpu()\n metrics = [eva.evaluate(probs, y) for eva in self.evaluation]\n return metrics, loss\n", "\"\"\"\nAuto Classfier for Graph Node Classification\n\"\"\"\nimport time\nimport json\n\nfrom copy import deepcopy\n\nimport torch\nimport numpy as np\nimport yaml\n\nfrom .base import BaseClassifier\nfrom ...module.feature import FEATURE_DICT\nfrom ...module.model import BaseModel, MODEL_DICT\nfrom ...module.train import TRAINER_DICT, get_feval, BaseGraphClassificationTrainer\nfrom ..base import _initialize_single_model, _parse_hp_space\nfrom ..utils import LeaderBoard, set_seed\nfrom ...datasets import utils\nfrom ...utils import get_logger\n\nLOGGER = get_logger(\"GraphClassifier\")\n\n\nclass AutoGraphClassifier(BaseClassifier):\n \"\"\"\n Auto Multi-class Graph Classifier.\n\n Used to automatically solve the graph classification problems.\n\n Parameters\n ----------\n feature_module: autogl.module.feature.BaseFeatureEngineer or str or None\n The (name of) auto feature engineer used to process the given dataset.\n Disable feature engineer by setting it to ``None``. Default ``deepgl``.\n\n graph_models: list of autogl.module.model.BaseModel or list of str\n The (name of) models to be optimized as backbone. Default ``['gat', 'gcn']``.\n\n hpo_module: autogl.module.hpo.BaseHPOptimizer or str or None\n The (name of) hpo module used to search for best hyper parameters.\n Disable hpo by setting it to ``None``. Default ``anneal``.\n\n ensemble_module: autogl.module.ensemble.BaseEnsembler or str or None\n The (name of) ensemble module used to ensemble the multi-models found.\n Disable ensemble by setting it to ``None``. Default ``voting``.\n\n max_evals: int (Optional)\n If given, will set the number eval times the hpo module will use.\n Only be effective when hpo_module is ``str``. Default ``None``.\n\n trainer_hp_space: Iterable[dict] (Optional)\n trainer hp space or list of trainer hp spaces configuration.\n If a single trainer hp is given, will specify the hp space of trainer for\n every model. If a list of trainer hp is given, will specify every model\n with corrsponding trainer hp space. Default ``None``.\n\n model_hp_spaces: Iterable[Iterable[dict]] (Optional)\n model hp space configuration.\n If given, will specify every hp space of every passed model. Default ``None``.\n\n size: int (Optional)\n The max models ensemble module will use. Default ``None``.\n\n device: torch.device or str\n The device where model will be running on. If set to ``auto``, will use gpu\n when available. You can also specify the device by directly giving ``gpu`` or\n ``cuda:0``, etc. 
Default ``auto``.\n \"\"\"\n\n # pylint: disable=W0102\n\n def __init__(\n self,\n feature_module=None,\n graph_models=[\"gin\", \"topkpool\"],\n # nas_algorithms=None,\n # nas_spaces=None,\n # nas_estimators=None,\n hpo_module=\"anneal\",\n ensemble_module=\"voting\",\n max_evals=50,\n default_trainer=None,\n trainer_hp_space=None,\n model_hp_spaces=None,\n size=4,\n device=\"auto\",\n ):\n\n super().__init__(\n feature_module=feature_module,\n graph_models=graph_models,\n nas_algorithms=None, # nas_algorithms,\n nas_spaces=None, # nas_spaces,\n nas_estimators=None, # nas_estimators,\n hpo_module=hpo_module,\n ensemble_module=ensemble_module,\n max_evals=max_evals,\n default_trainer=default_trainer or \"GraphClassificationFull\",\n trainer_hp_space=trainer_hp_space,\n model_hp_spaces=model_hp_spaces,\n size=size,\n device=device,\n )\n\n self.dataset = None\n\n def _init_graph_module(\n self,\n graph_models,\n num_classes,\n num_features,\n feval,\n device,\n loss,\n num_graph_features,\n ) -> \"AutoGraphClassifier\":\n # load graph network module\n self.graph_model_list = []\n if isinstance(graph_models, (list, tuple)):\n for model in graph_models:\n if isinstance(model, str):\n if model in MODEL_DICT:\n self.graph_model_list.append(\n MODEL_DICT[model](\n num_classes=num_classes,\n num_features=num_features,\n num_graph_features=num_graph_features,\n device=device,\n init=False,\n )\n )\n else:\n raise KeyError(\"cannot find model %s\" % (model))\n elif isinstance(model, type) and issubclass(model, BaseModel):\n self.graph_model_list.append(\n model(\n num_classes=num_classes,\n num_features=num_features,\n num_graph_features=num_graph_features,\n device=device,\n init=False,\n )\n )\n elif isinstance(model, BaseModel):\n # setup the hp of num_classes and num_features\n model.set_num_classes(num_classes)\n model.set_num_features(num_features)\n model.set_num_graph_features(num_graph_features)\n self.graph_model_list.append(model.to(device))\n elif isinstance(model, BaseGraphClassificationTrainer):\n # receive a trainer list, put trainer to list\n assert (\n model.get_model() is not None\n ), \"Passed trainer should contain a model\"\n model.model.set_num_classes(num_classes)\n model.model.set_num_features(num_features)\n model.model.set_num_graph_features(num_graph_features)\n model.update_parameters(\n num_classes=num_classes,\n num_features=num_features,\n num_graph_features=num_graph_features,\n loss=loss,\n feval=feval,\n device=device,\n )\n self.graph_model_list.append(model)\n else:\n raise KeyError(\"cannot find graph network %s.\" % (model))\n else:\n raise ValueError(\n \"need graph network to be (list of) str or a BaseModel class/instance, get\",\n graph_models,\n \"instead.\",\n )\n\n # wrap all model_cls with specified trainer\n for i, model in enumerate(self.graph_model_list):\n # set model hp space\n if self._model_hp_spaces is not None:\n if self._model_hp_spaces[i] is not None:\n if isinstance(model, BaseGraphClassificationTrainer):\n model.model.hyper_parameter_space = self._model_hp_spaces[i]\n else:\n model.hyper_parameter_space = self._model_hp_spaces[i]\n # initialize trainer if needed\n if isinstance(model, BaseModel):\n name = (\n self._default_trainer\n if isinstance(self._default_trainer, str)\n else self._default_trainer[i]\n )\n model = TRAINER_DICT[name](\n model=model,\n num_features=num_features,\n num_classes=num_classes,\n loss=loss,\n feval=feval,\n device=device,\n num_graph_features=num_graph_features,\n init=False,\n )\n # set trainer hp space\n if 
self._trainer_hp_space is not None:\n if isinstance(self._trainer_hp_space[0], list):\n current_hp_for_trainer = self._trainer_hp_space[i]\n else:\n current_hp_for_trainer = self._trainer_hp_space\n model.hyper_parameter_space = current_hp_for_trainer\n self.graph_model_list[i] = model\n\n return self\n\n \"\"\"\n # currently disabled\n def _init_nas_module(\n self, num_features, num_classes, num_graph_features, feval, device, loss\n ):\n for algo, space, estimator in zip(\n self.nas_algorithms, self.nas_spaces, self.nas_estimators\n ):\n # TODO: initialize important parameters\n pass\n \"\"\"\n\n # pylint: disable=arguments-differ\n def fit(\n self,\n dataset,\n time_limit=-1,\n inplace=False,\n train_split=None,\n val_split=None,\n evaluation_method=\"infer\",\n seed=None,\n ) -> \"AutoGraphClassifier\":\n \"\"\"\n Fit current solver on given dataset.\n\n Parameters\n ----------\n dataset: torch_geometric.data.dataset.Dataset\n The multi-graph dataset needed to fit on.\n\n time_limit: int\n The time limit of the whole fit process (in seconds). If set below 0, will ignore\n time limit. Default ``-1``.\n\n inplace: bool\n Whether we process the given dataset in inplace manner. Default ``False``.\n Set it to True if you want to save memory by modifying the given dataset directly.\n\n train_split: float or int (Optional)\n The train ratio (in ``float``) or number (in ``int``) of dataset. If you want to use\n default train/val/test split in dataset, please set this to ``None``.\n Default ``None``.\n\n val_split: float or int (Optional)\n The validation ratio (in ``float``) or number (in ``int``) of dataset. If you want to\n use default train/val/test split in dataset, please set this to ``None``.\n Default ``None``.\n\n evaluation_method: (list of) str autogl.module.train.evaluation\n A (list of) evaluation method for current solver. If ``infer``, will automatically\n determine. Default ``infer``.\n\n seed: int (Optional)\n The random seed. If set to ``None``, will run everything at random.\n Default ``None``.\n\n Returns\n -------\n self: autogl.solver.AutoGraphClassifier\n A reference of current solver.\n \"\"\"\n\n set_seed(seed)\n\n if time_limit < 0:\n time_limit = 3600 * 24\n time_begin = time.time()\n\n # initialize leaderboard\n if evaluation_method == \"infer\":\n if hasattr(dataset, \"metric\"):\n evaluation_method = [dataset.metric]\n else:\n num_of_label = dataset.num_classes\n if num_of_label == 2:\n evaluation_method = [\"auc\"]\n else:\n evaluation_method = [\"acc\"]\n assert isinstance(evaluation_method, list)\n evaluator_list = get_feval(evaluation_method)\n\n self.leaderboard = LeaderBoard(\n [e.get_eval_name() for e in evaluator_list],\n {e.get_eval_name(): e.is_higher_better() for e in evaluator_list},\n )\n\n # set up the dataset\n if train_split is None and val_split is None:\n assert hasattr(dataset, \"train_split\") and hasattr(dataset, \"val_split\"), (\n \"The dataset has no default train/val split! \"\n \"Please manually pass train and val ratio.\"\n )\n LOGGER.info(\"Use the default train/val/test ratio in given dataset\")\n # if hasattr(dataset.train_split, \"n_splits\"):\n # cross_validation = True\n\n elif train_split is not None and val_split is not None:\n utils.graph_random_splits(dataset, train_split, val_split, seed=seed)\n else:\n LOGGER.error(\n \"Please set both train_split and val_split explicitly. Detect %s is None.\",\n \"train_split\" if train_split is None else \"val_split\",\n )\n raise ValueError(\n \"In consistent setting of train/val split. 
Detect {} is None.\".format(\n \"train_split\" if train_split is None else \"val_split\"\n )\n )\n\n # feature engineering\n if self.feature_module is not None:\n self.feature_module.fit(dataset.train_split)\n dataset = self.feature_module.transform(dataset, inplace=inplace)\n\n self.dataset = dataset\n assert dataset[0].x is not None, (\n \"Does not support fit on non node-feature dataset!\"\n \" Please add node features to dataset or specify feature engineers that generate\"\n \" node features.\"\n )\n\n # initialize graph networks\n self._init_graph_module(\n self.gml,\n num_features=dataset.num_node_features,\n num_classes=dataset.num_classes,\n feval=evaluator_list,\n device=self.runtime_device,\n loss=\"cross_entropy\" if not hasattr(dataset, \"loss\") else dataset.loss,\n num_graph_features=0\n if not hasattr(dataset.data, \"gf\")\n else dataset.data.gf.size(1),\n )\n\n # currently disabled\n \"\"\"\n self._init_nas_module(\n num_features=dataset.num_node_features,\n num_classes=dataset.num_classes,\n feval=evaluator_list,\n device=self.runtime_device,\n loss=\"cross_entropy\" if not hasattr(dataset, \"loss\") else dataset.loss,\n num_graph_features=0\n if not hasattr(dataset.data, \"gf\")\n else dataset.data.gf.size(1),\n )\n\n # neural architecture search\n if self.nas_algorithms is not None:\n # perform nas and add them to trainer list\n for algo, space, estimator in zip(\n self.nas_algorithms, self.nas_spaces, self.nas_estimators\n ):\n trainer = algo.search(space, self.dataset, estimator)\n self.graph_model_list.append(trainer)\n \"\"\"\n\n # train the models and tune hpo\n result_valid = []\n names = []\n for idx, model in enumerate(self.graph_model_list):\n if time_limit < 0:\n time_for_each_model = None\n else:\n time_for_each_model = (time_limit - time.time() + time_begin) / (\n len(self.graph_model_list) - idx\n )\n if self.hpo_module is None:\n model.initialize()\n model.train(dataset, True)\n optimized = model\n else:\n optimized, _ = self.hpo_module.optimize(\n trainer=model, dataset=dataset, time_limit=time_for_each_model\n )\n # to save memory, all the trainer derived will be mapped to cpu\n optimized.to(torch.device(\"cpu\"))\n name = str(optimized)\n names.append(name)\n performance_on_valid, _ = optimized.get_valid_score(return_major=False)\n result_valid.append(\n optimized.get_valid_predict_proba().detach().cpu().numpy()\n )\n self.leaderboard.insert_model_performance(\n name,\n dict(\n zip(\n [e.get_eval_name() for e in evaluator_list],\n performance_on_valid,\n )\n ),\n )\n self.trained_models[name] = optimized\n\n # fit the ensemble model\n if self.ensemble_module is not None:\n performance = self.ensemble_module.fit(\n result_valid,\n dataset.data.y[dataset.val_index].cpu().detach().numpy(),\n names,\n evaluator_list,\n n_classes=dataset.num_classes,\n )\n self.leaderboard.insert_model_performance(\n \"ensemble\",\n dict(zip([e.get_eval_name() for e in evaluator_list], performance)),\n )\n\n return self\n\n def fit_predict(\n self,\n dataset,\n time_limit=-1,\n inplace=False,\n train_split=None,\n val_split=None,\n evaluation_method=\"infer\",\n seed=None,\n use_ensemble=True,\n use_best=True,\n name=None,\n ) -> np.ndarray:\n \"\"\"\n Fit current solver on given dataset and return the predicted value.\n\n Parameters\n ----------\n dataset: torch_geometric.data.dataset.Dataset\n The dataset needed to fit on. This dataset must have only one graph.\n\n time_limit: int\n The time limit of the whole fit process (in seconds). 
If set below 0, will\n ignore time limit. Default ``-1``.\n\n inplace: bool\n Whether we process the given dataset in inplace manner. Default ``False``.\n Set it to True if you want to save memory by modifying the given dataset directly.\n\n train_split: float or int (Optional)\n The train ratio (in ``float``) or number (in ``int``) of dataset. If you want to\n use default train/val/test split in dataset, please set this to ``None``.\n Default ``None``.\n\n val_split: float or int (Optional)\n The validation ratio (in ``float``) or number (in ``int``) of dataset. If you want\n to use default train/val/test split in dataset, please set this to ``None``.\n Default ``None``.\n\n evaluation_method: (list of) str or autogl.module.train.evaluation\n A (list of) evaluation method for current solver. If ``infer``, will automatically\n determine. Default ``infer``.\n\n seed: int (Optional)\n The random seed. If set to ``None``, will run everything at random.\n Default ``None``.\n\n use_ensemble: bool\n Whether to use ensemble to do the predict. Default ``True``.\n\n use_best: bool\n Whether to use the best single model to do the predict. Will only be effective when\n ``use_ensemble`` is ``False``. Default ``True``.\n\n name: str or None\n The name of model used to predict. Will only be effective when ``use_ensemble`` and\n ``use_best`` both are ``False``. Default ``None``.\n\n Returns\n -------\n result: np.ndarray\n An array of shape ``(N,)``, where ``N`` is the number of test nodes. The prediction\n on given dataset.\n \"\"\"\n self.fit(\n dataset=dataset,\n time_limit=time_limit,\n inplace=inplace,\n train_split=train_split,\n val_split=val_split,\n evaluation_method=evaluation_method,\n seed=seed,\n )\n return self.predict(\n dataset=dataset,\n inplaced=inplace,\n inplace=inplace,\n use_ensemble=use_ensemble,\n use_best=use_best,\n name=name,\n )\n\n def predict_proba(\n self,\n dataset=None,\n inplaced=False,\n inplace=False,\n use_ensemble=True,\n use_best=True,\n name=None,\n mask=\"test\",\n ) -> np.ndarray:\n \"\"\"\n Predict the node probability.\n\n Parameters\n ----------\n dataset: torch_geometric.data.dataset.Dataset or None\n The dataset needed to predict. If ``None``, will use the processed dataset\n passed to ``fit()`` instead. Default ``None``.\n\n inplaced: bool\n Whether the given dataset is processed. Only be effective when ``dataset``\n is not ``None``. If you pass the dataset to ``fit()`` with ``inplace=True``,\n and you pass the dataset again to this method, you should set this argument\n to ``True``. Otherwise ``False``. Default ``False``.\n\n inplace: bool\n Whether we process the given dataset in inplace manner. Default ``False``.\n Set it to True if you want to save memory by modifying the given dataset directly.\n\n use_ensemble: bool\n Whether to use ensemble to do the predict. Default ``True``.\n\n use_best: bool\n Whether to use the best single model to do the predict. Will only be effective when\n ``use_ensemble`` is ``False``. Default ``True``.\n\n name: str or None\n The name of model used to predict. Will only be effective when ``use_ensemble`` and\n ``use_best`` both are ``False``. Default ``None``.\n\n mask: str\n The data split to give prediction on. Default ``test``.\n\n Returns\n -------\n result: np.ndarray\n An array of shape ``(N,C,)``, where ``N`` is the number of test nodes and ``C`` is\n the number of classes. 
The prediction on given dataset.\n \"\"\"\n if dataset is None:\n dataset = self.dataset\n elif not inplaced:\n if self.feature_module is not None:\n dataset = self.feature_module.transform(dataset, inplace=inplace)\n\n if use_ensemble:\n LOGGER.info(\"Ensemble argument on, will try using ensemble model.\")\n\n if not use_ensemble and use_best:\n LOGGER.info(\n \"Ensemble argument off and best argument on, will try using best model.\"\n )\n\n if (use_ensemble and self.ensemble_module is not None) or (\n not use_best and name == \"ensemble\"\n ):\n # we need to get all the prediction of every model trained\n predict_result = []\n names = []\n for model_name in self.trained_models:\n predict_result.append(\n self._predict_proba_by_name(dataset, model_name, mask)\n )\n names.append(model_name)\n return self.ensemble_module.ensemble(predict_result, names)\n\n if use_ensemble and self.ensemble_module is None:\n LOGGER.warning(\n \"Cannot use ensemble because no ensebmle module is given. \"\n \"Will use best model instead.\"\n )\n\n if use_best or (use_ensemble and self.ensemble_module is None):\n # just return the best model we have found\n best_model_name = self.leaderboard.get_best_model()\n return self._predict_proba_by_name(dataset, best_model_name, mask)\n\n if name is not None:\n # return model performance by name\n return self._predict_proba_by_name(dataset, name, mask)\n\n LOGGER.error(\n \"No model name is given while ensemble and best arguments are off.\"\n )\n raise ValueError(\n \"You need to specify a model name if you do not want use ensemble and best model.\"\n )\n\n def _predict_proba_by_name(self, dataset, name, mask):\n self.trained_models[name].to(self.runtime_device)\n predicted = (\n self.trained_models[name]\n .predict_proba(dataset, mask=mask)\n .detach()\n .cpu()\n .numpy()\n )\n self.trained_models[name].to(torch.device(\"cpu\"))\n return predicted\n\n def predict(\n self,\n dataset=None,\n inplaced=False,\n inplace=False,\n use_ensemble=True,\n use_best=True,\n name=None,\n mask=\"test\",\n ) -> np.ndarray:\n \"\"\"\n Predict the node class number.\n\n Parameters\n ----------\n dataset: torch_geometric.data.dataset.Dataset or None\n The dataset needed to predict. If ``None``, will use the processed dataset passed\n to ``fit()`` instead. Default ``None``.\n\n inplaced: bool\n Whether the given dataset is processed. Only be effective when ``dataset``\n is not ``None``. If you pass the dataset to ``fit()`` with ``inplace=True``, and\n you pass the dataset again to this method, you should set this argument to ``True``.\n Otherwise ``False``. Default ``False``.\n\n inplace: bool\n Whether we process the given dataset in inplace manner. Default ``False``.\n Set it to True if you want to save memory by modifying the given dataset directly.\n\n use_ensemble: bool\n Whether to use ensemble to do the predict. Default ``True``.\n\n use_best: bool\n Whether to use the best single model to do the predict. Will only be effective\n when ``use_ensemble`` is ``False``. Default ``True``.\n\n name: str or None\n The name of model used to predict. Will only be effective when ``use_ensemble``\n and ``use_best`` both are ``False``. 
Default ``None``.\n\n Returns\n -------\n result: np.ndarray\n An array of shape ``(N,)``, where ``N`` is the number of test nodes.\n The prediction on given dataset.\n \"\"\"\n proba = self.predict_proba(\n dataset, inplaced, inplace, use_ensemble, use_best, name, mask\n )\n return np.argmax(proba, axis=1)\n\n @classmethod\n def from_config(cls, path_or_dict, filetype=\"auto\") -> \"AutoGraphClassifier\":\n \"\"\"\n Load solver from config file.\n\n You can use this function to directly load a solver from predefined config dict\n or config file path. Currently, only support file type of ``json`` or ``yaml``,\n if you pass a path.\n\n Parameters\n ----------\n path_or_dict: str or dict\n The path to the config file or the config dictionary object\n\n filetype: str\n The filetype the given file if the path is specified. Currently only support\n ``json`` or ``yaml``. You can set to ``auto`` to automatically detect the file\n type (from file name). Default ``auto``.\n\n Returns\n -------\n solver: autogl.solver.AutoGraphClassifier\n The solver that is created from given file or dictionary.\n \"\"\"\n assert filetype in [\"auto\", \"yaml\", \"json\"], (\n \"currently only support yaml file or json file type, but get type \"\n + filetype\n )\n if isinstance(path_or_dict, str):\n if filetype == \"auto\":\n if path_or_dict.endswith(\".yaml\") or path_or_dict.endswith(\".yml\"):\n filetype = \"yaml\"\n elif path_or_dict.endswith(\".json\"):\n filetype = \"json\"\n else:\n LOGGER.error(\n \"cannot parse the type of the given file name, \"\n \"please manually set the file type\"\n )\n raise ValueError(\n \"cannot parse the type of the given file name, \"\n \"please manually set the file type\"\n )\n if filetype == \"yaml\":\n path_or_dict = yaml.load(\n open(path_or_dict, \"r\").read(), Loader=yaml.FullLoader\n )\n else:\n path_or_dict = json.load(open(path_or_dict, \"r\"))\n\n # load the dictionary\n path_or_dict = deepcopy(path_or_dict)\n solver = cls(None, [], None, None)\n fe_list = path_or_dict.pop(\"feature\", None)\n if fe_list is not None:\n fe_list_ele = []\n for feature_engineer in fe_list:\n name = feature_engineer.pop(\"name\")\n if name is not None:\n fe_list_ele.append(FEATURE_DICT[name](**feature_engineer))\n if fe_list_ele != []:\n solver.set_feature_module(fe_list_ele)\n\n models = path_or_dict.pop(\"models\", [{\"name\": \"gin\"}, {\"name\": \"topkpool\"}])\n model_hp_space = [\n _parse_hp_space(model.pop(\"hp_space\", None)) for model in models\n ]\n model_list = [\n _initialize_single_model(model.pop(\"name\"), model) for model in models\n ]\n\n trainer = path_or_dict.pop(\"trainer\", None)\n default_trainer = \"GraphClassificationFull\"\n trainer_space = None\n if isinstance(trainer, dict):\n # global default\n default_trainer = trainer.pop(\"name\", \"GraphClassificationFull\")\n trainer_space = _parse_hp_space(trainer.pop(\"hp_space\", None))\n default_kwargs = {\"num_features\": None, \"num_classes\": None}\n default_kwargs.update(trainer)\n default_kwargs[\"init\"] = False\n for i in range(len(model_list)):\n model = model_list[i]\n trainer_wrapper = TRAINER_DICT[default_trainer](\n model=model, **default_kwargs\n )\n model_list[i] = trainer_wrapper\n elif isinstance(trainer, list):\n # sequential trainer definition\n assert len(trainer) == len(\n model_list\n ), \"The number of trainer and model does not match\"\n trainer_space = []\n for i in range(len(model_list)):\n train, model = trainer[i], model_list[i]\n default_trainer = train.pop(\"name\", 
\"GraphClassificationFull\")\n trainer_space.append(_parse_hp_space(train.pop(\"hp_space\", None)))\n default_kwargs = {\"num_features\": None, \"num_classes\": None}\n default_kwargs.update(train)\n default_kwargs[\"init\"] = False\n trainer_wrap = TRAINER_DICT[default_trainer](\n model=model, **default_kwargs\n )\n model_list[i] = trainer_wrap\n\n solver.set_graph_models(\n model_list, default_trainer, trainer_space, model_hp_space\n )\n\n hpo_dict = path_or_dict.pop(\"hpo\", {\"name\": \"anneal\"})\n if hpo_dict is not None:\n name = hpo_dict.pop(\"name\")\n solver.set_hpo_module(name, **hpo_dict)\n\n ensemble_dict = path_or_dict.pop(\"ensemble\", {\"name\": \"voting\"})\n if ensemble_dict is not None:\n name = ensemble_dict.pop(\"name\")\n solver.set_ensemble_module(name, **ensemble_dict)\n\n return solver\n" ]
[ [ "torch.nn.functional.softmax" ], [ "torch.device", "numpy.argmax" ] ]
kumagai-group/vise
[ "8adfe61ad8f31767ec562f02f271e2495f357cd4" ]
[ "vise/analyzer/dielectric_function.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) 2020. Distributed under the terms of the MIT License.\nfrom dataclasses import dataclass\nfrom math import sqrt, pi\nfrom typing import List\n\nimport numpy as np\nfrom monty.json import MSONable\nfrom tqdm import tqdm\nfrom vise.util.mix_in import ToJsonFileMixIn\nfrom scipy.constants import physical_constants as pc\n\neV_to_inv_cm = pc[\"electron volt-inverse meter relationship\"][0] / 100\n\n\ndef diele_func_to_coeff(freq, real, imag):\n return (2 * sqrt(2) * pi * sqrt(sqrt(real ** 2 + imag ** 2) - real)\n * freq * eV_to_inv_cm)\n\n\n@dataclass\nclass DieleFuncData(MSONable, ToJsonFileMixIn):\n energies: List[float] # in eV\n diele_func_real: List[List[float]] # [xx, yy, zz, xy, yz, xz]\n diele_func_imag: List[List[float]] # [xx, yy, zz, xy, yz, xz]\n band_gap: float # in eV\n\n @property\n def ave_absorption_coeff(self):\n reals = [sum(self.diele_func_real[i][:3]) / 3\n for i in range(len(self.energies))]\n imags = [sum(self.diele_func_imag[i][:3]) / 3\n for i in range(len(self.energies))]\n return [diele_func_to_coeff(freq, real, imag)\n for freq, real, imag in zip(self.energies, reals, imags)]\n\n def target_coeff_min_e(self, target_coeff: float = 10**4):\n for e, coeff in zip(self.energies, self.ave_absorption_coeff):\n if coeff > target_coeff:\n return e\n return None\n\n\ndef make_shifted_diele_func(diele_func_data: DieleFuncData,\n original_band_gap: float,\n shift: float) -> DieleFuncData:\n imag = imag_shift(diele_func_data.diele_func_imag,\n diele_func_data.energies,\n original_band_gap + shift, shift)\n real = kramers_kronig_trans(imag, diele_func_data.energies)\n return DieleFuncData(diele_func_data.energies,\n real.tolist(),\n imag.tolist(),\n original_band_gap + shift)\n\n\ndef imag_shift(diele_func_imag: List[List[float]],\n energies: List[float],\n band_gap: float,\n shift: float) -> np.ndarray:\n energies = np.array(energies)\n assert shift > 0\n result = []\n for energy_grid in energies:\n old_e = energy_grid - shift\n right_idx = np.argwhere(energies > old_e)[0][0]\n left_e, right_e = energies[right_idx - 1], energies[right_idx]\n # linear interpolation\n left_ratio = (right_e - old_e) / (right_e - left_e)\n\n inner_result = []\n for imag_idx in range(6):\n if energy_grid < band_gap:\n inner_result.append(0.0)\n else:\n old_diele = \\\n diele_func_imag[right_idx - 1][imag_idx] * left_ratio + \\\n diele_func_imag[right_idx][imag_idx] * (1 - left_ratio)\n inner_result.append(\n old_diele * (energy_grid - shift) / energy_grid)\n\n result.append(inner_result)\n\n return np.array(result)\n\n\ndef kramers_kronig_trans(diele_func_imag: np.array,\n energies: List[float],\n ita: float = 0.01) -> np.ndarray:\n mesh = energies[1] - energies[0]\n result = []\n ee2ss = [[e ** 2 - energy_grid ** 2 for e in energies]\n for energy_grid in energies]\n for imag_idx in tqdm(range(6)):\n imags = diele_func_imag[:, imag_idx]\n if imag_idx == 0 or \\\n (imag_idx > 0\n and np.allclose(\n imags, diele_func_imag[:, imag_idx - 1]) is False):\n if np.count_nonzero(imags) == 0:\n inner_result = [0.0] * len(energies)\n else:\n inner_result = []\n for ee2s in ee2ss:\n integrals = [e * imag * ee2 / (ee2 ** 2 + ita ** 2)\n for e, ee2, imag in zip(energies, ee2s, imags)]\n integral = sum(integrals) * mesh * 2 / pi\n if imag_idx < 3:\n integral += 1\n inner_result.append(integral)\n\n result.append(inner_result)\n\n return np.array(result).T" ]
[ [ "numpy.array", "numpy.allclose", "numpy.argwhere", "numpy.count_nonzero" ] ]
RobinCondat/pytorch-retinanet
[ "14a2085cd3785a667454898dc65f5324b1b9c6b8" ]
[ "retinanet/losses_vehicle.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom retinanet.config_experiment_2 import INDEXES_MIX, VEHICLE_INDEXES\n\ndef calc_iou(a, b):\n area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n\n iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])\n ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])\n\n iw = torch.clamp(iw, min=0)\n ih = torch.clamp(ih, min=0)\n\n ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih\n\n ua = torch.clamp(ua, min=1e-8)\n\n intersection = iw * ih\n\n IoU = intersection / ua\n\n return IoU\n\ndef cal_ioa(a, b):\n # Intersection over Area (for ignore regions)\n area = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]),dim=1)\n area = torch.clamp(area, min=1e-8)\n\n iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 0])\n ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 1])\n\n iw = torch.clamp(iw, min=0)\n ih = torch.clamp(ih, min=0)\n\n intersection = iw * ih\n\n IoA = intersection / area\n\n return IoA\n\n\nclass FocalLoss(nn.Module):\n #def __init__(self):\n\n def forward(self, classifications, regressions, anchors, annotations, dataset, ignore_index=None, merge_index=None):\n\n classes_from_other_datasets = [i for i in range(classifications.shape[-1]+1) if i not in INDEXES_MIX[dataset]]\n alpha = 0.25\n gamma = 2.0\n batch_size = classifications.shape[0]\n classification_losses = []\n regression_losses = []\n\n anchor = anchors[0, :, :]\n num_anchors = anchor.shape[0]\n\n anchor_widths = anchor[:, 2] - anchor[:, 0]\n anchor_heights = anchor[:, 3] - anchor[:, 1]\n anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths\n anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights\n if merge_index is not None:\n classifications = torch.cat((classifications,torch.zeros((classifications.shape[0],classifications.shape[1],1)).cuda()),2)\n print(classifications.shape)\n for j in range(batch_size):\n classification = classifications[j, :, :]\n regression = regressions[j, :, :]\n\n bbox_annotation = annotations[j, :, :]\n bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]\n \n # Merge vehicle detections in vehicle class\n if merge_index is not None:\n if merge_index not in classes_from_other_datasets:\n #print(torch.max(classification[:,VEHICLE_INDEXES], dim=1)[0].shape)\n classification[:,merge_index] = torch.max(classification[:,VEHICLE_INDEXES], dim=1)[0]\n\n # Ignore class from other datasets\n classification[:,classes_from_other_datasets]=0\n\n classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)\n\n if bbox_annotation.shape[0] == 0:\n if torch.cuda.is_available():\n alpha_factor = torch.ones(classification.shape).cuda() * alpha\n\n alpha_factor = 1. - alpha_factor\n focal_weight = classification\n\n focal_weight = alpha_factor * torch.pow(focal_weight, gamma)\n\n bce = -(torch.log(1.0 - classification))\n\n cls_loss = focal_weight * bce\n classification_losses.append(cls_loss.sum())\n regression_losses.append(torch.tensor(0).float().cuda())\n \n else:\n alpha_factor = torch.ones(classification.shape) * alpha\n\n alpha_factor = 1. 
- alpha_factor\n focal_weight = classification\n\n focal_weight = alpha_factor * torch.pow(focal_weight, gamma)\n\n bce = -(torch.log(1.0 - classification))\n\n cls_loss = focal_weight * bce\n classification_losses.append(cls_loss.sum())\n regression_losses.append(torch.tensor(0).float())\n \n continue\n\n # Filter ignore class (via ignore_index)\n if ignore_index is not None:\n # On sépare ici les annotations en 2 objets : \n # - bbox_annotation (pour tous les objets à détecter) \n # - ignore_annotation (pour toutes les régions à ignorer)\n ignore_annotation = bbox_annotation[bbox_annotation[:,4] == ignore_index]\n bbox_annotation = bbox_annotation[bbox_annotation[:,4] != ignore_index]\n\n if bbox_annotation.shape[0] != 0:\n IoU = calc_iou(anchors[0, :, :], bbox_annotation[:, :4]) # num_anchors x num_annotations_to_detect\n IoU_max, IoU_argmax = torch.max(IoU, dim=1) # num_anchors x 1\n else:\n IoU_max = None\n IoU_argmax = None\n \n if ignore_index is not None:\n # On calcule ici l'intersection over area : \n # tous les anchors ayant une IoA avec une région à ignorer supérieure à 0.5 seront ignorées pour la suite\n if ignore_annotation.shape[0] !=0:\n IoA = cal_ioa(anchors[0, :, :], ignore_annotation[:, :4]) # num_anchors x num_annotations_to_ignore \n IoA_max, IoA_argmax = torch.max(IoA, dim=1) # num_anchors x 1\n else:\n IoA_max = None\n IoA_argmax = None\n \n # compute the loss for classification\n targets = torch.ones(classification.shape) * -1\n\n if torch.cuda.is_available():\n targets = targets.cuda()\n\n if IoU_max is not None:\n targets[torch.lt(IoU_max, 0.4), :] = 0\n else:\n targets = targets*0\n \n if ignore_index is not None:\n if IoA_max is not None:\n ignore_indices = torch.ge(IoA_max, 0.5)\n else:\n ignore_indices = (torch.ones((num_anchors)) * 0).type(torch.ByteTensor)\n if IoU_max is not None:\n positive_indices = torch.ge(IoU_max, 0.5)\n num_positive_anchors = positive_indices.sum()\n \n else:\n positive_indices = (torch.ones((num_anchors)) * 0).type(torch.ByteTensor)\n num_positive_anchors = torch.tensor(0)\n \n if ignore_index is not None:\n if ignore_indices is not None:\n targets[ignore_indices, :] = -1\n \n if IoU_argmax is not None:\n assigned_annotations = bbox_annotation[IoU_argmax, :]\n targets[positive_indices, :] = 0\n targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1\n \n if torch.cuda.is_available():\n alpha_factor = torch.ones(targets.shape).cuda() * alpha\n else:\n alpha_factor = torch.ones(targets.shape) * alpha\n\n alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)\n focal_weight = torch.where(torch.eq(targets, 1.), 1. 
- classification, classification)\n \n focal_weight = alpha_factor * torch.pow(focal_weight, gamma)\n\n bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))\n\n cls_loss = focal_weight * bce\n\n if torch.cuda.is_available():\n cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape).cuda())\n else:\n cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape))\n classification_losses.append(cls_loss.sum()/torch.clamp(num_positive_anchors.float(), min=1.0))\n \n # compute the loss for regression\n\n if num_positive_anchors > 0:\n assigned_annotations = assigned_annotations[positive_indices, :]\n\n anchor_widths_pi = anchor_widths[positive_indices]\n anchor_heights_pi = anchor_heights[positive_indices]\n anchor_ctr_x_pi = anchor_ctr_x[positive_indices]\n anchor_ctr_y_pi = anchor_ctr_y[positive_indices]\n\n gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]\n gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]\n gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths\n gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights\n\n # clip widths to 1\n gt_widths = torch.clamp(gt_widths, min=1)\n gt_heights = torch.clamp(gt_heights, min=1)\n\n targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi\n targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi\n targets_dw = torch.log(gt_widths / anchor_widths_pi)\n targets_dh = torch.log(gt_heights / anchor_heights_pi)\n\n targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))\n targets = targets.t()\n\n if torch.cuda.is_available():\n targets = targets/torch.Tensor([[0.1, 0.1, 0.2, 0.2]]).cuda()\n else:\n targets = targets/torch.Tensor([[0.1, 0.1, 0.2, 0.2]])\n\n negative_indices = 1 + (~positive_indices)\n\n regression_diff = torch.abs(targets - regression[positive_indices, :])\n\n regression_loss = torch.where(\n torch.le(regression_diff, 1.0 / 9.0),\n 0.5 * 9.0 * torch.pow(regression_diff, 2),\n regression_diff - 0.5 / 9.0\n )\n regression_losses.append(regression_loss.mean())\n else:\n if torch.cuda.is_available():\n regression_losses.append(torch.tensor(0).float().cuda())\n else:\n regression_losses.append(torch.tensor(0).float())\n\n return torch.stack(classification_losses).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0, keepdim=True)\n\n \n" ]
[ [ "torch.unsqueeze", "torch.ones", "torch.stack", "torch.le", "torch.pow", "torch.Tensor", "torch.ge", "torch.lt", "torch.tensor", "torch.eq", "torch.cuda.is_available", "torch.log", "torch.abs", "torch.max", "torch.zeros", "torch.ne", "torch.clamp" ] ]
PeterouZh/PyTorch-StudioGAN
[ "faef6048d25dadee4fa31b2955f16f7d1ca8e1e2" ]
[ "src/main.py" ]
[ "# PyTorch StudioGAN: https://github.com/POSTECH-CVLab/PyTorch-StudioGAN\n# The MIT License (MIT)\n# See license file or visit https://github.com/POSTECH-CVLab/PyTorch-StudioGAN for details\n\n# src/main.py\n\n\nimport json\nimport os\nimport sys\nimport random\nimport warnings\nfrom argparse import ArgumentParser\n\nfrom utils.misc import *\nfrom utils.make_hdf5 import make_hdf5\nfrom utils.log import make_run_name\nfrom loader import prepare_train_eval\n\nimport torch\nfrom torch.backends import cudnn\nimport torch.multiprocessing as mp\n\n\n\nRUN_NAME_FORMAT = (\n \"{framework}-\"\n \"{phase}-\"\n \"{timestamp}\"\n)\n\n\ndef main():\n parser = ArgumentParser(add_help=False)\n parser.add_argument('-c', '--config_path', type=str, default='./src/configs/CIFAR10/ContraGAN.json')\n parser.add_argument('--checkpoint_folder', type=str, default=None)\n parser.add_argument('-current', '--load_current', action='store_true', help='whether you load the current or best checkpoint')\n parser.add_argument('--log_output_path', type=str, default=None)\n\n parser.add_argument('-DDP', '--distributed_data_parallel', action='store_true')\n parser.add_argument('-n', '--nodes', default=1, type=int, metavar='N')\n parser.add_argument('-nr', '--nr', default=0, type=int, help='ranking within the nodes')\n\n parser.add_argument('--seed', type=int, default=-1, help='seed for generating random numbers')\n parser.add_argument('--num_workers', type=int, default=8, help='')\n parser.add_argument('-sync_bn', '--synchronized_bn', action='store_true', help='whether turn on synchronized batchnorm')\n parser.add_argument('-mpc', '--mixed_precision', action='store_true', help='whether turn on mixed precision training')\n parser.add_argument('-LARS', '--LARS_optimizer', action='store_true', help='whether turn on LARS optimizer')\n parser.add_argument('-rm_API', '--disable_debugging_API', action='store_true', help='whether disable pytorch autograd debugging mode')\n\n parser.add_argument('--reduce_train_dataset', type=float, default=1.0, help='control the number of train dataset')\n parser.add_argument('--truncated_factor', type=float, default=-1.0, help='factor for truncation trick')\n parser.add_argument('-stat_otf', '--bn_stat_OnTheFly', action='store_true', help='when evaluating, use the statistics of a batch')\n parser.add_argument('-std_stat', '--standing_statistics', action='store_true')\n parser.add_argument('--standing_step', type=int, default=-1, help='# of steps for accumulation batchnorm')\n parser.add_argument('--freeze_layers', type=int, default=-1, help='# of layers for freezing discriminator')\n\n parser.add_argument('-l', '--load_all_data_in_memory', action='store_true')\n parser.add_argument('-t', '--train', action='store_true')\n parser.add_argument('-e', '--eval', action='store_true')\n parser.add_argument('-s', '--save_images', action='store_true')\n parser.add_argument('-iv', '--image_visualization', action='store_true', help='select whether conduct image visualization')\n parser.add_argument('-knn', '--k_nearest_neighbor', action='store_true', help='select whether conduct k-nearest neighbor analysis')\n parser.add_argument('-itp', '--interpolation', action='store_true', help='whether conduct interpolation analysis')\n parser.add_argument('-fa', '--frequency_analysis', action='store_true', help='whether conduct frequency analysis')\n parser.add_argument('-tsne', '--tsne_analysis', action='store_true', help='whether conduct tsne analysis')\n parser.add_argument('--nrow', type=int, default=10, 
help='number of rows to plot image canvas')\n parser.add_argument('--ncol', type=int, default=8, help='number of cols to plot image canvas')\n\n parser.add_argument('--print_every', type=int, default=100, help='control log interval')\n parser.add_argument('--save_every', type=int, default=2000, help='control evaluation and save interval')\n parser.add_argument('--eval_type', type=str, default='test', help='[train/valid/test]')\n\n from template_lib.v2.config_cfgnode import update_parser_defaults_from_yaml, global_cfg\n update_parser_defaults_from_yaml(parser=parser)\n args = parser.parse_args()\n\n if not args.train and \\\n not args.eval and \\\n not args.save_images and \\\n not args.image_visualization and \\\n not args.k_nearest_neighbor and \\\n not args.interpolation and \\\n not args.frequency_analysis and \\\n not args.tsne_analysis:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n if args.config_path is not None:\n with open(args.config_path) as f:\n model_configs = json.load(f)\n train_configs = vars(args)\n else:\n raise NotImplementedError\n\n hdf5_path_train = make_hdf5(model_configs['data_processing'], train_configs, mode=\"train\") \\\n if train_configs['load_all_data_in_memory'] else None\n\n if train_configs['seed'] == -1:\n train_configs['seed'] = random.randint(1,4096)\n cudnn.benchmark, cudnn.deterministic = True, False\n else:\n cudnn.benchmark, cudnn.deterministic = False, True\n\n fix_all_seed(train_configs['seed'])\n gpus_per_node, rank = torch.cuda.device_count(), torch.cuda.current_device()\n world_size = gpus_per_node*train_configs['nodes']\n if world_size == 1:\n warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')\n\n run_name = make_run_name(RUN_NAME_FORMAT, framework=train_configs['config_path'].split('/')[-1][:-5], phase='train')\n if train_configs['disable_debugging_API']: torch.autograd.set_detect_anomaly(False)\n check_flags(train_configs, model_configs, world_size)\n\n if train_configs['distributed_data_parallel'] and world_size > 1:\n print(\"Train the models through DistributedDataParallel (DDP) mode.\")\n mp.spawn(prepare_train_eval, nprocs=gpus_per_node, args=(gpus_per_node, world_size, run_name,\n train_configs, model_configs, hdf5_path_train))\n else:\n prepare_train_eval(rank, gpus_per_node, world_size, run_name, train_configs, model_configs, hdf5_path_train=hdf5_path_train)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.autograd.set_detect_anomaly", "torch.multiprocessing.spawn", "torch.cuda.current_device", "torch.cuda.device_count" ] ]
anigasan/tensorflow
[ "5b780b4983007661ca479bf4d7ed9a260d8ce43f" ]
[ "tensorflow/lite/python/convert.py" ]
[ "# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Converts a frozen graph into a TFLite FlatBuffer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport enum # pylint: disable=g-bad-import-order\nimport os as _os\nimport platform as _platform\nimport subprocess as _subprocess\nimport tempfile as _tempfile\n\nimport six\nfrom six.moves import map\n\nfrom tensorflow.lite.python import lite_constants\nfrom tensorflow.lite.python import util\nfrom tensorflow.lite.python import wrap_toco\nfrom tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2\nfrom tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2\nfrom tensorflow.lite.toco import types_pb2 as _types_pb2\nfrom tensorflow.python.platform import resource_loader as _resource_loader\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export as _tf_export\n\n\n# Find the toco_from_protos binary using the resource loader if using from\n# bazel, otherwise we are in a pip where console_scripts already has\n# the toco_from_protos tool.\nif lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:\n _toco_from_proto_bin = \"\"\nelse:\n _toco_from_proto_bin = _resource_loader.get_path_to_datafile(\n \"../toco/python/toco_from_protos\")\n\nif _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):\n _toco_from_proto_bin = \"toco_from_protos\"\n\n\ndef _try_convert_to_unicode(output):\n if output is None:\n return u\"\"\n\n if isinstance(output, bytes):\n try:\n return six.ensure_text(output)\n except UnicodeDecodeError:\n pass\n return output\n\n\n@_tf_export(\"lite.OpsSet\")\nclass OpsSet(enum.Enum):\n \"\"\"Enum class defining the sets of ops available to generate TFLite models.\n\n WARNING: Experimental interface, subject to change.\n \"\"\"\n # Convert model using TensorFlow Lite builtin ops.\n TFLITE_BUILTINS = \"TFLITE_BUILTINS\"\n\n # Convert model using TensorFlow ops. 
Not all TensorFlow ops are available.\n # WARNING: Experimental interface, subject to change.\n SELECT_TF_OPS = \"SELECT_TF_OPS\"\n\n # Convert model using only TensorFlow Lite quantized int8 operations.\n # Specifying this will throw an error for operations that do not yet have\n # quantized implementations.\n TFLITE_BUILTINS_INT8 = \"TFLITE_BUILTINS_INT8\"\n\n def __str__(self):\n return self.value\n\n @staticmethod\n def get_options():\n \"\"\"Returns a list of OpsSet options as a list of strings.\"\"\"\n return [str(option) for option in list(OpsSet)]\n\n\nclass ConverterError(Exception):\n \"\"\"Raised when an error occurs during model conversion.\"\"\"\n pass\n\n\ndef toco_convert_protos(model_flags_str,\n toco_flags_str,\n input_data_str,\n debug_info_str=None,\n enable_mlir_converter=False):\n \"\"\"Convert `input_data_str` according to model and toco parameters.\n\n Unless you know what you are doing consider using\n the more friendly `tf.compat.v1.lite.toco_convert`.\n\n Args:\n model_flags_str: Serialized proto describing model properties, see\n `toco/model_flags.proto`.\n toco_flags_str: Serialized proto describing conversion properties, see\n `toco/toco_flags.proto`.\n input_data_str: Input data in serialized form (e.g. a graphdef is common)\n debug_info_str: Serialized `GraphDebugInfo` proto describing logging\n information. (default None)\n enable_mlir_converter: Enables MLIR-based conversion instead of the default\n TOCO conversion. (default False)\n Returns:\n Converted model in serialized form (e.g. a TFLITE model is common).\n Raises:\n ConverterError: When conversion fails in TFLiteConverter, usually due to\n ops not being supported.\n RuntimeError: When conversion fails, an exception is raised with the error\n message embedded.\n \"\"\"\n # TODO(aselle): When toco does not use fatal errors for failure, we can\n # switch this on.\n if not _toco_from_proto_bin:\n try:\n model_str = wrap_toco.wrapped_toco_convert(model_flags_str,\n toco_flags_str, input_data_str,\n debug_info_str,\n enable_mlir_converter)\n return model_str\n except Exception as e:\n raise ConverterError(str(e))\n\n # Windows and TemporaryFile are not that useful together,\n # since you cannot have two readers/writers. So we have to\n # make the temporaries and close and delete them explicitly.\n toco_filename, model_filename, input_filename, output_filename = (\n None, None, None, None)\n try:\n # Build all input files\n with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \\\n _tempfile.NamedTemporaryFile(delete=False) as fp_model, \\\n _tempfile.NamedTemporaryFile(delete=False) as fp_input, \\\n _tempfile.NamedTemporaryFile(delete=False) as fp_debug:\n toco_filename = fp_toco.name\n input_filename = fp_input.name\n model_filename = fp_model.name\n debug_filename = fp_debug.name\n\n fp_model.write(model_flags_str)\n fp_toco.write(toco_flags_str)\n fp_input.write(six.ensure_binary(input_data_str))\n debug_info_str = debug_info_str if debug_info_str else \"\"\n # if debug_info_str contains a \"string value\", then the call to\n # fp_debug.write(debug_info_str) will fail with the following error\n #\n # TypeError: a bytes-like object is required, not 'str'\n #\n # Some of the subtests within the \"convert_test\" unit-test fail\n # with the error shown above. 
So watch out for that scenario and\n # convert debug_info_str to bytes where needed\n if not isinstance(debug_info_str, bytes):\n fp_debug.write(debug_info_str.encode(\"utf-8\"))\n else:\n fp_debug.write(debug_info_str)\n\n # Reserve an output file\n with _tempfile.NamedTemporaryFile(delete=False) as fp:\n output_filename = fp.name\n\n # Run\n cmd = [\n _toco_from_proto_bin,\n model_filename,\n toco_filename,\n input_filename,\n output_filename,\n \"--debug_proto_file={}\".format(debug_filename),\n ]\n if enable_mlir_converter:\n cmd.append(\"--enable_mlir_converter\")\n cmdline = \" \".join(cmd)\n is_windows = _platform.system() == \"Windows\"\n proc = _subprocess.Popen(\n cmdline,\n shell=True,\n stdout=_subprocess.PIPE,\n stderr=_subprocess.STDOUT,\n close_fds=not is_windows)\n stdout, stderr = proc.communicate()\n exitcode = proc.returncode\n if exitcode == 0:\n with open(output_filename, \"rb\") as fp:\n return fp.read()\n else:\n stdout = _try_convert_to_unicode(stdout)\n stderr = _try_convert_to_unicode(stderr)\n raise ConverterError(\"See console for info.\\n%s\\n%s\\n\" % (stdout, stderr))\n finally:\n # Must manually cleanup files.\n for filename in [\n toco_filename, input_filename, model_filename, output_filename]:\n try:\n _os.unlink(filename)\n except (OSError, TypeError):\n pass\n\n\ndef build_toco_convert_protos(input_tensors,\n output_tensors,\n inference_type=lite_constants.FLOAT,\n inference_input_type=None,\n input_format=lite_constants.TENSORFLOW_GRAPHDEF,\n input_shapes=None,\n output_format=lite_constants.TFLITE,\n quantized_input_stats=None,\n default_ranges_stats=None,\n drop_control_dependency=True,\n reorder_across_fake_quant=False,\n allow_custom_ops=False,\n custom_opdefs=None,\n change_concat_input_ranges=False,\n post_training_quantize=False,\n quantize_to_float16=False,\n dump_graphviz_dir=None,\n dump_graphviz_video=False,\n target_ops=None,\n allow_nonexistent_arrays=False,\n debug_info=None,\n conversion_summary_dir=None):\n \"\"\"Builds protocol buffers describing a conversion of a model using TOCO.\n\n Typically this is to convert from TensorFlow GraphDef to TFLite, in which\n case the default `input_format` and `output_format` are sufficient.\n\n Args:\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n inference_type: Target data type of real-number arrays in the output file.\n Must be `{tf.float32, tf.uint8}`. (default tf.float32)\n Must be `{tf.float32, tf.uint8}`. (default `inference_type`)\n inference_input_type: Target data type of real-number input arrays. Allows\n for a different type for input arrays in the case of quantization.\n input_format: Type of data to read Currently must be\n `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)\n input_shapes: Input array shape. It needs to be a list of the same length\n as `input_tensors`, or None. (default None)\n output_format: Output file format. Currently must be `{TFLITE,\n GRAPHVIZ_DOT}`. (default TFLITE)\n quantized_input_stats: List of tuples of floats representing the mean and\n standard deviation. Each tuple maps to the corresponding input tensor.\n Only need if `inference_input_type` is `QUANTIZED_UINT8`.\n real_input_value = (quantized_input_value - mean_value) / std_dev_value.\n (default None)\n default_ranges_stats: Tuple of integers representing (min, max) range values\n for all arrays without a specified range. 
Intended for experimenting with\n quantization via \"dummy quantization\". (default None)\n drop_control_dependency: Boolean indicating whether to drop control\n dependencies silently. This is due to TFLite not supporting control\n dependencies. (default True)\n reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant\n nodes in unexpected locations. Used when the location of the FakeQuant\n nodes is preventing graph transformations necessary to convert the graph.\n Results in a graph that differs from the quantized training graph,\n potentially causing differing arithmetic behavior. (default False)\n allow_custom_ops: Boolean indicating whether to allow custom operations.\n When false any unknown operation is an error. When true, custom ops are\n created for any op that is unknown. The developer will need to provide\n these to the TensorFlow Lite runtime with a custom resolver.\n (default False)\n custom_opdefs: List of strings representing custom ops OpDefs that are\n included in the GraphDef. Required when using custom operations with the\n MLIR-based converter. (default None)\n change_concat_input_ranges: Boolean to change behavior of min/max ranges for\n inputs and outputs of the concat operator for quantized models. Changes\n the ranges of concat operator overlap when true. (default False)\n post_training_quantize: Boolean indicating whether to quantize the weights\n of the converted float model. Model size will be reduced and there will be\n latency improvements (at the cost of accuracy).\n (default False)\n quantize_to_float16: Boolean indicating whether to convert float buffers\n to float16. (default False)\n dump_graphviz_dir: Full filepath of folder to dump the graphs at various\n stages of processing GraphViz .dot files. Preferred over\n --output_format=GRAPHVIZ_DOT in order to keep the requirements of the\n output file. (default None)\n dump_graphviz_video: Boolean indicating whether to dump the graph after\n every graph transformation. (default False)\n target_ops: Experimental flag, subject to change. Set of OpsSet\n options indicating which converter to use.\n (default set([OpsSet.TFLITE_BUILTINS]))\n allow_nonexistent_arrays: Allow specifying array names that don't exist\n or are unused in the final graph. 
(default False)\n debug_info: `GraphDebugInfo` proto containing the stack traces for the\n original nodes referred by the converted graph.\n conversion_summary_dir: A string, the path to the generated conversion logs.\n\n Returns:\n model_flags, toco_flags, debug_info: three protocol buffers describing the\n conversion process and debug information.\n\n Raises:\n ValueError:\n If the input tensor type is unknown\n Missing mean_values or std_dev_values\n RuntimeError: If TOCO fails to convert (in which case the runtime error's\n error text will contain the TOCO error log)\n \"\"\"\n toco = _toco_flags_pb2.TocoFlags()\n toco.input_format = input_format\n toco.output_format = output_format\n toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)\n if inference_input_type:\n toco.inference_input_type = util.convert_dtype_to_tflite_type(\n inference_input_type)\n else:\n toco.inference_input_type = toco.inference_type\n toco.drop_control_dependency = drop_control_dependency\n toco.reorder_across_fake_quant = reorder_across_fake_quant\n toco.allow_custom_ops = allow_custom_ops\n if custom_opdefs:\n toco.custom_opdefs.extend(custom_opdefs)\n toco.post_training_quantize = post_training_quantize\n toco.quantize_to_float16 = quantize_to_float16\n if default_ranges_stats:\n toco.default_ranges_min = default_ranges_stats[0]\n toco.default_ranges_max = default_ranges_stats[1]\n if dump_graphviz_dir:\n toco.dump_graphviz_dir = dump_graphviz_dir\n toco.dump_graphviz_include_video = dump_graphviz_video\n if conversion_summary_dir:\n toco.conversion_summary_dir = conversion_summary_dir\n if target_ops:\n if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):\n toco.enable_select_tf_ops = True\n elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):\n toco.enable_select_tf_ops = True\n toco.force_select_tf_ops = True\n\n model = _model_flags_pb2.ModelFlags()\n model.change_concat_input_ranges = change_concat_input_ranges\n for idx, input_tensor in enumerate(input_tensors):\n input_array = model.input_arrays.add()\n input_array.name = util.get_tensor_name(input_tensor)\n input_array.data_type = util.convert_dtype_to_tflite_type(\n input_tensor.dtype)\n\n if toco.inference_input_type in \\\n [_types_pb2.QUANTIZED_UINT8, _types_pb2.INT8]:\n if not quantized_input_stats:\n raise ValueError(\"std_dev and mean must be defined when \"\n \"inference_input_type is QUANTIZED_UINT8.\")\n input_array.mean_value, input_array.std_value = quantized_input_stats[idx]\n if input_shapes is None:\n shape = input_tensor.shape\n else:\n shape = input_shapes[idx]\n input_array.shape.dims.extend(list(map(int, shape)))\n\n for output_tensor in output_tensors:\n model.output_arrays.append(util.get_tensor_name(output_tensor))\n\n model.allow_nonexistent_arrays = allow_nonexistent_arrays\n\n return model, toco, debug_info\n\n\ndef toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,\n enable_mlir_converter, *args, **kwargs):\n \"\"\"\"Convert a model using TOCO.\n\n This function is used to convert GraphDefs that cannot be loaded into\n TensorFlow to TFLite. Conversion can be customized by providing arguments\n that are forwarded to `build_toco_convert_protos` (see documentation for\n details).\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_arrays_with_shape: Tuple of strings representing input tensor names\n and list of integers representing input shapes\n (e.g., [(\"foo\" : [1, 16, 16, 3])]). 
Use only when graph cannot be loaded\n into TensorFlow and when `input_tensors` is None. (default None)\n output_arrays: List of output tensors to freeze graph with. Use only when\n graph cannot be loaded into TensorFlow and when `output_tensors` is None.\n (default None)\n enable_mlir_converter: Enables MLIR-based conversion instead of TOCO\n conversion.\n *args: See `build_toco_convert_protos`,\n **kwargs: See `build_toco_convert_protos`.\n\n Returns:\n The converted data. For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_toco_convert_protos`.\n \"\"\"\n model_flags, toco_flags, _ = build_toco_convert_protos(\n input_tensors=[], output_tensors=[], *args, **kwargs)\n\n for idx, (name, shape) in enumerate(input_arrays_with_shape):\n input_array = model_flags.input_arrays.add()\n if toco_flags.inference_input_type == _types_pb2.QUANTIZED_UINT8:\n if ((\"quantized_input_stats\" not in kwargs) or\n (not kwargs[\"quantized_input_stats\"])):\n raise ValueError(\"std_dev and mean must be defined when \"\n \"inference_input_type is QUANTIZED_UINT8.\")\n input_array.mean_value, input_array.std_value = kwargs[\n \"quantized_input_stats\"][idx]\n input_array.name = name\n input_array.shape.dims.extend(list(map(int, shape)))\n\n for name in output_arrays:\n model_flags.output_arrays.append(name)\n\n data = toco_convert_protos(\n model_flags.SerializeToString(),\n toco_flags.SerializeToString(),\n input_data.SerializeToString(),\n enable_mlir_converter=enable_mlir_converter)\n return data\n\n\ndef toco_convert_impl(input_data, input_tensors, output_tensors,\n enable_mlir_converter, *args, **kwargs):\n \"\"\"\"Convert a model using TOCO.\n\n Typically this function is used to convert from TensorFlow GraphDef to TFLite.\n Conversion can be customized by providing arguments that are forwarded to\n `build_toco_convert_protos` (see documentation for details).\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n enable_mlir_converter: Enables MLIR-based conversion instead of TOCO\n conversion.\n *args: See `build_toco_convert_protos`,\n **kwargs: See `build_toco_convert_protos`.\n\n Returns:\n The converted data. For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_toco_convert_protos`.\n \"\"\"\n model_flags, toco_flags, debug_info = build_toco_convert_protos(\n input_tensors, output_tensors, *args, **kwargs)\n debug_info_str = debug_info.SerializeToString() if debug_info else None\n data = toco_convert_protos(\n model_flags.SerializeToString(),\n toco_flags.SerializeToString(),\n input_data.SerializeToString(),\n debug_info_str=debug_info_str,\n enable_mlir_converter=enable_mlir_converter)\n return data\n\n\n@_tf_export(v1=[\"lite.toco_convert\"])\[email protected](None, \"Use `lite.TFLiteConverter` instead.\")\ndef toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):\n \"\"\"Convert a model using TOCO.\n\n Typically this function is used to convert from TensorFlow GraphDef to TFLite.\n Conversion can be customized by providing arguments that are forwarded to\n `build_toco_convert_protos` (see documentation for details). This function has\n been deprecated. 
Please use `lite.TFLiteConverter` instead.\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n *args: See `build_toco_convert_protos`,\n **kwargs: See `build_toco_convert_protos`.\n\n Returns:\n The converted data. For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_toco_convert_protos`.\n \"\"\"\n enable_mlir_converter = kwargs.get(\"enable_mlir_converter\", False)\n return toco_convert_impl(input_data, input_tensors, output_tensors,\n enable_mlir_converter, *args, **kwargs)\n" ]
[ [ "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.platform.resource_loader.get_path_to_datafile", "tensorflow.python.util.tf_export.tf_export", "tensorflow.lite.toco.toco_flags_pb2.TocoFlags", "tensorflow.lite.toco.model_flags_pb2.ModelFlags", "tensorflow.lite.python.util.get_tensor_name", "tensorflow.lite.python.util.convert_dtype_to_tflite_type", "tensorflow.lite.python.wrap_toco.wrapped_toco_convert" ] ]
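Editor's note: the record above captures TensorFlow's TOCO conversion shim (tensorflow/lite/python/convert.py). Below is a minimal, hedged sketch of the deprecated v1 entry point it exports (`tf.compat.v1.lite.toco_convert`, per the `_tf_export(v1=["lite.toco_convert"])` decorator in the record), assuming TF 1.x-style graph building; the tensor names and output path are illustrative, not from the source.

import tensorflow as tf

# Build a trivial v1 GraphDef and convert it via the deprecated shim.
tf.compat.v1.disable_eager_execution()
with tf.compat.v1.Session() as sess:
    inp = tf.compat.v1.placeholder(tf.float32, shape=[1, 4], name="input")  # illustrative
    out = tf.identity(inp * 2.0, name="output")
    # Extra args/kwargs would be forwarded to build_toco_convert_protos.
    tflite_model = tf.compat.v1.lite.toco_convert(sess.graph_def, [inp], [out])

with open("model.tflite", "wb") as f:  # illustrative output path
    f.write(tflite_model)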
cverluise/parseEPO
[ "be1171a0f8e6fcafa711fa291aebb1fc2260d5e6" ]
[ "parseepo/serialize.py" ]
[ "import html2text\nimport pandas as pd\nfrom wasabi import Printer\n\nfrom parseepo import validate\nfrom parseepo.exception import SingleAttrException\nfrom parseepo.utils import prepare_name\n\nh = html2text.HTML2Text()\nmsg = Printer()\nNAMES = [\"EP\", \"Num\", \"Ext\", \"publication_date\", \"language\", \"attr\", \"text\"]\nNESTED_ATTR = [\"TITLE\", \"CLAIM\", \"AMEND\", \"title\", \"claims\", \"amendment\"]\n\n\ndef format_patent_df(\n data: list, prepare_names: bool = False, handle_html: bool = False\n):\n \"\"\"\n Return data as a prepared DataFrame from a list of rows\n Nb: Input is [publication_number[Row]].\n E.g. [['EP','0700059 A1','1996-03-06','de','TITLE',' Elektroma...'],\n ['EP','0700059 A1','1996-03-06','en','TITLE',' Electroma...'],\n ...\n :param data: List[List]\n :param prepare_names: bool, True if you want to prepare names for BQ compatibility\n :param handle_html: bool, True if you want to handle html\n :return: pd.DataFrame\n publication_date language attr text publication_number\n 0 1996-03-06 ... ... ... EP-0700059-A1\n 1 1996-03-06 ... ... ... EP-0700059-A1\n 2 1996-03-06 ... ... ... EP-0700059-A1\n 3 1996-03-06 ... ... ... EP-0700059-A1\n 4 1996-03-06 ... ... ... EP-0700059-A1\n 5 1996-03-06 ... ... ... EP-0700059-A1\n 6 1996-03-06 ... ... ... EP-0700059-A1\n \"\"\"\n\n df_ = pd.DataFrame(data, columns=NAMES)\n df_[\"publication_number\"] = df_[\"EP\"] + \"-\" + df_[\"Num\"] + \"-\" + df_[\"Ext\"]\n df_ = df_.drop([\"EP\", \"Num\", \"Ext\"], axis=1)\n\n if prepare_names:\n df_[\"attr\"] = df_[\"attr\"].apply(lambda x: prepare_name(x, True))\n if handle_html:\n df_[\"text\"] = df_[\"text\"].apply(lambda x: h.handle(x))\n return df_\n\n\ndef unnest_attr(patent_dict: dict, publication_number: str):\n \"\"\"\n Unnest flat attributes returned as nested by the batch aggregation operation in\n serialize_patent.\n Raises warning if expected flat attributes has multiple values.\n :param patent_dict: dict, returned by serialize_patent\n :param publication_number: str, e.g. 'EP-0600083-A1'\n :return: dict\n In:\n { ...,\n 'PDFEP': {'language': ['en'],\n 'text': ['https://data.epo.org/publication-server/...']},\n }\n Out:\n {...,\n 'PDFEP': 'https://data.epo.org/publication-server/...',}\n\n \"\"\"\n attrs = list(filter(lambda x: x not in NESTED_ATTR, patent_dict.keys()))\n for attr in attrs:\n val = patent_dict[attr][\"text\"]\n try:\n validate.single_attr(val, attr, publication_number)\n except SingleAttrException:\n msg.warn(\n f\"{publication_number}: {attr} has more than 1 value. Only the first value \"\n f\"was kept. 
Add {attr} to the list NESTED_ATTR to fix this behavior.\"\n )\n patent_dict.update(\n {\n attr: {\n \"text\": patent_dict[attr][\"text\"][0],\n \"language\": patent_dict[attr][\"language\"][0],\n }\n }\n )\n\n\ndef serialize_patent_df(patent_df: pd.DataFrame):\n \"\"\"\n Return the serialized patent\n :param patent_df: pd.DataFrame, returned by format_patent_df\n :return: dict\n {'ABSTR': '<p id=\"pa01\" num=\"0001\">A device ...',\n 'CLAIM': {'language': ['en'],\n 'text': ['<claim id=\"c-en-0001\" ...']},\n 'DESCR': '<heading id=\"h0001\">Field of ...',\n 'PDFEP': 'https://data.epo.org/publication-server/...',\n 'TITLE': {'language': ['de', 'en', 'fr'],\n 'text': ['VORRICHTUNG ZUM ...',\n 'DEVICE FOR CONVEYING ...',\n \"DISPOSITIF D'ACHEMINEMENT ...']},\n 'publication_date': '1994-06-08',\n 'publication_number': 'EP-0600083-A1'}\n \"\"\"\n publication_number = patent_df[\"publication_number\"].values[0]\n publication_date = patent_df[\"publication_date\"].values[0]\n\n out = (\n patent_df.drop([\"publication_number\", \"publication_date\"], axis=1)\n .groupby(\"attr\")\n .aggregate(list)\n .T.to_dict()\n )\n\n unnest_attr(out, publication_number)\n out.update({\"publication_number\": publication_number})\n out.update({\"publication_date\": publication_date})\n return out\n\n\ndef serialize_patent(\n data: list, prepare_names: bool = False, handle_html: bool = False\n):\n \"\"\"\n Return the serialized patent\n :param data: List[List[str]], E.g.\n [['EP','0700059 A1','1996-03-06','de','TITLE',' Elektroma...'],\n ['EP','0700059 A1','1996-03-06','en','TITLE',' Electroma...'],\n :param prepare_names: bool, True if you want to prepare names for BQ compatibility\n :param handle_html: bool, True if you want to handle html\n :return: dict\n \"\"\"\n out = format_patent_df(data, prepare_names, handle_html)\n out = serialize_patent_df(out)\n return out\n" ]
[ [ "pandas.DataFrame" ] ]
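Editor's note: for the parseepo record above, a short usage sketch of serialize_patent, assuming input rows follow the seven-column NAMES schema (["EP", "Num", "Ext", "publication_date", "language", "attr", "text"]); the patent values below are invented for illustration.

from parseepo.serialize import serialize_patent

# Three rows for one publication: a nested attribute (TITLE, one row per
# language) and a flat one (ABSTR), which unnest_attr collapses.
rows = [
    ["EP", "0700059", "A1", "1996-03-06", "de", "TITLE", "Elektromagnetisches Relais"],
    ["EP", "0700059", "A1", "1996-03-06", "en", "TITLE", "Electromagnetic relay"],
    ["EP", "0700059", "A1", "1996-03-06", "en", "ABSTR", "<p>A relay ...</p>"],
]
patent = serialize_patent(rows, prepare_names=False, handle_html=True)
print(patent["publication_number"])  # EP-0700059-A1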
luigiluz/pyampd
[ "cd247030f5a4ccd971da837b9b873cacbd7adfb3" ]
[ "pyampd/ampd.py" ]
[ "import numpy as np\nfrom scipy.ndimage import uniform_filter1d\nfrom scipy.signal import detrend\n\n\ndef find_peaks_original(x, scale=None, debug=False):\n \"\"\"Find peaks in quasi-periodic noisy signals using AMPD algorithm.\n\n Automatic Multi-Scale Peak Detection originally proposed in\n \"An Efficient Algorithm for Automatic Peak Detection in\n Noisy Periodic and Quasi-Periodic Signals\", Algorithms 2012, 5, 588-603\n https://doi.org/10.3390/a5040588\n\n Optimized implementation by Igor Gotlibovych, 2018\n\n\n Parameters\n ----------\n x : ndarray\n 1-D array on which to find peaks\n scale : int, optional\n specify maximum scale window size of (2 * scale + 1)\n debug : bool, optional\n if set to True, return the Local Scalogram Matrix, `LSM`,\n and scale with most local maxima, `l`,\n together with peak locations\n\n Returns\n -------\n pks: ndarray\n The ordered array of peak indices found in `x`\n\n \"\"\"\n x = detrend(x)\n N = len(x)\n L = N // 2\n if scale:\n L = min(scale, L)\n\n # create LSM matrix\n LSM = np.zeros((L, N), dtype=bool)\n for k in np.arange(1, L):\n LSM[k - 1, k:N - k] = (\n (x[0:N - 2 * k] < x[k:N - k]) & (x[k:N - k] > x[2 * k:N])\n )\n\n # Find scale with most maxima\n G = LSM.sum(axis=1)\n l_scale = np.argmax(G)\n\n # find peaks that persist on all scales up to l\n pks_logical = np.min(LSM[0:l_scale, :], axis=0)\n pks = np.flatnonzero(pks_logical)\n if debug:\n return pks, LSM, l_scale\n return pks\n\n\ndef find_peaks(x, scale=None, debug=False):\n \"\"\"Find peaks in quasi-periodic noisy signals using AMPD algorithm.\n\n Extended implementation handles peaks near start/end of the signal.\n\n Optimized implementation by Igor Gotlibovych, 2018\n\n\n Parameters\n ----------\n x : ndarray\n 1-D array on which to find peaks\n scale : int, optional\n specify maximum scale window size of (2 * scale + 1)\n debug : bool, optional\n if set to True, return the Local Scalogram Matrix, `LSM`,\n weighted number of maxima, 'G',\n and scale at which G is maximized, `l`,\n together with peak locations\n\n Returns\n -------\n pks: ndarray\n The ordered array of peak indices found in `x`\n\n \"\"\"\n x = detrend(x)\n N = len(x)\n L = N // 2\n if scale:\n L = min(scale, L)\n\n # create LSM matrix\n LSM = np.ones((L, N), dtype=bool)\n for k in np.arange(1, L + 1):\n LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]\n ) # compare to right neighbours\n LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k]) # compare to left neighbours\n\n # Find scale with most maxima\n G = LSM.sum(axis=1)\n G = G * np.arange(\n N // 2, N // 2 - L, -1\n ) # normalize to adjust for new edge regions\n l_scale = np.argmax(G)\n\n # find peaks that persist on all scales up to l\n pks_logical = np.min(LSM[0:l_scale, :], axis=0)\n pks = np.flatnonzero(pks_logical)\n if debug:\n return pks, LSM, G, l_scale\n return pks\n\n\ndef find_peaks_adaptive(x, window=None, debug=False):\n \"\"\"Find peaks in quasi-periodic noisy signals using ASS-AMPD algorithm.\n\n Adaptive Scale Selection Automatic Multi-Scale Peak Detection,\n an extension of AMPD -\n \"An Efficient Algorithm for Automatic Peak Detection in\n Noisy Periodic and Quasi-Periodic Signals\", Algorithms 2012, 5, 588-603\n https://doi.org/10.3390/a5040588\n\n Optimized implementation by Igor Gotlibovych, 2018\n\n\n Parameters\n ----------\n x : ndarray\n 1-D array on which to find peaks\n window : int, optional\n sliding window size for adaptive scale selection\n debug : bool, optional\n if set to True, return the Local Scalogram Matrix, `LSM`,\n and `adaptive_scale`,\n together with peak locations\n\n Returns\n -------\n pks: ndarray\n The ordered array of peak indices found in `x`\n\n \"\"\"\n x = detrend(x)\n N = len(x)\n if not window:\n window = N\n if window > N:\n window = N\n L = window // 2\n\n # create LSM matrix\n LSM = np.ones((L, N), dtype=bool)\n for k in np.arange(1, L + 1):\n LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]\n ) # compare to right neighbours\n LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k]) # compare to left neighbours\n\n # Create continuous adaptive LSM\n ass_LSM = uniform_filter1d(LSM * window, window, axis=1, mode='nearest')\n normalization = np.arange(L, 0, -1) # scale normalization weight\n ass_LSM = ass_LSM * normalization.reshape(-1, 1)\n\n # Find adaptive scale at each point\n adaptive_scale = ass_LSM.argmax(axis=0)\n\n # construct reduced LSM\n LSM_reduced = LSM[:adaptive_scale.max(), :]\n mask = (np.indices(LSM_reduced.shape)[0] > adaptive_scale\n ) # these elements are outside scale of interest\n LSM_reduced[mask] = 1\n\n # find peaks that persist on all scales up to l\n pks_logical = np.min(LSM_reduced, axis=0)\n pks = np.flatnonzero(pks_logical)\n if debug:\n return pks, ass_LSM, adaptive_scale\n return pks\n" ]
[ [ "scipy.signal.detrend", "numpy.ones", "numpy.zeros", "numpy.argmax", "numpy.arange", "numpy.indices", "numpy.min", "scipy.ndimage.uniform_filter1d", "numpy.flatnonzero" ] ]
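Editor's note: a usage sketch for the AMPD implementations in the pyampd record above; the synthetic signal is illustrative.

import numpy as np
from pyampd.ampd import find_peaks, find_peaks_adaptive

t = np.linspace(0, 10 * np.pi, 1000)
x = np.sin(t) + 0.1 * np.random.randn(t.size)        # quasi-periodic noisy signal
peaks = find_peaks(x)                                # edge-aware variant
peaks_adaptive = find_peaks_adaptive(x, window=200)  # adaptive scale selection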
bionlplab/heart_failure_mortality
[ "f3bbfe65fe6f2c2a076acb38697133b472bf2231" ]
[ "extract_features.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom utils import *\nfrom sklearn.preprocessing import StandardScaler\nfrom collections import defaultdict\nimport re\n\ndef format_labels(file_path, timelines, mapping):\n\tmost_recent = mapping.sort_values([\"subject_id\", \"ordering_date\"], ascending=False).drop_duplicates(\"subject_id\", keep=\"first\")\n\n\tlabel_features = pd.read_csv(file_path)\n\tformatted_features = reformat4pycox([\"report_id\"], label_features)\n\n\t#Connect subject to report\n\tdata_frames = [timelines, most_recent]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"subject_id\"), data_frames)\n\n\t#Connect report to labels\n\tdata_frames = [data_df, formatted_features]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"report_id\"), data_frames)\n\n\tfor i in [\"ordering_date\", \"report_id\"]:\n\t del data_df[i]\n\n\treturn data_df\n\ndef format_hidden_features(file_path, timelines, mapping):\n\tloaded = np.load(file_path)\n\n\tmost_recent = mapping.sort_values([\"subject_id\", \"ordering_date\"], ascending=False).drop_duplicates(\"subject_id\", keep=\"first\")\n\treport_ids = list(most_recent['report_id'])\n\n\tmutable_file = {} \n\tfor id in report_ids:\n\t mutable_file[id] = loaded[id].flatten()\n\tloaded = mutable_file\n\n\tlabel_features = pd.DataFrame(loaded.values(), index=loaded)\n\n\tcols = list(label_features.columns)\n\txcols = [\"x\" + str(i) for i in cols]\n\trename_dict = dict(zip(cols,xcols))\n\trename_dict[\"index\"] = \"report_id\"\n\n\tlabel_features = label_features.reset_index().rename(columns=rename_dict)\n\n\t#Connect subject to report\n\tdata_frames = [timelines, most_recent]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"subject_id\"), data_frames)\n\n\t#Connect report to labels\n\tdata_frames = [data_df, label_features]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"report_id\"), data_frames)\n\n\tfor i in [\"ordering_date\", \"report_id\"]:\n\t del data_df[i]\n\n\treturn data_df\n\ndef format_hf_sequence(file_path, timelines, mapping):\n\tloaded = np.load(file_path)\n\t \n\ttop3_reports = mapping.sort_values([\"subject_id\", \"ordering_date\"], ascending=True).groupby(\"subject_id\").tail(3)\n\n\t#Create a list of report ids\n\treport_dict = top3_reports.groupby(\"subject_id\")[\"report_id\"].apply(list).to_dict()\n\n\t#Create a dict of report arrays. Format: key: array of report embeddings\n\tembedding_dict = defaultdict(list)\n\n\tfor k,v in report_dict.items():\n\t\tfor vi in v:\n\t\t embedding_dict[k].append(loaded[vi])\n\n\t\tembedding_dict[k] = np.vstack(embedding_dict[k])\n\n\t#Converting embedding dict into dataframe\n\tlabel_features = pd.DataFrame(embedding_dict.values(), index=embedding_dict)\n\n\tlabel_features[0] = label_features[0].apply(lambda x: add_paddings(x))\n\n\tlist2d = label_features[0]\n\n\tmerged = list(itertools.chain(*list2d))\n\n\tscaler = StandardScaler()\n\tscaler.fit(merged)\n\n\tlabel_features[0] = label_features[0].apply(lambda x: scaler.transform(x))\n\n\tcols = list(label_features.columns)\n\txcols = [\"x\" + str(i) for i in cols]\n\trename_dict = dict(zip(cols,xcols))\n\n\tlabel_features = label_features.rename(columns=rename_dict)\n\tlabel_features = label_features.reset_index().rename(columns={\"index\": \"subject_id\"})\n\n\tdata_frames = [timelines, label_features]\n\tdata_df = reduce(lambda left,right: pd.merge(left,right,on=\"subject_id\"), data_frames)\n\n\treturn data_df\n" ]
[ [ "numpy.vstack", "numpy.load", "pandas.read_csv", "pandas.merge", "sklearn.preprocessing.StandardScaler" ] ]
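Editor's note: the extract_features.py record above relies on helpers pulled in by `from utils import *` (reduce, reformat4pycox, add_paddings), so it only runs inside its repo. A hedged sketch of how format_labels appears intended to be called, run from the repo root; the file name and column values below are invented, and "label_features.csv" is assumed to carry a report_id column.

import pandas as pd
from extract_features import format_labels

# Per-subject survival timelines and the subject -> report mapping
# (most recent report per subject is selected by ordering_date).
timelines = pd.DataFrame({"subject_id": [1], "duration": [120.0], "event": [1]})
mapping = pd.DataFrame({
    "subject_id": [1, 1],
    "report_id": ["r1", "r2"],
    "ordering_date": ["2019-01-01", "2019-06-01"],
})
features = format_labels("label_features.csv", timelines, mapping)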
derekmpham/mindmeld
[ "18189f956e4e3eb92df61fde95ec82f73b9efa91" ]
[ "mindmeld/converter/dialogflow.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This module contains the DialogflowConverter class used to convert Dialogflow projects\ninto Mindmeld projects\"\"\"\n\nimport json\nimport logging\nimport os\nimport re\n\nfrom sklearn.model_selection import train_test_split\n\nfrom mindmeld.converter.converter import Converter\n\nlogger = logging.getLogger(__name__)\n\n\nclass DialogflowConverter(Converter):\n \"\"\"The class is a sub class of the abstract Converter class. This class\n contains the methods required to convert a Dialogflow project into a MindMeld project\n \"\"\"\n\n sys_entity_map = {\n \"@sys.date-time\": \"sys_interval\",\n \"@sys.date\": \"sys_time\",\n \"@sys.date-period\": \"sys_interval\",\n \"@sys.time\": \"sys_time\",\n \"@sys.time-period\": \"sys_duration\",\n \"@sys.duration\": \"sys_duration\",\n \"@sys.number\": \"sys_number\",\n \"@sys.cardinal\": \"sys_number\",\n \"@sys.ordinal\": \"sys_ordinal\",\n \"@sys.unit-currency\": \"sys_amount-of-money\",\n \"@sys.unit-volume\": \"sys_volume\",\n \"@sys.email\": \"sys_email\",\n \"@sys.phone-number\": \"sys_phone-number\",\n \"@sys.url\": \"sys_url\",\n }\n\n # TODO: provide support for entities listed in sys_entity_map_todo\n sys_entity_map_todo = [\n \"@sys.number-integer\",\n \"@sys.number-sequence\",\n \"@sys.flight-number\",\n \"@sys.unit-area\",\n \"@sys.unit-length\",\n \"@sys.unit-speed\",\n \"@sys.unit-information\",\n \"@sys.percentage\",\n \"@sys.temperature\",\n \"@sys.duration\",\n \"@sys.age\",\n \"@sys.currency-name\",\n \"@sys.unit-area-name\",\n \"@sys.unit-length-name\",\n \"@sys.unit-speed-name\",\n \"@sys.unit-volume-name\",\n \"@sys.unit-weight-name\",\n \"@sys.unit-information-name\",\n \"@sys.address\",\n \"@sys.zip-code\",\n \"@sys.geo-capital\",\n \"@sys.geo-country\",\n \"@sys.geo-country-code\",\n \"@sys.geo-city\",\n \"@sys.geo-state\",\n \"@sys.geo-city\",\n \"@sys.geo-state\",\n \"@sys.place-attraction\",\n \"@sys.airport\",\n \"@sys.location\",\n \"@sys.given-name\",\n \"@sys.last-name\",\n \"@sys.person\",\n \"@sys.music-artist\",\n \"@sys.music-genre\",\n \"@sys.color\",\n \"@sys.language\",\n \"@sys.any\",\n ]\n\n def __init__(self, dialogflow_project_directory, mindmeld_project_directory):\n if os.path.exists(os.path.dirname(dialogflow_project_directory)):\n self.dialogflow_project_directory = dialogflow_project_directory\n self.mindmeld_project_directory = mindmeld_project_directory\n self.directory = os.path.dirname(os.path.realpath(__file__))\n self.entities_list = set()\n self.intents_list = set()\n else:\n msg = \"`{dialogflow_project_directory}` does not exist. 
Please verify.\"\n msg = msg.format(dialogflow_project_directory=dialogflow_project_directory)\n raise FileNotFoundError(msg)\n\n def create_mindmeld_directory(self):\n self.create_directory(self.mindmeld_project_directory)\n self.create_directory(os.path.join(self.mindmeld_project_directory, \"data\"))\n self.create_directory(os.path.join(self.mindmeld_project_directory, \"domains\"))\n self.create_directory(\n os.path.join(self.mindmeld_project_directory, \"domains\", \"general\")\n )\n self.create_directory(os.path.join(self.mindmeld_project_directory, \"entities\"))\n\n # =========================\n # create training data (entities, intents)\n # =========================\n\n def _create_entities_directories(self, entities):\n \"\"\" Creates directories + files for all languages/files.\n Currently does not use meta data in entityName.json files (the keys in var entities).\n \"\"\"\n for languages in entities.values():\n for sub in languages.values():\n dialogflow_entity_file = os.path.join(\n self.dialogflow_project_directory, \"entities\", sub + \".json\"\n )\n\n mindmeld_entity_directory_name = self.clean_check(\n sub, self.entities_list\n )\n\n mindmeld_entity_directory = os.path.join(\n self.mindmeld_project_directory,\n \"entities\",\n mindmeld_entity_directory_name,\n )\n\n self.create_directory(mindmeld_entity_directory)\n\n self._create_entity_file(\n dialogflow_entity_file, mindmeld_entity_directory\n )\n\n @staticmethod\n def _create_entity_file(dialogflow_entity_file, mindmeld_entity_directory):\n source_en = open(dialogflow_entity_file, \"r\")\n target_gazetteer = open(\n os.path.join(mindmeld_entity_directory, \"gazetteer.txt\"), \"w\"\n )\n target_mapping = open(\n os.path.join(mindmeld_entity_directory, \"mapping.json\"), \"w\"\n )\n\n datastore = json.load(source_en)\n mapping_dict = {\"entities\": []}\n\n for item in datastore:\n new_dict = {}\n while (\"value\" in item) and (item[\"value\"] in item[\"synonyms\"]):\n item[\"synonyms\"].remove(item[\"value\"])\n new_dict[\"whitelist\"] = item[\"synonyms\"]\n new_dict[\"cname\"] = item[\"value\"]\n mapping_dict[\"entities\"].append(new_dict)\n\n target_gazetteer.write(item[\"value\"] + \"\\n\")\n\n json.dump(mapping_dict, target_mapping, ensure_ascii=False, indent=2)\n\n source_en.close()\n target_gazetteer.close()\n target_mapping.close()\n\n def _create_intents_directories(self, intents):\n \"\"\" Creates directories + files for all languages/files.\"\"\"\n\n for languages in intents.values():\n for language, sub in languages.items():\n dialogflow_intent_file = os.path.join(\n self.dialogflow_project_directory, \"intents\", sub + \".json\"\n )\n\n mindmeld_intent_directory_name = self.clean_check(\n sub, self.intents_list\n )\n mindmeld_intent_directory = os.path.join(\n self.mindmeld_project_directory,\n \"domains\",\n \"general\",\n mindmeld_intent_directory_name,\n )\n\n self.create_directory(mindmeld_intent_directory)\n\n self._create_intent_file(\n dialogflow_intent_file, mindmeld_intent_directory, language\n )\n\n def _create_intent_file(\n self, dialogflow_intent_file, mindmeld_intent_directory, language\n ):\n source_en = open(dialogflow_intent_file, \"r\")\n target_test = open(os.path.join(mindmeld_intent_directory, \"test.txt\"), \"w\")\n target_train = open(os.path.join(mindmeld_intent_directory, \"train.txt\"), \"w\")\n\n datastore = json.load(source_en)\n all_text = []\n\n for usersay in datastore:\n sentence = \"\"\n for texts in usersay[\"data\"]:\n df_text = texts[\"text\"]\n if \"meta\" in texts and 
texts[\"meta\"] != \"@sys.ignore\":\n df_meta = texts[\"meta\"]\n\n if re.match(\n \"(@sys.).+\", df_meta\n ): # if text is a dialogflow sys entity\n if df_meta in DialogflowConverter.sys_entity_map:\n mm_meta = DialogflowConverter.sys_entity_map[df_meta]\n else:\n mm_meta = \"[DNE: {sysEntity}]\".format(sysEntity=df_meta[1:])\n logger.info(\n \"Unfortunately mindmeld does not currently support\"\n \"%s as a sys entity.\"\n \"Please create an entity for this.\",\n df_meta[1:],\n )\n\n entity_type = self.clean_name(mm_meta) + \"_entries_\" + language\n part = \"{\" + df_text + \"|\" + entity_type + \"}\"\n else:\n entity_type = (\n self.clean_name(df_meta[1:]) + \"_entries_\" + language\n )\n part = \"{\" + df_text + \"|\" + entity_type + \"}\"\n else:\n part = df_text\n\n sentence += part\n all_text.append(sentence)\n\n train, test = train_test_split(all_text, test_size=0.2)\n\n target_test.write(\"\\n\".join(test))\n target_train.write(\"\\n\".join(train))\n\n source_en.close()\n target_test.close()\n target_train.close()\n\n def _get_file_names(self, level):\n \"\"\" Gets the names of the entities from Dialogflow as a dictionary.\n levels (str): either \"entities\" or \"intents\"\n\n ex. if we had the following files in our entities directory:\n [\"test.json\", \"test_entries_en.json\", \"test_entries_de.json\"]\n it returns:\n {'test': {'en': 'test_entries_en', 'de': 'test_entries_de'}} \"\"\"\n\n directory = os.path.join(self.dialogflow_project_directory, level)\n files = os.listdir(directory)\n\n w = {\"entities\": \"entries\", \"intents\": \"usersays\"}\n p = r\".+(?<=(_\" + w[level] + \"_))(.*)(?=(.json))\"\n\n info = {}\n for name in files:\n match = re.match(p, name)\n\n if match:\n isbase = False\n base = name[: match.start(1)]\n language = match.group(2)\n else:\n isbase = True\n base = name[:-5]\n\n if base not in info:\n info[base] = {}\n\n if not isbase:\n info[base][language] = name[:-5]\n\n return info\n\n def create_mindmeld_training_data(self):\n entities = self._get_file_names(\"entities\")\n self._create_entities_directories(entities)\n\n intents = self._get_file_names(\"intents\")\n self._create_intents_directories(intents)\n\n # =========================\n # create init\n # =========================\n\n @staticmethod\n def create_handle(params):\n return \"@app.handle(\" + params + \")\"\n\n @staticmethod\n def create_header(function_name):\n return \"def \" + function_name + \"(request, responder):\"\n\n @staticmethod\n def create_function(handles, function_name, replies):\n assert isinstance(handles, list)\n\n result = \"\"\n for handle in handles:\n result += DialogflowConverter.create_handle(handle) + \"\\n\"\n result += DialogflowConverter.create_header(function_name) + \"\\n\"\n result += \" \" + \"replies = {}\".format(replies) + \"\\n\"\n result += \" \" + \"responder.reply(replies)\"\n return result\n\n @staticmethod\n def clean_name(name):\n \"\"\" Takes in a string and returns a valid folder name (no spaces, all lowercase).\"\"\"\n name = re.sub(r\"[^\\w\\s-]\", \"\", name).strip().lower()\n name = re.sub(r\"[-\\s]+\", \"_\", name)\n return name\n\n def clean_check(self, name, lst):\n \"\"\" Takes in a list of strings and a name.\n Returns name cleaned if the cleaned name is not found in lst.\"\"\"\n cleaned = self.clean_name(name)\n\n if cleaned not in lst:\n lst.add(cleaned)\n return cleaned\n else:\n logger.error(\n \"%s name has been created twice. 
Please ensure there \"\n \"are no duplicate names in the dialogflow files and \"\n \"filenames are valid (no spaces or special characters)\",\n cleaned,\n )\n\n def create_mindmeld_init(self):\n with open(\n os.path.join(self.mindmeld_project_directory, \"__init__.py\"), \"w\"\n ) as target:\n begin_info = [\n \"# -*- coding: utf-8 -*-\",\n '\"\"\"This module contains the MindMeld application\"\"\"',\n \"from mindmeld import Application\",\n \"app = Application(__name__)\",\n \"__all__ = ['app']\",\n ]\n\n for info, spacing in zip(begin_info, [1, 2, 1, 1, 0]):\n target.write(info + \"\\n\" * spacing)\n\n intents = self._get_file_names(\"intents\")\n\n for i, main in enumerate(intents.keys()):\n\n df_main = os.path.join(\n self.dialogflow_project_directory, \"intents\", main + \".json\"\n )\n\n with open(df_main) as source:\n if \"usersays\" in df_main:\n logger.error(\n \"Please check if your intent file\"\n \"names are correctly labeled.\"\n )\n\n datastore = json.load(source)\n replies = []\n\n for response in datastore[\"responses\"]:\n for message in response[\"messages\"]:\n language = message[\"lang\"]\n\n if \"speech\" in message:\n data = message[\"speech\"]\n\n replies = data if isinstance(data, list) else [data]\n\n if datastore[\"fallbackIntent\"]:\n function_name = \"default\" + \"_\" + language\n if language == \"en\":\n # TODO: support multiple defaults for languages\n handles = [\n \"default=True\",\n \"intent='unsupported'\",\n ]\n else:\n handles = [\"intent='unsupported'\"]\n else:\n function_name = \"renameMe\" + str(i) + \"_\" + language\n handles = [\n \"intent=\"\n + \"'\"\n + self.clean_name(datastore[\"name\"])\n + \"_usersays_\"\n + language\n + \"'\"\n ]\n\n target.write(\n \"\\n\\n\\n\"\n + self.create_function(\n handles=handles,\n function_name=function_name,\n replies=replies,\n )\n )\n target.write(\"\\n\")\n\n # =========================\n # convert project\n # =========================\n\n def convert_project(self):\n \"\"\" Converts a Dialogflow project into a MindMeld project.\n\n Dialogflow projects consist of entities and intents.\n note on languages:\n Dialogflow supports multiple languages and locales. They store their training\n data for different languages in different files. So, the name of each training\n file ends with a meta tag, two letters long for language, and an additional\n two letters for dialect (if applicable). For example, a file ending in \"_en-au\"\n indicates it's in English (Australia). Below we use \"la\" to represent this\n meta tag.\n\n entities folder contains:\n entityName.json - Meta data about entityName for all languages.\n entityName_entries_la.json - One for each language, contains entitiy mappings.\n\n intents folder contain:\n intentName.json - Contains rules, information about conversation flow, meta data.\n Contains previously mentioned information and responses for all languages.\n intentName_usersays_la.json - one for each language,\n contains training data to recognize intentName\n\n Limitations:\n - The converter is unable to create an entity when it encounters an\n unrecognized entity (an entity not defined under entities folder\n or system entities), and labels such entities as DNE in training data.\n - The converter currently does not automatically convert features like\n slot filling, contexts, and follow-up intents. Users can still implement such\n features and more.\n - Information in agent.json are not copied over.\n - There is no official support for different languages. Users can still\n implement this. 
The converter is able to successfully convert dialogflow\n bots that support multiple languages.\n\n Mindmeld:\n - Users can store data locally\n - Users can build a knowledge base (currently beta in Dialogflow).\n - Users can configure the machine learning models to best suit their needs.\n - Users have more flexibility in defining their own features, including\n ones like slot filling, contexts, and follow-up intents.\n \"\"\"\n\n logger.info(\"Converting project.\")\n\n # Create project directory with sub folders\n self.create_mindmeld_directory()\n\n # Transfer over test data from Dialogflow project and reformat to Mindmeld project\n self.create_mindmeld_training_data()\n file_loc = os.path.dirname(os.path.realpath(__file__))\n\n self.create_config(self.mindmeld_project_directory, file_loc)\n self.create_main(self.mindmeld_project_directory, file_loc)\n self.create_mindmeld_init()\n\n logger.info(\"Project converted.\")\n" ]
[ [ "sklearn.model_selection.train_test_split" ] ]
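Editor's note: for the Dialogflow-to-MindMeld converter above, the documented driver is convert_project; the directory names below are illustrative. Note the constructor checks os.path.dirname of the input path, so a path with an explicit parent ("./...") is used here.

from mindmeld.converter.dialogflow import DialogflowConverter

converter = DialogflowConverter(
    dialogflow_project_directory="./DialogflowExport",  # unzipped Dialogflow export
    mindmeld_project_directory="./converted_mm_app",
)
converter.convert_project()  # writes domains/, entities/, __init__.py, config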
offy284/Keras-GAN
[ "6652c626ba584ffd1c25ca4e925e6f131077395c" ]
[ "music_preprocessor/music_preprocessor.py" ]
[ "import itertools\nimport shutil\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nfrom tqdm import tqdm\nimport numpy as np\nimport scipy\nfrom scipy.io.wavfile import write, read\nfrom scipy.fftpack import fft\nfrom scipy import signal\nfrom scipy.fft import fftshift\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport matplotlib.pyplot as plt\n\nRESOLUTION_SCALE = 10\n\n\ndef flatten_dir(dir):\n print(\"Flattening MusicData directory...\")\n all_files = []\n dups = 0\n\n for root, _dirs, files in itertools.islice(os.walk(dir), 1, None):\n try:\n for filename in files:\n all_files.append(os.path.join(root, filename))\n except:\n dups += 1\n for filename in all_files:\n try:\n shutil.move(filename, dir)\n except:\n dups += 1\n\n print(f\"{dups} duplicate files removed\")\n\n\ndef generate_big_music(resolution_scale=RESOLUTION_SCALE):\n print(\"Generating big_music from MusicData directory...\")\n onlyfiles = [f for f in listdir(\"MusicData/\") if isfile(join(\"MusicData/\", f))]\n\n print(\"Normalizing big_music...\")\n square_size = 28 * resolution_scale\n big_music = np.empty((1)) # np.empty((len(onlyfiles), square_size, square_size, 1))\n\n for i in tqdm(range(len(onlyfiles))):\n file = onlyfiles[i]\n if \"-converted\" in file:\n x = scipy.io.wavfile.read(f\"MusicData/{file}\")\n x = x[1]\n\n #big_music = big_music.reshape(-1)\n\n '''\n print(f\"Building spectrogram...\")\n \n plt.specgram(x, Fs=44100)\n plt.savefig(f'MusicImageData/{file}.png')\n \n x = x.reshape(-1, 1)\n\n min_max_scaler = MinMaxScaler()\n x = (min_max_scaler.fit_transform(x) - .5) * 2\n\n samples = list(np.empty((int(x.shape[0] / square_size / square_size), square_size, square_size, 1)))\n rows = np.zeros((square_size, square_size, 1))\n cols = np.zeros((square_size, 1))\n\n for samplei in tqdm(range(len(samples))):\n for yi in range(square_size):\n for xi in range(square_size):\n cols[xi] = x[xi + yi * square_size + samplei * square_size * square_size]\n rows[yi] = cols\n samples[samplei] = rows\n '''\n\n print(\"Numpyifying x...\")\n big_music = np.concatenate([big_music, x])\n\n print(f\"big_music is of shape {big_music.shape}\")\n\n freqs, times, spectrogram = signal.spectrogram(big_music, 44100)\n spectrogram = spectrogram.reshape((spectrogram.shape[1], spectrogram.shape[0]))\n\n print(spectrogram.shape)\n\n filename = f\"spectrogram.npy\"\n print(f\"Saving {filename}...\")\n np.save(f\"{filename}\", spectrogram)\n\n filename = f\"freqs.npy\"\n print(f\"Saving {filename}...\")\n np.save(f\"{filename}\", freqs)\n\n filename = f\"times.npy\"\n print(f\"Saving {filename}...\")\n np.save(f\"{filename}\", times)\n\n\nif __name__ == '__main__':\n print(\"Music Preprocessor v0.1\")\n #flatten_dir()\n generate_big_music()" ]
[ [ "numpy.save", "numpy.empty", "scipy.signal.spectrogram", "scipy.io.wavfile.read", "numpy.concatenate" ] ]
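Editor's note: the music_preprocessor.py record above is a script driven by its __main__ block. A hedged invocation sketch, assuming it is run from the music_preprocessor/ folder with a MusicData/ directory of "-converted" WAV files. Two caveats visible in the source: flatten_dir requires a directory argument (the commented-out call in __main__ passes none), and generate_big_music seeds big_music with np.empty((1)), so one uninitialized sample precedes the audio.

from music_preprocessor import flatten_dir, generate_big_music

flatten_dir("MusicData/")                # flatten nested MusicData/ tree in place
generate_big_music(resolution_scale=10)  # writes spectrogram.npy, freqs.npy, times.npy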
Didou09/tofu
[ "4a4e1f058bab8e7556ed9d518f90807cec605476" ]
[ "tofu/geom/_core_optics.py" ]
[ "\n\"\"\"\nThis module is the geometrical part of the ToFu general package\nIt includes all functions and object classes necessary for tomography on Tokamaks\n\"\"\"\n\n# Built-in\nimport sys\nimport os\nimport warnings\nimport copy\n\n\n# Common\nimport numpy as np\nimport scipy.interpolate as scpinterp\nimport scipy.stats as scpstats\nimport datetime as dtm\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n# ToFu-specific\nfrom tofu import __version__ as __version__\nimport tofu.pathfile as tfpf\nimport tofu.utils as utils\nfrom . import _def as _def\nfrom . import _GG as _GG\nfrom . import _core\nfrom . import _check_optics\nfrom . import _comp_optics as _comp_optics\nfrom . import _plot_optics as _plot_optics\nimport tofu.spectro._rockingcurve as _rockingcurve\n\n\n__all__ = ['CrystalBragg']\n\n\n_Type = 'Tor'\n_NTHREADS = 16\n\n# rotate / translate instance\n_RETURN_COPY = False\n_USE_NON_PARALLELISM = True\n\n\n\"\"\"\n###############################################################################\n###############################################################################\n Ves class and functions\n###############################################################################\n###############################################################################\n\"\"\"\n\n\nclass CrystalBragg(utils.ToFuObject):\n \"\"\" A class defining crystals for Bragg diffraction\n\n A crystal can be of Type flat, cylindrical or spherical\n It is characterized by its:\n - geometry (Type, dimensions, curvature radii and position/orientation)\n - Material and lattice\n - Bragg parameters (angle vs lambda)\n\n\n Parameters\n ----------\n Id : str / tfpf.ID\n A name string or a pre-built tfpf.ID class to be used to identify this\n particular instance, if a string is provided, it is fed to tfpf.ID()\n dgeom : dict\n An array (2,N) or (N,2) defining the contour of the vacuum vessel in a\n cross-section, if not closed, will be closed automatically\n dspectral: str\n Flag indicating whether the vessel will be a torus ('Tor') or a linear\n device ('Lin')\n SavePath : None / str\n If provided, forces the default saving path of the object to the\n provided value\n\n \"\"\"\n\n # Fixed (class-wise) dictionary of default properties\n _ddef = {\n 'Id': {\n 'shot': 0, 'Exp': 'dummy', 'Diag': 'dummy',\n 'include': [\n 'Mod', 'Cls', 'Exp', 'Diag', 'Name', 'shot', 'version',\n ],\n },\n 'dgeom': {'Type': 'sph', 'Typeoutline': 'rect'},\n 'dmat': {},\n 'dbragg': {'braggref': np.pi/4.},\n 'dmisc': {'color': 'k'},\n }\n _dplot = {'cross':{'Elt':'P',\n 'dP':{'color':'k','lw':2},\n 'dI':{'color':'k','ls':'--','marker':'x','ms':8,'mew':2},\n 'dBs':{'color':'b','ls':'--','marker':'x','ms':8,'mew':2},\n 'dBv':{'color':'g','ls':'--','marker':'x','ms':8,'mew':2},\n 'dVect':{'color':'r','scale':10}},\n 'hor':{'Elt':'P',\n 'dP':{'color':'k','lw':2},\n 'dI':{'color':'k','ls':'--'},\n 'dBs':{'color':'b','ls':'--'},\n 'dBv':{'color':'g','ls':'--'},\n 'Nstep':50},\n '3d':{}}\n # _DEFLAMB = 3.971561e-10\n # _DEFNPEAKS = 12\n # _DREFLECT_DTYPES = {'specular':0, 'diffusive':1, 'ccube':2}\n\n\n # Does not exist beofre Python 3.6 !!!\n def __init_subclass__(cls, color='k', **kwdargs):\n # Python 2\n super(CrystalBragg,cls).__init_subclass__(**kwdargs)\n # Python 3\n #super().__init_subclass__(**kwdargs)\n cls._ddef = copy.deepcopy(CrystalBragg._ddef)\n cls._dplot = copy.deepcopy(CrystalBragg._dplot)\n cls._set_color_ddef(cls._color)\n\n @classmethod\n def _set_color_ddef(cls, color):\n cls._ddef['dmisc']['color'] = 
mpl.colors.to_rgba(color)\n\n def __init__(self, dgeom=None, dmat=None, dbragg=None,\n Id=None, Name=None, Exp=None, Diag=None, shot=None,\n fromdict=None, sep=None,\n SavePath=os.path.abspath('./'),\n SavePath_Include=tfpf.defInclude, color=None):\n\n # To replace __init_subclass__ for Python 2\n if sys.version[0]=='2':\n self._dstrip = utils.ToFuObjectBase._dstrip.copy()\n self.__class__._strip_init()\n\n # Create a dplot at instance level\n self._dplot = copy.deepcopy(self.__class__._dplot)\n\n kwdargs = locals()\n del kwdargs['self']\n # super()\n super(CrystalBragg,self).__init__(**kwdargs)\n\n def _reset(self):\n # super()\n super(CrystalBragg,self)._reset()\n self._dgeom = dict.fromkeys(self._get_keys_dgeom())\n self._dmat = dict.fromkeys(self._get_keys_dmat())\n self._dbragg = dict.fromkeys(self._get_keys_dbragg())\n self._dmisc = dict.fromkeys(self._get_keys_dmisc())\n #self._dplot = copy.deepcopy(self.__class__._ddef['dplot'])\n\n @classmethod\n def _checkformat_inputs_Id(cls, Id=None, Name=None,\n Exp=None, Diag=None, shot=None, Type=None,\n include=None,\n **kwdargs):\n if Id is not None:\n assert isinstance(Id,utils.ID)\n Name, Exp, Type = Id.Name, Id.Exp, Id.Type\n if Type is None:\n Type = cls._ddef['dgeom']['Type']\n if Exp is None:\n Exp = cls._ddef['Id']['Exp']\n if Diag is None:\n Diag = cls._ddef['Id']['Diag']\n if shot is None:\n shot = cls._ddef['Id']['shot']\n if include is None:\n include = cls._ddef['Id']['include']\n\n dins = {'Name':{'var':Name, 'cls':str},\n 'Exp': {'var':Exp, 'cls':str},\n 'Diag': {'var':Diag, 'cls':str},\n 'shot': {'var':shot, 'cls':int},\n 'Type': {'var':Type, 'in':['sph']},\n 'include':{'var':include, 'listof':str}}\n dins, err, msg = cls._check_InputsGeneric(dins)\n if err:\n raise Exception(msg)\n\n kwdargs.update({'Name':Name, 'shot':shot,\n 'Exp':Exp, 'Diag':Diag, 'Type':Type,\n 'include':include})\n return kwdargs\n\n ###########\n # Get largs\n ###########\n\n @staticmethod\n def _get_largs_dgeom(sino=True):\n largs = ['dgeom']\n return largs\n\n @staticmethod\n def _get_largs_dmat():\n largs = ['dmat']\n return largs\n\n @staticmethod\n def _get_largs_dbragg():\n largs = ['dbragg']\n return largs\n\n @staticmethod\n def _get_largs_dmisc():\n largs = ['color']\n return largs\n\n ###########\n # Get keys of dictionnaries\n ###########\n\n @staticmethod\n def _get_keys_dgeom():\n lk = ['Type', 'Typeoutline',\n 'summit', 'center', 'extenthalf', 'surface',\n 'nin', 'nout', 'e1', 'e2', 'rcurve',\n 'move', 'move_param', 'move_kwdargs']\n return lk\n\n @staticmethod\n def _get_keys_dmat():\n lk = ['formula', 'density', 'symmetry',\n 'lengths', 'angles', 'cut', 'd',\n 'alpha', 'beta', 'nin', 'nout', 'e1', 'e2']\n return lk\n\n @staticmethod\n def _get_keys_dbragg():\n lk = ['rockingcurve', 'lambref', 'braggref']\n return lk\n\n @staticmethod\n def _get_keys_dmisc():\n lk = ['color']\n return lk\n\n ###########\n # _init\n ###########\n\n def _init(self, dgeom=None, dmat=None, dbragg=None,\n color=None, **kwdargs):\n allkwds = dict(locals(), **kwdargs)\n largs = self._get_largs_dgeom()\n kwds = self._extract_kwdargs(allkwds, largs)\n self.set_dgeom(**kwds)\n largs = self._get_largs_dmat()\n kwds = self._extract_kwdargs(allkwds, largs)\n self.set_dmat(**kwds)\n largs = self._get_largs_dbragg()\n kwds = self._extract_kwdargs(allkwds, largs)\n self.set_dbragg(**kwds)\n largs = self._get_largs_dmisc()\n kwds = self._extract_kwdargs(allkwds, largs)\n self._set_dmisc(**kwds)\n self._dstrip['strip'] = 0\n\n ###########\n # set dictionaries\n 
###########\n\n def set_dgeom(self, dgeom=None):\n self._dgeom = _check_optics._checkformat_dgeom(\n dgeom=dgeom, ddef=self._ddef['dgeom'],\n valid_keys=self._get_keys_dgeom(),\n )\n if self._dgeom['move'] is not None:\n self.set_move(\n move=self._dgeom['move'],\n param=self._dgeom['move_param'],\n **self._dgeom['move_kwdargs'],\n )\n\n def set_dmat(self, dmat=None):\n self._dmat = _check_optics._checkformat_dmat(\n dmat=dmat, dgeom=self._dgeom,\n ddef=self._ddef['dmat'],\n valid_keys=self._get_keys_dmat()\n )\n\n def set_dbragg(self, dbragg=None):\n self._dbragg = _check_optics._checkformat_dbragg(\n dbragg=dbragg,\n ddef=self._ddef['dbragg'],\n valid_keys=self._get_keys_dbragg(),\n dmat=self._dmat,\n )\n\n def _set_color(self, color=None):\n color = _check_optics._checkformat_inputs_dmisc(\n color=color, ddef=self._ddef,\n )\n self._dmisc['color'] = color\n self._dplot['cross']['dP']['color'] = color\n self._dplot['hor']['dP']['color'] = color\n # self._dplot['3d']['dP']['color'] = color\n\n def _set_dmisc(self, color=None):\n self._set_color(color)\n\n ###########\n # strip dictionaries\n ###########\n\n def _strip_dgeom(self, lkeep=None):\n lkeep = self._get_keys_dgeom()\n utils.ToFuObject._strip_dict(self._dgeom, lkeep=lkeep)\n\n def _strip_dmat(self, lkeep=None):\n lkeep = self._get_keys_dmat()\n utils.ToFuObject._strip_dict(self._dmat, lkeep=lkeep)\n\n def _strip_dbragg(self, lkeep=None):\n lkeep = self._get_keys_dbragg()\n utils.ToFuObject._strip_dict(self._dbragg, lkeep=lkeep)\n\n def _strip_dmisc(self, lkeep=['color']):\n utils.ToFuObject._strip_dict(self._dmisc, lkeep=lkeep)\n\n ###########\n # rebuild dictionaries\n ###########\n\n def _rebuild_dgeom(self, lkeep=None):\n lkeep = self._get_keys_dgeom()\n reset = utils.ToFuObject._test_Rebuild(self._dgeom, lkeep=lkeep)\n if reset:\n utils.ToFuObject._check_Fields4Rebuild(self._dgeom,\n lkeep=lkeep, dname='dgeom')\n self._set_dgeom(dgeom=self._dgeom)\n\n def _rebuild_dmat(self, lkeep=None):\n lkeep = self._get_keys_dmat()\n reset = utils.ToFuObject._test_Rebuild(self._dmat, lkeep=lkeep)\n if reset:\n utils.ToFuObject._check_Fields4Rebuild(self._dmat,\n lkeep=lkeep, dname='dmat')\n self.set_dmat(self._dmat)\n\n def _rebuild_dbragg(self, lkeep=None):\n lkeep = self._get_keys_dbragg()\n reset = utils.ToFuObject._test_Rebuild(self._dbragg, lkeep=lkeep)\n if reset:\n utils.ToFuObject._check_Fields4Rebuild(self._dbragg,\n lkeep=lkeep, dname='dbragg')\n self.set_dbragg(self._dbragg)\n\n def _rebuild_dmisc(self, lkeep=['color']):\n reset = utils.ToFuObject._test_Rebuild(self._dmisc, lkeep=lkeep)\n if reset:\n utils.ToFuObject._check_Fields4Rebuild(self._dmisc,\n lkeep=lkeep, dname='dmisc')\n self._set_dmisc(color=self.dmisc['color'])\n\n ###########\n # _strip and get/from dict\n ###########\n\n @classmethod\n def _strip_init(cls):\n cls._dstrip['allowed'] = [0,1]\n nMax = max(cls._dstrip['allowed'])\n doc = \"\"\"\n 1: Remove nothing\"\"\"\n doc = utils.ToFuObjectBase.strip.__doc__.format(doc,nMax)\n if sys.version[0]=='2':\n cls.strip.__func__.__doc__ = doc\n else:\n cls.strip.__doc__ = doc\n\n def strip(self, strip=0):\n # super()\n super(CrystalBragg, self).strip(strip=strip)\n\n def _strip(self, strip=0):\n if strip==0:\n self._rebuild_dgeom()\n self._rebuild_dmat()\n self._rebuild_dbragg()\n self._rebuild_dmisc()\n else:\n self._strip_dgeom()\n self._strip_dmat()\n self._strip_dbragg()\n self._strip_dmisc()\n\n def _to_dict(self):\n dout = {'dgeom':{'dict':self._dgeom, 'lexcept':None},\n 'dmat':{'dict':self._dmat, 
'lexcept':None},\n 'dbragg':{'dict':self._dbragg, 'lexcept':None},\n 'dmisc':{'dict':self._dmisc, 'lexcept':None},\n 'dplot':{'dict':self._dplot, 'lexcept':None}}\n return dout\n\n def _from_dict(self, fd):\n self._dgeom.update(**fd.get('dgeom', {}))\n self._dmat.update(**fd.get('dmat', {}))\n self._dbragg.update(**fd.get('dbragg', {}))\n self._dmisc.update(**fd.get('dmisc', {}))\n self._dplot.update(**fd.get('dplot', {}))\n\n # -----------\n # Properties\n # -----------\n\n @property\n def Type(self):\n \"\"\"Return the type of structure \"\"\"\n return self._Id.Type\n\n @property\n def dgeom(self):\n return self._dgeom\n\n @property\n def dmat(self):\n \"\"\"Return the polygon defining the structure cross-section\"\"\"\n return self._dmat\n\n @property\n def dbragg(self):\n \"\"\"Return the polygon defining the structure cross-section\"\"\"\n return self._dbragg\n\n @property\n def dmisc(self):\n return self._dmisc\n\n # @property\n # def nin(self):\n # return self._dgeom['nin']\n\n # @property\n # def nout(self):\n # return self._dgeom['nout']\n\n # @property\n # def e1(self):\n # return self._dgeom['e1']\n\n # @property\n # def e2(self):\n # return self._dgeom['e2']\n\n @property\n def summit(self):\n return self._dgeom['summit']\n\n @property\n def center(self):\n return self._dgeom['center']\n\n @property\n def ismobile(self):\n return self._dgeom['move'] not in [None, False]\n\n @property\n def rockingcurve(self):\n if self._dbragg.get('rockingcurve') is not None:\n if self._dbragg['rockingcurve'].get('type') is not None:\n return self._dbragg['rockingcurve']\n raise Exception(\"rockingcurve was not set!\")\n\n # --------------------------------------\n # methods for getting unit vectors basis\n # --------------------------------------\n\n def get_unit_vectors(self, use_non_parallelism=None):\n \"\"\" Return the unit vectors (direct orthonormal basis)\n\n Depending on:\n use_non_parallelism: True => return the geometrical basis\n use_non_parallelism: False => return the mesh basis\n\n \"\"\"\n if use_non_parallelism is None:\n use_non_parallelism = _USE_NON_PARALLELISM\n\n if use_non_parallelism is True:\n nout = self._dmat['nout']\n e1 = self._dmat['e1']\n e2 = self._dmat['e2']\n else:\n nout = self._dgeom['nout']\n e1 = self._dgeom['e1']\n e2 = self._dgeom['e2']\n return nout, e1, e2, use_non_parallelism\n\n # -----------------\n # methods for color\n # -----------------\n\n def set_color(self, col):\n self._set_color(col)\n\n def get_color(self):\n return self._dmisc['color']\n\n # -----------------\n # methods for printing\n # -----------------\n\n def get_summary(self, sep=' ', line='-', just='l',\n table_sep=None, verb=True, return_=False):\n \"\"\" Summary description of the object content \"\"\"\n\n # -----------------------\n # Build material\n col0 = [\n 'formula', 'symmetry', 'cut', 'density',\n 'd (A)',\n 'bragg({:9.6} A) (deg)'.format(self._dbragg['lambref']*1e10),\n 'Type', 'outline', 'surface (cm²)', 'rcurve', 'rocking curve',\n ]\n ar0 = [self._dmat['formula'], self._dmat['symmetry'],\n str(self._dmat['cut']), str(self._dmat['density']),\n '{0:5.3f}'.format(self._dmat['d']*1.e10),\n str(self._dbragg['braggref']*180./np.pi),\n self._dgeom['Type'], self._dgeom['Typeoutline'],\n '{0:5.1f}'.format(self._dgeom['surface']*1.e4),\n '{0:6.3f}'.format(self._dgeom['rcurve'])]\n try:\n ar0.append(self.rockingcurve['type'])\n except Exception as err:\n ar0.append('None')\n\n\n # -----------------------\n # Build geometry\n col1 = ['half-extent', 'summit', 'center', 'nout', 
'e1',\n 'alpha', 'beta']\n ar1 = [\n str(np.round(self._dgeom['extenthalf'], decimals=3)),\n str(np.round(self._dgeom['summit'], decimals=2)),\n str(np.round(self._dgeom['center'], decimals=2)),\n str(np.round(self._dmat['nout'], decimals=3)),\n str(np.round(self._dmat['e1'], decimals=3)),\n str(np.round(self._dmat['alpha'], decimals=6)),\n str(np.round(self._dmat['beta'], decimals=6)),\n ]\n if self._dgeom.get('move') not in [None, False]:\n col1 += ['move', 'param']\n ar1 += [self._dgeom['move'],\n str(np.round(self._dgeom['move_param'], decimals=5))]\n\n if self._dmisc.get('color') is not None:\n col1.append('color')\n ar1.append(str(self._dmisc['color']))\n\n lcol = [col0, col1]\n lar = [ar0, ar1]\n return self._get_summary(lar, lcol,\n sep=sep, line=line, table_sep=table_sep,\n verb=verb, return_=return_)\n # -----------------\n # methods for moving\n # -----------------\n\n def _update_or_copy(self, dgeom, pinhole=None,\n return_copy=None,\n name=None, diag=None, shot=None):\n if return_copy is None:\n return_copy = _RETURN_COPY\n for kk, vv in self._dgeom.items():\n if kk not in dgeom.keys():\n dgeom[kk] = vv\n if return_copy is True:\n if name is None:\n name = self.Id.Name + 'copy'\n if diag is None:\n diag = self.Id.Diag\n if shot is None:\n diag = self.Id.shot\n return self.__class__(dgeom=dgeom,\n dbragg=self._dbragg,\n dmat=self._dmat,\n color=self._dmisc['color'],\n Exp=self.Id.Exp,\n Diag=diag,\n Name=name,\n shot=shot,\n SavePath=self.Id.SavePath)\n else:\n dgeom0 = self.dgeom\n try:\n self.set_dgeom(dgeom=dgeom)\n self._dmat = _check_optics._checkformat_dmat(\n dmat={\n k0: v0 for k0, v0 in self._dmat.items()\n if k0 not in ['nin', 'nout', 'e1', 'e2']\n },\n dgeom=self._dgeom,\n ddef=self._ddef['dmat'],\n valid_keys=self._get_keys_dmat()\n )\n except Exception as err:\n # Make sure instance does not move\n self.set_dgeom(dgeom=dgeom0)\n msg = (str(err)\n + \"\\nAn exception occured during updating\\n\"\n + \" => instance unmoved\")\n raise Exception(msg)\n\n def _rotate_or_translate(self, func, **kwdargs):\n pts = np.array([self._dgeom['summit'], self._dgeom['center']]).T\n if 'rotate' in func.__name__:\n vect = np.array([\n self._dgeom['nout'],\n self._dgeom['e1'],\n self._dgeom['e2']\n ]).T\n pts, vect = func(pts=pts, vect=vect, **kwdargs)\n return {'summit': pts[:, 0], 'center': pts[:, 1],\n 'nout': vect[:, 0], 'nin': -vect[:, 0],\n 'e1': vect[:, 1], 'e2': vect[:, 2]}\n else:\n pts = func(pts=pts, **kwdargs)\n return {'summit': pts[:, 0], 'center': pts[:, 1]}\n\n def translate_in_cross_section(self, distance=None, direction_rz=None,\n phi=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n \"\"\" Translate the instance in the cross-section \"\"\"\n if phi is None:\n phi = np.arctan2(*self.summit[1::-1])\n msg = (\"Poloidal plane was not explicitely specified\\n\"\n + \" => phi set to self.summit's phi ({})\".format(phi))\n warnings.warn(msg)\n dgeom = self._rotate_or_translate(\n self._translate_pts_poloidal_plane,\n phi=phi, direction_rz=direction_rz, distance=distance)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n def translate_3d(self, distance=None, direction=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n \"\"\" Translate the instance in provided direction \"\"\"\n dgeom = self._rotate_or_translate(\n self._translate_pts_3d,\n direction=direction, distance=distance)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n def 
rotate_in_cross_section(self, angle=None, axis_rz=None,\n phi=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n \"\"\" Rotate the instance in the cross-section \"\"\"\n if phi is None:\n phi = np.arctan2(*self.summit[1::-1])\n msg = (\"Poloidal plane was not explicitely specified\\n\"\n + \" => phi set to self.summit's phi ({})\".format(phi))\n warnings.warn(msg)\n dgeom = self._rotate_or_translate(\n self._rotate_pts_vectors_in_poloidal_plane,\n axis_rz=axis_rz, angle=angle, phi=phi)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n def rotate_around_torusaxis(self, angle=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n \"\"\" Rotate the instance around the torus axis \"\"\"\n dgeom = self._rotate_or_translate(\n self._rotate_pts_vectors_around_torusaxis,\n angle=angle)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n def rotate_around_3daxis(self, angle=None, axis=None,\n return_copy=None,\n diag=None, name=None, shot=None):\n \"\"\" Rotate the instance around the provided 3d axis \"\"\"\n dgeom = self._rotate_or_translate(\n self._rotate_pts_vectors_around_3daxis,\n axis=axis, angle=angle)\n return self._update_or_copy(dgeom,\n return_copy=return_copy,\n diag=diag, name=name, shot=shot)\n\n def set_move(self, move=None, param=None, **kwdargs):\n \"\"\" Set the default movement parameters\n\n A default movement can be set for the instance, it can be any of the\n pre-implemented movement (rotations or translations)\n This default movement is the one that will be called when using\n self.move()\n\n Specify the type of movement via the name of the method (passed as a\n str to move)\n\n Specify, for the geometry of the instance at the time of defining this\n default movement, the current value of the associated movement\n parameter (angle / distance). This is used to set an arbitrary\n difference for user who want to use absolute position values\n The desired incremental movement to be performed when calling self.move\n will be deduced by substracting the stored param value to the provided\n param value. 
Just set the current param value to 0 if you don't care\n about a custom absolute reference.\n\n kwdargs must be a parameters relevant to the chosen method (axis,\n direction...)\n\n e.g.:\n self.set_move(move='rotate_around_3daxis',\n param=0.,\n axis=([0.,0.,0.], [1.,0.,0.]))\n self.set_move(move='translate_3d',\n param=0.,\n direction=[0.,1.,0.])\n \"\"\"\n move, param, kwdargs = self._checkformat_set_move(move, param, kwdargs)\n self._dgeom['move'] = move\n self._dgeom['move_param'] = param\n if isinstance(kwdargs, dict) and len(kwdargs) == 0:\n kwdargs = None\n self._dgeom['move_kwdargs'] = kwdargs\n\n def move(self, param):\n \"\"\" Set new position to desired param according to default movement\n\n Can only be used if default movement was set before\n See self.set_move()\n \"\"\"\n param = self._move(param, dictname='_dgeom')\n self._dgeom['move_param'] = param\n\n # -----------------\n # methods for rocking curve\n # -----------------\n\n def get_rockingcurve_func(self, lamb=None, n=None):\n \"\"\" Return the rocking curve function\n\n Also return the wavelength (lamb) (in meters) for which it was computed\n and the associated reference bragg angle (in rad)\n\n \"\"\"\n drock = self.rockingcurve\n if drock['type'] == 'tabulated-1d':\n if lamb is not None and lamb != drock['lamb']:\n msg = (\"rocking curve was tabulated only for:\\n\"\n + \"\\tlamb = {} m\\n\".format(lamb)\n + \" => Please let lamb=None\")\n raise Exception(msg)\n lamb = drock['lamb']\n bragg = self._checkformat_bragglamb(lamb=lamb, n=n)\n func = scpinterp.interp1d(drock['dangle'] + bragg, drock['value'],\n kind='linear', bounds_error=False,\n fill_value=0, assume_sorted=True)\n\n elif drock['type'] == 'tabulated-2d':\n lmin, lmax = drock['lamb'].min(), drock['lamb'].max()\n if lamb is None:\n lamb = drock['lamb']\n if lamb < lmin or lamb > lmax:\n msg = (\"rocking curve was tabulated only in interval:\\n\"\n + \"\\tlamb in [{}; {}] m\\n\".format(lmin, lmax)\n + \" => Please set lamb accordingly\")\n raise Exception(msg)\n bragg = self._checkformat_bragglamb(lamb=lamb, n=n)\n\n def func(angle, lamb=lamb, bragg=bragg, drock=drock):\n return scpinterp.interp2d(drock['dangle']+bragg, drock['lamb'],\n drock['value'], kind='linear',\n bounds_error=False, fill_value=0,\n assume_sorted=True)(angle, lamb)\n\n else:\n # TBC\n raise NotImplementedError\n def func(angle, d=d, delta_bragg=delta_bragg,\n Rmax=drock['Rmax'], sigma=drock['sigma']):\n core = sigma**2/((angle - (bragg+delta_bragg))**2 + sigma**2)\n if Rmax is None:\n return core/(sigma*np.pi)\n else:\n return Rmax*core\n return func, lamb, bragg\n\n def plot_rockingcurve(self, lamb=None, n=None, sigma=None,\n npts=None, color=None, ang_units=None,\n dmargin=None, fs=None, ax=None, legend=None):\n drock = self.rockingcurve\n func, lamb, bragg = self.get_rockingcurve_func(lamb=lamb, n=n)\n axtit = 'Rocking curve for ' + self.Id.Name\n return _plot_optics.CrystalBragg_plot_rockingcurve(\n func=func, bragg=bragg, lamb=lamb,\n sigma=sigma, npts=npts,\n ang_units=ang_units, axtit=axtit, color=color,\n fs=fs, ax=ax, legend=legend)\n\n def compute_rockingcurve(\n self, ih=None, ik=None, il=None, lamb=None,\n use_non_parallelism=None, na=None,\n alpha_limits=None,\n therm_exp=None, plot_therm_exp=None,\n plot_asf=None, plot_power_ratio=None,\n plot_asymmetry=None, plot_cmaps=None,\n verb=None, returnas=None,\n ):\n return _rockingcurve.compute_rockingcurve(\n ih=ih, ik=ik, il=il, lamb=lamb,\n use_non_parallelism=use_non_parallelism, na=na,\n alpha_limits=alpha_limits,\n 
therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,
            plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,
            plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,
            verb=verb, returnas=returnas,
        )

    def plot_var_temp_changes_wavelengths(
        self, ih=None, ik=None, il=None, lambdas=None,
        use_non_parallelism=None, na=None,
        alpha_limits=None,
        therm_exp=None, plot_therm_exp=None,
        plot_asf=None, plot_power_ratio=None,
        plot_asymmetry=None, plot_cmaps=None,
        quantity=None,
        curv_radius=None, pixel_size=None,
    ):
        return _rockingcurve.plot_var_temp_changes_wavelengths(
            ih=ih, ik=ik, il=il, lambdas=lambdas,
            use_non_parallelism=use_non_parallelism, na=na,
            alpha_limits=alpha_limits,
            therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,
            plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,
            plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,
            quantity=quantity,
            curv_radius=curv_radius, pixel_size=pixel_size,
        )

    # -----------------
    # methods for surface and contour sampling
    # -----------------

    def sample_outline_plot(self, use_non_parallelism=None, res=None):
        if self._dgeom['Type'] == 'sph':
            if self._dgeom['Typeoutline'] == 'rect':
                nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
                    use_non_parallelism=use_non_parallelism,
                )
                outline = _comp_optics.CrystBragg_sample_outline_plot_sphrect(
                    self._dgeom['summit'] - nout*self._dgeom['rcurve'],
                    nout,
                    e1,
                    e2,
                    self._dgeom['rcurve'],
                    self._dgeom['extenthalf'],
                    res,
                )
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError
        return outline

    # -----------------
    # methods for input checks and ray computation
    # -----------------

    def _checkformat_bragglamb(self, bragg=None, lamb=None, n=None):
        lc = [lamb is not None, bragg is not None]
        if not any(lc):
            lamb = self._dbragg['lambref']
            lc[0] = True
        assert np.sum(lc) == 1, "Provide lamb xor bragg!"
        if lc[0]:
            bragg = self.get_bragg_from_lamb(
                np.atleast_1d(lamb), n=n,
            )
        else:
            bragg = np.atleast_1d(bragg)
        return bragg

    def _checkformat_get_Rays_from(self, phi=None, bragg=None):
        assert phi is not None
        assert bragg is not None
        bragg = np.atleast_1d(bragg)
        phi = np.atleast_1d(phi)
        nrays = max(phi.size, bragg.size)
        if not phi.shape == bragg.shape:
            if phi.size == 1:
                phi = np.full(bragg.shape, phi[0])
            elif bragg.size == 1:
                bragg = np.full(phi.shape, bragg[0])
            else:
                msg = "phi and bragg/lamb must have the same shape!\n"
                msg += "    phi.shape:        %s\n" % str(phi.shape)
                msg += "    bragg/lamb.shape: %s\n" % str(bragg.shape)
                raise Exception(msg)
        return phi, bragg

    def _get_rays_from_cryst(
        self,
        phi=None, bragg=None,
        lamb=None, n=None,
        dtheta=None, psi=None,
        ntheta=None, npsi=None,
        use_non_parallelism=None,
        include_summit=None,
        grid=None,
    ):

        # Get phi, bragg
        bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb)
        phi, bragg = self._checkformat_get_Rays_from(phi=phi, bragg=bragg)
        # assert phi.ndim == 1

        # Get local summits, nout, e1, e2
        pts_start, nout, e1, e2 = self.get_local_noute1e2(
            dtheta=dtheta, psi=psi,
            use_non_parallelism=use_non_parallelism,
            ntheta=ntheta, npsi=npsi,
            include_summit=include_summit,
        )
        nin = -nout
        # reshape for broadcast
        if grid is True:
            nin = nin[..., None]
            e1 = e1[..., None]
            e2 = e2[..., None]
        else:
            assert bragg.shape == nin.shape[1:]

        # Compute start point (D) and unit vectors (us)
        vect = (
            np.sin(bragg)*nin
            + np.cos(bragg)*(np.cos(phi)*e1 + np.sin(phi)*e2)
        )
        return pts_start, 
vect\n\n def get_rays_from_cryst(\n self,\n phi=None, bragg=None,\n lamb=None, n=None,\n dtheta=None, psi=None,\n use_non_parallelism=None,\n ntheta=None, npsi=None,\n include_summit=None,\n det=None, config=None, length=None,\n returnas=None,\n return_xixj=None,\n grid=None,\n ):\n \"\"\" Return rays stemming from the crystal\n\n The rays are defined by a start point (on the crystal surface) and\n either an end point or a unit vector\n\n Start points\n ------------\n The start point is the crystal summit by default\n But that can be changed using:\n - ('dtheta', 'psi'): can be arbitrary but with same shape\n up to 4 dimensions\n - ('ntheta', 'npsi', 'include_summit'): will be used to\n compute the envelop (contour) of the crystal, as 2 1d arrays\n\n These arguments are fed to self.get_local_noute1e2() which will compute\n the start points and return them as shape (3, psi.shape)\n\n End point or unit vector\n ------------------------\n End point are computed automatically if:\n - 'config' is provided: ray-tracing is done like for any camera\n - 'det' is provided: xi and xj can be computed\n\n Returning format\n ----------------\n\n The rays can be returned as:\n - '(pts, vect, length)': a tuple of:\n - pts: array of start points on the crystal\n (only the summit by default)\n - vect: array\n - length:\n - '(pts, vect)': a tuple with only pts and vect\n - 'pts': a tuple, where both start and end points are returned\n All arrays represent (X, Y, Z) cartesian coordinates in the tokamak's\n frame\n\n Optionally, can return the (xi, xj) coordinates of points if a detector\n (det) is provided.\n\n \"\"\"\n\n # -----------\n # Check input\n if returnas is None:\n returnas = 'pts'\n if return_xixj is None:\n return_xixj = False\n\n lret = ['(pts, vect, length)', '(pts, vect)', 'pts'] # , object]\n if returnas not in lret:\n msg = (\n \"Arg returnas must be in:\\n\"\n + \"\\t- '(pts, vect, length)': starting points, unit vector,\"\n + \" length\\n\"\n + \"\\t- 'pts': starting and ending points\\n\"\n # + \"\\t- object: CamLOS1D instance\\n\"\n )\n raise Exception(msg)\n\n det = self._checkformat_det(det)\n if length is None:\n length = 10.\n\n if grid is None:\n try:\n grid = bragg.shape != dtheta.shape\n except Exception as err:\n grid = True\n\n # -----------\n # Get starting point and vectors\n pts_start, vect = self._get_rays_from_cryst(\n phi=phi, bragg=bragg,\n lamb=lamb, n=n,\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n grid=grid,\n )\n\n if returnas == '(pts, vect)':\n return pts_start, vect\n\n # -----------\n # Get length (minimum between conf, det, length)\n vshape = vect.shape\n dk = {\n k0: np.full(vshape[1:], np.nan)\n for k0 in ['config', 'det', 'length']\n }\n xi, xj = None, None\n if config is not None:\n # Here insert ray-tracing from config!\n if vshape != pts_start.shape:\n if len(vshape) == 3 and len(pts_start.shape) == 2:\n D = np.reshape(\n np.repeat(pts_start[..., None], vshape[-1], axis=-1),\n (3, -1),\n )\n u = vect.reshape((3, -1))\n else:\n msg = (\n \"Not treated case!\\n\"\n f\"\\t- pts_start.shape: {pts_start.shape}\\n\"\n f\"\\t- vect.shape: {vshape}\\n\"\n )\n raise Exception(msg)\n else:\n if len(vshape) > 2:\n D = pts_start.reshape((3, -1))\n u = vect.reshape((3, -1))\n else:\n D = pts_start\n u = vect\n\n rays = _core.Rays(\n dgeom=(D, u),\n config=config,\n strict=False,\n Name='dummy',\n Diag='dummy',\n Exp='dummy',\n )\n if u.shape != vshape:\n kout = 
rays.dgeom['kOut'].reshape(vshape[1:])\n else:\n kout = rays.dgeom['kOut']\n dk['config'] = kout\n\n if det is not None and det is not False:\n shape = tuple([3] + [1 for ii in range(vect.ndim-1)])\n cent = det['cent'].reshape(shape)\n nout = det['nout'].reshape(shape)\n if grid is True:\n k = (\n np.sum((cent-pts_start[..., None])*nout, axis=0)\n / np.sum(vect*nout, axis=0)\n )\n else:\n k = (\n np.sum((cent-pts_start)*nout, axis=0)\n / np.sum(vect*nout, axis=0)\n )\n dk['det'][k >= 0.] = k[k >= 0.]\n if return_xixj is True:\n if grid:\n pts_end = pts_start[..., None] + dk['det'][None, ...]*vect\n else:\n pts_end = pts_start + dk['det'][None, ...]*vect\n ei = det['ei'].reshape(shape)\n ej = det['ej'].reshape(shape)\n xi = np.sum((pts_end - cent)*ei, axis=0)\n xj = np.sum((pts_end - cent)*ej, axis=0)\n\n if length is not None:\n dk['length'][:] = length\n\n k = np.nanmin([vv for vv in dk.values() if vv is not None], axis=0)\n\n # -----------\n # return\n if returnas == 'pts':\n if grid:\n pts_end = pts_start[..., None] + k[None, ...]*vect\n if return_xixj:\n return pts_start, pts_end, xi, xj\n else:\n return pts_start, pts_end\n else:\n pts_end = pts_start + k[None, ...]*vect\n if return_xixj:\n return pts_start, pts_end, xi, xj\n else:\n return pts_start, pts_end\n elif returnas == '(pts, vect, length)':\n if return_xixj:\n return pts_start, vect, k, xi, xj\n else:\n return pts_start, vect, k\n\n # -----------------\n # methods for crystal splitting\n # -----------------\n\n def split(self, direction=None, nb=None):\n\n # ------------\n # check inputs\n if direction is None:\n direction = 'e1'\n if direction not in ['e1', 'e2']:\n msg = (\n \"Arg direction must be either:\\n\"\n \"\\t- 'e1': split along vector 'e1' (~horizontally)\\n\"\n \"\\t- 'e2': split along vector 'e2' (~vertically)\\n\"\n f\"You provided: {direction}\"\n )\n raise Exception(msg)\n\n if nb is None:\n nb = 2\n if not (isinstance(nb, int) and nb > 1):\n msg = (\n \"Arg nb must be a int > 1 !\\n\"\n \"It specifies the number of equal parts desired\\n\"\n f\"You provided: {nb}\"\n )\n raise Exception(msg)\n\n # ---------------\n # split\n\n edges = np.linspace(-1, 1, nb+1)\n mid = 0.5*(edges[1:] + edges[:-1])[None, :]\n if direction == 'e2':\n dtheta = mid*self._dgeom['extenthalf'][1]\n psi = np.zeros((1, nb), dtype=float)\n extenthalf = [\n self._dgeom['extenthalf'][0],\n self._dgeom['extenthalf'][1]/nb,\n ]\n else:\n dtheta = np.zeros((1, nb), dtype=float)\n psi = mid*self._dgeom['extenthalf'][0]\n extenthalf = [\n self._dgeom['extenthalf'][0]/nb,\n self._dgeom['extenthalf'][1],\n ]\n\n nouts = (\n np.cos(dtheta)*(\n self._dgeom['nout'][:, None]*np.cos(psi)\n + self._dgeom['e1'][:, None]*np.sin(psi)\n )\n + np.sin(dtheta)*self._dgeom['e2'][:, None]\n )\n e1s = (\n -self._dgeom['nout'][:, None]*np.sin(psi)\n + self._dgeom['e1'][:, None]*np.cos(psi)\n )\n e2s = np.array([\n nouts[1, :]*e1s[2, :] - nouts[2, :]*e1s[1, :],\n nouts[2, :]*e1s[0, :] - nouts[0, :]*e1s[2, :],\n nouts[0, :]*e1s[1, :] - nouts[1, :]*e1s[0, :],\n\n ])\n\n # -----------\n # Construct list of instances\n\n lobj = [\n self.__class__(\n dgeom={\n 'rcurve': self._dgeom['rcurve'],\n 'center': self._dgeom['center'],\n 'nout': nouts[:, ii],\n 'e1': e1s[:, ii],\n 'e2': e2s[:, ii],\n 'extenthalf': extenthalf,\n },\n dmat={\n k0: v0 for k0, v0 in self._dmat.items()\n if k0 not in ['nin', 'nout', 'e1', 'e2']\n },\n dbragg=dict(self._dbragg),\n Name=f\"{self.Id.Name}{ii}\",\n Exp=self.Id.Exp,\n )\n for ii in range(nb)\n ]\n\n return lobj\n\n\n\n # 
-----------------
    # methods for general plotting
    # -----------------

    def plot(
        self, dcryst=None,
        phi=None, bragg=None, lamb=None, pts=None,
        n=None, config=None, det=None, length=None,
        dtheta=None, psi=None,
        ntheta=None, npsi=None,
        include_summit=None,
        dax=None, proj=None, res=None, element=None,
        color=None, ddet=None,
        dleg=None, draw=True, dmargin=None,
        use_non_parallelism=None, grid=None,
        rays_npts=None, rays_color=None,
        fs=None, wintit=None, tit=None,
    ):
        """ Plot the crystal in the desired projection

        The projection is 3d, cross-section or horizontal
        Optionally add rays reflected on the crystal at:
            - lamb / phi: desired wavelength and incidence angle
        and either:
            - psi, dtheta: desired pts on the crystal surface
            - pts: emitted from desired pts (e.g.: in the plasma)
              (needs to be refreshed with the get_rays_from_cryst method
               if new pts are wanted)

        Parameters
        ----------
        dax: None / dict
            dict of axes to be used, with keys:
                - 'cross': axes where to plot the cross-section view
                - 'hor': axes where to plot the horizontal (from top) view
                - '3d': axes where to plot the 3d view
            if None, a new figure and axes are created
        proj: None / str
            key indicating which plot to make:
                - 'cross': cross-section projection
                - 'hor': horizontal projection
                - 'all': cross-section + horizontal view
                - '3d': 3d view
        element: None / str
            char string where each letter indicates an element to plot
                - 'o': outline (edges of crystal)
                - 's': summit (geometrical center of the crystal)
                - 'c': center (of the sphere of curvature)
                - 'r': rowland circle (plotted in e1 direction)
                - 'v': local unit vectors e1, e2, nout
            If None, defaults to 'oscvr'
        res: None / float
            Resolution for the discretization of the outline
        dcryst: None / dict
            dict of dict for plotting the various elements of the crystal:
                - 'outline': dict of properties fed to plot()
                - 'cent': dict of properties fed to plot()
                - 'summit': dict of properties fed to plot()
                - 'rowland': dict of properties fed to plot()
                - 'vectors': dict of properties fed to quiver()
        ddet: None / dict
            dict of dict for plotting the various elements of the det:
                - 'outline': dict of properties fed to plot()
                - 'cent': dict of properties fed to plot()
                - 'vectors': dict of properties fed to quiver()
        color: None / str / tuple
            color to be used for plotting
            Overwrites all colors in dcryst and ddet
        det: None / dict
            Optional associated detector to be plotted, as a dict with keys:
                - 'cent': 1d array of cartesian coordinates of the center
                - 'nout': 1d array of cartesian coordinates of unit vector
                    oriented towards the crystal
                - 'ei': 1d array of cartesian coordinates of unit vector
                - 'ej': 1d array of cartesian coordinates of unit vector
                - 'outline': 2d array of outline coordinates in (ei, ej)
        dleg: None / dict
            dict of properties to be passed to plt.legend()
            if False legend is not plotted
        use_non_parallelism: None / bool
            Which unit vectors (direct orthonormal basis) to use:
                - use_non_parallelism = True: return the geometrical basis
                - use_non_parallelism = False: return the mesh basis
        """
        if det is None:
            det = False
        det = self._checkformat_det(det)

        lc = [
            dtheta is not None or psi is not None or phi is not None,
            pts is not None
        ]
        if np.sum(lc) == 2:
            msg = (
                "For ray tracing, please provide either:\n"
                + "\t- dtheta, psi, phi, lamb/bragg\n"
                + "\t- pts, lamb/bragg\n"
            )
            raise Exception(msg)

        # Add rays?
        if lc[0]:
            # Get one 
way\n # pts.shape = (3, nlamb, npts, ndtheta)\n pts_summit, pts1 = self.get_rays_from_cryst(\n phi=phi, lamb=lamb, bragg=bragg,\n n=n, use_non_parallelism=use_non_parallelism,\n dtheta=dtheta, psi=psi,\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n config=config, det=det,\n returnas='pts', return_xixj=False,\n grid=grid,\n )\n # Get the other way\n pts2, xi, xj = self.get_rays_from_cryst(\n phi=phi+np.pi, lamb=lamb, bragg=bragg,\n n=n, use_non_parallelism=use_non_parallelism,\n dtheta=dtheta, psi=psi,\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n config=config, det=det,\n returnas='pts', return_xixj=True,\n grid=grid,\n )[1:]\n elif lc[1]:\n c0 = (\n isinstance(pts, np.ndarray)\n and pts.ndim == 2\n and pts.shape[0] == 3\n )\n if not c0:\n msg = (\"Arg pts must be a (3, npts) np.array!\")\n raise Exception(msg)\n\n # pts.shape = (nlamb, npts, ndtheta)\n dtheta, psi, phi, bragg, _, _ = self.calc_raytracing_from_lambpts(\n pts=pts,\n lamb=lamb,\n ndtheta=ntheta,\n )\n pts_summit, pts2, xi, xj = self.get_rays_from_cryst(\n phi=phi+np.pi, lamb=None, bragg=bragg,\n n=n, use_non_parallelism=use_non_parallelism,\n dtheta=dtheta, psi=psi,\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n config=config, det=det,\n returnas='pts', return_xixj=True,\n grid=grid,\n )\n pts1 = np.repeat(\n np.repeat(\n np.repeat(\n pts[:, None, :], dtheta.shape[0], axis=1,\n )[..., None],\n dtheta.shape[2],\n axis=-1,\n )[..., None],\n 2,\n axis=-1,\n )\n else:\n pts_summit, pts1, pts2, xi, xj = None, None, None, None, None\n return _plot_optics.CrystalBragg_plot(\n cryst=self, dcryst=dcryst,\n det=det, ddet=ddet,\n dax=dax, proj=proj, res=res, element=element,\n color=color,\n pts_summit=pts_summit, pts1=pts1, pts2=pts2,\n xi=xi, xj=xj,\n rays_color=rays_color, rays_npts=rays_npts,\n dleg=dleg, draw=draw, fs=fs, dmargin=dmargin,\n use_non_parallelism=use_non_parallelism,\n wintit=wintit, tit=tit,\n )\n\n # -----------------\n # methods for generic first-approx\n # -----------------\n\n def get_phi_from_magaxis_summit(\n self,\n axis_r,\n axis_z,\n axis_npts=None,\n lamb=None,\n lamb_tol=None,\n bragg=None,\n n=None,\n use_non_parallelism=None,\n ):\n \"\"\" Return phi of a magnteic axis (at lamb with tolerance)\n\n axis_r and axis_z must be np.ndarrays of the same shape\n The magnetic axis is discretized toroidally in axis_npts (def: 1000)\n\n The pts closest to the chosen lamb are picked\n If no pts is found within tolerance, an error is raised\n\n \"\"\"\n\n # --------------------\n # Check / format input\n\n if axis_npts is None:\n axis_npts = 1000\n\n axis_r = np.atleast_1d(axis_r)\n axis_z = np.atleast_1d(axis_z)\n assert axis_r.shape == axis_z.shape\n\n if lamb_tol is None:\n lamb_tol = 0.01e-10\n\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)\n\n # --------------\n # Disretize axis\n\n shaperz = axis_r.shape\n phi_ax = np.full(shaperz, np.nan)\n\n # Compute phi\n theta_cryst = np.arctan2(\n self._dgeom['summit'][1],\n self._dgeom['summit'][0],\n )\n\n theta_ax = theta_cryst + np.pi/2*np.linspace(-1, 1, axis_npts)\n shapetheta = np.r_[[1 for ii in shaperz], axis_npts]\n theta_ax = theta_ax.reshape(shapetheta)\n\n axis_x = (axis_r[..., None] * np.cos(theta_ax)).ravel()\n axis_y = (axis_r[..., None] * np.sin(theta_ax)).ravel()\n axis_z = (np.repeat(axis_z[..., None], axis_npts, axis=-1)).ravel()\n\n # ----------------------------------------------\n # Compute bragg, phi, lamb of each point on 
axis

        (
            bragg_ax_full, phi_ax_full, lamb_ax_full,
        ) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
            pts=np.array([axis_x, axis_y, axis_z]),
            dtheta=None, psi=None,
            ntheta=None, npsi=None,
            n=None,
            use_non_parallelism=use_non_parallelism,
            grid=None,
            return_lamb=True,
        )

        # -------------------------------------
        # Select points on axis closest to lamb

        # lamb_ax_full = self.get_lamb_from_bragg(bragg_ax_full)
        shape_full = tuple(np.r_[shaperz, axis_npts])
        lamb_ax_full = lamb_ax_full.reshape(shape_full)
        phi_ax_full = phi_ax_full.reshape(shape_full)
        dlamb = np.abs(lamb_ax_full - lamb)

        indok = np.any(dlamb <= lamb_tol, axis=-1)
        indmin = np.nanargmin(dlamb[indok, :], axis=-1)
        indtup = tuple([iii for iii in indok.nonzero()] + [indmin])
        phi_ax[indok] = phi_ax_full[indtup]

        return phi_ax

    def get_bragg_from_lamb(self, lamb=None, n=None):
        """ Bragg's law: n*lamb = 2*d*sin(bragg) """
        if self._dmat['d'] is None:
            msg = "Interplane distance d not set!\n"
            msg += "  => please use self.set_dmat({'d': ...})"
            raise Exception(msg)
        if lamb is None:
            lamb = self._dbragg['lambref']
        return _comp_optics.get_bragg_from_lamb(
            np.atleast_1d(lamb), self._dmat['d'], n=n,
        )

    def get_lamb_from_bragg(self, bragg=None, n=None):
        """ Bragg's law: n*lamb = 2*d*sin(bragg) """
        if self._dmat['d'] is None:
            msg = "Interplane distance d not set!\n"
            msg += "  => please use self.set_dmat({'d': ...})"
            raise Exception(msg)
        if bragg is None:
            bragg = self._dbragg['braggref']
        return _comp_optics.get_lamb_from_bragg(np.atleast_1d(bragg),
                                                self._dmat['d'], n=n)

    def update_non_parallelism(self, alpha=None, beta=None):
        """ Compute new values of unit vectors nout, e1 and e2 in the dmat
        basis, accounting for non-parallelism

        The new values are stored in the dmat dict
        """
        if alpha is None:
            alpha = 0
        if beta is None:
            beta = 0

        (self._dmat['nin'], self._dmat['nout'], self._dmat['e1'],
         self._dmat['e2']) = _comp_optics.get_vectors_from_angles(
             alpha, beta,
             self._dgeom['nout'], self._dgeom['e1'],
             self._dgeom['e2'],
         )
        self._dmat['alpha'], self._dmat['beta'] = alpha, beta

    def calc_meridional_sagital_focus(
        self,
        rcurve=None,
        bragg=None,
        alpha=None,
        use_non_parallelism=None,
        verb=None,
    ):
        """ Compute the meridional and sagittal focus distances

        Optionally accounts for non-parallelism, provided the
        update_non_parallelism method was used first.

        Parameters
        ----------
        rcurve: float
            in dgeom dict., curvature radius of the crystal
        bragg: float
            in dbragg dict., reference bragg angle of the crystal
        alpha: float
            in dmat dict., amplitude of the non-parallelism,
            as an angle defined by the user, in radians
        use_non_parallelism: bool
            Must be True to use the new alpha angle

        Returns
        -------
        merid_ref: float
            Distance crystal-meridional focus (m), for a perfect crystal
        sagit_ref: float
            Distance crystal-sagittal focus (m), for a perfect crystal
        merid_unp: float
            Distance crystal-meridional focus (m), using non_parallelism
        sagit_unp: float
            Distance crystal-sagittal focus (m), using non_parallelism

        """
        # Check inputs
        if rcurve is None:
            rcurve = self._dgeom['rcurve']
        if bragg is None:
            bragg = self._dbragg['braggref']
        if use_non_parallelism is True:
            alpha = self._dmat['alpha']
        if use_non_parallelism is False:
            alpha = 0.0

        # Compute
        return _comp_optics.calc_meridional_sagital_focus(
            rcurve=rcurve,
            bragg=bragg,
            alpha=alpha,
            use_non_parallelism=use_non_parallelism,
            verb=verb,
        )

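    # Hedged usage sketch for the two Bragg's-law converters defined above
    # (the d-spacing and wavelength values are illustrative assumptions):
    #
    #   import numpy as np
    #   # assuming cryst.set_dmat({'d': 2.45e-10}) was called beforehand
    #   bragg = cryst.get_bragg_from_lamb(lamb=3.96e-10, n=1)
    #   lamb = cryst.get_lamb_from_bragg(bragg=bragg, n=1)
    #   assert np.allclose(lamb, 3.96e-10)   # round-trip consistency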
def get_rowland_dist_from_lambbragg(self, bragg=None, lamb=None, n=None):
        """ Return the array of dist from cryst summit to pts on rowland """
        bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
        if np.all(np.isnan(bragg)):
            msg = ("There is no available bragg angle!\n"
                   + "  => Check the value of self.dmat['d'] vs lamb")
            raise Exception(msg)
        return _comp_optics.get_rowland_dist_from_bragg(
            bragg=bragg, rcurve=self._dgeom['rcurve'],
        )

    def get_detector_ideal(
        self,
        bragg=None, lamb=None,
        rcurve=None, n=None,
        ddist=None, di=None, dj=None,
        dtheta=None, dpsi=None, tilt=None,
        lamb0=None, lamb1=None, dist01=None,
        use_non_parallelism=None,
        tangent_to_rowland=None, plot=False,
    ):
        """ Return approximate ideal detector geometry

        Assumes an infinitesimal and ideal crystal
        Returns a dict containing the position and orientation of a detector
        if it was placed ideally on the rowland circle, centered on the
        desired bragg angle (in rad) or wavelength (in m)
        The detector can be tangential to the Rowland circle or perpendicular
        to the line between the crystal and the detector
        Assumes the detector center matches lamb (m) / bragg (rad)

        The detector can be translated towards / away from the crystal
        to make sure the distance between 2 spectral lines
        (lamb0 and lamb1) on the detector's plane matches
        a desired distance (dist01, in m)

        Finally, a desired offset (translation) can be added
        via (ddist, di, dj), in m
        Similarly, an extra rotation can be added via (dtheta, dpsi, tilt)

        The detector is described by its center position
        and (nout, ei, ej) unit vectors
        By convention, nout = np.cross(ei, ej)
        Vectors (ei, ej) define an orthogonal frame in the detector's plane
        All coordinates are 3d (X, Y, Z in the tokamak's frame)

        Returns
        -------
        det: dict
            dict of detector geometrical characteristics:
                'cent': np.ndarray
                    (3,) array of (x, y, z) coordinates of detector center
                'nout': np.ndarray
                    (3,) array of (x, y, z) coordinates of unit vector
                    perpendicular to detector's surface
                    oriented towards crystal
                'ei': np.ndarray
                    (3,) array of (x, y, z) coordinates of unit vector
                    defining first coordinate in detector's plane
                'ej': np.ndarray
                    (3,) array of (x, y, z) coordinates of unit vector
                    defining second coordinate in detector's plane
                'outline': np.ndarray
                    (2, N) array to build detector's contour
                    where the last point is identical to the first.
                    (for example for WEST X2D spectrometer:
                     x*np.r_[-1, -1, 1, 1, -1], y*np.r_[-1, 1, 1, -1, -1])
        """

        # ---------------------
        # Check / format inputs

        if rcurve is None:
            rcurve = self._dgeom['rcurve']

        bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
        if np.all(np.isnan(bragg)):
            msg = ("There is no available bragg angle!\n"
                   + "  => Check the value of self.dmat['d'] vs lamb")
            raise Exception(msg)

        lc = [lamb0 is not None, lamb1 is not None, dist01 is not None]
        if any(lc) and not all(lc):
            msg = (
                "Arg lamb0, lamb1 and dist01 must be provided together:\n"
                + "\t- lamb0: line0 wavelength ({})\n".format(lamb0)
                + "\t- lamb1: line1 wavelength ({})\n".format(lamb1)
                + "\t- dist01: distance (m) on detector between lines "
                + "({})".format(dist01)
            )
            raise Exception(msg)
        bragg01 = None
        if all(lc):
            bragg01 = self._checkformat_bragglamb(
                lamb=np.r_[lamb0, lamb1], n=n,
            )

        # split into 2 different conditions because of dmat
        lc = [rcurve is None, self._dgeom['summit'] is None]
        if any(lc):
            msg = (
                "Some missing fields in dgeom for computation:\n\t- "
                + "\n\t- ".join(['rcurve', 'summit'])
            )
            raise Exception(msg)

        nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
            use_non_parallelism=use_non_parallelism,
        )

        lc = [cc is None for cc in [nout, e1, e2]]
        if any(lc):
            msg = "Fields 'nout', 'e1', 'e2' missing!"
            raise Exception(msg)

        # Compute crystal-centered parameters in (nout, e1, e2)
        (det_dist, n_crystdet_rel,
         det_nout_rel, det_ei_rel) = _comp_optics.get_approx_detector_rel(
             rcurve, bragg,
             bragg01=bragg01, dist01=dist01,
             tangent_to_rowland=tangent_to_rowland)

        # Deduce absolute position in (x, y, z)
        det_cent, det_nout, det_ei, det_ej = _comp_optics.get_det_abs_from_rel(
            det_dist, n_crystdet_rel, det_nout_rel, det_ei_rel,
            self._dgeom['summit'], nout, e1, e2,
            ddist=ddist, di=di, dj=dj,
            dtheta=dtheta, dpsi=dpsi, tilt=tilt)

        if plot:
            dax = self.plot()
            p0 = np.repeat(det_cent[:, None], 3, axis=1)
            vv = np.vstack((det_nout, det_ei, det_ej)).T
            dax['cross'].plot(np.hypot(det_cent[0], det_cent[1]),
                              det_cent[2], 'xb')
            dax['hor'].plot(det_cent[0], det_cent[1], 'xb')
            dax['cross'].quiver(np.hypot(p0[0, :], p0[1, :]), p0[2, :],
                                np.hypot(vv[0, :], vv[1, :]), vv[2, :],
                                units='xy', color='b')
            dax['hor'].quiver(p0[0, :], p0[1, :], vv[0, :], vv[1, :],
                              units='xy', color='b')
        return {'cent': det_cent, 'nout': det_nout,
                'ei': det_ei, 'ej': det_ej}

    def _checkformat_det(self, det=None):
        lc = [det is None, det is False, isinstance(det, dict)]
        msg = ("det must be:\n"
               + "\t- False: no det provided\n"
               + "\t- None: use default approx det from:\n"
               + "\t        self.get_detector_ideal()\n"
               + "\t- dict: a dictionary of 3d (x, y, z) coordinates of a point"
               + " (local frame center) and 3 unit vectors forming a direct "
               + "orthonormal basis attached to the detector's frame\n"
               + "\t\t\t\t- 'cent': detector center\n"
               + "\t\t\t\t- 'nout': unit vector perpendicular to surface, "
               + "in direction of the crystal\n"
               + "\t\t\t\t- 'ei': unit vector, first coordinate on surface\n"
               + "\t\t\t\t- 'ej': unit vector, second coordinate on surface\n"
               + "  You provided: {}".format(det))
        if not any(lc):
            raise Exception(msg)
        if lc[0]:
            det = self.get_detector_ideal(lamb=self._dbragg['lambref'])
        elif lc[2]:
            lk = ['cent', 'nout', 'ei', 'ej']
            c0 = (isinstance(det, dict)
                  and all([(kk in det.keys()
                            and hasattr(det[kk], '__iter__')
                            and np.atleast_1d(det[kk]).size == 3
                            and not np.any(np.isnan(det[kk])))
                           for kk in lk]))
            if not c0:
                raise Exception(msg)
            for k0 in lk:
                det[k0] = np.atleast_1d(det[k0]).ravel()
        return det

    def get_local_noute1e2(
        self,
        dtheta=None, psi=None,
        ntheta=None, npsi=None,
        use_non_parallelism=None,
        include_summit=None,
    ):
        """ Return (vout, ve1, ve2) associated to pts on the crystal's surface

        All points on the spherical crystal's surface are identified
        by (dtheta, psi) coordinates, where:
            - theta = np.pi/2 + dtheta (dtheta=0 default) for the center
              (for the diffracted beam), from frame's basis vector ez
            - psi = 0 for the center, positive in direction of e1
        They are the spherical coordinates from a sphere centered on the
        crystal's center of curvature.

        Args (dtheta, psi) can be:
            - arbitrary: same shape and dimension up to 4
            - 'envelop': will be computed to represent the crystal contour,
              and returned as 2 1d arrays

        Return the pts themselves and the 3 
perpendicular local unit vectors\n (nout, e1, e2), where nout is towards the outside of the sphere and\n nout = np.cross(e1, e2)\n\n In all cases, the output have shape (3, psi.shape)\n\n Return:\n -------\n summ: np.ndarray\n coordinates of the points on the surface\n vout: np.ndarray\n coordinates of outward unit vector\n ve1: np.ndarray\n coordinates of first tangential unit vector\n ve2: np.ndarray\n coordinates of second tangential unit vector\n\n All are cartesian (X, Y, Z) coordinates in the tokamak's frame\n\n \"\"\"\n # Get local basis at crystal summit\n nout, e1, e2, use_non_parallelism = self.get_unit_vectors(\n use_non_parallelism=use_non_parallelism,\n )\n nin = -nout\n\n # Get vectors at any points from psi & dtheta\n vout, ve1, ve2 = _comp_optics.CrystBragg_get_noute1e2_from_psitheta(\n nout, e1, e2,\n psi=psi, dtheta=dtheta,\n e1e2=True, sameshape=False,\n extenthalf_psi=self._dgeom['extenthalf'][0],\n extenthalf_dtheta=self._dgeom['extenthalf'][1],\n ntheta=ntheta, npsi=npsi,\n include_summit=include_summit,\n )\n vin = -vout\n # cent no longer dgeom['center'] because no longer a fixed point\n cent = self._dgeom['summit'] + self._dgeom['rcurve']*nin\n reshape = np.r_[3, [1 for ii in range(vout.ndim - 1)]]\n cent = cent.reshape(reshape)\n\n # Redefining summit according to nout at each point at crystal\n summ = cent + self._dgeom['rcurve']*vout\n return summ, vout, ve1, ve2\n\n def calc_xixj_from_braggphi(\n self,\n phi=None,\n bragg=None,\n lamb=None,\n n=None,\n dtheta=None,\n psi=None,\n det=None,\n use_non_parallelism=None,\n strict=None,\n return_strict=None,\n data=None,\n plot=True,\n dax=None,\n ):\n \"\"\" Assuming crystal's summit as frame origin\n\n According to [1], this assumes a local frame centered on the crystal\n\n These calculations are independent from the tokamak's frame:\n The origin of the local frame is the crystal's summit\n The (O, ez) axis is the crystal's normal\n The crystal is tangent to (O, ex, ey)\n\n [1] tofu/Notes_Upgrades/SpectroX2D/SpectroX2D_EllipsesOnPlane.pdf\n\n Parameters:\n -----------\n Z: float\n Detector's plane intersection with (O, ez) axis\n n: np.ndarray\n (3,) array containing local (x,y,z) coordinates of the plane's\n normal vector\n \"\"\"\n if return_strict is None:\n return_strict = False\n\n # Check / format inputs\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n phi = np.atleast_1d(phi)\n\n # Check / get det\n det = self._checkformat_det(det)\n\n # Get local summit nout, e1, e2 if non-centered\n if dtheta is None:\n dtheta = 0.\n if psi is None:\n psi = 0.\n\n # Probably to update with use_non_parallelism?\n # Get back summit & vectors at any point at the crystal surface,\n # according to parallelism properties\n summit, nout, e1, e2 = self.get_local_noute1e2(\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n ntheta=None, npsi=None,\n include_summit=False,\n )\n\n # Compute\n xi, xj, strict = _comp_optics.calc_xixj_from_braggphi(\n det_cent=det['cent'],\n det_nout=det['nout'], det_ei=det['ei'], det_ej=det['ej'],\n det_outline=det.get('outline'),\n summit=summit, nout=nout, e1=e1, e2=e2,\n bragg=bragg, phi=phi, strict=strict,\n )\n\n if plot:\n dax = _plot_optics.CrystalBragg_plot_approx_detector_params(\n bragg, xi, xj, data, dax,\n )\n if return_strict is True:\n return xi, xj, strict\n else:\n return xi, xj\n\n def plot_line_on_det_tracing(\n self, lamb=None, n=None,\n nphi=None,\n det=None, johann=None,\n use_non_parallelism=None,\n lpsi=None, ldtheta=None,\n strict=None,\n 
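            # NB: lpsi / ldtheta are normalized positions on the crystal
            # surface (scaled by extenthalf in the body) used for the
            # johann error; strict is forwarded to calc_xixj_from_braggphi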
ax=None, dleg=None,\n rocking=None, fs=None, dmargin=None,\n wintit=None, tit=None,\n ):\n \"\"\" Visualize the de-focusing by ray-tracing of chosen lamb\n Possibility to plot few wavelength' arcs on the same plot.\n Args:\n - lamb: array of min size 1, in 1e-10 [m]\n - det: dict\n - xi_bounds: np.min & np.max of _XI\n - xj_bounds: np.min & np.max of _XJ\n (from \"inputs_temp/XICS_allshots_C34.py\" l.649)\n - johann: True or False\n \"\"\"\n # Check / format inputs\n if lamb is None:\n lamb = self._dbragg['lambref']\n lamb = np.atleast_1d(lamb).ravel()\n nlamb = lamb.size\n\n if johann is None:\n johann = lpsi is not None or ldtheta is not None\n if rocking is None:\n rocking = False\n\n if det is None or det.get('outline') is None:\n msg = (\"Please provide det as a dict with 'outline'!\")\n raise Exception(msg)\n\n # Get local basis\n nout, e1, e2, use_non_parallelism = self.get_unit_vectors(\n use_non_parallelism=use_non_parallelism,\n )\n nin = -nout\n\n # Compute lamb / phi\n _, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=det['outline'][0, :], xj=det['outline'][1, :], det=det,\n dtheta=0, psi=0,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=False,\n )\n phimin, phimax = np.nanmin(phi), np.nanmax(phi)\n phimin, phimax = phimin-(phimax-phimin)/10, phimax+(phimax-phimin)/10\n\n # Get reference ray-tracing\n bragg = self._checkformat_bragglamb(lamb=lamb, n=n)\n if nphi is None:\n nphi = 100\n phi = np.linspace(phimin, phimax, nphi)\n\n xi = np.full((nlamb, nphi), np.nan)\n xj = np.full((nlamb, nphi), np.nan)\n for ll in range(nlamb):\n xi[ll, :], xj[ll, :] = self.calc_xixj_from_braggphi(\n bragg=np.full(phi.shape, bragg[ll]),\n phi=phi,\n dtheta=0.,\n psi=0.,\n n=n,\n det=det,\n use_non_parallelism=use_non_parallelism,\n strict=strict,\n plot=False,\n )\n\n # Get johann-error raytracing (multiple positions on crystal)\n xi_er, xj_er = None, None\n if johann and not rocking:\n if lpsi is None:\n lpsi = np.linspace(-1., 1., 15)\n if ldtheta is None:\n ldtheta = np.linspace(-1., 1., 15)\n lpsi, ldtheta = np.meshgrid(lpsi, ldtheta)\n lpsi = lpsi.ravel()\n ldtheta = ldtheta.ravel()\n\n lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi]\n ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta]\n npsi = lpsi.size\n assert npsi == ldtheta.size\n\n xi_er = np.full((nlamb, npsi*nphi), np.nan)\n xj_er = np.full((nlamb, npsi*nphi), np.nan)\n for l in range(nlamb):\n for ii in range(npsi):\n i0 = np.arange(ii*nphi, (ii+1)*nphi)\n xi_er[l, i0], xj_er[l, i0] = self.calc_xixj_from_braggphi(\n phi=phi, bragg=bragg[l], lamb=None, n=n,\n dtheta=ldtheta[ii], psi=lpsi[ii],\n det=det, plot=False,\n use_non_parallelism=use_non_parallelism,\n strict=strict,\n )\n\n # Get rocking curve error\n if rocking:\n pass\n\n # Plot\n return _plot_optics.CrystalBragg_plot_line_tracing_on_det(\n lamb, xi, xj, xi_er, xj_er,\n det=det, ax=ax, dleg=dleg,\n johann=johann, rocking=rocking,\n fs=fs, dmargin=dmargin, wintit=wintit, tit=tit)\n\n def calc_johannerror(\n self,\n xi=None, xj=None, err=None,\n det=None, n=None,\n lpsi=None, ldtheta=None,\n lambda_interval_min=None,\n lambda_interval_max=None,\n use_non_parallelism=None,\n plot=True, fs=None, cmap=None,\n vmin=None, vmax=None, tit=None, wintit=None,\n ):\n \"\"\" Plot the johann error\n\n The johann error is the error (scattering) induced by defocalization\n due to finite crystal dimensions\n There is a johann error on wavelength (lamb => loss of spectral\n resolution) and on directionality (phi)\n If provided, lpsi and ldtheta are 
taken as normalized variations with\n respect to the crystal summit and to its extenthalf.\n Typical values are:\n - lpsi = [-1, 1, 1, -1]\n - ldtheta = [-1, -1, 1, 1]\n They must have the same len()\n\n First affecting a reference lambda according to:\n - pixel's position\n - crystal's summit\n Then, computing error on bragg and phi angles on each pixels by\n computing lambda and phi from the crystal's outline\n Provide lambda_interval_min/max to ensure the given wavelength interval\n is detected over the whole surface area.\n A True/False boolean is then returned.\n \"\"\"\n\n # Check xi, xj once before to avoid doing it twice\n if err is None:\n err = 'abs'\n if lambda_interval_min is None:\n lambda_interval_min = 3.93e-10\n if lambda_interval_max is None:\n lambda_interval_max = 4.00e-10\n\n xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)\n\n # Check / format inputs\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xii, xj=xjj, det=det,\n dtheta=0, psi=0,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n\n # Only one summit was selected\n bragg, phi, lamb = bragg[..., 0], phi[..., 0], lamb[..., 0]\n\n # Check lambda interval into lamb array\n c0 = (\n np.min(lamb) < lambda_interval_min\n and np.max(lamb) > lambda_interval_max\n )\n if c0:\n test_lambda_interv = True\n else:\n test_lambda_interv = False\n\n # Get err from multiple ldtheta, lpsi\n if lpsi is None:\n lpsi = np.r_[-1., 0., 1., 1., 1., 0., -1, -1]\n lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi]\n if ldtheta is None:\n ldtheta = np.r_[-1., -1., -1., 0., 1., 1., 1., 0.]\n ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta]\n npsi = lpsi.size\n assert npsi == ldtheta.size\n\n (\n braggerr, phierr, lamberr,\n ) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xii, xj=xjj, det=det,\n dtheta=ldtheta, psi=lpsi,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n err_lamb = np.nanmax(np.abs(lamb[..., None] - lamberr), axis=-1)\n err_phi = np.nanmax(np.abs(phi[..., None] - phierr), axis=-1)\n\n # absolute vs relative error\n if 'rel' in err:\n if err == 'rel':\n err_lamb = 100.*err_lamb / (np.nanmax(lamb) - np.nanmin(lamb))\n err_phi = 100.*err_phi / (np.nanmax(phi) - np.nanmin(phi))\n elif err == 'rel2':\n err_lamb = 100.*err_lamb / np.mean(lamb)\n err_phi = 100.*err_phi / np.mean(phi)\n err_lamb_units = '%'\n err_phi_units = '%'\n else:\n err_lamb_units = 'm'\n err_phi_units = 'rad'\n\n if plot is True:\n ax = _plot_optics.CrystalBragg_plot_johannerror(\n xi, xj, lamb, phi,\n err_lamb, err_phi,\n err_lamb_units=err_lamb_units,\n err_phi_units=err_phi_units,\n cmap=cmap, vmin=vmin, vmax=vmax,\n fs=fs, tit=tit, wintit=wintit,\n )\n return (\n err_lamb, err_phi, err_lamb_units, err_phi_units,\n test_lambda_interv,\n )\n\n def plot_focal_error_summed(\n self,\n dist_min=None, dist_max=None,\n di_min=None, di_max=None,\n ndist=None, ndi=None,\n lamb=None, bragg=None,\n xi=None, xj=None,\n err=None,\n use_non_parallelism=None,\n tangent_to_rowland=None, n=None,\n plot=None,\n pts=None,\n det_ref=None, plot_dets=None, nsort=None,\n dcryst=None,\n lambda_interval_min=None,\n lambda_interval_max=None,\n contour=None,\n fs=None,\n ax=None,\n cmap=None,\n vmin=None,\n vmax=None,\n return_ax=None,\n ):\n \"\"\"\n Using the calc_johannerror method, computing the sum of the\n focalization error over the whole detector for different positions\n characterized by the translations ddist and di in the equatorial plane\n (dist_min, 
dist_max, ndist) (di_min, di_max, ndi).\n\n Parameters:\n -----------\n - lamb/bragg : float\n Automatically set to crystal's references\n - xi, xj : np.ndarray\n pixelization of the detector\n (from \"inputs_temp/XICS_allshots_C34.py\" l.649)\n - alpha, beta : float\n Values of Non Parallelism references angles\n - use_non_parallelism : str\n - tangent_to_rowland : str\n - plot_dets : str\n Possibility to plot the nsort- detectors with the lowest\n summed focalization error, next to the Best Approximate Real\n detector\n dict(np.load('det37_CTVD_incC4_New.npz', allow_pickle=True))\n - nsort : float\n Number of best detector's position to plot\n - lambda_interv_min/max : float\n To ensure the given wavelength interval is detected over the whole\n surface area. A True/False boolean is then returned.\n \"\"\"\n\n # Check / format inputs\n if dist_min is None:\n dist_min = -0.15\n if dist_max is None:\n dist_max = 0.15\n if di_min is None:\n di_min = -0.40\n if di_max is None:\n di_max = 0.40\n if ndist is None:\n ndist = 21\n if ndi is None:\n ndi = 21\n if err is None:\n err = 'rel'\n if plot is None:\n plot = True\n if plot_dets is None:\n plot_dets = det_ref is not None\n if nsort is None:\n nsort = 5\n if return_ax is None:\n return_ax = True\n if lambda_interval_min is None:\n lambda_interval_min = 3.93e-10\n if lambda_interval_max is None:\n lambda_interval_max = 4.00e-10\n\n l0 = [dist_min, dist_max, ndist, di_min, di_max, ndi]\n c0 = any([l00 is not None for l00 in l0])\n if not c0:\n msg = (\n \"Please give the ranges of ddist and di translations\\n\"\n \"\\t to compute the different detector's position\\n\"\n \"\\t Provided:\\n\"\n \"\\t\\t- dist_min, dist_max, ndist: ({}, {}, {})\\n\".format(\n dist_min, dist_max, ndist,\n )\n + \"\\t\\t- di_min, di_max, ndi: ({}, {}, {})\\n\".format(\n di_min, di_max, ndi,\n )\n )\n raise Exception(msg)\n\n # ------------\n # Compute local coordinates of det_ref\n (\n ddist0, di0, dj0,\n dtheta0, dpsi0, tilt0,\n ) = self._get_local_coordinates_of_det(\n bragg=bragg,\n lamb=lamb,\n det_ref=det_ref,\n use_non_parallelism=use_non_parallelism,\n )\n\n # angle between nout vectors from get_det_approx() &\n ## get_det_approx(tangent=False)\n\n det1 = self.get_detector_ideal(\n lamb=lamb,\n bragg=bragg,\n use_non_parallelism=use_non_parallelism,\n tangent_to_rowland=True,\n )\n det2 = self.get_detector_ideal(\n lamb=lamb,\n bragg=bragg,\n use_non_parallelism=use_non_parallelism,\n tangent_to_rowland=False,\n )\n cos_angle_nout = np.sum(\n det1['nout'] * det2['nout']\n ) / (\n np.linalg.norm(det1['nout'] * np.linalg.norm(det2['nout']))\n )\n angle_nout = np.arccos(cos_angle_nout)\n\n # Compute\n ddist = np.linspace(dist_min, dist_max, int(ndist))\n di = np.linspace(di_min, di_max, int(ndi))\n error_lambda = np.full((di.size, ddist.size), np.nan)\n test_lamb_interv = np.zeros((di.size, ddist.size), dtype='bool')\n end = '\\r'\n for ii in range(ddist.size):\n for jj in range(di.size):\n\n # print progression\n if ii == ndist-1 and jj == ndi-1:\n end = '\\n'\n msg = (\n \"Computing mean focal error for det \"\n f\"({ii+1}, {jj+1})/({ndist}, {ndi})\"\n ).ljust(60)\n print(msg, end=end, flush=True)\n\n # Get det\n dpsi0bis = float(dpsi0)\n if tangent_to_rowland:\n dpsi0bis = dpsi0 - angle_nout\n\n det = self.get_detector_ideal(\n ddist=ddist[ii],\n di=di[jj],\n dj=dj0,\n dtheta=dtheta0,\n dpsi=dpsi0bis,\n tilt=tilt0,\n lamb=lamb,\n bragg=bragg,\n use_non_parallelism=use_non_parallelism,\n tangent_to_rowland=False,\n )\n\n # Integrate error\n (\n 
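                    # NB: calc_johannerror() returns the 5-tuple
                    # (err_lamb, err_phi, err_lamb_units, err_phi_units,
                    #  test_lambda_interv); the [::4] slice below keeps
                    # only the 1st and 5th elements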
error_lambda_temp, test_lamb_interv[jj, ii],
                ) = self.calc_johannerror(
                    xi=xi, xj=xj,
                    det=det,
                    err=err,
                    lambda_interval_min=lambda_interval_min,
                    lambda_interval_max=lambda_interval_max,
                    plot=False,
                )[::4]
                error_lambda[jj, ii] = np.nanmean(error_lambda_temp)

        if 'rel' in err:
            units = '%'
        else:
            units = 'm'

        if plot:
            ax = _plot_optics.CrystalBragg_plot_focal_error_summed(
                cryst=self, dcryst=dcryst,
                lamb=lamb, bragg=bragg,
                error_lambda=error_lambda,
                ddist=ddist, di=di,
                ddist0=ddist0, di0=di0, dj0=dj0,
                dtheta0=dtheta0, dpsi0=dpsi0, tilt0=tilt0,
                angle_nout=angle_nout,
                det_ref=det_ref,
                units=units,
                plot_dets=plot_dets, nsort=nsort,
                tangent_to_rowland=tangent_to_rowland,
                use_non_parallelism=use_non_parallelism,
                pts=pts,
                test_lamb_interv=test_lamb_interv,
                contour=contour,
                fs=fs,
                ax=ax,
                cmap=cmap,
                vmin=vmin,
                vmax=vmax,
            )
        if return_ax:
            return error_lambda, ddist, di, test_lamb_interv, ax
        else:
            return error_lambda, ddist, di, test_lamb_interv

    def _get_local_coordinates_of_det(
        self,
        bragg=None,
        lamb=None,
        det_ref=None,
        use_non_parallelism=None,
    ):
        """
        Compute the translation (ddist, di, dj) and angular
        (dtheta, dpsi, tilt) properties of an arbitrary detector chosen by
        the user.
        """

        # ------------
        # check inputs

        if det_ref is None:
            msg = (
                "You need to provide your arbitrary detector\n"
                + "\t in order to compute its spatial properties!\n"
                + "\t You provided: {}".format(det_ref)
            )
            raise Exception(msg)

        # Checkformat det
        det_ref = self._checkformat_det(det=det_ref)

        # ------------
        # get approx detect

        det_approx = self.get_detector_ideal(
            bragg=bragg, lamb=lamb,
            tangent_to_rowland=False,
            use_non_parallelism=use_non_parallelism,
        )

        # ------------
        # get vector delta between centers

        delta = det_ref['cent'] - det_approx['cent']
        ddist = np.sum(delta * (-det_approx['nout']))
        di = np.sum(delta * det_approx['ei'])
        dj = np.sum(delta * det_approx['ej'])

        # ---------------
        # get angles from unit vectors
        dtheta, dpsi, tilt = None, None, None

        # use formulas in _comp_optics.get_det_abs_from_rel()
        sindtheta = np.sum(det_approx['ej'] * det_ref['nout'])
        costheta_cospsi = np.sum(det_approx['nout'] * det_ref['nout'])
        costheta_sinpsi = np.sum(det_approx['ei'] * det_ref['nout'])
        costheta = np.sqrt(costheta_cospsi**2 + costheta_sinpsi**2)
        dtheta = np.arctan2(sindtheta, costheta)
        dpsi = np.arctan2(
            costheta_sinpsi / costheta,
            costheta_cospsi / costheta,
        )

        # ---------
        # tilt
        det_ei2 = (
            np.cos(dpsi)*det_approx['ei'] - np.sin(dpsi)*det_approx['nout']
        )
        det_ej2 = np.cross(det_ref['nout'], det_ei2)
        costilt = np.sum(det_ref['ei']*det_ei2)
        sintilt = np.sum(det_ref['ei']*det_ej2)
        tilt = np.arctan2(sintilt, costilt)

        return ddist, di, dj, dtheta, dpsi, tilt

    def get_lambbraggphi_from_ptsxixj_dthetapsi(
        self,
        pts=None,
        xi=None, xj=None, det=None,
        dtheta=None, psi=None,
        ntheta=None, npsi=None,
        n=None,
        use_non_parallelism=None,
        grid=None,
        return_lamb=None,
    ):
        """ Return the lamb, bragg and phi for provided pts and dtheta/psi

        if grid = True:
            compute all pts / dtheta/psi combinations
            => return (npts, ndtheta) arrays
        else:
            each pts is associated to a single dtheta/psi
            => assumes npts == ndtheta == npsi
            => return (npts,) arrays

        """

        # Check / Format inputs
        if return_lamb is None:
            return_lamb = True
        det = self._checkformat_det(det)

        # Get local 
basis\n summ, vout, ve1, ve2 = self.get_local_noute1e2(\n dtheta=dtheta, psi=psi,\n ntheta=ntheta, npsi=npsi,\n use_non_parallelism=use_non_parallelism,\n include_summit=True,\n )\n\n # Derive bragg, phi\n bragg, phi = _comp_optics.calc_braggphi_from_xixjpts(\n pts=pts,\n xi=xi, xj=xj, det=det,\n summit=summ, nin=-vout, e1=ve1, e2=ve2,\n grid=grid,\n )\n\n # Derive lamb\n if return_lamb is True:\n lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)\n return bragg, phi, lamb\n else:\n return bragg, phi\n\n def get_lamb_avail_from_pts(\n self,\n pts=None,\n n=None, ndtheta=None,\n det=None, nlamb=None, klamb=None,\n use_non_parallelism=None,\n strict=None,\n return_phidtheta=None,\n return_xixj=None,\n ):\n \"\"\" Return the wavelength accessible from plasma points on the crystal\n\n For a given plasma point, only a certain lambda interval can be\n bragg-diffracted on the crystal (due to bragg's law and the crystal's\n dimensions)\n\n Beware, for a given pts and lamb, there can be up to 2 sets of\n solutions\n All non-valid solutions are set to nans, such that most of the time\n there is only one\n\n For a set of given:\n - pts (3, npts) array, (x, y, z) coordinates\n Using:\n - nlamb: sampling of the lamb interval (default: 100)\n - ndtheta: sampling of the lamb interval (default: 20)\n - det: (optional) a detector dict, for xi and xj\n Returns:\n - lamb: (npts, nlamb) array of sampled valid wavelength interval\n - phi: (npts, nlamb, ndtheta, 2) array of phi\n - dtheta: (npts, nlamb, ndtheta, 2) array of dtheta\n - psi: (npts, nlamb, ndtheta, 2) array of psi\n And optionally (return_xixj=True and det provided as dict):\n - xi: (npts, nlamb, ndtheta, 2) array of xi\n - xj: (npts, nlamb, ndtheta, 2) array of xj\n\n The result is computed with or w/o taking into account non-parallelism\n\n \"\"\"\n # Check / format\n if ndtheta is None:\n ndtheta = 20\n if nlamb is None:\n nlamb = 100\n assert nlamb >= 2, \"nlamb must be >= 2\"\n if return_phidtheta is None:\n return_phidtheta = True\n if return_xixj is None:\n return_xixj = det is not None\n if det is None:\n return_xixj = False\n if det is None:\n strict = False\n\n # Get lamb min / max\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n pts=pts,\n dtheta='envelop', psi='envelop',\n ntheta=None, npsi=None,\n n=n, grid=True,\n use_non_parallelism=use_non_parallelism,\n return_lamb=True,\n )\n lambmin = np.nanmin(lamb, axis=1)\n lambmax = np.nanmax(lamb, axis=1)\n if klamb is None:\n klamb = np.linspace(0, 1, nlamb)\n elif not (isinstance(klamb, np.ndarray) and klamb.ndim == 1):\n msg = \"Please provide klamb as a 1d vector!\"\n raise Exception(msg)\n nlamb = klamb.size\n lamb = lambmin[:, None] + (lambmax-lambmin)[:, None]*klamb\n\n return _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(\n cryst=self,\n lamb=lamb,\n n=n,\n ndtheta=ndtheta,\n pts=pts,\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=return_phidtheta,\n return_xixj=return_xixj,\n strict=strict,\n det=det,\n )\n\n def _calc_dthetapsiphi_from_lambpts(\n self,\n pts=None, bragg=None, lamb=None,\n n=None, ndtheta=None,\n use_non_parallelism=None,\n grid=None,\n ):\n\n # Check / Format inputs\n pts = _comp_optics._checkformat_pts(pts)\n npts = pts.shape[1]\n\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n\n # get nout, e1, e2\n nout, e1, e2, use_non_parallelism = self.get_unit_vectors(\n use_non_parallelism=use_non_parallelism\n )\n\n # Compute dtheta, psi, indnan (nlamb, npts, ndtheta)\n # In general there are 2 solutions! 
(only close to rowland in practice)\n dtheta, psi, indok, grid = _comp_optics.calc_dthetapsiphi_from_lambpts(\n pts,\n bragg,\n summit=self._dgeom['summit'], # To be updated (non-paralellism)?\n rcurve=self._dgeom['rcurve'],\n nout=nout, e1=e1, e2=e2,\n extenthalf=self._dgeom['extenthalf'],\n ndtheta=ndtheta,\n grid=grid,\n )\n\n # reshape bragg for matching dtheta.shape\n if grid is True:\n bragg = np.repeat(\n np.repeat(\n np.repeat(bragg[:, None], npts, axis=-1)[..., None],\n dtheta.shape[2],\n axis=-1,\n )[..., None],\n 2,\n axis=-1,\n )\n pts = pts[:, None, :, None, None]\n else:\n bragg = np.repeat(\n np.repeat(bragg[:, None], dtheta.shape[1], axis=1)[..., None],\n 2,\n axis=-1,\n )\n pts = pts[..., None, None]\n bragg[~indok] = np.nan\n\n # Get corresponding phi and re-check bragg, for safety\n bragg2, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n pts=pts,\n dtheta=dtheta, psi=psi,\n grid=False,\n use_non_parallelism=use_non_parallelism,\n return_lamb=False,\n )\n\n c0 = (\n bragg2.shape == bragg.shape\n and np.allclose(bragg, bragg2, equal_nan=True)\n )\n if not c0:\n try:\n plt.figure()\n plt.plot(bragg, bragg2, '.')\n except Exception as err:\n pass\n msg = (\n \"Inconsistency detected in bragg angle computations:\\n\"\n + \"\\t- from the points and lamb\\n\"\n + \"\\t- from the points and (dtheta, psi)\\n\"\n + \"\\nContext:\\n\"\n + \"\\t- use_non_parallelism: {}\\n\".format(use_non_parallelism)\n + \"\\t- bragg.shape = {}\\n\".format(bragg.shape)\n + \"\\t- bragg2.shape = {}\\n\".format(bragg2.shape)\n )\n raise Exception(msg)\n\n return dtheta, psi, phi, bragg\n\n def calc_raytracing_from_lambpts(\n self,\n lamb=None, bragg=None, pts=None,\n xi_bounds=None, xj_bounds=None, nphi=None,\n det=None, n=None, ndtheta=None,\n johann=False, lpsi=None, ldtheta=None,\n rocking=False, strict=None, plot=None, fs=None,\n dmargin=None, wintit=None,\n tit=None, proj=None,\n legend=None, draw=None, returnas=None,\n ):\n \"\"\" Visualize the de-focusing by ray-tracing of chosen lamb\n\n If plot, 3 different plots can be produced:\n - det: plots the intersection of rays with detector plane\n - '2d': plots the geometry of the rays in 2d cross and hor\n - '3d': plots the geometry of the rays in 3d\n Specify the plotting option by setting plot to any of these (or a list)\n \"\"\"\n # Check / format inputs\n if returnas is None:\n returnas = 'data'\n if plot is None or plot is True:\n plot = ['det', '3d']\n if isinstance(plot, str):\n plot = plot.split('+')\n assert all([ss in ['det', '2d', '3d'] for ss in plot])\n assert returnas in ['data', 'ax']\n\n pts = _comp_optics._checkformat_pts(pts)\n npts = pts.shape[1]\n\n # Get dtheta, psi and phi from pts/lamb\n dtheta, psi, phi, bragg = self._calc_dthetapsiphi_from_lambpts(\n pts=pts, lamb=lamb, bragg=bragg, n=n, ndtheta=ndtheta,\n )\n ndtheta = dtheta.shape[-1]\n # assert dtheta.shape == (nlamb, npts, ndtheta)\n\n # Check / get det\n det = self._checkformat_det(det)\n\n # Compute xi, xj of reflexion (phi -> phi + np.pi)\n xi, xj = self.calc_xixj_from_braggphi(\n bragg=bragg, phi=phi+np.pi, n=n,\n dtheta=dtheta, psi=psi,\n det=det, strict=strict, plot=False,\n )\n\n # Plot to be checked - unnecessary ?\n plot = False\n if plot is not False:\n ptscryst, ptsdet = None, None\n if '2d' in plot or '3d' in plot:\n ptscryst = self.get_local_noute1e2(dtheta, psi)[0]\n ptsdet = (det['cent'][:, None, None, None]\n + xi[None, ...]*det['ei'][:, None, None, None]\n + xj[None, ...]*det['ej'][:, None, None, None])\n\n ax = 
_plot_optics.CrystalBragg_plot_raytracing_from_lambpts(
                xi=xi, xj=xj, lamb=lamb,
                xi_bounds=xi_bounds, xj_bounds=xj_bounds,
                pts=pts, ptscryst=ptscryst, ptsdet=ptsdet,
                det_cent=det['cent'], det_nout=det['nout'],
                det_ei=det['ei'], det_ej=det['ej'],
                cryst=self, proj=plot, fs=fs, dmargin=dmargin,
                wintit=wintit, tit=tit, legend=legend, draw=draw)
            if returnas == 'ax':
                return ax
        return dtheta, psi, phi, bragg, xi, xj

    def _calc_spect1d_from_data2d(self, data, lamb, phi,
                                  nlambfit=None, nphifit=None,
                                  nxi=None, nxj=None,
                                  spect1d=None, mask=None, vertsum1d=None):
        if nlambfit is None:
            nlambfit = nxi
        if nphifit is None:
            nphifit = nxj
        return _comp_optics._calc_spect1d_from_data2d(
            data, lamb, phi,
            nlambfit=nlambfit,
            nphifit=nphifit,
            spect1d=spect1d,
            mask=mask,
            vertsum1d=vertsum1d,
        )

    def plot_data_vs_lambphi(
        self,
        xi=None, xj=None, data=None, mask=None,
        det=None, dtheta=None, psi=None, n=None,
        use_non_parallelism=None,
        nlambfit=None, nphifit=None,
        magaxis=None, npaxis=None,
        dlines=None, spect1d='mean',
        lambmin=None, lambmax=None,
        xjcut=None, dxj=None,
        plot=True, fs=None, tit=None, wintit=None,
        cmap=None, vmin=None, vmax=None,
        returnas=None,
    ):
        # Check / format inputs
        assert data is not None
        if returnas is None:
            returnas = 'spect'
        lreturn = ['ax', 'spect']
        if returnas not in lreturn:
            msg = ("Arg returnas must be in {}:\n".format(lreturn)
                   + "\t- 'spect': return a 1d vertically averaged spectrum\n"
                   + "\t- 'ax'   : return a list of axes instances")
            raise Exception(msg)

        xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)
        nxi = xi.size if xi is not None else np.unique(xii).size
        nxj = xj.size if xj is not None else np.unique(xjj).size

        # Compute lamb / phi
        bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
            xi=xii, xj=xjj, det=det,
            dtheta=dtheta, psi=psi,
            use_non_parallelism=use_non_parallelism,
            n=n,
            grid=True,
            return_lamb=True,
        )

        # Compute lambfit / phifit and spectrum1d
        (spect1d, lambfit, phifit,
         vertsum1d, phiminmax) = self._calc_spect1d_from_data2d(
             data, lamb, phi,
             nlambfit=nlambfit, nphifit=nphifit, nxi=nxi, nxj=nxj,
             spect1d=spect1d, mask=mask, vertsum1d=True
         )

        # Get phiref from mag axis
        lambax, phiax = None, None
        if magaxis is not None:
            if npaxis is None:
                npaxis = 1000
            thetacryst = np.arctan2(self._dgeom['summit'][1],
                                    self._dgeom['summit'][0])
            thetaax = thetacryst + np.pi/2*np.linspace(-1, 1, npaxis)
            pts = np.array([magaxis[0]*np.cos(thetaax),
                            magaxis[0]*np.sin(thetaax),
                            np.full((npaxis,), magaxis[1])])
            braggax, phiax = self.calc_braggphi_from_pts(pts)
            lambax = self.get_lamb_from_bragg(braggax)
            phiax = np.arctan2(np.sin(phiax-np.pi), np.cos(phiax-np.pi))
            ind = ((lambax >= lambfit[0]) & (lambax <= lambfit[-1])
                   & (phiax >= phifit[0]) & (phiax <= phifit[-1]))
            lambax, phiax = lambax[ind], phiax[ind]
            ind = np.argsort(lambax)
            lambax, phiax = lambax[ind], phiax[ind]

        # Get lamb / phi for xj
        lambcut, phicut, spectcut = None, None, None
        if xjcut is not None:
            if dxj is None:
                dxj = 0.002
            xjcut = np.sort(np.atleast_1d(xjcut).ravel())
            xicutf = np.tile(xi, (xjcut.size, 1))
            xjcutf = np.repeat(xjcut[:, None], nxi, axis=1)
            (
                braggcut, phicut, lambcut,
            ) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
                xi=xicutf, xj=xjcutf, det=det,
                dtheta=0, psi=0,
                use_non_parallelism=use_non_parallelism,
                n=1,
                grid=True,
                return_lamb=True,
            )
            indxj = [(np.abs(xj-xjc) <= dxj).nonzero()[0] for xjc in xjcut]
            spectcut = 
np.array([np.nanmean(data[ixj, :], axis=0)\n for ixj in indxj])\n\n # plot\n ax = None\n if plot:\n ax = _plot_optics.CrystalBragg_plot_data_vs_lambphi(\n xi, xj, bragg, lamb, phi, data,\n lambfit=lambfit, phifit=phifit, spect1d=spect1d,\n vertsum1d=vertsum1d, lambax=lambax, phiax=phiax,\n lambmin=lambmin, lambmax=lambmax, phiminmax=phiminmax,\n xjcut=xjcut, lambcut=lambcut, phicut=phicut, spectcut=spectcut,\n cmap=cmap, vmin=vmin, vmax=vmax, dlines=dlines,\n tit=tit, wintit=wintit, fs=fs)\n if returnas == 'spect':\n return spect1d, lambfit\n elif returnas == 'ax':\n return ax\n\n def get_plasmadomain_at_lamb(\n self,\n config=None,\n struct=None,\n domain=None,\n res=None,\n det=None,\n xixj_lim=None,\n strict=None,\n bragg=None,\n lamb=None,\n # for available lamb determination\n ndtheta=None,\n nlamb=None,\n n=None,\n use_non_parallelism=None,\n # plotting\n plot=None,\n dax=None,\n plot_as=None,\n lcolor=None,\n return_dax=None,\n ):\n \"\"\" Return pts in the plasma domain and a mask\n\n The mask is True only for points for which the desired wavelength is\n accesible from the crystal (and from the detector if strict=True and\n det is provided)\n\n More than one value of lamb can be provided (nlamb >= 1)\n\n pts is returned as a (3, npts) array\n lambok is returned as a (nlamb, npts) array\n\n \"\"\"\n\n # ------------\n # check inputs\n\n struct = _check_optics._check_config_get_Ves(\n config=config, struct=struct,\n )\n\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)\n\n # To be refined if xjlim is narrow\n if ndtheta is None:\n ndtheta = 5\n # To be refined if xilim is narrow\n if nlamb is None:\n nlamb = 11\n if strict is None:\n strict = True\n\n if plot is None:\n plot = True\n if return_dax is None:\n return_dax = plot is True\n\n # -------------\n # sample volume\n\n (\n pts, dV, ind, (resR, resZ, resPhi),\n ) = config.dStruct['dObj']['Ves'][struct].get_sampleV(\n res=res,\n domain=domain,\n returnas='(R, Z, Phi)',\n )\n\n # ------------------------------\n # check access from crystal only\n\n ptsXYZ = np.array([\n pts[0, :]*np.cos(pts[2, :]),\n pts[0, :]*np.sin(pts[2, :]),\n pts[1, :],\n ])\n\n lamb_access = self.get_lamb_avail_from_pts(\n pts=ptsXYZ,\n nlamb=2,\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=False,\n return_xixj=False,\n strict=False,\n )\n\n lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool)\n for ii, ll in enumerate(lamb):\n lambok[ii, :] = (\n (lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1])\n )\n\n # ---------------\n # refactor pts and lambok\n\n indok = np.any(lambok, axis=0)\n pts = pts[:, indok]\n ptsXYZ = ptsXYZ[:, indok]\n lambok = lambok[:, indok]\n\n # ---------------\n # check strict\n if strict is True:\n\n # det vs detbis if xixj_lim\n detbis = dict(det)\n if xixj_lim is not None:\n detbis['outline'] = np.array([\n np.r_[\n xixj_lim[0][0],\n xixj_lim[0][1]*np.r_[1, 1],\n xixj_lim[0][0],\n ],\n np.r_[\n xixj_lim[1][0]*np.r_[1, 1],\n xixj_lim[1][1]*np.r_[1, 1],\n ],\n ])\n detbis['outline'] = np.concatenate(\n (detbis['outline'], detbis['outline'][:, 0:1]),\n axis=1,\n )\n\n # intersection with detbis\n for kk, ll in enumerate(lamb):\n lambi = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(\n cryst=self,\n lamb=np.full((lambok[kk, :].sum(), 1), ll),\n n=n,\n ndtheta=ndtheta,\n pts=ptsXYZ[:, lambok[kk, :]],\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=False,\n return_xixj=False,\n strict=strict,\n det=detbis,\n )\n lambok[kk, 
lambok[kk, :]] = ~np.isnan(lambi[:, 0])\n\n # -------\n # return\n\n if plot:\n dax = _plot_optics.CrystalBragg_plot_plasma_domain_at_lamb(\n cryst=self,\n det=det,\n xixj_lim=xixj_lim,\n config=config,\n lamb=lamb,\n pts=pts,\n reseff=[resR, resZ, resPhi],\n lambok=lambok,\n dax=dax,\n plot_as=plot_as,\n lcolor=lcolor,\n )\n\n # ---------------\n # return\n\n if return_dax is True:\n return pts, lambok, dax\n else:\n return pts, lambok\n\n def calc_signal_from_emissivity(\n self,\n emis=None,\n config=None,\n struct=None,\n domain=None,\n res=None,\n det=None,\n xixj_lim=None,\n strict=None,\n bragg=None,\n lamb=None,\n binning=None,\n # for available lamb determination\n ndtheta=None,\n nlamb=None,\n n=None,\n use_non_parallelism=None,\n # plotting\n plot=None,\n vmin=None,\n vmax=None,\n vmin_bin=None,\n vmax_bin=None,\n cmap=None,\n dax=None,\n fs=None,\n dmargin=None,\n tit=None,\n return_dax=None,\n ):\n \"\"\" Return pts in the plasma domain and a mask\n\n The mask is True only for points for which the desired wavelength is\n accesible from the crystal (and from the detector if strict=True and\n det is provided)\n\n More than one value of lamb can be provided (nlamb >= 1)\n\n pts is returned as a (3, npts) array\n lambok is returned as a (nlamb, npts) array\n\n \"\"\"\n\n # ------------\n # check inputs\n\n (\n struct, lamb, binning,\n ) = _check_optics._check_calc_signal_from_emissivity(\n emis=emis, config=config, struct=struct,\n lamb=lamb, det=det, binning=binning,\n )\n\n bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)\n lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)\n\n # To be refined if xjlim is narrow\n if ndtheta is None:\n ndtheta = 5\n # To be refined if xilim is narrow\n if nlamb is None:\n nlamb = 11\n if strict is None:\n strict = True\n\n if plot is None:\n plot = True\n if return_dax is None:\n return_dax = plot is True\n\n # -------------\n # sample volume\n\n (\n pts, dV, ind, (resR, resZ, resPhi),\n ) = config.dStruct['dObj']['Ves'][struct].get_sampleV(\n res=res,\n domain=domain,\n returnas='(R, Z, Phi)',\n )\n\n # ------------------------------\n # check access from crystal only\n\n ptsXYZ = np.array([\n pts[0, :]*np.cos(pts[2, :]),\n pts[0, :]*np.sin(pts[2, :]),\n pts[1, :],\n ])\n\n lamb_access = self.get_lamb_avail_from_pts(\n pts=ptsXYZ,\n nlamb=2,\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=False,\n return_xixj=False,\n strict=False,\n )\n\n lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool)\n for ii, ll in enumerate(lamb):\n lambok[ii, :] = (\n (lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1])\n )\n\n # ---------------\n # refactor pts and lambok\n\n indok = np.any(lambok, axis=0)\n pts = pts[:, indok]\n ptsXYZ = ptsXYZ[:, indok]\n lambok = lambok[:, indok]\n\n # ---------------\n # check strict\n\n # det vs detbis if xixj_lim\n detbis = dict(det)\n if xixj_lim is not None:\n detbis['outline'] = np.array([\n np.r_[\n xixj_lim[0][0],\n xixj_lim[0][1]*np.r_[1, 1],\n xixj_lim[0][0],\n ],\n np.r_[\n xixj_lim[1][0]*np.r_[1, 1],\n xixj_lim[1][1]*np.r_[1, 1],\n ],\n ])\n detbis['outline'] = np.concatenate(\n (detbis['outline'], detbis['outline'][:, 0:1]),\n axis=1,\n )\n\n # intersection with detbis\n shape = tuple(np.r_[pts.shape[1], lamb.size, ndtheta, 2])\n xi = np.full(shape, np.nan)\n xj = np.full(shape, np.nan)\n val = np.full(shape, np.nan)\n for kk, ll in enumerate(lamb):\n (\n lambi, xii, xji,\n ) = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj(\n cryst=self,\n lamb=np.full((lambok[kk, :].sum(), 1), 
ll),\n n=n,\n ndtheta=ndtheta,\n pts=ptsXYZ[:, lambok[kk, :]],\n use_non_parallelism=use_non_parallelism,\n return_phidtheta=False,\n return_xixj=True,\n strict=True,\n det=detbis,\n )\n\n iok = ~np.isnan(lambi[:, 0])\n iokf = lambok[kk, :].nonzero()[0][iok]\n lambok[kk, lambok[kk, :]] = iok\n xi[iokf, kk, :, :] = xii[iok, 0, :, :]\n xj[iokf, kk, :, :] = xji[iok, 0, :, :]\n val[iokf, kk, :, :] = emis(\n r=pts[0, iokf],\n z=pts[1, iokf],\n phi=pts[2, iokf],\n lamb=lamb[kk:kk+1],\n t=None,\n )[:, 0, None, None]\n\n # -------\n # Optional binning\n\n binned = None\n if binning is not False:\n iok = np.isfinite(val)\n binned = scpstats.binned_statistic_2d(\n xi[iok].ravel(),\n xj[iok].ravel(),\n val[iok].ravel(),\n statistic='mean',\n bins=binning,\n expand_binnumbers=False,\n )[0]\n\n # -------\n # return\n\n if plot:\n dax = _plot_optics.CrystalBragg_plot_signal_from_emissivity(\n cryst=self,\n det=det,\n xixj_lim=xixj_lim,\n config=config,\n lamb=lamb,\n pts=pts,\n reseff=[resR, resZ, resPhi],\n xi=xi,\n xj=xj,\n val=val,\n lambok=lambok,\n binning=binning,\n binned=binned,\n # plotting\n vmin=vmin,\n vmax=vmax,\n vmin_bin=vmin_bin,\n vmax_bin=vmax_bin,\n cmap=cmap,\n dax=dax,\n fs=fs,\n dmargin=dmargin,\n tit=tit,\n )\n\n # ---------------\n # return\n\n if return_dax is True:\n return pts, val, xi, xj, binned, dax\n else:\n return pts, val, xi, xj, binned\n\n @staticmethod\n def fit1d_dinput(\n dlines=None, dconstraints=None, dprepare=None,\n data=None, lamb=None,\n mask=None, domain=None, pos=None, subset=None,\n same_spectrum=None, same_spectrum_dlamb=None,\n focus=None, valid_fraction=None, valid_nsigma=None,\n focus_half_width=None, valid_return_fract=None,\n ):\n \"\"\" Return a formatted dict of lines and constraints\n\n To be fed to _fit12d.multigausfit1d_from_dlines()\n Provides a user-friendly way of defining constraints\n \"\"\"\n\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit1d_dinput(\n dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,\n data=data, lamb=lamb,\n mask=mask, domain=domain, pos=pos, subset=subset,\n same_spectrum=same_spectrum,\n same_spectrum_dlamb=same_spectrum_dlamb,\n focus=focus, valid_fraction=valid_fraction,\n valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,\n valid_return_fract=valid_return_fract)\n\n def fit1d(\n self,\n # Input data kwdargs\n data=None, lamb=None,\n dinput=None, dprepare=None, dlines=None, dconstraints=None,\n mask=None, domain=None, subset=None, pos=None,\n same_spectrum=None, same_spectrum_dlamb=None,\n focus=None, valid_fraction=None, valid_nsigma=None,\n focus_half_width=None,\n # Optimization kwdargs\n dx0=None, dscales=None, x0_scale=None, bounds_scale=None,\n method=None, tr_solver=None, tr_options=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n loss=None, verbose=None, chain=None, jac=None, showonly=None,\n # Results extraction kwdargs\n amp=None, coefs=None, ratio=None,\n Ti=None, width=None, vi=None, shift=None,\n pts_lamb_total=None, pts_lamb_detail=None,\n # Saving and plotting kwdargs\n save=None, name=None, path=None,\n plot=None, fs=None, dmargin=None,\n tit=None, wintit=None, returnas=None,\n ):\n\n # ----------------------\n # Get dinput for 1d fitting from dlines, dconstraints, dprepare...\n if dinput is None:\n dinput = self.fit1d_dinput(\n dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,\n data=data, lamb=lamb,\n mask=mask, domain=domain, pos=pos, subset=subset,\n focus=focus, valid_fraction=valid_fraction,\n valid_nsigma=valid_nsigma, 
focus_half_width=focus_half_width,\n same_spectrum=same_spectrum,\n same_spectrum_dlamb=same_spectrum_dlamb)\n\n # ----------------------\n # return\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit1d(\n # Input data kwdargs\n data=data, lamb=lamb,\n dinput=dinput, dprepare=dprepare,\n dlines=dlines, dconstraints=dconstraints,\n mask=mask, domain=domain, subset=subset, pos=pos,\n # Optimization kwdargs\n method=method, tr_solver=tr_solver, tr_options=tr_options,\n xtol=xtol, ftol=ftol, gtol=gtol,\n max_nfev=max_nfev, loss=loss, chain=chain,\n dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,\n jac=jac, verbose=verbose,\n save=save, name=name, path=path,\n amp=amp, coefs=coefs, ratio=ratio,\n Ti=Ti, width=width, vi=vi, shift=shift,\n pts_lamb_total=pts_lamb_total,\n pts_lamb_detail=pts_lamb_detail,\n plot=plot, fs=fs, wintit=wintit, tit=tit)\n\n @staticmethod\n def fit1d_extract(\n dfit1d=None,\n amp=None, coefs=None, ratio=None,\n Ti=None, width=None,\n vi=None, shift=None,\n pts_lamb_total=None, pts_lamb_detail=None,\n ):\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit1d_extract(\n dfit1d=dfit,\n amp=amp, coefs=coefs, ratio=ratio,\n Ti=Ti, width=width,\n vi=vi, shift=shift,\n pts_lamb_total=pts_lamb_total, pts_lamb_detail=pts_lamb_detail)\n\n def fit1d_from2d(self):\n \"\"\" Useful for optimizing detector or crystal position\n\n Given a set of 2d images on a detector\n Transform the 2d (xi, xj) image into (lamb, phi)\n Slice nphi 1d spectra\n Fit them using a dict of reference lines (dlines)\n Optionally provide constraints for the fitting\n Return the vertical profiles of the wavelength shitf of each line\n To be used as input for an cost function and optimization\n\n 1d fitting is used instead of 2d because:\n - faster (for optimization)\n - does not require a choice of nbsplines\n - easier to understand and decide for user\n\n \"\"\"\n # Check / format inputs\n if lphi is None:\n msg = (\"Arg lphi must be provided !\")\n raise Exception(msg)\n\n # ----------------------\n # Prepare input data\n # (geometrical transform, domain, binning, subset, noise...)\n if dprepare is None:\n dprepare = self.fit2d_prepare(\n data=data, xi=xi, xj=xj, n=n,\n det=det, dtheta=dtheta, psi=psi,\n mask=mask, domain=domain,\n pos=pos, binning=binning,\n nbsplines=False, subset=False,\n lphi=lphi, lphi_tol=lphi_tol)\n\n # ----------------------\n # Get dinput for 2d fitting from dlines, and dconstraints\n if dinput is None:\n dinput = self.fit2d_dinput(\n dlines=dlines, dconstraints=dconstraints,\n deg=deg, knots=knots, nbsplines=nbsplines,\n domain=dprepare['domain'],\n dataphi1d=dprepare['dataphi1d'], phi1d=dprepare['phi1d'])\n\n # ----------------------\n # fit\n out = self.fit1d(\n xi=None, xj=None, data=None, mask=None,\n det=None, dtheta=None, psi=None, n=None,\n nlambfit=None, nphifit=None,\n lambmin=None, lambmax=None,\n dlines=None, spect1d=None,\n dconstraints=None, dx0=None,\n same_spectrum=None, dlamb=None,\n double=None,\n dscales=None, x0_scale=None, bounds_scale=None,\n method=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n loss=None, verbose=0, chain=None,\n jac=None, showonly=None,\n plot=None, fs=None, dmargin=None,\n tit=None, wintit=None, returnas=None,\n )\n pass\n\n def fit2d_dinput(\n self, dlines=None, dconstraints=None, dprepare=None,\n data=None, xi=None, xj=None, n=None,\n det=None, dtheta=None, psi=None,\n mask=None, domain=None, pos=None, binning=None, subset=None,\n # lphi=None, lphi_tol=None,\n deg=None, knots=None, nbsplines=None,\n 
focus=None, valid_fraction=None, valid_nsigma=None,\n focus_half_width=None, valid_return_fract=None,\n ):\n \"\"\" Return a formatted dict of lines and constraints\n\n To be fed to _fit12d.multigausfit1d_from_dlines()\n Provides a user-friendly way of defining constraints\n \"\"\"\n\n import tofu.spectro._fit12d as _fit12d\n if dprepare is None:\n # ----------------------\n # Geometrical transform\n xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)\n nxi = xi.size if xi is not None else np.unique(xii).size\n nxj = xj.size if xj is not None else np.unique(xjj).size\n\n # Compute lamb / phi\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xii, xj=xjj, det=det,\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n\n # ----------------------\n # Prepare input data (domain, binning, subset, noise...)\n dprepare = _fit12d.multigausfit2d_from_dlines_prepare(\n data, lamb, phi,\n mask=mask, domain=domain,\n pos=pos, binning=binning,\n nbsplines=nbsplines, subset=subset,\n nxi=nxi, nxj=nxj,\n ) # , lphi=lphi, lphi_tol=lphi_tol)\n return _fit12d.fit2d_dinput(\n dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,\n deg=deg, knots=knots, nbsplines=nbsplines,\n focus=focus, valid_fraction=valid_fraction,\n valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,\n valid_return_fract=valid_return_fract)\n\n def fit2d(\n self,\n # Input data kwdargs\n data=None, xi=None, xj=None,\n det=None, dtheta=None, psi=None, n=None,\n dinput=None, dprepare=None, dlines=None, dconstraints=None,\n mask=None, domain=None, subset=None, pos=None, binning=None,\n focus=None, valid_fraction=None, valid_nsigma=None,\n focus_half_width=None,\n deg=None, knots=None, nbsplines=None,\n # Optimization kwdargs\n dx0=None, dscales=None, x0_scale=None, bounds_scale=None,\n method=None, tr_solver=None, tr_options=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n loss=None, verbose=None, chain=None, jac=None, showonly=None,\n predeclare=None, debug=None,\n # Results extraction kwdargs\n amp=None, coefs=None, ratio=None,\n Ti=None, width=None, vi=None, shift=None,\n pts_lamb_total=None, pts_lamb_detail=None,\n # Saving and plotting kwdargs\n save=None, name=None, path=None,\n plot=None, fs=None, dmargin=None,\n tit=None, wintit=None, returnas=None,\n ):\n\n # npts=None, dax=None,\n # spect1d=None, nlambfit=None,\n # plotmode=None, angunits=None, indspect=None,\n # cmap=None, vmin=None, vmax=None):\n \"\"\" Perform 2d fitting of a 2d spectrometre image\n\n Fit the spectrum by a sum of gaussians\n Modulate each gaussian parameters by bsplines in the spatial direction\n\n data must be provided in shape (nt, nxi, nxj), where:\n - nt is the number of time steps\n - nxi is the nb. of pixels in the horizontal / spectral direction\n - nxj is the nb. 
of pixels in the vertical / spacial direction\n\n \"\"\"\n\n # ----------------------\n # Geometrical transform in dprepare\n if dinput is None:\n dinput = self.fit2d_dinput(\n dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,\n data=data, xi=xi, xj=xj, n=n,\n det=det, dtheta=dtheta, psi=psi,\n mask=mask, domain=domain,\n pos=pos, binning=binning, subset=subset,\n deg=deg, knots=knots, nbsplines=nbsplines,\n focus=focus, valid_fraction=valid_fraction,\n valid_nsigma=valid_nsigma, focus_half_width=focus_half_width)\n\n # ----------------------\n # return\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit2d(\n dinput=dinput, dprepare=dprepare,\n dlines=dlines, dconstraints=dconstraints,\n lamb=lamb, phi=phi, data=data, mask=mask,\n nxi=dinput['dprepare']['nxi'], nxj=dinput['dprepare']['nxj'],\n domain=domain, pos=pos, binning=binning, subset=subset,\n deg=deg, knots=knots, nbsplines=nbsplines,\n method=method, tr_solver=tr_solver, tr_options=tr_options,\n xtol=xtol, ftol=ftol, gtol=gtol,\n max_nfev=max_nfev, loss=loss, chain=chain,\n dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,\n jac=jac, verbose=verbose,\n save=save, name=name, path=path,\n plot=plot)\n\n @staticmethod\n def fit2d_extract(dfit2d=None,\n amp=None, Ti=None, vi=None,\n pts_phi=None, npts_phi=None,\n pts_lamb_phi_total=None,\n pts_lamb_phi_detail=None):\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.fit2d_extract_data(\n dfit2d=dfit2d,\n amp=amp, Ti=Ti, vi=vi,\n pts_phi=pts_phi, npts_phi=npts_phi,\n pts_lamb_phi_total=pts_lamb_phi_total,\n pts_lamb_phi_detail=pts_lamb_phi_detail)\n\n def fit2d_plot(self, dfit2d=None, ratio=None,\n dax=None, plotmode=None, angunits=None,\n cmap=None, vmin=None, vmax=None,\n dmargin=None, tit=None, wintit=None, fs=None):\n dout = self.fit2d_extract(\n dfit2d,\n amp=amp, Ti=Ti, vi=vi,\n pts_lamb_phi_total=pts_lamb_phi_total,\n pts_lamb_phi_detail=pts_lamb_phi_detail)\n return _plot_optics.CrystalBragg_plot_data_fit2d(\n dfit2d=dfit2d, dout=dout, ratio=ratio,\n dax=dax, plotmode=plotmode, angunits=angunits,\n cmap=cmap, vmin=vmin, vmax=vmax,\n dmargin=dmargin, tit=tit, wintit=wintit, fs=fs)\n\n def noise_analysis(\n self, data=None, xi=None, xj=None, n=None,\n det=None, dtheta=None, psi=None,\n mask=None, valid_fraction=None, nxerrbin=None,\n margin=None, domain=None, nlamb=None,\n deg=None, knots=None, nbsplines=None,\n loss=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n method=None, tr_solver=None, tr_options=None,\n verbose=None, plot=None,\n ms=None, dcolor=None,\n dax=None, fs=None, dmargin=None,\n wintit=None, tit=None, sublab=None,\n save_fig=None, name_fig=None, path_fig=None,\n fmt=None, return_dax=None,\n ):\n\n # ----------------------\n # Geometrical transform\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xi, xj=xj, det=det,\n dtheta=dtheta, psi=psi,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.noise_analysis_2d(\n data, lamb, phi,\n mask=mask, valid_fraction=valid_fraction,\n margin=margin, nxerrbin=nxerrbin,\n nlamb=nlamb, deg=deg, knots=knots, nbsplines=nbsplines,\n loss=loss, max_nfev=max_nfev,\n xtol=xtol, ftol=ftol, gtol=gtol,\n method=method, tr_solver=tr_solver, tr_options=tr_options,\n verbose=verbose, plot=plot,\n ms=ms, dcolor=dcolor,\n dax=dax, fs=fs, dmargin=dmargin,\n wintit=wintit, tit=tit, sublab=sublab,\n save_fig=save_fig, name_fig=name_fig, path_fig=path_fig,\n fmt=fmt, 
return_dax=return_dax)\n\n @staticmethod\n def noise_analysis_plot(\n dnoise=None, margin=None, valid_fraction=None,\n ms=None, dcolor=None,\n dax=None, fs=None, dmargin=None,\n wintit=None, tit=None, sublab=None,\n save=None, name=None, path=None, fmt=None,\n ):\n import tofu.spectro._plot as _plot_spectro\n return _plot_spectro.plot_noise_analysis(\n dnoise=dnoise, margin=margin, valid_fraction=valid_fraction,\n ms=ms, dcolor=dcolor,\n dax=dax, fs=fs, dmargin=dmargin,\n wintit=wintit, tit=tit, sublab=sublab,\n save=save, name=name, path=path, fmt=fmt)\n\n def noise_analysis_scannbs(\n self, data=None, xi=None, xj=None, n=None,\n det=None, dtheta=None, psi=None,\n mask=None, nxerrbin=None,\n domain=None, nlamb=None,\n deg=None, knots=None, nbsplines=None, lnbsplines=None,\n loss=None, max_nfev=None,\n xtol=None, ftol=None, gtol=None,\n method=None, tr_solver=None, tr_options=None,\n verbose=None, plot=None,\n ms=None, dax=None, fs=None, dmargin=None,\n wintit=None, tit=None, sublab=None,\n save_fig=None, name_fig=None, path_fig=None,\n fmt=None, return_dax=None,\n ):\n\n # ----------------------\n # Geometrical transform\n bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(\n xi=xi, xj=xj, det=det,\n dtheta=0, psi=0,\n use_non_parallelism=use_non_parallelism,\n n=n,\n grid=True,\n return_lamb=True,\n )\n\n import tofu.spectro._fit12d as _fit12d\n return _fit12d.noise_analysis_2d_scannbs(\n data, lamb, phi,\n mask=mask, nxerrbin=nxerrbin, nlamb=nlamb,\n deg=deg, knots=knots, nbsplines=nbsplines, lnbsplines=lnbsplines,\n loss=loss, max_nfev=max_nfev,\n xtol=xtol, ftol=ftol, gtol=gtol,\n method=method, tr_solver=tr_solver, tr_options=tr_options,\n verbose=verbose, plot=plot,\n ms=ms, dax=dax, fs=fs, dmargin=dmargin,\n wintit=wintit, tit=tit, sublab=sublab,\n save_fig=save_fig, name_fig=name_fig, path_fig=path_fig,\n fmt=fmt, return_dax=return_dax)\n\n @staticmethod\n def noise_analysis_scannbs_plot(\n dnoise_scan=None, ms=None,\n dax=None, fs=None, dmargin=None,\n wintit=None, tit=None, sublab=None,\n save=None, name=None, path=None, fmt=None,\n ):\n import tofu.spectro._plot as _plot_spectro\n return _plot_spectro.plot_noise_analysis_scannbs(\n dnoise=dnoise_scan, ms=ms,\n dax=dax, fs=fs, dmargin=dmargin,\n wintit=wintit, tit=tit, sublab=sublab,\n save=save, name=name, path=path, fmt=fmt)\n" ]
[ [ "numpy.sum", "scipy.interpolate.interp1d", "numpy.any", "numpy.cross", "numpy.argsort", "scipy.interpolate.interp2d", "matplotlib.pyplot.plot", "numpy.meshgrid", "numpy.isfinite", "numpy.vstack", "numpy.allclose", "matplotlib.colors.to_rgba", "numpy.nanmean", "matplotlib.pyplot.figure", "numpy.abs", "numpy.arccos", "numpy.cos", "numpy.isnan", "numpy.linspace", "numpy.round", "numpy.unique", "numpy.mean", "numpy.tile", "numpy.zeros", "numpy.hypot", "numpy.repeat", "numpy.arange", "numpy.nanargmin", "numpy.max", "numpy.min", "numpy.array", "numpy.linalg.norm", "numpy.arctan2", "numpy.nanmax", "numpy.atleast_1d", "numpy.nanmin", "numpy.sqrt", "numpy.sin", "numpy.concatenate", "numpy.full" ] ]
srihari-nagaraj/anuvaad
[ "b09b01a033a033e97db6e404c088e0e6332053e4" ]
[ "anuvaad-etl/anuvaad-extractor/document-processor/evaluator/evaluator_string/src/notebooks/tesseract_ocr_evaluation_local.py" ]
[ "import glob\nimport uuid\nimport json\nimport requests\nimport copy,time\nimport os\nimport cv2\nimport numpy as np\nfrom time import sleep\nimport pandas as pd\nimport logging\nfrom collections import Counter\nimport pytesseract\nfrom pytesseract import Output\n#from pytesseract import pytesseract\nfrom difflib import SequenceMatcher\nfrom io import StringIO\nfrom dynamic_adjustment import coord_adjustment\nimport ast\nfrom leven import levenshtein\nfrom horizontal_merging import horzontal_merging\n\nocr_level = \"LINE\"\ntext_processing = True\nREJECT_FILTER = 2\n#crop_factor= 5\n#crop_factor_y= 4\ncrop_factor= 5\ncrop_factor_y= 0\ncrop_save = True\ndigitization = True\nvis_thresh=0.90\nLANG_MAPPING = {\n \"en\" : [\"Latin\",\"eng\"],\n \"kn\" : ['Kannada',\"kan\"],\n \"gu\": [\"guj\"],\n \"or\": [\"ori\"],\n \"hi\" : [\"Devanagari\",\"hin\",\"eng\"],\n \"bn\" : [\"Bengali\",\"ben\"],\n \"mr\": [\"Devanagari\",\"hin\",\"eng\"],\n \"ta\": ['Tamil',\"tam\"],\n \"te\" : [\"Telugu\",\"tel\"],\n \"ml\" :[\"Malayalam\"],\n \"ma\" :[\"Marathi\"]\n}\n\n\n#path = '/home/ubuntu/tesseract_evaluation/data/'\n#output_path = '/home/ubuntu/tesseract_evaluation/result/'\n#output_path_boxes= '/home/ubuntu/tesseract_evaluation/test_word_boxes/'\n#base_path = '/home/ubuntu/tesseract_evaluation/test_word_boxes/'\npath = '/home/naresh/Tarento/testing_document_processor/test_pipeline/data/'\noutput_path = '/home/naresh/Tarento/testing_document_processor/test_pipeline/result/'\noutput_path_boxes= '/home/naresh/Tarento/testing_document_processor/test_word_boxes/'\nbase_path= '/home/naresh/Tarento/testing_document_processor/test_word_boxes/'\n\n\npsms = [6,7,8,9,10,11]\ntoken = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyTmFtZSI6ImRoaXJhai5kYWdhQHRhcmVudG8uY29tIiwicGFzc3dvcmQiOiJiJyQyYiQxMiRuTXdNcHpCVlBXVVUvSlVLWXBKYWkuQUd2SUNJalJVcUdIbnBPenRzai5VRU55emlSZmk1TyciLCJleHAiOjE2MTk3Njg2NjN9.14IL5_kw83F5gxjUMSw6kCDLYQhjAg306AwJj0DsxWc'\n\n\nword_url = \"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate\"\ngoogle_url = \"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate\"\nlayout_url = \"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate\"\nsegmenter_url = \"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/async/initiate\"\nbs_url =\"https://auth.anuvaad.org/anuvaad-etl/wf-manager/v1/workflow/jobs/search/bulk\"\n\nevaluator_url = \"https://auth.anuvaad.org/anuvaad-etl/document-processor/evaluator/v0/process\"\n\n#evaluator_url = 'http://0.0.0.0:5001/anuvaad-etl/document-processor/evaluator/v0/process'\n\ndownload_url =\"https://auth.anuvaad.org/download/\"\nupload_url = 'https://auth.anuvaad.org/anuvaad-api/file-uploader/v0/upload-file'\n\n\nheaders = {\n 'auth-token' :token }\n\n\n\n\n\nclass Draw:\n \n def __init__(self,input_json,save_dir,regions,prefix='',color= (255,0,0),thickness=5): \n self.json = input_json\n self.save_dir = save_dir\n self.regions = regions\n self.prefix = prefix\n self.color = color\n self.thickness=thickness\n if self.prefix == 'seg':\n #print('drawing children')\n self.draw_region_children()\n else:\n self.draw_region__sub_children()\n \n def get_coords(self,page_index):\n return self.json['outputs'][0]['pages'][page_index][self.regions]\n \n def get_page_count(self):\n return(self.json['outputs'][0]['page_info'])\n \n def get_page(self,page_index):\n page_path = self.json['outputs'][0]['page_info'][page_index]\n page_path = page_path.split('upload')[1]#'/'.join(page_path.split('/')[1:])\n 
#print(page_path) \n return download_file(download_url,headers,page_path,f_type='image')\n\n def draw_region(self):\n font = cv2.FONT_HERSHEY_SIMPLEX \n for page_index in range(len(self.get_page_count())) :\n nparr = np.frombuffer(self.get_page(page_index), np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n for region in self.get_coords(page_index) :\n ground = region['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n cv2.polylines(image, [np.array(pts)],True, self.color, self.thickness)\n if 'class' not in region.keys():\n region['class'] = 'TEXT'\n cv2.putText(image, str(region['class']), (pts[0][0],pts[0][1]), font, \n 2, (0,125,255), 3, cv2.LINE_AA)\n \n image_path = os.path.join(self.save_dir , '{}_{}_{}.png'.format(self.regions,self.prefix,page_index)) \n cv2.imwrite(image_path , image)\n \n def draw_region_children(self):\n font = cv2.FONT_HERSHEY_SIMPLEX \n fontScale = 2\n thickness =3\n\n\n for page_index in range(len(self.get_page_count())) :\n nparr = np.frombuffer(self.get_page(page_index), np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n for region_index,region in enumerate(self.get_coords(page_index)) :\n try:\n ground = region['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n #print(pts)\n region_color = (0 ,0,125+ 130*(region_index/ len(self.get_coords(page_index))))\n cv2.polylines(image, [np.array(pts)],True, region_color, self.thickness)\n cv2.putText(image, str(region_index), (pts[0][0],pts[0][1]), font, \n fontScale, region_color, thickness, cv2.LINE_AA)\n for line_index, line in enumerate(region['children']):\n ground = line['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n\n line_color = (125 + 130*(region_index/ len(self.get_coords(page_index))) ,0,0)\n cv2.polylines(image, [np.array(pts)],True, line_color, self.thickness -2)\n cv2.putText(image, str(line_index), (pts[0][0],pts[0][1]), font, \n fontScale, line_color, thickness, cv2.LINE_AA)\n except Exception as e:\n print(str(e))\n print(region)\n \n image_path = os.path.join(self.save_dir , '{}_{}.png'.format(self.prefix,page_index))\n cv2.imwrite(image_path , image)\n def draw_region__sub_children(self): \n for page_index in range(len(self.get_page_count())) :\n nparr = np.frombuffer(self.get_page(page_index), np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n \n font = cv2.FONT_HERSHEY_SIMPLEX \n fontScale = 2\n\n # Blue color in BGR \n color = (0 ,255,0) \n\n # Line thickness of 2 px \n thickness = 3\n\n # Using cv2.putText() method \n \n for region_index,region in enumerate(self.get_coords(page_index)) :\n try:\n ground = region['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n #print(pts)\n region_color = (0,0,255)\n cv2.polylines(image, [np.array(pts)],True, region_color, self.thickness)\n for line_index, line in enumerate(region['regions']):\n ground = line['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x'])-1 ,int(pt['y']) -1 ])\n\n line_color = (255,0,0)\n cv2.polylines(image, [np.array(pts)],True, line_color, self.thickness -2)\n \n cv2.putText(image, str(line_index), (pts[0][0],pts[0][1]), font, \n fontScale, (255,0,0), thickness, cv2.LINE_AA)\n for word_index, word in enumerate(line['regions']):\n ground = word['boundingBox']['vertices']\n pts = []\n for pt in ground:\n pts.append([int(pt['x']) -3,int(pt['y'])-3])\n\n word_color = 
(0,255,0)\n cv2.polylines(image, [np.array(pts)],True, word_color, self.thickness -2)\n\n cv2.putText(image, str(word_index), (pts[0][0],pts[0][1]), font, \n fontScale-1,(0,255,0), thickness, cv2.LINE_AA)\n except Exception as e:\n print(str(e))\n print(region)\n \n \n \n #print(self.prefix)\n image_path = os.path.join(self.save_dir , '{}_{}_{}.png'.format(self.prefix,self.regions,page_index))\n cv2.imwrite(image_path , image)\n\n\n\n\n\n# # google vision pipeline\n\n\ndef google_ocr_v15(url,headers,pdf_name):\n \n file = {\n \"files\": [\n {\n \"locale\": \"hi\",\n \"path\": pdf_name,\n \"type\": \"pdf\",\n \"config\":{\n \"OCR\": {\n \"option\": \"HIGH_ACCURACY\",\n \"language\": \"hi\",\n \"top_correction\":\"True\",\n \"craft_word\": \"True\",\n \"craft_line\": \"True\",\n }\n }}\n ],\n \"workflowCode\": \"WF_A_FCWDLDBSOD15GV\"\n }\n res = requests.post(url,json=file,headers=headers)\n return res.json()\n\n\n\n\n\ndef upload_file(pdf_file,headers,url):\n #url = 'https://auth.anuvaad.org/anuvaad-api/file-uploader/v0/upload-file'\n files = [\n ('file',(open(pdf_file,'rb')))] \n\n response = requests.post(url, headers=headers, files=files)\n \n return response.json()\n\n\n\n\n\ndef download_file(download_url,headers,outputfile,f_type='json'):\n download_url =download_url+str(outputfile)\n res = requests.get(download_url,headers=headers)\n if f_type == 'json':\n return res.json()\n else :\n return res.content\n\n\n\n\n\ndef save_json(path,res):\n with open(path, \"w\", encoding='utf8') as write_file:\n json.dump(res, write_file,ensure_ascii=False )\n\n\n\n\n\ndef bulk_search(job_id,bs_url,headers):\n bs_request = {\n \"jobIDs\": [job_id],\n \"taskDetails\":\"true\"\n }\n print(job_id)\n res = requests.post(bs_url,json=bs_request,headers=headers, timeout = 10000)\n print(res.json())\n \n \n while(1):\n \n in_progress = res.json()['jobs'][0]['status']\n \n if in_progress == 'COMPLETED':\n outputfile = res.json()['jobs'][0]['output'][0]['outputFile']\n print(in_progress)\n return outputfile\n break\n sleep(0.5)\n print(in_progress)\n res = requests.post(bs_url,json=bs_request,headers=headers, timeout = 10000)\n \n \n\n\n\n\n\ndef execute_module(module,url,input_file,module_code,pdf_dir,overwirte=True , draw=True):\n \n \n \n output_path = os.path.join(pdf_dir,'{}.json'.format(module_code))\n if os.path.exists(output_path) and not overwirte:\n print(' loading *****************{}'.format(module_code ))\n with open(output_path,'r') as wd_file :\n response = json.load(wd_file)\n \n wf_res = pdf_dir + '/{}_wf.json'.format(module_code)\n with open(wf_res,'r') as wd_file :\n json_file = json.load(wd_file) \n #json_file = upload_file(output_path,headers,upload_url)['data']\n else :\n if module_code in ['wd','gv']:\n res = upload_file(input_file,headers,upload_url)\n print('upload response **********', res)\n pdf_name = res['data']\n response = module(url,headers,pdf_name)\n \n else : \n response = module(url,headers,input_file)\n \n if 'eval' in module_code :\n json_file = response['outputFile']\n response = download_file(download_url,headers,json_file)\n save_json(output_path,response)\n return json_file,response\n \n \n print(' response *****************{} {}'.format(module_code ,response ))\n job_id = response['jobID']\n json_file = bulk_search(job_id,bs_url,headers)\n save_json(pdf_dir + '/{}_wf.json'.format(module_code),json_file) \n print('bulk search response **************',json_file )\n response = download_file(download_url,headers,json_file)\n save_json(output_path,response)\n if draw :\n 
if module_code in ['wd','gv']:\n Draw(response,pdf_dir,regions='lines',prefix=module_code)\n else :\n Draw(response,pdf_dir,regions='regions',prefix=module_code)\n \n return json_file,response\n \n\n\n\ndef evaluate__and_save_input(pdf_files,output_dir,headers,word_url,layout_url,download_url,upload_url,bs_url):\n word_responses = {}\n layout_responses = {}\n segmenter_responses = []\n for pdf in pdf_files:\n #try :\n pdf_name = pdf.split('/')[-1].split('.')[0]\n print(pdf , ' is being processed')\n pdf_output_dir = os.path.join(output_dir,pdf_name)\n os.system('mkdir -p \"{}\"'.format(pdf_output_dir))\n\n\n wd_json,_ = execute_module(google_ocr_v15,word_url,input_file=pdf,module_code='gv',pdf_dir=pdf_output_dir,overwirte=False , draw=False)\n\n\n\n\ndef main(path,headers,word_url,layout_url,download_url,upload_url,bs_url):\n pdf_names = glob.glob(path + '/*.pdf')\n \n \n return evaluate__and_save_input(pdf_names,output_path,headers,word_url,layout_url,download_url,upload_url,bs_url)\n \n\nif digitization:\n main(path,headers,word_url,layout_url,download_url,upload_url,bs_url)\n\n\ndef bound_coordinate(corrdinate,max):\n if corrdinate < 0 :\n corrdinate = 0\n if corrdinate > max:\n corrdinate = max - 2\n return int(corrdinate)\ndef get_image_from_box(image, box, height=140):\n #box = data['box']\n #scale = np.sqrt((box[1, 1] - box[2, 1])**2 + (box[0, 1] - box[3, 1])**2) / height\n #print(\"scale is \",scale)\n #w = int(np.sqrt((box[0, 0] - box[1, 0])**2 + (box[2, 0] - box[3, 0])**2) / scale)\n w = max(abs(box[0, 0] - box[1, 0]),abs(box[2, 0] - box[3, 0]))\n height = max(abs(box[0, 1] - box[3, 1]),abs(box[1, 1] - box[2, 1]))\n pts1 = np.float32(box)\n #w=2266-376\n pts2 = np.float32([[0, 0], [int(w), 0],[int(w),int(height)],[0,int(height)]])\n M = cv2.getPerspectiveTransform(pts1, pts2)\n result_img = cv2.warpPerspective(image,M,(int(w), int(height))) #flags=cv2.INTER_NEAREST\n return result_img\n\ndef process_dfs(temp_df):\n\ttemp_df = temp_df[temp_df.text.notnull()]\n\ttext = \"\"\n\tconf=0\n\ttemp_dict1 = []\n\tfor index, row in temp_df.iterrows():\n\t\ttemp_dict2 = {}\n\t\tconf = conf + row[\"conf\"]\n\t\ttemp_dict2[\"text\"]=row['text']\n\t\ttemp_dict2[\"conf\"]=row['conf']\n\t\ttext = text +\" \"+ str(row['text'])\n\t\ttemp_dict1.append(temp_dict2)\n\treturn text,temp_dict1\ndef process_dfs_updated(temp_df,language,psm_val,image):\n\ttemp_df = temp_df[temp_df.text.notnull()]\n\ttext = \"\"\n\tconf=0\n\ttemp_dict1 = []\n\tif len(temp_df)>0:\n\t\tfor index, row in temp_df.iterrows():\n\t\t\ttemp_dict2 = {}\n\t\t\torg_conf = row[\"conf\"]\n\t\t\torg_text = row['text']\n\t\t\tflag = True\n\t\t\tif row[\"conf\"]<50:\n\t\t\t\tprint(row[\"top\"],row[\"height\"],row[\"left\"],row[\"width\"])\n\t\t\t\tcrop_image = image[ int(row[\"top\"]):int(row[\"top\"]+row[\"height\"]), int(row[\"left\"]):int(row[\"left\"]+row[\"width\"])]\n\t\t\t\tfor psm in psms:\n\t\t\t\t\t\n\t\t\t\t\tdf2 = pytesseract.image_to_data(crop_image,config='--psm '+str(psm), lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)\n\t\t\t\t\ttemp_df2 = df2[df2.text.notnull()]\n\t\t\t\t\tif len(temp_df2)>0:\n\t\t\t\t\t\tnew_conf = temp_df2.iloc[0].conf\n\t\t\t\t\t\tif org_conf<new_conf:\n\t\t\t\t\t\t\torg_conf = new_conf\n\t\t\t\t\t\t\torg_text = temp_df2.iloc[0].text\n\t\t\t\t\t\n\t\t\tif flag:\n\t\t\t\tprint(\"old text\", row['text'])\n\t\t\t\tprint(\"new text\", org_text)\t\t\n\t\t\tconf = conf + org_conf\n\t\t\ttemp_dict2[\"text\"]=org_text\n\t\t\ttemp_dict2[\"conf\"]=org_conf\n\t\t\ttext = text +\" \"+ 
str(org_text)\n\t\t\ttemp_dict1.append(temp_dict2)\n\treturn text,temp_dict1\n \ndef check_psm(path,coord,language,mode_height,save_base_path,psm_val,org_score,org_text,line_text,org_conf):\n\tfor psm in psms:\n\t\ttext,conf_dict = get_text(path,coord,language,mode_height,save_base_path,psm)\n\t\tif text_processing:\n\t\t\ttext_list = text.split()\n\t\t\ttext = \" \".join(text_list)\n\t\t\tscore,message,match_count = seq_matcher(text,line_text)\n\t\t\tif score==1.0 or score==1:\n\t\t\t\torg_score = score\n\t\t\t\torg_text = text\n\t\t\t\torg_conf = conf_dict\n\t\t\t\tbreak\n\t\t\telif score>org_score:\n\t\t\t\torg_score =score\n\t\t\t\torg_text = text\n\t\t\t\torg_conf = conf_dict\n\t\t\t\t\n\treturn org_text, org_conf,org_score\n\t\t\n \n \n\t\t\n\t\ndef get_text(path,coord,language,mode_height,save_base_path,psm_val):\n #try:\n\n\tpath = path.split('upload')[1]\n\n\timage = download_file(download_url,headers,path,f_type='image')\n\tnparr = np.frombuffer(image, np.uint8)\n\timage = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\t#image = cv2.imread(\"/home/naresh/crop.jpeg\",0)\n\theight, width,channel = image.shape\n\n\t# left = bound_coordinate(coord[0] , width)\n\t# top = bound_coordinate(coord[1],height )\n\t# right = bound_coordinate(coord[2] ,width)\n\t# bottom = bound_coordinate(coord[3], height)\n\t# region_width = abs(right-left)\n\t# region_height = abs(bottom-top)\n\n\t# if left==right==top==bottom==0 or region_width==0 or region_height==0:\n\t# return \"\"\t\n\n\tcrop_image = get_image_from_box(image, coord, height=abs(coord[0,1]-coord[2,1]))\n\t#crop_image = image[ top:bottom, left:right]\n\t#crop_image_cv = image[ coord[0,1]:coord[2,1], coord[0,0]:coord[1,0]]\n\tsave_path = save_base_path+\"/\"+\"_psm_pers\"+str(psm_val)+\"--\"+str(uuid.uuid4()) + '.jpg'\n\n\tif crop_save:\n\t cv2.imwrite(save_path,crop_image)\n\n\t#if abs(bottom-top) > 3*mode_height:\n\t#print(LANG_MAPPING[language][0])\n\tif abs(coord[1,1]-coord[2,1])>mode_height:\n\t #text = pytesseract.image_to_string(crop_image,config='--psm 6', lang=LANG_MAPPING[language][1])\n\t dfs = pytesseract.image_to_data(crop_image,config='--psm 6', lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)\n\t #text,conf_dict = process_dfs(dfs)\n\t text,conf_dict = process_dfs_updated(dfs,language,6,crop_image)\n\t \n\telse:\n\t #text = pytesseract.image_to_string(crop_image,config='--psm '+str(psm_val), lang=LANG_MAPPING[language][1])\n\t dfs = pytesseract.image_to_data(crop_image,config='--psm '+str(psm_val), lang=LANG_MAPPING[language][0],output_type=Output.DATAFRAME)\n\t #text,conf_dict = process_dfs(dfs)\n\t text,conf_dict = process_dfs_updated(dfs,language,psm_val,crop_image)\n\treturn text,conf_dict\n #except:\n\n #print(\"xxxxxxxxxxxxxxxxxxxxxxxxxx\",coord)\n #print([0.0])\n #return \"\",[0.0]\n\n\ndef merger_text(line):\n text = \"\"\n word_count=0\n for word_idx, word in enumerate(line['regions']):\n if \"text\" in word.keys() and word[\"text\"].replace(\" \", \"\") != \"\":\n text = text+\" \"+ word[\"text\"]\n word_count=word_count+1\n return text, word_count\n\n\n\ndef get_coord(bbox):\n temp_box = []\n temp_box_cv = []\n temp_box.append([bbox[\"boundingBox\"]['vertices'][0]['x'],bbox[\"boundingBox\"]['vertices'][0]['y']])\n temp_box.append([bbox[\"boundingBox\"]['vertices'][1]['x'],bbox[\"boundingBox\"]['vertices'][1]['y']])\n temp_box.append([bbox[\"boundingBox\"]['vertices'][2]['x'],bbox[\"boundingBox\"]['vertices'][2]['y']])\n 
temp_box.append([bbox[\"boundingBox\"]['vertices'][3]['x'],bbox[\"boundingBox\"]['vertices'][3]['y']])\n \n temp_box_cv.append(bbox[\"boundingBox\"]['vertices'][0]['x'])\n temp_box_cv.append(bbox[\"boundingBox\"]['vertices'][0]['y'])\n temp_box_cv.append(bbox[\"boundingBox\"]['vertices'][2]['x'])\n temp_box_cv.append(bbox[\"boundingBox\"]['vertices'][2]['y'])\n temp_box = np.array(temp_box)\n return temp_box,temp_box_cv\ndef frequent_height(page_info):\n text_height = []\n if len(page_info) > 0 :\n for idx, level in enumerate(page_info):\n coord_crop,coord = get_coord(level)\n if len(coord)!=0:\n text_height.append(abs(coord[3]-coord[1]))\n occurence_count = Counter(text_height)\n return occurence_count.most_common(1)[0][0]\n else :\n return 0\ndef remove_space(a):\n return a.replace(\" \", \"\")\n\ndef seq_matcher(tgt_text,gt_text):\n tgt_text = remove_space(tgt_text)\n gt_text = remove_space(gt_text)\n score = SequenceMatcher(None, gt_text, tgt_text).ratio()\n mismatch_count = levenshtein(tgt_text, gt_text)\n match_count = abs(len(gt_text)-mismatch_count)\n score = match_count/len(gt_text)\n \n\n# matchs = list(SequenceMatcher(None, gt_text, tgt_text).get_matching_blocks())\n# match_count=0\n## match_lis = []\n# for match in matchs:\n# match_count = match_count + match.size\n \n message = {\"ground\":True,\"input\":True}\n if score==0.0:\n if len(gt_text)>0 and len(tgt_text)==0:\n message['input'] = \"text missing in tesseract\"\n if len(gt_text)==0 and len(tgt_text)>0:\n message['ground'] = \"text missing in google vision\"\n if score==1.0 and len(gt_text)==0 and len(tgt_text)==0:\n message['ground'] = \"text missing in google vision\"\n message['input'] = \"text missing in tesseract\"\n return score,message,match_count\n\ndef count_mismatch_char(gt ,tgt) :\n count=0\n gt_count = len(gt)\n for i,j in zip(gt,tgt):\n if i==j:\n count=count+1\n mismatch_char = abs(gt_count-count)\n return mismatch_char\ndef correct_region(region):\n box = region['boundingBox']['vertices']\n tmp=0\n \n region['boundingBox']= {'vertices' : [{'x':box[0]['x']-crop_factor,'y':box[0]['y']-crop_factor_y},\\\n {'x':box[1]['x']+crop_factor+tmp,'y':box[1]['y']-crop_factor_y},\\\n {'x':box[2]['x']+crop_factor+tmp,'y':box[2]['y']+crop_factor_y},\\\n {'x':box[3]['x']-crop_factor,'y': box[3]['y']+crop_factor_y}]}\n return region\n \n\n\ndef sort_line(line):\n line['regions'].sort(key=lambda x: x['boundingBox']['vertices'][0]['x'],reverse=False)\n return line\n\n\ndef cell_ocr_word(lang, page_path, line,save_base_path,mode_height):\n cell_text =\"\"\n conf_dicts=[]\n #updated_lines = horzontal_merging(line['regions'])\n dynamic_line = coord_adjustment(page_path,line['regions'] ,save_base_path)\n for word_idx, word in enumerate(dynamic_line):\n word = correct_region(word)\n coord_crop, coord = get_coord(word)\n if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8) \n cell_text = cell_text +\" \" +text\n conf_dicts.extend(conf_dict)\n return cell_text,conf_dicts\n\ndef cell_text_ocr(lang, page_path, line,save_base_path,mode_height):\n cell_text =\"\"\n cell_regions = []\n #updated_lines = horzontal_merging(line['regions'])\n for word_idx, word in enumerate(line['regions']):\n word = correct_region(word)\n coord_crop, coord = get_coord(word)\n if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8) \n cell_text = 
cell_text +\" \" +text\n return cell_text\n\ndef cell_ocr(lang, page_path, line,save_base_path,mode_height,psm):\n text =\"\"\n cell_google_text = \"\"\n conf_dicts = []\n updated_lines = horzontal_merging(line['regions'])\n dynamic_line = coord_adjustment(page_path,updated_lines ,save_base_path)\n \n for updated_line in dynamic_line:\n line_text = updated_line['text']\n cell_google_text= cell_google_text + \" \"+line_text\n corrected_line = correct_region(updated_line)\n coord_crop, coord = get_coord(corrected_line)\n if len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n tess_text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,psm) \n text = text + \" \" + tess_text\n conf_dicts.extend(conf_dict)\n \n return cell_google_text,text,conf_dicts\n\ndef text_extraction(df,lang, page_path, regions,save_base_path):\n final_score = 0\n total_words = 0\n total_lines = 0\n total_chars = 0\n total_match_chars = 0\n for idx, level in enumerate(regions):\n mode_height = frequent_height(level['regions'])\n\n if ocr_level==\"WORD\":\n for line_idx, line in enumerate(level['regions']):\n #word_regions = coord_adjustment(page_path, line['regions'],save_base_path)\n for word_idx, word in enumerate(line['regions']):\n word = correct_region(word)\n coord_crop, coord = get_coord(word)\n word_text = word['text']\n if len(word_text)>0 and len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,8)\n if text_processing:\n text_list = text.split()\n text = \" \".join(text_list)\n score,message,match_count = seq_matcher(text,word['text'])\n final_score = final_score+score\n total_words = total_words+1\n total_chars = total_chars+len(remove_space(word['text']))\n total_match_chars= total_match_chars+match_count\n word['char_match'] = match_count\n word['tess_text'] = text\n word['conf_dict'] = conf_dict\n word['score'] = score\n word['message'] = message\n columns = word.keys()\n df2 = pd.DataFrame([word],columns=columns)\n df = df.append(df2, ignore_index=True)\n elif len(word_text)>0:\n score,message,match_count = seq_matcher(\"\",word['text'])\n word['char_match'] = match_count\n word['tess_text'] = \" \"\n word['conf_dict'] = None\n word['score'] = score\n word['message'] = message\n columns = word.keys()\n df2 = pd.DataFrame([word],columns=columns)\n df = df.append(df2, ignore_index=True)\n if ocr_level==\"LINE\":\n lines_adjusted = coord_adjustment(page_path, level['regions'],save_base_path)\n for line_idx, line_org in enumerate(lines_adjusted):\n line_sorted = copy.deepcopy(sort_line(line_org))\n line_text,total_word = merger_text(line_sorted)\n line = copy.deepcopy(correct_region(line_sorted))\n psm = 7\n \n if total_word<2:\n #print(line_text)\n psm=8\n coord_crop, coord = get_coord(line)\n\n print(\"line text\",line_text)\n if len(remove_space(line_text))>0 and len(coord)!=0 and abs(coord_crop[1,1]-coord_crop[2,1]) > REJECT_FILTER :\n if 'class' in line.keys() and line['class']==\"CELL\":\n line_text,text,conf_dict = cell_ocr(lang, page_path, line,save_base_path,mode_height,psm)\n elif 'class' in line.keys() and line['class']==\"CELL_TEXT\":\n text,conf_dict = cell_ocr_word(lang, page_path, line,save_base_path,mode_height)\n else:\n \n text,conf_dict = get_text(page_path, coord_crop, lang,mode_height,save_base_path,psm)\n \n if text_processing:\n text_list = text.split()\n text = \" \".join(text_list)\n score,message,match_count = 
seq_matcher(text,line_text)\n #if score < 1.0:\n #text, conf_dict,score = check_psm(page_path,coord_crop,lang,mode_height,save_base_path,psm,score,text,line_text,conf_dict)\n final_score = final_score+score\n total_lines = total_lines+1\n total_chars = total_chars+len(remove_space(line_text))\n total_match_chars= total_match_chars+match_count\n line['char_match'] = match_count\n line['tess_text'] = text\n line['text'] = line_text \n line['conf_dict'] = conf_dict\n line['score'] = score\n line['message'] = message\n columns = line.keys()\n df2 = pd.DataFrame([line],columns=columns)\n df = df.append(df2, ignore_index=True)\n elif len(remove_space(line_text))>0:\n score,message,match_count = seq_matcher(\"\",line_text)\n line['char_match'] = match_count\n line['tess_text'] = \" \"\n line['conf_dict'] = None\n line['text'] = line_text\n line['score'] = score\n line['message'] = message\n columns = line.keys()\n df2 = pd.DataFrame([line],columns=columns)\n df = df.append(df2, ignore_index=True)\n\n #return regions,final_score/total_words,df,total_chars,total_match_chars\n return regions,final_score/total_lines,df,total_chars,total_match_chars\n\n\njson_files_path = glob.glob(output_path+\"/*/gv.json\")\n\n\ndef tesseract(json_files):\n \n output = []\n dfs =[]\n for json_file in json_files:\n file_name = json_file.split('/')[-1].split('.json')[0]\n pdf_name = json_file.split('/')[-2]\n print(\"file name--------------------->>>>>>>>>>>>>>>>>>\",pdf_name)\n if not os.path.exists(base_path+pdf_name):\n os.mkdir(base_path+pdf_name)\n save_base_path = base_path+pdf_name\n with open(json_file,'r+') as f:\n data = json.load(f)\n columns = [\"page_path\",\"page_data\",\"file_eval_info\"]\n final_df = pd.DataFrame(columns=columns)\n Draw(data,save_base_path,regions='regions')\n lang = data['outputs'][0]['config']['OCR']['language']\n total_page = len(data['outputs'][0]['pages'])\n file_score = 0; total_chars_file = 0\n file_data = []; total_match_chars_file = 0\n page_paths = []\n page_data_counts = []\n for idx,page_data in enumerate(data['outputs'][0]['pages']):\n t1 = time.time()\n print(\"processing started for page no. 
\",idx)\n page_path = page_data['path']\n regions = page_data['regions'][1:]\n df = pd.DataFrame()\n regions,score,df,total_chars,total_match_chars = text_extraction(df,lang, page_path, regions,save_base_path)\n file_score = file_score + score\n total_chars_file =total_chars_file +total_chars\n total_match_chars_file = total_match_chars_file+total_match_chars\n file_data.append(df.to_csv())\n page_paths.append(page_path)\n char_details = {\"total_chars\":total_chars,\"total_match_chars\":total_match_chars}\n page_data_counts.append(char_details)\n data['outputs'][0]['pages'][idx][\"regions\"][1:] = copy.deepcopy(regions)\n t2 = t1+time.time()\n print(\"processing completed for page in {}\".format(t2))\n file_eval_info = {\"total_chars\":total_chars_file,\"total_match_chars\":total_match_chars_file,\"score\":total_match_chars_file/total_chars_file}\n\n print(file_eval_info)\n final_df[\"page_path\"] = page_paths\n final_df[\"page_data\"] = file_data\n final_df[\"file_eval_info\"] = [file_eval_info]*len(page_paths)\n \n print(\"file level evaluation result------------------->>>>>>>>>>>>>>>>>>>>>>>>>>>\",file_eval_info)\n data['outputs'][0]['score'] = file_score/total_page\n with open(save_base_path+\"/\"+file_name+\".json\", 'w') as outfile:\n json.dump(data, outfile)\n final_df.to_csv(save_base_path+\"/\"+file_name+'.csv')\n return output,final_df\n \n\noutput,dfs = tesseract(json_files_path)\n\n\n\ndef draw_thresh_box(df,path,page_index,save_path):\n path = path.split('upload')[1]\n \n image = download_file(download_url,headers,path,f_type='image')\n nparr = np.frombuffer(image, np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n font = cv2.FONT_HERSHEY_SIMPLEX \n color= (255,0,0);thickness=5\n df =df.reset_index()\n for row in df.iterrows():\n row2 = row[1].to_dict()\n boxes = row2['boundingBox']\n boxes2 = ast.literal_eval(boxes)\n ground = boxes2['vertices']\n \n pts = []\n for pt in ground:\n pts.append([int(pt['x']) ,int(pt['y'])])\n cv2.polylines(image, [np.array(pts)],True, color, thickness)\n cv2.putText(image, str(row2['text']), (pts[0][0],pts[0][1]), font, \n 2, (0,0,255), 2, cv2.LINE_AA)\n cv2.putText(image, str(row2['tess_text']), (pts[1][0],pts[1][1]), font, \n 2, (0,255,0), 2, cv2.LINE_AA)\n\n image_path = os.path.join(save_path , '{}.png'.format(page_index)) \n cv2.imwrite(image_path , image)\n\ndef visualize_results(df_paths,thresh):\n for df_path in glob.glob(df_paths+\"*/*.csv\"):\n save_path = base_path + df_path.split('/')[-2]+\"/\"\n \n df = pd.read_csv(df_path)\n for idx,(page_path,page_data) in enumerate(zip(df['page_path'],df['page_data'])):\n df_string = StringIO(page_data)\n page_df = pd.read_csv(df_string, sep=\",\")\n filtered_df = page_df[page_df['score']<thresh]\n draw_thresh_box(filtered_df,page_path,idx,save_path)\n \nvisualize_results(base_path,vis_thresh)\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "numpy.float32", "numpy.array", "numpy.frombuffer" ] ]
phanvanthinh98/keras_LSTM
[ "b22cff1e9fd762226ec3dc9d3af3e300484dd833" ]
[ "keras/wrappers/scikit_learn.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Wrapper for using the Scikit-Learn API with Keras models.\"\"\"\n# pylint: disable=g-classes-have-attributes\n\nimport copy\nimport types\n\nimport numpy as np\n\nfrom keras import losses\nfrom keras.models import Sequential\nfrom keras.utils.generic_utils import has_arg\nfrom keras.utils.np_utils import to_categorical\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nclass BaseWrapper(object):\n \"\"\"Base class for the Keras scikit-learn wrapper.\n\n Warning: This class should not be used directly.\n Use descendant classes instead.\n\n Args:\n build_fn: callable function or class instance\n **sk_params: model parameters & fitting parameters\n\n The `build_fn` should construct, compile and return a Keras model, which\n will then be used to fit/predict. One of the following\n three values could be passed to `build_fn`:\n 1. A function\n 2. An instance of a class that implements the `__call__` method\n 3. None. This means you implement a class that inherits from either\n `KerasClassifier` or `KerasRegressor`. The `__call__` method of the\n present class will then be treated as the default `build_fn`.\n\n `sk_params` takes both model parameters and fitting parameters. Legal model\n parameters are the arguments of `build_fn`. Note that like all other\n estimators in scikit-learn, `build_fn` should provide default values for\n its arguments, so that you could create the estimator without passing any\n values to `sk_params`.\n\n `sk_params` could also accept parameters for calling `fit`, `predict`,\n `predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`).\n fitting (predicting) parameters are selected in the following order:\n\n 1. Values passed to the dictionary arguments of\n `fit`, `predict`, `predict_proba`, and `score` methods\n 2. Values passed to `sk_params`\n 3. 
The default values of the `keras.models.Sequential`\n `fit`, `predict`, `predict_proba` and `score` methods\n\n When using scikit-learn's `grid_search` API, legal tunable parameters are\n those you could pass to `sk_params`, including fitting parameters.\n In other words, you could use `grid_search` to search for the best\n `batch_size` or `epochs` as well as the model parameters.\n \"\"\"\n\n def __init__(self, build_fn=None, **sk_params):\n self.build_fn = build_fn\n self.sk_params = sk_params\n self.check_params(sk_params)\n\n def check_params(self, params):\n \"\"\"Checks for user typos in `params`.\n\n Args:\n params: dictionary; the parameters to be checked\n\n Raises:\n ValueError: if any member of `params` is not a valid argument.\n \"\"\"\n legal_params_fns = [\n Sequential.fit, Sequential.predict, Sequential.predict_classes,\n Sequential.evaluate\n ]\n if self.build_fn is None:\n legal_params_fns.append(self.__call__)\n elif (not isinstance(self.build_fn, types.FunctionType) and\n not isinstance(self.build_fn, types.MethodType)):\n legal_params_fns.append(self.build_fn.__call__)\n else:\n legal_params_fns.append(self.build_fn)\n\n for params_name in params:\n for fn in legal_params_fns:\n if has_arg(fn, params_name):\n break\n else:\n if params_name != 'nb_epoch':\n raise ValueError('{} is not a legal parameter'.format(params_name))\n\n def get_params(self, **params): # pylint: disable=unused-argument\n \"\"\"Gets parameters for this estimator.\n\n Args:\n **params: ignored (exists for API compatibility).\n\n Returns:\n Dictionary of parameter names mapped to their values.\n \"\"\"\n res = self.sk_params.copy()\n res.update({'build_fn': self.build_fn})\n return res\n\n def set_params(self, **params):\n \"\"\"Sets the parameters of this estimator.\n\n Args:\n **params: Dictionary of parameter names mapped to their values.\n\n Returns:\n self\n \"\"\"\n self.check_params(params)\n self.sk_params.update(params)\n return self\n\n def fit(self, x, y, **kwargs):\n \"\"\"Constructs a new model with `build_fn` & fit the model to `(x, y)`.\n\n Args:\n x : array-like, shape `(n_samples, n_features)`\n Training samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`\n True labels for `x`.\n **kwargs: dictionary arguments\n Legal arguments are the arguments of `Sequential.fit`\n\n Returns:\n history : object\n details about the training history at each epoch.\n \"\"\"\n if self.build_fn is None:\n self.model = self.__call__(**self.filter_sk_params(self.__call__))\n elif (not isinstance(self.build_fn, types.FunctionType) and\n not isinstance(self.build_fn, types.MethodType)):\n self.model = self.build_fn(\n **self.filter_sk_params(self.build_fn.__call__))\n else:\n self.model = self.build_fn(**self.filter_sk_params(self.build_fn))\n\n if (losses.is_categorical_crossentropy(self.model.loss) and\n len(y.shape) != 2):\n y = to_categorical(y)\n\n fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))\n fit_args.update(kwargs)\n\n history = self.model.fit(x, y, **fit_args)\n\n return history\n\n def filter_sk_params(self, fn, override=None):\n \"\"\"Filters `sk_params` and returns those in `fn`'s arguments.\n\n Args:\n fn : arbitrary function\n override: dictionary, values to override `sk_params`\n\n Returns:\n res : dictionary containing variables\n in both `sk_params` and `fn`'s arguments.\n \"\"\"\n override = override or {}\n res = {}\n for name, value in 
self.sk_params.items():\n if has_arg(fn, name):\n res.update({name: value})\n res.update(override)\n return res\n\n\n@keras_export('keras.wrappers.scikit_learn.KerasClassifier')\nclass KerasClassifier(BaseWrapper):\n \"\"\"Implementation of the scikit-learn classifier API for Keras.\n \"\"\"\n\n def fit(self, x, y, **kwargs):\n \"\"\"Constructs a new model with `build_fn` & fit the model to `(x, y)`.\n\n Args:\n x : array-like, shape `(n_samples, n_features)`\n Training samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`\n True labels for `x`.\n **kwargs: dictionary arguments\n Legal arguments are the arguments of `Sequential.fit`\n\n Returns:\n history : object\n details about the training history at each epoch.\n\n Raises:\n ValueError: In case of invalid shape for `y` argument.\n \"\"\"\n y = np.array(y)\n if len(y.shape) == 2 and y.shape[1] > 1:\n self.classes_ = np.arange(y.shape[1])\n elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:\n self.classes_ = np.unique(y)\n y = np.searchsorted(self.classes_, y)\n else:\n raise ValueError('Invalid shape for y: ' + str(y.shape))\n self.n_classes_ = len(self.classes_)\n return super(KerasClassifier, self).fit(x, y, **kwargs)\n\n def predict(self, x, **kwargs):\n \"\"\"Returns the class predictions for the given test data.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n **kwargs: dictionary arguments\n Legal arguments are the arguments\n of `Sequential.predict_classes`.\n\n Returns:\n preds: array-like, shape `(n_samples,)`\n Class predictions.\n \"\"\"\n kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)\n classes = self.model.predict_classes(x, **kwargs)\n return self.classes_[classes]\n\n def predict_proba(self, x, **kwargs):\n \"\"\"Returns class probability estimates for the given test data.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n **kwargs: dictionary arguments\n Legal arguments are the arguments\n of `Sequential.predict_classes`.\n\n Returns:\n proba: array-like, shape `(n_samples, n_outputs)`\n Class probability estimates.\n In the case of binary classification,\n to match the scikit-learn API,\n will return an array of shape `(n_samples, 2)`\n (instead of `(n_sample, 1)` as in Keras).\n \"\"\"\n kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)\n probs = self.model.predict(x, **kwargs)\n\n # check if binary classification\n if probs.shape[1] == 1:\n # first column is probability of class 0 and second is of class 1\n probs = np.hstack([1 - probs, probs])\n return probs\n\n def score(self, x, y, **kwargs):\n \"\"\"Returns the mean accuracy on the given test data and labels.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`\n True labels for `x`.\n **kwargs: dictionary arguments\n Legal arguments are the arguments of `Sequential.evaluate`.\n\n Returns:\n score: float\n Mean accuracy of predictions on `x` wrt. `y`.\n\n Raises:\n ValueError: If the underlying model isn't configured to\n compute accuracy. 
You should pass `metrics=[\"accuracy\"]` to\n the `.compile()` method of the model.\n \"\"\"\n y = np.searchsorted(self.classes_, y)\n kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)\n\n loss_name = self.model.loss\n if hasattr(loss_name, '__name__'):\n loss_name = loss_name.__name__\n if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:\n y = to_categorical(y)\n\n outputs = self.model.evaluate(x, y, **kwargs)\n if not isinstance(outputs, list):\n outputs = [outputs]\n for name, output in zip(self.model.metrics_names, outputs):\n if name in ['accuracy', 'acc']:\n return output\n raise ValueError('The model is not configured to compute accuracy. '\n 'You should pass `metrics=[\"accuracy\"]` to '\n 'the `model.compile()` method.')\n\n\n@keras_export('keras.wrappers.scikit_learn.KerasRegressor')\nclass KerasRegressor(BaseWrapper):\n \"\"\"Implementation of the scikit-learn regressor API for Keras.\n \"\"\"\n\n def predict(self, x, **kwargs):\n \"\"\"Returns predictions for the given test data.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n **kwargs: dictionary arguments\n Legal arguments are the arguments of `Sequential.predict`.\n\n Returns:\n preds: array-like, shape `(n_samples,)`\n Predictions.\n \"\"\"\n kwargs = self.filter_sk_params(Sequential.predict, kwargs)\n return np.squeeze(self.model.predict(x, **kwargs))\n\n def score(self, x, y, **kwargs):\n \"\"\"Returns the mean loss on the given test data and labels.\n\n Args:\n x: array-like, shape `(n_samples, n_features)`\n Test samples where `n_samples` is the number of samples\n and `n_features` is the number of features.\n y: array-like, shape `(n_samples,)`\n True labels for `x`.\n **kwargs: dictionary arguments\n Legal arguments are the arguments of `Sequential.evaluate`.\n\n Returns:\n score: float\n Mean accuracy of predictions on `x` wrt. `y`.\n \"\"\"\n kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)\n loss = self.model.evaluate(x, y, **kwargs)\n if isinstance(loss, list):\n return -loss[0]\n return -loss\n" ]
[ [ "numpy.searchsorted", "numpy.arange", "numpy.hstack", "tensorflow.python.util.tf_export.keras_export", "numpy.array", "numpy.unique" ] ]
daniil-lyakhov/deep-object-reid
[ "b0f7d6a2d4cff8c417a66d82c09d16788d81ec67" ]
[ "torchreid/models/mobilenetv3.py" ]
[ "import math\n\nimport torch\nimport torch.nn as nn\nfrom torch.cuda.amp import autocast\n\nfrom torchreid.losses import AngleSimpleLinear\nfrom torchreid.ops import Dropout, EvalModeSetter, rsc\nfrom .common import HSigmoid, HSwish, ModelInterface, make_divisible\nimport timm\n\nfrom torchreid.integration.nncf.compression import get_no_nncf_trace_context_manager, nullcontext\n\n__all__ = ['mobilenetv3_large', 'mobilenetv3_large_075', 'mobilenetv3_small', 'mobilenetv3_large_150',\n 'mobilenetv3_large_125']\n\npretrained_urls = {\n 'mobilenetv3_small':\n 'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-small-55df8e1f.pth?raw=true',\n 'mobilenetv3_large':\n 'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-1cd25616.pth?raw=true',\n 'mobilenetv3_large_075':\n 'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-0.75-9632d2a8.pth?raw=true',\n 'mobilenetv3_large_21k':\n 'https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/mobilenetv3_large_100_miil_21k.pth'\n}\n\n\nSHOULD_NNCF_SKIP_SE_LAYERS = False\nSHOULD_NNCF_SKIP_HEAD = False\nno_nncf_se_layer_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_SE_LAYERS else nullcontext\nno_nncf_head_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_HEAD else nullcontext\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=4):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, make_divisible(channel // reduction, 8)),\n nn.ReLU(inplace=True),\n nn.Linear(make_divisible(channel // reduction, 8), channel),\n HSigmoid()\n )\n\n def forward(self, x):\n with no_nncf_se_layer_context():\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y\n\n\ndef conv_3x3_bn(inp, oup, stride, IN_conv1=False):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n nn.BatchNorm2d(oup) if not IN_conv1 else nn.InstanceNorm2d(oup, affine=True),\n HSwish()\n )\n\n\ndef conv_1x1_bn(inp, oup, loss='softmax'):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n HSwish() if loss == 'softmax' else nn.PReLU()\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):\n super(InvertedResidual, self).__init__()\n assert stride in [1, 2]\n\n self.identity = stride == 1 and inp == oup\n\n if inp == hidden_dim:\n self.conv = nn.Sequential(\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n HSwish() if use_hs else nn.ReLU(inplace=True),\n # Squeeze-and-Excite\n SELayer(hidden_dim) if use_se else nn.Identity(),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n else:\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),\n nn.BatchNorm2d(hidden_dim),\n HSwish() if use_hs else nn.ReLU(inplace=True),\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n # Squeeze-and-Excite\n SELayer(hidden_dim) if use_se else nn.Identity(),\n HSwish() if use_hs else nn.ReLU(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n\n def forward(self, x):\n if 
self.identity:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass MobileNetV3(ModelInterface):\n def __init__(self,\n cfgs,\n mode,\n IN_conv1=False,\n num_classes=1000,\n width_mult=1.,\n in_channels=3,\n input_size=(224, 224),\n dropout_cls = None,\n pooling_type='avg',\n IN_first=False,\n self_challenging_cfg=False,\n **kwargs):\n\n super().__init__(**kwargs)\n self.in_size = input_size\n self.num_classes = num_classes\n self.input_IN = nn.InstanceNorm2d(in_channels, affine=True) if IN_first else None\n self.pooling_type = pooling_type\n self.self_challenging_cfg = self_challenging_cfg\n self.width_mult = width_mult\n self.dropout_cls = dropout_cls\n # setting of inverted residual blocks\n self.cfgs = cfgs\n assert mode in ['large', 'small']\n # building first layer\n input_channel = make_divisible(16 * self.width_mult, 8)\n stride = 1 if self.in_size[0] < 100 else 2\n layers = [conv_3x3_bn(3, input_channel, stride, IN_conv1)]\n # building inverted residual blocks\n block = InvertedResidual\n flag = True\n for k, t, c, use_se, use_hs, s in self.cfgs:\n if (self.in_size[0] < 100) and (s == 2) and flag:\n s = 1\n flag = False\n output_channel = make_divisible(c * self.width_mult, 8)\n exp_size = make_divisible(input_channel * t, 8)\n layers.append(block(input_channel, exp_size, output_channel, k, s, use_se, use_hs))\n input_channel = output_channel\n self.features = nn.Sequential(*layers)\n self.num_features = exp_size\n # building last several layers\n self.conv = conv_1x1_bn(input_channel, exp_size, self.loss)\n output_channel = {'large': 1280, 'small': 1024}\n output_channel = make_divisible(output_channel[mode] * self.width_mult, 8) if self.width_mult > 1.0 else output_channel[mode]\n\n if self.loss == 'softmax' or self.loss == 'asl':\n self.classifier = nn.Sequential(\n nn.Linear(exp_size, output_channel),\n nn.BatchNorm1d(output_channel),\n HSwish(),\n Dropout(**self.dropout_cls),\n nn.Linear(output_channel, self.num_classes),\n )\n else:\n assert self.loss in ['am_softmax', 'am_binary']\n self.classifier = nn.Sequential(\n nn.Linear(exp_size, output_channel),\n nn.BatchNorm1d(output_channel),\n nn.PReLU(),\n Dropout(**self.dropout_cls),\n AngleSimpleLinear(output_channel, self.num_classes),\n )\n self._initialize_weights()\n self.forward = autocast(self.mix_precision)(self.forward)\n\n def extract_features(self, x):\n y = self.conv(self.features(x))\n return y\n\n def infer_head(self, x, skip_pool=False):\n if not skip_pool:\n glob_features = self._glob_feature_vector(x, self.pooling_type, reduce_dims=False)\n else:\n glob_features = x\n\n logits = self.classifier(glob_features.view(x.shape[0], -1))\n return glob_features, logits\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def forward(self, x, return_featuremaps=False, get_embeddings=False, gt_labels=None):\n if self.input_IN is not None:\n x = self.input_IN(x)\n\n y = self.extract_features(x)\n if return_featuremaps:\n return y\n\n with no_nncf_head_context():\n glob_features, logits = self.infer_head(y, skip_pool=False)\n if self.training and self.self_challenging_cfg.enable and gt_labels is not None:\n glob_features = rsc(\n features = glob_features,\n scores = logits,\n labels = gt_labels,\n retain_p = 1.0 - self.self_challenging_cfg.drop_p,\n retain_batch = 1.0 - self.self_challenging_cfg.drop_batch_p\n )\n\n with EvalModeSetter([self.output], m_type=(nn.BatchNorm1d, nn.BatchNorm2d)):\n _, logits = self.infer_head(x, skip_pool=True)\n\n if not self.training and self.is_classification():\n return [logits]\n\n if get_embeddings:\n out_data = [logits, glob_features]\n elif self.loss in ['softmax', 'am_softmax', 'asl', 'am_binary']:\n out_data = [logits]\n elif self.loss in ['triplet']:\n out_data = [logits, glob_features]\n else:\n raise KeyError(\"Unsupported loss: {}\".format(self.loss))\n\n return tuple(out_data)\n\n\ndef init_pretrained_weights(model, key='', **kwargs):\n \"\"\"Initializes model with pretrained weights.\n Layers that don't match with pretrained layers in name or size are kept unchanged.\n \"\"\"\n import os\n import errno\n import gdown\n\n from torchreid.utils import load_pretrained_weights\n\n def _get_torch_home():\n ENV_TORCH_HOME = 'TORCH_HOME'\n ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'\n DEFAULT_CACHE_DIR = '~/.cache'\n torch_home = os.path.expanduser(\n os.getenv(\n ENV_TORCH_HOME,\n os.path.join(\n os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'\n )\n )\n )\n return torch_home\n\n torch_home = _get_torch_home()\n model_dir = os.path.join(torch_home, 'checkpoints')\n try:\n os.makedirs(model_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n filename = key + '_imagenet.pth'\n cached_file = os.path.join(model_dir, filename)\n if not os.path.exists(cached_file):\n gdown.download(pretrained_urls[key], cached_file)\n model = load_pretrained_weights(model, cached_file, **kwargs)\n\n\ndef mobilenetv3_large_075(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Large model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1],\n [5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1],\n [3, 6, 80, 0, 1, 2],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [5, 6, 160, 1, 1, 2],\n [5, 6, 160, 1, 1, 1],\n [5, 6, 160, 1, 1, 1]\n ]\n\n net = MobileNetV3(cfgs, mode='large', width_mult =.75, **kwargs)\n if pretrained:\n init_pretrained_weights(net, key='mobilenetv3_large_075')\n\n return net\n\n\ndef mobilenetv3_large(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Large model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1],\n [5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1],\n [3, 6, 80, 0, 1, 2],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [5, 6, 160, 1, 1, 2],\n [5, 6, 160, 
1, 1, 1],\n [5, 6, 160, 1, 1, 1]\n ]\n\n net = MobileNetV3(cfgs, mode='large', width_mult = 1., **kwargs)\n if pretrained:\n init_pretrained_weights(net, key='mobilenetv3_large')\n\n return net\n\n\ndef mobilenetv3_large_150(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Large model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1],\n [5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1],\n [3, 6, 80, 0, 1, 2],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [5, 6, 160, 1, 1, 2],\n [5, 6, 160, 1, 1, 1],\n [5, 6, 160, 1, 1, 1]\n ]\n\n net = MobileNetV3(cfgs, mode='large', width_mult = 1.5, **kwargs)\n if pretrained:\n raise NotImplementedError(\"The weights for this configuration are not available\")\n\n return net\n\n\ndef mobilenetv3_large_125(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Large model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1],\n [5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1],\n [3, 6, 80, 0, 1, 2],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [5, 6, 160, 1, 1, 2],\n [5, 6, 160, 1, 1, 1],\n [5, 6, 160, 1, 1, 1]\n ]\n\n net = MobileNetV3(cfgs, mode='large', width_mult = 1.25, **kwargs)\n if pretrained:\n raise NotImplementedError(\"The weights for this configuration are not available\")\n\n return net\n\n\ndef mobilenetv3_small(pretrained=False, **kwargs):\n \"\"\"\n Constructs a MobileNetV3-Small model\n \"\"\"\n cfgs = [\n # k, t, c, SE, HS, s\n [3, 1, 16, 1, 0, 2],\n [3, 4.5, 24, 0, 0, 2],\n [3, 3.67, 24, 0, 0, 1],\n [5, 4, 40, 1, 1, 2],\n [5, 6, 40, 1, 1, 1],\n [5, 6, 40, 1, 1, 1],\n [5, 3, 48, 1, 1, 1],\n [5, 3, 48, 1, 1, 1],\n [5, 6, 96, 1, 1, 2],\n [5, 6, 96, 1, 1, 1],\n [5, 6, 96, 1, 1, 1],\n ]\n net = MobileNetV3(cfgs, mode='small', width_mult = 1., **kwargs)\n if pretrained:\n init_pretrained_weights(net, key='mobilenetv3_small')\n\n return net\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.PReLU", "torch.nn.Linear", "torch.nn.BatchNorm1d", "torch.nn.AdaptiveAvgPool2d", "torch.cuda.amp.autocast", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.InstanceNorm2d", "torch.nn.Identity", "torch.nn.ReLU" ] ]
Harshs27/lingvo
[ "bd396e651488b2e2c4a7416be077b4a0226c87c8" ]
[ "lingvo/core/conv_layers_builder_test.py" ]
[ "# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for conv layers builder.\"\"\"\n\nfrom absl.testing import parameterized\nfrom lingvo import compat as tf\nfrom lingvo.core import bn_layers\nfrom lingvo.core import conv_layers_builder\nfrom lingvo.core import conv_layers_with_time_padding\nfrom lingvo.core import layers\nfrom lingvo.core import test_utils\nimport numpy as np\n\n\nclass ConvPaddedLayersTest(test_utils.TestCase):\n\n def _ConvTestHelper(self, dilation, stride, activation, batch_norm,\n weight_norm, in_dim, out_dim, filter_shape, conv_last,\n causal_conv):\n with self.session(use_gpu=True) as sess:\n p1 = layers.Conv2DLayer.Params().Set(\n name='conv_2d01',\n filter_shape=filter_shape + [in_dim, out_dim],\n filter_stride=stride,\n dilation_rate=dilation,\n activation=activation,\n batch_norm=batch_norm,\n weight_norm=weight_norm,\n bias=not batch_norm,\n conv_last=conv_last,\n causal_convolution=causal_conv)\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=weight_norm)\n if batch_norm:\n norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(\n decay=0.999)\n builder_params.norm_layer_tpl = norm_p\n else:\n builder_params.norm_layer_tpl = None\n p2 = builder_params.Instantiate().Conv2D(\n 'conv_2d02',\n in_dim,\n out_dim,\n filter_shape,\n stride=stride,\n dilation=dilation,\n activation=activation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l1 = p1.Instantiate()\n l2 = p2.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n l1_theta = l1.theta.Transform(tf.identity)\n l2_theta = l2.theta.Transform(tf.identity)\n conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)\n conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)\n\n tf.logging.info(l1_theta)\n tf.logging.info(l2_theta)\n l1_num_vars = l1_theta.Flatten()\n l2_num_var2 = l2_theta.Flatten()\n if len(l1_num_vars) != len(l2_num_var2):\n tf.logging.info(\n 'Mismatched number of vars: l1: %d vars, l2: %d vars',\n len(l1_num_vars), len(l2_num_var2))\n\n w1 = l1_theta.w\n w2 = l2_theta.conv_2d.w\n # b1 = l1_theta.b\n # b2 = l2_theta.bn_or_bias.b\n\n tf.global_variables_initializer().run()\n v1, p1 = sess.run([conv_out1, out1_padding])\n w1_v = sess.run(w1)\n v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})\n\n self.assertAllClose(v1, v2)\n self.assertAllClose(p1, p2)\n\n def testConvBasic(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'NONE'\n batch_norm = False\n weight_norm = False\n in_dim = 3\n out_dim = 3\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,\n in_dim, out_dim, filter_shape, 
conv_last, causal_conv)\n\n def testConvBnWnTanh(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n out_dim = 3\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,\n in_dim, out_dim, filter_shape, conv_last, causal_conv)\n\n def testConvGn(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'TANH'\n in_dim = 3\n out_dim = 4\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n\n with self.session(use_gpu=True) as sess:\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=True)\n builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(\n num_groups=2)\n p = builder_params.Instantiate().Conv2D(\n 'conv_2d02',\n in_dim,\n out_dim,\n filter_shape,\n stride=stride,\n dilation=dilation,\n activation=activation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l = p.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)\n tf.global_variables_initializer().run()\n v = sess.run(tf.reduce_sum(conv_out, 0))\n\n expected_out = [[[-0.35070014, -1.7821487, 0.8349923, 1.1709788],\n [-0.18872532, 0.9702145, 0.5534694, -1.1386856]],\n [[0.34970748, -0.5403709, -0.9809327, -2.0930214],\n [0.54232424, 1.1565661, 1.0349312, 1.3458138]],\n [[0, 0, 0, 0], [0, 0, 0, 0]]]\n\n self.assertAllClose(v, expected_out)\n\n def testConvLastWnTanh(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'TANH'\n batch_norm = False\n weight_norm = True\n in_dim = 3\n out_dim = 3\n filter_shape = [2, 2]\n conv_last = True\n causal_conv = False\n self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,\n in_dim, out_dim, filter_shape, conv_last, causal_conv)\n\n def testConvLastCausal(self):\n dilation = [1, 1]\n stride = [2, 3]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n out_dim = 3\n filter_shape = [2, 1]\n conv_last = True\n causal_conv = True\n self._ConvTestHelper(dilation, stride, activation, batch_norm, weight_norm,\n in_dim, out_dim, filter_shape, conv_last, causal_conv)\n\n def _DepthwiseConvTestHelper(self, dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n filter_shape, conv_last, causal_conv):\n with self.session(use_gpu=True) as sess:\n p1 = layers.DepthwiseConv2DLayer.Params().Set(\n name='conv_2d01',\n filter_shape=filter_shape + [in_dim, depth_multiplier],\n filter_stride=stride,\n dilation_rate=dilation,\n activation=activation,\n batch_norm=batch_norm,\n weight_norm=weight_norm,\n bias=not batch_norm,\n conv_last=conv_last,\n causal_convolution=causal_conv)\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=weight_norm)\n if batch_norm:\n norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(\n decay=0.999)\n builder_params.norm_layer_tpl = norm_p\n else:\n builder_params.norm_layer_tpl = None\n\n p2 = builder_params.Instantiate().DepthwiseConv2D(\n 'conv_2d02',\n in_dim,\n depth_multiplier,\n filter_shape,\n stride=stride,\n activation=activation,\n dilation=dilation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l1 = p1.Instantiate()\n l2 = p2.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)\n conv_pad 
= np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n l1_theta = l1.theta.Transform(tf.identity)\n l2_theta = l2.theta.Transform(tf.identity)\n conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)\n conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)\n\n tf.logging.info(l1_theta)\n tf.logging.info(l2_theta)\n l1_num_vars = l1_theta.Flatten()\n l2_num_var2 = l2_theta.Flatten()\n if len(l1_num_vars) != len(l2_num_var2):\n tf.logging.info(\n 'Mismatched number of vars: l1: %d vars, l2: %d vars',\n len(l1_num_vars), len(l2_num_var2))\n\n w1 = l1_theta.w\n w2 = l2_theta.conv_2d.w\n # b1 = l1_theta.b\n # b2 = l2_theta.bn_or_bias.b\n\n tf.global_variables_initializer().run()\n v1, p1 = sess.run([conv_out1, out1_padding])\n w1_v = sess.run([w1])[0]\n v2, p2 = sess.run([conv_out2, out2_padding], feed_dict={w2: w1_v})\n\n self.assertAllClose(v1, v2)\n self.assertAllClose(p1, p2)\n\n def testDepthConvBasic(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'NONE'\n batch_norm = False\n weight_norm = False\n in_dim = 3\n depth_multiplier = 2\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n filter_shape, conv_last, causal_conv)\n\n def testDepthConvBnWnTanh(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n filter_shape, conv_last, causal_conv)\n\n def testDepthConvGn(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n in_dim = 4\n depth_multiplier = 1\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n\n with self.session(use_gpu=True) as sess:\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=True)\n builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(\n num_groups=2)\n p = builder_params.Instantiate().DepthwiseConv2D(\n 'conv_2d02',\n in_dim,\n depth_multiplier,\n filter_shape,\n stride=stride,\n activation=activation,\n dilation=dilation,\n conv_last=conv_last,\n is_causal=causal_conv)\n l = p.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)\n tf.global_variables_initializer().run()\n v = sess.run(tf.reduce_sum(conv_out, 0))\n\n expected_out = [[[-0.77095497, 0.30285388, -0.05714864, 1.0386012],\n [0.74034333, 0.04982221, -0.41769135, -2.9531932],\n [-0.2647084, -0.1936804, 0.6598473, 0.42537105]],\n [[1.3095646, -0.85996866, 2.2734299, -1.8457952],\n [-0.9542263, -0.14199251, 0.51472515, 0.91931283],\n [0.47267163, 1.4824618, 0.4548889, 0.93488806]],\n [[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]]]\n\n self.assertAllClose(expected_out, v)\n\n def testDepthConvLastWnTanh(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = False\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n filter_shape = [2, 2]\n conv_last = True\n causal_conv = False\n self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n 
filter_shape, conv_last, causal_conv)\n\n def testDepthConvLastCausal(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n filter_shape = [2, 1]\n conv_last = True\n causal_conv = True\n self._DepthwiseConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n filter_shape, conv_last, causal_conv)\n\n def _SeparableConvTestHelper(self, dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier, out_dim,\n filter_shape, conv_last, causal_conv,\n assert_equality=True):\n with self.session(use_gpu=True) as sess:\n p1 = layers.SeparableConv2DLayer.Params().Set(\n name='conv_2d01',\n filter_shape=filter_shape + [in_dim, out_dim],\n depth_multiplier=depth_multiplier,\n filter_stride=stride,\n dilation_rate=dilation,\n activation=activation,\n batch_norm=batch_norm,\n weight_norm=weight_norm,\n bias=not batch_norm,\n conv_last=conv_last,\n causal_convolution=causal_conv)\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=weight_norm)\n if batch_norm:\n norm_p = conv_layers_with_time_padding.ConvBatchNormLayer.Params().Set(\n decay=0.999)\n builder_params.norm_layer_tpl = norm_p\n else:\n builder_params.norm_layer_tpl = None\n p2 = builder_params.Instantiate().SeparableConv2D(\n 'conv_2d02',\n in_dim,\n out_dim,\n depth_multiplier,\n filter_shape,\n stride=stride,\n activation=activation,\n dilation=dilation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l1 = p1.Instantiate()\n l2 = p2.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 3]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n l1_theta = l1.theta.Transform(tf.identity)\n l2_theta = l2.theta.Transform(tf.identity)\n conv_out1, out1_padding = l1.FProp(l1_theta, conv_in, conv_pad)\n conv_out2, out2_padding = l2.FProp(l2_theta, conv_in, conv_pad)\n\n tf.logging.info(l1_theta)\n tf.logging.info(l2_theta)\n l1_num_vars = l1_theta.Flatten()\n l2_num_var2 = l2_theta.Flatten()\n if len(l1_num_vars) != len(l2_num_var2):\n tf.logging.info(\n 'Mismatched number of vars: l1: %d vars, l2: %d vars',\n len(l1_num_vars), len(l2_num_var2))\n\n pointwise_conv_w1 = l1_theta.w\n depth_conv_w1 = l1_theta.depthwise_conv.w\n pointwise_conv_w2 = l2_theta.conv_1x1.w\n depth_conv_w2 = l2_theta.conv_2d.w\n # b1 = l1_theta.b\n # b2 = l2_theta.bn_or_bias.b\n tf.global_variables_initializer().run()\n v1, p1 = sess.run([conv_out1, out1_padding])\n p_w1_v, d_w1_v = sess.run([pointwise_conv_w1, depth_conv_w1])\n v2, p2 = sess.run([conv_out2, out2_padding],\n feed_dict={\n pointwise_conv_w2: p_w1_v,\n depth_conv_w2: d_w1_v\n })\n\n if assert_equality:\n self.assertAllClose(v1, v2)\n self.assertAllClose(p1, p2)\n\n def testSeparableConv2DLayerBasic(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'NONE'\n batch_norm = False\n weight_norm = False\n in_dim = 3\n depth_multiplier = 3\n out_dim = 2\n filter_shape = [2, 2]\n conv_last = False\n causal_conv = False\n self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n out_dim, filter_shape, conv_last, causal_conv)\n\n def testSeparableConvWnWnTanh(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = False\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n out_dim = 2\n filter_shape = [2, 1]\n conv_last = False\n causal_conv 
= True\n self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n out_dim, filter_shape, conv_last, causal_conv)\n\n def testSeparableConvLastBnWnTanh(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n batch_norm = True\n weight_norm = True\n in_dim = 3\n depth_multiplier = 3\n out_dim = 2\n filter_shape = [2, 1]\n conv_last = True\n causal_conv = True\n # New implementation is not equivallent to the old.\n self._SeparableConvTestHelper(dilation, stride, activation, batch_norm,\n weight_norm, in_dim, depth_multiplier,\n out_dim, filter_shape, conv_last, causal_conv,\n assert_equality=False)\n\n def testSeparableConvGn(self):\n dilation = [1, 1]\n stride = [2, 2]\n activation = 'TANH'\n in_dim = 4\n depth_multiplier = 1\n out_dim = 2\n filter_shape = [2, 1]\n conv_last = True\n causal_conv = True\n\n with self.session(use_gpu=True) as sess:\n builder_params = conv_layers_builder.Builder.Params().Set(\n weight_norm=True)\n builder_params.norm_layer_tpl = bn_layers.GroupNormLayer.Params().Set(\n num_groups=2)\n p = builder_params.Instantiate().SeparableConv2D(\n 'conv_2d02',\n in_dim,\n out_dim,\n depth_multiplier,\n filter_shape,\n stride=stride,\n activation=activation,\n dilation=dilation,\n conv_last=conv_last,\n is_causal=causal_conv)\n\n l = p.Instantiate()\n\n conv_in = tf.constant(np.random.normal(size=[4, 5, 6, 4]), tf.float32)\n conv_pad = np.full([4, 5], 0.0)\n conv_pad[2, 3] = 1.0\n conv_pad[2, 4] = 1.0\n conv_pad = tf.constant(conv_pad, tf.float32)\n conv_out, _ = l.FProp(l.theta, conv_in, conv_pad)\n tf.global_variables_initializer().run()\n v = sess.run(tf.reduce_sum(conv_out, 0))\n\n expected_out = [[[0.00963847, -0.04019006], [0.36265337, -0.06592329],\n [0.65582913, -0.1533944]],\n [[0.7512939, -0.7282307], [0.96100605, -1.9509676],\n [0.4639647, 0.2485837]], [[0., 0.], [0., 0.], [0., 0.]]]\n\n self.assertAllClose(expected_out, v)\n\n\nclass CausalPoolingLayerTest(test_utils.TestCase, parameterized.TestCase):\n \"\"\"Tests for CausalPoolingLayer.\"\"\"\n\n @parameterized.named_parameters(\n {\n 'testcase_name': 'max_pooling',\n 'pooling_type': 'MAX',\n 'left_context': 2,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, 0, 2, 4, 0, 0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n }, {\n 'testcase_name': 'avg_pooling',\n 'pooling_type': 'AVG',\n 'left_context': 2,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, -1, 1, 3, 0, 0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n }, {\n 'testcase_name': 'max_pooling_large_window',\n 'pooling_type': 'MAX',\n 'left_context': 10,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, 0, 2, 4, 0, 0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n }, {\n 'testcase_name': 'avg_pooling_large_window',\n 'pooling_type': 'AVG',\n 'left_context': 10,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, -1, 0, 1, 0, 0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n }, {\n 'testcase_name': 'avg_pooling_infinte_window',\n 'pooling_type': 'AVG',\n 'left_context': -1,\n 'inputs': np.array([-2, 0, 2, 4, 0, 0]),\n 'input_paddings': np.array([0, 0, 0, 0, 1, 1]),\n 'expected_output': np.array([-2, -1, 0, 1, 0, 
0]),\n 'expected_output_padding': np.array([0, 0, 0, 0, 1, 1]),\n })\n def testSimpleCase(self, pooling_type, left_context, inputs, input_paddings,\n expected_output, expected_output_padding):\n inputs = inputs[np.newaxis, :, np.newaxis, np.newaxis]\n input_paddings = input_paddings[np.newaxis, :]\n param = conv_layers_builder.CausalPoolingLayer.Params().Set(\n name='test_layer', pooling_type=pooling_type, left_context=left_context)\n pooling_layer = param.Instantiate()\n with self.session(use_gpu=True) as sess:\n inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)\n input_paddings = tf.convert_to_tensor(input_paddings, dtype=tf.float32)\n output, output_paddings = pooling_layer.FPropDefaultTheta(\n inputs, input_paddings)\n tf.global_variables_initializer().run()\n output_val, output_paddings_val = sess.run([output, output_paddings])\n\n self.assertAllClose(expected_output, output_val.flatten())\n self.assertAllEqual(expected_output_padding, output_paddings_val.flatten())\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.array", "numpy.full", "numpy.random.normal" ] ]
541867329/pydata-notebook
[ "867f204d7abac96dbae80e6cdd2e3661e554d1dd" ]
[ "mydemo/matplotlibDemo/clickEvent.py" ]
[ "from matplotlib.pyplot import figure, show\nimport numpy as npy\nfrom numpy.random import rand\n\nif 1: # picking on a scatter plot (matplotlib.collections.RegularPolyCollection)\n\n x, y, c, s = rand(4, 100)\n\n\n def onpick3(event):\n ind = event.ind\n print('onpick3 scatter:', ind, npy.take(x, ind), npy.take(y, ind))\n\n\n fig = figure()\n ax1 = fig.add_subplot(111)\n col = ax1.scatter(x, y, 100 * s, c, picker=True)\n # fig.savefig('pscoll.eps')\n fig.canvas.mpl_connect('pick_event', onpick3)\n\nshow()\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.random.rand", "numpy.take" ] ]
suresh-guttikonda/iGibson
[ "a69e623058180146466cd52d4bb3c00d1facdacf" ]
[ "igibson/robots/jr2_robot.py" ]
[ "import gym\nimport numpy as np\n\nfrom igibson.robots.robot_locomotor import LocomotorRobot\n\n\nclass JR2(LocomotorRobot):\n \"\"\"\n JR2 robot (no arm)\n Reference: https://cvgl.stanford.edu/projects/jackrabbot/\n Uses joint velocity control\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.velocity = config.get(\"velocity\", 1.0)\n LocomotorRobot.__init__(\n self,\n \"jr2_urdf/jr2.urdf\",\n action_dim=4,\n scale=config.get(\"robot_scale\", 1.0),\n is_discrete=config.get(\"is_discrete\", True),\n control=\"velocity\",\n )\n\n def set_up_continuous_action_space(self):\n \"\"\"\n Set up continuous action space\n \"\"\"\n self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)\n self.action_high = self.velocity * np.ones([self.action_dim])\n self.action_low = -self.action_high\n\n def set_up_discrete_action_space(self):\n \"\"\"\n Set up discrete action space\n \"\"\"\n self.action_list = [\n [self.velocity, self.velocity, 0, self.velocity],\n [-self.velocity, -self.velocity, 0, -self.velocity],\n [self.velocity, -self.velocity, -self.velocity, 0],\n [-self.velocity, self.velocity, self.velocity, 0],\n [0, 0, 0, 0],\n ]\n self.action_space = gym.spaces.Discrete(len(self.action_list))\n self.setup_keys_to_action()\n\n def setup_keys_to_action(self):\n self.keys_to_action = {\n (ord(\"w\"),): 0, # forward\n (ord(\"s\"),): 1, # backward\n (ord(\"d\"),): 2, # turn right\n (ord(\"a\"),): 3, # turn left\n (): 4,\n }\n" ]
[ [ "numpy.ones" ] ]
teja-ambati1202/Insurance-Fraud-Detection
[ "a9bbdd5a2af68e0e90f8e16ba43129bab709614b" ]
[ "Training_Raw_data_validation/rawValidation.py" ]
[ "import sqlite3\r\nfrom datetime import datetime\r\nfrom os import listdir\r\nimport os\r\nimport re\r\nimport json\r\nimport shutil\r\nimport pandas as pd\r\nfrom application_logging.logger import App_Logger\r\n\r\n\r\n\r\n\r\n\r\nclass Raw_Data_validation:\r\n\r\n \"\"\"\r\n This class shall be used for handling all the validation done on the Raw Training Data!!.\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n def __init__(self,path):\r\n self.Batch_Directory = path\r\n self.schema_path = 'schema_training.json'\r\n self.logger = App_Logger()\r\n\r\n\r\n def valuesFromSchema(self):\r\n \"\"\"\r\n Method Name: valuesFromSchema\r\n Description: This method extracts all the relevant information from the pre-defined \"Schema\" file.\r\n Output: LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, Number of Columns\r\n On Failure: Raise ValueError,KeyError,Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n with open(self.schema_path, 'r') as f:\r\n dic = json.load(f)\r\n f.close()\r\n pattern = dic['SampleFileName']\r\n LengthOfDateStampInFile = dic['LengthOfDateStampInFile']\r\n LengthOfTimeStampInFile = dic['LengthOfTimeStampInFile']\r\n column_names = dic['ColName']\r\n NumberofColumns = dic['NumberofColumns']\r\n\r\n file = open(\"Training_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n message =\"LengthOfDateStampInFile:: %s\" %LengthOfDateStampInFile + \"\\t\" + \"LengthOfTimeStampInFile:: %s\" % LengthOfTimeStampInFile +\"\\t \" + \"NumberofColumns:: %s\" % NumberofColumns + \"\\n\"\r\n self.logger.log(file,message)\r\n\r\n file.close()\r\n\r\n\r\n\r\n except ValueError:\r\n file = open(\"Training_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n self.logger.log(file,\"ValueError:Value not found inside schema_training.json\")\r\n file.close()\r\n raise ValueError\r\n\r\n except KeyError:\r\n file = open(\"Training_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n self.logger.log(file, \"KeyError:Key value error incorrect key passed\")\r\n file.close()\r\n raise KeyError\r\n\r\n except Exception as e:\r\n file = open(\"Training_Logs/valuesfromSchemaValidationLog.txt\", 'a+')\r\n self.logger.log(file, str(e))\r\n file.close()\r\n raise e\r\n\r\n return LengthOfDateStampInFile, LengthOfTimeStampInFile, column_names, NumberofColumns\r\n\r\n\r\n def manualRegexCreation(self):\r\n \"\"\"\r\n Method Name: manualRegexCreation\r\n Description: This method contains a manually defined regex based on the \"FileName\" given in \"Schema\" file.\r\n This Regex is used to validate the filename of the training data.\r\n Output: Regex pattern\r\n On Failure: None\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n regex = \"['fraudDetection']+['\\_'']+[\\d_]+[\\d]+\\.csv\"\r\n return regex\r\n\r\n def createDirectoryForGoodBadRawData(self):\r\n\r\n \"\"\"\r\n Method Name: createDirectoryForGoodBadRawData\r\n Description: This method creates directories to store the Good Data and Bad Data\r\n after validating the training data.\r\n\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n try:\r\n path = os.path.join(\"Training_Raw_files_validated/\", \"Good_Raw/\")\r\n if not os.path.isdir(path):\r\n os.makedirs(path)\r\n path = os.path.join(\"Training_Raw_files_validated/\", \"Bad_Raw/\")\r\n if not os.path.isdir(path):\r\n 
os.makedirs(path)\r\n\r\n except OSError as ex:\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Error while creating Directory %s:\" % ex)\r\n file.close()\r\n raise OSError\r\n\r\n def deleteExistingGoodDataTrainingFolder(self):\r\n\r\n \"\"\"\r\n Method Name: deleteExistingGoodDataTrainingFolder\r\n Description: This method deletes the directory made to store the Good Data\r\n after loading the data in the table. Once the good files are\r\n loaded in the DB,deleting the directory ensures space optimization.\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n try:\r\n path = 'Training_Raw_files_validated/'\r\n # if os.path.isdir(\"ids/\" + userName):\r\n # if os.path.isdir(path + 'Bad_Raw/'):\r\n # shutil.rmtree(path + 'Bad_Raw/')\r\n if os.path.isdir(path + 'Good_Raw/'):\r\n shutil.rmtree(path + 'Good_Raw/')\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"GoodRaw directory deleted successfully!!!\")\r\n file.close()\r\n except OSError as s:\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Error while Deleting Directory : %s\" %s)\r\n file.close()\r\n raise OSError\r\n\r\n def deleteExistingBadDataTrainingFolder(self):\r\n\r\n \"\"\"\r\n Method Name: deleteExistingBadDataTrainingFolder\r\n Description: This method deletes the directory made to store the bad Data.\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n try:\r\n path = 'Training_Raw_files_validated/'\r\n if os.path.isdir(path + 'Bad_Raw/'):\r\n shutil.rmtree(path + 'Bad_Raw/')\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"BadRaw directory deleted before starting validation!!!\")\r\n file.close()\r\n except OSError as s:\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Error while Deleting Directory : %s\" %s)\r\n file.close()\r\n raise OSError\r\n\r\n def moveBadFilesToArchiveBad(self):\r\n\r\n \"\"\"\r\n Method Name: moveBadFilesToArchiveBad\r\n Description: This method deletes the directory made to store the Bad Data\r\n after moving the data in an archive folder. 
We archive the bad\r\n files to send them back to the client for invalid data issue.\r\n Output: None\r\n On Failure: OSError\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n now = datetime.now()\r\n date = now.date()\r\n time = now.strftime(\"%H%M%S\")\r\n try:\r\n\r\n source = 'Training_Raw_files_validated/Bad_Raw/'\r\n if os.path.isdir(source):\r\n path = \"TrainingArchiveBadData\"\r\n if not os.path.isdir(path):\r\n os.makedirs(path)\r\n dest = 'TrainingArchiveBadData/BadData_' + str(date)+\"_\"+str(time)\r\n if not os.path.isdir(dest):\r\n os.makedirs(dest)\r\n files = os.listdir(source)\r\n for f in files:\r\n if f not in os.listdir(dest):\r\n shutil.move(source + f, dest)\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file,\"Bad files moved to archive\")\r\n path = 'Training_Raw_files_validated/'\r\n if os.path.isdir(path + 'Bad_Raw/'):\r\n shutil.rmtree(path + 'Bad_Raw/')\r\n self.logger.log(file,\"Bad Raw Data Folder Deleted successfully!!\")\r\n file.close()\r\n except Exception as e:\r\n file = open(\"Training_Logs/GeneralLog.txt\", 'a+')\r\n self.logger.log(file, \"Error while moving bad files to archive:: %s\" % e)\r\n file.close()\r\n raise e\r\n\r\n\r\n\r\n\r\n def validationFileNameRaw(self,regex,LengthOfDateStampInFile,LengthOfTimeStampInFile):\r\n \"\"\"\r\n Method Name: validationFileNameRaw\r\n Description: This function validates the name of the training csv files as per given name in the schema!\r\n Regex pattern is used to do the validation.If name format do not match the file is moved\r\n to Bad Raw Data folder else in Good raw data.\r\n Output: None\r\n On Failure: Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n\r\n\r\n # delete the directories for good and bad data in case last run was unsuccessful and folders were not deleted.\r\n self.deleteExistingBadDataTrainingFolder()\r\n self.deleteExistingGoodDataTrainingFolder()\r\n #create new directories\r\n self.createDirectoryForGoodBadRawData()\r\n onlyfiles = [f for f in listdir(self.Batch_Directory)]\r\n try:\r\n f = open(\"Training_Logs/nameValidationLog.txt\", 'a+')\r\n for filename in onlyfiles:\r\n if (re.match(regex, filename)):\r\n splitAtDot = re.split('.csv', filename)\r\n splitAtDot = (re.split('_', splitAtDot[0]))\r\n if len(splitAtDot[1]) == LengthOfDateStampInFile:\r\n if len(splitAtDot[2]) == LengthOfTimeStampInFile:\r\n shutil.copy(\"Training_Batch_Files/\" + filename, \"Training_Raw_files_validated/Good_Raw\")\r\n self.logger.log(f,\"Valid File name!! File moved to GoodRaw Folder :: %s\" % filename)\r\n\r\n else:\r\n shutil.copy(\"Training_Batch_Files/\" + filename, \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f,\"Invalid File Name!! File moved to Bad Raw Folder :: %s\" % filename)\r\n else:\r\n shutil.copy(\"Training_Batch_Files/\" + filename, \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f,\"Invalid File Name!! File moved to Bad Raw Folder :: %s\" % filename)\r\n else:\r\n shutil.copy(\"Training_Batch_Files/\" + filename, \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f, \"Invalid File Name!! 
File moved to Bad Raw Folder :: %s\" % filename)\r\n\r\n f.close()\r\n\r\n except Exception as e:\r\n f = open(\"Training_Logs/nameValidationLog.txt\", 'a+')\r\n self.logger.log(f, \"Error occured while validating FileName %s\" % e)\r\n f.close()\r\n raise e\r\n\r\n\r\n\r\n\r\n def validateColumnLength(self,NumberofColumns):\r\n \"\"\"\r\n Method Name: validateColumnLength\r\n Description: This function validates the number of columns in the csv files.\r\n It is should be same as given in the schema file.\r\n If not same file is not suitable for processing and thus is moved to Bad Raw Data folder.\r\n If the column number matches, file is kept in Good Raw Data for processing.\r\n\r\n Output: None\r\n On Failure: Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n f = open(\"Training_Logs/columnValidationLog.txt\", 'a+')\r\n self.logger.log(f,\"Column Length Validation Started!!\")\r\n for file in listdir('Training_Raw_files_validated/Good_Raw/'):\r\n csv = pd.read_csv(\"Training_Raw_files_validated/Good_Raw/\" + file)\r\n if csv.shape[1] == NumberofColumns:\r\n pass\r\n else:\r\n shutil.move(\"Training_Raw_files_validated/Good_Raw/\" + file, \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f, \"Invalid Column Length for the file!! File moved to Bad Raw Folder :: %s\" % file)\r\n self.logger.log(f, \"Column Length Validation Completed!!\")\r\n except OSError:\r\n f = open(\"Training_Logs/columnValidationLog.txt\", 'a+')\r\n self.logger.log(f, \"Error Occured while moving the file :: %s\" % OSError)\r\n f.close()\r\n raise OSError\r\n except Exception as e:\r\n f = open(\"Training_Logs/columnValidationLog.txt\", 'a+')\r\n self.logger.log(f, \"Error Occured:: %s\" % e)\r\n f.close()\r\n raise e\r\n f.close()\r\n\r\n def validateMissingValuesInWholeColumn(self):\r\n \"\"\"\r\n Method Name: validateMissingValuesInWholeColumn\r\n Description: This function validates if any column in the csv file has all values missing.\r\n If all the values are missing, the file is not suitable for processing.\r\n SUch files are moved to bad raw data.\r\n Output: None\r\n On Failure: Exception\r\n\r\n Written By: iNeuron Intelligence\r\n Version: 1.0\r\n Revisions: None\r\n\r\n \"\"\"\r\n try:\r\n f = open(\"Training_Logs/missingValuesInColumn.txt\", 'a+')\r\n self.logger.log(f,\"Missing Values Validation Started!!\")\r\n\r\n for file in listdir('Training_Raw_files_validated/Good_Raw/'):\r\n csv = pd.read_csv(\"Training_Raw_files_validated/Good_Raw/\" + file)\r\n count = 0\r\n for columns in csv:\r\n if (len(csv[columns]) - csv[columns].count()) == len(csv[columns]):\r\n count+=1\r\n shutil.move(\"Training_Raw_files_validated/Good_Raw/\" + file,\r\n \"Training_Raw_files_validated/Bad_Raw\")\r\n self.logger.log(f,\"Invalid Column for the file!! File moved to Bad Raw Folder :: %s\" % file)\r\n break\r\n if count==0:\r\n csv.rename(columns={\"Unnamed: 0\": \"Wafer\"}, inplace=True)\r\n csv.to_csv(\"Training_Raw_files_validated/Good_Raw/\" + file, index=None, header=True)\r\n except OSError:\r\n f = open(\"Training_Logs/missingValuesInColumn.txt\", 'a+')\r\n self.logger.log(f, \"Error Occured while moving the file :: %s\" % OSError)\r\n f.close()\r\n raise OSError\r\n except Exception as e:\r\n f = open(\"Training_Logs/missingValuesInColumn.txt\", 'a+')\r\n self.logger.log(f, \"Error Occured:: %s\" % e)\r\n f.close()\r\n raise e\r\n f.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "pandas.read_csv" ] ]
inqlee0704/pyqct
[ "304612ed558e7c46fe987ecfea8145cbc5721700" ]
[ "QCT/get_S_norm.py" ]
[ "# ##############################################################################\n# Usage: python get_S_norm.py Subj I1 I2\n# Time: ~ 20s\n# Ref: \n# ##############################################################################\n# 20220118, In Kyu Lee\n# No version suffix\n# ##############################################################################\n# v1c: 08/11/2021, In Kyu Lee\n# - Fixed: when V_IN < V_EX, s_norm returns nan issue.\n# - ownpow is used\n# v1b: 08/10/2021, In Kyu Lee\n# - S* stat is added\n# 03/18/2021, In Kyu Lee\n# Calculate S*\n# ##############################################################################\n# Input: \n# - displacement img, ex) PMSN03001_EX0-TO-PMSN03001_IN0-SSTVD_disp_resample.mhd'\n# - IN lobe mask, ex) PMSN03001_IN0_vida-lobes.img\n# Output:\n# - s* image, ex) PMSN03001_EX0-TO-PMSN03001_IN0-SSTVD_s_norm.img\n# - s* stat, ex) PMSN03001_EX0-TO-PMSN03001_IN0-SSTVD_lobar_s_norm.txt\n# ##############################################################################w\n\n# import libraries\nimport os\nimport sys\nimport numpy as np\nimport time\nimport pandas as pd\nfrom medpy.io import load, save\nimport SimpleITK as sitk\nsitk.ProcessObject_SetGlobalWarningDisplay(False)\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef ownpow(a, b):\n if a > 0:\n return a**b\n if a < 0:\n temp = abs(a)**b\n return -1*temp\n\nstart = time.time()\nSubj = str(sys.argv[1]) # PMSN03001\nI1 = str(sys.argv[2]) # 'IN0'\nI2 = str(sys.argv[3]) # 'EX0'\n\ndisp_path = f'{Subj}_{I2}-TO-{Subj}_{I1}-SSTVD_disp_resample.mhd'\nhisto_EX = pd.read_csv(f'{Subj}_{I2}_vida-histo.csv')\nhisto_IN = pd.read_csv(f'{Subj}_{I1}_vida-histo.csv')\ns_norm_stat_path = f'{Subj}_{I2}-TO-{Subj}_{I1}-SSTVD_lobar_s_norm.txt'\n\nIN_lobe_path = f'{Subj}_{I1}_vida-lobes.img'\nif not os.path.exists(IN_lobe_path):\n IN_lobe_path = f'{Subj}_{I1}_vida-lobes.img.gz'\n\ns_norm_img_path = f'{Subj}_{I2}-TO-{Subj}_{I1}-SSTVD_s_norm.img'\n# V_cm3_IN \nV_EX = histo_EX.loc[histo_EX.location=='both', 'total-volume-cm3'].values[0]\nV_IN = histo_IN.loc[histo_IN.location=='both', 'total-volume-cm3'].values[0]\n# cm^3 -> mm^3\nV_EX = V_EX * 1000\nV_IN = V_IN * 1000\n\n# Data Loading . . 
.\ndisp, disp_h = load(disp_path)\nIN_lobe_img, IN_lobe_header = load(IN_lobe_path)\ns_norm_h = disp_h\n# [mm]\ns = (disp[:,:,:,0]**2+disp[:,:,:,1]**2+disp[:,:,:,2]**2)**0.5\n# This doesn't work if V_IN- V_EX is negative\n# s_norm = s/((V_IN-V_EX)**(1/3))\ns_norm = s/ownpow(V_IN-V_EX,1/3)\n\n# Prep stat\ns_norm_l0 = np.mean(s_norm[IN_lobe_img==8])\ns_norm_l1 = np.mean(s_norm[IN_lobe_img==16])\ns_norm_l2 = np.mean(s_norm[IN_lobe_img==32])\ns_norm_l3 = np.mean(s_norm[IN_lobe_img==64])\ns_norm_l4 = np.mean(s_norm[IN_lobe_img==128])\ns_norm_mean = (s_norm_l0 + s_norm_l1 + s_norm_l2 + s_norm_l3 + s_norm_l4)/5\n\ns_norm_l0_sd = np.std(s_norm[IN_lobe_img==8])\ns_norm_l1_sd = np.std(s_norm[IN_lobe_img==16])\ns_norm_l2_sd = np.std(s_norm[IN_lobe_img==32])\ns_norm_l3_sd = np.std(s_norm[IN_lobe_img==64])\ns_norm_l4_sd = np.std(s_norm[IN_lobe_img==128])\ns_norm_sd = np.std(s_norm[IN_lobe_img!=0])\n\n# CV = std/mean\ns_norm_l0_cv = s_norm_l0_sd/s_norm_l0\ns_norm_l1_cv = s_norm_l1_sd/s_norm_l1\ns_norm_l2_cv = s_norm_l2_sd/s_norm_l2\ns_norm_l3_cv = s_norm_l3_sd/s_norm_l3\ns_norm_l4_cv = s_norm_l4_sd/s_norm_l4\ns_norm_cv = s_norm_sd/s_norm_mean\n\ns_norm_stat = pd.DataFrame({'Lobes':['Lobe0','Lobe1','Lobe2','Lobe3','Lobe4','All'],\n 'sStar_m':np.float16([s_norm_l0,s_norm_l1,s_norm_l2,s_norm_l3,s_norm_l4,s_norm_mean]),\n 'sStar_sd':np.float16([s_norm_l0_sd,s_norm_l1_sd,s_norm_l2_sd,s_norm_l3_sd,s_norm_l4_sd,s_norm_sd]),\n 'sStar_cv':np.float16([s_norm_l0_cv,s_norm_l1_cv,s_norm_l2_cv,s_norm_l3_cv,s_norm_l4_cv,s_norm_cv])})\n\n\n# Save\nsave(s_norm,s_norm_img_path,hdr=s_norm_h)\ns_norm_stat.to_csv(s_norm_stat_path, index=False, sep=' ')\nend = time.time()\nprint(f'Elapsed time: {end-start}s')\n" ]
[ [ "pandas.read_csv", "numpy.std", "numpy.mean", "numpy.float16" ] ]
a-maumau/pixel_objectness.pytorch
[ "f5acb972be694662d839b99eb33e66a807d6031e" ]
[ "trainer.py" ]
[ "import os\nimport math\nimport argparse\nfrom datetime import datetime\n\nimport torch \nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom PIL import Image\n\nimport data_loader\nfrom mau_ml_util.train_logger import TrainLogger\n#from mau_ml_util.metric import SegmentationMetric\nfrom metric_from_latest_mmu import SegmentationMetric\nfrom templates import Template_Trainer\n\ntorch.backends.cudnn.benchmark = True\n\nclass ColorMap(object):\n def __init__(self, base_color=[[0,0,1], [0,1,1], [0,1,0], [1,1,0], [1,0,0]]):\n \"\"\"\n color_points: list of [int, int, int]\n each value of component represent R,G,B.\n \"\"\"\n\n self.base_color = base_color\n self.num_color_min1 = len(self.base_color)-1\n\n def __call__(self, val):\n return self.to_colormap(val)\n\n def to_colormap(self, val):\n \"\"\"\n returns tpule of (R,G,B) value in range [0,1].\n \"\"\"\n\n fract_between = 0\n\n if val <= 0:\n idx1 = idx2 = 0\n elif val >= 1:\n idx1 = idx2 = self.num_color_min1\n else:\n val = val * (self.num_color_min1)\n idx1 = math.floor(val);\n idx2 = idx1+1;\n fract_between = val - idx1\n \n r = (self.base_color[idx2][0] - self.base_color[idx1][0])*fract_between + self.base_color[idx1][0]\n g = (self.base_color[idx2][1] - self.base_color[idx1][1])*fract_between + self.base_color[idx1][1]\n b = (self.base_color[idx2][2] - self.base_color[idx1][2])*fract_between + self.base_color[idx1][2]\n\n return (r,g,b) \n\nclass Trainer_PixelObjectness(Template_Trainer):\n def __init__(self, args, model, optimizer, lr_policy):\n self.args = args \n self.lr_policy = lr_policy\n self.iter_wise = self.lr_policy.iteration_wise\n\n # for loggin the training\n val_head = [\"iter\" if self.iter_wise else \"epoch\", \"mean_pixel_accuracy\"]\n for i in range(self.args.class_num):\n val_head.append(\"mean_precision_class_{}\".format(i))\n for i in range(self.args.class_num):\n val_head.append(\"mean_IoU_class_{}\".format(i))\n self.tlog = self.get_train_logger({\"train\":[\"iter\" if self.iter_wise else \"epoch\", \"batch_mean_total_loss\"], \"val\":val_head},\n save_dir=self.args.save_dir, save_name=self.args.save_name, arguments=self.get_argparse_arguments(self.args),\n use_http_server=self.args.use_http_server, use_msg_server=self.args.use_msg_server, notificate=False,\n visualize_fetch_stride=self.args.viz_fetch_stride, http_port=self.args.http_server_port, msg_port=self.args.msg_server_port)\n \n\n\n # paths\n self.save_dir = self.tlog.log_save_path\n self.model_param_dir = self.tlog.mkdir(\"model_param\")\n\n if torch.cuda.is_available() and not self.args.nogpu:\n self.map_device = torch.device('cuda:{}'.format(self.args.gpu_device_num))\n else:\n self.map_device = torch.device('cpu')\n\n self.model = model\n if torch.cuda.is_available() and not args.nogpu:\n self.model = self.model.to(self.map_device)\n\n self.optimizer = optimizer\n\n self.train_loader = data_loader.get_train_loader(self.args, [(0.5, 0.5, 0.5),(0.5, 0.5, 0.5)])#[(0.485, 0.456, 0.406),(0.229, 0.224, 0.225)])\n self.val_loader = data_loader.get_val_loader(self.args, [(0.5, 0.5, 0.5),(0.5, 0.5, 0.5)])\n\n self.cmap = self._gen_cmap()\n\n if self.args.show_parameters:\n for idx, m in enumerate(model.modules()):\n print(idx, '->', m)\n print(args)\n\n print(\"\\nsaving at {}\\n\".format(self.save_dir))\n\n # PASCAL VOC color maps\n # borrowed from https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae\n def _gen_cmap_voc(self, class_num=255):\n def bitget(byteval, idx):\n return ((byteval & 
(1 << idx)) != 0)\n\n cmap = np.zeros((class_num+1, 3), dtype='uint8')\n for i in range(class_num+1):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7-j)\n g = g | (bitget(c, 1) << 7-j)\n b = b | (bitget(c, 2) << 7-j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n return cmap\n\n def _gen_cmap(self, max_value=255):\n mapper = ColorMap()\n cmap = []\n\n for v in range(max_value+1):\n cmap.append(np.uint8(np.array(mapper(v/max_value))*255))\n\n return cmap\n\n def convert_to_color_map(self, img_array, color_map=None, class_num=255):\n \"\"\"\n img_array: numpy.ndarray\n shape must be (width, height)\n \"\"\"\n\n if color_map is None:\n color_map = self._gen_cmap()\n\n new_img = np.empty(shape=(img_array.shape[0], img_array.shape[1], 3), dtype='uint8')\n\n for c in range(class_num+1):\n index = np.where(img_array == c)\n new_img[index] = color_map[c]\n\n return new_img\n\n def validate(self, count):\n with torch.no_grad():\n self.model.eval()\n\n # logging\n pix_acc = 0.0\n precision_class = []\n jaccard_class = []\n\n #data_count_precision = [0 for i in range(self.args.class_num)]\n #data_count_jaccard = [0 for i in range(self.args.class_num)]\n \n metric = SegmentationMetric(self.args.class_num, map_device=self.map_device)\n\n if self.args.quiet:\n _trainval_loader = self.val_loader\n else:\n _trainval_loader = self.to_tqdm(self.val_loader, desc=\"train val\")\n\n for b, (image, mask, original_image) in enumerate(_trainval_loader):\n batch_size = image.shape[0]\n\n img = self.format_tensor(image, requires_grad=False, map_device=self.map_device)\n mask = self.format_tensor(mask, requires_grad=False, map_device=self.map_device)\n\n outputs, prob_maps = self.model.inference(img)\n outputs = F.interpolate(outputs, size=[self.args.crop_size, self.args.crop_size], mode='bilinear', align_corners=False)\n prob_maps = F.interpolate(prob_maps, size=[self.args.crop_size, self.args.crop_size], mode='bilinear', align_corners=False)\n\n metric(outputs, mask)\n \n # save only few batch for sample\n if b < 1:\n self.tlog.setup_output(\"{}_{}_batch_{}_sample\".format(\"iter\" if self.iter_wise else \"epoch\", count, b))\n\n # test color image\n #test_img = np.ones((256,256))\n #for i in range(256):\n # test_img[i] = test_img[i]*i\n # \n #self.tlog.pack_output(Image.fromarray(self.convert_to_color_map(np.uint8(test_img))))\n \n for n in range(batch_size):\n self.tlog.pack_output(Image.fromarray(np.uint8(original_image[n].detach().numpy())))\n\n pred_img = np.uint8(outputs[n].squeeze(0).cpu().detach().numpy())\n prob_img = prob_maps[n].squeeze(0).cpu().detach().numpy()\n self.tlog.pack_output(Image.fromarray(pred_img*255), not_in_schema=True)\n self.tlog.pack_output(Image.fromarray(self.convert_to_color_map(np.uint8(prob_img[1]*255), self.cmap)))\n\n gt_img = np.uint8(mask[n].cpu().detach().numpy())\n self.tlog.pack_output(Image.fromarray(gt_img*255), not_in_schema=True)\n\n self.tlog.pack_output(None, \" \")\n\n self.tlog.pack_output(None, \"validation sample\", [\"left: input\", \"center: pred cmap\", \"right: output mask\"])\n self.tlog.flush_output()\n\n pix_acc = metric.calc_pix_acc()\n precision = metric.calc_mean_precision()\n jaccard_index = metric.calc_mean_jaccard_index()\n\n # might I should return the non evaluated class with nan and filter the list\n # by filter(lambda n: n!=float(\"nan\"), items)\n\n for class_id in range(self.args.class_num):\n precision_class.append(precision[\"class_{}\".format(class_id)])\n 
jaccard_class.append(jaccard_index[\"class_{}\".format(class_id)])\n\n #data_count_precision[class_id] += len(precision[\"class_{}\".format(str(class_id))])\n #data_count_jaccard[class_id] += len(jaccard_index[\"class_{}\".format(str(class_id))])\n\n # logging, this implementation is not caring missing value\n #mean_precision_classes = [y/x if x > 0 else 0 for y, x in zip(precision_class, data_count_precision)]\n #mean_iou_classes = [y/x if x > 0 else 0 for y, x in zip(jaccard_class, data_count_jaccard)]\n \n # clac. with out background\n log_msg_data = [count, pix_acc, np.mean(precision_class[1:]), np.mean(jaccard_class[1:])]\n\n self.tlog.log(\"val\", [count, pix_acc]+precision_class+jaccard_class)\n self.tlog.log_message(\"[{}] mean pix acc.:{:.5f}, precision:{:.5f}, IoU:{:.5f}\".format(*log_msg_data), \"LOG\", \"validation\")\n\n if not self.args.quiet:\n tqdm.write(\"[{}] mean pix acc.:{:.5f}, precision:{:.5f}, IoU:{:.5f}\".format(*log_msg_data))\n\n self.model.train()\n\n def train(self):\n train_finish = False\n \n if self.args.quiet:\n epochs = range(1, self.args.epochs+1)\n else:\n epochs = self.to_tqdm(range(1, self.args.epochs+1), desc=\"train\")\n\n curr_iter = 0\n epoch = 0\n\n total_loss = 0.0\n data_num = 0\n\n # for epoch wise and iter wise\n decay_arg = {\"curr_iter\":curr_iter, \"curr_epoch\":epoch}\n\n for epoch in epochs:\n if not self.iter_wise:\n total_loss = 0.0\n data_num = 0\n\n if self.args.quiet:\n _train_loader = self.train_loader\n else:\n _train_loader = self.to_tqdm(self.train_loader)\n\n for img, mask in _train_loader:\n # loss log will be showed in size averaged\n data_num += 1\n\n self.optimizer.zero_grad()\n\n images = self.format_tensor(img, map_device=self.map_device)\n masks = self.format_tensor(mask, map_device=self.map_device)\n\n output = self.model(images)\n output = F.interpolate(output, size=[self.args.crop_size, self.args.crop_size], mode='bilinear', align_corners=False)\n\n batch_loss = self.model.loss(output, masks)\n total_loss += batch_loss.item()\n \n batch_loss.backward()\n self.optimizer.step()\n\n curr_iter += 1\n\n if not self.args.quiet:\n _train_loader.set_description(\"{: 3d}: train[{}] loss: {:.5f}\".format(curr_iter if self.iter_wise else epoch, self.args.save_name, total_loss/data_num))\n\n if self.iter_wise:\n self.lr_policy.decay_lr(**decay_arg)\n \n if curr_iter % self.args.trainval_every == 0:\n self.validate(curr_iter)\n\n if curr_iter % self.args.save_every == 0:\n state = {'iter': curr_iter,\n 'optimizer_state_dict' : self.optimizer.state_dict()}\n self.model.save(add_state=state, file_name=os.path.join(self.model_param_dir,'model_param_iter{}.pth'.format(curr_iter)))\n \n self.tlog.log_message(\"[iter:{}] model saved.\".format(curr_iter), \"LOG\", \"train\")\n\n if curr_iter % self.args.log_every == 0:\n if not self.args.quiet:\n tqdm.write(\"[#{: 3d}] {} iter mean loss: {:.5f}\".format(curr_iter, self.args.log_every, total_loss/data_num))\n \n self.tlog.log(\"train\", [curr_iter, float(total_loss/data_num)])\n self.tlog.log_message(\"[{}] {} iter mean loss:{:.5f}\".format(\"iter:{}\".format(curr_iter), self.args.log_every, float(total_loss/data_num)), \"LOG\", \"train\")\n\n total_loss = 0\n data_num = 0\n\n if curr_iter == self.args.max_iter:\n train_finish = True\n _train_loader.close()\n break\n \n if train_finish:\n epochs.close()\n break\n\n if not self.iter_wise:\n if not self.args.quiet:\n tqdm.write(\"[# {: 3d}] batch mean loss: {:.5f}\".format(epoch, total_loss/data_num))\n \n if epoch % self.args.log_every == 
0:\n self.tlog.log(\"train\", [epoch, float(total_loss/data_num)])\n self.tlog.log_message(\"[{}] batch mean loss:{:.5f}\".format(\"epoch:{}\".format(epoch), float(total_loss/data_num)), \"LOG\", \"train\")\n\n # check train validation\n if epoch % self.args.trainval_every == 0:\n self.validate(epoch)\n\n self.lr_policy.decay_lr(**decay_arg)\n #if epoch % self.args.decay_every == 0:\n # for param_group in self.optimizer.param_groups:\n # param_group['lr'] *= self.args.decay_value\n #\n # self.tlog.log_message(\"[epoch:{}] decay learning rate by {}\".format(epoch, self.args.decay_value), \"LOG\", \"train\")\n \n # save model\n if epoch % self.args.save_every == 0:\n state = {'epoch': epoch,\n 'optimizer_state_dict' : self.optimizer.state_dict()}\n self.model.save(add_state=state, file_name=os.path.join(self.model_param_dir,'model_param_e{}.pth'.format(epoch)))\n \n self.tlog.log_message(\"[epoch:{}] model saved.\".format(epoch), \"LOG\", \"train\")\n\n self.model.save(add_state={'optimizer_state_dict' : self.optimizer.state_dict()},\n file_name=os.path.join(self.model_param_dir, 'model_param_fin_{}.pth'.format(datetime.now().strftime(\"%Y%m%d_%H-%M-%S\"))))\n\n print(\"data is saved at {}\".format(self.save_dir))\n\n def test_loader(self):\n from matplotlib import pylab as plt\n import time\n\n if self.args.quiet:\n epochs = range(1, self.args.epochs+1)\n else:\n epochs = self.to_tqdm(range(1, self.args.epochs+1), desc=\"train\")\n\n for epoch in epochs:\n if self.args.quiet:\n _train_loader = self.train_loader\n else:\n _train_loader = self.to_tqdm(self.train_loader)\n\n for img, mask in _train_loader:\n batch_size = img.shape[0]\n\n img = img.numpy()\n mask = mask.numpy()\n\n for i in range(batch_size):\n _img = np.uint8(img[i]*255).transpose(1,2,0)\n _mask = self.convert_to_color_map(np.uint8(mask[i]), self.cmap)\n\n merged_img = np.concatenate([_img, _mask], axis=1)\n\n plt.imshow(merged_img)\n plt.show()\n\n" ]
[ [ "numpy.empty", "numpy.zeros", "numpy.mean", "numpy.concatenate", "torch.no_grad", "matplotlib.pylab.show", "torch.cuda.is_available", "numpy.array", "matplotlib.pylab.imshow", "numpy.where", "torch.device", "torch.nn.functional.interpolate", "numpy.uint8" ] ]
solad5/acgan-gpt2
[ "52901a996fd235355f8c3f6b83037c85b1fdb415" ]
[ "gpt2_model.py" ]
[ "'''\n code by TaeHwan Jung(@graykode)\n Original Paper and repository here : https://github.com/openai/gpt-2\n GPT2 Pytorch Model : https://github.com/huggingface/pytorch-pretrained-BERT\n'''\n\nimport copy\nimport torch\nimport math\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\n\ndef gelu(x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\ndef load_weight(model, state_dict):\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if key.endswith(\".g\"):\n new_key = key[:-2] + \".weight\"\n elif key.endswith(\".b\"):\n new_key = key[:-2] + \".bias\"\n elif key.endswith(\".w\"):\n new_key = key[:-2] + \".weight\"\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n start_model = model\n if hasattr(model, \"transformer\") and all(not s.startswith('transformer.') for s in state_dict.keys()):\n start_model = model.transformer\n load(start_model, prefix=\"\")\n\n # Make sure we are still sharing the output and input embeddings after loading weights\n model.set_tied()\n return model\n\nclass LayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(LayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\n\nclass Conv1D(nn.Module):\n def __init__(self, nf, nx):\n super(Conv1D, self).__init__()\n self.nf = nf\n w = torch.empty(nx, nf)\n nn.init.normal_(w, std=0.02)\n self.weight = Parameter(w)\n self.bias = Parameter(torch.zeros(nf))\n\n def forward(self, x):\n size_out = x.size()[:-1] + (self.nf,)\n x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)\n x = x.view(*size_out)\n return x\n\n\nclass Attention(nn.Module):\n def __init__(self, nx, n_ctx, config, scale=False):\n super(Attention, self).__init__()\n n_state = nx # in Attention: n_state=768 (nx=n_embd)\n # [switch nx => n_state from Block to Attention to keep identical to TF implem]\n assert n_state % config.n_head == 0\n self.register_buffer(\"bias\", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))\n self.n_head = config.n_head\n self.split_size = n_state\n self.scale = scale\n self.c_attn = Conv1D(n_state * 3, nx)\n self.c_proj = Conv1D(n_state, nx)\n\n def _attn(self, q, k, v):\n w = torch.matmul(q, k)\n if self.scale:\n w = w / math.sqrt(v.size(-1))\n nd, ns = w.size(-2), w.size(-1)\n b = self.bias[:, :, ns - nd:ns, :ns]\n # Here the bias b also serves as the mask to remove future 
information\n w = w * b - 1e10 * (1 - b)\n w = nn.Softmax(dim=-1)(w)\n return torch.matmul(w, v)\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def forward(self, x, layer_past=None):\n x = self.c_attn(x)\n query, key, value = x.split(self.split_size, dim=2)\n query = self.split_heads(query)\n key = self.split_heads(key, k=True)\n value = self.split_heads(value)\n if layer_past is not None:\n past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below\n key = torch.cat((past_key, key), dim=-1)\n value = torch.cat((past_value, value), dim=-2)\n present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking\n a = self._attn(query, key, value)\n a = self.merge_heads(a)\n a = self.c_proj(a)\n return a, present\n\n\nclass MLP(nn.Module):\n def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)\n super(MLP, self).__init__()\n nx = config.n_embd\n self.c_fc = Conv1D(n_state, nx)\n self.c_proj = Conv1D(nx, n_state)\n self.act = gelu\n\n def forward(self, x):\n h = self.act(self.c_fc(x))\n h2 = self.c_proj(h)\n return h2\n\n\nclass Block(nn.Module):\n def __init__(self, n_ctx, config, scale=False):\n super(Block, self).__init__()\n nx = config.n_embd\n self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)\n self.attn = Attention(nx, n_ctx, config, scale)\n self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)\n self.mlp = MLP(4 * nx, config)\n\n def forward(self, x, layer_past=None):\n a, present = self.attn(self.ln_1(x), layer_past=layer_past)\n x = x + a\n m = self.mlp(self.ln_2(x))\n x = x + m\n return x, present\n\n\nclass Transformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.n_layer = config.n_layer\n self.n_embd = config.n_embd\n self.n_vocab = config.vocab_size\n\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n block = Block(config.n_ctx, config, scale=True)\n self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])\n self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n\n def set_embeddings_weights(self, model_embeddings_weights):\n embed_shape = model_embeddings_weights.shape\n self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)\n self.decoder.weight = model_embeddings_weights # Tied weights\n\n def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):\n if past is None:\n past_length = 0\n past = [None] * len(self.h)\n else:\n past_length = past[0][0].size(-2)\n if position_ids is None:\n position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long,\n device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_ids.size(-1))\n position_ids = position_ids.view(-1, position_ids.size(-1))\n\n inputs_embeds = self.wte(input_ids)\n position_embeds = self.wpe(position_ids)\n if token_type_ids is not 
None:\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))\n token_type_embeds = self.wte(token_type_ids)\n else:\n token_type_embeds = 0\n hidden_states = inputs_embeds + position_embeds + token_type_embeds\n presents = []\n for block, layer_past in zip(self.h, past):\n hidden_states, present = block(hidden_states, layer_past)\n presents.append(present)\n hidden_states = self.ln_f(hidden_states)\n output_shape = input_shape + (hidden_states.size(-1),)\n return hidden_states.view(*output_shape), presents\n\n\nclass LinearReadoutHead(nn.Module):\n def __init__(self, model_embeddings_weights, config):\n super().__init__()\n self.n_embd = config.n_embd\n self.set_embeddings_weights(model_embeddings_weights)\n\n def set_embeddings_weights(self, model_embeddings_weights):\n embed_shape = model_embeddings_weights.shape\n self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)\n self.decoder.weight = model_embeddings_weights # Tied weights\n\n def forward(self, hidden_state):\n # Truncated Language modeling logits (we remove the last token)\n # h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)\n lm_logits = self.decoder(hidden_state)\n return lm_logits\n\n\nclass GPT2(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transformer = Transformer(config)\n self.readout_head = LinearReadoutHead(self.transformer.wte.weight, config)\n\n def set_tied(self):\n \"\"\" Make sure we are sharing the embeddings\n \"\"\"\n self.readout_head.set_embeddings_weights(self.transformer.wte.weight)\n\n def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):\n hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)\n return hidden_states" ]
[ [ "torch.empty", "torch.ones", "torch.nn.Linear", "torch.pow", "torch.nn.Softmax", "torch.sqrt", "torch.nn.Embedding", "torch.nn.init.normal_", "torch.zeros", "torch.cat", "torch.nn.parameter.Parameter", "torch.matmul" ] ]
ManuLado/Enviar-comandos-a-marlin
[ "5ba596c9b0db47125e2e29ed8084e61d326e8777", "5ba596c9b0db47125e2e29ed8084e61d326e8777" ]
[ "take_images.py", "pano_libs/P0.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Graba video leido desde la arducam\n# Se le debe indicar el archivo de video a grabar y\n# la duración de la captura en segundos.\n\n# SINTAXIS: python capturar_video.py VIDEO TIEMPO\n# 1- Ruta del video\n# 2- Tiempo de grabacion en segundos\n\nfrom ctypes import *\n\nimport ctypes\n\nimport sys\nimport os\n\nimport time\nfrom PIL import Image\nimport numpy as np\nimport thread as thread\nimport math\n\nfrom select import select\nfrom evdev import InputDevice\nfrom evdev import ecodes\nfrom astropy.io import fits\nimport ArducamSDK\n\n# Analisis de argumentos\nif (len(sys.argv)==3):\n NOMBREIMG = sys.argv[1];\n NUMIMG = int(sys.argv[2]);\nelse:\n print (\"Se requieren 2 argumentos: NOMBRE_IMAGENES NUMERO_IMAGENES\")\n exit()\n\n#### CONFIGURACION ARDUCAMSDK ################\nCOLOR_BYTE2RGB = 47 # No se modifico del original\nCAMERA_MT9M001 = 0x4D091031 # No se modifico del original\nSensorShipAddr = 186\nI2C_MODE_8_16 = 1\nusbVid = 0x52CB # No se modifico del original\nWidth = 1280 #1280\nHeight = 1024 #1024\ncfg ={\"u32CameraType\":CAMERA_MT9M001,\n \"u32Width\":Width,\"u32Height\":Height,\n \"u32UsbVersion\":1,\n \"u8PixelBytes\":1,\n \"u16Vid\":0x52cb,\n \"u8PixelBits\":8,\n \"u32SensorShipAddr\":SensorShipAddr,\n \"emI2cMode\":I2C_MODE_8_16 }\n\n# FLAGS\nglobal saveFlag,downFlag,flag,H_value,V_value,lx,ly,mx,my,dx,dy,W_zoom,H_zooM,handle,openFlag,initTime,storeFlag,bufferData,globalGain\nglobal testPatternFlag\nglobal integrationTime\nglobal shutterWidth\n\nopenFlag = False\nhandle = {}\ndownFlag = False\nflag = True\nsaveFlag = False\nstoreFlag = False\nsaveNum=0\nH_value = 0\nV_value = 0\nW_zoom = 0\nH_zoom = 0\nlx = 0\nly = 0\nmx = 0\nmy = 0\ndx = 0\ndy = 0\ntestPatternFlag = False;\n\nregArr=[[0x01, 0x000C], # Row Start\n [0x02, 0x0014], # Column Start\n [0x03, Height - 1], # Window Height 0x03FF\n [0x04, Width - 1], # Window Width 0x04FF\n [0x05, 0x0009], # Horizontal Blanking\n [0x06, 0x0019], # Vertical Blanking\n [0x07, 0x0002], # Output Control\n [0x09, 0x0419], # Shutter Width 0x0419 (max: 0x3FFF)\n [0x0B, 0x0000], # Frame Restart\n [0x0C, 0x0000],#0x0100], \n [0x0D, 0x0000], \n [0x1E, 0x8000], # Read Mode 1 0x8000\n [0x20, 0x1104], \n [0x2B, 0x0008], \n [0x2C, 0x0008], \n [0x2D, 0x0008], \n [0x2E, 0x0008],\n [0x32, 0x0FFC], # Test Data Register\n [0x35, 0x0067], # Global Gain 0x0008 (max: 0x0067)\n [0x5F, 0x0904], \n #[0x60, 0x0000], # BLC offset: Even row, even column\n #[0x61, 0x0000], # BLC offset: Odd row, odd column\n #[0x62, 0x049F], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)\n #[0x63, 0x0000], # BLC offset: Even row, odd column\n #[0x64, 0x0000], # BLC offset: Odd row, Even column\n [0x60, 0x002F], # BLC offset: Even row, even column\n [0x61, 0x002F], # BLC offset: Odd row, odd column\n [0x62, 0x0499], # Black Level Calibration Control 0x0498 (No-BLC: 0x049F; Manual-BLC: 0x0499 & reg0x60/61/63/64)\n [0x63, 0x000F], # BLC offset: Even row, odd column\n [0x64, 0x000F], # BLC offset: Odd row, Even column\n [0xF1, 0x0001], \n [0xFFFF, 0xFFFF]\n]\n\nglobalGain = regArr[18][1];\n\n# Cálculo del tiempo de integración inicial (pag 16 del datasheet)\nrowTime = regArr[3][1] + 1 + 244 + regArr[4][1] - 19; #[pixel clock periods] default: 1514\nresetDelay = 4*regArr[9][1] #[pixel clock periods] default: 0\noverheadTime = 180; #[pixel clock periods]\nshutterWidth = regArr[7][1]\nintegrationPeriods = shutterWidth*rowTime - overheadTime - resetDelay;\nclockPeriod = 1000.0/24e6; 
#[ms]\nintegrationTime = integrationPeriods * clockPeriod; #[ms]\nwith open('integrationtime.txt','w') as it:\n it.write(str(integrationTime)+\"\\n\")\n\nprint (\"Initial integration time: %.3fms\"%(integrationTime));\nprint (\"Initial gain: 0x%02x\"%(globalGain));\n\na_lock = thread.allocate_lock();\n\ndef readThread(threadName,read_Flag):\n global flag,handle,storeFlag,bufferData,openFlag\n global a_lock\n count = 0\n time0 = time.time()\n time1 = time.time()\n data = {}\n # Wait for the arducam object to be ready\n while openFlag == False:\n time1 = time.time();\n if time1 - time0 > 20:\n #timeout\n exit;\n\n while flag:\n res = ArducamSDK.Py_ArduCam_available(handle)\n #~ print \"Available frames %d\"%(res)\n if res > 0:\n \n res,data = ArducamSDK.Py_ArduCam_read(handle,Width * Height)\n if res == 0:\n count += 1\n time1 = time.time()\n ArducamSDK.Py_ArduCam_del(handle)\n else:\n print (\"read data fail!\")\n \n else:\n #print \"No data availiable\"\n time.sleep(.01);\n \n if len(data) >= Width * Height:\n if time1 - time0 >= 5:\n print (\"%s %f %s\\n\"%(\"fps:\",count*1.0/(time1-time0),\"/s\"))\n count = 0\n time0 = time1\n \n a_lock.acquire();\n bufferData = data;\n data = [];\n storeFlag = True;\n a_lock.release();\n #show(data)\n\t\t#else:\n\t\t#\tprint \"data length is not enough!\"\n if flag == False:\n break\n \nthread.start_new_thread( readThread,(\"Thread-2\", flag,))\n\npass\n\ndef showAndSave(threadName,algoquenoseusa):\n global flag,W_zoom,H_zoom,V_value,H_value,lx,ly,downFlag,saveFlag,saveNum,bufferData,storeFlag\n global a_lock\n global hist_ax\n global NOMBREIMG\n img = np.zeros((Height, Width), dtype=np.uint8);\n while flag:\n a_lock.acquire();\n if storeFlag == True:\n storeFlag = False;\n img = np.frombuffer(bufferData, np.uint8)\n img = np.reshape(img, (Height, Width));\n\n saveNum += 1\n #name = NOMBREIMG + str(saveNum) + \".fits\"\n\t #name = NOMBREIMG + \"_\" + str(saveNum) + \".jpeg\"\n name = NOMBREIMG + \".fits\"\n hdu=fits.PrimaryHDU()\n hdu.data=img\n hdu.writeto(name,overwrite=True)\n print (\"Frame saved to %s\"%(name))\n \n a_lock.release();\n \n if saveNum == NUMIMG:\n flag=False;\n print (\"Total number of adq images = %d\"%(saveNum))\n \n if flag == False:\n break\nthread.start_new_thread( showAndSave,(\"Thread-3\",flag))\npass\n\ndef init_and_read_arducam():\n\tglobal flag,regArr,handle,openFlag\n\tregNum = 0\n\tres,handle = ArducamSDK.Py_ArduCam_autoopen(cfg)\n\tif res == 0:\n\t\topenFlag = True\n\t\tprint (\"device open success!\")\n\t\twhile (regArr[regNum][0] != 0xFFFF):\n\t\t\tArducamSDK.Py_ArduCam_writeSensorReg(handle,regArr[regNum][0],regArr[regNum][1])\n\t\t\tregNum = regNum + 1\n\t\tres = ArducamSDK.Py_ArduCam_beginCapture(handle)\n\t\t\n\t\tif res == 0:\n\t\t\tprint (\"transfer task create success!\")\n\t\t\twhile flag :\t\t\n\t\t\t\tres = ArducamSDK.Py_ArduCam_capture(handle)\n\t\t\t\tif res != 0:\n\t\t\t\t\tprint (\"capture failed!\")\n\t\t\t\t\tflag = False;\n\t\t\t\t\tbreak;\t\t\t\t\t\n\t\t\t\ttime.sleep(0.1)\n\t\t\t\tif flag == False:\t\t\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\tprint (\"transfer task create fail!\")\n\t\t\n\t\ttime.sleep(2);\n\t\tres = ArducamSDK.Py_ArduCam_close(handle)\n\t\tif res == 0:\n\t\t\topenFlag = False\n\t\t\tprint (\"device close success!\")\n\t\telse:\n\t\t\tprint (\"device close fail!\")\n\telse:\n\t\tprint (\"device open fail!\")\n\nif __name__ == \"__main__\":\n\tinitTime = time.time();\n\tinit_and_read_arducam();\n\n", "from random import randrange\r\nimport matplotlib.pyplot as plt\r\nimport numpy as 
np\r\nimport cv2\r\nimport argparse\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"echo\", help=\"echo the string you use here\")\r\nparser.add_argument(\"nombre1\", help=\"echo the string you use here\")\r\nparser.add_argument(\"nombre2\", help=\"echo the string you use here\")\r\nparser.add_argument(\"nombre3\", help=\"echo the string you use here\")\r\nargs = parser.parse_args()\r\n\r\ndire=args.echo\r\n\r\n\r\nname1=dire+'/'+args.nombre1\r\nname2=dire+'/'+args.nombre2\r\nk=args.nombre3\r\n\r\nfigsize = (10, 10)\r\nrgb_l = cv2.cvtColor(cv2.imread(name1), cv2.COLOR_BGR2RGB)\r\ngray_l = cv2.cvtColor(rgb_l, cv2.COLOR_RGB2GRAY)\r\nrgb_r = cv2.cvtColor(cv2.imread(name2), cv2.COLOR_BGR2RGB)\r\ngray_r = cv2.cvtColor(rgb_r, cv2.COLOR_RGB2GRAY)\r\n# use orb if sift is not installed\r\nfeature_extractor = cv2.ORB_create()\r\n\r\n# find the keypoints and descriptors with chosen feature_extractor\r\nkp_l, desc_l = feature_extractor.detectAndCompute(gray_l, None)\r\nkp_r, desc_r = feature_extractor.detectAndCompute(gray_r, None)\r\n\r\ntest = cv2.drawKeypoints(rgb_l, kp_l, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n\r\nplt.figure(figsize=figsize)\r\nplt.imshow(test)\r\nplt.title(\"keypoints\")\r\n#plt.show()\r\nbf = cv2.BFMatcher()\r\nmatches = bf.knnMatch(desc_l, desc_r, k=2)\r\n\r\n# Apply ratio test\r\ngood_match = []\r\nfor m in matches:\r\n if m[0].distance/m[1].distance < 0.5:\r\n good_match.append(m)\r\ngood_match_arr = np.asarray(good_match)\r\n\r\n# show only 30 matches\r\nim_matches = cv2.drawMatchesKnn(rgb_l, kp_l, rgb_r, kp_r,\r\n good_match[0:30], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\r\n\r\nplt.figure(figsize=(20, 20))\r\nplt.imshow(im_matches)\r\nplt.title(\"keypoints matches\")\r\n#plt.show()\r\ngood_kp_l = np.array([kp_l[m.queryIdx].pt for m in good_match_arr[:, 0]]).reshape(-1, 1, 2)\r\ngood_kp_r = np.array([kp_r[m.trainIdx].pt for m in good_match_arr[:, 0]]).reshape(-1, 1, 2)\r\nH, masked = cv2.findHomography(good_kp_r, good_kp_l, cv2.RANSAC, 5.0)\r\n\r\nprint(H)\r\nrgb_r_warped = cv2.warpPerspective(rgb_r, H, (rgb_l.shape[1] + rgb_r.shape[1], rgb_l.shape[0]))\r\nrgb_r_warped[0:rgb_l.shape[0], 0:rgb_l.shape[1]] = rgb_l\r\n\r\nplt.figure(figsize=figsize)\r\nplt.imshow(rgb_r_warped)\r\nplt.title(\"naive warping\")\r\n#plt.show()\r\ndef warpTwoImages(img1, img2, H):\r\n '''warp img2 to img1 with homograph H\r\n from: https://stackoverflow.com/questions/13063201/how-to-show-the-whole-image-when-using-opencv-warpperspective\r\n '''\r\n h1, w1 = img1.shape[:2]\r\n h2, w2 = img2.shape[:2]\r\n pts1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)\r\n pts2 = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)\r\n pts2_ = cv2.perspectiveTransform(pts2, H)\r\n pts = np.concatenate((pts1, pts2_), axis=0)\r\n [xmin, ymin] = np.int32(pts.min(axis=0).ravel() - 0.5)\r\n [xmax, ymax] = np.int32(pts.max(axis=0).ravel() + 0.5)\r\n t = [-xmin, -ymin]\r\n Ht = np.array([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]]) # translate\r\n\r\n result = cv2.warpPerspective(img2, Ht@H, (xmax-xmin, ymax-ymin))\r\n result[t[1]:h1+t[1], t[0]:w1+t[0]] = img1\r\n return result\r\n\r\n\r\nresult = warpTwoImages(rgb_l, rgb_r, H)\r\n\r\nplt.figure(figsize=figsize)\r\nplt.imshow(result)\r\nplt.title(\"better warping\")\r\n#plt.show()\r\ncv2.imwrite(dire+\"_P0/\"+str(k)+\".jpg\",result)" ]
[ [ "numpy.reshape", "numpy.frombuffer", "numpy.zeros" ], [ "matplotlib.pyplot.figure", "numpy.float32", "numpy.asarray", "matplotlib.pyplot.title", "matplotlib.pyplot.imshow", "numpy.array", "numpy.concatenate" ] ]
AndrewFalkowski/SODIS_SIM
[ "4d5da3e0872ee747d399d66fdee1633e7d2b8ab1" ]
[ "BoxThermal.py" ]
[ "import numpy as np\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport numba\nimport time\nfrom scipy.integrate import odeint\n\n\n\n# a sample differential equation dy/dx = (x-y)/2\n\n# def dydx(x,y):\n# return ((x-y)/2)\n\n# # find the value of y for a given x using step size h\n# # and an initial value y0 at x0\n\n# def rungeKutta(x0, y0, x, h):\n# #count num iteratings using step size or step height h\n# n = int(((x - x0)/h))\n# # iterate for number of iterations\n# y = y0\n# for i in range(1, n + 1):\n# # apply runge kutta formulas to find the next value of y\n# k1 = h * dydx(x0, y)\n# k2 = h * dydx(x0 + 0.5 * h, y + 0.5 * k1)\n# k3 = h * dydx(x0 + 0.5 * h, y + 0.5 * k2)\n# k4 = h * dydx(x0 + h, y + k3)\n\n# # update the next value of y\n# y = y + (1.0 / 6.0) * (k1 + 2*k2 + 2*k3 + k4)\n\n# # update the next value of x\n# x0 = x0 + h\n\n# return y\n\n\n# # driver method\n# x0 = 0\n# y = 1\n# x = 2\n# h = 0.2\n# print('The value of y at x is:', rungeKutta(x0, y, x, h))\n\ndef box_dim(A_c, h, prct_f):\n # all dimensions in meters\n box_vol = A_c * h\n vol_f = box_vol * prct_f # L\n m_a = box_vol * (1-prct_f) * 1.225\n m_f = vol_f * 997 # kg\n print('Contained Water: ', m_f, 'Liters')\n A_s = 4 * h * np.sqrt(A_c)\n return m_f, m_a, A_s\n\n# m_f, m_a, A_s = box_dim(0.25, 0.15, 0.9)\n\n\ndef boxODE(x, t, m_f, m_a, A_s):\n\n # constants\n A_c = 0.25 # square meters\n A_s = A_s\n A_f = A_c # square meters\n T_amb = 298 # kelvin\n T_sky = T_amb - 6 # kelvin\n alpha_g = 0.02 # %\n alpha_p = 0.98\n t_g = 0.9 # %\n t_f = 0.85 # %\n # print(t)\n Irr = 0.0426*(t) + 1.38E-6*(t)**2 - 7.94E-11*(t)**3 + 7.3E-16*(t)**4\n # Irr = 600\n x_b = 0.065 # insulation thickness meters\n x_s = 0.065 # insulation thickness meters\n\n k_i = 1.0 # thermal conductivity of side materials, foamed glass # W/mK\n h_rad_g2_g1 = 8\n h_cov_g2_g1 = 20\n h_rad_g1_sky = 8\n h_rad_g1_amb = 8\n h_rad_p_g2 = 20\n h_cov_a_g2 = 8\n h_cov_f_a = 8\n h_cov_p_f = 30\n h_cov_g1_amb = 65\n\n M_f = m_f * 4.187\n M_g1 = 1150 * (A_c * 0.001) * 1.67 # assuming acrylic\n M_g2 = M_g1\n M_p = 8960 * (A_c * 0.065) * 1.0\n # assuming coper\n M_a = 0.718 * m_a\n\n # assign each ODE to a vector element\n T_g1 = x[0]\n T_g2 = x[1]\n T_a = x[2]\n T_p = x[3]\n T_f = x[4]\n\n Q_rad_g2_g1 = h_rad_g2_g1 * A_c * (T_g2 - T_g1)\n Q_cov_g2_g1 = h_cov_g2_g1 * A_c * (T_g2 - T_g1)\n Q_rad_g1_sky = h_rad_g1_sky * A_c * (T_g1 - T_sky)\n Q_cov_g1_amb = h_rad_g1_amb * A_c * (T_g1 - T_amb)\n Q_rad_p_g2 = h_rad_p_g2 * A_c * (T_p - T_g2)\n Q_cov_a_g2 = h_cov_a_g2 * A_c * (T_a - T_g2)\n Q_cov_f_a = h_cov_f_a * (A_c) * (T_f - T_a)\n Q_cov_p_f = h_cov_p_f * A_c * (T_p - T_f)\n U_base = ((x_b/k_i) + 1/(h_cov_g1_amb))**(-1)\n U_side = ((x_s/k_i) + 1/(h_cov_g1_amb))**(-1)\n Q_amb_loss = (U_base*A_c + U_side*A_s)*(T_p - T_amb)\n\n\n\n # define each ODE\n dT_g1dt = (Irr * alpha_g * A_c + Q_rad_g2_g1 + Q_cov_g2_g1 - Q_rad_g1_sky - Q_cov_g1_amb) / M_g1\n dT_g2dt = (Irr * alpha_g * t_g * A_c + Q_rad_p_g2 + Q_cov_a_g2 - Q_rad_g2_g1) / M_g2\n dT_adt = (Q_cov_f_a - Q_cov_a_g2)/M_a\n dT_pdt = (Irr * alpha_p * t_g**2 * t_f * A_c - Q_rad_p_g2 - Q_amb_loss - Q_cov_p_f) / M_p\n dT_fdt = (Q_cov_p_f + Q_cov_f_a) / M_f\n\n return [dT_g1dt, dT_g2dt, dT_adt, dT_pdt, dT_fdt]\n\n# x0 = [298, 298, 298, 298, 285]\n\n\n# # test the defined ODES\n# print(boxODE(x=x0, t=0, m_f=m_f, m_a=m_a, A_s=A_s))\n\n\n# # declare a time vector (time window)\n# t = np.linspace(0,54000,1000)\n# x = odeint(boxODE,x0,t, args=(m_f, m_a, A_s))\n\n# Tf= x[:,4]\n# Tp = x[:,3]\n\n# # plot the results\n# 
plt.plot((t/3600)+5.8,Tf_2, label='fluid')\n# # plt.plot(t/3600,Tp, label='plate')\n# plt.legend()\n# plt.ylim(298, 340)\n# plt.xlim(0,24)\n# plt.show()\n\n#%%\n\n# xs = np.arange(27000,28201,1)\n# ys = 0.0226*xs - 295\n\n# #%%\n\n# fig = plt.figure(figsize=(5,5))\n# fig, ax1 = plt.subplots()\n\n# plt.plot((t/3600)+5.8,Tf, color='r')\n# plt.plot(xs/3600 + 5.8, ys, color='r')\n# plt.plot(np.arange(27000,27601,1)/3600+5.8, )\n# plt.hlines(338, -100, 100, linestyle=':', color='k')\n# plt.text(6.5, 339, 'Pasteurization Temperature')\n\n# ax1.tick_params(direction='in', length=7,top=True, right=True, left=True)\n# minor_locator_x = AutoMinorLocator(2)\n# minor_locator_y = AutoMinorLocator(2)\n# ax1.get_xaxis().set_minor_locator(minor_locator_x)\n# ax1.get_yaxis().set_minor_locator(minor_locator_y)\n# # rotate and align the tick labels so they look better\n# plt.tick_params(which='minor',\n# direction='in',\n# length=4,\n# right=True,\n# left=True,\n# top=True)\n# plt.xlim(6,21)\n# plt.xlabel('Hour of Day')\n# plt.ylim(298, 350)\n# plt.ylabel('Water Temperature (K)')\n\n# plt.savefig('Figures/comb_img.png', dpi=300)" ]
[ [ "numpy.sqrt" ] ]
rivei/pm4py_with_dash
[ "05ed524c11b44932783864a4465d400ea1300910" ]
[ "python/pm4pyPlus.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 1 22:17:20 2019\n\n@author: Wei\n\"\"\"\n\n#from dash_app import default_log as log\nimport pandas as pd\nimport numpy as np\n#import pytz\nfrom datetime import datetime, tzinfo,timedelta\n\nfrom pm4py.statistics.traces.log import case_statistics\nfrom pm4py.algo.filtering.log.attributes import attributes_filter\n\nMAX_TRACES = 9999\n\ndef filtered_log_df(log, top_trace_n = MAX_TRACES):\n# if top_trace_n == MAX_TRACES:\n# traces_with_count = case_statistics.get_variant_statistics(log) #parameters=(\"max_variants_to_return\":5)\n# #df = pd.DataFrame.from_dict([dict(x) for x in traces_with_count])\n# df = pd.DataFrame()\n# df.columns = ['caseid','actid','actseq','resid','ts','sT']\n# else:\n n_cases = 0\n caseid = []\n actid = []\n actseq = []\n resid = []\n ts = []\n startTime = []\n for case in log:\n actidx = 0\n startT = case[0]['time:timestamp'].timestamp()\n for event in case:\n caseid.append(n_cases)\n actid.append(event['concept:name'])\n actseq.append(actidx)\n resid.append(event['org:resource'])\n ts.append(event['time:timestamp'].timestamp())\n startTime.append(event['time:timestamp'].timestamp() - startT)\n actidx = actidx + 1\n n_cases = n_cases + 1\n df = pd.DataFrame({'caseid': caseid, \n 'actid':actid, \n 'actseq':actseq, \n 'resid':resid, \n 'ts':ts, \n 'sT': startTime}) \n df['preid'] = df['actid'].shift(1)\n df['preid'] = df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)\n\n return df\n\ndef n_cases(log, top_trace_n = MAX_TRACES):\n if top_trace_n == MAX_TRACES:\n df = filtered_log_df(log)\n else:\n df = filtered_log_df(log, top_trace_n)\n return len(df['caseid'].unique())\n \n\ndef n_events(log):\n df = filtered_log_df(log)\n return len(df)\n \ndef n_activities(log):\n df = filtered_log_df(log)\n return len(df['actid'].unique())\n\ndef n_resources(log):\n df = filtered_log_df(log)\n return len(df['resid'].unique())\n\ndef n_traces(log, top_trace_n = MAX_TRACES):\n if top_trace_n == MAX_TRACES:\n traces_with_count = case_statistics.get_variant_statistics(log) #parameters=(\"max_variants_to_return\":5)\n else:\n traces_with_count = case_statistics.get_variant_statistics(log, parameters={\"max_variants_to_return\":top_trace_n})\n \n df = pd.DataFrame.from_dict([dict(x) for x in traces_with_count])\n return len(df)\n\ndef acts_df(log):\n activities = attributes_filter.get_attribute_values(log, \"concept:name\")\n actid = []\n cnt = []\n for act0 in activities.items():\n actid.append(act0[0])\n cnt.append(act0[1]) \n return pd.DataFrame({'id':actid, 'cnt':cnt})\n\ndef traces_df(log):\n traces = case_statistics.get_variant_statistics(log) \n tid = []\n actid = []\n actseq = []\n cnt = []\n n_traces = 0\n for trace in traces:\n actidx = 0\n acts = trace['variant']\n for s in acts.split(','):\n tid.append(n_traces)\n actid.append(s)\n actseq.append(actidx)\n cnt.append(trace['count'])\n actidx = actidx+1\n n_traces = n_traces + 1\n \n trace_df = pd.DataFrame({'id': tid, 'actid': actid, 'actseq':actseq, 'cnt':cnt})\n trace_df['preid'] = trace_df['actid'].shift(1)\n trace_df['preid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1) \n trace_df['pre_post'] = trace_df.apply(lambda row: row['preid']+\"@@\"+row['actid'], axis = 1)\n \n# def actid2num(sactid, df):\n# nactid = -1\n# for i in range(0, len(df)):\n# if df['id'][i] == sactid:\n# nactid = i/len(df)\n# return nactid\n# \n# act_df = acts_df(log)\n# trace_df['nactid'] = trace_df['actid'].apply(lambda 
i:actid2num(i, act_df))\n return trace_df\n \n \ndef sort_df(log):\n df = filtered_log_df(log) \n dur = np.zeros(len(df))\n evS = 0\n evE = -1\n for i in range(0, len(df)):\n if df['actseq'][i] == 0:\n evS = i\n if i < len(df) - 1:\n if df['actseq'][i + 1] == 0:\n evE = i\n else:\n evE = i\n \n if evE >= evS:\n for j in range(evS, evE+1):\n dur[j] = df['sT'][evE-1]\n\n df['dur'] = dur\n \n sort_df = df.sort_values(by=['dur','caseid', 'actseq'], ascending = [0,1,1])\n \n sortid = 0\n sid = np.zeros(len(sort_df))\n for i in range(1, len(sort_df)):\n if i < len(sort_df) - 1:\n if sort_df.iloc[i,:]['caseid'] != sort_df.iloc[i-1,:]['caseid']:\n sortid = sortid + 1\n \n sid[i] = sortid\n \n sort_df['sid'] = sid\n return sort_df\n\ndef mtx_df(log):\n df = traces_df(log)\n prelist = (df['preid'].unique())\n actlist = (df['actid'].unique())\n dff = pd.DataFrame(columns=prelist,index = actlist)\n# dff.columns = actlist\n# dff.index = prelist\n\n mtxdf1 = df.groupby('pre_post')['cnt'].sum() #agg(['sum','count','mean'])\n #mtxdf1['abs'] = mtxdf1['sum']/mtxdf1['count']\n# mtxdf= pd.DataFrame({'pre_post':mtxdf1.index, 'cnt': list(mtxdf1)})\n \n for s in mtxdf1.index:\n a = s.split(\"@@\")\n if len(a) != 2:\n print(a[0], a[1])\n else:\n dff[a[0]][a[1]] = mtxdf1[s]\n\n return dff\n \n#\n#activities = log_attributes_filter.get_attribute_values(log, \"concept:name\")\n#actid = []\n#cnt = []\n#for act0 in activities.items():\n# actid.append(act0[0])\n# cnt.append(act0[1])\n#\n#act_df = pd.DataFrame({'id':actid, 'cnt':cnt})\n#\n#n_activities = len(act_df)\n#\n#from pm4py.statistics.traces.log import case_statistics\n#traces = case_statistics.get_variant_statistics(log)#, parameters={\"max_variants_to_return\": 5})\n#\n##acts = []\n##cnt = []\n##tid = []\n##idx = 0\n##for trace in traces:\n## tid.append(idx)\n## acts.append(trace['variant'])\n## cnt.append(trace['count'])\n## idx = idx + 1\n## \n##trace_df = pd.DataFrame({'id': tid, 'acts': acts, 'cnt':cnt})\n##n_traces = len(trace_df)\n#\n#tid = []\n#actid = []\n#actseq = []\n#cnt = []\n#n_traces = 0\n#for trace in traces:\n# actidx = 0\n# acts = trace['variant']\n# for s in acts.split(','):\n# tid.append(n_traces)\n# actid.append(s)\n# actseq.append(actidx)\n# cnt.append(trace['count'])\n# actidx = actidx+1\n# n_traces = n_traces + 1\n# \n#trace_df = pd.DataFrame({'id': tid, 'actid': actid, 'actseq':actseq, 'cnt':cnt})\n#trace_df['preid'] = trace_df['actid'].shift(1)\n#trace_df['preid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)\n##trace_df['postid'] = trace_df['actid'].shift(1)\n##trace_df['postid'] = trace_df.apply(lambda row: row['preid'] if row['actseq']!=0 else 'START', axis = 1)\n#\n#trace_df['pre_post'] = trace_df.apply(lambda row: row['preid']+\"-\"+row['actid'], axis = 1)\n#\n#def actid2num(sactid, df):\n# nactid = -1\n# for i in range(0, len(df)):\n# if df['id'][i] == sactid:\n# nactid = i/len(df)\n# return nactid\n#\n##actid2num(\"Confirmation of receipt\", act_df)\n#\n#trace_df['nactid'] = trace_df['actid'].apply(lambda i:actid2num(i, act_df))\n#\n## matrix\n#df['pre_post'] = df.apply(lambda row: row['preid']+\"-\"+row['actid'], axis = 1)\n##mtxdf1 = pd.DataFrame({'ant':df['preid'],'con':df})\n#mtxdf1 = df[df['preid']!='START'].groupby('pre_post')['caseid'].count() #agg(['sum','count','mean'])\n##mtxdf1['abs'] = mtxdf1['sum']/mtxdf1['count']\n#mtxdf= pd.DataFrame({'pre_post':mtxdf1.index, 'cnt': list(mtxdf1)})\n#\n##roles Detection: related to resource vs activity?\n##from pm4py.algo.enhancement.roles 
import factory as roles_factory\n##roles = roles_factory.apply(log)\n#aaa\n" ]
[ [ "pandas.DataFrame" ] ]
guptarohit994/ECE143_group25_project
[ "e31d0425b2a6114eed6c55bdb0491c2c996b94be" ]
[ "statistical_analysis/gpa_scatter.py" ]
[ "\nimport helper\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd \n\ndef plot_gpa_scatter():\n \"\"\"Plotting scatterplot of grades expected and grade received, using the general department list\n \"\"\"\n # obtaining data\n department_df = helper.generate_depts_df(helper.general_dept_list)\n comp_criteria = [\"AvgGradeExpected\",\"AvgGradeReceived\"]\n\n # generating scatterplot graph\n lower_bound = 1.5\n upper_bound = 4.02\n ax = department_df.plot.scatter(x=comp_criteria[0], y=comp_criteria[1], c= \"grey\",ylim=(lower_bound,upper_bound),xlim=(lower_bound,upper_bound), figsize=(10,10), fontsize=20, alpha = 0.3)\n ax.set_xlabel(\"Average Grade Expected\", fontsize = 20)\n ax.set_ylabel(\"Average Grade Received\", fontsize = 20)\n\n # computing least squares best fit line and adding it onto graph\n y = department_df[\"AvgGradeReceived\"]\n x = department_df[\"AvgGradeExpected\"]\n A = np.vstack([x, np.ones(len(x))]).T\n m, c = np.linalg.lstsq(A, y, rcond=None)[0]\n print(\"m:{}, c:{}\".format(m,c))\n ax.plot(np.linspace(lower_bound,4,10),np.linspace(lower_bound,4,10),c=\"red\")\n ax.plot(np.linspace(lower_bound,4,10),(np.linspace(lower_bound,4,10)*m) + c,c=\"blue\")" ]
[ [ "numpy.linalg.lstsq", "numpy.linspace" ] ]
AK391/stylegan_xl
[ "9854d3d0e96eccaad10cab22379c018e1e031cf0" ]
[ "viz/renderer.py" ]
[ "# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\nimport sys\nimport copy\nimport traceback\nimport numpy as np\nimport torch\nimport torch.fft\nimport torch.nn\nimport matplotlib.cm\nimport dnnlib\nfrom torch_utils.ops import upfirdn2d\nimport legacy # pylint: disable=import-error\n\n#----------------------------------------------------------------------------\n\nclass CapturedException(Exception):\n def __init__(self, msg=None):\n if msg is None:\n _type, value, _traceback = sys.exc_info()\n assert value is not None\n if isinstance(value, CapturedException):\n msg = str(value)\n else:\n msg = traceback.format_exc()\n assert isinstance(msg, str)\n super().__init__(msg)\n\n#----------------------------------------------------------------------------\n\nclass CaptureSuccess(Exception):\n def __init__(self, out):\n super().__init__()\n self.out = out\n\n#----------------------------------------------------------------------------\n\ndef _sinc(x):\n y = (x * np.pi).abs()\n z = torch.sin(y) / y.clamp(1e-30, float('inf'))\n return torch.where(y < 1e-30, torch.ones_like(x), z)\n\ndef _lanczos_window(x, a):\n x = x.abs() / a\n return torch.where(x < 1, _sinc(x), torch.zeros_like(x))\n\n#----------------------------------------------------------------------------\n\ndef _construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1):\n assert a <= amax < aflt\n mat = torch.as_tensor(mat).to(torch.float32)\n\n # Construct 2D filter taps in input & output coordinate spaces.\n taps = ((torch.arange(aflt * up * 2 - 1, device=mat.device) + 1) / up - aflt).roll(1 - aflt * up)\n yi, xi = torch.meshgrid(taps, taps)\n xo, yo = (torch.stack([xi, yi], dim=2) @ mat[:2, :2].t()).unbind(2)\n\n # Convolution of two oriented 2D sinc filters.\n fi = _sinc(xi * cutoff_in) * _sinc(yi * cutoff_in)\n fo = _sinc(xo * cutoff_out) * _sinc(yo * cutoff_out)\n f = torch.fft.ifftn(torch.fft.fftn(fi) * torch.fft.fftn(fo)).real\n\n # Convolution of two oriented 2D Lanczos windows.\n wi = _lanczos_window(xi, a) * _lanczos_window(yi, a)\n wo = _lanczos_window(xo, a) * _lanczos_window(yo, a)\n w = torch.fft.ifftn(torch.fft.fftn(wi) * torch.fft.fftn(wo)).real\n\n # Construct windowed FIR filter.\n f = f * w\n\n # Finalize.\n c = (aflt - amax) * up\n f = f.roll([aflt * up - 1] * 2, dims=[0,1])[c:-c, c:-c]\n f = torch.nn.functional.pad(f, [0, 1, 0, 1]).reshape(amax * 2, up, amax * 2, up)\n f = f / f.sum([0,2], keepdim=True) / (up ** 2)\n f = f.reshape(amax * 2 * up, amax * 2 * up)[:-1, :-1]\n return f\n\n#----------------------------------------------------------------------------\n\ndef _apply_affine_transformation(x, mat, up=4, **filter_kwargs):\n _N, _C, H, W = x.shape\n mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device)\n\n # Construct filter.\n f = _construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs)\n assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1\n p = f.shape[0] // 2\n\n # Construct sampling grid.\n theta = mat.inverse()\n theta[:2, 2] *= 2\n theta[0, 2] += 1 / up / W\n theta[1, 2] += 1 / up / H\n theta[0, :] *= W / (W + p / up * 2)\n theta[1, :] *= 
H / (H + p / up * 2)\n theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1])\n g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False)\n\n # Resample image.\n y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p)\n z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False)\n\n # Form mask.\n m = torch.zeros_like(y)\n c = p * 2 + 1\n m[:, :, c:-c, c:-c] = 1\n m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False)\n return z, m\n\n#----------------------------------------------------------------------------\n\nclass Renderer:\n def __init__(self):\n self._device = torch.device('cuda')\n self._pkl_data = dict() # {pkl: dict | CapturedException, ...}\n self._networks = dict() # {cache_key: torch.nn.Module, ...}\n self._pinned_bufs = dict() # {(shape, dtype): torch.Tensor, ...}\n self._cmaps = dict() # {name: torch.Tensor, ...}\n self._is_timing = False\n self._start_event = torch.cuda.Event(enable_timing=True)\n self._end_event = torch.cuda.Event(enable_timing=True)\n self._net_layers = dict() # {cache_key: [dnnlib.EasyDict, ...], ...}\n\n def render(self, **args):\n self._is_timing = True\n self._start_event.record(torch.cuda.current_stream(self._device))\n res = dnnlib.EasyDict()\n try:\n self._render_impl(res, **args)\n except:\n res.error = CapturedException()\n self._end_event.record(torch.cuda.current_stream(self._device))\n if 'image' in res:\n res.image = self.to_cpu(res.image).numpy()\n if 'stats' in res:\n res.stats = self.to_cpu(res.stats).numpy()\n if 'error' in res:\n res.error = str(res.error)\n if self._is_timing:\n self._end_event.synchronize()\n res.render_time = self._start_event.elapsed_time(self._end_event) * 1e-3\n self._is_timing = False\n return res\n\n def get_network(self, pkl, key, **tweak_kwargs):\n data = self._pkl_data.get(pkl, None)\n if data is None:\n print(f'Loading \"{pkl}\"... 
', end='', flush=True)\n try:\n with dnnlib.util.open_url(pkl, verbose=False) as f:\n data = legacy.load_network_pkl(f)\n print('Done.')\n except:\n data = CapturedException()\n print('Failed!')\n self._pkl_data[pkl] = data\n self._ignore_timing()\n if isinstance(data, CapturedException):\n raise data\n\n orig_net = data[key]\n cache_key = (orig_net, self._device, tuple(sorted(tweak_kwargs.items())))\n net = self._networks.get(cache_key, None)\n if net is None:\n try:\n net = copy.deepcopy(orig_net)\n net = self._tweak_network(net, **tweak_kwargs)\n net.to(self._device)\n except:\n net = CapturedException()\n self._networks[cache_key] = net\n self._ignore_timing()\n if isinstance(net, CapturedException):\n raise net\n return net\n\n def _tweak_network(self, net):\n # Print diagnostics.\n #for name, value in misc.named_params_and_buffers(net):\n # if name.endswith('.magnitude_ema'):\n # value = value.rsqrt().numpy()\n # print(f'{name:<50s}{np.min(value):<16g}{np.max(value):g}')\n # if name.endswith('.weight') and value.ndim == 4:\n # value = value.square().mean([1,2,3]).sqrt().numpy()\n # print(f'{name:<50s}{np.min(value):<16g}{np.max(value):g}')\n return net\n\n def _get_pinned_buf(self, ref):\n key = (tuple(ref.shape), ref.dtype)\n buf = self._pinned_bufs.get(key, None)\n if buf is None:\n buf = torch.empty(ref.shape, dtype=ref.dtype).pin_memory()\n self._pinned_bufs[key] = buf\n return buf\n\n def to_device(self, buf):\n return self._get_pinned_buf(buf).copy_(buf).to(self._device)\n\n def to_cpu(self, buf):\n return self._get_pinned_buf(buf).copy_(buf).clone()\n\n def _ignore_timing(self):\n self._is_timing = False\n\n def _apply_cmap(self, x, name='viridis'):\n cmap = self._cmaps.get(name, None)\n if cmap is None:\n cmap = matplotlib.cm.get_cmap(name)\n cmap = cmap(np.linspace(0, 1, num=1024), bytes=True)[:, :3]\n cmap = self.to_device(torch.from_numpy(cmap))\n self._cmaps[name] = cmap\n hi = cmap.shape[0] - 1\n x = (x * hi + 0.5).clamp(0, hi).to(torch.int64)\n x = torch.nn.functional.embedding(x, cmap)\n return x\n\n def _render_impl(self, res,\n pkl = None,\n w0_seeds = [[0, 1]],\n stylemix_idx = [],\n stylemix_seed = 0,\n trunc_psi = 1,\n trunc_cutoff = 0,\n random_seed = 0,\n noise_mode = 'const',\n force_fp32 = False,\n layer_name = None,\n sel_channels = 3,\n base_channel = 0,\n img_scale_db = 0,\n img_normalize = False,\n fft_show = False,\n fft_all = True,\n fft_range_db = 50,\n fft_beta = 8,\n input_transform = None,\n untransform = False,\n ):\n # Dig up network details.\n G = self.get_network(pkl, 'G_ema')\n res.img_resolution = G.img_resolution\n res.num_ws = G.num_ws\n res.has_noise = any('noise_const' in name for name, _buf in G.synthesis.named_buffers())\n res.has_input_transform = (hasattr(G.synthesis, 'input') and hasattr(G.synthesis.input, 'transform'))\n\n # Set input transform.\n if res.has_input_transform:\n m = np.eye(3)\n try:\n if input_transform is not None:\n m = np.linalg.inv(np.asarray(input_transform))\n except np.linalg.LinAlgError:\n res.error = CapturedException()\n G.synthesis.input.transform.copy_(torch.from_numpy(m))\n\n # Generate random latents.\n all_seeds = [seed for seed, _weight in w0_seeds] + [stylemix_seed]\n all_seeds = list(set(all_seeds))\n all_zs = np.zeros([len(all_seeds), G.z_dim], dtype=np.float32)\n all_cs = np.zeros([len(all_seeds), G.c_dim], dtype=np.float32)\n for idx, seed in enumerate(all_seeds):\n rnd = np.random.RandomState(seed)\n all_zs[idx] = rnd.randn(G.z_dim)\n cls = rnd.randint(G.c_dim)\n if G.c_dim > 0:\n all_cs[idx, cls] 
= 1\n\n # Run mapping network.\n w_avg = G.mapping.w_avg[cls]\n all_zs = self.to_device(torch.from_numpy(all_zs))\n all_cs = self.to_device(torch.from_numpy(all_cs))\n all_ws = G.mapping(z=all_zs, c=all_cs, truncation_psi=trunc_psi, truncation_cutoff=trunc_cutoff) - w_avg\n all_ws = dict(zip(all_seeds, all_ws))\n\n # Calculate final W.\n w = torch.stack([all_ws[seed] * weight for seed, weight in w0_seeds]).sum(dim=0, keepdim=True)\n stylemix_idx = [idx for idx in stylemix_idx if 0 <= idx < G.num_ws]\n if len(stylemix_idx) > 0:\n w[:, stylemix_idx] = all_ws[stylemix_seed][np.newaxis, stylemix_idx]\n w += w_avg\n\n # Run synthesis network.\n synthesis_kwargs = dnnlib.EasyDict(noise_mode=noise_mode, force_fp32=force_fp32)\n torch.manual_seed(random_seed)\n out, layers = self.run_synthesis_net(G.synthesis, w, capture_layer=layer_name, **synthesis_kwargs)\n\n # Update layer list.\n cache_key = (G.synthesis, tuple(sorted(synthesis_kwargs.items())))\n if cache_key not in self._net_layers:\n if layer_name is not None:\n torch.manual_seed(random_seed)\n _out, layers = self.run_synthesis_net(G.synthesis, w, **synthesis_kwargs)\n self._net_layers[cache_key] = layers\n res.layers = self._net_layers[cache_key]\n\n # Untransform.\n if untransform and res.has_input_transform:\n out, _mask = _apply_affine_transformation(out.to(torch.float32), G.synthesis.input.transform, amax=6) # Override amax to hit the fast path in upfirdn2d.\n\n # Select channels and compute statistics.\n out = out[0].to(torch.float32)\n if sel_channels > out.shape[0]:\n sel_channels = 1\n base_channel = max(min(base_channel, out.shape[0] - sel_channels), 0)\n sel = out[base_channel : base_channel + sel_channels]\n res.stats = torch.stack([\n out.mean(), sel.mean(),\n out.std(), sel.std(),\n out.norm(float('inf')), sel.norm(float('inf')),\n ])\n\n # Scale and convert to uint8.\n img = sel\n if img_normalize:\n img = img / img.norm(float('inf'), dim=[1,2], keepdim=True).clip(1e-8, 1e8)\n img = img * (10 ** (img_scale_db / 20))\n img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8).permute(1, 2, 0)\n res.image = img\n\n # FFT.\n if fft_show:\n sig = out if fft_all else sel\n sig = sig.to(torch.float32)\n sig = sig - sig.mean(dim=[1,2], keepdim=True)\n sig = sig * torch.kaiser_window(sig.shape[1], periodic=False, beta=fft_beta, device=self._device)[None, :, None]\n sig = sig * torch.kaiser_window(sig.shape[2], periodic=False, beta=fft_beta, device=self._device)[None, None, :]\n fft = torch.fft.fftn(sig, dim=[1,2]).abs().square().sum(dim=0)\n fft = fft.roll(shifts=[fft.shape[0] // 2, fft.shape[1] // 2], dims=[0,1])\n fft = (fft / fft.mean()).log10() * 10 # dB\n fft = self._apply_cmap((fft / fft_range_db + 1) / 2)\n res.image = torch.cat([img.expand_as(fft), fft], dim=1)\n\n @staticmethod\n def run_synthesis_net(net, *args, capture_layer=None, **kwargs): # => out, layers\n submodule_names = {mod: name for name, mod in net.named_modules()}\n unique_names = set()\n layers = []\n\n def module_hook(module, _inputs, outputs):\n outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]\n outputs = [out for out in outputs if isinstance(out, torch.Tensor) and out.ndim in [4, 5]]\n for idx, out in enumerate(outputs):\n if out.ndim == 5: # G-CNN => remove group dimension.\n out = out.mean(2)\n name = submodule_names[module]\n if name == '':\n name = 'output'\n if len(outputs) > 1:\n name += f':{idx}'\n if name in unique_names:\n suffix = 2\n while f'{name}_{suffix}' in unique_names:\n suffix += 1\n name += f'_{suffix}'\n 
unique_names.add(name)\n shape = [int(x) for x in out.shape]\n dtype = str(out.dtype).split('.')[-1]\n layers.append(dnnlib.EasyDict(name=name, shape=shape, dtype=dtype))\n if name == capture_layer:\n raise CaptureSuccess(out)\n\n hooks = [module.register_forward_hook(module_hook) for module in net.modules()]\n try:\n out = net(*args, **kwargs)\n except CaptureSuccess as e:\n out = e.out\n for hook in hooks:\n hook.remove()\n return out, layers\n\n#----------------------------------------------------------------------------\n" ]
[ [ "torch.empty", "torch.stack", "torch.as_tensor", "torch.nn.functional.affine_grid", "numpy.asarray", "numpy.random.RandomState", "torch.cuda.Event", "torch.nn.functional.grid_sample", "torch.meshgrid", "torch.nn.functional.pad", "torch.sin", "torch.from_numpy", "torch.cuda.current_stream", "torch.arange", "torch.device", "numpy.linspace", "torch.ones_like", "numpy.eye", "torch.manual_seed", "torch.fft.fftn", "torch.kaiser_window", "torch.nn.functional.embedding", "torch.zeros_like" ] ]
jggatter/cumulus
[ "1dfd9dfce5a44ff867859db6f24a356f72c6ccdd" ]
[ "docker/demultiplexing/demuxlet/generate_zarr.py" ]
[ "import argparse\n\nimport pegasusio as pio\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description='Merge demuxlet result with gene-count matrix.')\nparser.add_argument('demux_res', metavar = 'demux_result.best', help = 'Demuxlet demultiplexing results.')\nparser.add_argument('raw_mat', metavar = 'raw_feature_bc_matrix.h5', help = 'Raw gene count matrix in 10x format.')\nparser.add_argument('out_file', metavar = 'output_result.zarr', help = 'Output zarr file.')\nargs = parser.parse_args()\n\ndemux_type_dict = {'SNG': 'singlet', 'DBL': 'doublet', 'AMB': 'unknown'}\n\ndef write_output(assignment_file: str, input_mat_file: str, output_zarr_file: str) -> None:\n df = pd.read_csv(assignment_file, sep = '\\t', header = 0, index_col = 'BARCODE')\n df.index = pd.Index([x[:-2] for x in df.index])\n df['demux_type'] = df['DROPLET.TYPE'].apply(lambda s: demux_type_dict[s])\n df['assignment'] = ''\n df.loc[df['demux_type'] == 'singlet', 'assignment'] = df.loc[df['demux_type'] == 'singlet', 'SNG.BEST.GUESS']\n df.loc[df['demux_type'] == 'doublet', 'assignment'] = df.loc[df['demux_type'] == 'doublet', 'DBL.BEST.GUESS'].apply(lambda s: ','.join(s.split(',')[:-1]))\n\n data = pio.read_input(input_mat_file)\n data.obs['demux_type'] = ''\n data.obs['assignment'] = ''\n\n idx = data.obs_names.isin(df.index)\n barcodes = data.obs_names[idx]\n df_valid = df.loc[barcodes, ['demux_type', 'assignment']]\n data.obs.loc[idx, 'demux_type'] = df_valid['demux_type'].values\n data.obs.loc[idx, 'assignment'] = df_valid['assignment'].values\n\n pio.write_output(data, output_zarr_file, zarr_zipstore = True)\n\n\nif __name__ == '__main__':\n write_output(args.demux_res, args.raw_mat, args.out_file)" ]
[ [ "pandas.read_csv", "pandas.Index" ] ]
HuangHaoyu1997/gym-miniworld
[ "77dc24bf1b1ca8c2cfefadfe3e35a0deb2d08a1a" ]
[ "gym_miniworld/miniworld.py" ]
[ "import math\nfrom enum import IntEnum\nimport numpy as np\nimport gym\nfrom gym import spaces\nfrom .random import *\nfrom .opengl import *\nfrom .objmesh import *\nfrom .entity import *\nfrom .math import *\nfrom .params import *\n\n# Default wall height for room\nDEFAULT_WALL_HEIGHT=2.74\n\n# Texture size/density in texels/meter\nTEX_DENSITY = 512\n\ndef gen_texcs_wall(\n tex,\n min_x,\n min_y,\n width,\n height\n):\n \"\"\"\n Generate texture coordinates for a wall quad\n \"\"\"\n\n xc = (TEX_DENSITY / tex.width)\n yc = (TEX_DENSITY / tex.height)\n\n min_u = (min_x) * xc\n max_u = (min_x + width) * xc\n min_v = (min_y) * yc\n max_v = (min_y + height) * yc\n\n return np.array(\n [\n [min_u, min_v],\n [min_u, max_v],\n [max_u, max_v],\n [max_u, min_v],\n ],\n dtype=np.float32\n )\n\ndef gen_texcs_floor(\n tex,\n poss\n):\n \"\"\"\n Generate texture coordinates for the floor or ceiling\n This is done by mapping x,z positions directly to texture\n coordinates\n \"\"\"\n\n texc_mul = np.array(\n [\n TEX_DENSITY / tex.width,\n TEX_DENSITY / tex.height\n ],\n dtype=float\n )\n\n coords = np.stack([poss[:,0], poss[:,2]], axis=1) * texc_mul\n\n return coords\n\nclass Room:\n \"\"\"\n Represent an individual room and its contents\n \"\"\"\n\n def __init__(\n self,\n outline,\n wall_height=DEFAULT_WALL_HEIGHT,\n floor_tex='floor_tiles_bw',\n wall_tex='concrete',\n ceil_tex='concrete_tiles',\n no_ceiling=False\n ):\n # The outlien should have shape Nx2\n assert len(outline.shape) == 2\n assert outline.shape[1] == 2\n assert outline.shape[0] >= 3\n\n # Add a Y coordinate to the outline points\n outline = np.insert(outline, 1, 0, axis=1)\n\n # Number of outline vertices / walls\n self.num_walls = outline.shape[0]\n\n # List of 2D points forming the outline of the room\n # Shape is Nx3\n self.outline = outline\n\n # Compute the min and max x, z extents\n self.min_x = self.outline[:, 0].min()\n self.max_x = self.outline[:, 0].max()\n self.min_z = self.outline[:, 2].min()\n self.max_z = self.outline[:, 2].max()\n\n # Compute midpoint coordinates\n self.mid_x = (self.max_x + self.min_x) / 2\n self.mid_z = (self.max_z + self.min_z) / 2\n\n # Compute approximate surface area\n self.area = (self.max_x - self.min_x) * (self.max_z - self.min_z)\n\n # Compute room edge directions and normals\n # Compute edge vectors (p1 - p0)\n # For the first point, p0 is the last\n # For the last point, p0 is p_n-1\n next_pts = np.concatenate([self.outline[1:], np.expand_dims(self.outline[0], axis=0)], axis=0)\n self.edge_dirs = next_pts - self.outline\n self.edge_dirs = (self.edge_dirs.T / np.linalg.norm(self.edge_dirs, axis=1)).T\n self.edge_norms = -np.cross(self.edge_dirs, Y_VEC)\n self.edge_norms = (self.edge_norms.T / np.linalg.norm(self.edge_norms, axis=1)).T\n\n # Height of the room walls\n self.wall_height = wall_height\n\n # No ceiling flag\n self.no_ceiling = no_ceiling\n\n # Texture names\n self.wall_tex_name = wall_tex\n self.floor_tex_name = floor_tex\n self.ceil_tex_name = ceil_tex\n\n # Lists of portals, indexed by wall/edge index\n self.portals = [[] for i in range(self.num_walls)]\n\n # List of neighbor rooms\n # Same length as list of portals\n self.neighbors = []\n\n def add_portal(\n self,\n edge,\n start_pos=None,\n end_pos=None,\n min_x=None,\n max_x=None,\n min_z=None,\n max_z=None,\n min_y=0,\n max_y=None\n ):\n \"\"\"\n Create a new portal/opening in a wall of this room\n \"\"\"\n\n if max_y == None:\n max_y = self.wall_height\n\n assert edge <= self.num_walls\n assert max_y > min_y\n\n # Get the 
edge points, compute the direction vector\n e_p0 = self.outline[edge]\n e_p1 = self.outline[(edge+1) % self.num_walls]\n e_len = np.linalg.norm(e_p1 - e_p0)\n e_dir = (e_p1 - e_p0) / e_len\n x0, _, z0 = e_p0\n x1, _, z1 = e_p1\n dx, _, dz = e_dir\n\n # If the portal extents are specified by x coordinates\n if min_x != None:\n assert min_z == None and max_z == None\n assert start_pos == None and end_pos == None\n assert x0 != x1\n\n m0 = (min_x - x0) / dx\n m1 = (max_x - x0) / dx\n\n if m1 < m0:\n m0, m1 = m1, m0\n\n start_pos, end_pos = m0, m1\n\n # If the portal extents are specified by z coordinates\n elif min_z != None:\n assert min_x == None and max_x == None\n assert start_pos == None and end_pos == None\n assert z0 != z1\n\n m0 = (min_z - z0) / dz\n m1 = (max_z - z0) / dz\n\n if m1 < m0:\n m0, m1 = m1, m0\n\n start_pos, end_pos = m0, m1\n\n else:\n assert min_x == None and max_x == None\n assert min_z == None and max_z == None\n\n assert end_pos > start_pos\n assert start_pos >= 0, \"portal outside of wall extents\"\n assert end_pos <= e_len, \"portal outside of wall extents\"\n\n self.portals[edge].append({\n 'start_pos': start_pos,\n 'end_pos': end_pos,\n 'min_y': min_y,\n 'max_y': max_y\n })\n\n # Sort the portals by start position\n self.portals[edge].sort(key=lambda e: e['start_pos'])\n\n return start_pos, end_pos\n\n def point_inside(self, p):\n \"\"\"\n Test if a point is inside the room\n \"\"\"\n\n # Vector from edge start to test point\n ap = p - self.outline\n\n # Compute the dot products of normals to AP vectors\n dotNAP = np.sum(self.edge_norms * ap, axis=1)\n\n # The point is inside if all the dot products are greater than zero\n return np.all(np.greater(dotNAP, 0))\n\n def _gen_static_data(self, params, rng):\n \"\"\"\n Generate polygons and static data for this room\n Needed for rendering and collision detection\n Note: the wall polygons are quads, but the floor and\n ceiling can be arbitrary n-gons\n \"\"\"\n\n # Load the textures and do texture randomization\n self.wall_tex = Texture.get(self.wall_tex_name, rng)\n self.floor_tex = Texture.get(self.floor_tex_name, rng)\n self.ceil_tex = Texture.get(self.ceil_tex_name, rng)\n\n # Generate the floor vertices\n self.floor_verts = self.outline\n self.floor_texcs = gen_texcs_floor(\n self.floor_tex,\n self.floor_verts\n )\n\n # Generate the ceiling vertices\n # Flip the ceiling vertex order because of backface culling\n self.ceil_verts = np.flip(self.outline, axis=0) + self.wall_height * Y_VEC\n self.ceil_texcs = gen_texcs_floor(\n self.ceil_tex,\n self.ceil_verts\n )\n\n self.wall_verts = []\n self.wall_norms = []\n self.wall_texcs = []\n self.wall_segs = []\n\n def gen_seg_poly(\n edge_p0,\n side_vec,\n seg_start,\n seg_end,\n min_y,\n max_y\n ):\n if seg_end == seg_start:\n return\n\n if min_y == max_y:\n return\n\n s_p0 = edge_p0 + seg_start * side_vec\n s_p1 = edge_p0 + seg_end * side_vec\n\n # If this polygon starts at ground level, add a collidable segment\n if min_y == 0:\n self.wall_segs.append(np.array([s_p1, s_p0]))\n\n # Generate the vertices\n # Vertices are listed in counter-clockwise order\n self.wall_verts.append(s_p0 + min_y * Y_VEC)\n self.wall_verts.append(s_p0 + max_y * Y_VEC)\n self.wall_verts.append(s_p1 + max_y * Y_VEC)\n self.wall_verts.append(s_p1 + min_y * Y_VEC)\n\n # Compute the normal for the polygon\n normal = np.cross(s_p1 - s_p0, Y_VEC)\n normal = -normal / np.linalg.norm(normal)\n for i in range(4):\n self.wall_norms.append(normal)\n\n # Generate the texture coordinates\n texcs = 
gen_texcs_wall(\n self.wall_tex,\n seg_start,\n min_y,\n seg_end - seg_start,\n max_y - min_y\n )\n self.wall_texcs.append(texcs)\n\n # For each wall\n for wall_idx in range(self.num_walls):\n edge_p0 = self.outline[wall_idx, :]\n edge_p1 = self.outline[(wall_idx+1) % self.num_walls, :]\n wall_width = np.linalg.norm(edge_p1 - edge_p0)\n side_vec = (edge_p1 - edge_p0) / wall_width\n\n if len(self.portals[wall_idx]) > 0:\n seg_end = self.portals[wall_idx][0]['start_pos']\n else:\n seg_end = wall_width\n\n # Generate the first polygon (going up to the first portal)\n gen_seg_poly(\n edge_p0,\n side_vec,\n 0,\n seg_end,\n 0,\n self.wall_height\n )\n\n # For each portal in this wall\n for portal_idx, portal in enumerate(self.portals[wall_idx]):\n portal = self.portals[wall_idx][portal_idx]\n start_pos = portal['start_pos']\n end_pos = portal['end_pos']\n min_y = portal['min_y']\n max_y = portal['max_y']\n\n # Generate the bottom polygon\n gen_seg_poly(\n edge_p0,\n side_vec,\n start_pos,\n end_pos,\n 0,\n min_y\n )\n\n # Generate the top polygon\n gen_seg_poly(\n edge_p0,\n side_vec,\n start_pos,\n end_pos,\n max_y,\n self.wall_height\n )\n\n if portal_idx < len(self.portals[wall_idx]) - 1:\n next_portal = self.portals[wall_idx][portal_idx+1]\n next_portal_start = next_portal['start_pos']\n else:\n next_portal_start = wall_width\n\n # Generate the polygon going up to the next portal\n gen_seg_poly(\n edge_p0,\n side_vec,\n end_pos,\n next_portal_start,\n 0,\n self.wall_height\n )\n\n self.wall_verts = np.array(self.wall_verts)\n self.wall_norms = np.array(self.wall_norms)\n\n if len(self.wall_segs) > 0:\n self.wall_segs = np.array(self.wall_segs)\n else:\n self.wall_segs = np.array([]).reshape(0, 2, 3)\n\n if len(self.wall_texcs) > 0:\n self.wall_texcs = np.concatenate(self.wall_texcs)\n else:\n self.wall_texcs = np.array([]).reshape(0, 2)\n\n def _render(self):\n \"\"\"\n Render the static elements of the room\n \"\"\"\n\n glColor3f(1, 1, 1)\n\n # Draw the floor\n self.floor_tex.bind()\n glBegin(GL_POLYGON)\n glNormal3f(0, 1, 0)\n for i in range(self.floor_verts.shape[0]):\n glTexCoord2f(*self.floor_texcs[i, :])\n glVertex3f(*self.floor_verts[i, :])\n glEnd()\n\n # Draw the ceiling\n if not self.no_ceiling:\n self.ceil_tex.bind()\n glBegin(GL_POLYGON)\n glNormal3f(0, -1, 0)\n for i in range(self.ceil_verts.shape[0]):\n glTexCoord2f(*self.ceil_texcs[i, :])\n glVertex3f(*self.ceil_verts[i, :])\n glEnd()\n\n # Draw the walls\n self.wall_tex.bind()\n glBegin(GL_QUADS)\n for i in range(self.wall_verts.shape[0]):\n glNormal3f(*self.wall_norms[i, :])\n glTexCoord2f(*self.wall_texcs[i, :])\n glVertex3f(*self.wall_verts[i, :])\n glEnd()\n\nclass MiniWorldEnv(gym.Env):\n \"\"\"\n Base class for MiniWorld environments. 
Implements the procedural\n world generation and simulation logic.\n \"\"\"\n\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 30\n }\n\n # Enumeration of possible actions\n class Actions(IntEnum):\n # Turn left or right by a small amount\n turn_left = 0\n turn_right = 1\n\n # Move forward or back by a small amount\n move_forward = 2\n move_back = 3\n\n # Pick up or drop an object being carried\n pickup = 4\n drop = 5\n\n # Toggle/activate an object\n toggle = 6\n\n # Done completing task\n done = 7\n\n def __init__(\n self,\n max_episode_steps=1500,\n obs_width=80,\n obs_height=60,\n window_width=800,\n window_height=600,\n params=DEFAULT_PARAMS,\n domain_rand=False\n ):\n # Action enumeration for this environment\n self.actions = MiniWorldEnv.Actions\n\n # Actions are discrete integer values\n self.action_space = spaces.Discrete(len(self.actions))\n\n # Observations are RGB images with pixels in [0, 255]\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(obs_height, obs_width, 3),\n dtype=np.uint8\n )\n\n self.reward_range = (-math.inf, math.inf)\n\n # Maximum number of steps per episode\n self.max_episode_steps = max_episode_steps\n\n # Simulation parameters, used for domain randomization\n self.params = params\n\n # Domain randomization enable/disable flag\n self.domain_rand = domain_rand\n\n # Window for displaying the environment to humans\n self.window = None\n\n # Invisible window to render into (shadow OpenGL context)\n self.shadow_window = pyglet.window.Window(width=1, height=1, visible=False)\n\n # Enable depth testing and backface culling\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_CULL_FACE)\n\n # Frame buffer used to render observations\n self.obs_fb = FrameBuffer(obs_width, obs_height, 8)\n\n # Frame buffer used for human visualization\n self.vis_fb = FrameBuffer(window_width, window_height, 16)\n\n # Compute the observation display size\n self.obs_disp_width = 256\n self.obs_disp_height = obs_height * (self.obs_disp_width / obs_width)\n\n # For displaying text\n self.text_label = pyglet.text.Label(\n font_name=\"Arial\",\n font_size=14,\n multiline=True,\n width=400,\n x = window_width + 5,\n y = window_height - (self.obs_disp_height + 19)\n )\n\n # Initialize the state\n self.seed()\n self.reset()\n\n def close(self):\n pass\n\n def seed(self, seed=None):\n self.rand = RandGen(seed)\n return [seed]\n\n def reset(self):\n \"\"\"\n Reset the simulation at the start of a new episode\n This also randomizes many environment parameters (domain randomization)\n \"\"\"\n\n # Step count since episode start\n self.step_count = 0\n\n # Create the agent\n self.agent = Agent()\n\n # List of entities contained\n self.entities = []\n\n # List of rooms in the world\n self.rooms = []\n\n # Wall segments for collision detection\n # Shape is (N, 2, 3)\n self.wall_segs = []\n\n # Generate the world\n self._gen_world()\n\n # Check if domain randomization is enabled or not\n rand = self.rand if self.domain_rand else None\n\n # Randomize elements of the world (domain randomization)\n self.params.sample_many(rand, self, [\n 'sky_color',\n 'light_pos',\n 'light_color',\n 'light_ambient'\n ])\n\n # Get the max forward step distance\n self.max_forward_step = self.params.get_max('forward_step')\n\n # Randomize parameters of the entities\n for ent in self.entities:\n ent.randomize(self.params, rand)\n\n # Compute the min and max x, z extents of the whole floorplan\n self.min_x = min([r.min_x for r in self.rooms])\n self.max_x = max([r.max_x for r in 
self.rooms])\n self.min_z = min([r.min_z for r in self.rooms])\n self.max_z = max([r.max_z for r in self.rooms])\n\n # Generate static data\n if len(self.wall_segs) == 0:\n self._gen_static_data()\n\n # Pre-compile static parts of the environment into a display list\n self._render_static()\n\n # Generate the first camera image\n obs = self.render_obs()\n\n # Return first observation\n return obs\n\n def _get_carry_pos(self, agent_pos, ent):\n \"\"\"\n Compute the position at which to place an object being carried\n \"\"\"\n\n dist = self.agent.radius + ent.radius + self.max_forward_step\n pos = agent_pos + self.agent.dir_vec * 1.05 * dist\n\n # Adjust the Y-position so the object is visible while being carried\n y_pos = max(self.agent.cam_height - ent.height - 0.3, 0)\n pos = pos + Y_VEC * y_pos\n\n return pos\n\n def move_agent(self, fwd_dist, fwd_drift):\n \"\"\"\n Move the agent forward\n \"\"\"\n\n next_pos = (\n self.agent.pos +\n self.agent.dir_vec * fwd_dist +\n self.agent.right_vec * fwd_drift\n )\n\n if self.intersect(self.agent, next_pos, self.agent.radius):\n return False\n\n carrying = self.agent.carrying\n if carrying:\n next_carrying_pos = self._get_carry_pos(next_pos, carrying)\n\n if self.intersect(carrying, next_carrying_pos, carrying.radius):\n return False\n\n carrying.pos = next_carrying_pos\n\n self.agent.pos = next_pos\n\n return True\n\n def turn_agent(self, turn_angle):\n \"\"\"\n Turn the agent left or right\n \"\"\"\n\n turn_angle *= (math.pi / 180)\n orig_dir = self.agent.dir\n\n self.agent.dir += turn_angle\n\n carrying = self.agent.carrying\n if carrying:\n pos = self._get_carry_pos(self.agent.pos, carrying)\n\n if self.intersect(carrying, pos, carrying.radius):\n self.agent.dir = orig_dir\n return False\n\n carrying.pos = pos\n carrying.dir = self.agent.dir\n\n return True\n\n def step(self, action):\n \"\"\"\n Perform one action and update the simulation\n \"\"\"\n\n self.step_count += 1\n\n rand = self.rand if self.domain_rand else None\n fwd_step = self.params.sample(rand, 'forward_step')\n fwd_drift = self.params.sample(rand, 'forward_drift')\n turn_step = self.params.sample(rand, 'turn_step')\n\n if action == self.actions.move_forward:\n self.move_agent(fwd_step, fwd_drift)\n\n elif action == self.actions.move_back:\n self.move_agent(-fwd_step, fwd_drift)\n\n elif action == self.actions.turn_left:\n self.turn_agent(turn_step)\n\n elif action == self.actions.turn_right:\n self.turn_agent(-turn_step)\n\n # Pick up an object\n elif action == self.actions.pickup:\n # Position at which we will test for an intersection\n test_pos = self.agent.pos + self.agent.dir_vec * 1.5 * self.agent.radius\n ent = self.intersect(self.agent, test_pos, 1.2 * self.agent.radius)\n if not self.agent.carrying:\n if isinstance(ent, Entity):\n if not ent.is_static:\n self.agent.carrying = ent\n\n # Drop an object being carried\n elif action == self.actions.drop:\n if self.agent.carrying:\n self.agent.carrying.pos[1] = 0\n self.agent.carrying = None\n\n # If we are carrying an object, update its position as we move\n if self.agent.carrying:\n ent_pos = self._get_carry_pos(self.agent.pos, self.agent.carrying)\n self.agent.carrying.pos = ent_pos\n self.agent.carrying.dir = self.agent.dir\n\n # Generate the current camera image\n obs = self.render_obs()\n\n # If the maximum time step count is reached\n if self.step_count >= self.max_episode_steps:\n done = True\n reward = 0\n return obs, reward, done, {}\n\n reward = 0\n done = False\n\n return obs, reward, done, {}\n\n def 
add_rect_room(\n self,\n min_x,\n max_x,\n min_z,\n max_z,\n **kwargs\n ):\n \"\"\"\n Create a rectangular room\n \"\"\"\n\n # 2D outline coordinates of the room,\n # listed in counter-clockwise order when viewed from the top\n outline = np.array([\n # East wall\n [max_x, max_z],\n # North wall\n [max_x, min_z],\n # West wall\n [min_x, min_z],\n # South wall\n [min_x, max_z],\n ])\n\n return self.add_room(outline=outline, **kwargs)\n\n def add_room(self, **kwargs):\n \"\"\"\n Create a new room\n \"\"\"\n\n assert len(self.wall_segs) == 0, \"cannot add rooms after static data is generated\"\n\n room = Room(**kwargs)\n self.rooms.append(room)\n\n return room\n\n def connect_rooms(\n self,\n room_a,\n room_b,\n min_x=None,\n max_x=None,\n min_z=None,\n max_z=None,\n max_y=None\n ):\n \"\"\"\n Connect two rooms along facing edges\n \"\"\"\n\n def find_facing_edges():\n for idx_a in range(room_a.num_walls):\n norm_a = room_a.edge_norms[idx_a]\n\n for idx_b in range(room_b.num_walls):\n norm_b = room_b.edge_norms[idx_b]\n\n # Reject edges that are not facing each other\n if np.dot(norm_a, norm_b) > -0.9:\n continue\n\n dir = room_b.outline[idx_b] - room_a.outline[idx_a]\n\n # Reject edges that are not touching\n if np.dot(norm_a, dir) > 0.05:\n continue\n\n return idx_a, idx_b\n\n return None, None\n\n idx_a, idx_b = find_facing_edges()\n assert idx_a != None, \"matching edges not found in connect_rooms\"\n\n start_a, end_a = room_a.add_portal(\n edge=idx_a,\n min_x=min_x,\n max_x=max_x,\n min_z=min_z,\n max_z=max_z,\n max_y=max_y\n )\n\n start_b, end_b = room_b.add_portal(\n edge=idx_b,\n min_x=min_x,\n max_x=max_x,\n min_z=min_z,\n max_z=max_z,\n max_y=max_y\n )\n\n a = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * start_a\n b = room_a.outline[idx_a] + room_a.edge_dirs[idx_a] * end_a\n c = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * start_b\n d = room_b.outline[idx_b] + room_b.edge_dirs[idx_b] * end_b\n\n # If the portals are directly connected, stop\n if np.linalg.norm(a - d) < 0.001:\n return\n\n len_a = np.linalg.norm(b - a)\n len_b = np.linalg.norm(d - c)\n\n # Room outline points must be specified in counter-clockwise order\n outline = np.stack([c, b, a, d])\n outline = np.stack([outline[:, 0], outline[:, 2]], axis=1)\n\n max_y = max_y if max_y != None else room_a.wall_height\n\n room = Room(\n outline,\n wall_height=max_y,\n wall_tex=room_a.wall_tex_name,\n floor_tex=room_a.floor_tex_name,\n ceil_tex=room_a.ceil_tex_name,\n no_ceiling=room_a.no_ceiling,\n )\n\n self.rooms.append(room)\n\n room.add_portal(1, start_pos=0, end_pos=len_a)\n room.add_portal(3, start_pos=0, end_pos=len_b)\n\n def place_entity(\n self,\n ent,\n room=None,\n pos=None,\n dir=None,\n min_x=None,\n max_x=None,\n min_z=None,\n max_z=None\n ):\n \"\"\"\n Place an entity/object in the world.\n Find a position that doesn't intersect with any other object.\n \"\"\"\n\n assert len(self.rooms) > 0, \"create rooms before calling place_entity\"\n assert ent.radius != None, \"entity must have physical size defined\"\n\n # Generate collision detection data\n if len(self.wall_segs) == 0:\n self._gen_static_data()\n\n # If an exact position is specified\n if pos is not None:\n ent.dir = dir if dir != None else self.rand.float(-math.pi, math.pi)\n ent.pos = pos\n self.entities.append(ent)\n return ent\n\n # Keep retrying until we find a suitable position\n while True:\n # Pick a room, sample rooms proportionally to floor surface area\n r = room if room else self.rand.choice(self.rooms, probs=self.room_probs)\n\n # 
Choose a random point within the square bounding box of the room\n lx = r.min_x if min_x == None else min_x\n hx = r.max_x if max_x == None else max_x\n lz = r.min_z if min_z == None else min_z\n hz = r.max_z if max_z == None else max_z\n pos = self.rand.float(\n low =[lx + ent.radius, 0, lz + ent.radius],\n high=[hx - ent.radius, 0, hz - ent.radius]\n )\n\n # Make sure the position is within the room's outline\n if not r.point_inside(pos):\n continue\n\n # Make sure the position doesn't intersect with any walls\n if self.intersect(ent, pos, ent.radius):\n continue\n\n # Pick a direction\n d = dir if dir != None else self.rand.float(-math.pi, math.pi)\n\n ent.pos = pos\n ent.dir = d\n break\n\n self.entities.append(ent)\n\n return ent\n\n def place_agent(\n self,\n room=None,\n dir=None,\n min_x=None,\n max_x=None,\n min_z=None,\n max_z=None\n ):\n \"\"\"\n Place the agent in the environment at a random position\n and orientation\n \"\"\"\n\n return self.place_entity(\n self.agent,\n room=room,\n dir=dir,\n min_x=min_x,\n max_x=max_x,\n min_z=min_z,\n max_z=max_z\n )\n\n def intersect(self, ent, pos, radius):\n \"\"\"\n Check if an entity intersects with the world\n \"\"\"\n\n # Ignore the Y position\n px, _, pz = pos\n pos = np.array([px, 0, pz])\n\n # Check for intersection with walls\n if intersect_circle_segs(pos, radius, self.wall_segs):\n return True\n\n # Check for entity intersection\n for ent2 in self.entities:\n # Entities can't intersect with themselves\n if ent2 is ent:\n continue\n\n px, _, pz = ent2.pos\n pos2 = np.array([px, 0, pz])\n\n d = np.linalg.norm(pos2 - pos)\n if d < radius + ent2.radius:\n return ent2\n\n return None\n\n def near(self, ent0, ent1=None):\n \"\"\"\n Test if the two entities are near each other.\n Used for \"go to\" or \"put next\" type tasks\n \"\"\"\n\n if ent1 == None:\n ent1 = self.agent\n\n dist = np.linalg.norm(ent0.pos - ent1.pos)\n return dist < ent0.radius + ent1.radius + 1.1 * self.max_forward_step\n\n def _load_tex(self, tex_name):\n \"\"\"\n Load a texture, with or without domain randomization\n \"\"\"\n\n rand = self.rand if self.params.sample(self.rand, 'tex_rand') else None\n return Texture.get(tex_name, rand)\n\n def _gen_static_data(self):\n \"\"\"\n Generate static data needed for rendering and collision detection\n \"\"\"\n\n # Generate the static data for each room\n for room in self.rooms:\n room._gen_static_data(\n self.params,\n self.rand if self.domain_rand else None\n )\n\n # Concatenate the wall segments\n self.wall_segs = np.concatenate([r.wall_segs for r in self.rooms])\n\n # Room selection probabilities\n self.room_probs = np.array([r.area for r in self.rooms], dtype=float)\n self.room_probs /= np.sum(self.room_probs)\n\n def _gen_world(self):\n \"\"\"\n Generate the world. 
Derived classes must implement this method.\n \"\"\"\n\n raise NotImplementedError\n\n def _reward(self):\n \"\"\"\n Default sparse reward computation\n \"\"\"\n\n return 1.0 - 0.2 * (self.step_count / self.max_episode_steps)\n\n def _render_static(self):\n \"\"\"\n Render the static elements of the scene into a display list.\n Called once at the beginning of each episode.\n \"\"\"\n\n # TODO: manage this automatically\n # glIsList\n glDeleteLists(1, 1);\n glNewList(1, GL_COMPILE);\n\n # Light position\n glLightfv(GL_LIGHT0, GL_POSITION, (GLfloat*4)(*self.light_pos + [1]))\n\n # Background/minimum light level\n glLightfv(GL_LIGHT0, GL_AMBIENT, (GLfloat*4)(*self.light_ambient))\n\n # Diffuse light color\n glLightfv(GL_LIGHT0, GL_DIFFUSE, (GLfloat*4)(*self.light_color))\n\n #glLightf(GL_LIGHT0, GL_SPOT_CUTOFF, 180)\n #glLightf(GL_LIGHT0, GL_SPOT_EXPONENT, 0)\n #glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0)\n #glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0)\n #glLightf(GL_LIGHT0, GL_QUADRATIC_ATTENUATION, 0)\n\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n\n glShadeModel(GL_SMOOTH)\n glEnable(GL_COLOR_MATERIAL)\n glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)\n\n # Render the rooms\n glEnable(GL_TEXTURE_2D)\n for room in self.rooms:\n room._render()\n\n # Render the static entities\n for ent in self.entities:\n if ent.is_static:\n ent.render()\n\n glEndList()\n\n def _render_world(\n self,\n frame_buffer,\n render_agent\n ):\n \"\"\"\n Render the world from a given camera position into a frame buffer,\n and produce a numpy image array as output.\n \"\"\"\n\n # Call the display list for the static parts of the environment\n glCallList(1)\n\n # TODO: keep the non-static entities in a different list for efficiency?\n # Render the non-static entities\n for ent in self.entities:\n if not ent.is_static and ent is not self.agent:\n ent.render()\n #ent.draw_bound()\n\n if render_agent:\n self.agent.render()\n\n # Resolve the rendered image into a numpy array\n img = frame_buffer.resolve()\n\n return img\n\n def render_top_view(self, frame_buffer=None):\n \"\"\"\n Render a top view of the whole map (from above)\n \"\"\"\n\n if frame_buffer == None:\n frame_buffer = self.obs_fb\n\n # Switch to the default OpenGL context\n # This is necessary on Linux Nvidia drivers\n self.shadow_window.switch_to()\n\n # Bind the frame buffer before rendering into it\n frame_buffer.bind()\n\n # Clear the color and depth buffers\n glClearColor(*self.sky_color, 1.0)\n glClearDepth(1.0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Scene extents to render\n min_x = self.min_x - 1\n max_x = self.max_x + 1\n min_z = self.min_z - 1\n max_z = self.max_z + 1\n\n width = max_x - min_x\n height = max_z - min_z\n aspect = width / height\n fb_aspect = frame_buffer.width / frame_buffer.height\n\n # Adjust the aspect extents to match the frame buffer aspect\n if aspect > fb_aspect:\n # Want to add to denom, add to height\n new_h = width / fb_aspect\n h_diff = new_h - height\n min_z -= h_diff / 2\n max_z += h_diff / 2\n elif aspect < fb_aspect:\n # Want to add to num, add to width\n new_w = height * fb_aspect\n w_diff = new_w - width\n min_x -= w_diff / 2\n max_x += w_diff / 2\n\n # Set the projection matrix\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(\n min_x,\n max_x,\n -max_z,\n -min_z,\n -100, 100.0\n )\n\n # Setup the camera\n # Y maps to +Z, Z maps to +Y\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n m = [\n 1, 0, 0, 0,\n 0, 0, 1, 0,\n 0, -1, 0, 0,\n 0, 0, 0, 1,\n ]\n 
glLoadMatrixf((GLfloat * len(m))(*m))\n\n return self._render_world(\n frame_buffer,\n render_agent=True\n )\n\n def render_obs(self, frame_buffer=None):\n \"\"\"\n Render an observation from the point of view of the agent\n \"\"\"\n\n if frame_buffer == None:\n frame_buffer = self.obs_fb\n\n # Switch to the default OpenGL context\n # This is necessary on Linux Nvidia drivers\n self.shadow_window.switch_to()\n\n # Bind the frame buffer before rendering into it\n frame_buffer.bind()\n\n # Clear the color and depth buffers\n glClearColor(*self.sky_color, 1.0)\n glClearDepth(1.0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Set the projection matrix\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(\n self.agent.cam_fov_y,\n frame_buffer.width / float(frame_buffer.height),\n 0.04,\n 100.0\n )\n\n # Setup the camera\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(\n # Eye position\n *self.agent.cam_pos,\n # Target\n *(self.agent.cam_pos + self.agent.cam_dir),\n # Up vector\n 0, 1.0, 0.0\n )\n\n return self._render_world(\n frame_buffer,\n render_agent=False\n )\n\n def render_depth(self, frame_buffer=None):\n \"\"\"\n Produce a depth map\n Values are floating-point, map shape is (H,W,1)\n Distances are in meters from the observer\n \"\"\"\n\n if frame_buffer == None:\n frame_buffer = self.obs_fb\n\n # Render the world\n self.render_obs(frame_buffer)\n\n return frame_buffer.get_depth_map(0.04, 100.0)\n\n def get_visible_ents(self):\n \"\"\"\n Get a list of visible entities.\n Uses OpenGL occlusion queries to approximate visibility.\n :return: set of objects visible to the agent\n \"\"\"\n\n # Allocate the occlusion query ids\n num_ents = len(self.entities)\n query_ids = (GLuint * num_ents)()\n glGenQueries(num_ents, query_ids)\n\n # Switch to the default OpenGL context\n # This is necessary on Linux Nvidia drivers\n self.shadow_window.switch_to()\n\n # Use the small observation frame buffer\n frame_buffer = self.obs_fb\n\n # Bind the frame buffer before rendering into it\n frame_buffer.bind()\n\n # Clear the color and depth buffers\n glClearColor(*self.sky_color, 1.0)\n glClearDepth(1.0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Set the projection matrix\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(\n self.agent.cam_fov_y,\n frame_buffer.width / float(frame_buffer.height),\n 0.04,\n 100.0\n )\n\n # Setup the camera\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(\n # Eye position\n *self.agent.cam_pos,\n # Target\n *(self.agent.cam_pos + self.agent.cam_dir),\n # Up vector\n 0, 1.0, 0.0\n )\n\n # Render the rooms, without texturing\n glDisable(GL_TEXTURE_2D)\n for room in self.rooms:\n room._render()\n\n # For each entity\n for ent_idx, ent in enumerate(self.entities):\n if ent is self.agent:\n continue\n\n glBeginQuery(GL_ANY_SAMPLES_PASSED, query_ids[ent_idx])\n pos = ent.pos\n\n #glColor3f(1, 0, 0)\n drawBox(\n x_min=pos[0] - 0.1,\n x_max=pos[0] + 0.1,\n y_min=pos[1],\n y_max=pos[1] + 0.2,\n z_min=pos[2] - 0.1,\n z_max=pos[2] + 0.1\n )\n\n glEndQuery(GL_ANY_SAMPLES_PASSED)\n\n vis_objs = set()\n\n # Get query results\n for ent_idx, ent in enumerate(self.entities):\n if ent is self.agent:\n continue\n\n visible = (GLuint*1)(1)\n glGetQueryObjectuiv(query_ids[ent_idx], GL_QUERY_RESULT, visible);\n\n if visible[0] != 0:\n vis_objs.add(ent)\n\n # Free the occlusion query ids\n glDeleteQueries(num_ents, query_ids)\n\n #img = frame_buffer.resolve()\n #return img\n\n return vis_objs\n\n def 
render(self, mode='human', close=False, view='agent'):\n \"\"\"\n Render the environment for human viewing\n \"\"\"\n\n if close:\n if self.window:\n self.window.close()\n return\n\n # Render the human-view image\n assert view in ['agent', 'top']\n if view == 'agent':\n img = self.render_obs(self.vis_fb)\n else:\n img = self.render_top_view(self.vis_fb)\n img_width = img.shape[1]\n img_height = img.shape[0]\n\n if mode == 'rgb_array':\n return img\n\n # Render the agent's view\n obs = self.render_obs()\n obs_width = obs.shape[1]\n obs_height = obs.shape[0]\n\n window_width = img_width + self.obs_disp_width\n window_height = img_height\n\n if self.window is None:\n config = pyglet.gl.Config(double_buffer=True)\n self.window = pyglet.window.Window(\n width=window_width,\n height=window_height,\n resizable=False,\n config=config\n )\n\n self.window.clear()\n self.window.switch_to()\n\n # Bind the default frame buffer\n glBindFramebuffer(GL_FRAMEBUFFER, 0);\n\n # Clear the color and depth buffers\n glClearColor(0, 0, 0, 1.0)\n glClearDepth(1.0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\n\n # Setup orthogonal projection\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glOrtho(0, window_width, 0, window_height, 0, 10)\n\n # Draw the human render to the rendering window\n img_flip = np.ascontiguousarray(np.flip(img, axis=0))\n img_data = pyglet.image.ImageData(\n img_width,\n img_height,\n 'RGB',\n img_flip.ctypes.data_as(POINTER(GLubyte)),\n pitch=img_width * 3,\n )\n img_data.blit(\n 0,\n 0,\n 0,\n width=img_width,\n height=img_height\n )\n\n # Draw the observation\n obs = np.ascontiguousarray(np.flip(obs, axis=0))\n obs_data = pyglet.image.ImageData(\n obs_width,\n obs_height,\n 'RGB',\n obs.ctypes.data_as(POINTER(GLubyte)),\n pitch=obs_width * 3,\n )\n obs_data.blit(\n img_width,\n img_height - self.obs_disp_height,\n 0,\n width=self.obs_disp_width,\n height=self.obs_disp_height\n )\n\n # Draw the text label in the window\n self.text_label.text = \"pos: (%.2f, %.2f, %.2f)\\nangle: %d\\nsteps: %d\" % (\n *self.agent.pos,\n int(self.agent.dir * 180 / math.pi) % 360,\n self.step_count\n )\n self.text_label.draw()\n\n # Force execution of queued commands\n glFlush()\n\n # If we are not running the Pyglet event loop,\n # we have to manually flip the buffers and dispatch events\n if mode == 'human':\n self.window.flip()\n self.window.dispatch_events()\n\n return img\n" ]
[ [ "numpy.sum", "numpy.dot", "numpy.stack", "numpy.cross", "numpy.insert", "numpy.greater", "numpy.expand_dims", "numpy.flip", "numpy.array", "numpy.concatenate", "numpy.linalg.norm" ] ]
Nitin-Mane/dense-ulearn-vos
[ "9e39d359a53a2343522ce5820fdf27223a4ffcb4" ]
[ "datasets/dataloader_infer.py" ]
[ "\"\"\"\nCopyright (c) 2021 TU Darmstadt\nAuthor: Nikita Araslanov <[email protected]>\nLicense: Apache License 2.0\n\"\"\"\n\nimport os\nimport torch\n\nfrom PIL import Image\n\nimport numpy as np\nimport torchvision.transforms as tf\n\nfrom .dataloader_base import DLBase\n\n\nclass DataSeg(DLBase):\n\n def __init__(self, cfg, split, ignore_labels=[], \\\n root=os.path.expanduser('./data'), renorm=False):\n\n super(DataSeg, self).__init__()\n\n self.cfg = cfg\n self.root = root\n self.split = split\n self.ignore_labels = ignore_labels\n self._init_palette(self.cfg.DATASET.NUM_CLASSES)\n\n # train/val/test splits are pre-cut\n split_fn = os.path.join(self.root, self.split + \".txt\")\n assert os.path.isfile(split_fn)\n\n self.sequence_ids = []\n self.sequence_names = []\n def add_sequence(name):\n vlen = len(self.images)\n assert vlen >= cfg.DATASET.VIDEO_LEN, \\\n \"Detected video shorter [{}] than training length [{}]\".format(vlen, \\\n cfg.DATASET.VIDEO_LEN)\n self.sequence_ids.append(vlen)\n self.sequence_names.append(name)\n return vlen\n\n self.images = []\n self.masks = []\n self.flags = []\n\n token = None\n with open(split_fn, \"r\") as lines:\n for line in lines:\n _flag, _image, _mask = line.strip(\"\\n\").split(' ')\n\n # save every frame\n #_flag = 1\n self.flags.append(int(_flag))\n\n _image = os.path.join(cfg.DATASET.ROOT, _image.lstrip('/'))\n assert os.path.isfile(_image), '%s not found' % _image\n\n # each sequence may have a different length\n # do some book-keeping e.g. to ensure we have\n # sequences long enough for subsequent sampling\n _token = _image.split(\"/\")[-2] # parent directory\n \n # sequence ID is in the filename\n #_token = os.path.basename(_image).split(\"_\")[0] \n if token != _token:\n if not token is None:\n add_sequence(token)\n token = _token\n\n self.images.append(_image)\n\n if _mask is None:\n self.masks.append(None)\n else:\n _mask = os.path.join(cfg.DATASET.ROOT, _mask.lstrip('/'))\n #assert os.path.isfile(_mask), '%s not found' % _mask\n self.masks.append(_mask)\n\n # update the last sequence\n # returns the total amount of frames\n add_sequence(token)\n print(\"Loaded {} sequences\".format(len(self.sequence_ids)))\n\n # definint data augmentation:\n print(\"Dataloader: {}\".format(split), \" #\", len(self.images))\n print(\"\\t {}: no augmentation\".format(split))\n\n self.tf = tf.Compose([tf.ToTensor(), tf.Normalize(mean=self.MEAN, std=self.STD)])\n self._num_samples = len(self.images)\n\n def __len__(self):\n return len(self.sequence_ids)\n\n \n def _mask2tensor(self, mask, num_classes=6):\n h,w = mask.shape\n ones = torch.ones(1,h,w)\n zeros = torch.zeros(num_classes,h,w)\n \n max_idx = mask.max()\n assert max_idx < num_classes, \"{} >= {}\".format(max_idx, num_classes)\n return zeros.scatter(0, mask[None, ...], ones)\n \n def denorm(self, image):\n\n if image.dim() == 3:\n assert image.dim() == 3, \"Expected image [CxHxW]\"\n assert image.size(0) == 3, \"Expected RGB image [3xHxW]\"\n\n for t, m, s in zip(image, self.MEAN, self.STD):\n t.mul_(s).add_(m)\n elif image.dim() == 4:\n # batch mode\n assert image.size(1) == 3, \"Expected RGB image [3xHxW]\"\n\n for t, m, s in zip((0,1,2), self.MEAN, self.STD):\n image[:, t, :, :].mul_(s).add_(m)\n\n return image\n\n\n def __getitem__(self, index):\n \n seq_to = self.sequence_ids[index]\n seq_from = 0 if index == 0 else self.sequence_ids[index - 1]\n\n image0 = Image.open(self.images[seq_from])\n w,h = image0.size\n\n images, masks, fns, flags = [], [], [], []\n tracks = 
torch.LongTensor(self.cfg.DATASET.NUM_CLASSES).fill_(-1)\n masks = torch.LongTensor(self.cfg.DATASET.NUM_CLASSES, h, w).zero_()\n known_ids = set()\n\n for t in range(seq_from, seq_to):\n\n t0 = t - seq_from\n image = Image.open(self.images[t]).convert('RGB')\n\n fns.append(os.path.basename(self.images[t].replace(\".jpg\", \"\")))\n flags.append(self.flags[t])\n\n if os.path.isfile(self.masks[t]):\n mask = Image.open(self.masks[t])\n mask = torch.from_numpy(np.array(mask, np.long, copy=False))\n\n unique_ids = np.unique(mask)\n for oid in unique_ids:\n if not oid in known_ids:\n tracks[oid] = t0\n known_ids.add(oid)\n masks[oid] = (mask == oid).long()\n else:\n mask = Image.new('L', image.size)\n\n image = self.tf(image)\n images.append(image)\n\n images = torch.stack(images, 0)\n seq_name = self.sequence_names[index]\n flags = torch.LongTensor(flags)\n\n return images, images, masks, tracks, len(known_ids), fns, flags, seq_name\n" ]
[ [ "torch.ones", "torch.stack", "torch.zeros", "torch.LongTensor", "numpy.array", "numpy.unique" ] ]
Carlosbogo/etna
[ "b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94" ]
[ "etna/analysis/eda_utils.py" ]
[ "import math\nimport warnings\nfrom itertools import combinations\nfrom typing import TYPE_CHECKING\nfrom typing import Optional\nfrom typing import Sequence\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport statsmodels.api as sm\nfrom matplotlib.ticker import MaxNLocator\nfrom statsmodels.graphics import utils\n\nif TYPE_CHECKING:\n from etna.datasets import TSDataset\n\nplot_acf = sm.graphics.tsa.plot_acf\nplot_pacf = sm.graphics.tsa.plot_pacf\n\n\ndef cross_corr_plot(ts: \"TSDataset\", n_segments: int = 10, maxlags: int = 21, segments: Optional[Sequence] = None):\n \"\"\"\n Cross-correlation plot between multiple timeseries.\n\n Parameters\n ----------\n ts:\n TSDataset with timeseries data\n n_segments:\n number of random segments to plot\n maxlags:\n number of timeseries shifts for cross-correlation\n segments:\n segments to plot\n \"\"\"\n if not segments:\n segments = list(ts.segments)\n segments = np.random.choice(segments, size=min(len(segments), n_segments), replace=False)\n segment_pairs = list(combinations(segments, r=2))\n if len(segment_pairs) == 0:\n raise ValueError(\"There are no pairs to plot! Try set n_segments > 1.\")\n columns_num = min(2, len(segment_pairs))\n rows_num = math.ceil(len(segment_pairs) / columns_num)\n fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)\n ax = ax.ravel()\n fig.suptitle(\"Cross-correlation\", fontsize=16)\n for i, (segment_1, segment_2) in enumerate(segment_pairs):\n df_segment_1 = ts[:, segment_1, :][segment_1]\n df_segment_2 = ts[:, segment_2, :][segment_2]\n fig, axx = utils.create_mpl_ax(ax[i])\n target_1 = df_segment_1.target\n target_2 = df_segment_2.target\n if target_1.dtype == int or target_2.dtype == int:\n warnings.warn(\n \"At least one target column has integer dtype, \"\n \"it is converted to float in order to calculate correlation.\"\n )\n target_1 = target_1.astype(float)\n target_2 = target_2.astype(float)\n lags, level, _, _ = axx.xcorr(x=target_1, y=target_2, maxlags=maxlags)\n ax[i].plot(lags, level, \"o\", markersize=5)\n ax[i].set_title(f\"{segment_1} vs {segment_2}\")\n ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.show()\n\n\ndef sample_acf_plot(ts: \"TSDataset\", n_segments: int = 10, lags: int = 21, segments: Sequence = None):\n \"\"\"\n Autocorrelation plot for multiple timeseries.\n\n Parameters\n ----------\n ts:\n TSDataset with timeseries data\n n_segments:\n number of random segments to plot\n lags:\n number of timeseries shifts for cross-correlation\n segments:\n segments to plot\n\n Notes\n -----\n https://en.wikipedia.org/wiki/Autocorrelation\n \"\"\"\n if not segments:\n segments = sorted(ts.segments)\n\n k = min(n_segments, len(segments))\n columns_num = min(2, k)\n rows_num = math.ceil(k / columns_num)\n fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)\n ax = ax.ravel()\n fig.suptitle(\"Partial Autocorrelation\", fontsize=16)\n for i, name in enumerate(sorted(np.random.choice(segments, size=k, replace=False))):\n df_slice = ts[:, name, :][name]\n plot_acf(x=df_slice[\"target\"].values, ax=ax[i], lags=lags)\n ax[i].set_title(name)\n plt.show()\n\n\ndef sample_pacf_plot(ts: \"TSDataset\", n_segments: int = 10, lags: int = 21, segments: Sequence = None):\n \"\"\"\n Partial autocorrelation plot for multiple timeseries.\n\n Parameters\n ----------\n ts:\n TSDataset with timeseries data\n n_segments:\n number of random segments 
to plot\n lags:\n number of timeseries shifts for partial autocorrelation\n segments:\n segments to plot\n\n Notes\n -----\n https://en.wikipedia.org/wiki/Partial_autocorrelation_function\n \"\"\"\n if not segments:\n segments = sorted(ts.segments)\n\n k = min(n_segments, len(segments))\n columns_num = min(2, k)\n rows_num = math.ceil(k / columns_num)\n fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 5 * rows_num), constrained_layout=True, squeeze=False)\n ax = ax.ravel()\n fig.suptitle(\"Partial Autocorrelation\", fontsize=16)\n for i, name in enumerate(sorted(np.random.choice(segments, size=k, replace=False))):\n df_slice = ts[:, name, :][name]\n plot_pacf(x=df_slice[\"target\"].values, ax=ax[i], lags=lags)\n ax[i].set_title(name)\n plt.show()\n\n\ndef distribution_plot(\n ts: \"TSDataset\",\n n_segments: int = 10,\n segments: Sequence = None,\n shift: int = 30,\n window: int = 30,\n freq: str = \"1M\",\n n_rows: int = 10,\n):\n \"\"\"Distribution of z-values grouped by segments and time frequency.\n\n .. math::\n mean_{i} = \\\\sum_{j=i-\\\\text{shift}}^{i-\\\\text{shift}+\\\\text{window}} \\\\frac{x_{j}}{\\\\text{window}}\n\n Parameters\n ----------\n ts:\n dataset with timeseries data\n n_segments:\n number of random segments to plot\n segments:\n segments to plot\n shift:\n number of timeseries shifts for statistics calc\n window:\n number of points for statistics calc\n freq:\n group for z_{i}\n n_rows:\n maximum number of rows to plot\n \"\"\"\n df_pd = ts.to_pandas(flatten=True)\n\n if not segments:\n segments = df_pd.segment.unique()\n segments = np.random.choice(segments, size=min(len(segments), n_segments), replace=False)\n df_full = df_pd[df_pd.segment.isin(segments)]\n df_full.loc[:, \"mean\"] = (\n df_full.groupby(\"segment\").target.shift(shift).transform(lambda s: s.rolling(window).mean())\n )\n df_full.loc[:, \"std\"] = df_full.groupby(\"segment\").target.shift(shift).transform(lambda s: s.rolling(window).std())\n df_full = df_full.dropna()\n df_full.loc[:, \"z\"] = (df_full[\"target\"] - df_full[\"mean\"]) / df_full[\"std\"]\n\n grouped_data = df_full.groupby([df_full.timestamp.dt.to_period(freq)])\n columns_num = min(2, len(grouped_data))\n rows_num = min(n_rows, math.ceil(len(grouped_data) / columns_num))\n groups = set(list(grouped_data.groups.keys())[-rows_num * columns_num :])\n fig, ax = plt.subplots(rows_num, columns_num, figsize=(20, 7.5 * rows_num), constrained_layout=True, squeeze=False)\n fig.suptitle(f\"Z statistic shift: {shift} window: {window}\", fontsize=16)\n ax = ax.ravel()\n i = 0\n for period, df_slice in grouped_data:\n if period not in groups:\n continue\n sns.boxplot(data=df_slice.sort_values(by=\"segment\"), y=\"z\", x=\"segment\", ax=ax[i], fliersize=False)\n ax[i].set_title(f\"{period}\")\n i += 1\n" ]
[ [ "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.show", "numpy.random.choice", "matplotlib.pyplot.subplots" ] ]
TimoleonLatinopoulos/MortalKombatOpenAI
[ "59dc89d1f50dd74690859e5e1fa18701a5246382" ]
[ "DDQN.py" ]
[ "import tensorflow as tf\nfrom keras.activations import relu\nfrom keras.initializers import VarianceScaling\nfrom keras.layers import Dense, Conv2D, Flatten\nfrom keras.losses import logcosh\n\n\nclass DDQN:\n \"\"\" Implements a Dueling Dual Deep Q-Network based on the frames of the Retro Environment \"\"\"\n\n def __init__(self, n_actions, frame_height=63, frame_width=113, stacked_frames=4, learning_rate=0.00001):\n self.n_actions = n_actions\n self.frame_height = frame_height\n self.frame_width = frame_width\n self.stacked_frames = stacked_frames\n self.learning_rate = learning_rate\n\n self.input = tf.placeholder(shape=[None, self.frame_height, self.frame_width, self.stacked_frames],\n dtype=tf.float32)\n self.input = self.input / 255\n\n # Convolutional layers\n self.conv1 = self.conv_layer(self.input, 32, [8, 8], 4, 'conv1')\n self.conv2 = self.conv_layer(self.conv1, 64, [4, 4], 2, 'conv2')\n self.conv3 = self.conv_layer(self.conv2, 64, [3, 3], 1, 'conv3')\n self.flat = Flatten()(self.conv3)\n self.dense1 = self.dense_layer(self.flat, 512, 'dense1', relu)\n\n # Splitting into value and advantage streams\n self.v_stream, self.a_stream = tf.split(self.dense1, 2, 1)\n self.value = self.dense_layer(self.v_stream, 1, 'value')\n self.advantage = self.dense_layer(self.a_stream, self.n_actions, 'advantage')\n\n # Getting Q-values from value and advantage streams\n self.q_values = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))\n self.prediction = tf.argmax(self.q_values, 1)\n\n # targetQ according to Bellman equation\n self.target_q = tf.placeholder(shape=[None], dtype=tf.float32)\n self.action = tf.placeholder(shape=[None], dtype=tf.uint8)\n self.action_one_hot = tf.one_hot(self.action, self.n_actions, dtype=tf.float32)\n self.Q = tf.reduce_sum(tf.multiply(self.q_values, self.action_one_hot), axis=1)\n\n # Parameter updates\n self.error = logcosh(self.target_q, self.Q)\n self.loss = tf.reduce_mean(self.error)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n self.update = self.optimizer.minimize(self.loss)\n\n @staticmethod\n def conv_layer(_inputs, _filters, _kernel_size, _strides, _name):\n return Conv2D(filters=_filters, kernel_size=_kernel_size, strides=_strides,\n kernel_initializer=VarianceScaling(scale=2.0), padding=\"valid\",\n activation=relu, use_bias=False, name=_name)(_inputs)\n\n @staticmethod\n def dense_layer(_inputs, _units, _name, _activation=None):\n return Dense(activation=_activation, units=_units,\n kernel_initializer=VarianceScaling(scale=2.0), name=_name)(_inputs)\n\n\nclass TargetNetworkUpdater:\n \"\"\" Updates the variables and the weights of the target network based on the main network \"\"\"\n\n def __init__(self, main_vars, target_vars):\n self.main_vars = main_vars\n self.target_vars = target_vars\n\n def update_target_vars(self):\n update_ops = []\n for i, var in enumerate(self.main_vars):\n copy_op = self.target_vars[i].assign(var.value())\n update_ops.append(copy_op)\n return update_ops\n\n def update_networks(self, sess):\n update_ops = self.update_target_vars()\n for copy_op in update_ops:\n sess.run(copy_op)\n" ]
[ [ "tensorflow.placeholder", "tensorflow.train.AdamOptimizer", "tensorflow.reduce_mean", "tensorflow.multiply", "tensorflow.one_hot", "tensorflow.argmax", "tensorflow.split" ] ]
meokz/d3rlpy
[ "40504e2d8b424547558ab82786c523e8f4626a82" ]
[ "d3rlpy/models/torch/encoders.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef _create_activation(activation_type):\n if activation_type == 'relu':\n return torch.relu\n elif activation_type == 'swish':\n return lambda x: x * torch.sigmoid(x)\n raise ValueError('invalid activation_type.')\n\n\ndef create_encoder(observation_shape,\n action_size=None,\n use_batch_norm=False,\n discrete_action=False,\n activation_type='relu',\n **kwargs):\n\n activation = _create_activation(activation_type)\n\n if len(observation_shape) == 3:\n # pixel input\n if action_size is not None:\n return PixelEncoderWithAction(observation_shape,\n action_size,\n use_batch_norm=use_batch_norm,\n discrete_action=discrete_action,\n activation=activation,\n **kwargs)\n return PixelEncoder(observation_shape,\n use_batch_norm=use_batch_norm,\n activation=activation,\n **kwargs)\n elif len(observation_shape) == 1:\n # vector input\n if action_size is not None:\n return VectorEncoderWithAction(observation_shape,\n action_size,\n use_batch_norm=use_batch_norm,\n discrete_action=discrete_action,\n activation=activation,\n **kwargs)\n return VectorEncoder(observation_shape,\n use_batch_norm=use_batch_norm,\n activation=activation,\n **kwargs)\n else:\n raise ValueError('observation_shape must be 1d or 3d.')\n\n\nclass PixelEncoder(nn.Module):\n def __init__(self,\n observation_shape,\n filters=None,\n feature_size=None,\n use_batch_norm=False,\n activation=torch.relu):\n super().__init__()\n\n # default architecture is based on Nature DQN paper.\n if filters is None:\n filters = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]\n if feature_size is None:\n feature_size = 512\n\n self.observation_shape = observation_shape\n self.use_batch_norm = use_batch_norm\n self.activation = activation\n self.feature_size = feature_size\n\n # convolutional layers\n in_channels = [observation_shape[0]] + [f[0] for f in filters[:-1]]\n self.convs = nn.ModuleList()\n self.conv_bns = nn.ModuleList()\n for in_channel, f in zip(in_channels, filters):\n out_channel, kernel_size, stride = f\n conv = nn.Conv2d(in_channel,\n out_channel,\n kernel_size=kernel_size,\n stride=stride)\n self.convs.append(conv)\n\n if use_batch_norm:\n self.conv_bns.append(nn.BatchNorm2d(out_channel))\n\n # last dense layer\n self.fc = nn.Linear(self._get_linear_input_size(), feature_size)\n if use_batch_norm:\n self.fc_bn = nn.BatchNorm1d(feature_size)\n\n def _get_linear_input_size(self):\n x = torch.rand((1, ) + self.observation_shape)\n with torch.no_grad():\n return self._conv_encode(x).view(1, -1).shape[1]\n\n def _conv_encode(self, x):\n h = x\n for i in range(len(self.convs)):\n h = self.activation(self.convs[i](h))\n if self.use_batch_norm:\n h = self.conv_bns[i](h)\n return h\n\n def forward(self, x):\n h = self._conv_encode(x)\n\n h = self.activation(self.fc(h.view(h.shape[0], -1)))\n if self.use_batch_norm:\n h = self.fc_bn(h)\n\n return h\n\n\nclass PixelEncoderWithAction(PixelEncoder):\n def __init__(self,\n observation_shape,\n action_size,\n filters=None,\n feature_size=None,\n use_batch_norm=False,\n discrete_action=False,\n activation=torch.relu):\n self.action_size = action_size\n self.discrete_action = discrete_action\n super().__init__(observation_shape, filters, feature_size,\n use_batch_norm, activation)\n\n def _get_linear_input_size(self):\n size = super()._get_linear_input_size()\n return size + self.action_size\n\n def forward(self, x, action):\n h = self._conv_encode(x)\n\n if self.discrete_action:\n action = F.one_hot(action.view(-1).long(),\n 
num_classes=self.action_size).float()\n\n # concat feature and action\n h = torch.cat([h.view(h.shape[0], -1), action], dim=1)\n h = self.activation(self.fc(h))\n if self.use_batch_norm:\n h = self.fc_bn(h)\n\n return h\n\n\nclass VectorEncoder(nn.Module):\n def __init__(self,\n observation_shape,\n hidden_units=None,\n use_batch_norm=False,\n activation=torch.relu):\n super().__init__()\n self.observation_shape = observation_shape\n\n if hidden_units is None:\n hidden_units = [256, 256]\n\n self.use_batch_norm = use_batch_norm\n self.feature_size = hidden_units[-1]\n self.activation = activation\n\n in_units = [observation_shape[0]] + hidden_units[:-1]\n self.fcs = nn.ModuleList()\n self.bns = nn.ModuleList()\n for in_unit, out_unit in zip(in_units, hidden_units):\n self.fcs.append(nn.Linear(in_unit, out_unit))\n if use_batch_norm:\n self.bns.append(nn.BatchNorm1d(out_unit))\n\n def forward(self, x):\n h = x\n for i in range(len(self.fcs)):\n h = self.activation(self.fcs[i](h))\n if self.use_batch_norm:\n h = self.bns[i](h)\n return h\n\n\nclass VectorEncoderWithAction(VectorEncoder):\n def __init__(self,\n observation_shape,\n action_size,\n hidden_units=None,\n use_batch_norm=False,\n discrete_action=False,\n activation=torch.relu):\n self.action_size = action_size\n self.discrete_action = discrete_action\n concat_shape = (observation_shape[0] + action_size, )\n super().__init__(concat_shape, hidden_units, use_batch_norm,\n activation)\n self.observation_shape = observation_shape\n\n def forward(self, x, action):\n if self.discrete_action:\n action = F.one_hot(action.view(-1).long(),\n num_classes=self.action_size).float()\n\n x = torch.cat([x, action], dim=1)\n return super().forward(x)\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.BatchNorm1d", "torch.rand", "torch.no_grad", "torch.nn.Conv2d", "torch.nn.ModuleList", "torch.sigmoid", "torch.cat" ] ]
NLP-Discourse-SoochowU/TDDiscourseParser
[ "2f9c7cef85c564c47b368ee4935caf1fad7c598d" ]
[ "treebuilder/partptr/train.py" ]
[ "# coding: UTF-8\r\nimport argparse\r\nimport logging\r\nimport random\r\nimport torch\r\nimport copy\r\nimport numpy as np\r\nfrom dataset import CDTB\r\nfrom collections import Counter\r\nfrom itertools import chain\r\nfrom structure.vocab import Vocab, Label\r\nfrom structure.nodes import node_type_filter, EDU, Relation, Sentence, TEXT\r\nfrom treebuilder.partptr.model import PartitionPtr\r\nfrom treebuilder.partptr.parser import PartitionPtrParser\r\nimport torch.optim as optim\r\nfrom util.eval import parse_eval, gen_parse_report\r\nfrom tensorboardX import SummaryWriter\r\n\r\n\r\ndef build_vocab(dataset):\r\n word_freq = Counter()\r\n pos_freq = Counter()\r\n nuc_freq = Counter()\r\n rel_freq = Counter()\r\n for paragraph in chain(*dataset):\r\n for node in paragraph.iterfind(filter=node_type_filter([EDU, Relation])):\r\n if isinstance(node, EDU):\r\n word_freq.update(node.words)\r\n pos_freq.update(node.tags)\r\n elif isinstance(node, Relation):\r\n nuc_freq[node.nuclear] += 1\r\n rel_freq[node.ftype] += 1\r\n\r\n word_vocab = Vocab(\"word\", word_freq)\r\n pos_vocab = Vocab(\"part of speech\", pos_freq)\r\n nuc_label = Label(\"nuclear\", nuc_freq)\r\n rel_label = Label(\"relation\", rel_freq)\r\n return word_vocab, pos_vocab, nuc_label, rel_label\r\n\r\n\r\ndef gen_decoder_data(root, edu2ids):\r\n # splits s0 s1 s2 s3 s4 s5 s6\r\n # edus s/ e0 e1 e2 e3 e4 e5 /s\r\n splits = [] # [(0, 3, 6, NS), (0, 2, 3, SN), ...]\r\n child_edus = [] # [edus]\r\n\r\n if isinstance(root, EDU):\r\n child_edus.append(root)\r\n elif isinstance(root, Sentence):\r\n for child in root:\r\n _child_edus, _splits = gen_decoder_data(child, edu2ids)\r\n child_edus.extend(_child_edus)\r\n splits.extend(_splits)\r\n elif isinstance(root, Relation):\r\n children = [gen_decoder_data(child, edu2ids) for child in root]\r\n if len(children) < 2:\r\n raise ValueError(\"relation node should have at least 2 children\")\r\n\r\n while children:\r\n left_child_edus, left_child_splits = children.pop(0)\r\n if children:\r\n last_child_edus, _ = children[-1]\r\n start = edu2ids[left_child_edus[0]]\r\n split = edu2ids[left_child_edus[-1]] + 1\r\n end = edu2ids[last_child_edus[-1]] + 1\r\n nuc = root.nuclear\r\n rel = root.ftype\r\n splits.append((start, split, end, nuc, rel))\r\n child_edus.extend(left_child_edus)\r\n splits.extend(left_child_splits)\r\n return child_edus, splits\r\n\r\n\r\ndef numericalize(dataset, word_vocab, pos_vocab, nuc_label, rel_label):\r\n instances = []\r\n for paragraph in filter(lambda d: d.root_relation(), chain(*dataset)):\r\n encoder_inputs = []\r\n decoder_inputs = []\r\n pred_splits = []\r\n pred_nucs = []\r\n pred_rels = []\r\n edus = list(paragraph.edus())\r\n for edu in edus:\r\n edu_word_ids = [word_vocab[word] for word in edu.words]\r\n edu_pos_ids = [pos_vocab[pos] for pos in edu.tags]\r\n encoder_inputs.append((edu_word_ids, edu_pos_ids))\r\n edu2ids = {edu: i for i, edu in enumerate(edus)}\r\n _, splits = gen_decoder_data(paragraph.root_relation(), edu2ids)\r\n for start, split, end, nuc, rel in splits:\r\n decoder_inputs.append((start, end))\r\n pred_splits.append(split)\r\n pred_nucs.append(nuc_label[nuc])\r\n pred_rels.append(rel_label[rel])\r\n instances.append((encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels))\r\n return instances\r\n\r\n\r\ndef gen_batch_iter(instances, batch_size, use_gpu=False):\r\n random_instances = np.random.permutation(instances)\r\n num_instances = len(instances)\r\n offset = 0\r\n while offset < num_instances:\r\n batch = 
random_instances[offset: min(num_instances, offset+batch_size)]\r\n\r\n # find out max seqlen of edus and words of edus\r\n num_batch = batch.shape[0]\r\n max_edu_seqlen = 0\r\n max_word_seqlen = 0\r\n for encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels in batch:\r\n max_edu_seqlen = max_edu_seqlen if max_edu_seqlen >= len(encoder_inputs) else len(encoder_inputs)\r\n for edu_word_ids, edu_pos_ids in encoder_inputs:\r\n max_word_seqlen = max_word_seqlen if max_word_seqlen >= len(edu_word_ids) else len(edu_word_ids)\r\n\r\n # batch to numpy\r\n e_input_words = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)\r\n e_input_poses = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)\r\n e_masks = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.uint8)\r\n\r\n d_inputs = np.zeros([num_batch, max_edu_seqlen-1, 2], dtype=np.long)\r\n d_outputs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)\r\n d_output_nucs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)\r\n d_output_rels = np.zeros([num_batch, max_edu_seqlen - 1], dtype=np.long)\r\n d_masks = np.zeros([num_batch, max_edu_seqlen-1, max_edu_seqlen+1], dtype=np.uint8)\r\n\r\n for batchi, (encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels) in enumerate(batch):\r\n for edui, (edu_word_ids, edu_pos_ids) in enumerate(encoder_inputs):\r\n word_seqlen = len(edu_word_ids)\r\n e_input_words[batchi][edui][:word_seqlen] = edu_word_ids\r\n e_input_poses[batchi][edui][:word_seqlen] = edu_pos_ids\r\n e_masks[batchi][edui][:word_seqlen] = 1\r\n\r\n for di, decoder_input in enumerate(decoder_inputs):\r\n d_inputs[batchi][di] = decoder_input\r\n d_masks[batchi][di][decoder_input[0]+1: decoder_input[1]] = 1\r\n d_outputs[batchi][:len(pred_splits)] = pred_splits\r\n d_output_nucs[batchi][:len(pred_nucs)] = pred_nucs\r\n d_output_rels[batchi][:len(pred_rels)] = pred_rels\r\n\r\n # numpy to torch\r\n e_input_words = torch.from_numpy(e_input_words).long()\r\n e_input_poses = torch.from_numpy(e_input_poses).long()\r\n e_masks = torch.from_numpy(e_masks).byte()\r\n d_inputs = torch.from_numpy(d_inputs).long()\r\n d_outputs = torch.from_numpy(d_outputs).long()\r\n d_output_nucs = torch.from_numpy(d_output_nucs).long()\r\n d_output_rels = torch.from_numpy(d_output_rels).long()\r\n d_masks = torch.from_numpy(d_masks).byte()\r\n\r\n if use_gpu:\r\n e_input_words = e_input_words.cuda()\r\n e_input_poses = e_input_poses.cuda()\r\n e_masks = e_masks.cuda()\r\n d_inputs = d_inputs.cuda()\r\n d_outputs = d_outputs.cuda()\r\n d_output_nucs = d_output_nucs.cuda()\r\n d_output_rels = d_output_rels.cuda()\r\n d_masks = d_masks.cuda()\r\n\r\n yield (e_input_words, e_input_poses, e_masks), (d_inputs, d_masks), (d_outputs, d_output_nucs, d_output_rels)\r\n offset = offset + batch_size\r\n\r\n\r\ndef parse_and_eval(dataset, model):\r\n model.eval()\r\n parser = PartitionPtrParser(model)\r\n golds = list(filter(lambda d: d.root_relation(), chain(*dataset)))\r\n num_instances = len(golds)\r\n strips = []\r\n for paragraph in golds:\r\n edus = []\r\n for edu in paragraph.edus():\r\n edu_copy = EDU([TEXT(edu.text)])\r\n setattr(edu_copy, \"words\", edu.words)\r\n setattr(edu_copy, \"tags\", edu.tags)\r\n edus.append(edu_copy)\r\n strips.append(edus)\r\n parses = []\r\n for edus in strips:\r\n parse = parser.parse(edus)\r\n parses.append(parse)\r\n return num_instances, parse_eval(parses, golds)\r\n\r\n\r\ndef model_score(scores):\r\n eval_score = sum(score[2] for score in scores)\r\n 
return eval_score\r\n\r\n\r\ndef main(args):\r\n # set seed for reproducibility\r\n random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n np.random.seed(args.seed)\r\n\r\n # load dataset\r\n cdtb = CDTB(args.data, \"TRAIN\", \"VALIDATE\", \"TEST\", ctb_dir=args.ctb_dir, preprocess=True, cache_dir=args.cache_dir)\r\n # build vocabulary\r\n word_vocab, pos_vocab, nuc_label, rel_label = build_vocab(cdtb.train)\r\n\r\n trainset = numericalize(cdtb.train, word_vocab, pos_vocab, nuc_label, rel_label)\r\n logging.info(\"num of instances trainset: %d\" % len(trainset))\r\n logging.info(\"args: %s\" % str(args))\r\n # build model\r\n model = PartitionPtr(hidden_size=args.hidden_size, dropout=args.dropout,\r\n word_vocab=word_vocab, pos_vocab=pos_vocab, nuc_label=nuc_label, rel_label=rel_label,\r\n pretrained=args.pretrained, w2v_size=args.w2v_size, w2v_freeze=args.w2v_freeze,\r\n pos_size=args.pos_size,\r\n split_mlp_size=args.split_mlp_size, nuc_mlp_size=args.nuc_mlp_size,\r\n rel_mlp_size=args.rel_mlp_size,\r\n use_gpu=args.use_gpu)\r\n if args.use_gpu:\r\n model.cuda()\r\n logging.info(\"model:\\n%s\" % str(model))\r\n\r\n # train and evaluate\r\n niter = 0\r\n log_splits_loss = 0.\r\n log_nucs_loss = 0.\r\n log_rels_loss = 0.\r\n log_loss = 0.\r\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)\r\n writer = SummaryWriter(args.log_dir)\r\n logging.info(\"hint: run 'tensorboard --logdir %s' to observe training status\" % args.log_dir)\r\n best_model = None\r\n best_model_score = 0.\r\n for nepoch in range(1, args.epoch + 1):\r\n batch_iter = gen_batch_iter(trainset, args.batch_size, args.use_gpu)\r\n for nbatch, (e_inputs, d_inputs, grounds) in enumerate(batch_iter, start=1):\r\n niter += 1\r\n model.train()\r\n optimizer.zero_grad()\r\n splits_loss, nucs_loss, rels_loss = model.loss(e_inputs, d_inputs, grounds)\r\n loss = args.a_split_loss * splits_loss + args.a_nuclear_loss * nucs_loss + args.a_relation_loss * rels_loss\r\n loss.backward()\r\n optimizer.step()\r\n log_splits_loss += splits_loss.item()\r\n log_nucs_loss += nucs_loss.item()\r\n log_rels_loss += rels_loss.item()\r\n log_loss += loss.item()\r\n if niter % args.log_every == 0:\r\n logging.info(\"[iter %-6d]epoch: %-3d, batch %-5d,\"\r\n \"train splits loss:%.5f, nuclear loss %.5f, relation loss %.5f, loss %.5f\" %\r\n (niter, nepoch, nbatch, log_splits_loss, log_nucs_loss, log_rels_loss, log_loss))\r\n writer.add_scalar(\"train/split_loss\", log_splits_loss, niter)\r\n writer.add_scalar(\"train/nuclear_loss\", log_nucs_loss, niter)\r\n writer.add_scalar(\"train/relation_loss\", log_rels_loss, niter)\r\n writer.add_scalar(\"train/loss\", log_loss, niter)\r\n log_splits_loss = 0.\r\n log_nucs_loss = 0.\r\n log_rels_loss = 0.\r\n log_loss = 0.\r\n if niter % args.validate_every == 0:\r\n num_instances, validate_scores = parse_and_eval(cdtb.validate, model)\r\n logging.info(\"validation on %d instances\" % num_instances)\r\n logging.info(gen_parse_report(*validate_scores))\r\n writer.add_scalar(\"validate/span_f1\", validate_scores[0][2], niter)\r\n writer.add_scalar(\"validate/nuclear_f1\", validate_scores[1][2], niter)\r\n writer.add_scalar(\"validate/coarse_relation_f1\", validate_scores[2][2], niter)\r\n writer.add_scalar(\"validate/fine_relation_f1\", validate_scores[3][2], niter)\r\n new_model_score = model_score(validate_scores)\r\n if new_model_score > best_model_score:\r\n # test on testset with new best model\r\n best_model_score = new_model_score\r\n best_model = copy.deepcopy(model)\r\n 
logging.info(\"test on new best model\")\r\n num_instances, test_scores = parse_and_eval(cdtb.test, best_model)\r\n logging.info(\"test on %d instances\" % num_instances)\r\n logging.info(gen_parse_report(*test_scores))\r\n writer.add_scalar(\"test/span_f1\", test_scores[0][2], niter)\r\n writer.add_scalar(\"test/nuclear_f1\", test_scores[1][2], niter)\r\n writer.add_scalar(\"test/coarse_relation_f1\", test_scores[2][2], niter)\r\n writer.add_scalar(\"test/fine_relation_f1\", test_scores[3][2], niter)\r\n if best_model:\r\n # evaluation and save best model\r\n logging.info(\"final test result\")\r\n num_instances, test_scores = parse_and_eval(cdtb.test, best_model)\r\n logging.info(\"test on %d instances\" % num_instances)\r\n logging.info(gen_parse_report(*test_scores))\r\n logging.info(\"save best model to %s\" % args.model_save)\r\n with open(args.model_save, \"wb+\") as model_fd:\r\n torch.save(best_model, model_fd)\r\n writer.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig(level=logging.INFO)\r\n arg_parser = argparse.ArgumentParser()\r\n\r\n # dataset parameters\r\n arg_parser.add_argument(\"--data\", default=\"data/CDTB\")\r\n arg_parser.add_argument(\"--ctb_dir\", default=\"data/CTB\")\r\n arg_parser.add_argument(\"--cache_dir\", default=\"data/cache\")\r\n\r\n # model parameters\r\n arg_parser.add_argument(\"-hidden_size\", default=512, type=int)\r\n arg_parser.add_argument(\"-dropout\", default=0.33, type=float)\r\n # w2v_group = arg_parser.add_mutually_exclusive_group(required=True)\r\n arg_parser.add_argument(\"-pretrained\", default=\"data/pretrained/sgns.renmin.word\")\r\n arg_parser.add_argument(\"-w2v_size\", type=int)\r\n arg_parser.add_argument(\"-pos_size\", default=30, type=int)\r\n arg_parser.add_argument(\"-split_mlp_size\", default=64, type=int)\r\n arg_parser.add_argument(\"-nuc_mlp_size\", default=32, type=int)\r\n arg_parser.add_argument(\"-rel_mlp_size\", default=128, type=int)\r\n arg_parser.add_argument(\"--w2v_freeze\", dest=\"w2v_freeze\", action=\"store_true\")\r\n arg_parser.set_defaults(w2v_freeze=True)\r\n\r\n # train parameters\r\n arg_parser.add_argument(\"-epoch\", default=20, type=int)\r\n arg_parser.add_argument(\"-batch_size\", default=64, type=int)\r\n arg_parser.add_argument(\"-lr\", default=0.001, type=float)\r\n arg_parser.add_argument(\"-l2\", default=0.0, type=float)\r\n arg_parser.add_argument(\"-log_every\", default=10, type=int)\r\n arg_parser.add_argument(\"-validate_every\", default=10, type=int)\r\n arg_parser.add_argument(\"-a_split_loss\", default=0.3, type=float)\r\n arg_parser.add_argument(\"-a_nuclear_loss\", default=1.0, type=float)\r\n arg_parser.add_argument(\"-a_relation_loss\", default=1.0, type=float)\r\n arg_parser.add_argument(\"-log_dir\", default=\"data/log\")\r\n arg_parser.add_argument(\"-model_save\", default=\"data/models/treebuilder.partptr.model\")\r\n arg_parser.add_argument(\"--seed\", default=21, type=int)\r\n arg_parser.add_argument(\"--use_gpu\", dest=\"use_gpu\", action=\"store_true\")\r\n arg_parser.set_defaults(use_gpu=True)\r\n\r\n main(arg_parser.parse_args())\r\n" ]
[ [ "numpy.zeros", "numpy.random.permutation", "torch.manual_seed", "torch.save", "numpy.random.seed", "torch.from_numpy" ] ]
LaudateCorpus1/coremltools
[ "777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc", "777a4460d6823e5e91dea4fa3eacb0b11c7d5dfc" ]
[ "coremltools/converters/mil/mil/passes/conv_scale_fusion.py", "coremltools/test/sklearn_tests/test_SVR.py" ]
[ "# Copyright (c) 2021, Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport numpy as np\n\nfrom coremltools.converters.mil.mil.passes.pass_registry import register_pass\nfrom coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass\nfrom coremltools.converters.mil.mil import Builder as mb\n\n\ndef _try_to_transform(conv_op, scale_op, block):\n\n # get the scale\n if scale_op.x.val is None and scale_op.y.val is None:\n return False\n scale_var = scale_op.x if scale_op.x.val is not None else scale_op.y\n scale = scale_var.val\n\n # for the scalar case, the scalar can be either\n # 1. a python int/float\n # 2. a 0d numpy array\n # 3. a 1d numpy array with shape (1,)\n\n is_scalar = True\n if isinstance(scale, np.ndarray):\n if scale.shape == ():\n scale = scale.tolist()\n elif scale.shape == (1) or scale.shape == (1,):\n scale = scale[0]\n else:\n is_scalar = False\n\n # get weight and bias and groups from conv layer\n if conv_op.weight.val is None:\n return False\n conv_weight = conv_op.weight.val\n conv_bias = conv_op.bias\n groups = conv_op.groups.val\n\n # get type of the conv layer\n is_deconv = conv_op.op_type == 'conv_transpose'\n is_conv_1d = len(conv_weight.shape) == 3\n\n # D_in denotes the spatial dimensions for conv kernel weight\n # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]\n # for conv, conv_weight has shape [Cout, Cin / groups, *D_in]\n if is_deconv:\n Cout = conv_weight.shape[1] * groups\n Cin = conv_weight.shape[0]\n else:\n Cout = conv_weight.shape[0]\n Cin = conv_weight.shape[1] * groups\n\n # for the vector scale case, check if the shape is broacastable\n if not is_scalar:\n if not np.product(scale.shape) == Cout:\n return False\n if len(scale.shape) == len(conv_weight.shape):\n if not scale.shape[1] == Cout:\n return False\n elif len(scale.shape) == len(conv_weight.shape) - 1:\n if not scale.shape[0] == Cout:\n return False\n else:\n return False\n\n # transform the scale to 1./scale for the real_div case\n if scale_op.op_type == \"real_div\":\n scale = 1./scale\n\n # get the type of the conv weight\n conv_weight_type = conv_weight.dtype\n\n # create bias for conv if not exist\n if conv_bias is None:\n conv_bias = np.zeros(Cout)\n else:\n conv_bias = conv_bias.val\n conv_bias = conv_bias.astype(conv_weight_type)\n\n # get the original shape of weight and bias\n origin_weight_shape = conv_weight.shape\n origin_bias_shape = conv_bias.shape\n\n # update the weight/bias for conv layer\n if is_scalar:\n new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)\n new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)\n\n else:\n scale = np.reshape(scale, (Cout))\n new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)\n new_conv_weight = []\n if is_deconv:\n conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])\n conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]))\n\n for i in range(Cout):\n _conv_weight = conv_weight[i] * scale[i]\n new_conv_weight.append(_conv_weight)\n new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)\n\n if is_deconv:\n new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]))\n new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])\n\n # make sure the 
updated weight and bias have the same shape as the original ones\n assert new_conv_weight.shape == origin_weight_shape, \"conv weight should have the same shape before and after the fuse_conv_scale pass.\"\n assert new_conv_bias.shape == origin_bias_shape, \"conv bias should have the same shape before and after the fuse_conv_scale pass.\"\n\n # create a new conv op with the new weight, bias value, copying rest of the attributes\n out_name = scale_op.outputs[0].name\n conv_kargs = {\"weight\": new_conv_weight, \"bias\": new_conv_bias, \"name\": out_name, \"before_op\": conv_op}\n\n for k, v in conv_op.inputs.items():\n if k in [\"weight\", \"bias\"]:\n continue\n conv_kargs[k] = v\n\n if is_deconv:\n x = mb.conv_transpose(**conv_kargs)\n else:\n x = mb.conv(**conv_kargs)\n\n scale_op.enclosing_block.replace_uses_of_var_after_op(\n anchor_op=scale_op, old_var=scale_op.outputs[0], new_var=x\n )\n # Remove all the ops at once\n block.remove_ops([conv_op, scale_op])\n return True\n\n@register_pass(namespace=\"common\")\nclass fuse_conv_scale(AbstractGraphPass):\n \"\"\"\n Fold mul/div into conv/conv_transpose by updating the weight/bias of the convolution layers.\n\n The scale const can be a single number (scalar) or a vector with a broacasable shape,\n for instance, if the output of the conv/deconv layer is (B, Cout, H, W),\n const of shape (Cout, 1, 1) and (1, Cout, 1, 1) are allowed.\n\n Given:\n %2 = conv(%1)\n ...\n %3 = mul(%2, constant) # where constant is the scale constant\n ...\n\n Result:\n %3 = conv(%1)\n ...\n\n \"\"\"\n def __init__(self):\n self.ops_to_skip = set()\n\n def set_ops_to_skip(self, prog):\n pass\n\n def _fuse_conv_scale_block(self, block):\n\n def _match_pattern(op):\n if op.op_type == \"conv\" or op.op_type == \"conv_transpose\":\n # abort fusion if op output is also a block output\n if op.outputs[0] in op.enclosing_block.outputs:\n return None\n # find batch_norm op\n child_ops = op.outputs[0].child_ops\n if len(child_ops) == 1:\n scale_op_candidate = list(child_ops)[0]\n if scale_op_candidate.op_type in [\"mul\", \"real_div\"]:\n return scale_op_candidate\n return None\n\n fusion_occurred = False\n for op in list(block.operations):\n for b in op.blocks:\n block_changed = True\n while block_changed:\n block_changed = self._fuse_conv_scale_block(b)\n if len(op.blocks) > 0:\n # This op can't be conv or conv_transpose\n continue\n\n scale_op = _match_pattern(op)\n\n if op in self.ops_to_skip or scale_op in self.ops_to_skip:\n continue\n\n if scale_op is not None:\n with block:\n fusion_occurred = _try_to_transform(op, scale_op, block)\n # has to break as the downstream iterator is affected.\n if fusion_occurred:\n return fusion_occurred\n return fusion_occurred\n\n def apply(self, prog):\n self.set_ops_to_skip(prog)\n for f in prog.functions.values():\n block_changed = True\n while block_changed:\n block_changed = self._fuse_conv_scale_block(f)\n", "# Copyright (c) 2017, Apple Inc. 
All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can be\n# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport pandas as pd\nimport numpy as np\nimport random\nimport tempfile\nimport unittest\nimport pytest\n\nfrom coremltools._deps import (\n _HAS_LIBSVM,\n MSG_LIBSVM_NOT_FOUND,\n _HAS_SKLEARN,\n MSG_SKLEARN_NOT_FOUND,\n)\nfrom coremltools.models.utils import evaluate_regressor, _macos_version, _is_macos\n\nif _HAS_LIBSVM:\n import svmutil\n import svm\n from coremltools.converters import libsvm\n\nif _HAS_SKLEARN:\n from sklearn.svm import SVR\n from sklearn.datasets import load_boston\n from coremltools.converters import sklearn as sklearn_converter\n from sklearn.preprocessing import OneHotEncoder\n\n\[email protected](not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)\nclass SvrScikitTest(unittest.TestCase):\n \"\"\"\n Unit test class for testing scikit-learn sklearn_converter.\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n Set up the unit test by loading the dataset and training a model.\n \"\"\"\n if not _HAS_SKLEARN:\n return\n\n scikit_data = load_boston()\n scikit_model = SVR(kernel=\"linear\")\n scikit_model.fit(scikit_data[\"data\"], scikit_data[\"target\"])\n\n # Save the data and the model\n self.scikit_data = scikit_data\n self.scikit_model = scikit_model\n\n def test_conversion_bad_inputs(self):\n # Error on converting an untrained model\n with self.assertRaises(TypeError):\n model = SVR()\n spec = sklearn_converter.convert(model, \"data\", \"out\")\n\n # Check the expected class during covnersion.\n with self.assertRaises(TypeError):\n model = OneHotEncoder()\n spec = sklearn_converter.convert(model, \"data\", \"out\")\n\n @pytest.mark.slow\n def test_evaluation_stress_test(self):\n self._test_evaluation(allow_slow=True)\n\n def test_evaluation(self):\n self._test_evaluation(allow_slow=False)\n\n def _test_evaluation(self, allow_slow):\n \"\"\"\n Test that the same predictions are made\n \"\"\"\n\n # Generate some smallish (some kernels take too long on anything else) random data\n x, y = [], []\n for _ in range(50):\n cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)\n x.append([cur_x1, cur_x2])\n y.append(1 + 2 * cur_x1 + 3 * cur_x2)\n\n input_names = [\"x1\", \"x2\"]\n df = pd.DataFrame(x, columns=input_names)\n\n # Parameters to test\n kernel_parameters = [\n {},\n {\"kernel\": \"rbf\", \"gamma\": 1.2},\n {\"kernel\": \"linear\"},\n {\"kernel\": \"poly\"},\n {\"kernel\": \"poly\", \"degree\": 2},\n {\"kernel\": \"poly\", \"gamma\": 0.75},\n {\"kernel\": \"poly\", \"degree\": 0, \"gamma\": 0.9, \"coef0\": 2},\n {\"kernel\": \"sigmoid\"},\n {\"kernel\": \"sigmoid\", \"gamma\": 1.3},\n {\"kernel\": \"sigmoid\", \"coef0\": 0.8},\n {\"kernel\": \"sigmoid\", \"coef0\": 0.8, \"gamma\": 0.5},\n ]\n non_kernel_parameters = [\n {},\n {\"C\": 1},\n {\"C\": 1.5, \"epsilon\": 0.5, \"shrinking\": True},\n {\"C\": 0.5, \"epsilon\": 1.5, \"shrinking\": False},\n ]\n\n # Test\n for param1 in non_kernel_parameters:\n for param2 in kernel_parameters:\n cur_params = param1.copy()\n cur_params.update(param2)\n print(\"cur_params=\" + str(cur_params))\n\n cur_model = SVR(**cur_params)\n cur_model.fit(x, y)\n df[\"prediction\"] = cur_model.predict(x)\n\n spec = sklearn_converter.convert(cur_model, input_names, \"target\")\n\n if _is_macos() and _macos_version() >= (10, 13):\n metrics = evaluate_regressor(spec, df)\n self.assertAlmostEqual(metrics[\"max_error\"], 0)\n\n if not allow_slow:\n 
break\n\n if not allow_slow:\n break\n\n\[email protected](not _HAS_LIBSVM, MSG_LIBSVM_NOT_FOUND)\[email protected](not _HAS_SKLEARN, MSG_SKLEARN_NOT_FOUND)\nclass EpsilonSVRLibSVMTest(unittest.TestCase):\n \"\"\"\n Unit test class for testing the libsvm sklearn converter.\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n Set up the unit test by loading the dataset and training a model.\n \"\"\"\n if not _HAS_SKLEARN:\n return\n if not _HAS_LIBSVM:\n return\n\n scikit_data = load_boston()\n prob = svmutil.svm_problem(scikit_data[\"target\"], scikit_data[\"data\"].tolist())\n param = svmutil.svm_parameter()\n param.svm_type = svmutil.EPSILON_SVR\n param.kernel_type = svmutil.LINEAR\n param.eps = 1\n\n self.libsvm_model = svmutil.svm_train(prob, param)\n\n def test_input_names(self):\n data = load_boston()\n df = pd.DataFrame({\"input\": data[\"data\"].tolist()})\n df[\"input\"] = df[\"input\"].apply(np.array)\n\n # Default values\n spec = libsvm.convert(self.libsvm_model)\n if _is_macos() and _macos_version() >= (10, 13):\n (df[\"prediction\"], _, _) = svmutil.svm_predict(\n data[\"target\"], data[\"data\"].tolist(), self.libsvm_model\n )\n metrics = evaluate_regressor(spec, df)\n self.assertAlmostEqual(metrics[\"max_error\"], 0)\n\n # One extra parameters. This is legal/possible.\n num_inputs = len(data[\"data\"][0])\n spec = libsvm.convert(self.libsvm_model, input_length=num_inputs + 1)\n\n # Not enought input names.\n input_names = [\"this\", \"is\", \"not\", \"enought\", \"names\"]\n with self.assertRaises(ValueError):\n libsvm.convert(self.libsvm_model, input_names=input_names)\n with self.assertRaises(ValueError):\n libsvm.convert(self.libsvm_model, input_length=num_inputs - 1)\n\n def test_conversion_from_filesystem(self):\n libsvm_model_path = tempfile.mktemp(suffix=\"model.libsvm\")\n svmutil.svm_save_model(libsvm_model_path, self.libsvm_model)\n spec = libsvm.convert(\n libsvm_model_path, input_names=\"data\", target_name=\"target\"\n )\n\n def test_conversion_bad_inputs(self):\n # Check the expected class during covnersion.\n with self.assertRaises(TypeError):\n model = OneHotEncoder()\n spec = libsvm.convert(model, \"data\", \"out\")\n\n @pytest.mark.slow\n def test_evaluation_stress_test(self):\n self._test_evaluation(allow_slow=True)\n\n def test_evaluation(self):\n self._test_evaluation(allow_slow=False)\n\n def _test_evaluation(self, allow_slow):\n \"\"\"\n Test that the same predictions are made\n \"\"\"\n from svm import svm_parameter, svm_problem\n from svmutil import svm_train, svm_predict\n\n # Generate some smallish (poly kernels take too long on anything else) random data\n x, y = [], []\n for _ in range(50):\n cur_x1, cur_x2 = random.gauss(2, 3), random.gauss(-1, 2)\n x.append([cur_x1, cur_x2])\n y.append(1 + 2 * cur_x1 + 3 * cur_x2)\n\n input_names = [\"x1\", \"x2\"]\n df = pd.DataFrame(x, columns=input_names)\n prob = svm_problem(y, x)\n\n # Parameters\n base_param = \"-s 3\" # model type is epsilon SVR\n non_kernel_parameters = [\"\", \"-c 1.5 -p 0.5 -h 1\", \"-c 0.5 -p 0.5 -h 0\"]\n kernel_parameters = [\n \"\",\n \"-t 2 -g 1.2\", # rbf kernel\n \"-t 0\", # linear kernel\n \"-t 1\",\n \"-t 1 -d 2\",\n \"-t 1 -g 0.75\",\n \"-t 1 -d 0 -g 0.9 -r 2\", # poly kernel\n \"-t 3\",\n \"-t 3 -g 1.3\",\n \"-t 3 -r 0.8\",\n \"-t 3 -r 0.8 -g 0.5\", # sigmoid kernel\n ]\n\n for param1 in non_kernel_parameters:\n for param2 in kernel_parameters:\n param_str = \" \".join([base_param, param1, param2])\n print(param_str)\n param = svm_parameter(param_str)\n\n model = 
svm_train(prob, param)\n (df[\"prediction\"], _, _) = svm_predict(y, x, model)\n\n spec = libsvm.convert(\n model, input_names=input_names, target_name=\"target\"\n )\n\n if _is_macos() and _macos_version() >= (10, 13):\n metrics = evaluate_regressor(spec, df)\n self.assertAlmostEqual(metrics[\"max_error\"], 0)\n\n if not allow_slow:\n break\n\n if not allow_slow:\n break\n" ]
[ [ "numpy.transpose", "numpy.zeros", "numpy.reshape", "numpy.product", "numpy.array" ], [ "sklearn.svm.SVR", "sklearn.datasets.load_boston", "pandas.DataFrame", "sklearn.preprocessing.OneHotEncoder" ] ]
zeou1/maggot_models
[ "4e1b518c2981ab1ca9607099c3813e8429d94ca4", "4e1b518c2981ab1ca9607099c3813e8429d94ca4", "4e1b518c2981ab1ca9607099c3813e8429d94ca4" ]
[ "notebooks/39.1-BDP-unbiased-clustering.py", "notebooks/103.0-BDP-cascade-invert.py", "notebooks/71.0-BDP-pdiff.py" ]
[ "# %% [markdown]\n# # Imports\nimport json\nimport os\nimport warnings\nfrom operator import itemgetter\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom joblib import Parallel, delayed\nfrom joblib.parallel import Parallel, delayed\nfrom sklearn.metrics import adjusted_rand_score\nimport networkx as nx\n\nfrom graspy.cluster import GaussianCluster, AutoGMMCluster\nfrom graspy.embed import AdjacencySpectralEmbed, OmnibusEmbed\nfrom graspy.models import DCSBMEstimator, SBMEstimator\nfrom graspy.plot import heatmap, pairplot\nfrom graspy.utils import binarize, cartprod, get_lcc, pass_to_ranks\nfrom src.data import load_everything\nfrom src.utils import export_skeleton_json, savefig\nfrom src.visualization import clustergram, palplot, sankey\nfrom src.hierarchy import signal_flow\n\nwarnings.simplefilter(\"ignore\", category=FutureWarning)\n\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\n\n# %% [markdown]\n# # Parameters\nBRAIN_VERSION = \"2019-12-09\"\nGRAPH_TYPES = [\"Gad\", \"Gaa\", \"Gdd\", \"Gda\"]\nGRAPH_TYPE_LABELS = [r\"A $\\to$ D\", r\"A $\\to$ A\", r\"D $\\to$ D\", r\"D $\\to$ A\"]\nN_GRAPH_TYPES = len(GRAPH_TYPES)\n\nSAVEFIGS = True\nDEFAULT_FMT = \"png\"\nDEFUALT_DPI = 150\n\nSAVESKELS = False\n\nMIN_CLUSTERS = 8\nMAX_CLUSTERS = 8\nN_INIT = 50\nPTR = True\nONLY_RIGHT = True\n\nembed = \"LSE\"\ncluster = \"GMM\"\nn_components = 4\nif cluster == \"GMM\":\n gmm_params = {\"n_init\": N_INIT, \"covariance_type\": \"all\"}\nelif cluster == \"AutoGMM\":\n gmm_params = {\"max_agglom_size\": None}\n\nnp.random.seed(23409857)\n\n\ndef stashfig(name, **kws):\n if SAVEFIGS:\n savefig(name, foldername=FNAME, fmt=DEFAULT_FMT, dpi=DEFUALT_DPI, **kws)\n\n\ndef stashskel(name, ids, colors, palette=None, **kws):\n if SAVESKELS:\n return export_skeleton_json(\n name, ids, colors, palette=palette, foldername=FNAME, **kws\n )\n\n\ndef ase(adj, n_components):\n if PTR:\n adj = pass_to_ranks(adj)\n ase = AdjacencySpectralEmbed(n_components=n_components)\n latent = ase.fit_transform(adj)\n latent = np.concatenate(latent, axis=-1)\n return latent\n\n\ndef to_laplace(graph, form=\"DAD\", regularizer=None):\n r\"\"\"\n A function to convert graph adjacency matrix to graph laplacian. \n Currently supports I-DAD, DAD, and R-DAD laplacians, where D is the diagonal\n matrix of degrees of each node raised to the -1/2 power, I is the \n identity matrix, and A is the adjacency matrix.\n \n R-DAD is regularized laplacian: where :math:`D_t = D + regularizer*I`.\n Parameters\n ----------\n graph: object\n Either array-like, (n_vertices, n_vertices) numpy array,\n or an object of type networkx.Graph.\n form: {'I-DAD' (default), 'DAD', 'R-DAD'}, string, optional\n \n - 'I-DAD'\n Computes :math:`L = I - D*A*D`\n - 'DAD'\n Computes :math:`L = D*A*D`\n - 'R-DAD'\n Computes :math:`L = D_t*A*D_t` where :math:`D_t = D + regularizer*I`\n regularizer: int, float or None, optional (default=None)\n Constant to be added to the diagonal of degree matrix. If None, average \n node degree is added. If int or float, must be >= 0. Only used when \n ``form`` == 'R-DAD'.\n Returns\n -------\n L: numpy.ndarray\n 2D (n_vertices, n_vertices) array representing graph \n laplacian of specified form\n References\n ----------\n .. [1] Qin, Tai, and Karl Rohe. \"Regularized spectral clustering\n under the degree-corrected stochastic blockmodel.\" In Advances\n in Neural Information Processing Systems, pp. 3120-3128. 
2013\n \"\"\"\n valid_inputs = [\"I-DAD\", \"DAD\", \"R-DAD\"]\n if form not in valid_inputs:\n raise TypeError(\"Unsuported Laplacian normalization\")\n\n A = graph\n\n in_degree = np.sum(A, axis=0)\n out_degree = np.sum(A, axis=1)\n\n # regularize laplacian with parameter\n # set to average degree\n if form == \"R-DAD\":\n if regularizer is None:\n regularizer = 1\n elif not isinstance(regularizer, (int, float)):\n raise TypeError(\n \"Regularizer must be a int or float, not {}\".format(type(regularizer))\n )\n elif regularizer < 0:\n raise ValueError(\"Regularizer must be greater than or equal to 0\")\n regularizer = regularizer * np.mean(out_degree)\n\n in_degree += regularizer\n out_degree += regularizer\n\n with np.errstate(divide=\"ignore\"):\n in_root = 1 / np.sqrt(in_degree) # this is 10x faster than ** -0.5\n out_root = 1 / np.sqrt(out_degree)\n\n in_root[np.isinf(in_root)] = 0\n out_root[np.isinf(out_root)] = 0\n\n in_root = np.diag(in_root) # just change to sparse diag for sparse support\n out_root = np.diag(out_root)\n\n if form == \"I-DAD\":\n L = np.diag(in_degree) - A\n L = in_root @ L @ in_root\n elif form == \"DAD\" or form == \"R-DAD\":\n L = out_root @ A @ in_root\n # return symmetrize(L, method=\"avg\") # sometimes machine prec. makes this necessary\n return L\n\n\ndef lse(adj, n_components, regularizer=None):\n if PTR:\n adj = pass_to_ranks(adj)\n lap = to_laplace(adj, form=\"R-DAD\")\n ase = AdjacencySpectralEmbed(n_components=n_components)\n latent = ase.fit_transform(lap)\n latent = np.concatenate(latent, axis=-1)\n return latent\n\n\ndef omni(adjs, n_components):\n if PTR:\n adjs = [pass_to_ranks(a) for a in adjs]\n omni = OmnibusEmbed(n_components=n_components // len(adjs))\n latent = omni.fit_transform(adjs)\n latent = np.concatenate(latent, axis=-1) # first is for in/out\n latent = np.concatenate(latent, axis=-1) # second is for concat. 
each graph\n return latent\n\n\ndef ase_concatenate(adjs, n_components):\n if PTR:\n adjs = [pass_to_ranks(a) for a in adjs]\n ase = AdjacencySpectralEmbed(n_components=n_components // len(adjs))\n graph_latents = []\n for a in adjs:\n latent = ase.fit_transform(a)\n latent = np.concatenate(latent, axis=-1)\n graph_latents.append(latent)\n latent = np.concatenate(graph_latents, axis=-1)\n return latent\n\n\ndef sub_ari(known_inds, true_labels, pred_labels):\n true_known_labels = true_labels[known_inds]\n pred_known_labels = pred_labels[known_inds]\n ari = adjusted_rand_score(true_known_labels, pred_known_labels)\n return ari\n\n\n# Set up plotting constants\nplt.style.use(\"seaborn-white\")\nsns.set_palette(\"deep\")\nsns.set_context(\"talk\", font_scale=1)\n\n\n# %% [markdown]\n# # Load the data\n\n\nadj, class_labels, side_labels, skeleton_labels = load_everything(\n \"Gad\",\n version=BRAIN_VERSION,\n return_keys=[\"Merge Class\", \"Hemisphere\"],\n return_ids=True,\n)\n\n\n# select the right hemisphere\nif ONLY_RIGHT:\n side = \"right hemisphere\"\n right_inds = np.where(side_labels == \"R\")[0]\n adj = adj[np.ix_(right_inds, right_inds)]\n class_labels = class_labels[right_inds]\n skeleton_labels = skeleton_labels[right_inds]\nelse:\n side = \"full brain\"\n\n# sort by number of synapses\ndegrees = adj.sum(axis=0) + adj.sum(axis=1)\nsort_inds = np.argsort(degrees)[::-1]\nadj = adj[np.ix_(sort_inds, sort_inds)]\nclass_labels = class_labels[sort_inds]\nskeleton_labels = skeleton_labels[sort_inds]\n\n# remove disconnected nodes\nadj, lcc_inds = get_lcc(adj, return_inds=True)\nclass_labels = class_labels[lcc_inds]\nskeleton_labels = skeleton_labels[lcc_inds]\n\n# remove pendants\ndegrees = np.count_nonzero(adj, axis=0) + np.count_nonzero(adj, axis=1)\nnot_pendant_mask = degrees != 1\nnot_pendant_inds = np.array(range(len(degrees)))[not_pendant_mask]\nadj = adj[np.ix_(not_pendant_inds, not_pendant_inds)]\nclass_labels = class_labels[not_pendant_inds]\nskeleton_labels = skeleton_labels[not_pendant_inds]\n\n# plot degree sequence\nd_sort = np.argsort(degrees)[::-1]\ndegrees = degrees[d_sort]\nplt.figure(figsize=(10, 5))\nsns.scatterplot(x=range(len(degrees)), y=degrees, s=30, linewidth=0)\n\nknown_inds = np.where(class_labels != \"Unk\")[0]\n\n\n# %% [markdown]\n# # Run clustering using LSE on the sum graph\n\nn_verts = adj.shape[0]\n\n\nlatent = lse(adj, n_components, regularizer=None)\npairplot(latent, labels=class_labels, title=embed)\n\nk_list = list(range(MIN_CLUSTERS, MAX_CLUSTERS + 1))\nn_runs = len(k_list)\nout_dicts = []\n\nbin_adj = binarize(adj)\n\nlast_pred_labels = np.zeros(n_verts)\n\nif cluster == \"GMM\":\n ClusterModel = GaussianCluster\nelif cluster == \"AutoGMM\":\n ClusterModel = AutoGMMCluster\n\nfor k in k_list:\n run_name = f\"k = {k}, {cluster}, {embed}, {side} (A to D), PTR, raw\"\n print(run_name)\n print()\n\n # Do clustering\n # TODO: make this autogmm instead\n gmm = ClusterModel(min_components=k, max_components=k, **gmm_params)\n gmm.fit(latent)\n pred_labels = gmm.predict(latent)\n\n # Score unsupervised metrics\n base_dict = {\n \"K\": k,\n \"Cluster\": cluster,\n \"Embed\": embed,\n \"Method\": f\"{cluster} o {embed}\",\n }\n\n # GMM likelihood\n score = gmm.model_.score(latent)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"GMM likelihood\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n\n # GMM BIC\n score = gmm.model_.bic(latent)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"GMM BIC\"\n temp_dict[\"Score\"] = score\n 
out_dicts.append(temp_dict)\n\n # SBM likelihood\n sbm = SBMEstimator(directed=True, loops=False)\n sbm.fit(bin_adj, y=pred_labels)\n score = sbm.score(bin_adj)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"SBM likelihood\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n\n # DCSBM likelihood\n dcsbm = DCSBMEstimator(directed=True, loops=False)\n dcsbm.fit(bin_adj, y=pred_labels)\n score = dcsbm.score(bin_adj)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"DCSBM likelihood\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n\n # ARI of the subset with labels\n score = sub_ari(known_inds, class_labels, pred_labels)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"Simple ARI\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n\n # ARI vs K - 1\n score = adjusted_rand_score(last_pred_labels, pred_labels)\n temp_dict = base_dict.copy()\n temp_dict[\"Metric\"] = \"K-1 ARI\"\n temp_dict[\"Score\"] = score\n out_dicts.append(temp_dict)\n last_pred_labels = pred_labels\n\n save_name = f\"k{k}-{cluster}-{embed}-right-ad-PTR-raw\"\n\n # Plot embedding\n # pairplot(latent, labels=pred_labels, title=run_name)\n # stashfig(\"latent-\" + save_name)\n\n # Plot everything else\n clustergram(adj, class_labels, pred_labels)\n stashfig(\"clustergram-\" + save_name)\n\n # New plot\n # - Compute signal flow\n # - Get the centroid of each cluster and project to 1d\n # - Alternatively, just take the first dimension\n # - For each cluster plot as a node\n\n # output skeletons\n if SAVESKELS:\n _, colormap, pal = stashskel(\n save_name, skeleton_labels, pred_labels, palette=\"viridis\", multiout=True\n )\n\n palplot(k, cmap=\"viridis\")\n stashfig(\"palplot-\" + save_name)\n\n # save dict colormapping\n filename = (\n Path(\"./maggot_models/notebooks/outs\")\n / Path(FNAME)\n / str(\"colormap-\" + save_name + \".json\")\n )\n with open(filename, \"w\") as fout:\n json.dump(colormap, fout)\n\n stashskel(\n save_name, skeleton_labels, pred_labels, palette=\"viridis\", multiout=False\n )\n\n# %% [markdown]\n# # Plot results of unsupervised metrics\n\nresult_df = pd.DataFrame(out_dicts)\nfg = sns.FacetGrid(result_df, col=\"Metric\", col_wrap=3, sharey=False, height=4)\nfg.map(sns.lineplot, \"K\", \"Score\")\nstashfig(f\"metrics-{cluster}-{embed}-right-ad-PTR-raw\")\n\n\n# Modifications i need to make to the above\n# - Increase the height of the sankey diagram overall\n# - Look into color maps that could be better\n# - Color the cluster labels by what gets written to the JSON\n# - Plot the clusters as nodes in a small network\n\n# %% [markdown]\n# # try graph flow\n\n\nnode_signal_flow = signal_flow(adj)\nmean_sf = np.zeros(k)\nfor i in np.unique(pred_labels):\n inds = np.where(pred_labels == i)[0]\n mean_sf[i] = np.mean(node_signal_flow[inds])\n\ncluster_mean_latent = gmm.model_.means_[:, 0]\nblock_probs = SBMEstimator().fit(bin_adj, y=pred_labels).block_p_\nblock_prob_df = pd.DataFrame(data=block_probs, index=range(k), columns=range(k))\nblock_g = nx.from_pandas_adjacency(block_prob_df, create_using=nx.DiGraph)\nplt.figure(figsize=(10, 10))\n# don't ever let em tell you you're too pythonic\npos = dict(zip(range(k), zip(cluster_mean_latent, mean_sf)))\n# nx.draw_networkx_nodes(block_g, pos=pos)\nlabels = nx.get_edge_attributes(block_g, \"weight\")\n# nx.draw_networkx_edge_labels(block_g, pos, edge_labels=labels)\nfrom matplotlib.cm import ScalarMappable\nimport matplotlib as mpl\n\nnorm = mpl.colors.LogNorm(vmin=0.01, vmax=0.1)\n\nsm = 
ScalarMappable(cmap=\"Reds\", norm=norm)\ncmap = sm.to_rgba(np.array(list(labels.values())) + 0.01)\nnx.draw_networkx(\n block_g,\n pos,\n edge_cmap=\"Reds\",\n edge_color=cmap,\n connectionstyle=\"arc3,rad=0.2\",\n width=1.5,\n)\n\n# %% [markdown]\n# # signal flow marginals\n\nsignal_flow_marginal(adj, pred_labels)\n\n# %% [markdown]\n# #\n\n\ndef signal_flow_marginal(adj, labels, col_wrap=5, palette=\"tab20\"):\n sf = signal_flow(adj)\n uni_labels = np.unique(labels)\n medians = []\n for i in uni_labels:\n inds = np.where(labels == i)[0]\n medians.append(np.median(sf[inds]))\n sort_inds = np.argsort(medians)[::-1]\n col_order = uni_labels[sort_inds]\n plot_df = pd.DataFrame()\n plot_df[\"Signal flow\"] = sf\n plot_df[\"Class\"] = labels\n fg = sns.FacetGrid(\n plot_df,\n col=\"Class\",\n aspect=1.5,\n palette=palette,\n col_order=col_order,\n sharey=False,\n col_wrap=col_wrap,\n xlim=(-3, 3),\n )\n fg = fg.map(sns.distplot, \"Signal flow\") # bins=np.linspace(-2.2, 2.2))\n fg.set(yticks=[], yticklabels=[])\n plt.tight_layout()\n return fg\n\n\nsignal_flow_marginal(adj, class_labels)\nstashfig(\"known-class-sf-marginal\")\n\n# tomorrow\n# DEFINITELY\n# run with unsupervised metrics from k=2-50\n\n# IF TIME\n# run hgmm\n", "# %% [markdown]\n# #\nimport itertools\nimport os\nimport time\nfrom itertools import chain\n\nimport colorcet as cc\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom anytree import LevelOrderGroupIter, Node, RenderTree\nfrom joblib import Parallel, delayed\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom sklearn.decomposition import PCA\n\nfrom graspy.plot import heatmap, pairplot\nfrom src.data import load_metagraph\nfrom src.graph import MetaGraph, preprocess\nfrom src.io import savecsv, savefig, saveskels\nfrom src.traverse import (\n cascades_from_node,\n generate_cascade_tree,\n generate_random_walks,\n path_to_visits,\n to_markov_matrix,\n to_path_graph,\n)\nfrom src.visualization import (\n CLASS_COLOR_DICT,\n barplot_text,\n draw_networkx_nice,\n draw_separators,\n matrixplot,\n remove_shared_ax,\n remove_spines,\n screeplot,\n sort_meta,\n stacked_barplot,\n)\n\nFNAME = os.path.basename(__file__)[:-3]\nprint(FNAME)\n\n\ndef stashfig(name, **kws):\n savefig(name, foldername=FNAME, save_on=True, **kws)\n\n\ndef stashcsv(df, name, **kws):\n savecsv(df, name, foldername=FNAME, save_on=True, **kws)\n\n\n#%% Load and preprocess the data\n\nVERSION = \"2020-03-09\"\nprint(f\"Using version {VERSION}\")\n\nplot_examples = False\nplot_embed = False\nplot_full_mat = False\ngraph_type = \"Gad\"\nthreshold = 0\nweight = \"weight\"\nmg = load_metagraph(graph_type, VERSION)\nmg = preprocess(\n mg,\n threshold=threshold,\n sym_threshold=False,\n remove_pdiff=True,\n binarize=False,\n weight=weight,\n)\nprint(f\"Preprocessed graph {graph_type} with threshold={threshold}, weight={weight}\")\n\n# TODO update this with the mixed groups\n# TODO make these functional for selecting proper paths\n\nout_classes = [\n \"O_dSEZ\",\n \"O_dSEZ;CN\",\n \"O_dSEZ;LHN\",\n \"O_dVNC\",\n \"O_dVNC;O_RG\",\n \"O_dVNC;CN\",\n \"O_RG\",\n \"O_dUnk\",\n \"O_RG-IPC\",\n \"O_RG-ITP\",\n \"O_RG-CA-LP\",\n]\nfrom_groups = [\n (\"sens-ORN\",),\n (\"sens-photoRh5\", \"sens-photoRh6\"),\n (\"sens-MN\",),\n (\"sens-thermo\",),\n (\"sens-vtd\",),\n (\"sens-AN\",),\n]\nfrom_group_names = [\"Odor\", \"Photo\", \"MN\", \"Temp\", \"VTD\", \"AN\"]\n\nout_groups = [\n (\"motor-mAN\", \"motormVAN\", 
\"motor-mPaN\"),\n (\"O_dSEZ\", \"O_dVNC;O_dSEZ\", \"O_dSEZ;CN\", \"LHN;O_dSEZ\"),\n (\"O_dVNC\", \"O_dVNC;CN\", \"O_RG;O_dVNC\", \"O_dVNC;O_dSEZ\"),\n (\"O_RG\", \"O_RG-IPC\", \"O_RG-ITP\", \"O_RG-CA-LP\", \"O_RG;O_dVNC\"),\n (\"O_dUnk\",),\n]\nout_group_names = [\"Motor\", \"SEZ\", \"VNC\", \"RG\", \"dUnk\"]\n\n\nfrom_classes = list(chain.from_iterable(from_groups)) # make this a flat list\nout_classes = list(chain.from_iterable(out_groups))\n\nclass_key = \"Merge Class\"\n\nadj = nx.to_numpy_array(mg.g, weight=weight, nodelist=mg.meta.index.values)\nn_verts = len(adj)\nmeta = mg.meta.copy()\ng = mg.g.copy()\nmeta[\"idx\"] = range(len(meta))\n\nfrom_inds = meta[meta[class_key].isin(from_classes)][\"idx\"].values\nout_inds = meta[meta[class_key].isin(out_classes)][\"idx\"].values\nind_map = dict(zip(meta.index, meta[\"idx\"]))\ng = nx.relabel_nodes(g, ind_map, copy=True)\nout_ind_map = dict(zip(out_inds, range(len(out_inds))))\n\n# %% [markdown]\n# # Use a method to generate visits\n\npath_type = \"cascade\"\nif path_type == \"cascade\":\n p = 0.01\n not_probs = (\n 1 - p\n ) ** adj # probability of none of the synapses causing postsynaptic\n probs = 1 - not_probs # probability of ANY of the synapses firing onto next\nelif path_type == \"fancy-cascade\":\n alpha = 0.5\n flat = np.full(adj.shape, alpha)\n deg = meta[\"dendrite_input\"].values\n deg[deg == 0] = 1\n flat = flat / deg[None, :]\n not_probs = np.power((1 - flat), adj)\n probs = 1 - not_probs\n\n#%%\nseed = 8888\nmax_depth = 10\nn_bins = 10\nn_sims = 100\nmethod = \"tree\"\nnormalize_n_source = False\n\n\nbasename = f\"-{graph_type}-t{threshold}-pt{path_type}-b{n_bins}-n{n_sims}-m{method}\"\nbasename += f\"-norm{normalize_n_source}\"\nbasename += f\"-plus-inverted\"\n\n\nnp.random.seed(seed)\nif method == \"tree\":\n seeds = np.random.choice(int(1e8), size=len(from_inds), replace=False)\n outs = Parallel(n_jobs=1, verbose=10)(\n delayed(cascades_from_node)(\n fi, probs, out_inds, max_depth, n_sims, seed, n_bins, method\n )\n for fi, seed in zip(from_inds, seeds)\n )\nelif method == \"path\":\n outs = []\n for start_ind in from_inds:\n temp_hist = cascades_from_node(\n start_ind, probs, out_inds, max_depth, n_sims, seed, n_bins, method\n )\n outs.append(temp_hist)\nfrom_hist_mat = np.concatenate(outs, axis=-1)\n\n###\n# invert\nif method == \"tree\":\n seeds = np.random.choice(int(1e8), size=len(out_inds), replace=False)\n outs = Parallel(n_jobs=1, verbose=10)(\n delayed(cascades_from_node)(\n fi, probs.T, from_inds, max_depth, n_sims, seed, n_bins, method\n )\n for fi, seed in zip(out_inds, seeds)\n )\nelif method == \"path\":\n outs = []\n for start_ind in from_inds:\n temp_hist = cascades_from_node(\n start_ind, probs.T, out_inds, max_depth, n_sims, seed, n_bins, method\n )\n outs.append(temp_hist)\nout_hist_mat = np.concatenate(outs, axis=-1)\n\n\n# generate_cascade_paths(start_ind, probs, 1, stop_inds=out_inds, max_depth=10)\n# %% [markdown]\n# # Sort metadata\nfull_hist_mat = np.concatenate((from_hist_mat, out_hist_mat), axis=1)\nhist_mat = full_hist_mat\n# row metadata\nids = pd.Series(index=meta[\"idx\"], data=meta.index, name=\"id\")\nto_class = ids.map(meta[\"Merge Class\"])\nto_class.name = \"to_class\"\nrow_df = pd.concat([ids, to_class], axis=1)\n\n# col metadata\norders = pd.Series(data=len(from_inds) * list(range(n_bins)), name=\"order\")\nfrom_idx = pd.Series(data=np.repeat(from_inds, n_bins), name=\"idx\")\nfrom_ids = from_idx.map(ids)\nfrom_ids.name = \"id\"\nfrom_class = from_ids.map(meta[\"Merge 
Class\"])\nfrom_class.name = \"class\"\nfrom_col_df = pd.concat([orders, from_idx, from_ids, from_class], axis=1)\n\norders = pd.Series(data=len(out_inds) * list(range(n_bins)), name=\"order\")\nout_idx = pd.Series(data=np.repeat(out_inds, n_bins), name=\"idx\")\nout_ids = out_idx.map(ids)\nout_ids.name = \"id\"\nout_class = out_ids.map(meta[\"Merge Class\"])\nout_class.name = \"class\"\nout_col_df = pd.concat([orders, out_idx, out_ids, out_class], axis=1)\ncol_df = pd.concat([from_col_df, out_col_df], axis=0, ignore_index=True)\n# %% [markdown]\n# #\nlog_mat = np.log10(hist_mat + 1)\nif plot_full_mat:\n shape = log_mat.shape\n figsize = (10, 20)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n matrixplot(\n log_mat,\n ax=ax,\n col_meta=col_df,\n col_sort_class=[\"from_class\"],\n row_meta=row_df,\n row_sort_class=[\"to_class\"],\n plot_type=\"scattermap\",\n sizes=(0.5, 0.5),\n tick_rot=45,\n )\n stashfig(\"log-full-scatter\" + basename)\n\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n matrixplot(\n log_mat,\n ax=ax,\n col_meta=col_df,\n col_sort_class=[\"from_class\"],\n row_colors=CLASS_COLOR_DICT,\n row_meta=row_df,\n row_sort_class=[\"to_class\"],\n plot_type=\"heatmap\",\n sizes=(0.5, 0.5),\n tick_rot=45,\n )\n stashfig(\"log-full-heat\" + basename)\n\n# %% [markdown]\n# # Screeplots\n\nif plot_embed:\n screeplot(hist_mat.astype(float), title=\"Raw hist mat (full)\")\n stashfig(\"scree-raw-mat\" + basename)\n screeplot(log_mat, title=\"Log hist mat (full)\")\n stashfig(\"scree-log-mat\" + basename)\n\n# %% [markdown]\n# # Pairplots\nif plot_embed:\n pca = PCA(n_components=6)\n embed = pca.fit_transform(log_mat)\n loadings = pca.components_.T\n pg = pairplot(\n embed,\n labels=to_class.values,\n palette=CLASS_COLOR_DICT,\n height=5,\n title=\"Node response embedding (log)\",\n )\n pg._legend.remove()\n stashfig(\"node-pca-log\" + basename)\n pg = pairplot(\n loadings,\n labels=from_class.values,\n height=5,\n title=\"Source class embedding (log)\",\n )\n stashfig(\"source-pca-log\" + basename)\n\n pca = PCA(n_components=6)\n embed = pca.fit_transform(hist_mat.astype(float))\n loadings = pca.components_.T\n pg = pairplot(\n embed,\n labels=to_class.values,\n palette=CLASS_COLOR_DICT,\n height=5,\n title=\"Node response embedding (raw)\",\n )\n pg._legend.remove()\n stashfig(\"node-pca-log\" + basename)\n pg = pairplot(\n loadings,\n labels=from_class.values,\n height=5,\n title=\"Source class embedding (raw)\",\n )\n stashfig(\"source-pca-log\" + basename)\n\n# %% [markdown]\n# # Collapse that matrix\nhist_mat = full_hist_mat\ncollapsed_hist = []\ncollapsed_col_df = []\ngroups = from_groups + out_groups\nnames = from_group_names + out_group_names\nfor fg, fg_name in zip(groups, names):\n from_df = col_df[col_df[\"class\"].isin(fg)]\n n_in_group = len(from_df)\n for order in from_df[\"order\"].unique():\n inds = from_df[from_df[\"order\"] == order].index\n col = hist_mat[:, inds].sum(axis=1)\n if normalize_n_source:\n col = col.astype(float)\n col /= n_in_group\n collapsed_hist.append(col)\n row = {\"order\": order, \"class\": fg_name}\n collapsed_col_df.append(row)\n\n\ncollapsed_col_df = pd.DataFrame(collapsed_col_df)\ncollapsed_hist = np.array(collapsed_hist).T\nlog_collapsed_hist = np.log10(collapsed_hist + 1)\n\n# %% [markdown]\n# #\nif plot_embed:\n pca = PCA(n_components=6)\n embed = pca.fit_transform(log_collapsed_hist)\n loadings = pca.components_.T\n pg = pairplot(\n embed,\n labels=to_class.values,\n palette=CLASS_COLOR_DICT,\n height=5,\n title=\"Collapsed node response 
embedding (log)\",\n )\n pg._legend.remove()\n stashfig(\"coll-node-pca-log\" + basename)\n pg = pairplot(\n loadings,\n labels=collapsed_col_df[\"from_class\"].values,\n height=5,\n title=\"Collapsed source class embedding (log)\",\n )\n stashfig(\"coll-source-pca-log\" + basename)\n\n pca = PCA(n_components=6)\n embed = pca.fit_transform(collapsed_hist.astype(float))\n loadings = pca.components_.T\n pg = pairplot(\n embed,\n labels=to_class.values,\n palette=CLASS_COLOR_DICT,\n height=5,\n title=\"Collapsed node response embedding (raw)\",\n )\n pg._legend.remove()\n stashfig(\"coll-node-pca-log\" + basename)\n pg = pairplot(\n loadings,\n labels=collapsed_col_df[\"from_class\"].values,\n height=5,\n title=\"Collapsed source class embedding (raw)\",\n )\n stashfig(\"coll-source-pca-log\" + basename)\n\n# %% [markdown]\n# # Compute mean visit over all sources, for plotting\ndef mean_visit(row):\n n_groups = len(row) // n_bins\n s = 0\n for i in range(n_groups):\n group = row[i * n_bins : (i + 1) * n_bins]\n for j, val in enumerate(group):\n s += j * val\n s /= row.sum()\n return s\n\n\nvisits = []\nfor r in collapsed_hist:\n mv = mean_visit(r)\n visits.append(mv)\nvisits = np.array(visits)\nvisits[np.isnan(visits)] = n_bins + 1\nrow_df[\"visit_order\"] = visits\nmean_visit_order = row_df.groupby([\"to_class\"])[\"visit_order\"].mean()\nrow_df[\"group_visit_order\"] = row_df[\"to_class\"].map(mean_visit_order)\nrow_df[\"n_visit\"] = collapsed_hist.sum(axis=1)\n# %% [markdown]\n# #\nfig, ax = plt.subplots(1, 1, figsize=(15, 15))\nsns.set_context(\"talk\", font_scale=0.8)\ngridline_kws = dict(color=\"grey\", linestyle=\"--\", alpha=0.7, linewidth=0.3)\nmatrixplot(\n log_collapsed_hist,\n ax=ax,\n col_meta=collapsed_col_df,\n col_sort_class=[\"class\"],\n row_meta=row_df,\n row_sort_class=[\"to_class\"],\n row_colors=CLASS_COLOR_DICT,\n row_class_order=\"group_visit_order\",\n row_item_order=[\"visit_order\"],\n plot_type=\"heatmap\",\n tick_rot=0,\n row_ticks=False,\n gridline_kws=gridline_kws,\n)\nstashfig(\"collapsed-log-heat\" + basename)\n\n# %% [markdown]\n# #\nsns.set_context(\"talk\", font_scale=1)\ngridline_kws = dict(color=\"grey\", linestyle=\"--\", alpha=0.7, linewidth=0.3)\n\nfig, ax = plt.subplots(1, 1, figsize=(25, 15))\nax, divider, top_cax, left_cax = matrixplot(\n log_collapsed_hist.T,\n ax=ax,\n row_meta=collapsed_col_df,\n row_sort_class=[\"class\"],\n col_meta=row_df,\n col_sort_class=[\"to_class\"],\n col_colors=CLASS_COLOR_DICT,\n col_class_order=\"group_visit_order\",\n col_item_order=[\"visit_order\"],\n plot_type=\"heatmap\",\n tick_rot=45,\n col_ticks=False,\n gridline_kws=gridline_kws,\n)\ncax = divider.append_axes(\"right\", size=\"1%\", pad=0.02, sharey=ax)\nremove_shared_ax(cax)\nsns.heatmap(\n collapsed_col_df[\"order\"][:, None], ax=cax, cbar=False, cmap=\"RdBu\", center=0\n)\ncax.set_xticks([])\ncax.set_yticks([])\ncax.set_ylabel(r\"Hops $\\to$\", rotation=-90, ha=\"center\", va=\"center\", labelpad=20)\ncax.yaxis.set_label_position(\"right\")\ntop_cax.set_yticks([0.5])\ntop_cax.set_yticklabels([\"Class\"], va=\"center\")\nax.set_xlabel(\"Neuron\")\nax.set_ylabel(\"Source class\")\nstashfig(\"collapsed-log-heat-transpose\" + basename, dpi=200)\n\nfig, ax = plt.subplots(1, 1, figsize=(25, 15))\nax, divider, top_cax, left_cax = matrixplot(\n log_collapsed_hist.T,\n ax=ax,\n row_meta=collapsed_col_df,\n row_sort_class=[\"class\"],\n col_meta=row_df,\n col_sort_class=[\"to_class\"],\n col_colors=CLASS_COLOR_DICT,\n col_class_order=\"group_visit_order\",\n 
col_item_order=[\"visit_order\"],\n plot_type=\"heatmap\",\n tick_rot=45,\n col_ticks=True,\n gridline_kws=gridline_kws,\n)\ncax = divider.append_axes(\"right\", size=\"1%\", pad=0.02, sharey=ax)\nremove_shared_ax(cax)\nsns.heatmap(\n collapsed_col_df[\"order\"][:, None], ax=cax, cbar=False, cmap=\"RdBu\", center=0\n)\ncax.set_xticks([])\ncax.set_yticks([])\ncax.set_ylabel(r\"Hops $\\to$\", rotation=-90, ha=\"center\", va=\"center\", labelpad=20)\ncax.yaxis.set_label_position(\"right\")\ntop_cax.set_yticks([0.5])\ntop_cax.set_yticklabels([\"Class\"], va=\"center\")\nax.set_xlabel(\"Neuron\")\nax.set_ylabel(\"Source class\")\nstashfig(\"collapsed-log-heat-transpose-labeled\" + basename, dpi=200)\n\n# %% [markdown]\n# # clustermap the matrix\n\n\nsns.set_context(\"talk\", font_scale=1)\nlinkage = \"average\"\nmetric = \"euclidean\"\ncolors = np.vectorize(CLASS_COLOR_DICT.get)(row_df[\"to_class\"])\n\nperm_inds, sort_collapsed_col_df = sort_meta(\n collapsed_col_df, sort_class=[\"from_class\"]\n)\nsort_log_collapsed_hist = log_collapsed_hist[:, perm_inds]\n\n\ncg = sns.clustermap(\n data=sort_log_collapsed_hist.T,\n col_cluster=True,\n row_cluster=False,\n col_colors=colors,\n cmap=\"RdBu_r\",\n center=0,\n cbar_pos=None,\n method=linkage,\n metric=metric,\n)\nax = cg.ax_heatmap\ndraw_separators(\n ax,\n ax_type=\"y\",\n sort_meta=sort_collapsed_col_df,\n sort_class=[\"from_class\"],\n tick_rot=0,\n)\nax.xaxis.set_ticks([])\n# ax.set_ylabel(r\"Visits over time $\\to$\")\nax.set_xlabel(\"Neuron\")\nax.yaxis.tick_left()\n# ax.set_yticklabels(ax.get_yticklabels(), ha=\"left\")\nstashfig(\"collapsed-log-clustermap\" + basename)\n# stashfig(\"collapsed-log-clustermap\" + basename, fmt=\"pdf\")\n\n\n# %% [markdown]\n# # Do some plotting for illustration only\n\n\nif plot_examples:\n sns.set_context(\"talk\")\n sns.set_palette(\"Set1\")\n examples = [742, 605, 743, 2282, 596, 2367, 1690, 2313]\n for target_ind in examples:\n row = collapsed_hist[target_ind, :]\n perm_inds, sort_col_df = sort_meta(collapsed_col_df, sort_class=[\"from_class\"])\n sort_row = row[perm_inds]\n\n fig, ax = plt.subplots(1, 1)\n xs = np.arange(len(sort_row)) + 0.5\n divider = make_axes_locatable(ax)\n bot_cax = divider.append_axes(\"bottom\", size=\"3%\", pad=0.02, sharex=ax)\n remove_shared_ax(bot_cax)\n\n ax.bar(x=xs, height=sort_row, width=0.8)\n draw_separators(\n ax, sort_meta=sort_col_df, sort_class=[\"from_class\"], tick_rot=0\n )\n ax.set_xlim(0, len(xs))\n ax.set_ylabel(\"# hits @ time\")\n\n sns.heatmap(\n collapsed_col_df[\"order\"][None, :],\n ax=bot_cax,\n cbar=False,\n cmap=\"RdBu\",\n center=0,\n )\n bot_cax.set_xticks([])\n bot_cax.set_yticks([])\n bot_cax.set_xlabel(r\"Hops $\\to$\", x=0.1, ha=\"left\", labelpad=-22)\n bot_cax.set_xticks([20.5, 24.5, 28.5])\n bot_cax.set_xticklabels([1, 5, 9], rotation=0)\n\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n target_skid = meta.iloc[target_ind, :].name\n ax.set_title(\n f\"Response for cell {target_skid} ({meta[meta['idx'] == target_ind]['Merge Class'].values[0]})\"\n )\n\n stashfig(f\"{target_skid}-response-hist\" + basename)\n\n", "#%%\nfrom src.data import load_metagraph\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nmg = load_metagraph(\"G\", \"2020-01-21\")\n\nis_pdiff = np.where(mg[\"is_pdiff\"])[0]\nmg = mg.reindex(is_pdiff)\ndegree_df = mg.calculate_degrees()\nplt.figure()\nmelt_degree = pd.melt(\n degree_df.reset_index(),\n id_vars=[\"ID\"],\n value_vars=[\"In 
degree\", \"Out degree\", \"Total degree\"],\n value_name=\"Degree\",\n)\nsns.stripplot(y=\"Degree\", data=melt_degree, x=\"variable\", jitter=0.45)\n\nplt.figure()\nmelt_syns = pd.melt(\n degree_df.reset_index(),\n id_vars=[\"ID\"],\n value_vars=[\"In edgesum\", \"Out edgesum\", \"Total edgesum\"],\n value_name=\"Synapses\",\n)\nsns.stripplot(y=\"Synapses\", data=melt_syns, x=\"variable\", jitter=0.45)\n" ]
[ [ "numpy.sum", "numpy.diag", "matplotlib.pyplot.tight_layout", "numpy.random.seed", "numpy.argsort", "matplotlib.cm.ScalarMappable", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure", "matplotlib.colors.LogNorm", "numpy.where", "numpy.unique", "numpy.mean", "numpy.zeros", "numpy.median", "numpy.count_nonzero", "numpy.ix_", "sklearn.metrics.adjusted_rand_score", "numpy.isinf", "pandas.DataFrame", "numpy.errstate", "numpy.sqrt", "numpy.concatenate" ], [ "pandas.Series", "numpy.vectorize", "pandas.DataFrame", "numpy.random.seed", "numpy.repeat", "matplotlib.pyplot.subplots", "numpy.power", "numpy.log10", "pandas.concat", "numpy.isnan", "numpy.array", "numpy.concatenate", "numpy.full", "sklearn.decomposition.PCA" ], [ "matplotlib.pyplot.figure", "numpy.where" ] ]
theoptips/PySyft
[ "4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc", "4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc" ]
[ "examples/tutorials/advanced/websockets-example-MNIST-parallel/run_websocket_client.py", "syft/federated/federated_client.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms, datasets\n\nimport logging\nimport argparse\nimport sys\nimport asyncio\nimport numpy as np\n\nimport syft as sy\nfrom syft import workers\nfrom syft.frameworks.torch.federated import utils\n\nlogger = logging.getLogger(__name__)\n\nLOG_INTERVAL = 25\n\n\n# Loss function\[email protected]\ndef loss_fn(pred, target):\n return F.nll_loss(input=pred, target=target)\n\n\n# Model\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4 * 4 * 50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4 * 4 * 50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef define_and_get_arguments(args=sys.argv[1:]):\n parser = argparse.ArgumentParser(\n description=\"Run federated learning using websocket client workers.\"\n )\n parser.add_argument(\"--batch_size\", type=int, default=32, help=\"batch size of the training\")\n parser.add_argument(\n \"--test_batch_size\", type=int, default=128, help=\"batch size used for the test data\"\n )\n parser.add_argument(\n \"--training_rounds\", type=int, default=40, help=\"number of federated learning rounds\"\n )\n parser.add_argument(\n \"--federate_after_n_batches\",\n type=int,\n default=10,\n help=\"number of training steps performed on each remote worker before averaging\",\n )\n parser.add_argument(\"--lr\", type=float, default=0.1, help=\"learning rate\")\n parser.add_argument(\"--cuda\", action=\"store_true\", help=\"use cuda\")\n parser.add_argument(\"--seed\", type=int, default=1, help=\"seed used for randomization\")\n parser.add_argument(\"--save_model\", action=\"store_true\", help=\"if set, model will be saved\")\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"store_true\",\n help=\"if set, websocket client workers will be started in verbose mode\",\n )\n\n args = parser.parse_args(args=args)\n return args\n\n\nasync def fit_model_on_worker(\n worker: workers.WebsocketClientWorker,\n traced_model: torch.jit.ScriptModule,\n batch_size: int,\n curr_round: int,\n max_nr_batches: int,\n lr: float,\n):\n \"\"\"Send the model to the worker and fit the model on the worker's training data.\n\n Args:\n worker: Remote location, where the model shall be trained.\n traced_model: Model which shall be trained.\n batch_size: Batch size of each training step.\n curr_round: Index of the current training round (for logging purposes).\n max_nr_batches: If > 0, training on worker will stop at min(max_nr_batches, nr_available_batches).\n lr: Learning rate of each training step.\n\n Returns:\n A tuple containing:\n * worker_id: Union[int, str], id of the worker.\n * improved model: torch.jit.ScriptModule, model after training at the worker.\n * loss: Loss on last training batch, torch.tensor.\n \"\"\"\n train_config = sy.TrainConfig(\n model=traced_model,\n loss_fn=loss_fn,\n batch_size=batch_size,\n shuffle=True,\n max_nr_batches=max_nr_batches,\n epochs=1,\n lr=lr,\n )\n train_config.send(worker)\n logger.info(\n \"Training round %s, calling fit on worker: %s, lr = %s\",\n curr_round,\n worker.id,\n \"{:.3f}\".format(train_config.lr),\n )\n loss = await worker.async_fit(dataset_key=\"mnist\", return_ids=[0])\n logger.info(\"Training round: %s, worker: %s, 
avg_loss: %s\", curr_round, worker.id, loss.mean())\n model = train_config.model_ptr.get().obj\n return worker.id, model, loss\n\n\ndef evaluate_models_on_test_data(test_loader, results):\n np.set_printoptions(formatter={\"float\": \"{: .0f}\".format})\n for worker_id, worker_model, _ in results:\n evaluate_model(worker_id, worker_model, \"cpu\", test_loader, print_target_hist=False)\n\n\ndef evaluate_model(worker_id, model, device, test_loader, print_target_hist=False):\n model.eval()\n test_loss = 0.0\n correct = 0\n hist_target = np.zeros(10)\n hist_pred = np.zeros(10)\n\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n hist, _ = np.histogram(target, bins=10, range=(0, 10))\n hist_target += hist\n output = model(data)\n test_loss += loss_fn(output, target).item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n hist, _ = np.histogram(pred, bins=10, range=(0, 10))\n hist_pred += hist\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n if print_target_hist:\n logger.info(\"Target histogram: %s\", hist_target)\n logger.info(\"Prediction hist.: %s\", hist_pred)\n\n logger.info(\n \"%s: Test set: Average loss: %s, Accuracy: %s/%s (%s)\",\n worker_id,\n \"{:.4f}\".format(test_loss),\n correct,\n len(test_loader.dataset),\n \"{:.2f}\".format(100.0 * correct / len(test_loader.dataset)),\n )\n\n\nasync def main():\n args = define_and_get_arguments()\n\n hook = sy.TorchHook(torch)\n\n kwargs_websocket = {\"host\": \"localhost\", \"hook\": hook, \"verbose\": args.verbose}\n alice = workers.WebsocketClientWorker(id=\"alice\", port=8777, **kwargs_websocket)\n bob = workers.WebsocketClientWorker(id=\"bob\", port=8778, **kwargs_websocket)\n charlie = workers.WebsocketClientWorker(id=\"charlie\", port=8779, **kwargs_websocket)\n\n worker_instances = [alice, bob, charlie]\n\n use_cuda = args.cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\n\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"../data\",\n train=False,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n ),\n ),\n batch_size=args.test_batch_size,\n shuffle=False,\n drop_last=False,\n **kwargs,\n )\n\n model = Net().to(device)\n\n (data, target) = test_loader.__iter__().next()\n traced_model = torch.jit.trace(model, data)\n learning_rate = args.lr\n\n for curr_round in range(1, args.training_rounds + 1):\n logger.info(\"Starting training round %s/%s\", curr_round, args.training_rounds)\n\n results = await asyncio.gather(\n *[\n fit_model_on_worker(\n worker=worker,\n traced_model=traced_model,\n batch_size=args.batch_size,\n curr_round=curr_round,\n max_nr_batches=args.federate_after_n_batches,\n lr=learning_rate,\n )\n for worker in worker_instances\n ]\n )\n models = {}\n loss_values = {}\n\n test_models = curr_round % 10 == 1 or curr_round == args.training_rounds\n if test_models:\n evaluate_models_on_test_data(test_loader, results)\n\n for worker_id, worker_model, worker_loss in results:\n if worker_model is not None:\n models[worker_id] = worker_model\n loss_values[worker_id] = worker_loss\n\n traced_model = utils.federated_avg(models)\n if test_models:\n evaluate_model(\n \"Federated model\", traced_model, \"cpu\", test_loader, 
print_target_hist=True\n )\n\n # decay learning rate\n learning_rate = max(0.98 * learning_rate, args.lr * 0.01)\n\n if args.save_model:\n torch.save(traced_model.state_dict(), \"mnist_cnn.pt\") # save the final averaged model, not the untrained initial one\n\n\nif __name__ == \"__main__\":\n # Logging setup\n logger = logging.getLogger(\"run_websocket_client\")\n FORMAT = \"%(asctime)s %(levelname)s %(filename)s(l:%(lineno)d, p:%(process)d) - %(message)s\"\n logging.basicConfig(format=FORMAT)\n logger.setLevel(level=logging.DEBUG)\n\n # Websockets setup\n websockets_logger = logging.getLogger(\"websockets\")\n websockets_logger.setLevel(logging.INFO)\n websockets_logger.addHandler(logging.StreamHandler())\n\n # Run main\n asyncio.get_event_loop().run_until_complete(main())\n", "import torch as th\nfrom torch.utils.data import BatchSampler, RandomSampler, SequentialSampler\n\nfrom syft.generic import ObjectStorage\nfrom syft.federated.train_config import TrainConfig\n\n\nclass FederatedClient(ObjectStorage):\n \"\"\"A client able to execute federated learning on local datasets.\"\"\"\n\n def __init__(self, datasets=None):\n super().__init__()\n self.datasets = datasets if datasets is not None else dict()\n self.optimizer = None\n self.train_config = None\n\n def add_dataset(self, dataset, key: str):\n self.datasets[key] = dataset\n\n def remove_dataset(self, key: str):\n if key in self.datasets:\n del self.datasets[key]\n\n def set_obj(self, obj: object):\n \"\"\"Registers objects, checking which objects should be cached.\n\n Args:\n obj: An object to be registered.\n \"\"\"\n if isinstance(obj, TrainConfig):\n self.train_config = obj\n self.optimizer = None\n else:\n super().set_obj(obj)\n\n def _build_optimizer(\n self, optimizer_name: str, model, optimizer_args: dict\n ) -> th.optim.Optimizer:\n \"\"\"Build an optimizer if needed.\n\n Args:\n optimizer_name: A string indicating the optimizer name.\n optimizer_args: A dict containing the args used to initialize the optimizer.\n Returns:\n A Torch Optimizer.\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer\n\n if optimizer_name in dir(th.optim):\n optimizer = getattr(th.optim, optimizer_name)\n self.optimizer = optimizer(model.parameters(), **optimizer_args)\n else:\n raise ValueError(\"Unknown optimizer: {}\".format(optimizer_name))\n return self.optimizer\n\n def fit(self, dataset_key: str, **kwargs):\n \"\"\"Fits a model on the local dataset as specified in the local TrainConfig object.\n\n Args:\n dataset_key: Identifier of the local dataset that shall be used for training.\n **kwargs: Unused.\n\n Returns:\n loss: Training loss on the last batch of training data.\n \"\"\"\n if self.train_config is None:\n raise ValueError(\"TrainConfig not defined.\")\n\n if dataset_key not in self.datasets:\n raise ValueError(\"Dataset {} unknown.\".format(dataset_key))\n\n model = self.get_obj(self.train_config._model_id).obj\n loss_fn = self.get_obj(self.train_config._loss_fn_id).obj\n\n self._build_optimizer(\n self.train_config.optimizer, model, optimizer_args=self.train_config.optimizer_args\n )\n\n return self._fit(model=model, dataset_key=dataset_key, loss_fn=loss_fn)\n\n def _create_data_loader(self, dataset_key: str, shuffle: bool = False):\n data_range = range(len(self.datasets[dataset_key]))\n if shuffle:\n sampler = RandomSampler(data_range)\n else:\n sampler = SequentialSampler(data_range)\n data_loader = th.utils.data.DataLoader(\n self.datasets[dataset_key],\n batch_size=self.train_config.batch_size,\n sampler=sampler,\n num_workers=0,\n )\n return data_loader\n\n def _fit(self, model, 
dataset_key, loss_fn):\n model.train()\n data_loader = self._create_data_loader(\n dataset_key=dataset_key, shuffle=self.train_config.shuffle\n )\n\n loss = None\n iteration_count = 0\n\n for _ in range(self.train_config.epochs):\n for (data, target) in data_loader:\n # Set gradients to zero\n self.optimizer.zero_grad()\n\n # Update model\n output = model(data)\n loss = loss_fn(target=target, pred=output)\n loss.backward()\n self.optimizer.step()\n\n # Update and check iteration count\n iteration_count += 1\n if iteration_count >= self.train_config.max_nr_batches >= 0:\n break\n\n return loss\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.Linear", "numpy.zeros", "torch.nn.functional.max_pool2d", "numpy.histogram", "torch.nn.functional.nll_loss", "torch.manual_seed", "torch.no_grad", "numpy.set_printoptions", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.device", "torch.jit.trace" ], [ "torch.utils.data.DataLoader", "torch.utils.data.RandomSampler", "torch.utils.data.SequentialSampler" ] ]
Ali-ry/azureml-examples
[ "817ae89d2766dcafd70937a22cb3a80f100a2906" ]
[ "python-sdk/tutorials/automl-with-azureml/forecasting-recipes-univariate/forecasting_script.py" ]
[ "\"\"\"\r\nThis is the script that is executed on the compute instance. It relies\r\non the model.pkl file which is uploaded along with this script to the\r\ncompute instance.\r\n\"\"\"\r\n\r\nimport argparse\r\nfrom azureml.core import Dataset, Run\r\nfrom azureml.automl.core.shared.constants import TimeSeriesInternal\r\nfrom sklearn.externals import joblib\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\r\n \"--target_column_name\",\r\n type=str,\r\n dest=\"target_column_name\",\r\n help=\"Target Column Name\",\r\n)\r\nparser.add_argument(\r\n \"--test_dataset\", type=str, dest=\"test_dataset\", help=\"Test Dataset\"\r\n)\r\n\r\nargs = parser.parse_args()\r\ntarget_column_name = args.target_column_name\r\ntest_dataset_id = args.test_dataset\r\n\r\nrun = Run.get_context()\r\nws = run.experiment.workspace\r\n\r\n# get the input dataset by id\r\ntest_dataset = Dataset.get_by_id(ws, id=test_dataset_id)\r\n\r\nX_test = (\r\n test_dataset.drop_columns(columns=[target_column_name])\r\n .to_pandas_dataframe()\r\n .reset_index(drop=True)\r\n)\r\ny_test_df = (\r\n test_dataset.with_timestamp_columns(None)\r\n .keep_columns(columns=[target_column_name])\r\n .to_pandas_dataframe()\r\n)\r\n\r\n# generate forecast\r\nfitted_model = joblib.load(\"model.pkl\")\r\n# We have default quantiles values set as below(95th percentile)\r\nquantiles = [0.025, 0.5, 0.975]\r\npredicted_column_name = \"predicted\"\r\nPI = \"prediction_interval\"\r\nfitted_model.quantiles = quantiles\r\npred_quantiles = fitted_model.forecast_quantiles(X_test)\r\npred_quantiles[PI] = pred_quantiles[[min(quantiles), max(quantiles)]].apply(\r\n lambda x: \"[{}, {}]\".format(x[0], x[1]), axis=1\r\n)\r\nX_test[target_column_name] = y_test_df[target_column_name]\r\nX_test[PI] = pred_quantiles[PI]\r\nX_test[predicted_column_name] = pred_quantiles[0.5]\r\n# drop rows where prediction or actuals are nan\r\n# happens because of missing actuals\r\n# or at edges of time due to lags/rolling windows\r\nclean = X_test[\r\n X_test[[target_column_name, predicted_column_name]].notnull().all(axis=1)\r\n]\r\nclean.rename(columns={target_column_name: \"actual\"}, inplace=True)\r\n\r\nfile_name = \"outputs/predictions.csv\"\r\nexport_csv = clean.to_csv(file_name, header=True, index=False) # added Index\r\n\r\n# Upload the predictions into artifacts\r\nrun.upload_file(name=file_name, path_or_stream=file_name)\r\n" ]
[ [ "sklearn.externals.joblib.load" ] ]
nielsbril/best
[ "8a902293605f1bee1abf3ca66ae3708706658772" ]
[ "matching/matching.py" ]
[ "import pandas as pd\nimport argparse\nimport logging\nimport sys\nimport json\n\n\ndef get_best_logger(log_file, verbose):\n # Setup logger - (Python logger breaks PEP8 by default)\n logger = logging.getLogger(__name__)\n if verbose:\n logger.setLevel('DEBUG')\n # file_handler logs to file, stream_handler to console\n file_handler = logging.FileHandler(log_file)\n stream_handler = logging.StreamHandler()\n # formatter sets log format\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s : %(levelname)s - %(message)s')\n # add formatter to both handlers\n file_handler.setFormatter(formatter)\n stream_handler.setFormatter(formatter)\n # add both handlers to logger\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n return logger\n\n\ndef compare_addresses(args):\n \"\"\"Compare the addresses of two files\n \"\"\"\n logger.info('Started reading BOSA address file')\n try:\n bosa = pd.read_csv(args.input_file_1)\n logger.info('Read the BOSA address file')\n except IOError as io:\n logger.fatal(io)\n sys.exit(1)\n\n logger.info('Started reading comparison file')\n try:\n comparison = pd.read_csv(args.input_file_2)\n logger.info('Read the comparison file')\n except IOError as io:\n logger.fatal(io)\n sys.exit(1)\n\n comp_keys = []\n bosa_ids = []\n for comp_key, bosa_key in args.mapping.items():\n try:\n comp_keys.append(comp_key)\n bosa_ids.append(bosa.columns.get_loc(bosa_key))\n except KeyError as ke:\n logger.error(\n 'Column %s of column mapping (%s -> %s) not found in BOSA file', ke, comp_key, bosa_key)\n sys.exit(1)\n\n address_dict = {}\n logger.info('Building data structure to perform matching')\n for i, row in enumerate(bosa.values):\n if i % 50_000 == 0:\n logger.info('Processed %i / %i addresses', i, len(bosa))\n address_dict[tuple(el.lower() if type(\n el) == str else el for el in row[bosa_ids])] = row\n\n extended = perform_exact_matching(\n bosa, comparison, address_dict, comp_keys)\n\n try:\n extended.to_csv(args.output_file, index=False)\n except IOError as io:\n logger.fatal(io)\n sys.exit(1)\n\n\ndef perform_exact_matching(bosa, comparison, address_dict, comp_keys):\n \"\"\"Match the addresses in the comparison file and add address_id and coordinates when matched\n \"\"\"\n addr_id = bosa.columns.get_loc('address_id')\n lon_id = bosa.columns.get_loc('EPSG:4326_lon')\n lat_id = bosa.columns.get_loc('EPSG:4326_lat')\n\n extended = []\n logger.info('Performing matching')\n for i, row in comparison.iterrows():\n if i % 50_000 == 0:\n logger.info('Matched %i / %i addresses', i, len(comparison))\n try:\n key = tuple(el.lower() if type(el) ==\n str else el for el in row[comp_keys])\n except KeyError as ke:\n logger.error('Column %s not found in the comparison file', ke)\n sys.exit(1)\n if key in address_dict:\n # If the address is matched add address_id and coordinates to it\n data = address_dict[key]\n row['address_id'] = data[addr_id]\n row['EPSG:4326_lon'] = data[lon_id]\n row['EPSG:4326_lat'] = data[lat_id]\n extended.append(row)\n extended = pd.DataFrame(extended)\n # Convert column to int type that can handle NaN\n extended['address_id'] = extended['address_id'].astype('Int64')\n\n return extended\n\n\nif __name__ == \"__main__\":\n # Setup argument parser\n parser = argparse.ArgumentParser(\n description='Compare addresses between two csv files.')\n parser.add_argument(\n 'input_file_1', help='BOSA address file, in csv format')\n parser.add_argument(\n 'input_file_2', help='Address file to compare to BOSA address file, in csv format')\n 
parser.add_argument('output_file', help='Name of file to write output to')\n parser.add_argument('--mode', default='exact',\n choices=['exact'], help='How to compare the addresses.')\n parser.add_argument(\n '--mapping', default={}, type=json.loads, help='Column names to consider in the comparison and how they map to the \\\n column names of the BOSA address file. (as a json dict of {comparison_key: bosa_key})')\n parser.add_argument('--log_name', default=\"compare.log\",\n help='name of the log file')\n parser.add_argument('--verbose', action=\"store_true\",\n help=\"toggle verbose output\", default=False)\n\n args = parser.parse_args()\n\n logger = get_best_logger(args.log_name, args.verbose)\n\n compare_addresses(args)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
spyke/spyke
[ "20934521de9c557924911cf6190690ac1c6f8e80" ]
[ "spyke/sort.py" ]
[ "\"\"\"Spike sorting classes and window\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\n__authors__ = ['Martin Spacek', 'Reza Lotun']\n\nimport os\nimport sys\nimport time\nimport datetime\nfrom copy import copy\nimport operator\nimport random\nimport shutil\nimport hashlib\nimport multiprocessing as mp\n\nfrom PyQt4 import QtCore, QtGui\nfrom PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import QAction, QIcon, QApplication\n\nimport numpy as np\nimport scipy\nimport scipy.signal\n#from scipy.cluster.hierarchy import fclusterdata\n\nimport pylab as pl\n\nimport pyximport\npyximport.install(build_in_temp=False, inplace=True)\nfrom . import util # .pyx file\n\nfrom . import core\nfrom .core import (WaveForm, Gaussian, MAXLONGLONG, R, toiter, intround, printflush, lstrip,\n rstrip, lrstrip, pad, td2days, SpykeToolWindow, NList, NSList, dist,\n USList, ClusterChange, SpikeSelectionSlider, lrrep2Darrstripis, rollwin2D)\nfrom .detect import DEBUG\nfrom .surf import EPOCH\nfrom .plot import SpikeSortPanel, CLUSTERCOLOURDICT, WHITE\nfrom .__version__ import __version__\n\n#MAXCHANTOLERANCE = 100 # um\n\nNSLISTWIDTH = 70 # minimize nslist width, enough for 7 digit spike IDs\nPANELWIDTHPERCOLUMN = 120 # sort panel width per column of channels\nPANELHEIGHTPERROW = 50 # sort panel height per row of channels\nVSCROLLBARWIDTH = 14 # hack\nSORTWINDOWHEIGHT = 1035 # TODO: this should be set programmatically\nMINSORTWINDOWWIDTH = 566\n\nMEANWAVEMAXSAMPLES = 2000\nNPCSPERCHAN = 7\n\nPCALIB = 'mdp'\nICALIB = 'sklearn'\n\nDEFMINISI = 50 # default minimum ISI to check for on export, us\n\nMAXGROUPISI = 100000 # us (100 ms)\nMAXGROUPDT = 100000000 # us (100 s)\n\n\nclass Sort(object):\n \"\"\"A spike sorting session, in which you can detect spikes and sort them into Neurons.\n A .sort file is a single Python2-pickled Sort object. 
A .json file is a\n jsonpickle-pickled Sort object\"\"\"\n def __init__(self, detector=None, stream=None, tw=None):\n self.__version__ = __version__\n self.fname = ''\n self.user = ''\n self.notes = ''\n self.detector = detector # this Sort's current Detector object\n self.tw = tw # time window (us) relative to spike time\n self.stream = stream\n self.probe = stream.probe # only one probe design per sort allowed\n self.converter = stream.converter\n self.neurons = {}\n self.clusters = {} # neurons with multidm params scaled for plotting\n self.norder = [] # stores order of neuron ids display in nlist\n self.npcsperchan = NPCSPERCHAN\n\n def get_nextnid(self):\n \"\"\"nextnid is used to retrieve the next unique single unit ID\"\"\"\n nids = list(self.neurons)\n if len(nids) == 0:\n return 1 # single unit nids start at 1\n else:\n return max(max(nids) + 1, 1) # at least 1\n\n nextnid = property(get_nextnid)\n\n def get_nextmuid(self):\n \"\"\"nextmuid is used to retrieve the next unique multiunit ID\"\"\"\n nids = list(self.neurons)\n if len(nids) == 0:\n return -1 # multiunit ids start at -1\n else:\n return min(min(nids) - 1, -1) # at most -1\n\n nextmuid = property(get_nextmuid)\n\n def get_good(self):\n \"\"\"Return array of nids marked by user as 'good'\"\"\"\n good = []\n for neuron in self.neurons.values():\n try:\n if neuron.good:\n good.append(neuron.id)\n except AttributeError: # neuron is from older sort, no .good attrib\n neuron.good = False\n return np.asarray(good)\n\n def set_good(self, good):\n \"\"\"Set good flag to True for nids in good, False otherwise\"\"\"\n nids = list(self.neurons)\n assert np.all([ nid in nids for nid in good ]) # make sure all nids in good exist\n notgood = np.setdiff1d(nids, good)\n for nid in notgood:\n neuron = self.neurons[nid]\n neuron.good = False\n for nid in good:\n neuron = self.neurons[nid]\n neuron.good = True\n\n good = property(get_good, set_good)\n\n def get_stream(self):\n try:\n return self._stream\n except AttributeError:\n # this is likely a brand new sort, has yet to be assigned a Stream\n return None\n\n def set_stream(self, stream=None):\n \"\"\"Check stream type and name and probe type, and restore filtmeth, car, sampfreq and\n shcorrect to stream when binding/modifying stream to self\"\"\"\n oldstream = self.stream\n if stream != None and oldstream != None:\n # do stream types match?\n if type(stream) != type(oldstream):\n raise ValueError(\"Stream types don't match: %s, %s\"\n % (type(oldstream), type(stream)))\n # do stream probe types match?\n if type(stream.probe) != type(oldstream.probe):\n raise ValueError(\"Stream probe types don't match: %s, %s\"\n % (type(oldstream.probe), type(stream.probe)))\n # is one stream fname a superset of the other?\n if (stream.fname not in oldstream.fname) and (oldstream.fname not in stream.fname):\n raise ValueError(\"Stream file names are not supersets of each other: %s, %s\"\n % (oldstream.fname, stream.fname))\n else:\n print('Stream file names are similar enough to proceed: %s, %s'\n % (stream.fname, oldstream.fname))\n try:\n stream.filtmeth = self.filtmeth\n stream.car = self.car\n stream.sampfreq = self.sampfreq\n stream.shcorrect = self.shcorrect\n except AttributeError:\n pass # one of the above aren't bound\n self._stream = stream # set it\n print('Bound stream %r to sort %r' % (stream.fname, self.fname))\n # now that tres is known, calculate window timepoints wrt spike time:\n self.calc_twts_twi()\n\n stream = property(get_stream, set_stream)\n\n def calc_twts_twi(self):\n 
\"\"\"Calculate temporal window timepoints wrt spike time, and the indices of these\n timepoints wrt spike time\"\"\"\n tres = self.tres\n tw = self.tw\n twts = np.arange(tw[0], tw[1], tres)\n twts += twts[0] % tres # get rid of mod, so twts go through zero\n self.twts = twts\n self.twi = intround(twts[0] / tres), intround(twts[-1] / tres)\n #info('twi = %s' % (self.twi,))\n\n def update_tw(self, tw):\n \"\"\"Update tw and everything that depends on it. Note that this shouldn't\n be called directly by the user. Call SpykeWindow.update_spiketw() instead\"\"\"\n oldtw = self.tw\n self.tw = tw\n self.calc_twts_twi()\n dtw = np.asarray(tw) - np.asarray(oldtw) # new minus old\n self.spikes['t0'] += dtw[0]\n self.spikes['t1'] += dtw[1]\n self.spikes['tis'] = self.spikes['tis'] - intround(dtw[0] / self.tres)\n # recalculate any existing templates:\n for neuron in self.neurons.values():\n if neuron.wave.data != None:\n neuron.update_wave()\n print('WARNING: all spike waveforms need to be reloaded!')\n\n def get_tres(self):\n return self.stream.tres\n\n tres = property(get_tres)\n\n def __getstate__(self):\n \"\"\"Get object state for pickling\"\"\"\n # copy it cuz we'll be making changes, this is fast because it's just a shallow copy\n d = self.__dict__.copy()\n # Spikes and wavedata arrays are (potentially) saved separately.\n # usids and PCs/ICs can be regenerated from the spikes array.\n for attr in ['spikes', 'wavedata', 'usids', 'X', 'Xhash']:\n # keep _stream during normal pickling for multiprocessing, but remove it\n # manually when pickling to sort file\n try: del d[attr]\n except KeyError: pass\n return d\n\n def get_nspikes(self):\n try: return len(self.spikes)\n except AttributeError: return 0\n\n nspikes = property(get_nspikes)\n\n def update_usids(self):\n \"\"\"Update usids, which is an array of indices of unsorted spikes\"\"\"\n nids = self.spikes['nid']\n self.usids, = np.where(nids == 0) # 0 means unclustered\n\n def get_spikes_sortedby(self, attr='id'):\n \"\"\"Return array of all spikes, sorted by attribute 'attr'\"\"\"\n vals = self.spikes[attr]\n spikes = self.spikes[vals.argsort()]\n return spikes\n\n def get_wave(self, sid):\n \"\"\"Return WaveForm corresponding to spike sid\"\"\"\n spikes = self.spikes\n nchans = spikes['nchans'][sid]\n chans = spikes['chans'][sid, :nchans]\n t0 = spikes['t0'][sid]\n t1 = spikes['t1'][sid]\n wavedata = self.wavedata[sid, 0:nchans]\n ts = np.arange(t0, t1, self.tres) # build them up\n return WaveForm(data=wavedata, ts=ts, chans=chans, tres=self.tres)\n\n def get_maxchan_wavedata(self, sid=None, nid=None):\n \"\"\"Return wavedata of maxchan of spike sid or neuron nid\"\"\"\n if sid != None:\n assert nid == None\n chani = self.spikes['chani'][sid]\n return self.wavedata[sid, chani]\n elif nid != None:\n assert sid == None\n neuron = self.neurons[nid]\n chani, = np.where(neuron.chans == neuron.chan)\n assert len(chani) == 1\n chani = chani[0] # pull out of length 1 array\n return neuron.wave.data[chani]\n\n def get_mean_wave(self, sids, nid=None):\n \"\"\"Return the mean and std waveform of spike waveforms in sids\"\"\"\n spikes = self.spikes\n nsids = len(sids)\n if nsids > MEANWAVEMAXSAMPLES:\n step = nsids // MEANWAVEMAXSAMPLES + 1 \n s = (\"get_mean_wave() sampling every %d spikes instead of all %d\"\n % (step, nsids))\n if nid != None:\n s = \"neuron %d: \" % nid + s\n print(s)\n sids = sids[::step]\n nsids = len(sids) # update\n \n chanss = spikes['chans'][sids]\n nchanss = spikes['nchans'][sids]\n chanslist = [ chans[:nchans] for chans, 
nchans in zip(chanss, nchanss) ] # list of arrays\n chanpopulation = np.concatenate(chanslist)\n groupchans = np.unique(chanpopulation) # comes out sorted\n \n wavedata = self.wavedata[sids]\n if wavedata.ndim == 2: # should be 3, get only 2 if nsids == 1\n wavedata.shape = 1, wavedata.shape[0], wavedata.shape[1] # give it a singleton 3rd dim\n nt = wavedata.shape[-1]\n maxnchans = len(groupchans)\n data = np.zeros((maxnchans, nt))\n # all spikes have same nt, but not necessarily same nchans, keep track of\n # how many spikes contributed to each of the group's chans\n nspikes = np.zeros((maxnchans, 1), dtype=int)\n for chans, wd in zip(chanslist, wavedata):\n chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans\n data[chanis] += wd[:len(chans)] # accumulate\n nspikes[chanis] += 1 # inc spike count for this spike's chans\n #t0 = time.time()\n data /= nspikes # normalize all data points appropriately, this is now the mean\n var = np.zeros((maxnchans, nt))\n for chans, wd in zip(chanslist, wavedata):\n chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans\n var[chanis] += (wd[:len(chans)] - data[chanis]) ** 2 # accumulate 2nd moment\n var /= nspikes # normalize all data points appropriately, this is now the variance\n std = np.sqrt(var)\n # keep only those chans that at least 1/2 the spikes contributed to\n bins = list(groupchans) + [np.inf] # concatenate rightmost bin edge\n hist, bins = np.histogram(chanpopulation, bins=bins)\n chans = groupchans[hist >= nsids/2]\n chanis = groupchans.searchsorted(chans)\n data = data[chanis]\n std = std[chanis]\n return WaveForm(data=data, std=std, chans=chans)\n\n def check_ISIs(self, nids='good'):\n \"\"\"Check that interspike intervals of spikes in each nid never fall below DEFMINISI\"\"\"\n print('Checking inter-spike intervals')\n if nids == 'good':\n nids = self.good\n elif nids == 'all':\n nids = sorted(self.neurons)\n for nid in nids:\n neuron = self.neurons[nid]\n spikets = self.spikes['t'][neuron.sids] # should be a sorted copy\n assert spikets.flags['OWNDATA'] # safe to modify in place\n spikets.sort() # just in case it isn't perfectly sorted\n ndupl = (np.diff(spikets) < DEFMINISI).sum()\n if ndupl > 0:\n msg = ('n%d has %d duplicate spikes (given DEFMINISI=%d us).\\n'\n 'Remove duplicate spikes with the ISI tool in the Verify tab'\n % (nid, ndupl, DEFMINISI))\n raise RuntimeError(msg)\n\n def check_wavealign(self, nids='good', maxdti=1):\n \"\"\"Check that each neurons's primary peak on the max chan is no more than +/- maxdti\n timepoints away from the t=0 alignment timepoint\"\"\"\n print('Checking neuron mean waveform alignment')\n if nids == 'good':\n nids = self.good\n elif nids == 'all':\n nids = sorted(self.neurons)\n nt = self.twi[1] - self.twi[0] + 1 # expected number of points of each chan's wavedata\n for nid in nids:\n neuron = self.neurons[nid]\n wd = self.get_maxchan_wavedata(nid=nid)\n assert len(wd) == nt\n # find biggest positive and negative peaks, check which comes first, ensure\n # the primary peak is within maxdti of t=0 alignment timepoint:\n ppeakis, _ = scipy.signal.find_peaks(wd) # positive peak indices\n npeakis, _ = scipy.signal.find_peaks(-wd) # negative peak indices\n pmaxi = ppeakis[wd[ppeakis].argmax()] # max positive peak index\n nmaxi = npeakis[wd[npeakis].argmin()] # max negative peak index\n if nmaxi < pmaxi: # usual case: -ve then +ve peak\n peak1i = nmaxi\n else: # less common: +ve then -ve peak, make sure +ve peak is worthy of alignment\n 
pmax, nmax = wd[pmaxi], wd[nmaxi]\n if pmax > abs(nmax): # +ve peak is bigger than -ve peak, align to +ve peak\n peak1i = pmaxi\n else:\n peak1i = nmaxi # default to -ve peak\n alignti = 0 - self.twi[0] # +ve\n dti = peak1i - alignti\n #print(\"n%d: dti=%d\" % (nid, dti))\n if abs(dti) > maxdti:\n peak1uV = self.converter.AD2uV(wd[peak1i])\n peak1us = intround(self.tres*(peak1i-alignti))\n msg = ('Primary peak (%+d uV @ t=%d us) of n%d is %+d timepoints away from '\n 'the t=0 us alignment point. Shift it closer and try again'\n % (peak1uV, peak1us, nid, dti))\n raise RuntimeError(msg)\n\n def check_wavepadding(self, nids='good', npad=2):\n \"\"\"Check if any spikes are edge padded, presumably due to being shifted but not\n reloaded. For robustness, check for consistent signs of padding across all channels.\n An edge is considered padded if it does not change over npad datapoints\"\"\"\n print('Checking spike waveform padding')\n assert npad >= 2 # need at least 2 points to do a diff\n if nids == 'good':\n nids = self.good\n elif nids == 'all':\n nids = sorted(self.neurons)\n for nid in nids:\n neuron = self.neurons[nid]\n for sid in neuron.sids:\n wd = self.wavedata[sid] # multichannel waveform data\n # are left and right edges of wavedata identical for npad number of points?\n l, r = wd[:, :npad], wd[:, -npad:] # shape (nchans, npad)\n leftpadded = (np.diff(l, axis=1) == 0).all()\n rightpadded = (np.diff(r, axis=1) == 0).all()\n # handle case where spike is right after or right before a 0-padded\n # region of data due to gaps between experiments:\n if leftpadded:\n if (wd[:, 0] == 0).all():\n leftpadded = False\n if rightpadded:\n if (wd[:, -1] == 0).all():\n rightpadded = False\n if leftpadded or rightpadded:\n msg = ('n%d has s%d that looks like it has been padded.\\n'\n 'leftpadded, rightpadded = %r, %r\\n'\n 'Reload s%d or n%d or all spikes and try again'\n % (nid, sid, leftpadded, rightpadded, sid, nid))\n raise RuntimeError(msg)\n\n def check_contiguous_nids(self):\n \"\"\"Check that neuron IDs are contiguous (no gaps)\"\"\"\n print('Checking that neuron IDs are contiguous')\n nids = np.array(list(self.neurons))\n nids = nids[nids > 0] # only consider +ve nids\n nids.sort()\n if (np.diff(nids) != 1).any():\n raise RuntimeError('Neuron IDs are not contiguous, renumber all and try again')\n\n def exportptcsfiles(self, basepath, sortpath, user='', notes=''):\n \"\"\"Export spike data to binary .ptcs files under basepath, one file per recording\"\"\"\n # First check to make sure various things are OK before exporting:\n self.check_ISIs()\n self.check_wavealign()\n self.check_wavepadding()\n self.check_contiguous_nids()\n spikes = self.spikes\n exportdt = str(datetime.datetime.now()) # get an export datetime stamp\n exportdt = exportdt.split('.')[0] # ditch the us\n if self.stream.is_multi(): # self.stream is a MultiStream\n streams = self.stream.streams\n else: # self.stream is a single Stream\n streams = [self.stream]\n print('Exporting \"good\" clusters to:')\n # do a separate export for each recording:\n # absolute start and stop times of all streams, rounded to nearest raw timepoint:\n tranges = self.stream.tranges\n t0 = tranges[0, 0] # absolute start time of first stream\n for stream, trange in zip(streams, tranges):\n abst0 = trange[0] # absolute start time of this stream relative to t0\n # time delta between this stream and first stream, to nearest raw timepoint, us:\n dt = abst0 - t0\n dt = intround(dt) # to nearest int us\n self.exportptcsfile(stream, basepath, dt, exportdt, 
sortpath,\n user=user, notes=notes)\n\n def exportptcsfile(self, stream, basepath, dt, exportdt, sortpath, user='', notes=''):\n \"\"\"Export spike data of all \"good\" spikes to binary .ptcs file in basepath.\n Constrain to spikes in stream, and undo any time delta in spike times.\n dt is the integer time difference between start of stream and start of first stream in\n the track, rounded to the nearest us (spike times are stored as int64 us in .ptcs)\"\"\"\n\n # build up list of PTCSNeuronRecords that have spikes in this stream,\n # and tally their spikes\n nsamplebytes = 4 # float32\n nrecs = []\n nspikes = 0\n # only export neurons marked as \"good\", could be single or multi unit:\n for nid in sorted(self.good):\n neuron = self.neurons[nid]\n spikets = self.spikes['t'][neuron.sids] # should be a sorted copy\n assert spikets.flags['OWNDATA'] # safe to modify in place\n spikets.sort() # just in case it isn't perfectly sorted\n spikets -= dt # export spike times relative to t=0 of this recording\n # only include spikes that occurred during this recording\n lo, hi = spikets.searchsorted([stream.t0, stream.t1])\n spikets = spikets[lo:hi]\n if len(spikets) == 0:\n continue # don't save empty neurons\n nrec = PTCSNeuronRecord(neuron, spikets, nsamplebytes, descr='')\n nrecs.append(nrec)\n nspikes += len(spikets)\n nneurons = len(nrecs)\n\n # create the header and write everything to file:\n path = os.path.join(basepath, stream.srcfnameroot)\n try: os.mkdir(path)\n except OSError: pass # path already exists?\n fname = stream.srcfnameroot + '.ptcs'\n fullfname = os.path.join(path, fname)\n header = PTCSHeader(self, sortpath, stream, nneurons, nspikes, nsamplebytes,\n fullfname, exportdt, user=user, notes=notes)\n \n with open(fullfname, 'wb') as f:\n header.write(f)\n for nrec in nrecs:\n nrec.write(f)\n print(fullfname)\n\n def exportcsv(self, fname):\n \"\"\"Export all \"good\" spikes to a .csv file with time (s), nid, and maxchan as the\n columns\"\"\"\n sids = []\n #chans = []\n for nid in sorted(self.good):\n neuron = self.neurons[nid]\n sids.append(neuron.sids)\n # the alternative is to export each spike's unit's channel:\n #chans.append(np.tile(neuron.chan, neuron.nspikes))\n sids = np.hstack(sids)\n spikes = self.spikes[sids]\n tsecs = spikes['t'] / 1e6 # convert from us to s\n nids = spikes['nid']\n chans = spikes['chan']\n #chans = np.hstack(chans)\n data = np.column_stack([tsecs, nids, chans])\n print('Exporting (tsec, nid, chan) of all spikes marked as \"good\" to %s' % fname)\n np.savetxt(fname, data, fmt='%.6f, %d, %d')\n\n def exporttschid(self, basepath):\n \"\"\"Export int64 (timestamp, channel, neuron id) 3 tuples to binary file\"\"\"\n raise NotImplementedError('Needs to be redone to work with multiple streams')\n spikes = self.spikes[self.spikes['nid'] > 0] # don't export unsorted/multiunit spikes\n dt = str(datetime.datetime.now()) # get an export timestamp\n dt = dt.split('.')[0] # ditch the us\n dt = dt.replace(' ', '_')\n dt = dt.replace(':', '.')\n srffnameroot = srffnameroot.replace(' ', '_')\n tschidfname = dt + '_' + srffnameroot + '.tschid'\n tschid = np.empty((len(spikes), 3), dtype=np.int64)\n tschid[:, 0] = spikes['t']\n tschid[:, 1] = spikes['chan']\n tschid[:, 2] = spikes['nid']\n tschid.tofile(os.path.join(path, tschidfname)) # save it\n print(tschidfname)\n\n def exportdin(self, basepath):\n \"\"\"Export stimulus din(s) to binary .din file(s) in basepath\"\"\"\n if self.stream.is_multi(): # self.stream is a MultiStream\n streams = self.stream.streams\n else: 
# self.stream is a single Stream\n streams = [self.stream]\n dinfiledtype=[('TimeStamp', '<i8'), ('SVal', '<i8')] # pairs of int64s\n print('Exporting DIN(s) to:')\n for stream in streams:\n try: # neither of these attribs should exist for recordings with no stimuli:\n svrecs = stream.srff.digitalsvalrecords\n dsprecs = stream.srff.displayrecords\n except AttributeError:\n continue # no din to export for this stream\n if len(svrecs) == 0 or stream.srff.ndigitalsvalrecords == 0:\n raise ValueError(\"digitalsvalrecords are empty for stream %r. Attribute \"\n \"shouldn't exist\" % stream.fname)\n path = os.path.join(basepath, stream.srcfnameroot)\n try: os.mkdir(path)\n except OSError: pass # path already exists?\n # upcast SVal field from uint16 to int64, creates a copy,\n # but it's not too expensive:\n svrecs = svrecs.astype(dinfiledtype)\n # convert to normal n x 2 int64 array\n svrecs = svrecs.view(np.int64).reshape(-1, 2)\n # Some old recordings (<= ptc15) contain multiple experiments.\n # To deal with this, iterate over stream.srff.displayrecords, export one .din\n # per displayrecord. Append experiment ID to each .din filename, if necessary.\n svrects = svrecs[:, 0]\n dsprects = [ dsprec.TimeStamp for dsprec in dsprecs ]\n svalrecis = svrects.searchsorted(dsprects)\n assert svalrecis[0] == 0\n svalrecis = svalrecis[1:] # exclude the trivial 0 index\n # split sval records according to displayrecord timestamps:\n dins = np.split(svrecs, svalrecis)\n assert len(dins) == len(dsprecs)\n for eid, din in enumerate(dins):\n if eid == 0 and len(dins) == 1:\n eidstr = ''\n elif len(dins) < 10:\n eidstr = '.%d' % eid\n else: # include leading zero to maintain alphabetical fname order\n eidstr = '.%02d' % eid\n dinfname = stream.srcfnameroot + eidstr + '.din'\n fullfname = os.path.join(path, dinfname)\n din.tofile(fullfname) # save it\n print(fullfname)\n\n def exporttextheader(self, basepath):\n \"\"\"Export stimulus text header(s) to .textheader file(s) in basepath\"\"\"\n if self.stream.is_multi(): # self.stream is a MultiStream\n streams = self.stream.streams\n else: # self.stream is a single Stream\n streams = [self.stream]\n print('Exporting text header(s) to:')\n for stream in streams:\n try:\n dsprecs = stream.srff.displayrecords\n except AttributeError: # no textheader to export for this stream\n continue\n if len(dsprecs) == 0:\n raise ValueError(\"displayrecords are empty for stream %r. Attribute \"\n \"shouldn't exist\" % stream.fname)\n path = os.path.join(basepath, stream.srcfnameroot)\n try: os.mkdir(path)\n except OSError: pass # path already exists?\n # Some old recordings (<= ptc15) contain multiple experiments.\n # To deal with this, iterate over stream.srff.displayrecords, export one\n # .textheader per displayrecord. 
Append experiment ID to each .textheader\n # filename, if necessary.\n for eid, dsprec in enumerate(dsprecs):\n textheader = dsprec.Header.python_tbl\n if eid == 0 and len(dsprecs) == 1:\n eidstr = ''\n elif len(dsprecs) < 10:\n eidstr = '.%d' % eid\n else: # include leading zero to maintain alphabetical fname order\n eidstr = '.%02d' % eid\n textheaderfname = stream.srcfnameroot + eidstr + '.textheader'\n fullfname = os.path.join(path, textheaderfname)\n with open(fullfname, 'w') as f:\n f.write(textheader) # save it\n print(fullfname)\n\n def exportall(self, basepath, sortpath):\n \"\"\"Export spike data, stimulus din and textheader to basepath\"\"\"\n self.exportptcsfiles(basepath, sortpath)\n self.exportdin(basepath)\n self.exporttextheader(basepath)\n\n def exportspikewaves(self, sids, selchans, tis, fname, format):\n \"\"\"Export spike waveform data of selected sids, selchans and tis to binary\n .spikes.zip file or text .spikes.csv file\"\"\"\n nspikes = len(sids)\n chans, chanslist = self.get_common_chans(sids, selchans)\n nchans = len(chans)\n ti0, ti1 = tis\n nt = ti1 - ti0\n # fill in 3D data array:\n dtype = self.wavedata.dtype\n data = np.zeros((nspikes, nchans, nt), dtype=dtype)\n for sii, sid in enumerate(sids):\n spikechans = chanslist[sii]\n spikechanis = spikechans.searchsorted(chans)\n data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]\n if format == 'text': # flatten timepoints of all chans into columns\n data.shape = nspikes, nchans*nt\n stream = self.stream\n assert stream.kind == 'highpass' # should be the only type ever saved to self\n if format == 'binary':\n nids = self.spikes['nid'][sids]\n spiketimes = self.spikes['t'][sids]\n chanpos = stream.probe.siteloc_arr()\n uVperAD = stream.converter.AD2uV(1) # convert 1 AD unit to uV\n with open(fname, 'wb') as f:\n np.savez_compressed(f, data=data, sids=sids, nids=nids,\n spiketimes=spiketimes, chans=chans, tis=tis,\n chanpos=chanpos, uVperAD=uVperAD)\n elif format == 'text':\n np.savetxt(fname, data, fmt='%d', delimiter=',') # data should be int\n else:\n raise ValueError('Unknown format: %r' % format)\n print('Exported %d spikes on chans=%r and tis=%r to %s'\n % (nspikes, list(chans), list(tis), fname))\n \n def get_param_matrix(self, kind=None, sids=None, tis=None, selchans=None, norm=False,\n dims=None, scale=True):\n \"\"\"Organize dims parameters from sids into a data matrix, each column\n corresponding to a dim. 
To do PCA/ICA clustering on all spikes, one maxchan at\n a time, caller needs to call this multiple times, one for each set of\n maxchan unique spikes,\"\"\"\n spikes = self.spikes\n dtypefields = list(spikes.dtype.fields)\n if sids is None:\n sids = spikes['id'] # default to all spikes\n comps = [ dim for dim in dims if dim.startswith('c') and dim[-1].isdigit() ]\n rmserror = np.any([ dim == 'RMSerror' for dim in dims ])\n ncomp = len(comps)\n hascomps = ncomp > 0\n if hascomps:\n X = self.get_component_matrix(kind, sids, tis=tis, chans=selchans,\n minncomp=ncomp, norm=norm)\n if rmserror:\n rms = self.get_rms_error(sids, tis=tis, chans=selchans)\n\n data = []\n for dim in dims:\n if dim in dtypefields:\n data.append( np.float32(spikes[dim][sids]) )\n elif dim.startswith('c') and dim[-1].isdigit():\n compid = int(lstrip(dim, 'c'))\n data.append( np.float32(X[:, compid]) )\n elif dim == 'RMSerror':\n data.append( np.float32(rms) )\n else:\n raise RuntimeError('Unknown dim %r' % dim)\n # np.column_stack returns a copy, not modifying the original array\n data = np.column_stack(data)\n if scale:\n # ensure 0 mean, and unit variance/stdev\n for dim, d in zip(dims, data.T): # d iterates over columns\n d -= d.mean()\n if dim in ['x0', 'y0'] and self.probe.ncols > 1:\n try: x0std # normalize spatial params by x0 std\n except NameError: x0std = spikes['x0'].std()\n if x0std != 0.0:\n d /= x0std\n #elif dim == 't': # the longer the recording in hours, the greater the\n # # scaling in time\n # trange = d.max() - d.min()\n # tscale = trange / (60*60*1e6)\n # d *= tscale / d.std()\n else: # normalize all other dims by their std\n dstd = d.std()\n if dstd != 0.0:\n d /= dstd\n return data\n\n def get_component_matrix(self, kind, sids, tis=None, chans=None, minncomp=None,\n norm=False):\n \"\"\"Find set of chans common to all sids, and do PCA/ICA on those waveforms. Or,\n if chans are specified, limit PCA/ICA to them. 
Return component matrix with at\n least minncomp dimensions\"\"\"\n spikes = self.spikes\n nt = self.wavedata.shape[2]\n if tis is None: # use full waveform\n tis = np.asarray([0, nt])\n #print('tis: %r' % (tis,))\n ti0, ti1 = tis\n assert ti0 < ti1 <= nt\n nt = ti1 - ti0\n chans, chanslist = self.get_common_chans(sids, chans)\n nchans = len(chans)\n nspikes = len(sids)\n if nspikes < 2:\n raise RuntimeError(\"Need at least 2 spikes for %s\" % kind)\n if nchans == 0:\n raise RuntimeError(\"Spikes have no common chans for %s\" % kind)\n\n # check if desired components have already been calculated (cache hit):\n Xhash = self.get_Xhash(kind, sids, tis, chans, self.npcsperchan, norm)\n self.Xhash = Xhash # save as key to most recent component matrix in self.X\n try: self.X\n except AttributeError: self.X = {} # init the dimension reduction cache attrib\n if Xhash in self.X:\n print('Cache hit, using cached %ss from tis=%r, chans=%r of %d spikes' %\n (kind[:-1], list(tis), list(chans), nspikes))\n return self.X[Xhash] # no need to recalculate\n\n print('Cache miss, (re)calculating %ss' % kind[:-1])\n\n # collect data between tis from chans from all spikes:\n print('Doing %s on tis=%r, chans=%r of %d spikes' %\n (kind, list(tis), list(chans), nspikes))\n # MDP complains of roundoff errors with float32 for large covariance matrices\n data = np.zeros((nspikes, nchans, nt), dtype=np.float64)\n for sii, sid in enumerate(sids):\n spikechans = chanslist[sii]\n spikechanis = spikechans.searchsorted(chans)\n spikedata = self.wavedata[sid][spikechanis, ti0:ti1]\n if norm:\n # normalize by Vpp of chan with max Vpp:\n maxptp = spikedata.ptp(axis=1).max()\n if maxptp != 0: # prevent div by 0\n spikedata = spikedata / maxptp\n data[sii] = spikedata\n print('Input shape for %s: %r' % (kind, data.shape))\n t0 = time.time()\n data.shape = nspikes, nchans*nt # flatten timepoints of all chans into columns\n print('Reshaped input for %s: %r' % (kind, data.shape))\n if kind == 'PCA': # principal components analysis\n if PCALIB == 'mdp':\n import mdp # delay as late as possible\n X = mdp.pca(data, output_dim=5, svd=False) # svd=False is default\n elif PCALIB == 'sklearn':\n # sklearn's PCA is about 8x slower than mdp.pca, I think because it\n # doesn't tap into scipy.linalg.eig compiled code. 
RandomizedPCA is faster\n # than PCA, but isn't deterministic, and is still 2-3x slower than mdp.pca\n from sklearn.decomposition import PCA\n pca = PCA(n_components=5)\n X = pca.fit_transform(data) # do both the fit and the transform\n else:\n raise ValueError('Invalid PCALIB %r' % PCALIB)\n if X.shape[1] < minncomp:\n raise RuntimeError(\"Can't satisfy minncomp=%d request\" % minncomp)\n elif kind == 'sPCA': # sparse principal components analysis\n from sklearn.decomposition import SparsePCA\n n_components = 5\n alpha = 1 # sparseness parameter\n n_jobs = mp.cpu_count()\n spca = SparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)\n X = spca.fit_transform(data) # do both the fit and the transform\n elif kind == 'mbsPCA': # mini batch sparse principal components analysis\n from sklearn.decomposition import MiniBatchSparsePCA\n n_components = 5\n alpha = 1 # sparseness parameter\n n_jobs = mp.cpu_count()\n mbspca = MiniBatchSparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)\n X = mbspca.fit_transform(data) # do both the fit and the transform\n elif kind == 'NMF': # non-negative matrix factorization\n from sklearn.decomposition import NMF\n n_components = 5\n init = None # 'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'\n nmf = NMF(n_components=n_components, init=init)\n X = nmf.fit_transform(data) # do both the fit and the transform\n elif kind == 'tSNE': # t-distributed stochastic neighbor embedding\n # limit number of PCs to feed into ICA, keep up to npcsperchan components per\n # chan on average:\n ncomp = min((self.npcsperchan*nchans, data.shape[1]))\n print('ncomp: %d' % ncomp)\n import mdp # delay as late as possible\n # do PCA first, to reduce dimensionality and speed up ICA:\n data = mdp.pca(data, output_dim=ncomp)\n from sklearn.manifold import TSNE\n n_components = 3 # not suited for any more than 3, according to the paper\n #init = 'random', 'pca'\n tsne = TSNE(n_components=n_components)\n X = tsne.fit_transform(data) # do both the fit and the transform\n elif kind == 'ICA': # independent components analysis\n # ensure nspikes >= ndims**2 for good ICA convergence\n maxncomp = intround(np.sqrt(nspikes))\n if maxncomp < minncomp:\n raise RuntimeError(\"Can't satisfy minncomp=%d request\" % minncomp)\n if data.shape[0] <= data.shape[1]:\n raise RuntimeError('Need more observations than dimensions for ICA')\n # limit number of PCs to feed into ICA, keep up to npcsperchan components per\n # chan on average:\n ncomp = min((self.npcsperchan*nchans, maxncomp, data.shape[1]))\n if ICALIB == 'mdp':\n import mdp # delay as late as possible\n # do PCA first, to reduce dimensionality and speed up ICA:\n print('ncomp: %d' % ncomp)\n data = mdp.pca(data, output_dim=ncomp)\n # nonlinearity g='pow3', ie x**3. tanh seems to separate better,\n # but is a bit slower. 
gaus seems to be slower still, and no better\n # than tanh, but these are just vague impressions.\n # defaults to whitened=False, ie assumes data isn't whitened\n node = mdp.nodes.FastICANode(g='pow3')\n X = node(data)\n pm = node.get_projmatrix()\n X = X[:, np.any(pm, axis=0)] # keep only the non zero columns\n elif ICALIB == 'sklearn':\n from sklearn.decomposition import FastICA\n # when whiten=True (default), FastICA preprocesses the data using PCA, and\n # n_components is the number of PCs that are kept before doing ICA.\n alg = 'parallel' # parallel or deflation, default is parallel\n fun = 'logcosh' # logcosh, exp, or cube, default is logcosh\n maxiter = 100 # default is 200\n tol = 0.5 # default is 0.0001, seems need >~ 0.1 to exit faster\n ## TODO: make FastICA algorithm (parallel, deflation), nonlinearity (logcosh,\n ## exp, cube) and IC sort method (abs(kurtosis) vs. negentropy) GUI options\n print('ncomp=%d, alg=%r, fun=%r, maxiter=%d, tol=%g'\n % (ncomp, alg, fun, maxiter, tol))\n fastica = FastICA(n_components=ncomp, algorithm=alg,\n whiten=True, fun=fun, fun_args=None,\n max_iter=maxiter, tol=tol, w_init=None,\n random_state=None)\n X = fastica.fit_transform(data) # do both the fit and the transform\n #pm = fastica.components_\n print('fastica niters: %d' % (fastica.n_iter_))\n else:\n raise ValueError('Invalid ICALIB %r' % ICALIB)\n if X.shape[1] < 3:\n raise RuntimeError('Need at least 3 columns')\n\n # Sort ICs by decreasing kurtosis or negentropy. For kurtosis, see Scholz2004 (or\n # rather, opposite to their approach, which picked ICs with most negative\n # kurtosis). For methods of estimating negentropy, see Hyvarinen1997.\n\n '''\n # sort by abs(kurtosis) of each IC (column)\n k = scipy.stats.kurtosis(X, axis=0)\n ki = abs(k).argsort()[::-1] # decreasing order of abs(kurtosis)\n print('Sort by abs(kurtosis):')\n print(k[ki])\n X = X[:, ki] # sort the ICs\n '''\n # sort by negentropy of each IC (column), this seems to work better than kurtosis\n # at separating clusters of similar size:\n ne = core.negentropy(X, axis=0)\n assert (ne > 0).all()\n nei = ne.argsort()[::-1] # decreasing order of negentropy\n print('Sort by negentropy:')\n print(ne[nei])\n X = X[:, nei] # sort the ICs\n '''\n import pylab as pl\n pl.figure()\n pl.imshow(pm)\n pl.colorbar()\n pl.title('original projmatrix')\n pl.figure()\n pl.imshow(pm[:, ki])\n pl.colorbar()\n pl.title('decreasing abs(kurtosis) projmatrix')\n pl.figure()\n pl.imshow(pm[:, nei])\n pl.colorbar()\n pl.title('decreasing negentropy projmatrix')\n '''\n else:\n raise ValueError('Unknown kind %r' % kind)\n print('Output shape for %s: %r' % (kind, X.shape))\n self.X[Xhash] = X # cache for fast future retrieval\n print('%s took %.3f sec' % (kind, time.time()-t0))\n unids = list(np.unique(spikes['nid'][sids])) # set of all nids that sids span\n for nid in unids:\n # don't update pos of junk cluster, if any, since it might not have any chans\n # common to all its spikes, and therefore can't have PCA/ICA done on it\n if nid != 0:\n self.clusters[nid].update_comppos(X, sids)\n return X\n\n def get_rms_error(self, sids, tis=None, chans=None):\n \"\"\"Calculate RMS error of spike waveforms (all from the same cluster) relative to\n their cluster's mean waveform. 
Consider only selected tis and chans\"\"\"\n spikes = self.spikes\n nids = np.unique(spikes['nid'][sids])\n nid = nids[0]\n if len(nids) > 1 or nid == 0:\n raise RuntimeError(\"Spikes must all belong to the same (non-junk) cluster for \"\n \"RMS error calculation\")\n nt = self.wavedata.shape[2]\n if tis is None: # use full waveform\n tis = np.asarray([0, nt])\n #print('tis: %r' % (tis,))\n ti0, ti1 = tis\n assert ti0 < ti1 <= nt\n nt = ti1 - ti0\n chans, chanslist = self.get_common_chans(sids, chans)\n nchans = len(chans)\n nspikes = len(sids)\n if nchans == 0:\n raise RuntimeError(\"Spikes have no common chans for RMS error\")\n\n # collect data between tis from chans from all spikes:\n print('Getting RMS error on tis=%r, chans=%r of %d spikes' %\n (list(tis), list(chans), nspikes))\n data = np.zeros((nspikes, nchans, nt), dtype=np.float64)\n for sii, sid in enumerate(sids):\n spikechans = chanslist[sii]\n spikechanis = spikechans.searchsorted(chans)\n data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]\n\n # get cluster mean waveform between tis on chans:\n wave = self.neurons[nid].get_wave()\n chanis = wave.chans.searchsorted(chans)\n meandata = np.float64(wave.data[chanis, ti0:ti1])\n\n # calculate RMS error between each spike and the cluster mean waveform:\n se = (data - meandata) ** 2 # squared error\n # take mean across timepoints and chans, but not across spikes:\n mse = se.mean(axis=2).mean(axis=1) # mean squared error\n return np.sqrt(mse)\n\n def get_common_chans(self, sids, chans=None):\n \"\"\"Find channels common to all sids, and optionally to chans as well. Also,\n return chanslist, ie list of arrays of chans of sids\"\"\"\n spikes = self.spikes\n chanss = spikes['chans'][sids]\n nchanss = spikes['nchans'][sids]\n #t0 = time.time()\n chanslist = [ cs[:ncs] for cs, ncs in zip(chanss, nchanss) ] # list of arrays\n #print('Building chanslist took %.3f sec' % (time.time()-t0))\n commonchans = util.intersect1d_uint8(chanslist) # find intersection\n if chans is not None and len(chans) > 0:\n # values in chans but not in commonchans:\n diffchans = np.setdiff1d(chans, commonchans)\n commonchans = np.intersect1d(chans, commonchans) # values in both\n if len(diffchans) > 0:\n print('WARNING: ignored chans %r not common to all spikes' % list(diffchans))\n return commonchans, chanslist\n\n def get_Xhash(self, kind, sids, tis, chans, npcsperchan, norm):\n \"\"\"Return MD5 hex digest of args, for uniquely identifying the matrix resulting\n from dimension reduction of spike data\"\"\"\n h = hashlib.md5()\n h.update(kind.encode())\n h.update(sids)\n h.update(tis)\n h.update(chans)\n if kind == 'ICA': # consider npcsperchan only if doing ICA\n h.update(str(npcsperchan).encode())\n h.update(str(norm).encode())\n return h.hexdigest()\n\n def create_neuron(self, id=None, inserti=None):\n \"\"\"Create and return a new Neuron with a unique ID\"\"\"\n if id == None:\n id = self.nextnid\n if id in self.neurons:\n raise RuntimeError('Neuron %d already exists' % id)\n id = int(id) # get rid of numpy ints\n neuron = Neuron(self, id)\n # add neuron to self\n self.neurons[neuron.id] = neuron\n if inserti == None:\n self.norder.append(neuron.id)\n else:\n self.norder.insert(inserti, neuron.id)\n return neuron\n\n def remove_neuron(self, id):\n try:\n del self.neurons[id] # may already be removed due to recursive call\n del self.clusters[id]\n self.norder.remove(id)\n except (KeyError, ValueError):\n pass\n\n def shift(self, sids, nt):\n \"\"\"Shift sid waveforms by nt timepoints: -ve shifts waveforms 
left, +ve shifts right.\n For speed, pad waveforms with edge values at the appropriate end\"\"\"\n spikes = self.spikes\n wd = self.wavedata\n for sid in sids: # maybe there's a more efficient way than iterating over sids\n core.shiftpad(wd[sid], nt) # modifies wd in-place\n # update spike parameters:\n dt = intround(nt * self.tres) # amount of time to shift by, signed, in us\n # so we can later reload the wavedata accurately, shifting the waveform right and\n # padding it on its left requires decrementing the associated timepoints\n # (and vice versa)\n spikes['t'][sids] -= dt\n spikes['t0'][sids] -= dt\n spikes['t1'][sids] -= dt\n # might result in some out of bounds tis because the original peaks\n # have shifted off the ends. Opposite sign wrt timepoints above, referencing within\n # wavedata:\n spikes['tis'][sids] = spikes['tis'][sids] + nt\n # this in-place operation raises a TypeError in numpy 1.11.2, something related to\n # subtracting an int from an unsigned int:\n #spikes['tis'][sid] += nt\n # caller should treat all sids as dirty\n '''\n # replaced by util.alignbest_cy():\n def alignbest(self, sids, tis, chans):\n \"\"\"Align all sids between tis on chans by best fit according to mean squared error.\n chans are assumed to be a subset of channels of sids. Return sids\n that were actually moved and therefore need to be marked as dirty\"\"\"\n spikes = self.spikes\n nspikes = len(sids)\n nchans = len(chans)\n wd = self.wavedata\n nt = wd.shape[2] # num timepoints in each waveform\n ti0, ti1 = tis\n subnt = ti1 - ti0 # num timepoints to slice from each waveform\n # TODO: make maxshift a f'n of interpolation factor\n maxshift = 2 # shift +/- this many timepoints\n subntdiv2 = subnt // 2\n #print('subntdiv2 on either side of t=0: %d' % subntdiv2)\n if subntdiv2 < maxshift:\n raise ValueError(\"Selected waveform duration too short\")\n #maxshiftus = maxshift * self.stream.tres\n # NOTE: in this case, it may be faster to keep shifts and sti0s and sti1s as lists\n # of ints instead of np int arrays, maybe because their values are faster to iterate\n # over or index with in python loops and lists:\n shifts = range(-maxshift, maxshift+1) # from -maxshift to maxshift, inclusive\n nshifts = len(shifts)\n sti0s = [ ti0+shifti for shifti in range(nshifts) ] # shifted ti0 values\n sti1s = [ ti1+shifti for shifti in range(nshifts) ] # shifted ti1 values\n sti0ssti1s = zip(sti0s, sti1s)\n print(\"Padding waveforms with up to +/- %d points of fake data\" % maxshift)\n\n # not worth subsampling here while calculating meandata, since all this\n # stuff in this loop is needed in the shift loop below\n subsd = np.zeros((nspikes, nchans, subnt), dtype=wd.dtype) # subset of spike data\n spikechanis = np.zeros((nspikes, nchans), dtype=np.int64)\n t0 = time.time()\n for sidi, sid in enumerate(sids):\n spike = spikes[sid]\n nspikechans = spike['nchans']\n spikechans = spike['chans'][:nspikechans]\n spikechanis[sidi] = spikechans.searchsorted(chans)\n subsd[sidi] = wd[sid, spikechanis[sidi], ti0:ti1]\n print('Mean prep loop for best shift took %.3f sec' % (time.time()-t0))\n t0 = time.time()\n meandata = subsd.mean(axis=0) # float64\n print('Mean for best shift took %.3f sec' % (time.time()-t0))\n\n # choose best shifted waveform for each spike\n # widesd holds current spike data plus padding on either side\n # to allow for full width slicing for all time shifts:\n maxnchans = spikes['nchans'].max() # of all spikes in sort\n widesd = np.zeros((maxnchans, maxshift+nt+maxshift), dtype=wd.dtype) \n 
shiftedsubsd = subsd.copy() # init\n tempsubshifts = np.zeros((nshifts, nchans, subnt), dtype=wd.dtype)\n dirtysids = []\n t0 = time.time()\n for sidi, sid in enumerate(sids):\n # for speed, instead of adding real data, pad start and end with fake values\n chanis = spikechanis[sidi]\n sd = wd[sid] # sid's spike data\n widesd[:, maxshift:-maxshift] = sd # 2D\n widesd[:, :maxshift] = sd[:, 0, None] # pad start with first point per chan\n widesd[:, -maxshift:] = sd[:, -1, None] # pad end with last point per chan\n wideshortsd = widesd[chanis] # sid's padded spike data on chanis, 2D\n\n # keep this inner loop as fast as possible:\n for shifti, (sti0, sti1) in enumerate(sti0ssti1s):\n tempsubshifts[shifti] = wideshortsd[:, sti0:sti1] # len: subnt\n \n errors = tempsubshifts - meandata # (nshifts, nchans, subnt) - (nchans, subnt)\n # get sum squared errors by taking sum across highest two dims - for purpose\n # of error comparison, don't need to take mean or square root. Also, order\n # of summation along axes doesn't matter, as long as it's done on the highest two:\n sserrors = (errors**2).sum(axis=2).sum(axis=1) # nshifts long\n bestshifti = sserrors.argmin()\n bestshift = shifts[bestshifti]\n if bestshift != 0: # no need to update sort.wavedata[sid] if there's no shift\n # update time values:\n dt = bestshift * self.tres # time to shift by, signed, in us\n spikes['t'][sid] += dt # should remain halfway between t0 and t1\n spikes['t0'][sid] += dt\n spikes['t1'][sid] += dt\n # might result in some out of bounds tis because the original peaks\n # have shifted off the ends. Opposite sign, referencing within wavedata:\n spikes['tis'][sid] -= bestshift\n # update sort.wavedata\n wd[sid] = widesd[:, bestshifti:bestshifti+nt]\n shiftedsubsd[sidi] = tempsubshifts[bestshifti]\n dirtysids.append(sid) # mark sid as dirty\n print('Shifting loop took %.3f sec' % (time.time()-t0))\n AD2uV = self.converter.AD2uV\n stdevbefore = AD2uV(subsd.std(axis=0).mean())\n stdevafter = AD2uV(shiftedsubsd.std(axis=0).mean())\n print('stdev went from %.3f to %.3f uV' % (stdevbefore, stdevafter))\n return dirtysids\n '''\n def alignminmax(self, sids, to):\n \"\"\"Align sids by their min or max. 
Return those that were actually moved\n and therefore need to be marked as dirty\"\"\"\n if not self.stream.is_open():\n raise RuntimeError(\"No open stream to reload spikes from\")\n spikes = self.spikes\n V0s = spikes['V0'][sids]\n V1s = spikes['V1'][sids]\n Vss = np.column_stack((V0s, V1s))\n alignis = spikes['aligni'][sids]\n b = np.column_stack((alignis==0, alignis==1)) # 2D boolean array\n if to == 'min':\n i = Vss[b] > 0 # indices into sids of spikes aligned to the max peak\n elif to == 'max':\n i = Vss[b] < 0 # indices into sids of spikes aligned to the min peak\n else:\n raise ValueError('Unknown to %r' % to)\n sids = sids[i] # sids that need realigning\n nspikes = len(sids)\n print(\"Realigning %d spikes\" % nspikes)\n if nspikes == 0: # nothing to do\n return [] # no sids to mark as dirty\n\n multichantis = spikes['tis'][sids] # nspikes x nchans x 2 arr\n chanis = spikes['chani'][sids] # nspikes arr of max chanis\n # peak tis on max chan of each spike, convert from uint8 to int32 for safe math\n tis = np.int32(multichantis[np.arange(nspikes), chanis]) # nspikes x 2 arr\n # NOTE: tis aren't always in temporal order!\n dpeaktis = tis[:, 1] - tis[:, 0] # could be +ve or -ve\n dpeaks = spikes['dt'][sids] # stored as +ve\n\n # for each spike, decide whether to add or subtract dpeak to/from its temporal values\n ordered = dpeaktis > 0 # in temporal order\n reversed = dpeaktis < 0 # in reversed temporal order\n alignis = spikes['aligni'][sids]\n alignis0 = alignis == 0\n alignis1 = alignis == 1\n dpeaki = np.zeros(nspikes, dtype=int)\n # add dpeak to temporal values to align to later peak\n dpeaki[ordered & alignis0 | reversed & alignis1] = 1\n # subtract dpeak from temporal values to align to earlier peak\n dpeaki[ordered & alignis1 | reversed & alignis0] = -1\n\n # upcast aligni from 1 byte to an int before doing arithmetic on it:\n #dalignis = -np.int32(alignis)*2 + 1\n dts = dpeaki * dpeaks\n dtis = -dpeaki * abs(dpeaktis)\n # shift values\n spikes['t'][sids] += dts\n spikes['t0'][sids] += dts\n spikes['t1'][sids] += dts\n spikes['tis'][sids] = spikes['tis'][sids] + dtis[:, None, None] # update wrt new t0i\n spikes['aligni'][sids[alignis0]] = 1\n spikes['aligni'][sids[alignis1]] = 0\n\n # update wavedata for each shifted spike\n self.reload_spikes(sids)\n return sids # mark all sids as dirty\n\n def choose_new_meanchans(self, sids):\n \"\"\"Get mean waveform of all sids, then find the mean's chan with max Vpp, then\n choose det.maxnchansperspike channels around that maxchan.\n Return meanchans, furthestchan, and furthestchani\"\"\"\n print('Choosing new channel set for all selected spikes')\n det = self.detector\n meanwave = self.get_mean_wave(sids)\n # mean chan with max Vpp:\n maxchan = meanwave.chans[meanwave.data.ptp(axis=1).argmax()]\n maxchani = det.chans.searchsorted(maxchan)\n distances = det.dm.data[maxchani]\n # keep the maxnchansperspike closest chans to maxchan, including maxchan:\n chanis = distances.argsort()[:det.maxnchansperspike]\n meanchans = det.chans[chanis]\n meanchans.sort() # keep them sorted\n print('meanchans: %r' % list(meanchans))\n furthestchan = det.chans[chanis[-1]]\n print('furthestchan: %d' % furthestchan)\n furthestchani = meanchans.searchsorted(furthestchan)\n # sanity checks:\n assert len(meanchans) == det.maxnchansperspike\n assert maxchan in meanchans\n return meanchans, furthestchan, furthestchani\n\n def reload_spikes(self, sids, usemeanchans=False):\n \"\"\"Update wavedata of designated spikes from stream. 
Optionally fix incorrect\n time values from .sort 0.3 files. Optionally choose new set of channels for all\n sids based on the chans closest to the mean of the sids. It's the caller's\n responsibility to mark sids as dirty and trigger resaving of .wave file\"\"\"\n\n ## TODO: add findmaxchan=False and recenteronmaxchan=False kwargs\n\n nsids = len(sids)\n print('(Re)loading %d spikes' % nsids)\n stream = self.stream\n if not stream.is_open():\n raise RuntimeError(\"No open stream to reload spikes from\")\n spikes = self.spikes\n det = self.detector\n ver_lte_03 = float(self.__version__) <= 0.3\n if ver_lte_03:\n print('Fixing potentially incorrect time values during spike reloading')\n nfixed = 0\n treload = time.time()\n if usemeanchans:\n if ver_lte_03:\n raise RuntimeError(\"Best not to choose new chans from mean until after \"\n \"converting to .sort >= 0.4\")\n meanchans, furthestchan, furthestchani = self.choose_new_meanchans(sids)\n nmeanchans = len(meanchans)\n\n # split up sids into groups efficient for loading from stream:\n ts = spikes[sids]['t'] # noncontig, not a copy\n # ensure they're in temporal order:\n if not (np.diff(ts) >= 0).all():\n print(\"Selected sids aren't in temporal order, sorting by time...\")\n tsis = ts.argsort()\n sids = sids[tsis]\n print(\"Done sorting sids by time\")\n # break up spikes by ISIs >= MAXGROUPISI:\n splitis = np.where(np.diff(ts) >= MAXGROUPISI)[0] + 1\n groups = np.split(sids, splitis)\n # limit each group of sids to no more than MAXGROUPDT:\n groupi = 0\n while groupi < len(groups):\n group = groups[groupi] # group of sids all with ISIs < MAXGROUPISI\n ## TODO: not a copy: is this the optimal way to get the times in this case?\n relts = spikes[group]['t'] - spikes[group[0]]['t']\n splitis = np.where(np.diff(relts // MAXGROUPDT) > 0)[0] + 1\n nsubgroups = len(splitis) + 1\n if nsubgroups > 1:\n # del original group, replace with subgroups\n del groups[groupi]\n subgroups = np.split(group, splitis)\n groups[groupi:groupi] = subgroups\n groupi += len(subgroups)\n else:\n groupi += 1\n print('ngroups: %d' % len(groups))\n\n # process each group:\n sidi = 0 # init sid index across all groups, used as status counter\n for groupi, group in enumerate(groups):\n printflush('<%d>' % groupi, end='')\n assert len(group) > 0 # otherwise something went wrong above\n t0 = spikes[group[0]]['t0']\n t1 = spikes[group[-1]]['t1']\n if ver_lte_03:\n # load a little extra, in case we need to reload misaligned first and/or\n # last spike in this group\n t0 -= 5000 # -5 ms\n t1 += 5000 # +5 ms\n \"\"\"\n Find union of chans of sids in this group, ask Stream for only those such that no\n unnecessary resampling takes place on unneeded chans. Note that this doesn't make\n a difference when CAR is enabled in the stream, because the full set of enabled\n chans have to be maintained in Stream.__call__ until the very end. Don't bother\n cutting out the correct nchans for each sid. 
At worst, chan 0 (the \"empty\" chans\n array value) will be unnecessarily added to unionchans, and we'll retrieve one\n extra chan when creating tempwave, which will then later be discarded:\n \"\"\"\n unionchans = np.unique(spikes['chans'][group])\n if usemeanchans:\n # now that we have the original unionchans of this group,\n # update this group's spikes array entries with meanchans:\n spikes['nchans'][group] = nmeanchans\n # we're using the max num chans, so assign the full array:\n spikes['chans'][group] = meanchans\n # now update unionchans as well:\n unionchans = np.unique(np.hstack((unionchans, meanchans)))\n if 0 not in stream.chans: # if chan 0 is disabled in stream\n # remove 0 from unionchans, otherwise an error would be raised when\n # calling stream()\n unionchans = unionchans[unionchans != 0]\n # load and resample only what's needed for this group:\n tempwave = stream(t0, t1, unionchans)\n # slice out each spike's reloaded data from tempwave:\n for sid in group:\n # print status:\n if sidi % 10000 == 0:\n printflush(sidi, end='')\n elif sidi % 1000 == 0:\n printflush('.', end='')\n if usemeanchans: # already checked above that ver_lte_03 == False\n # this spike's chans have been set to meanchans, now\n # check that each spike's maxchan is in meanchans:\n chan = spikes[sid]['chan']\n if chan not in meanchans:\n # replace furthest chan with spike's maxchan:\n print(\"spike %d: replacing furthestchan %d with spike's maxchan %d\"\n % (sid, furthestchan, chan))\n nchans = spikes[sid]['nchans']\n chans = spikes[sid]['chans'][:nchans]\n # replace furthest chan with max chan, modifies spikes array in-place:\n chans[furthestchani] = chan\n # make sure chans remain sorted:\n chans.sort()\n # this isn't necessary, because all the above was in-place:\n #spikes['chans'][sid][:nchans] = chans\n spike = spikes[sid]\n nchans = spike['nchans']\n chans = spike['chans'][:nchans]\n rd = tempwave[spike['t0']:spike['t1']][chans].data # reloaded data\n if ver_lte_03: # fix potentially incorrect spike tis\n result = self.reload_spike_ver_lte_03(sid, nchans, tempwave, rd)\n if result is None:\n sidi += 1 # inc status counter\n continue # rollwin2D won't work, skip to next sid\n else:\n rd, fixed = result\n if fixed:\n nfixed += 1\n nt = rd.shape[1]\n self.wavedata[sid, :nchans, :nt] = rd # update wavedata\n sidi += 1 # inc status counter\n print()\n\n if ver_lte_03:\n print('Fixed time values of %d spikes' % nfixed)\n print('(Re)loaded %d spikes, took %.3f sec' % (len(sids), time.time()-treload))
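\n # reload_spikes() above batches sids into stream reads by cutting the sorted\n # spike train wherever the inter-spike interval reaches MAXGROUPISI. The core\n # np.diff/np.split pattern, on toy timestamps (illustrative values only):\n #\n # import numpy as np\n # ts = np.array([0, 10, 20, 500, 510, 2000]) # spike times, us\n # MAXGROUPISI = 100\n # splitis = np.where(np.diff(ts) >= MAXGROUPISI)[0] + 1 # -> [3, 5]\n # groups = np.split(np.arange(len(ts)), splitis)\n # # -> [array([0, 1, 2]), array([3, 4]), array([5])]\n\n def reload_spike_ver_lte_03(self, sid, nchans, tempwave, rd):\n \"\"\"In sort.__version__ <= 0.3, t, t0, t1, and tis were not updated\n during alignbest() calls. To fix this, load new data with old potentially\n incorrect t0 and t1 values, and compare this new data to existing old data\n in wavedata array. Find where the non-repeating parts of the old data fit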
Find where the non-repeating parts of the old data fits\n into the new, and calculate the correction needed to fix the time values.\n Finally, reload new data according to these corrected time values.\"\"\"\n #print('Reloading sid from ver_lte_03: %d' % sid)\n od = self.wavedata[sid, :nchans] # old data\n # indices that strip const values from left and right ends:\n lefti, righti = lrrep2Darrstripis(od)\n od = od[:, lefti:righti] # stripped old data\n # reloaded data rd uses old incorrect t0 and t1, but they should be\n # wide enough to encompass the non-repeating parts of the old data\n width = od.shape[1] # rolling window width\n if not width <= rd.shape[1]:\n print('') # newline\n print(\"WARNING: od.shape[1]=%d > rd.shape[1]=%d for sid %d\" %\n (od.shape[1], rd.shape[1], sid))\n #import pdb; pdb.set_trace()\n return\n odinndis = np.where((rollwin2D(rd, width) == od).all(axis=1).all(axis=1))[0]\n if len(odinndis) == 0: # no hits of old data in new\n dnt = 0 # reload data based on current timepoints\n elif len(odinndis) == 1: # exactly 1 hit of old data in new\n odinndi = odinndis[0] # pull it out\n dnt = odinndi - lefti # num timepoints to correct by, signed\n else:\n raise RuntimeError(\"Multiple hits of old data in new, don't know \"\n \"how to reload spike %d\" % sid)\n newrd, fixed = rd, False\n if dnt != 0:\n dt = intround(dnt * self.tres) # time to correct by, signed, in us\n spikes['t'][sid] += dt # should remain halfway between t0 and t1\n spikes['t0'][sid] += dt\n spikes['t1'][sid] += dt\n # might result in some out of bounds tis because the original peaks\n # have shifted off the ends. Use opposite sign because we're\n # referencing within wavedata:\n # in versions <= 0.3, 'tis' were named 'phasetis':\n spikes['phasetis'][sid] = spikes['phasetis'][sid] - dnt\n spike = spikes[sid]\n # reslice tempwave again now that t0 and t1 have changed\n newrd = tempwave[spike['t0']:spike['t1']][chans].data\n fixed = True\n #printflush('F', end='')\n return newrd, fixed\n\n def reload_spikes_and_templates(self, sids, usemeanchans=False):\n self.reload_spikes(sids, usemeanchans=usemeanchans)\n # update neuron templates:\n unids = np.unique(self.spikes['nid'][sids])\n unids = unids[unids != 0] # exclude junk cluster, which doesn't have a neuron\n neurons = [ self.neurons[nid] for nid in unids ]\n for neuron in neurons:\n neuron.update_wave() # update affected mean waveforms\n\n def init_spike_alignment(self):\n \"\"\"Set initial spike alignment points according to alignment points of each\n spike's neuron\"\"\"\n print('Setting initial spike alignment points')\n ntis, nalignis = {}, {} # tis and aligni derived from each neuron's mean waveform\n for neuron in self.neurons.values():\n nwave = neuron.get_wave() # update and return mean waveform\n mintis = nwave.data.argmin(axis=1)\n maxtis = nwave.data.argmax(axis=1)\n ntis[neuron.id] = np.column_stack([mintis, maxtis])\n # choose aligni with least variance:\n nalignis[neuron.id] = np.argmin([mintis.std(), maxtis.std()])\n AD2uV = self.converter.AD2uV\n for s, wd in zip(self.spikes, self.wavedata):\n sid = s['id']\n # print out progress on a regular basis:\n if sid % 100000 == 0:\n printflush(sid, end='')\n elif sid % 10000 == 0:\n printflush('.', end='')\n nid = s['nid']\n #chan = s['chan']\n nchans = s['nchans']\n chans = s['chans'][:nchans]\n neuronchans = self.neurons[nid].wave.chans\n assert (chans == neuronchans).all()\n s['tis'][:nchans] = ntis[nid] # set according to its neuron, wrt t0i=0\n s['aligni'] = nalignis[nid] # set according to 
its neuron\n maxchani = s['chani']\n t0i, t1i = int(s['tis'][maxchani, 0]), int(s['tis'][maxchani, 1])\n s['dt'] = abs(t1i - t0i) / self.sampfreq * 1e6 # us\n # note that V0 and V1 might not be of opposite sign, because tis are derived\n # from mean neuron waveform, not from each individual spike:\n s['V0'], s['V1'] = AD2uV(wd[maxchani, t0i]), AD2uV(wd[maxchani, t1i]) # uV\n s['Vpp'] = abs(s['V1'] - s['V0']) # uV\n print()\n\n def spatially_localize_spikes(self, sortwin, method='fit'):\n \"\"\"Assuming that wavedata have been extracted and neuron mean waveforms calculated,\n find tis and perform spatial localization of every spike in self\"\"\"\n det = self.detector\n weights2f = self.extractor.weights2spatial\n weights2spatialmean = self.extractor.weights2spatialmean\n f = self.extractor.f\n nreject = 0 # number spikes rejected during spatial localization\n print('Running spatial localization on all %d spikes' % self.nspikes)\n tstart = time.clock()\n\n ## TODO: can this be multithreaded/processed?\n\n for s, wd in zip(self.spikes, self.wavedata):\n # Get Vpp at each inclchan's tis, use as spatial weights:\n # see core.rowtake() or util.rowtake_cy() for indexing explanation:\n sid = s['id']\n # print out progress on a regular basis:\n if sid % 10000 == 0:\n printflush(sid, end='')\n elif sid % 1000 == 0:\n printflush('.', end='')\n chan = s['chan']\n nchans = s['nchans']\n chans = s['chans'][:nchans]\n maxchani = s['chani']\n chanis = det.chans.searchsorted(chans)\n w = np.float32(wd[np.arange(s['nchans'])[:, None], s['tis'][:nchans]]) # nchans x 2\n w = abs(w).sum(axis=1) # Vpp for each chan, measured at t0i and t1i\n x = det.siteloc[chanis, 0] # 1D array (row)\n y = det.siteloc[chanis, 1]\n if method == 'fit':\n # localize by fitting extractor.f function to wavedata\n params = weights2f(f, w, x, y, maxchani)\n elif method == 'mean':\n # set localization to Vpp-weighted spatial mean and 0 sigma:\n x0, y0 = weights2spatialmean(w, x, y)\n # a very ad-hoc guess for spatial sigma:\n sx = 2 * dist((x0, y0), self.probe.SiteLoc[chan])\n params = x0, y0, sx, sx\n else:\n raise ValueError('Unknown method %r' % method)\n if params is None: # presumably a non-localizable many-channel noise event\n #printflush('X', end='') # to indicate a rejected spike\n if DEBUG:\n spiket = intround(s['t']) # nearest us\n det.log(\"Reject spike %d at t=%d based on fit params\" % (sid, spiket))\n neuron = self.neurons[s['nid']]\n # remove from its neuron, add to unsorted list of spikes:\n sortwin.MoveSpikes2List(neuron, [sid], update=False)\n # manually set localization params to Vpp-weighted spatial mean and 0 sigma:\n x0, y0 = weights2spatialmean(w, x, y)\n # set sigma to 0 um, and then later round lockr up to 1 um so that only one\n # raster tick shows up for each rejected spike, reducing clutter\n params = x0, y0, 0, 0\n nreject += 1\n # Save spatial fit params, and \"lockout\" only the channels within lockrx*sx\n # of the fit spatial location of the spike, up to a max of inclr.
\"Lockout\"\n # in this case only refers to which channels are highlighted with a raster tick\n # for each spike:\n s['x0'], s['y0'], s['sx'], s['sy'] = params\n x0, y0 = s['x0'], s['y0']\n # lockout radius for this spike:\n lockr = min(det.lockrx*s['sx'], det.inclr) # in um\n lockr = max(lockr, 1) # at least 1 um, so at least the maxchan gets a tick\n # test y coords of chans in y array, ylockchaniis can be used to index\n # into x, y and chans:\n ylockchaniis, = np.where(np.abs(y - y0) <= lockr) # convert bool arr to int\n # test Euclid distance from x0, y0 for each ylockchani:\n lockchaniis = ylockchaniis.copy()\n for ylockchanii in ylockchaniis:\n if dist((x[ylockchanii], y[ylockchanii]), (x0, y0)) > lockr:\n # Euclidean distance is too great, remove ylockchanii from lockchaniis:\n lockchaniis = lockchaniis[lockchaniis != ylockchanii]\n lockchans = chans[lockchaniis]\n nlockchans = len(lockchans)\n s['lockchans'][:nlockchans], s['nlockchans'] = lockchans, nlockchans\n print('Spatial localization of spikes took %.3f s' % (time.clock() - tstart))\n\n return nreject\n\n '''\n def get_component_matrix(self, dims=None, weighting=None):\n \"\"\"Convert spike param matrix into pca/ica data for clustering\"\"\"\n\n import mdp # can't delay this any longer\n X = self.get_param_matrix(dims=dims)\n if weighting == None:\n return X\n if weighting.lower() == 'ica':\n node = mdp.nodes.FastICANode()\n elif weighting.lower() == 'pca':\n node = mdp.nodes.PCANode()\n else:\n raise ValueError, 'unknown weighting %r' % weighting\n node.train(X)\n features = node.execute(X) # returns all available components\n #self.node = node\n #self.weighting = weighting\n #self.features = features\n return features\n\n def get_ids(self, cids, spikes):\n \"\"\"Convert a list of cluster ids into 2 dicts: n2sids maps neuron IDs to\n spike IDs; s2nids maps spike IDs to neuron IDs\"\"\"\n cids = np.asarray(cids)\n cids = cids - cids.min() # make sure cluster IDs are 0-based\n uniquecids = set(cids)\n nclusters = len(uniquecids)\n # neuron ID to spike IDs (plural) mapping\n n2sids = dict(zip(uniquecids, [ [] for i in range(nclusters) ]))\n s2nids = {} # spike ID to neuron ID mapping\n for spike, nid in zip(spikes, cids):\n s2nids[spike['id']] = nid\n n2sids[nid].append(spike['id'])\n return n2sids, s2nids\n\n def write_spc_input(self):\n \"\"\"Generate input data file to SPC\"\"\"\n X = self.get_component_matrix()\n # write to space-delimited .dat file. Each row is a spike, each column a param\n spykedir = os.path.dirname(__file__)\n dt = str(datetime.datetime.now())\n dt = dt.split('.')[0] # ditch the us\n dt = dt.replace(' ', '_')\n dt = dt.replace(':', '.')\n self.spcdatfname = os.path.join(spykedir, 'spc', dt+'.dat')\n # not sure why spc adds the dg_01 part:\n self.spclabfname = os.path.join(spykedir, 'spc', dt+'.dg_01.lab')\n f = open(self.spcdatfname, 'w')\n for params in X: # write text data to file, one row at a time\n params.tofile(f, sep=' ', format='%.6f')\n f.write('\\n')\n f.close()\n\n def parse_spc_lab_file(self, fname=None):\n \"\"\"Parse output .lab file from SPC. Each row in the file is the assignment of each\n spin (datapoint) to a cluster, one row per temperature datapoint. First column is\n temperature run number (0-based). 2nd column is the temperature. 
All remaining\n columns correspond to the datapoints in the order presented in the input .dat file.\n Returns (Ts, cids)\"\"\"\n #spikes = self.get_spikes_sortedby('id')\n if fname is None:\n defaultDir = r\"C:\\Documents and Settings\\Administrator\\Desktop\\Charlie\\From\"\n dlg = wx.FileDialog(None, message=\"Open SPC .lab file\",\n defaultDir=defaultDir, defaultFile='',\n wildcard=\"All files (*.*)|*.*|.lab files (*.lab)|*.lab|\",\n style=wx.OPEN)\n if dlg.ShowModal() == wx.ID_OK:\n fname = dlg.GetPath()\n dlg.Destroy()\n data = np.loadtxt(fname, dtype=np.float32)\n Ts = data[:, 1] # 2nd column\n cids = np.int32(data[:, 2:]) # 3rd column on\n print('Parsed %r' % fname)\n return Ts, cids\n\n def parse_charlies_output(self, fname=None):\n if fname is None:\n # raw strings throughout, so the backslashes aren't read as escapes:\n fname = (r'C:\\Documents and Settings\\Administrator\\Desktop\\Charlie'\n r'\\From\\2009-07-20\\clustered_events_coiflet_T0.125.txt')\n nids = np.loadtxt(fname, dtype=int) # one neuron id per spike\n return nids\n\n def write_spc_app_input(self):\n \"\"\"Generate input data file to spc_app\"\"\"\n spikes = self.get_spikes_sortedby('id')\n X = self.get_component_matrix()\n # write to tab-delimited data file. Each row is a param, each column a spike\n # (this is the transpose of X)\n # first row has labels \"AFFX\", \"NAME\", and then spike ids\n # first col has labels \"AFFX\", and then param names\n f = open(r'C:\\home\\mspacek\\Desktop\\Work\\SPC\\Weizmann\\spc_app\\spc_app_input.txt', 'w')\n f.write('AFFX\\tNAME\\t')\n for spike in spikes:\n f.write('s%d\\t' % spike['id'])\n f.write('\\n')\n for parami, param in enumerate(['Vpp', 'dt', 'x0', 'y0', 'sx', 'sy', 'theta']):\n f.write(param+'\\t'+param+'\\t')\n for val in X[:, parami]:\n f.write('%f\\t' % val)\n f.write('\\n')\n f.close()\n\n def hcluster(self, t=1.0):\n \"\"\"Hierarchically cluster self.spikes\n\n TODO: consider doing multiple cluster runs. First, cluster by spatial location (x0,\n y0). Then split those clusters up by Vpp. Then those by spatial distrib (sy/sx,\n theta), then by temporal distrib (dt, s1, s2). This will ensure that the lousier\n params will only be considered after the best ones already have, and therefore that\n you start off with pretty good clusters that are then only slightly refined using\n the lousy params\n \"\"\"\n spikes = self.get_spikes_sortedby('id')\n X = self.get_component_matrix()\n print(X)\n # try 'weighted' or 'average' with 'mahalanobis'\n cids = fclusterdata(X, t=t, method='single', metric='euclidean')\n n2sids, s2nids = self.get_ids(cids, spikes)\n return n2sids
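\n # hcluster() above delegates the real work to scipy's fclusterdata, which\n # returns one flat cluster id per row of X. A toy run under an assumed\n # criterion='distance' threshold (values illustrative only):\n #\n # import numpy as np\n # from scipy.cluster.hierarchy import fclusterdata\n # X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])\n # cids = fclusterdata(X, t=1.0, criterion='distance',\n # method='single', metric='euclidean')\n # # -> e.g. array([1, 1, 2, 2], dtype=int32): two flat clusters of two points\n\n def export2Charlie(self, fname='spike_data', onlymaxchan=False, nchans=3, npoints=32):\n \"\"\"Export spike data to a text file, one spike per row.\n Columns are x0, y0, followed by most prominent npoints datapoints\n (1/4, 3/4 wrt spike time) of each nearest nchans.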
This is to\n give to Charlie to do WPD and SPC on\"\"\"\n if onlymaxchan:\n nchans = 1\n assert np.log2(npoints) % 1 == 0, 'npoints is not a power of 2'\n # get ti - time index each spike is assumed to be centered on\n self.spikes[0].update_wave(self.stream) # make sure it has a wave\n ti = intround(self.spikes[0].wave.data.shape[-1] / 4) # 13 for 50 kHz, 6 for 25 kHz\n dims = self.nspikes, 2+nchans*npoints\n output = np.empty(dims, dtype=np.float32)\n dm = self.detector.dm\n chanis = np.arange(len(dm.data))\n coords = np.asarray(dm.coords)\n xcoords = coords[:, 0]\n ycoords = coords[:, 1]\n sids = list(self.spikes) # self.spikes is a dict!\n sids.sort()\n for sid in sids:\n spike = self.spikes[sid]\n chani = spike.chani # max chani\n x0, y0 = spike.x0, spike.y0\n if onlymaxchan:\n nearestchanis = np.asarray([chani])\n else:\n # find closest chans to x0, y0\n d2s = (xcoords - x0)**2 + (ycoords - y0)**2 # squared distances\n sortis = d2s.argsort()\n nearestchanis = chanis[sortis][0:nchans] # pick the first nchan nearest chans\n if chani not in nearestchanis:\n print(\"WARNING: max chani %d is not among the %d chanis nearest \"\n \"(x0, y0) = (%.1f, %.1f) for spike %d at t=%d\"\n % (chani, nchans, x0, y0, sid, spike.t))\n if spike.wave.data is None:\n spike.update_wave(self.stream)\n row = [x0, y0]\n for chani in nearestchanis:\n chan = dm.chans[chani] # dereference\n try:\n data = spike.wave[chan].data[0] # pull out singleton dimension\n except IndexError: # empty array\n data = np.zeros(data.shape[-1], data.dtype)\n row.extend(data[ti-npoints/4:ti+npoints*3/4])\n output[sid] = row\n dt = str(datetime.datetime.now())\n dt = dt.split('.')[0] # ditch the us\n dt = dt.replace(' ', '_')\n dt = dt.replace(':', '.')\n fname += '.' + dt + '.txt'\n np.savetxt(fname, output, fmt='%.1f', delimiter=' ')\n\n def match(self, templates=None, weighting='signal', sort=True):\n \"\"\"Match templates to all .spikes with nearby maxchans,\n save error values to respective templates.\n\n Note: slowest step by far is loading in the wave data from disk.\n (First match is slow, subsequent ones are ~ 15X faster.)\n Unless something's done about that in advance, don't bother optimizing here much.\n Right now, once waves are loaded, performance is roughly 20000 matches/sec\n\n TODO: Nick's alternative to gaussian distance weighting: have two templates: a mean\n template, and an stdev template, and weight the error between each matched\n spike and the mean on each chan at each timepoint by the corresponding stdev value\n (divide the error by the stdev, so that timepoints with low stdev are more sensitive\n to error)\n\n TODO: looks like I still need to make things more nonlinear - errors at high signal\n values aren't penalized enough, while errors at small signal values are penalized\n too much. Try cubing both signals, then taking sum(err**2)\n\n DONE: maybe even better, instead of doing an elaborate cubing of signal, followed by\n a rather elaborate gaussian spatiotemporal weighting of errors, just take difference\n of signals, and weight the error according to the abs(template_signal) at each point\n in time and across chans. 
That way, error in parts of the signal far from zero is\n considered more important than deviance of perhaps similar absolute value for signal\n close to zero\n\n \"\"\"\n # None defaults to matching all templates:\n templates = templates or self.templates.values()\n sys.stdout.write('matching')\n t0 = time.time()\n nspikes = len(self.spikes)\n dm = self.detector.dm\n for template in templates:\n template.err = [] # overwrite any existing .err attrib\n tw = template.tw\n templatewave = template.wave[template.chans] # pull out template's enabled chans\n #stdev = template.get_stdev()[template.chans] # pull out template's enabled chans\n # replace any 0s with 1s - TODO: what's best way to avoid singularities?:\n #stdev[stdev == 0] = 1\n # Gaussian weighting in space and/or time:\n weights = template.get_weights(weighting=weighting, sstdev=self.detector.slock/2,\n tstdev=self.detector.tlock/2)\n for spike in self.spikes.values():\n # check if spike.maxchan is outside some minimum distance from template.maxchan\n if dm[template.maxchan, spike.maxchan] > MAXCHANTOLERANCE: # um\n continue # don't even bother\n if spike.wave.data is None or template.tw != TW: # make sure their data line up\n spike.update_wave(tw) # this slows things down a lot, but is necessary\n # slice template's enabled chans out of spike, calculate sum of\n # squared weighted error\n # first impression is that dividing by stdev makes separation worse, not better\n # low stdev means more sensitive to error:\n #err = (templatewave.data - spike.wave[template.chans].data) / stdev * weights\n # pull out template's enabled chans from spike:\n spikewave = spike.wave[template.chans]\n if weighting == 'signal':\n tsdata = np.asarray([templatewave.data, spikewave.data])\n # take elementwise max of abs of template and spike data:\n weights = np.abs(tsdata).max(axis=0)\n err = (templatewave.data - spikewave.data) * weights # weighted error\n err = (err**2).sum(axis=None) # sum of squared weighted error\n template.err.append((spike.id, intround(err)))\n template.err = np.asarray(template.err, dtype=np.int64)\n if sort and len(template.err) != 0:\n i = template.err[:, 1].argsort() # row indices that sort by error\n template.err = template.err[i]\n sys.stdout.write('.')\n print('\\nmatch took %.3f sec' % (time.time()-t0))\n '''
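\n # The commented-out match() above weights the template/spike difference by the\n # elementwise max of the two signals' absolute values, so error where the signal\n # is large costs more than equal error near zero. The core computation, sketched\n # on toy arrays (names illustrative only):\n #\n # import numpy as np\n # templatewave = np.array([[0., 5., 20., 5., 0.]]) # (nchans, nt)\n # spikewave = np.array([[0., 4., 18., 6., 0.]])\n # weights = np.abs(np.asarray([templatewave, spikewave])).max(axis=0)\n # err = (templatewave - spikewave) * weights # weighted error\n # sse = (err**2).sum(axis=None) # -> 1661.0\n\nclass Neuron(object):\n \"\"\"A collection of spikes that have been deemed somehow, whether manually\n or automatically, to have come from the same cell.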
A Neuron's waveform\n is the mean of its member spikes\"\"\"\n def __init__(self, sort, id=None):\n self.sort = sort\n self.id = id # neuron id\n self.wave = WaveForm() # init to empty waveform\n self.sids = np.array([], dtype=int) # indices of spikes that make up this neuron\n # relative reference timestamp, here for symmetry with fellow spike rec\n # (obj.t comes up sometimes):\n self.t = 0\n self.plt = None # Plot currently holding self\n self.cluster = None\n self.good = False # user can mark this neuron as \"good\" if so desired\n #self.fname # not here, let's allow neurons to have spikes from different files?\n\n def get_chans(self):\n if self.wave.data is None:\n self.update_wave()\n return self.wave.chans # self.chans just refers to self.wave.chans\n\n chans = property(get_chans)\n\n def get_chan(self):\n if self.wave.data is None:\n self.update_wave()\n return self.wave.chans[self.wave.data.ptp(axis=1).argmax()] # chan with max Vpp\n\n chan = property(get_chan)\n\n def get_nspikes(self):\n return len(self.sids)\n\n nspikes = property(get_nspikes)\n\n def __getstate__(self):\n \"\"\"Get object state for pickling\"\"\"\n d = self.__dict__.copy()\n # don't save any calculated PCs/ICs:\n #d.pop('X', None)\n #d.pop('Xhash', None)\n # don't save plot self is assigned to, since that'll change anyway on unpickle\n d['plt'] = None\n return d\n\n def get_wave(self):\n \"\"\"Check for valid mean and std waveform before returning it\"\"\"\n # many neuron waveforms saved in old .sort files won't have a wave.std field:\n try:\n self.wave.std\n except AttributeError:\n return self.update_wave()\n if self.wave is None or self.wave.data is None or self.wave.std is None:\n return self.update_wave()\n else:\n return self.wave # return existing waveform\n\n def update_wave(self):\n \"\"\"Update mean and std of self's waveform\"\"\"\n sort = self.sort\n spikes = sort.spikes\n if len(self.sids) == 0: # no member spikes, perhaps I should be deleted?\n raise RuntimeError(\"n%d has no spikes and its waveform can't be updated\" % self.id)\n meanwave = sort.get_mean_wave(self.sids, nid=self.id)\n\n # update self's Waveform object\n self.wave.data = meanwave.data\n self.wave.std = meanwave.std\n self.wave.ts = sort.twts.copy() # meanwave has no .ts, copy for clean jsonpickle\n self.wave.chans = meanwave.chans\n self.wave.tres = sort.tres # meanwave has no .tres\n return self.wave\n\n def __sub__(self, other):\n \"\"\"Return difference array between self and other neurons' waveforms\n on common channels\"\"\"\n selfwavedata, otherwavedata = self.getCommonWaveData(other.chan, other.chans,\n other.wave.data)\n return selfwavedata - otherwavedata\n\n def getCommonWaveData(self, otherchan, otherchans, otherwavedata):\n \"\"\"Return waveform data common to self's chans and otherchans, while\n requiring that both include the other's maxchan\"\"\"\n chans = np.intersect1d(self.chans, otherchans, assume_unique=True)\n if len(chans) == 0:\n raise ValueError('No common chans')\n if self.chan not in chans or otherchan not in chans:\n raise ValueError(\"maxchans aren't part of common chans\")\n selfchanis = self.chans.searchsorted(chans)\n otherchanis = otherchans.searchsorted(chans)\n return self.wave.data[selfchanis], otherwavedata[otherchanis]
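\n\n # getCommonWaveData() above relies on both chan arrays being sorted and unique:\n # intersect1d finds the shared chans, and each searchsorted maps them back to\n # row indices into that neuron's waveform data. A toy sketch (illustrative\n # values only):\n #\n # import numpy as np\n # chansA = np.array([1, 3, 5, 9])\n # chansB = np.array([3, 5, 7])\n # common = np.intersect1d(chansA, chansB, assume_unique=True) # -> [3, 5]\n # rowsA = chansA.searchsorted(common) # -> [1, 2]\n # rowsB = chansB.searchsorted(common) # -> [0, 1]\n '''\n def get_stdev(self):\n \"\"\"Return 2D array of stddev of each timepoint of each chan of member spikes.\n Assumes self.update_wave has already been called\"\"\"\n data = []\n # TODO: speed this up by pre-allocating memory and then filling in the array\n for spike in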
self.spikes:\n data.append(spike.wave.data) # collect spike's data\n stdev = np.asarray(data).std(axis=0)\n return stdev\n\n def get_weights(self, weighting=None, sstdev=None, tstdev=None):\n \"\"\"Returns unity, spatial, temporal, or spatiotemporal Gaussian weights\n for self's enabled chans in self.wave.data, given spatial and temporal\n stdevs\"\"\"\n nchans = len(self.wave.chans)\n nt = len(self.wave.data[0]) # assume all chans have the same number of timepoints\n if weighting == None:\n weights = 1\n elif weighting == 'spatial':\n weights = self.get_gaussian_spatial_weights(sstdev) # vector\n elif weighting == 'temporal':\n weights = self.get_gaussian_temporal_weights(tstdev) # vector\n elif weighting == 'spatiotemporal':\n sweights = self.get_gaussian_spatial_weights(sstdev)\n tweights = self.get_gaussian_temporal_weights(tstdev)\n weights = np.outer(sweights, tweights) # matrix, outer product of the two\n elif weighting == 'signal':\n weights = None # this is handled by caller\n #print('\\nweights:\\n%r' % weights)\n return weights\n\n def get_gaussian_spatial_weights(self, stdev):\n \"\"\"Return a vector that weights self.chans according to a 2D gaussian\n centered on self.maxchan with standard deviation stdev in um\"\"\"\n g = Gaussian(mean=0, stdev=stdev)\n # distances between maxchan and all enabled chans:\n d = self.sort.detector.dm[self.maxchan, self.chans]\n weights = g[d]\n weights.shape = (-1, 1) # vertical vector with nchans rows, 1 column\n return weights\n\n def get_gaussian_temporal_weights(self, stdev):\n \"\"\"Return a vector that weights timepoints in self's mean waveform\n by a gaussian centered on t=0, with standard deviation stdev in us\"\"\"\n g = Gaussian(mean=0, stdev=stdev)\n ts = self.wave.ts # template mean timepoints relative to t=0 spike time\n weights = g[ts] # horizontal vector with 1 row, nt timepoints\n return weights\n '''\n\nclass PTCSHeader(object):\n \"\"\"\n Polytrode clustered spikes file header:\n\n formatversion: int64 (currently version 3)\n ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n descr: ndescrbytes of ASCII text\n (padded with null bytes if needed for 8 byte alignment)\n\n nneurons: uint64 (number of neurons)\n nspikes: uint64 (total number of spikes)\n nsamplebytes: uint64 (number of bytes per template waveform sample)\n samplerate: uint64 (Hz)\n\n npttypebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n pttype: npttypebytes of ASCII text\n (padded with null bytes if needed for 8 byte alignment)\n nptchans: uint64 (total num chans in polytrode)\n chanpos: nptchans * 2 * float64\n (array of (x, y) positions, in um, relative to top of polytrode,\n indexed by 0-based channel IDs)\n nsrcfnamebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n srcfname: nsrcfnamebytes of ASCII text\n (source file name, probably .srf, padded with null bytes if needed for\n 8 byte alignment)\n datetime: float64\n (absolute datetime corresponding to t=0 us timestamp, stored as days since\n epoch: December 30, 1899 at 00:00)\n ndatetimestrbytes: uint64 \n datetimestr: ndatetimestrbytes of ASCII text\n (human readable string representation of datetime, preferrably ISO 8601,\n padded with null bytes if needed for 8 byte alignment)\n \"\"\"\n FORMATVERSION = 3 # overall .ptcs file format version, not header format version\n def __init__(self, sort, sortpath, stream, nneurons, nspikes, nsamplebytes,\n fullfname, exportdt, user='', notes=''):\n self.sort = sort\n self.stream = stream\n self.nneurons = 
nneurons\n self.nspikes = nspikes\n self.nsamplebytes = nsamplebytes\n homelessfullfname = lstrip(fullfname, os.path.expanduser('~'))\n sortfname = sort.fname\n sortfullfname = os.path.join(sortpath, sortfname)\n sortfmoddt = str(datetime.datetime.fromtimestamp(os.path.getmtime(sortfullfname)))\n sortfmoddt = sortfmoddt.split('.')[0] # ditch the us\n sortfsize = os.path.getsize(sortfullfname) # in bytes\n d = {'file_type': '.ptcs (polytrode clustered spikes) file',\n 'original_fname': homelessfullfname, 'export_time': exportdt,\n 'sort': {'fname': sortfname, 'path': sortpath,\n 'fmtime': sortfmoddt, 'fsize': sortfsize},\n 'user': user, 'notes': notes}\n descr = str(d)\n self.descr = pad(descr, align=8)\n self.srcfname = pad(lstrip(stream.fname, '../'), align=8)\n self.pttype = pad(stream.probe.name, align=8)\n self.dt = stream.datetime\n self.dtstr = pad(self.dt.isoformat(), align=8)\n\n def write(self, f):\n s = self.sort\n np.int64(self.FORMATVERSION).tofile(f) # formatversion\n np.uint64(len(self.descr)).tofile(f) # ndescrbytes\n f.write(self.descr) # descr\n \n np.uint64(self.nneurons).tofile(f) # nneurons\n np.uint64(self.nspikes).tofile(f) # nspikes\n np.uint64(self.nsamplebytes).tofile(f) # nsamplebytes\n np.uint64(s.sampfreq).tofile(f) # samplerate\n\n np.uint64(len(self.pttype)).tofile(f) # npttypebytes\n f.write(self.pttype) # pttype\n np.uint64(s.stream.probe.nchans).tofile(f) # nptchans\n np.float64(s.stream.probe.siteloc_arr()).tofile(f) # chanpos\n np.uint64(len(self.srcfname)).tofile(f) # nsrcfnamebytes\n f.write(self.srcfname) # srcfname\n np.float64(td2days(self.dt - EPOCH)).tofile(f) # datetime (in days)\n np.uint64(len(self.dtstr)).tofile(f) # ndatetimestrbytes\n f.write(self.dtstr)\n\n\nclass PTCSNeuronRecord(object):\n \"\"\"\n Polytrode clustered spikes file neuron record:\n \n nid: int64 (signed neuron id, could be -ve, could be non-contiguous with previous)\n ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment, defaults to 0)\n descr: ndescrbytes of ASCII text\n (padded with null bytes if needed for 8 byte alignment)\n clusterscore: float64\n xpos: float64 (um)\n ypos: float64 (um)\n sigma: float64 (um) (Gaussian spatial sigma)\n nchans: uint64 (num chans in template waveforms)\n chanids: nchans * uint64 (0 based IDs of channels in template waveforms)\n maxchanid: uint64 (0 based ID of max channel in template waveforms)\n nt: uint64 (num timepoints per template waveform channel)\n nwavedatabytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n wavedata: nwavedatabytes of nsamplebytes sized floats\n (template waveform data, laid out as nchans * nt, in uV,\n padded with null bytes if needed for 8 byte alignment)\n nwavestdbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)\n wavestd: nwavestdbytes of nsamplebytes sized floats\n (template waveform standard deviation, laid out as nchans * nt, in uV,\n padded with null bytes if needed for 8 byte alignment)\n nspikes: uint64 (number of spikes in this neuron)\n spike timestamps: nspikes * uint64 (us, should be sorted)\n \"\"\"\n def __init__(self, neuron, spikets=None, nsamplebytes=None, descr=''):\n n = neuron\n AD2uV = n.sort.converter.AD2uV\n self.neuron = neuron\n self.spikets = spikets # constrained to stream range, may be < neuron.sids\n self.wavedtype = {2: np.float16, 4: np.float32, 8: np.float64}[nsamplebytes]\n if n.wave.data is None or n.wave.std is None: # some may have never been displayed\n n.update_wave()\n # wavedata and wavestd are nchans * nt * nsamplebytes 
long:\n self.wavedata = pad(self.wavedtype(AD2uV(n.wave.data)), align=8)\n self.wavestd = pad(self.wavedtype(AD2uV(n.wave.std)), align=8)\n self.descr = pad(descr, align=8)\n \n def write(self, f):\n n = self.neuron\n np.int64(n.id).tofile(f) # nid\n np.uint64(len(self.descr)).tofile(f) # ndescrbytes\n f.write(self.descr) # descr, bytes\n np.float64(np.nan).tofile(f) # clusterscore\n np.float64(n.cluster.pos['x0']).tofile(f) # xpos (um)\n np.float64(n.cluster.pos['y0']).tofile(f) # ypos (um)\n np.float64(n.cluster.pos['sx']).tofile(f) # sigma (um)\n np.uint64(len(n.wave.chans)).tofile(f) # nchans\n np.uint64(n.wave.chans).tofile(f) # chanids\n np.uint64(n.chan).tofile(f) # maxchanid\n np.uint64(len(n.wave.ts)).tofile(f) # nt\n np.uint64(self.wavedata.nbytes).tofile(f) # nwavedatabytes\n self.wavedata.tofile(f) # wavedata \n np.uint64(self.wavestd.nbytes).tofile(f) # nwavestdbytes\n self.wavestd.tofile(f) # wavestd \n np.uint64(len(self.spikets)).tofile(f) # nspikes\n np.uint64(self.spikets).tofile(f) # spike timestamps (us)\n\n\nclass PanelScrollArea(QtGui.QScrollArea):\n \"\"\"A scroll area for the spikesortpanel\"\"\"\n def keyPressEvent(self, event):\n key = event.key()\n # seems the ENTER key needs be handled to directly call plot, unlike in sortwin\n # where the event is passed on to be handled by the list widgets\n if key in [Qt.Key_Enter, Qt.Key_Return]:\n sortwin = self.topLevelWidget()\n sortwin.parent().ui.plotButton.click()\n else:\n QtGui.QScrollArea.keyPressEvent(self, event) # pass it on\n\n\nclass SortWindow(SpykeToolWindow):\n \"\"\"Sort window\"\"\"\n def __init__(self, parent, pos=None):\n SpykeToolWindow.__init__(self, parent, flags=QtCore.Qt.Tool)\n self.spykewindow = parent\n ncols = self.sort.probe.ncols\n nrows = self.sort.probe.nrows\n # try and allow the same amount of horizontal space per column for 2 and 3 col probes:\n if ncols <= 2:\n self.MAINSPLITTERPOS = 300\n else:\n self.MAINSPLITTERPOS = 265 # move it more to the left\n # make horizontal sort slider use as little vertical space as possible\n self.VSPLITTERPOS = 1\n panelwidth = PANELWIDTHPERCOLUMN * ncols\n panelheight = PANELHEIGHTPERROW * nrows\n width = max(self.MAINSPLITTERPOS + panelwidth + VSCROLLBARWIDTH, MINSORTWINDOWWIDTH)\n size = (width, SORTWINDOWHEIGHT)\n self.setWindowTitle('Sort Window')\n self.move(*pos)\n self.resize(*size)\n\n self._source = None # source cluster for comparison\n self.slider = SpikeSelectionSlider(Qt.Horizontal, self)\n self.slider.setInvertedControls(True)\n self.slider.setToolTip('Position of sliding spike selection time window')\n self.connect(self.slider, QtCore.SIGNAL('valueChanged(int)'),\n self.on_slider_valueChanged)\n self.connect(self.slider, QtCore.SIGNAL('sliderPressed()'),\n self.on_slider_sliderPressed)\n\n self.nlist = NList(self)\n self.nlist.setToolTip('Neuron list')\n self.nslist = NSList(self)\n self.nslist.setToolTip('Sorted spike list')\n self.uslist = USList(self) # should really be multicolumn tableview\n self.uslist.setToolTip('Unsorted spike list')\n tw = self.spykewindow.sort.tw\n\n self.panel = SpikeSortPanel(self, tw=tw)\n self.panel.setMinimumSize(QtCore.QSize(panelwidth, panelheight))\n\n self.panelscrollarea = PanelScrollArea(self)\n self.panelscrollarea.setWidget(self.panel)\n self.panelscrollarea.setMinimumWidth(panelwidth + VSCROLLBARWIDTH)\n self.panelscrollarea.setWidgetResizable(True) # allows panel to size bigger than min\n\n self.vsplitter = QtGui.QSplitter(Qt.Vertical)\n self.vsplitter.addWidget(self.slider)\n 
self.vsplitter.addWidget(self.nlist)\n self.vsplitter.addWidget(self.nslist)\n self.vsplitter.addWidget(self.uslist)\n\n self.mainsplitter = QtGui.QSplitter(Qt.Horizontal)\n self.mainsplitter.addWidget(self.vsplitter)\n self.mainsplitter.addWidget(self.panelscrollarea)\n\n self.layout = QtGui.QVBoxLayout()\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.layout.addWidget(self.mainsplitter)\n\n mainwidget = QtGui.QWidget(self)\n mainwidget.setLayout(self.layout)\n self.setCentralWidget(mainwidget)\n\n self.toolbar = self.setupToolbar()\n self.addToolBar(self.toolbar)\n\n def setupToolbar(self):\n toolbar = QtGui.QToolBar(self)\n toolbar.setObjectName('toolbar')\n toolbar.setFloatable(True)\n toolbar.setIconSize(QtCore.QSize(16, 16)) # like in main spyke window\n\n actionDelete = QAction(QIcon('res/edit-delete.svg'), 'Del', self)\n tt = ('<nobr><b>Del</b> &nbsp; Delete selected spikes or clusters</nobr>\\n'\n '<nobr><b>CTRL+Del</b> &nbsp; Delete selected spikes</nobr>')\n actionDelete.setToolTip(tt)\n self.connect(actionDelete, QtCore.SIGNAL('triggered()'),\n self.on_actionDelete_triggered)\n toolbar.addAction(actionDelete)\n\n actionMergeClusters = QAction('M', self)\n tt = '<nobr><b>M</b> &nbsp; Merge clusters</nobr>'\n actionMergeClusters.setToolTip(tt)\n self.connect(actionMergeClusters, QtCore.SIGNAL('triggered()'),\n self.on_actionMergeClusters_triggered)\n toolbar.addAction(actionMergeClusters)\n\n #actionToggleClustersGood = QAction(QIcon('res/dialog-apply.svg'), 'G', self)\n actionToggleClustersGood = QAction('G', self)\n tt = '<nobr><b>G</b> &nbsp; Toggle clusters as \"good\"</nobr>'\n actionToggleClustersGood.setToolTip(tt)\n self.connect(actionToggleClustersGood, QtCore.SIGNAL('triggered()'),\n self.on_actionToggleClustersGood_triggered)\n toolbar.addAction(actionToggleClustersGood)\n\n actionSplit = QAction('+', self)\n tt = '<nobr><b>+</b> &nbsp; Split off selected spikes</nobr>'\n actionSplit.setToolTip(tt)\n self.connect(actionSplit, QtCore.SIGNAL('triggered()'),\n self.on_actionSplit_triggered)\n toolbar.addAction(actionSplit)\n\n actionLabelMultiunit = QAction('-', self)\n tt = '<nobr><b>-</b> &nbsp; Label clusters as multiunit</nobr>'\n actionLabelMultiunit.setToolTip(tt)\n self.connect(actionLabelMultiunit, QtCore.SIGNAL('triggered()'),\n self.on_actionLabelMultiunit_triggered)\n toolbar.addAction(actionLabelMultiunit)\n\n actionChanSplitClusters = QAction('/', self)\n tt = '<nobr><b>/</b> &nbsp; Split clusters by channels</nobr>'\n actionChanSplitClusters.setToolTip(tt)\n self.connect(actionChanSplitClusters, QtCore.SIGNAL('triggered()'),\n self.on_actionChanSplitClusters_triggered)\n toolbar.addAction(actionChanSplitClusters)\n\n actionDensitySplit = QAction('P', self)\n tt = ('<nobr><b>P</b> &nbsp; Split cluster pair by density along line between '\n 'their centers</nobr>')\n actionDensitySplit.setToolTip(tt)\n self.connect(actionDensitySplit, QtCore.SIGNAL('triggered()'),\n self.on_actionDensitySplit_triggered)\n toolbar.addAction(actionDensitySplit)\n\n actionRandomSplit = QAction('\\\\', self)\n tt = ('<nobr><b>\\\\</b> &nbsp; Randomly split each selected cluster in half</nobr>')\n actionRandomSplit.setToolTip(tt)\n self.connect(actionRandomSplit, QtCore.SIGNAL('triggered()'),\n self.on_actionRandomSplit_triggered)\n toolbar.addAction(actionRandomSplit)\n\n #actionRenumber = QAction(QIcon('res/gtk-edit.svg'), '#', self)\n actionRenumber = QAction('#', self)\n tt = ('<nobr><b>#</b> &nbsp; Renumber all clusters in vertical spatial order</nobr>\\n'\n 
'<nobr><b>CTRL+#</b> &nbsp; Renumber selected cluster</nobr>')\n actionRenumber.setToolTip(tt)\n self.connect(actionRenumber, QtCore.SIGNAL('triggered()'),\n self.on_actionRenumber_triggered)\n toolbar.addAction(actionRenumber)\n\n actionFind = QAction(QIcon('res/edit-find.svg'), 'Find', self)\n tt = ('<nobr><b>CTRL+F</b> &nbsp; Find spike in cluster plot</nobr>')\n actionFind.setToolTip(tt)\n self.connect(actionFind, QtCore.SIGNAL('triggered()'),\n self.on_actionFind_triggered)\n toolbar.addAction(actionFind)\n\n actionSelectRandomSpikes = QAction('R', self)\n tt = '<nobr><b>R</b> &nbsp; Select random sample of spikes of current clusters</nobr>'\n actionSelectRandomSpikes.setToolTip(tt)\n self.connect(actionSelectRandomSpikes, QtCore.SIGNAL('triggered()'),\n self.on_actionSelectRandomSpikes_triggered)\n toolbar.addAction(actionSelectRandomSpikes)\n\n actionToggleErrors = QAction('E', self)\n actionToggleErrors.setCheckable(True)\n actionToggleErrors.setChecked(self.panel.enable_fills)\n tt = '<nobr><b>CTRL+E</b> &nbsp; Toggle visibility of template error limits</nobr>'\n actionToggleErrors.setToolTip(tt)\n self.connect(actionToggleErrors, QtCore.SIGNAL('toggled(bool)'),\n self.on_actionToggleErrors_toggled)\n toolbar.addAction(actionToggleErrors)\n self.actionToggleErrors = actionToggleErrors\n\n nsamplesComboBox = QtGui.QComboBox(self)\n nsamplesComboBox.setToolTip('Number of spikes per cluster to randomly select')\n nsamplesComboBox.setFocusPolicy(Qt.NoFocus)\n nsamplesComboBox.addItems(['100', '50', '20', '10', '5', '1'])\n nsamplesComboBox.setCurrentIndex(2)\n toolbar.addWidget(nsamplesComboBox)\n self.connect(nsamplesComboBox, QtCore.SIGNAL('activated(int)'),\n self.on_actionSelectRandomSpikes_triggered)\n self.nsamplesComboBox = nsamplesComboBox\n\n gainComboBox = QtGui.QComboBox(self)\n gainComboBox.setToolTip('Waveform gain (default: 1.5)')\n gainComboBox.setFocusPolicy(Qt.NoFocus)\n gainComboBox.addItems(['4', '3.75', '3.5', '3.25', '3', '2.75', '2.5', '2.25', '2',\n '1.75', '1.5', '1.25', '1', '0.75', '0.5', '0.25'])\n gainComboBox.setCurrentIndex(3)\n toolbar.addWidget(gainComboBox)\n self.connect(gainComboBox, QtCore.SIGNAL('activated(int)'),\n self.on_gainComboBox_triggered)\n self.gainComboBox = gainComboBox\n\n #actionAlignMin = QAction(QIcon('res/go-bottom.svg'), 'Min', self)\n actionAlignMin = QAction('Min', self)\n actionAlignMin.setToolTip('Align selected spikes to min')\n self.connect(actionAlignMin, QtCore.SIGNAL('triggered()'),\n self.on_actionAlignMin_triggered)\n toolbar.addAction(actionAlignMin)\n\n #actionAlignMax = QAction(QIcon('res/go-top.svg'), 'Max', self)\n actionAlignMax = QAction('Max', self)\n actionAlignMax.setToolTip('Align selected spikes to max')\n self.connect(actionAlignMax, QtCore.SIGNAL('triggered()'),\n self.on_actionAlignMax_triggered)\n toolbar.addAction(actionAlignMax)\n\n #actionAlignBest = QAction(QIcon('res/emblem-OK.png'), 'Best', self)\n actionAlignBest = QAction('B', self)\n tt = '<nobr><b>B</b> &nbsp; Align selected spikes by best fit</nobr>'\n actionAlignBest.setToolTip(tt)\n self.connect(actionAlignBest, QtCore.SIGNAL('triggered()'),\n self.on_actionAlignBest_triggered)\n toolbar.addAction(actionAlignBest)\n\n actionShiftLeft = QAction('[', self)\n tt = ('<nobr><b>[</b> &nbsp; Shift selected spikes 2 points left</nobr>\\n'\n '<nobr><b>CTRL+[</b> &nbsp; Shift selected spikes 1 point left</nobr>')\n actionShiftLeft.setToolTip(tt)\n self.connect(actionShiftLeft, QtCore.SIGNAL('triggered()'),\n self.on_actionShiftLeft_triggered)\n 
toolbar.addAction(actionShiftLeft)\n\n actionShiftRight = QAction(']', self)\n tt = ('<nobr><b>]</b> &nbsp; Shift selected spikes 2 points right</nobr>\\n'\n '<nobr><b>CTRL+]</b> &nbsp; Shift selected spikes 1 point right</nobr>')\n actionShiftRight.setToolTip(tt)\n self.connect(actionShiftRight, QtCore.SIGNAL('triggered()'),\n self.on_actionShiftRight_triggered)\n toolbar.addAction(actionShiftRight)\n\n incltComboBox = QtGui.QComboBox(self)\n incltComboBox.setToolTip(\"Waveform duration (us) to include for component \"\n \"analysis,\\nasymmetric around spike time\")\n incltComboBox.setFocusPolicy(Qt.NoFocus)\n dtw = self.sort.tw[1] - self.sort.tw[0] # spike time window width\n incltstep = intround(dtw / 10) # evenly spaced inclt values\n incltvals = np.arange(dtw, 0, -incltstep)\n incltComboBox.addItems([ str(incltval) for incltval in incltvals ])\n incltComboBox.setCurrentIndex(0)\n toolbar.addWidget(incltComboBox)\n self.connect(incltComboBox, QtCore.SIGNAL('activated(int)'),\n self.on_incltComboBox_triggered)\n self.incltComboBox = incltComboBox\n #incltunitsLabel = QtGui.QLabel('us', self)\n #toolbar.addWidget(incltunitsLabel)\n\n nPCsPerChanSpinBox = QtGui.QSpinBox(self)\n nPCsPerChanSpinBox.setToolTip(\"Number of PCs to use per channel to feed into ICA\")\n nPCsPerChanSpinBox.setFocusPolicy(Qt.NoFocus)\n toolbar.addWidget(nPCsPerChanSpinBox)\n nPCsPerChanSpinBox.setMinimum(1)\n self.connect(nPCsPerChanSpinBox, QtCore.SIGNAL('valueChanged(int)'),\n self.on_nPCsPerChanSpinBox_valueChanged)\n nPCsPerChanSpinBox.setValue(self.sort.npcsperchan)\n self.nPCsPerChanSpinBox = nPCsPerChanSpinBox\n\n #actionFindPrevMostSimilar = QAction(QIcon('res/go-previous.svg'), '<', self)\n actionFindPrevMostSimilar = QAction('<', self)\n tt = '<nobr><b>&lt;</b> &nbsp; Find previous most similar cluster</nobr>'\n actionFindPrevMostSimilar.setToolTip(tt)\n self.connect(actionFindPrevMostSimilar, QtCore.SIGNAL('triggered()'),\n self.on_actionFindPrevMostSimilar_triggered)\n toolbar.addAction(actionFindPrevMostSimilar)\n\n #actionFindNextMostSimilar = QAction(QIcon('res/go-next.svg'), '>', self)\n actionFindNextMostSimilar = QAction('>', self)\n tt = '<nobr><b>&gt;</b> &nbsp; Find next most similar cluster</nobr>'\n actionFindNextMostSimilar.setToolTip(tt)\n self.connect(actionFindNextMostSimilar, QtCore.SIGNAL('triggered()'),\n self.on_actionFindNextMostSimilar_triggered)\n toolbar.addAction(actionFindNextMostSimilar)\n\n actionReloadSpikes = QAction(QIcon('res/view-refresh.svg'), 'Reload', self)\n tt = ('<nobr><b>F5</b> &nbsp; Reload waveforms of selected spikes. '\n 'If none selected, reload all</nobr>\\n'\n '<nobr><b>CTRL+F5</b> &nbsp; Use mean waveform to choose chans to reload</nobr>')\n actionReloadSpikes.setToolTip(tt)\n self.connect(actionReloadSpikes, QtCore.SIGNAL('triggered()'),\n self.on_actionReloadSpikes_triggered)\n toolbar.addAction(actionReloadSpikes)\n\n actionSave = QAction(QIcon('res/document-save.svg'), '&Save', self)\n actionSave.setToolTip('Save sort panel to file')\n self.connect(actionSave, QtCore.SIGNAL('triggered()'),\n self.on_actionSave_triggered)\n toolbar.addAction(actionSave)\n\n return toolbar\n\n def get_sort(self):\n return self.spykewindow.sort\n\n sort = property(get_sort) # make this a property for proper behaviour after unpickling\n\n def closeEvent(self, event):\n self.spykewindow.HideWindow('Sort')\n\n def mousePressEvent(self, event):\n \"\"\"These are mostly passed on up from spyke list views and sort panel. 
Left\n clicks are (or should be) filtered out\"\"\"\n buttons = event.buttons()\n if buttons == QtCore.Qt.MiddleButton:\n #self.on_actionSelectRandomSpikes_triggered()\n self.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist\n elif buttons == QtCore.Qt.RightButton:\n self.clear()\n\n def keyPressEvent(self, event):\n \"\"\"Alpha character keypresses are by default caught by the child lists for quickly\n scrolling down to and selecting list items. However, the appropriate alpha\n keypresses have been set in the child lists to be ignored, so they propagate\n up to here\"\"\"\n key = event.key()\n modifiers = event.modifiers()\n ctrl = modifiers & Qt.ControlModifier # ctrl is down\n spw = self.spykewindow\n if key == Qt.Key_A: # ignored in SpykeListViews\n spw.ui.plotButton.click() # same as hitting ENTER in nslist\n elif key == Qt.Key_X: # ignored in SpykeListViews\n spw.ui.plotXcorrsButton.click()\n elif key == Qt.Key_N: # ignored in SpykeListViews\n spw.ui.normButton.click()\n elif key == Qt.Key_Escape: # deselect all spikes and all clusters\n self.clear()\n elif key == Qt.Key_Delete:\n self.on_actionDelete_triggered()\n elif key == Qt.Key_M: # ignored in SpykeListViews\n self.on_actionMergeClusters_triggered()\n elif key == Qt.Key_G: # ignored in SpykeListViews\n self.on_actionToggleClustersGood_triggered()\n elif key == Qt.Key_Equal: # ignored in SpykeListViews\n self.on_actionSplit_triggered()\n elif key == Qt.Key_Minus: # ignored in SpykeListViews\n self.on_actionLabelMultiunit_triggered()\n elif key == Qt.Key_Slash: # ignored in SpykeListViews\n self.on_actionChanSplitClusters_triggered()\n elif key == Qt.Key_P: # ignored in SpykeListViews\n self.on_actionDensitySplit_triggered()\n elif key == Qt.Key_Backslash: # ignored in SpykeListViews\n self.on_actionRandomSplit_triggered()\n elif key == Qt.Key_NumberSign: # ignored in SpykeListViews\n self.on_actionRenumber_triggered()\n elif key == Qt.Key_F: # ignored in SpykeListViews\n if ctrl:\n self.FindSpike()\n else:\n self.FindCluster()\n elif key == Qt.Key_R: # ignored in SpykeListViews\n self.on_actionSelectRandomSpikes_triggered()\n elif key == Qt.Key_Space: # ignored in SpykeListViews\n if ctrl:\n SpykeToolWindow.keyPressEvent(self, event) # pass it on\n else:\n spw.on_clusterButton_clicked()\n elif key == Qt.Key_B: # ignored in SpykeListViews\n self.on_actionAlignBest_triggered()\n elif key == Qt.Key_BracketLeft: # ignored in SpykeListViews\n self.on_actionShiftLeft_triggered()\n elif key == Qt.Key_BracketRight: # ignored in SpykeListViews\n self.on_actionShiftRight_triggered()\n elif key == Qt.Key_Comma: # ignored in SpykeListViews\n self.on_actionFindPrevMostSimilar_triggered()\n elif key == Qt.Key_Period: # ignored in SpykeListViews\n self.on_actionFindNextMostSimilar_triggered()\n elif key == Qt.Key_F5: # ignored in SpykeListViews\n self.on_actionReloadSpikes_triggered()\n elif key == Qt.Key_E: # ignored in SpykeListViews\n if ctrl:\n self.actionToggleErrors.toggle()\n else:\n self.clear() # E is synonymous with ESC\n elif key == Qt.Key_C: # toggle between PCA and ICA, ignored in SpykeListViews\n c = str(spw.ui.componentAnalysisComboBox.currentText())\n if c == 'PCA':\n index = spw.ui.componentAnalysisComboBox.findText('ICA')\n spw.ui.componentAnalysisComboBox.setCurrentIndex(index)\n elif c == 'ICA':\n index = spw.ui.componentAnalysisComboBox.findText('PCA')\n spw.ui.componentAnalysisComboBox.setCurrentIndex(index)\n spw.on_plotButton_clicked()\n elif key == Qt.Key_T: # toggle plotting against time, ignored in 
SpykeListViews\n z = str(spw.ui.zDimComboBox.currentText())\n if z == 't':\n spw.on_c0c1c2Button_clicked() # plot in pure component analysis space\n else:\n spw.on_c0c1tButton_clicked() # plot against time\n elif key == Qt.Key_W: # toggle plotting against RMSError, ignored in SpykeListViews\n z = str(spw.ui.zDimComboBox.currentText())\n if z == 'RMSerror':\n spw.on_c0c1c2Button_clicked() # plot in pure component analysis space\n else:\n spw.ui.zDimComboBox.setCurrentIndex(3)\n spw.on_plotButton_clicked() # plot against RMSError\n elif key in [Qt.Key_Enter, Qt.Key_Return]:\n # this is handled at a lower level by on_actionItem_triggered\n # in the various listview controls\n pass\n else:\n SpykeToolWindow.keyPressEvent(self, event) # pass it on\n\n def clear(self):\n \"\"\"Clear selections in this order: unsorted spikes, sorted spikes,\n cluster automatically selected for comparison, cluster 0, clusters\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n if len(self.uslist.selectedIndexes()) > 0:\n self.uslist.clearSelection()\n elif self.nslist.nrowsSelected > 0:\n self.nslist.clearSelection()\n elif len(clusters) == 2 and self._source in clusters:\n clusters.remove(self._source)\n spw.SelectClusters(clusters, on=False)\n elif 0 in spw.GetClusterIDs():\n for cluster in spw.GetClusters():\n if cluster.id == 0:\n spw.SelectClusters([cluster], on=False)\n break\n else:\n self.nlist.clearSelection()\n # reset colours in cluster plot:\n gw = spw.windows['Cluster'].glWidget\n gw.colour()\n gw.updateGL()\n\n def on_actionDelete_triggered(self):\n \"\"\"Delete explicitly selected spikes, or clusters\"\"\"\n selsids = self.spykewindow.GetSpikes() # IDs of explicitly selected spikes\n nselsids = len(selsids)\n if (QApplication.instance().keyboardModifiers() & Qt.ControlModifier\n or nselsids > 0):\n self.delete_spikes()\n else:\n self.delete_clusters()\n\n def delete_clusters(self):\n \"\"\"Del button press/click\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n s = self.sort\n spikes = s.spikes\n sids = []\n for cluster in clusters:\n sids.append(cluster.neuron.sids)\n sids = np.concatenate(sids)\n\n # save some undo/redo stuff\n message = 'delete clusters %r' % [ c.id for c in clusters ]\n cc = ClusterChange(sids, spikes, message)\n cc.save_old(clusters, s.norder, s.good)\n\n # deselect and delete clusters\n spw.DelClusters(clusters)\n if len(s.clusters) > 0:\n # select cluster that replaces the first of the deleted clusters in norder\n selrows = [ cc.oldnorder.index(oldunid) for oldunid in cc.oldunids ]\n if len(selrows) > 0:\n selrow = selrows[0]\n nlist = spw.windows['Sort'].nlist\n nlist.selectRows(selrow) # TODO: this sets selection, but not focus\n #else: # first of deleted clusters was last in norder, don't select anything\n\n # save more undo/redo stuff\n newclusters = []\n cc.save_new(newclusters, s.norder, s.good)\n spw.AddClusterChangeToStack(cc)\n print(cc.message)\n\n def delete_spikes(self):\n \"\"\"CTRL+Del button press/click\"\"\"\n self.spykewindow.SplitSpikes(delete=True)\n\n def on_actionSplit_triggered(self):\n \"\"\"+ button click. Split off selected spikes into their own cluster\"\"\"\n self.spykewindow.SplitSpikes(delete=False)\n\n def on_actionMergeClusters_triggered(self):\n \"\"\"Merge button (M) click. Merge selected clusters.
Easier to use than\n running gac() on selected clusters using a really big sigma to force\n them to all merge\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n s = self.sort\n spikes = s.spikes\n sids = [] # spikes to merge\n for cluster in clusters:\n sids.append(cluster.neuron.sids)\n # merge any selected usids as well\n sids.append(spw.GetUnsortedSpikes())\n sids = np.concatenate(sids)\n if len(sids) == 0:\n return\n\n # save some undo/redo stuff\n message = 'merge clusters %r' % [ c.id for c in clusters ]\n cc = ClusterChange(sids, spikes, message)\n cc.save_old(clusters, s.norder, s.good)\n\n # decide on newnid and where to insert it into norder\n newnid = None # merge by default into a new highest numbered nid\n inserti = None # order new cluster by default to end of nlist\n if len(clusters) == 1:\n # keep same position of this one nid in norder, regardless of whether it's\n # single-unit, multiunit, or junk\n inserti = s.norder.index(clusters[0].id)\n elif len(clusters) > 1:\n oldunids = np.asarray(cc.oldunids)\n suids = oldunids[oldunids > 0] # selected single unit nids\n if len(suids) > 0: # merge into largest selected single unit nid:\n spikecounts = np.asarray([ s.neurons[suid].nspikes for suid in suids ])\n newnid = suids[spikecounts.argmax()]\n inserti = s.norder.index(newnid)\n # correct for shift due to deletion of oldunids that precede newnid in norder:\n inserti -= sum([ s.norder.index(oldunid) < inserti for oldunid in oldunids])\n\n # delete selected clusters and deselect selected usids\n spw.DelClusters(clusters, update=False)\n self.uslist.clearSelection()\n\n # create new cluster\n #t0 = time.time()\n newcluster = spw.CreateCluster(update=False, id=newnid, inserti=inserti)\n neuron = newcluster.neuron\n self.MoveSpikes2Neuron(sids, neuron, update=False)\n plotdims = spw.GetClusterPlotDims()\n newcluster.update_pos()\n\n # save more undo/redo stuff\n cc.save_new([newcluster], s.norder, s.good)\n spw.AddClusterChangeToStack(cc)\n\n # now do some final updates\n spw.UpdateClustersGUI()\n spw.ColourPoints(newcluster)\n #print('applying clusters to plot took %.3f sec' % (time.time()-t0))\n # select newly created cluster\n spw.SelectClusters(newcluster)\n cc.message += ' into cluster %d' % newcluster.id\n print(cc.message)\n\n def on_actionToggleClustersGood_triggered(self):\n \"\"\"'Good' button (G) click. Toggle 'good' flag of all selected clusters\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n cids = []\n for cluster in clusters:\n cluster.neuron.good = not cluster.neuron.good\n cids.append(cluster.id)\n self.nlist.updateAll() # nlist item colouring will change as a result\n print(\"Toggled 'good' flag of clusters %r\" % cids)\n\n def on_actionLabelMultiunit_triggered(self):\n \"\"\"- button click. 
Label all selected clusters as multiunit by deleting them\n and creating new ones with -ve IDs\"\"\"\n spw = self.spykewindow\n clusters = spw.GetClusters()\n s = self.sort\n spikes = s.spikes\n # only relabel single unit clusters:\n clusters = [ cluster for cluster in clusters if cluster.id > 0 ]\n if len(clusters) == 0:\n return\n sids = []\n for cluster in clusters:\n sids.append(cluster.neuron.sids)\n sids = np.concatenate(sids)\n\n # save some undo/redo stuff\n message = 'label as multiunit clusters %r' % [ c.id for c in clusters ]\n cc = ClusterChange(sids, spikes, message)\n cc.save_old(clusters, s.norder, s.good)\n\n # delete old clusters\n inserti = s.norder.index(clusters[0].id)\n # collect cluster sids before cluster deletion\n sidss = [ cluster.neuron.sids for cluster in clusters ]\n spw.DelClusters(clusters, update=False)\n\n # create new multiunit clusters\n newclusters = []\n for sids in sidss:\n muid = s.get_nextmuid()\n newcluster = spw.CreateCluster(update=False, id=muid, inserti=inserti)\n neuron = newcluster.neuron\n self.MoveSpikes2Neuron(sids, neuron, update=False)\n newcluster.update_pos()\n newclusters.append(newcluster)\n inserti += 1\n\n # select newly labelled multiunit clusters\n spw.SelectClusters(newclusters)\n\n # save more undo/redo stuff\n cc.save_new(newclusters, s.norder, s.good)\n spw.AddClusterChangeToStack(cc)\n print(cc.message)\n\n def on_actionChanSplitClusters_triggered(self):\n \"\"\"Split by channels button (/) click\"\"\"\n ## TODO: make sure this works on .srf files! Why was chancombosplit being used?\n self.spykewindow.maxchansplit()\n #self.spykewindow.chancombosplit()\n\n def on_actionDensitySplit_triggered(self):\n \"\"\"Split cluster pair by density along line between their centers\"\"\"\n self.spykewindow.densitysplit()\n\n def on_actionRandomSplit_triggered(self):\n \"\"\"Randomly split each selected cluster in half\"\"\"\n self.spykewindow.randomsplit()\n\n def on_actionRenumber_triggered(self):\n if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:\n self.renumber_selected_cluster()\n else:\n self.renumber_all_clusters()\n\n def renumber_selected_cluster(self):\n \"\"\"Renumber a single selected cluster to whatever free ID the user wants, for\n colouring purposes\"\"\"\n spw = self.spykewindow\n s = self.sort\n spikes = s.spikes\n\n cluster = spw.GetCluster() # exactly one selected cluster\n oldid = cluster.id\n newid = max(s.norder) + 1\n newid, ok = QtGui.QInputDialog.getInt(self, \"Renumber cluster\",\n \"This will clear the undo/redo stack, and is not undoable.\\n\"\n \"Enter new ID:\", value=newid)\n if not ok:\n return\n if newid in s.norder:\n print(\"Choose a non-existing nid to renumber to\")\n return\n # deselect cluster\n spw.SelectClusters(cluster, on=False)\n\n # rename to newid\n cluster.id = newid # this indirectly updates neuron.id\n # update cluster and neuron dicts, and spikes array\n s.clusters[newid] = cluster\n s.neurons[newid] = cluster.neuron\n sids = cluster.neuron.sids\n spikes['nid'][sids] = newid\n # remove duplicate oldid dict entries\n del s.clusters[oldid]\n del s.neurons[oldid]\n # replace oldid with newid in norder\n s.norder[s.norder.index(oldid)] = newid\n # update colour of any relevant points in cluster plot\n spw.ColourPoints(cluster)\n # reselect cluster\n spw.SelectClusters(cluster)\n # some cluster changes in stack may no longer be applicable, reset cchanges\n del spw.cchanges[:]\n spw.cci = -1\n print('Renumbered neuron %d to %d' % (oldid, newid))\n\n def 
renumber_all_clusters(self):\n \"\"\"Renumber single unit clusters consecutively from 1, ordered by y position. Do the\n same for multiunit (-ve number) clusters, starting from -1. Sorting by y position\n makes user inspection of clusters more orderly, makes the presence of duplicate\n clusters more obvious, and allows for maximal spatial separation between clusters of\n the same colour, reducing colour conflicts\"\"\"\n val = QtGui.QMessageBox.question(self.panel, \"Renumber all clusters\",\n \"Are you sure? This will clear the undo/redo stack, and is not undoable.\",\n QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)\n if val == QtGui.QMessageBox.No:\n return\n\n spw = self.spykewindow\n s = self.sort\n spikes = s.spikes\n\n # get spatially and numerically ordered lists of new ids\n oldids = np.asarray(s.norder)\n oldsuids = oldids[oldids > 0]\n oldmuids = oldids[oldids < 0]\n # this is a bit confusing: find indices that would sort old ids by y pos, but then\n # what you really want is to find the y pos *rank* of each old id, so you need to\n # take argsort again:\n newsuids = np.asarray([ s.clusters[cid].pos['y0']\n for cid in oldsuids ]).argsort().argsort() + 1\n newmuids = np.asarray([ s.clusters[cid].pos['y0']\n for cid in oldmuids ]).argsort().argsort() + 1\n newmuids = -newmuids\n # multiunit, followed by single unit, no 0 junk cluster. Can't seem to do it the other\n # way around as of Qt 4.7.2 - it seems QListViews don't like having a -ve value in\n # the last entry. Doing so causes all 2 digit values in the list to become blank,\n # suggests a spacing calculation bug. Reproduce by making last entry multiunit,\n # undoing then redoing. Actually, maybe the bug is it doesn't like having a number\n # in the last entry with fewer digits than the preceding entry. 
Only seems to be a\n # problem when setting self.setUniformItemSizes(True).\n newids = np.concatenate([newmuids, newsuids])\n\n # test\n if np.all(oldids == newids):\n print('Nothing to renumber: cluster IDs already ordered in y0 and contiguous')\n return\n # update for replacing oldids with newids\n oldids = np.concatenate([oldmuids, oldsuids])\n\n # deselect current selections\n selclusters = spw.GetClusters()\n oldselids = [ cluster.id for cluster in selclusters ]\n spw.SelectClusters(selclusters, on=False)\n\n # delete junk cluster, if it exists\n if 0 in s.clusters:\n s.remove_neuron(0)\n print('Deleted junk cluster 0')\n if 0 in oldselids:\n oldselids.remove(0)\n\n # replace old ids with new ids\n cw = spw.windows['Cluster']\n oldclusters = s.clusters.copy() # no need to deepcopy, just copy refs, not clusters\n dims = spw.GetClusterPlotDims()\n for oldid, newid in zip(oldids, newids):\n newid = int(newid) # keep as Python int, not numpy int\n if oldid == newid:\n continue # no need to waste time removing and recreating this cluster\n # change all occurences of oldid to newid\n cluster = oldclusters[oldid]\n cluster.id = newid # this indirectly updates neuron.id\n # update cluster and neuron dicts\n s.clusters[newid] = cluster\n s.neurons[newid] = cluster.neuron\n sids = cluster.neuron.sids\n spikes['nid'][sids] = newid\n\n # remove any orphaned cluster ids\n for oldid in oldids:\n if oldid not in newids:\n del s.clusters[oldid]\n del s.neurons[oldid]\n\n # reset norder\n s.norder = []\n s.norder.extend(sorted([ int(newid) for newid in newmuids ])[::-1])\n s.norder.extend(sorted([ int(newid) for newid in newsuids ]))\n\n # now do some final updates\n spw.UpdateClustersGUI()\n spw.ColourPoints(s.clusters.values())\n # reselect the previously selected (but now renumbered) clusters,\n # helps user keep track\n oldiis = [ list(oldids).index(oldselid) for oldselid in oldselids ]\n newselids = newids[oldiis]\n spw.SelectClusters([s.clusters[cid] for cid in newselids])\n # all cluster changes in stack are no longer applicable, reset cchanges\n del spw.cchanges[:]\n spw.cci = -1\n print('Renumbering complete')\n\n def on_actionFind_triggered(self):\n \"\"\"Find current cluster or spike\"\"\"\n ctrl = QApplication.instance().keyboardModifiers() & Qt.ControlModifier\n if ctrl:\n self.FindSpike()\n else:\n self.FindCluster()\n\n def FindCluster(self):\n \"\"\"Move focus to location of currently selected (single) cluster\"\"\"\n spw = self.spykewindow\n try:\n cluster = spw.GetCluster()\n except RuntimeError as err:\n print(err)\n return\n gw = spw.windows['Cluster'].glWidget\n dims = spw.GetClusterPlotDims()\n gw.focus = np.float32([ cluster.normpos[dim] for dim in dims ])\n gw.panTo() # pan to new focus\n gw.updateGL()\n\n def FindSpike(self):\n \"\"\"Move focus to location of currently selected (single) spike\"\"\"\n spw = self.spykewindow\n try:\n sid = spw.GetSpike()\n except RuntimeError as err:\n print(err)\n return\n gw = spw.windows['Cluster'].glWidget\n pointis = gw.sids.searchsorted(sid)\n gw.focus = gw.points[pointis]\n gw.panTo() # pan to new focus\n gw.updateGL()\n\n def on_actionSelectRandomSpikes_triggered(self):\n \"\"\"Select random sample of spikes in current cluster(s), or random sample\n of unsorted spikes if no cluster(S) selected\"\"\"\n nsamples = int(self.nsamplesComboBox.currentText())\n if len(self.nslist.neurons) > 0:\n slist = self.nslist\n else:\n slist = self.uslist\n slist.clearSelection() # emits selectionChanged signal, .reset() doesn't\n 
slist.selectRandom(nsamples)\n\n def on_gainComboBox_triggered(self):\n \"\"\"Set gain of panel based on gainComboBox selection\"\"\"\n panel = self.panel\n panel.gain = float(self.gainComboBox.currentText())\n panel.do_layout() # resets axes lims and recalcs panel.pos\n panel._update_scale()\n panel.draw_refs()\n panel.updateAllItems()\n\n def on_actionAlignMin_triggered(self):\n self.Align('min')\n\n def on_actionAlignMax_triggered(self):\n self.Align('max')\n\n def on_actionAlignBest_triggered(self):\n self.Align('best')\n\n def on_actionShiftLeft_triggered(self):\n if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:\n nt = -1\n else:\n nt = -2\n self.Shift(nt)\n \n def on_actionShiftRight_triggered(self): \n if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:\n nt = 1\n else:\n nt = 2\n self.Shift(nt)\n\n def on_incltComboBox_triggered(self):\n \"\"\"Change length of chan selection lines, optionally trigger cluster replot\"\"\"\n self.panel.update_selvrefs()\n self.panel.draw_refs()\n #self.spykewindow.ui.plotButton.click()\n\n def get_inclt(self):\n \"\"\"Return inclt value in incltComboBox\"\"\"\n return float(self.incltComboBox.currentText()) # us\n\n inclt = property(get_inclt)\n\n def get_tis(self):\n \"\"\"Return tis (start and end timepoint indices) of duration inclt, asymmetric around\n t=0 spike time. Note that any changes to the code here should also be made in the\n timepoint selection display code in SortPanel.update_selvrefs()\"\"\"\n s = self.sort\n inclt = self.inclt # duration to include, asymmetric around t=0 spike time (us)\n tw = self.panel.tw\n dtw = tw[1] - tw[0] # spike time window width\n left = intround(abs(tw[0]) / dtw * inclt) # left fraction wrt t=0 spike time\n right = inclt - left # right fraction wrt t=0 spike time\n tis = s.twts.searchsorted([-left, right])\n return tis\n\n tis = property(get_tis)\n\n def on_nPCsPerChanSpinBox_valueChanged(self, val):\n self.sort.npcsperchan = val\n\n def on_actionReloadSpikes_triggered(self):\n spw = self.spykewindow\n sids = spw.GetAllSpikes()\n sort = self.sort\n if len(sids) == 0:\n # if no spikes specified, reload all spikes\n sids = sort.spikes['id']\n usemeanchans = False\n if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:\n usemeanchans = True\n sort.reload_spikes_and_templates(sids, usemeanchans=usemeanchans)\n # add sids to the set of dirtysids to be resaved to .wave file:\n spw.update_dirtysids(sids)\n # auto-refresh all plots:\n self.panel.updateAllItems()\n\n def on_actionFindPrevMostSimilar_triggered(self):\n self.findMostSimilarCluster('previous')\n\n def on_actionFindNextMostSimilar_triggered(self):\n self.findMostSimilarCluster('next')\n\n def on_actionToggleErrors_toggled(self, checked):\n self.panel.showFills(checked)\n\n def on_slider_valueChanged(self, slideri):\n self.nslist.clearSelection() # emits selectionChanged signal, .reset() doesn't\n if self.nslist.model().sliding == False:\n self.nslist.model().sids.sort() # change from nid order to sid order\n self.nslist.updateAll() # update to reflect new ordering\n self.nslist.model().sliding = True\n nsamples = int(self.nsamplesComboBox.currentText())\n rows = np.arange(slideri, slideri+nsamples)\n self.nslist.selectRows(rows)\n\n def on_slider_sliderPressed(self):\n \"\"\"Make slider click (without movement) highlight the first nsamples\n or fewer spikes when slider is at 0 position\"\"\"\n slideri = self.slider.value()\n if slideri == 0:\n nsamples = int(self.nsamplesComboBox.currentText())\n 
nsamples = min(nsamples, self.nslist.model().nspikes)\n rows = np.arange(nsamples)\n self.nslist.selectRows(rows)\n\n def update_slider(self):\n \"\"\"Update slider limits and step sizes\"\"\"\n nsamples = int(self.nsamplesComboBox.currentText())\n nsids = len(self.nslist.sids)\n ulim = max(nsids-nsamples, 1) # upper limit\n self.slider.setRange(0, ulim)\n self.slider.setSingleStep(1)\n self.slider.setPageStep(nsamples)\n\n def findMostSimilarCluster(self, which='next'):\n \"\"\"If no chans selected, compare source to next or previous most similar cluster\n based on chans the two have in common, while requiring the two have each others'\n max chans in common. If chans have been selected, use them as a starting set of\n chans to compare on. Also, use only the timepoint range selected in incltComboBox\"\"\"\n try:\n source = self.getClusterComparisonSource()\n except RuntimeError as err:\n print(err)\n return\n destinations = list(self.sort.clusters.values())\n destinations.remove(source)\n selchans = np.sort(self.panel.chans_selected)\n if len(selchans) > 0:\n srcchans = np.intersect1d(source.neuron.wave.chans, selchans)\n if len(srcchans) == 0:\n print(\"Source cluster doesn't overlap with selected chans\")\n return\n else:\n srcchans = source.neuron.wave.chans\n\n if self.spykewindow.ui.normButton.isChecked():\n print(\"NOTE: findMostSimilarCluster() doesn't currently take spike amplitude \"\n \"normalization into account. To see the true amplitudes used to compare \"\n \"neuron pairs, turn off normalization\")\n\n errors = []\n dests = []\n t0i, t1i = self.tis # timepoint range selected in incltComboBox\n # try and compare source neuron waveform to all destination neuron waveforms\n for dest in destinations:\n if dest.neuron.wave.data is None: # hasn't been calculated yet\n dest.neuron.update_wave()\n dstchans = dest.neuron.wave.chans\n if len(selchans) > 0:\n if not set(selchans).issubset(dstchans):\n continue\n dstchans = selchans\n cmpchans = np.intersect1d(srcchans, dstchans)\n if len(cmpchans) == 0: # not comparable\n continue\n # ensure maxchan of both source and dest neuron are both in cmpchans\n if source.neuron.chan not in cmpchans or dest.neuron.chan not in cmpchans:\n continue\n srcwavedata = source.neuron.wave[cmpchans].data[:, t0i:t1i]\n dstwavedata = dest.neuron.wave[cmpchans].data[:, t0i:t1i]\n error = core.rms(srcwavedata - dstwavedata)\n errors.append(error)\n dests.append(dest)\n if len(errors) == 0:\n print(\"No sufficiently overlapping clusters on selected chans to compare to\")\n return\n errors = np.asarray(errors)\n dests = np.asarray(dests)\n desterrsortis = errors.argsort()\n\n if which == 'next':\n self._cmpid += 1\n elif which == 'previous':\n self._cmpid -= 1\n else: raise ValueError('Unknown which: %r' % which)\n self._cmpid = max(self._cmpid, 0)\n self._cmpid = min(self._cmpid, len(dests)-1)\n\n dest = dests[desterrsortis][self._cmpid]\n self.spykewindow.SelectClusters(dest)\n desterr = errors[desterrsortis][self._cmpid]\n print('n%d to n%d rmserror: %.2f uV' %\n (source.id, dest.id, self.sort.converter.AD2uV(desterr)))\n\n def getClusterComparisonSource(self):\n selclusters = self.spykewindow.GetClusters()\n errmsg = 'unclear which cluster to use as source for comparison'\n if len(selclusters) == 1:\n source = selclusters[0]\n self._source = source\n self._cmpid = -1 # init/reset\n elif len(selclusters) == 2:\n source = self._source\n if source not in selclusters:\n raise RuntimeError(errmsg)\n # deselect old destination cluster:\n 
selclusters.remove(source)\n self.spykewindow.SelectClusters(selclusters, on=False)\n else:\n self._source = None # reset for tidiness\n raise RuntimeError(errmsg)\n return source\n\n def Shift(self, nt):\n \"\"\"Shift selected sids by nt timepoints\"\"\"\n s = self.sort\n spikes = s.spikes\n spw = self.spykewindow\n sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))\n self.sort.shift(sids, nt)\n print('Shifted %d spikes by %d timepoints' % (len(sids), nt))\n unids = np.unique(spikes['nid'][sids])\n neurons = [ s.neurons[nid] for nid in unids ]\n for neuron in neurons:\n neuron.update_wave() # update affected mean waveforms\n # add dirtysids to the set to be resaved to .wave file:\n spw.update_dirtysids(sids)\n # auto-refresh all plots\n self.panel.updateAllItems()\n\n def Align(self, to):\n \"\"\"Align all implicitly selected spikes to min or max, or best fit\n on selected chans\"\"\" \n s = self.sort\n spikes = s.spikes\n spw = self.spykewindow\n sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))\n if to == 'best':\n tis = self.tis\n # find which chans are common to all sids:\n commonchans = s.get_common_chans(sids)[0]\n # check selected chans\n selchans = spw.get_selchans(sids)\n for selchan in selchans:\n if selchan not in commonchans:\n print(\"Chan %d not common to all spikes, pick from %r\"\n % (selchan, list(commonchans)))\n return\n print('Best fit aligning %d spikes between tis=%r on chans=%r' %\n (len(sids), list(tis), selchans))\n # numpy implementation:\n #dirtysids = s.alignbest(sids, tis, selchans)\n # cython implementation:\n dirtysids = util.alignbest_cy(s, sids, tis, np.int64(selchans))\n else: # to in ['min', 'max']\n print('Aligning %d spikes to %s' % (len(sids), to))\n dirtysids = s.alignminmax(sids, to)\n paligned = len(dirtysids) / len(sids) * 100\n print('Aligned %d/%d (%.1f%%) spikes' % (len(dirtysids), len(sids), paligned))\n unids = np.unique(spikes['nid'][dirtysids])\n neurons = [ s.neurons[nid] for nid in unids ]\n for neuron in neurons:\n neuron.update_wave() # update affected mean waveforms\n # add dirtysids to the set to be resaved to .wave file:\n spw.update_dirtysids(dirtysids)\n # auto-refresh all plots:\n self.panel.updateAllItems()\n\n def RemoveNeuron(self, neuron, update=True):\n \"\"\"Remove neuron and all its spikes from the GUI and the Sort\"\"\"\n self.MoveSpikes2List(neuron, neuron.sids, update=update)\n self.sort.remove_neuron(neuron.id)\n if update:\n self.nlist.updateAll()\n\n def MoveSpikes2Neuron(self, sids, neuron=None, update=True):\n \"\"\"Assign spikes from sort.spikes to a neuron, and trigger eventual update of\n mean wave. 
If neuron is None, create a new one\"\"\"\n sids = toiter(sids)\n spikes = self.sort.spikes\n if neuron == None:\n neuron = self.sort.create_neuron()\n neuron.sids = np.union1d(neuron.sids, sids) # update\n spikes['nid'][sids] = neuron.id\n if update:\n self.sort.update_usids()\n self.uslist.updateAll()\n if neuron in self.nslist.neurons:\n self.nslist.neurons = self.nslist.neurons # trigger nslist refresh\n # TODO: selection doesn't seem to be working, always jumps to top of list\n #self.uslist.Select(row) # automatically select the new item at that position\n neuron.wave.data = None # trigger template mean update\n return neuron\n\n def MoveSpikes2List(self, neuron, sids, update=True):\n \"\"\"Move spikes from a neuron back to the unsorted spike list control\"\"\"\n sids = toiter(sids)\n if len(sids) == 0:\n return # nothing to do\n spikes = self.sort.spikes\n neuron.sids = np.setdiff1d(neuron.sids, sids) # return what's in 1st arr and not in 2nd\n spikes['nid'][sids] = 0 # unbind neuron id of sids in spikes struct array\n if update:\n self.sort.update_usids()\n self.uslist.updateAll()\n # this only makes sense if the neuron is currently selected in the nlist:\n if neuron in self.nslist.neurons:\n self.nslist.neurons = self.nslist.neurons # this triggers a refresh\n neuron.wave.data = None # triggers an update when it's actually needed\n\n def PlotClusterHistogram(self, X, nids):\n \"\"\"Plot histogram of given clusters along a single dimension. If two clusters are\n given, project them onto axis connecting their centers, and calculate separation\n indices between them. Otherwise, plot the distribution of all given clusters\n (up to a limit) along the first dimension in X.\"\"\"\n spw = self.spykewindow\n mplw = spw.OpenWindow('MPL')\n unids = np.unique(nids) # each unid corresponds to a cluster, except possibly unid 0\n nclusters = len(unids)\n if nclusters == 0:\n mplw.ax.clear()\n mplw.figurecanvas.draw()\n print(\"No spikes selected\")\n return\n elif nclusters > 5: # to prevent slowdowns, don't plot too many\n mplw.ax.clear()\n mplw.figurecanvas.draw()\n print(\"Too many clusters selected for cluster histogram\")\n return\n elif nclusters == 2:\n calc_measures = True\n else:\n calc_measures = False\n projdimi = 0\n\n ndims = X.shape[1]\n points = [] # list of projection of each cluster's points onto dimi\n for unid in unids:\n sidis, = np.where(nids == unid)\n # don't seem to need contig points for NDsepmetric, no need for copy:\n points.append(X[sidis])\n #points.append(np.ascontiguousarray(X[sidis]))\n if calc_measures:\n t0 = time.time()\n NDsep = util.NDsepmetric(*points, Nmax=20000)\n print('NDsep calc took %.3f sec' % (time.time()-t0))\n # centers of both clusters, use median:\n c0 = np.median(points[0], axis=0) # ndims vector\n c1 = np.median(points[1], axis=0)\n # line connecting the centers of the two clusters, wrt c0\n line = c1-c0\n line /= np.linalg.norm(line) # make it unit length\n #print('c0=%r, c1=%r, line=%r' % (c0, c1, line))\n else:\n line = np.zeros(ndims)\n line[projdimi] = 1.0 # pick out just the one component\n c0 = 0.0 # set origin at 0\n # calculate projection of each cluster's points onto line\n projs = []\n for cpoints in points:\n projs.append(np.dot(cpoints-c0, line))\n if calc_measures:\n d = np.median(projs[1]) - np.median(projs[0])\n # measure whether centers are at least 3 of the bigger stdevs away from\n # each other:\n maxstd = max(projs[0].std(), projs[1].std())\n if maxstd == 0:\n oneDsep = 0 # not sure if this is ideal\n else:\n oneDsep = d / (3 
* maxstd)\n #print('std0=%f, std1=%f, d=%f' % (projs[0].std(), projs[1].std(), d))\n proj = np.concatenate(projs)\n nbins = max(intround(np.sqrt(len(proj))), 2) # seems like a good heuristic\n #print('nbins = %d' % nbins)\n edges = np.histogram(proj, bins=nbins)[1]\n hists = []\n for i in range(nclusters):\n hists.append(np.histogram(projs[i], bins=edges)[0])\n hist = np.concatenate([hists]) # one cluster hist per row\n masses = np.asarray([ h.sum() for h in hist ])\n sortedmassis = masses.argsort()\n # Take the fraction of area that the two distribs overlap.\n # At each bin, take min value of the two distribs. Add up all those min values,\n # and divide by the mass of the smaller distrib.\n if calc_measures:\n overlaparearatio = hist.min(axis=0).sum() / masses[sortedmassis[0]]\n djs = core.DJS(hists[0], hists[1])\n # plotting:\n ledges = edges[:-1] # keep just the left edges, discard the last right edge\n assert len(ledges) == nbins\n binwidth = ledges[1] - ledges[0]\n # plot:\n a = mplw.ax\n a.clear()\n windowtitle = \"clusters %r\" % list(unids)\n print(windowtitle)\n mplw.setWindowTitle(windowtitle)\n if calc_measures:\n #title = (\"sep index=%.3f, overlap area ratio=%.3f, DJS=%.3f, sqrt(DJS)=%.3f\"\n # % (oneDsep, overlaparearatio, djs, np.sqrt(djs)))\n title = (\"%dDsep=%.3f, 1Dsep=%.3f, OAR=%.3f, DJS=%.3f\"\n % (ndims, NDsep, oneDsep, overlaparearatio, djs))\n print(title)\n a.set_title(title)\n cs = [ CLUSTERCOLOURDICT[unid] for unid in unids ]\n for i, c in enumerate(cs):\n # due to white background, replace white clusters with black:\n if c == WHITE:\n cs[i] = 'black'\n # plot the smaller cluster last, to maximize visibility:\n for i in sortedmassis[::-1]:\n a.bar(ledges, hist[i], width=binwidth, color=cs[i], edgecolor=cs[i])\n ## TODO: tight_layout call needs updating for MPL 2.2:\n #mplw.f.tight_layout(pad=0.3) # crop figure to contents\n mplw.figurecanvas.draw()\n" ]
[ [ "numpy.intersect1d", "numpy.histogram", "numpy.savetxt", "numpy.diff", "numpy.any", "numpy.asarray", "numpy.int64", "sklearn.decomposition.FastICA", "sklearn.decomposition.NMF", "numpy.float64", "numpy.abs", "sklearn.manifold.TSNE", "numpy.where", "numpy.union1d", "numpy.unique", "numpy.zeros", "numpy.dot", "numpy.setdiff1d", "numpy.float32", "numpy.column_stack", "numpy.median", "numpy.uint64", "numpy.arange", "numpy.hstack", "numpy.all", "numpy.sort", "numpy.array", "numpy.linalg.norm", "sklearn.decomposition.PCA", "numpy.savez_compressed", "sklearn.decomposition.SparsePCA", "sklearn.decomposition.MiniBatchSparsePCA", "scipy.signal.find_peaks", "numpy.sqrt", "numpy.concatenate", "numpy.split" ] ]
emmettmeinzer/hmwgen
[ "cd47733b5a34a6a3a9b56026eb5e73069e398033", "cd47733b5a34a6a3a9b56026eb5e73069e398033" ]
[ "archive/reuUpdated.py", "archive/attention.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 11 13:41:14 2019\r\n\r\n@author: Emmett & Binyang\r\n\"\"\"\r\n\r\nfrom pprint import pprint\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer\r\n\r\n##Let’s first build a corpus to train our tokenizer on. We’ll use stuff available in NLTK:\r\n\r\nfrom nltk.corpus import gutenberg\r\n\r\n# print (dir(gutenberg))\r\n# print (gutenberg.fileids())\r\n\r\ntext = \"\"\r\nfor file_id in gutenberg.fileids():\r\n text += gutenberg.raw(file_id)\r\n \r\nprint (len(text))\r\n\r\n##a funtion that converts a list to a string\r\ndef listToString(s): \r\n \r\n # initialize an empty string \r\n str1 = \"\" \r\n \r\n # traverse in the string \r\n for ele in s: \r\n str1 += ele \r\n \r\n # return string \r\n return str1\r\n\r\n##extract sentences from samples for following sentiment analysis\r\nsampNum = 1\r\nsent_df = pd.DataFrame()\r\ni = 0\r\n\r\nwhile (sampNum < 186):\r\n fileOpen = open(\"sample\"+str(sampNum)+\".txt\",\"r\")\r\n temp = fileOpen.readlines()\r\n temp = listToString(temp)\r\n \r\n trainer = PunktTrainer()\r\n trainer.INCLUDE_ALL_COLLOCS = True\r\n trainer.train(text)\r\n tokenizer = PunktSentenceTokenizer(trainer.get_params())\r\n \r\n ##Adding more abbreviations\r\n tokenizer._params.abbrev_types.add('dr')\r\n \r\n sent = tokenizer.tokenize(temp)\r\n \r\n for sent in sent:\r\n sent_df.loc[i, 'sent'] = sent\r\n sent_df.loc[i, 'sample'] = sampNum\r\n i += 1\r\n \r\n sampNum += 1\r\n\r\n##NLTK’s built-in Vader Sentiment Analyzer will simply rank a piece of text as positive, negative or neutral \r\n##using a lexicon of positive and negative words.\r\n\r\n##We can utilize this tool by first creating a Sentiment Intensity Analyzer (SIA) to categorize our headlines, \r\n##then we'll use the polarity_scores method to get the sentiment.\r\n\r\n##We'll append each sentiment dictionary to a results list, which we'll transform into a dataframe:\r\n\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA\r\n\r\nsia = SIA()\r\nresults = []\r\n\r\nfor idx, row in sent_df.iterrows():\r\n line = row['sent']\r\n score = sia.polarity_scores(line)\r\n sent_df.loc[idx, 'neg'] = score.get('neg')\r\n sent_df.loc[idx, 'neu'] = score.get('neu')\r\n sent_df.loc[idx, 'pos'] = score.get('pos')\r\n sent_df.loc[idx, 'compound'] = score.get('compound')\r\n\r\n# pprint(results[:10], width=100)\r\n\r\n##We will consider posts with a compound value greater than 0.2 as positive and less than -0.2 as negative. \r\n##There's some testing and experimentation that goes with choosing these ranges, and there is a trade-off to be \r\n##made here. If you choose a higher value, you might get more compact results (less false positives and false \r\n##negatives), but the size of the results will decrease significantly.\r\n\r\nsent_df['label'] = 0\r\nsent_df.loc[sent_df['compound'] > 0.3, 'label'] = 1\r\nsent_df.loc[sent_df['compound'] < -0.3, 'label'] = -1\r\n# sent_df.head()\r\n\r\n##We have all the data we need to save, so let's do that:\r\n\r\nsent_df.to_csv('sentiment analysis.csv', mode='a', encoding='utf-8', index=False)\r\n\r\n##We can now keep appending to this csv, but just make sure that if you reassign the headlines set, you could get \r\n##duplicates. 
Maybe add a more advanced saving function that reads and removes duplicates before saving.\r\n\r\n#Let's first take a peak at a few positive and negative headlines:\r\n\r\nprint(\"Positive headlines:\\n\")\r\npprint(list(sent_df[sent_df['label'] == 1].sent)[:5], width=200)\r\n\r\nprint(\"\\nNegative headlines:\\n\")\r\npprint(list(sent_df[sent_df['label'] == -1].sent)[:5], width=200)\r\n\r\n##Now let's check how many total positives and negatives we have in this dataset:\r\n\r\nprint(sent_df.label.value_counts())\r\nprint(sent_df.label.value_counts(normalize=True) * 100)\r\n\r\n##The first line gives us raw value counts of the labels, whereas the second line provides percentages \r\n##with the normalize keyword.\r\n\r\n##For fun, let's plot a bar chart:\r\n\"\"\"\r\nfig, ax = plt.subplots(figsize=(8, 8))\r\n\r\ncounts = sent_df.label.value_counts(normalize=True) * 100\r\n\r\nsns.barplot(x=counts.index, y=counts, ax=ax)\r\n\r\nax.set_xticklabels(['Negative', 'Neutral', 'Positive'])\r\nax.set_ylabel(\"Percentage\")\r\n\r\nplt.show()\r\n\"\"\"\r\n\r\n##filter the sentences by number of words in it\r\nfor idx, row in sent_df.iterrows():\r\n sentence = row['sent']\r\n sent_df.loc[idx, 'len_sent'] = len(sentence.split())\r\n\r\n##split positive and other sentences\r\npos = sent_df[sent_df['label'] == 1]\r\nneg = sent_df[sent_df['label'] != 1]\r\n\r\nimport gensim\r\nfrom gensim.parsing.preprocessing import strip_non_alphanum\r\nfrom gensim.parsing.preprocessing import strip_punctuation\r\nfrom gensim.parsing.preprocessing import strip_multiple_whitespaces\r\nfrom gensim.parsing.preprocessing import stem_text\r\n\r\ncorpus_full = []\r\nfor idx, row in sent_df.iterrows():\r\n temp = row['sent']\r\n temp1 = strip_non_alphanum(str(temp))\r\n temp2 = strip_punctuation(temp1)\r\n temp3 = strip_multiple_whitespaces(temp2)\r\n final = stem_text(temp3)\r\n corpus_full.append(final)\r\n\r\ncorpus_pos = []\r\nfor idx, row in pos.iterrows():\r\n temp = row['sent']\r\n temp1 = strip_non_alphanum(str(temp))\r\n temp2 = strip_punctuation(temp1)\r\n temp3 = strip_multiple_whitespaces(temp2)\r\n final = stem_text(temp3)\r\n corpus_pos.append(final)\r\n \r\ncorpus_neg = []\r\nfor idx, row in neg.iterrows():\r\n temp = row['sent']\r\n temp1 = strip_non_alphanum(str(temp))\r\n temp2 = strip_punctuation(temp1)\r\n temp3 = strip_multiple_whitespaces(temp2)\r\n final = stem_text(temp3)\r\n corpus_neg.append(final)\r\n\r\nfrom nltk.corpus import stopwords\r\nstop_words = stopwords.words('english')\r\n\r\nstoplist = set('a about above after again against all am an and any are arent\\\r\n as also at be because been before being below between both but\\\r\n by cant cannot could couldnt did didnt do does doesnt doing dont\\\r\n down during each els few for from further had hadnt has have havent\\\r\n having he hed hes her here heres hers herself him himself his\\\r\n how hows i id ill im ive if in into is isnt it its itself lets\\\r\n me more most mustnt my myself no nor not of off on once only or\\\r\n other ought our ours ourselves out over own same shant she shes\\\r\n should shouldnt so some such than that thats the their theirs\\\r\n them themselves then there theres these they theyd theyll theyre\\\r\n theyve this those through to too under until up very was wasnt\\\r\n we wed were weve were werent what whats when whens which while\\\r\n who whos whom why whys with wont would wouldnt you youd youll\\\r\n youre youve your yours yourself yourselves ll ve s ar mayb ha re\\\r\n us thi isn a b c d e f g h i j k l m n o p 
q r s t u v w x y z\\\r\n hi will can get back go don wa let atc ok ani mi thei whenev make\\\r\n just take aw know sai good baltimor jetblu lol thank thanks like\\\r\n vari might less highest billion nice probabl lot fuck shit sure\\\r\n feel dure befor realli work veri chanc see awai onc onli dy aren\\\r\n 100 someth thing even happen becaus wai everi much help want think\\\r\n fear flight plane fly mai time dai\\\r\n 1 2 3 4 5 6 7 8 9 10'.split())\r\n\r\nprint (len(stoplist))\r\nstoplist.update(stop_words)\r\n\r\nprint(len(stop_words))\r\nprint(len(stoplist))\r\n\r\n#standardize text -- makes all characters lowercase and removes common stop words\r\ntext_full = [[word for word in document.lower().split() if word not in stoplist]\r\n for document in corpus_full]\r\nprint(text_full)\r\ntext_pos = [[word for word in document.lower().split() if word not in stoplist]\r\n for document in corpus_pos]\r\ntext_neg = [[word for word in document.lower().split() if word not in stoplist]\r\n for document in corpus_neg]\r\n\r\n#count number of times that word appears in corpus\r\n#pair frequency with respective word in new array\r\nfrom collections import defaultdict\r\n\r\nfrequency = defaultdict(int)\r\nfor text in text_full:\r\n for token in text:\r\n frequency[token] += 1\r\n\r\ncorpus_removeOne_full = [[token for token in text if frequency[token]>1] for text in text_full]\r\n\r\nfrequency = defaultdict(int)\r\nfor text in text_pos:\r\n for token in text:\r\n frequency[token] += 1\r\n \r\ncorpus_removeOne_pos = [[token for token in text if frequency[token]>1] for text in text_pos]\r\n\r\nfrequency = defaultdict(int)\r\nfor text in text_neg:\r\n for token in text:\r\n frequency[token] += 1\r\n \r\ncorpus_removeOne_neg = [[token for token in text if frequency[token]>1] for text in text_neg]\r\n\r\n\r\nfrom gensim import corpora\r\n#add corpora to dictionary\r\ndictionary_full = corpora.Dictionary(corpus_removeOne_full)\r\ndictionary_pos = corpora.Dictionary(corpus_removeOne_pos)\r\ndictionary_neg = corpora.Dictionary(corpus_removeOne_neg)\r\n#save dictionary for future reference\r\ndictionary_full.save('redditTest_full.dict')\r\ndictionary_pos.save('redditTest_pos.dict') #location of document in computer\r\ndictionary_neg.save('redditTest_neg.dict')\r\n#dict = gensim.corpora.Dictionary.load('redditTest.dict')\r\n\r\n#assign numeric id to each token in dictionary\r\ndictID_full = dictionary_full.token2id\r\ndictID_pos = dictionary_pos.token2id\r\ndictID_neg = dictionary_neg.token2id\r\n\r\n#remove empty sentences\r\nfor text in corpus_removeOne_full:\r\n if len(text) == 0:\r\n corpus_removeOne_full.remove(text)\r\n\r\nfor text in corpus_removeOne_pos:\r\n if len(text) == 0:\r\n corpus_removeOne_pos.remove(text)\r\n \r\nfor text in corpus_removeOne_neg:\r\n if len(text) == 0:\r\n corpus_removeOne_neg.remove(text)\r\n\r\n\r\n#converts each word into vector following same process as example\r\n#Bag of Word Corpus of Full Sentiment\r\nbow_corpus_full = [dictionary_full.doc2bow(text) for text in corpus_removeOne_full]\r\ncorpora.MmCorpus.serialize('redditTest_full.mm', bow_corpus_full)\r\ncorp_full = gensim.corpora.MmCorpus('redditTest_full.mm')\r\n\r\nfrom gensim import models\r\ntfidf_pos = models.TfidfModel(bow_corpus_full)\r\ncorpus_tfidf_full = tfidf_pos[bow_corpus_full]\r\n\r\n#Bag of Word Corpus of Positive Sentiment\r\nbow_corpus_pos = [dictionary_pos.doc2bow(text) for text in corpus_removeOne_pos]\r\ncorpora.MmCorpus.serialize('redditTest_pos.mm', bow_corpus_pos)\r\ncorp_pos = 
gensim.corpora.MmCorpus('redditTest_pos.mm')\r\n\r\nfrom gensim import models\r\ntfidf_pos = models.TfidfModel(bow_corpus_pos)\r\ncorpus_tfidf_pos = tfidf_pos[bow_corpus_pos]\r\n\r\n#Bag of Word Corpus of Negative Sentiment\r\nbow_corpus_neg = [dictionary_neg.doc2bow(text) for text in corpus_removeOne_neg]\r\ncorpora.MmCorpus.serialize('redditTest_neg.mm', bow_corpus_neg)\r\ncorp_neg = gensim.corpora.MmCorpus('redditTest_neg.mm')\r\n\r\nfrom gensim import models\r\ntfidf_neg = models.TfidfModel(bow_corpus_neg)\r\ncorpus_tfidf_neg = tfidf_neg[bow_corpus_neg]\r\n\r\n\r\n#LDA Mallet for full corpus\r\nmallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'\r\nlda_full = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_full, num_topics=9, id2word=dictionary_full, workers=1, alpha=110, random_seed=109, iterations=50)\r\ncorpus_LDA_full = lda_full[bow_corpus_full]\r\nlda_full.print_topics(9)\r\n\r\n#LDA Mallet for positive corpus\r\nmallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'\r\nlda_pos = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_pos, num_topics=9, id2word=dictionary_pos, workers=1, alpha=110, random_seed=109, iterations=50)\r\ncorpus_LDA_pos = lda_pos[bow_corpus_pos]\r\nlda_pos.print_topics(9)\r\n\r\n#LDA Mallet for negative corpus\r\nmallet_path = '/Users/emmet/.spyder-py3-dev/REU_Project/mallet-2.0.8/bin/mallet'\r\nlda_neg = gensim.models.wrappers.LdaMallet(mallet_path, corpus=bow_corpus_neg, num_topics=9, id2word=dictionary_neg, workers=1, alpha=110, random_seed=109, iterations=50)\r\ncorpus_LDA_neg = lda_neg[bow_corpus_neg]\r\nlda_neg.print_topics(9)\r\n\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as mcolors\r\nfrom sklearn.manifold import TSNE\r\n\r\ncolors = np.array([color for name, color in mcolors.TABLEAU_COLORS.items()])\r\n\r\n#t-SNE plot for full corpus\r\nn_topics = 9\r\ntopic_weights_full = []\r\nfor row_list in lda_full[bow_corpus_full]:\r\n tmp = np.zeros(n_topics)\r\n for i, w in row_list:\r\n tmp[i] = w\r\n topic_weights_full.append(tmp)\r\n \r\narr_full = pd.DataFrame(topic_weights_full).fillna(9).values\r\ntopic_num_full = np.argmax(arr_full, axis=1)\r\ntsne_model_full = TSNE(n_components=3, random_state=None, method='barnes_hut', \r\n angle=0.5, init='pca')\r\ntsne_lda_full = tsne_model_full.fit_transform(arr_full)\r\n\r\nsub = str.maketrans(\"0123456789\", \"₀₁₂₃₄₅₆₇₈₉\")\r\nplt.xlabel('t-SNE1'.translate(sub))\r\nplt.ylabel('t-SNE2'.translate(sub))\r\nplt.title('t-SNE Plot of Topics within Positive Sentiment Corpus')\r\ntsne_full = plt.scatter(x=tsne_lda_full[:,0], y=tsne_lda_full[:,1])\r\nplt.show(tsne_full)\r\n\r\n\"\"\"\r\n#t-SNE plot for positive corpus\r\nn_topics = 9\r\ntopic_weights_pos = []\r\nfor row_list in lda_pos[bow_corpus_pos]:\r\n tmp = np.zeros(n_topics)\r\n for i, w in row_list:\r\n tmp[i] = w\r\n topic_weights_pos.append(tmp)\r\n \r\narr_pos = pd.DataFrame(topic_weights_pos).fillna(0).values\r\ntopic_num_pos = np.argmax(arr_pos, axis=1)\r\ntsne_model_pos = TSNE(n_components=3, random_state=None, method='barnes_hut', \r\n angle=0.5, init='pca')\r\ntsne_lda_pos = tsne_model_pos.fit_transform(arr_pos)\r\n\r\nsub = str.maketrans(\"0123456789\", \"₀₁₂₃₄₅₆₇₈₉\")\r\nplt.xlabel('t-SNE1'.translate(sub))\r\nplt.ylabel('t-SNE2'.translate(sub))\r\nplt.title('t-SNE Plot of Topics within Positive Sentiment Corpus')\r\ntsne_pos = plt.scatter(x=tsne_lda_pos[:,0], y=tsne_lda_pos[:,1])\r\n#plt.show(tsne_pos)\r\n\r\n\r\n#t-SNE plot 
for negative corpus\r\nn_topics = 9\r\ntopic_weights_neg = []\r\nfor row_list in lda_neg[bow_corpus_neg]:\r\n tmp = np.zeros(n_topics)\r\n for i, w in row_list:\r\n tmp[i] = w\r\n topic_weights_neg.append(tmp)\r\n \r\narr_neg = pd.DataFrame(topic_weights_neg).fillna(0).values\r\ntopic_num_neg = np.argmax(arr_neg, axis=1)\r\ntsne_model_neg = TSNE(n_components=3, random_state=None, method='barnes_hut', \r\n angle=0.5, init='pca')\r\ntsne_lda_neg = tsne_model_neg.fit_transform(arr_neg)\r\n\r\nsub = str.maketrans(\"0123456789\", \"₀₁₂₃₄₅₆₇₈₉\")\r\nplt.xlabel('t-SNE1'.translate(sub))\r\nplt.ylabel('t-SNE2'.translate(sub))\r\nplt.title('t-SNE Plot of Topics within Negative Sentiment Corpus')\r\ntsne_neg = plt.scatter(tsne_lda_neg[:,0], tsne_lda_neg[:,1])\r\n#plt.show(tsne_neg)\r\n\"\"\"\r\n\r\nfrom collections import Counter\r\n#Word Count & Keyword for Full Corpus\r\ntopics_full = lda_full.show_topics(formatted=False)\r\nflatten_full = [w for w_list in bow_corpus_full for w in w_list]\r\ncounter_full = Counter(flatten_full)\r\n\r\ntopic_weight_full = []\r\nfor i, topic in topics_full:\r\n for word, weight in topic:\r\n topic_weight_full.append([word, i , weight, counter_full[word]])\r\n\r\ndata_frame_full = pd.DataFrame(topic_weight_full, columns=['word', 'topic_id', 'importance', 'word_count']) \r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n ax.bar(x='word', height=\"word_count\", data=data_frame_full.loc[data_frame_full.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')\r\n ax_twin = ax.twinx()\r\n ax_twin.bar(x='word', height=\"importance\", data=data_frame_full.loc[data_frame_full.topic_id==i, :], color=colors[i], width=0.2, label='Weights')\r\n ax.set_ylabel('Word Count', color=colors[i])\r\n ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)\r\n ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)\r\n ax.tick_params(axis='y', left=False)\r\n ax.set_xticklabels(data_frame_full.loc[data_frame_full.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')\r\n ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')\r\n\r\nfig.tight_layout(w_pad=2) \r\nplt.show()\r\n\r\n\"\"\"\r\n#Word Count & Keyword for Positive Corpus\r\ntopics_pos = lda_pos.show_topics(formatted=False)\r\nflatten_pos = [w for w_list in bow_corpus_pos for w in w_list]\r\ncounter_pos = Counter(flatten_pos)\r\n\r\ntopic_weight_pos = []\r\nfor i, topic in topics_pos:\r\n for word, weight in topic:\r\n topic_weight_pos.append([word, i , weight, counter_pos[word]])\r\n\r\ndata_frame_pos = pd.DataFrame(topic_weight_pos, columns=['word', 'topic_id', 'importance', 'word_count']) \r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n ax.bar(x='word', height=\"word_count\", data=data_frame_pos.loc[data_frame_pos.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')\r\n ax_twin = ax.twinx()\r\n ax_twin.bar(x='word', height=\"importance\", data=data_frame_pos.loc[data_frame_pos.topic_id==i, :], color=colors[i], width=0.2, label='Weights')\r\n ax.set_ylabel('Word Count', color=colors[i])\r\n ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)\r\n ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)\r\n ax.tick_params(axis='y', left=False)\r\n ax.set_xticklabels(data_frame_pos.loc[data_frame_pos.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')\r\n ax.legend(loc='upper left'); 
ax_twin.legend(loc='upper right')\r\n\r\nfig.tight_layout(w_pad=2) \r\nplt.show()\r\n\r\n#Word Count & Keyword for Negative Corpus\r\ntopics_neg = lda_neg.show_topics(formatted=False)\r\nflatten_neg = [w for w_list in bow_corpus_neg for w in w_list]\r\ncounter_neg = Counter(flatten_neg)\r\n\r\ntopic_weight_neg = []\r\nfor i, topic in topics_neg:\r\n for word, weight in topic:\r\n topic_weight_neg.append([word, i , weight, counter_neg[word]])\r\n\r\ndata_frame_neg = pd.DataFrame(topic_weight_neg, columns=['word', 'topic_id', 'importance', 'word_count']) \r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10,6), sharey=True, dpi=160)\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n ax.bar(x='word', height=\"word_count\", data=data_frame_neg.loc[data_frame_neg.topic_id==i, :], color=colors[i], width=0.5, alpha=0.3, label='Word Count')\r\n ax_twin = ax.twinx()\r\n ax_twin.bar(x='word', height=\"importance\", data=data_frame_neg.loc[data_frame_neg.topic_id==i, :], color=colors[i], width=0.2, label='Weights')\r\n ax.set_ylabel('Word Count', color=colors[i])\r\n ax_twin.set_ylim(0, 0.5); ax.set_ylim(0, 100)\r\n ax.set_title('Topic: ' + str(i+1), color=colors[i], fontsize=8)\r\n ax.tick_params(axis='y', left=False)\r\n ax.set_xticklabels(data_frame_neg.loc[data_frame_neg.topic_id==i, 'word'], rotation=90, horizontalalignment= 'center')\r\n ax.legend(loc='upper left'); ax_twin.legend(loc='upper right')\r\n\r\nfig.tight_layout(w_pad=2) \r\nplt.show()\r\n\"\"\"\r\n\r\nfrom wordcloud import WordCloud\r\nimport matplotlib.colors as mcolors\r\n#Word Cloud Display for Full Corpus\r\ncloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)\r\n\r\ntopics_full = lda_full.show_topics(formatted=False)\r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10, 6))\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n fig.add_subplot(ax)\r\n topic_words_full = dict(topics_full[i][1])\r\n cloud.generate_from_frequencies(topic_words_full, max_font_size=300)\r\n plt.gca().imshow(cloud)\r\n plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))\r\n plt.gca().axis('off')\r\n\r\nplt.axis('off')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n\"\"\"\r\n#Word Cloud Display for Positive Corpus\r\ncloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)\r\n\r\ntopics_pos = lda_pos.show_topics(formatted=False)\r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10, 6))\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n fig.add_subplot(ax)\r\n topic_words_pos = dict(topics_pos[i][1])\r\n cloud.generate_from_frequencies(topic_words_pos, max_font_size=300)\r\n plt.gca().imshow(cloud)\r\n plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))\r\n plt.gca().axis('off')\r\n\r\nplt.axis('off')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n#Word Cloud Display for Negative Corpus\r\ncloud = WordCloud(stopwords=stoplist, background_color='white', width=2500, height=1800, max_words=7, colormap='tab10', color_func=lambda *args, **kwargs: colors[i], prefer_horizontal=1.0)\r\n\r\ntopics_neg = lda_neg.show_topics(formatted=False)\r\n\r\nfig, axes = plt.subplots(3, 3, figsize=(10, 6))\r\n\r\nfor i, ax in enumerate(axes.flatten()):\r\n fig.add_subplot(ax)\r\n topic_words_neg = dict(topics_neg[i][1])\r\n cloud.generate_from_frequencies(topic_words_neg, max_font_size=300)\r\n 
plt.gca().imshow(cloud)\r\n plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=10))\r\n plt.gca().axis('off')\r\n\r\nplt.axis('off')\r\nplt.tight_layout()\r\nplt.show()\r\n\"\"\"\r\n\r\nimport pyLDAvis.gensim\r\nimport pyLDAvis\r\nimport gensim \r\n\r\n#LDA Mallet pyLDAvis for Full Corpus\r\nmallet2lda_full = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_full)\r\nvisualizeLDA_full = pyLDAvis.gensim.prepare(mallet2lda_full, bow_corpus_full, dictionary_full)\r\npyLDAvis.show()\r\n\r\n\"\"\"\r\n#LDA Mallet pyLDAvis for Postiive Corpus\r\nmallet2lda_pos = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_pos)\r\nvisualizeLDA_pos = pyLDAvis.gensim.prepare(mallet2lda_pos, bow_corpus_pos, dictionary_pos)\r\npyLDAvis.show(visualizeLDA_pos)\r\n\r\n#LDA Mallet pyLDAvis for Negative Corpus\r\nmallet2lda_neg = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_neg)\r\nvisualizeLDA_neg = pyLDAvis.gensim.prepare(mallet2lda_neg, bow_corpus_neg, dictionary_neg)\r\npyLDAvis.show(visualizeLDA_neg)\r\n\"\"\"", "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 5 22:53:44 2020\r\n\r\n@author: Emmett\r\n\"\"\"\r\nimport tensorflow as tf\r\nimport os\r\nfrom tensorflow.python.keras.layers import Layer\r\nfrom tensorflow.python.keras import backend as K\r\n\r\n\r\nclass AttentionLayer(Layer):\r\n \"\"\"\r\n This class implements Bahdanau attention (https://arxiv.org/pdf/1409.0473.pdf).\r\n There are three sets of weights introduced W_a, U_a, and V_a\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs):\r\n super(AttentionLayer, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n assert isinstance(input_shape, list)\r\n # Create a trainable weight variable for this layer.\r\n\r\n self.W_a = self.add_weight(name='W_a',\r\n shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),\r\n initializer='uniform',\r\n trainable=True)\r\n self.U_a = self.add_weight(name='U_a',\r\n shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),\r\n initializer='uniform',\r\n trainable=True)\r\n self.V_a = self.add_weight(name='V_a',\r\n shape=tf.TensorShape((input_shape[0][2], 1)),\r\n initializer='uniform',\r\n trainable=True)\r\n\r\n super(AttentionLayer, self).build(input_shape) # Be sure to call this at the end\r\n\r\n def call(self, inputs, verbose=False):\r\n \"\"\"\r\n inputs: [encoder_output_sequence, decoder_output_sequence]\r\n \"\"\"\r\n assert type(inputs) == list\r\n encoder_out_seq, decoder_out_seq = inputs\r\n if verbose:\r\n print('encoder_out_seq>', encoder_out_seq.shape)\r\n print('decoder_out_seq>', decoder_out_seq.shape)\r\n\r\n def energy_step(inputs, states):\r\n \"\"\" Step function for computing energy for a single decoder state\r\n inputs: (batchsize * 1 * de_in_dim)\r\n states: (batchsize * 1 * de_latent_dim)\r\n \"\"\"\r\n\r\n assert_msg = \"States must be an iterable. 
Got {} of type {}\".format(states, type(states))\r\n assert isinstance(states, list) or isinstance(states, tuple), assert_msg\r\n\r\n \"\"\" Some parameters required for shaping tensors\"\"\"\r\n en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]\r\n de_hidden = inputs.shape[-1]\r\n\r\n \"\"\" Computing S.Wa where S=[s0, s1, ..., si]\"\"\"\r\n # <= batch size * en_seq_len * latent_dim\r\n W_a_dot_s = K.dot(encoder_out_seq, self.W_a)\r\n\r\n \"\"\" Computing hj.Ua \"\"\"\r\n U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1) # <= batch_size, 1, latent_dim\r\n if verbose:\r\n print('Ua.h>', U_a_dot_h.shape)\r\n\r\n \"\"\" tanh(S.Wa + hj.Ua) \"\"\"\r\n # <= batch_size*en_seq_len, latent_dim\r\n Ws_plus_Uh = K.tanh(W_a_dot_s + U_a_dot_h)\r\n if verbose:\r\n print('Ws+Uh>', Ws_plus_Uh.shape)\r\n\r\n \"\"\" softmax(va.tanh(S.Wa + hj.Ua)) \"\"\"\r\n # <= batch_size, en_seq_len\r\n e_i = K.squeeze(K.dot(Ws_plus_Uh, self.V_a), axis=-1)\r\n # <= batch_size, en_seq_len\r\n e_i = K.softmax(e_i)\r\n\r\n if verbose:\r\n print('ei>', e_i.shape)\r\n\r\n return e_i, [e_i]\r\n\r\n def context_step(inputs, states):\r\n \"\"\" Step function for computing ci using ei \"\"\"\r\n\r\n assert_msg = \"States must be an iterable. Got {} of type {}\".format(states, type(states))\r\n assert isinstance(states, list) or isinstance(states, tuple), assert_msg\r\n\r\n # <= batch_size, hidden_size\r\n c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)\r\n if verbose:\r\n print('ci>', c_i.shape)\r\n return c_i, [c_i]\r\n\r\n fake_state_c = K.sum(encoder_out_seq, axis=1)\r\n fake_state_e = K.sum(encoder_out_seq, axis=2) # <= (batch_size, enc_seq_len, latent_dim\r\n\r\n \"\"\" Computing energy outputs \"\"\"\r\n # e_outputs => (batch_size, de_seq_len, en_seq_len)\r\n last_out, e_outputs, _ = K.rnn(\r\n energy_step, decoder_out_seq, [fake_state_e],\r\n )\r\n\r\n \"\"\" Computing context vectors \"\"\"\r\n last_out, c_outputs, _ = K.rnn(\r\n context_step, e_outputs, [fake_state_c],\r\n )\r\n\r\n return c_outputs, e_outputs\r\n\r\n def compute_output_shape(self, input_shape):\r\n \"\"\" Outputs produced by the layer \"\"\"\r\n return [\r\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),\r\n tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))\r\n ]" ]
[ [ "numpy.zeros", "matplotlib.pyplot.axis", "pandas.DataFrame", "matplotlib.pyplot.tight_layout", "matplotlib.colors.TABLEAU_COLORS.items", "numpy.argmax", "matplotlib.pyplot.subplots", "matplotlib.pyplot.title", "sklearn.manifold.TSNE", "matplotlib.pyplot.gca", "matplotlib.pyplot.show", "matplotlib.pyplot.scatter" ], [ "tensorflow.python.keras.backend.softmax", "tensorflow.python.keras.backend.expand_dims", "tensorflow.python.keras.backend.rnn", "tensorflow.python.keras.backend.tanh", "tensorflow.TensorShape", "tensorflow.python.keras.backend.dot", "tensorflow.python.keras.backend.sum" ] ]
LatencyTDH/DeepSpeed
[ "eecef309cb12528cfa78d932a6f073afb43847e5" ]
[ "deepspeed/runtime/engine.py" ]
[ "'''\nCopyright 2019 The Microsoft DeepSpeed Team\n'''\n\nimport os\nimport stat\nimport torch\nimport warnings\nimport hashlib\nimport torch.distributed as dist\nfrom collections import OrderedDict\nfrom shutil import copyfile\n\nfrom torch.nn.modules import Module\nfrom torch.distributed.distributed_c10d import _get_global_rank\nfrom tensorboardX import SummaryWriter\n\nfrom deepspeed.runtime.utils import see_memory_usage\nfrom deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer\nfrom deepspeed.runtime.zero.stage1 import FP16_DeepSpeedZeroOptimizer_Stage1\nfrom deepspeed.runtime.zero.partition_parameters import ZeroParamStatus\nfrom deepspeed.runtime.zero.utils import is_zero_supported_optimizer\nfrom deepspeed.runtime.activation_checkpointing import checkpointing as activation_checkpointing\nfrom deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer\nfrom deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer\nfrom deepspeed.runtime.config import DeepSpeedConfig, DEEPSPEED_OPTIMIZERS, \\\n ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, \\\n TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT\n\nfrom deepspeed.runtime.dataloader import DeepSpeedDataLoader\nfrom deepspeed.runtime.constants import \\\n ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \\\n PLD_THETA, PLD_GAMMA\nfrom deepspeed.runtime.zero.constants import \\\n ZERO_OPTIMIZATION_OPTIMIZER_STATES, ZERO_OPTIMIZATION_GRADIENTS, ZERO_OPTIMIZATION_WEIGHTS\nfrom deepspeed.runtime.csr_tensor import CSRTensor\nimport deepspeed.runtime.lr_schedules as lr_schedules\nfrom deepspeed.utils import logger, log_dist, init_distributed\nfrom deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer\nfrom deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop\n\nfrom .pipe.module import PipelineModule\nfrom .utils import ensure_directory_exists\nfrom ..ops.op_builder import UtilsBuilder\nfrom ..ops.adam import DeepSpeedCPUAdam\nfrom ..ops.adam import FusedAdam\n\nfrom deepspeed.profiling.flops_profiler.profiler import FlopsProfiler\n\nMEMORY_OPT_ALLREDUCE_SIZE = 500000000\n\ntry:\n from apex import amp\nexcept ImportError:\n # Fail silently so we don't spam logs unnecessarily if user isn't using amp\n pass\n\n\ndef split_half_float_double_csr(tensors):\n dtypes = [\n \"torch.cuda.HalfTensor\",\n \"torch.cuda.FloatTensor\",\n \"torch.cuda.DoubleTensor\",\n CSRTensor.type()\n ]\n buckets = []\n for i, dtype in enumerate(dtypes):\n bucket = [t for t in tensors if t.type() == dtype]\n if bucket:\n buckets.append((dtype, bucket))\n return buckets\n\n\ndef _initialize_parameter_parallel_groups(parameter_parallel_size=None):\n data_parallel_size = int(dist.get_world_size())\n if parameter_parallel_size is None:\n parameter_parallel_size = int(data_parallel_size)\n logger.info(\"data_parallel_size: %s, parameter_parallel_size: %s\",\n data_parallel_size,\n parameter_parallel_size)\n assert data_parallel_size % parameter_parallel_size == 0, \\\n 'world size should be divisible by parameter parallel size'\n rank = dist.get_rank()\n my_group = None\n for i in range(dist.get_world_size() // parameter_parallel_size):\n ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)\n group = torch.distributed.new_group(ranks)\n if rank in ranks:\n my_group = group\n return my_group\n\n\ndef print_configuration(args, name):\n logger.info('{}:'.format(name))\n for arg in sorted(vars(args)):\n dots = '.' 
* (29 - len(arg))\n logger.info(' {} {} {}'.format(arg, dots, getattr(args, arg)))\n\n\nclass DeepSpeedEngine(Module):\n r\"\"\"DeepSpeed engine for training.\n \"\"\"\n def __init__(self,\n args,\n model,\n optimizer=None,\n model_parameters=None,\n training_data=None,\n lr_scheduler=None,\n mpu=None,\n dist_init_required=None,\n collate_fn=None,\n config_params=None,\n dont_change_device=False):\n super(DeepSpeedEngine, self).__init__()\n self.dont_change_device = dont_change_device\n self.client_optimizer = optimizer\n self.client_model_parameters = model_parameters\n self.client_lr_scheduler = lr_scheduler\n self.training_data = training_data\n self.collate_fn = collate_fn\n self.mpu = mpu\n self.data_parallel_group = None\n self.global_steps = 0\n self.global_samples = 0\n self.micro_steps = 0\n self.skipped_steps = 0\n self.gradient_average = True\n self.warn_unscaled_loss = True\n self.config_params = config_params\n self.loaded_checkpoint_mp_world_size = None\n self.loaded_checkpoint_dp_world_size = None\n self.enable_backward_allreduce = True\n self.progressive_layer_drop = None\n self.dist_backend = \"nccl\"\n\n if dist_init_required is None:\n dist_init_required = not dist.is_initialized()\n\n if dist_init_required is False:\n assert dist.is_initialized() is True, \"Torch distributed not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()\"\n else:\n # Initialize torch distributed if needed\n init_distributed(dist_backend=self.dist_backend)\n\n see_memory_usage(f\"DeepSpeed Engine: Before args sanity test\")\n self._do_args_sanity_check(args)\n self._configure_with_arguments(args, mpu)\n self._do_sanity_check()\n\n if mpu is not None:\n assert not self.elasticity_enabled(), \"Elasticity is not currently supported\" \\\n \" with model parallelism.\"\n\n self._set_distributed_vars()\n\n if self.tensorboard_enabled() and self.global_rank == 0:\n self.summary_writer = self.get_summary_writer()\n\n see_memory_usage(f\"DeepSpeed Engine: Before configure distributed model\")\n\n # Configure distributed model\n self._configure_distributed_model(model)\n\n see_memory_usage(f\"DeepSpeed Engine: After configure distributed model\")\n\n # Configure wall clock timer\n self.timers = SynchronizedWallClockTimer()\n\n # Throughput timer\n self.tput_timer = ThroughputTimer(\n batch_size=self.train_micro_batch_size_per_gpu(),\n num_workers=self.dp_world_size,\n steps_per_output=self.steps_per_print(),\n monitor_memory=False)\n\n if training_data:\n self.training_dataloader = self.deepspeed_io(training_data)\n else:\n self.training_dataloader = None\n\n # Configure optimizer and scheduler\n self.optimizer = None\n self.lr_scheduler = None\n if model_parameters or optimizer:\n self._configure_optimizer(optimizer, model_parameters)\n self._configure_lr_scheduler(lr_scheduler)\n self._report_progress(0)\n\n # Bookkeeping for csr support\n self.csr_tensor_module_names = set()\n if self.sparse_gradients_enabled():\n for name, module in self.module.named_modules():\n if isinstance(module, torch.nn.Embedding):\n self.csr_tensor_module_names.add(name + \".weight\")\n logger.info(\"Will convert {} to sparse (csr) \"\n \"tensor during training\".format(name))\n\n self.save_non_zero_checkpoint = False\n self.save_zero_checkpoint = False\n self._configure_checkpointing(dist_init_required)\n\n if self.pld_enabled():\n self.progressive_layer_drop = self._configure_progressive_layer_drop()\n\n if self.global_rank == 0:\n self._config.print('DeepSpeedEngine 
configuration')\n        if self.dump_state():\n            print_configuration(self, 'DeepSpeedEngine')\n\n        # Load pre-installed or JIT compile (un)flatten ops\n        util_ops = UtilsBuilder().load()\n        self.flatten = util_ops.flatten\n        self.unflatten = util_ops.unflatten\n\n    def get_batch_info(self):\n        \"\"\" Get all training batch related settings.\n\n        Returns:\n            train_batch_size (int): The effective training batch size. This is the amount of data\n                samples that leads to one step of model update.\n            train_micro_batch_size_per_gpu (int): Batch size to be processed by one GPU in one\n                step (without gradient accumulation).\n            gradient_accumulation_steps (int): Number of training steps to accumulate gradients\n                before averaging and applying them.\n        \"\"\"\n        return self.train_batch_size(), self.train_micro_batch_size_per_gpu(), self.gradient_accumulation_steps()\n\n    def checkpoint_tag_validation_enabled(self):\n        return self._config.checkpoint_tag_validation_enabled\n\n    def checkpoint_tag_validation_fail(self):\n        return self._config.checkpoint_tag_validation_fail\n\n    def elasticity_enabled(self):\n        return self._config.elasticity_enabled\n\n    def pld_enabled(self):\n        return self._config.pld_enabled\n\n    def pld_params(self):\n        return self._config.pld_params\n\n    def pld_theta(self):\n        return self.pld_params()[PLD_THETA]\n\n    def pld_gamma(self):\n        return self.pld_params()[PLD_GAMMA]\n\n    def tensorboard_enabled(self):\n        return self._config.tensorboard_enabled\n\n    def tensorboard_output_path(self):\n        return self._config.tensorboard_output_path\n\n    def tensorboard_job_name(self):\n        return self._config.tensorboard_job_name\n\n    def get_summary_writer(self,\n                           name=\"DeepSpeedJobName\",\n                           base=os.path.join(os.path.expanduser(\"~\"),\n                                             \"tensorboard\")):\n        if self.tensorboard_output_path():\n            base_dir = self.tensorboard_output_path()\n            job_name = self.tensorboard_job_name()\n            log_dir = os.path.join(base_dir, job_name)\n        else:\n            if self.tensorboard_job_name():\n                name = self.tensorboard_job_name()\n\n            # Infrastructure-specific job-id\n            if 'DLWS_JOB_ID' in os.environ:\n                infra_job_id = os.environ['DLWS_JOB_ID']\n            elif 'DLTS_JOB_ID' in os.environ:\n                infra_job_id = os.environ['DLTS_JOB_ID']\n            else:\n                infra_job_id = 'unknown-job-id'\n\n            summary_writer_dir_name = os.path.join(infra_job_id, \"logs\")\n            log_dir = os.path.join(base, summary_writer_dir_name, name)\n\n        os.makedirs(log_dir, exist_ok=True)\n\n        return SummaryWriter(log_dir=log_dir)\n\n    def wall_clock_breakdown(self):\n        return self._config.wall_clock_breakdown\n\n    def flops_profiler_enabled(self):\n        return self._config.flops_profiler_config.enabled\n\n    def flops_profiler_profile_step(self):\n        return self._config.flops_profiler_config.profile_step\n\n    def flops_profiler_module_depth(self):\n        return self._config.flops_profiler_config.module_depth\n\n    def flops_profiler_top_modules(self):\n        return self._config.flops_profiler_config.top_modules\n\n    def flops_profiler_detailed(self):\n        return self._config.flops_profiler_config.detailed\n\n    def memory_breakdown(self):\n        return self._config.memory_breakdown\n\n    def sparse_gradients_enabled(self):\n        return self._config.sparse_gradients_enabled\n\n    def train_batch_size(self):\n        return self._config.train_batch_size\n\n    def train_micro_batch_size_per_gpu(self):\n        return self._config.train_micro_batch_size_per_gpu\n\n    def optimizer_name(self):\n        return self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name\n\n    def optimizer_params(self):\n        return self._config.optimizer_params\n\n    def 
optimizer_legacy_fusion(self):\n return self._config.optimizer_legacy_fusion\n\n def scheduler_name(self):\n return self._config.scheduler_name\n\n def scheduler_params(self):\n return self._config.scheduler_params\n\n def zero_optimization(self):\n return self._config.zero_enabled\n\n def zero_allow_untested_optimizer(self):\n return self._config.zero_allow_untested_optimizer\n\n def zero_reduce_scatter(self):\n return self._config.zero_config.reduce_scatter\n\n def zero_overlap_comm(self):\n return self._config.zero_config.overlap_comm\n\n def zero_offload_optimizer(self):\n return self._config.zero_config.offload_optimizer\n\n def zero_offload_param(self):\n return self._config.zero_config.offload_param\n\n def zero_cpu_offload(self):\n return self._config.zero_config.offload_optimizer is not None\n\n def zero_sub_group_size(self):\n return self._config.zero_config.sub_group_size\n\n def zero_optimization_stage(self):\n return self._config.zero_optimization_stage\n\n def zero_reduce_bucket_size(self):\n return self._config.zero_config.reduce_bucket_size\n\n def zero_allgather_bucket_size(self):\n return self._config.zero_config.allgather_bucket_size\n\n def zero_optimization_partition_gradients(self):\n return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_GRADIENTS\n\n def zero_optimization_partition_weights(self):\n return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_WEIGHTS\n\n def zero_contiguous_gradients(self):\n return self._config.zero_config.contiguous_gradients\n\n def zero_load_from_fp32_weights(self):\n return self._config.zero_config.load_from_fp32_weights\n\n def zero_elastic_checkpoint(self):\n return self._config.zero_config.elastic_checkpoint\n\n def zero_max_live_parameters(self):\n return self._config.zero_config.max_live_parameters\n\n def zero_max_reuse_distance(self):\n return self._config.zero_config.max_reuse_distance\n\n def zero_prefetch_bucket_size(self):\n return self._config.zero_config.prefetch_bucket_size\n\n def zero_param_persistence_threshold(self):\n return self._config.zero_config.param_persistence_threshold\n\n def zero_gather_fp16_weights_on_model_save(self):\n return self._config.zero_config.gather_fp16_weights_on_model_save\n\n def fp16_enabled(self):\n return self._config.fp16_enabled\n\n def amp_enabled(self):\n return self._config.amp_enabled\n\n def amp_params(self):\n return self._config.amp_params\n\n def loss_scale(self):\n return self._config.loss_scale\n\n def gradient_accumulation_steps(self):\n return self._config.gradient_accumulation_steps\n\n def allreduce_always_fp32(self):\n return self._config.allreduce_always_fp32\n\n def postscale_gradients(self):\n return not self._config.prescale_gradients\n\n def gradient_predivide_factor(self):\n return self._config.gradient_predivide_factor\n\n def steps_per_print(self):\n return self._config.steps_per_print\n\n def zero_allgather_partitions(self):\n return self._config.zero_config.allgather_partitions\n\n def dump_state(self):\n return self._config.dump_state\n\n def gradient_clipping(self):\n return self._config.gradient_clipping\n\n def dynamic_loss_scale(self):\n return self._config.loss_scale == 0\n\n def initial_dynamic_scale(self):\n return self._config.initial_dynamic_scale\n\n def dynamic_loss_scale_args(self):\n return self._config.dynamic_loss_scale_args\n\n def swap_tensor_config(self):\n return self._config.swap_tensor_config\n\n def aio_config(self):\n return self._config.aio_config\n\n def _configure_lr_scheduler(self, client_lr_scheduler):\n # First check for 
scheduler in json configuration\n        lr_scheduler = self._scheduler_from_config(self.optimizer)\n        if lr_scheduler:\n            if self.global_rank == 0:\n                logger.info(\n                    f'DeepSpeed using configured LR scheduler = {self.scheduler_name()}')\n            self.lr_scheduler = lr_scheduler\n        else:\n            if self.global_rank == 0:\n                logger.info('DeepSpeed using client LR scheduler')\n            self.lr_scheduler = client_lr_scheduler\n        log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0])\n\n    def _configure_checkpointing(self, dist_init_required):\n\n        dp_rank = self.global_rank\n        if self.mpu:\n            dp_rank = self.mpu.get_data_parallel_rank()\n\n        # only the first data parallel process needs to store the model checkpoint\n        self.save_non_zero_checkpoint = (\n            dp_rank == 0) or self.zero_optimization_partition_weights()\n\n        if self.zero_optimization():\n            param_rank = torch.distributed.get_rank(\n                group=self.optimizer.dp_process_group)\n\n            # Only the first parameter parallel process needs to store the\n            # optimizer state checkpoints for zero\n            self.save_zero_checkpoint = (param_rank == dp_rank)\n\n    def _scheduler_from_config(self, optimizer):\n        scheduler_name = self.scheduler_name()\n        if scheduler_name is not None:\n            if hasattr(lr_schedules, scheduler_name):\n                scheduler = getattr(lr_schedules, scheduler_name)\n            else:\n                assert hasattr(torch.optim.lr_scheduler, scheduler_name), \\\n                    f\"DeepSpeed does not recognize LR scheduler {scheduler_name}\"\n\n                scheduler = getattr(torch.optim.lr_scheduler, scheduler_name)\n\n            scheduler_params = self.scheduler_params()\n            instantiated_scheduler = scheduler(optimizer, **scheduler_params)\n            return instantiated_scheduler\n        else:\n            return None\n\n    def _set_distributed_vars(self):\n        if self.local_rank >= 0:\n            torch.cuda.set_device(self.local_rank)\n            self.device = torch.device(\"cuda\", self.local_rank)\n            self.world_size = dist.get_world_size()\n            self.global_rank = dist.get_rank()\n        else:\n            self.world_size = 1\n            self.global_rank = 0\n            self.device = torch.device(\"cuda\")\n\n    # Configure based on command line arguments\n    def _configure_with_arguments(self, args, mpu):\n        # After the distributed backend is initialized we are guaranteed the LOCAL_RANK\n        # environment variable is set. We must align args.local_rank to this value for\n        # backwards compatibility with scripts relying on [args|self].local_rank containing\n        # the correct local rank info. _do_args_sanity_check will ensure this is the case.\n        self.local_rank = int(os.environ['LOCAL_RANK'])\n        if hasattr(args, 'local_rank'):\n            args.local_rank = self.local_rank\n\n        config_file = args.deepspeed_config if hasattr(args,\n                                                       'deepspeed_config') else None\n        self._config = DeepSpeedConfig(config_file, mpu, param_dict=self.config_params)\n\n    # Validate command line arguments\n    def _do_args_sanity_check(self, args):\n        if hasattr(args, 'deepscale_config') and args.deepscale_config is not None:\n            logger.warning(\n                \"************ --deepscale_config is deprecated, please use --deepspeed_config ************\"\n            )\n            if hasattr(args, 'deepspeed_config'):\n                assert args.deepspeed_config is None, \"Not sure how to proceed, we were given both a deepscale_config and deepspeed_config\"\n            args.deepspeed_config = args.deepscale_config\n\n        assert \"LOCAL_RANK\" in os.environ, \"DeepSpeed requires the LOCAL_RANK environment variable, it is set by the deepspeed launcher, \" \\\n            \"deepspeed.init_distributed, or the torch.distributed launcher. 
If using a different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed.\"\n if hasattr(args, 'local_rank') and args.local_rank != None:\n assert isinstance(args.local_rank, int), f\"args.local_rank of {args.local_rank} is an unknown type {type(args.local_rank)}\"\n if args.local_rank >= 0:\n env_local_rank = int(os.environ.get(\"LOCAL_RANK\"))\n assert env_local_rank == args.local_rank, \\\n f\"Mismatch in local rank setting, args.local_rank={args.local_rank} but env['LOCAL_RANK']={env_local_rank}.\"\n\n if self.config_params is None:\n assert hasattr(args, 'deepspeed_config') and args.deepspeed_config is not None, \\\n 'DeepSpeed requires --deepspeed_config to specify configuration file'\n\n assert os.path.isfile(args.deepspeed_config), \\\n 'DeepSpeed configuration file: {} is not an existing file'.format(args.deepspeed_config)\n\n def _is_supported_optimizer(self, optimizer_name):\n return optimizer_name in DEEPSPEED_OPTIMIZERS or \\\n getattr(torch.optim, optimizer_name, None) is not None\n\n # Validate configuration based on command line arguments\n def _do_sanity_check(self):\n if not self.client_optimizer:\n if self.optimizer_name() is not None:\n assert self._is_supported_optimizer(self.optimizer_name()), \\\n '{} is not a supported DeepSpeed Optimizer'.format(self.optimizer_name())\n\n if self.optimizer_name() == LAMB_OPTIMIZER:\n assert self.dynamic_loss_scale(), \\\n 'DeepSpeed {} optimizer requires dynamic loss scaling'.format(self.optimizer_name())\n\n def _broadcast_model(self):\n def is_replicated(p):\n if hasattr(p, 'ds_status') and p.ds_status is not ZeroParamStatus.AVAILABLE:\n return False\n return True\n\n for p in self.module.parameters():\n if torch.is_tensor(p) and is_replicated(p):\n dist.broadcast(p,\n self.broadcast_src_rank,\n group=self.data_parallel_group)\n\n def _configure_distributed_model(self, model):\n self.module = model\n if self.fp16_enabled():\n self.module.half()\n\n if not self.dont_change_device:\n self.module.to(self.device)\n\n if self.mpu is None:\n self.data_parallel_group = _initialize_parameter_parallel_groups()\n self.dp_world_size = dist.get_world_size()\n self.mp_world_size = 1\n self.broadcast_src_rank = 0\n else:\n self.data_parallel_group = self.mpu.get_data_parallel_group()\n self.dp_world_size = self.mpu.get_data_parallel_world_size()\n self.mp_world_size = self.mpu.get_model_parallel_world_size()\n self.broadcast_src_rank = _get_global_rank(\n self.mpu.get_data_parallel_group(),\n 0)\n\n if not self.amp_enabled():\n self._broadcast_model()\n\n # Configure optimizer\n def _configure_optimizer(self, client_optimizer, model_parameters):\n\n if client_optimizer is not None:\n client_optimizer.param_groups[:] = [\n pg for pg in client_optimizer.param_groups if len(pg[\"params\"]) != 0\n ]\n if self.global_rank == 0:\n logger.info(\n \"Removing param_group that has no 'params' in the client Optimizer\")\n\n basic_optimizer = client_optimizer\n if self.global_rank == 0:\n logger.info('Using client Optimizer as basic optimizer')\n else:\n basic_optimizer = self._configure_basic_optimizer(model_parameters)\n if self.global_rank == 0:\n logger.info(\n 'Using DeepSpeed Optimizer param name {} as basic optimizer'.format(\n self.optimizer_name()))\n\n if self.global_rank == 0:\n logger.info('DeepSpeed Basic Optimizer = {}'.format(\n basic_optimizer.__class__.__name__))\n\n if self.zero_optimization():\n assert not self.amp_enabled(), \"Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs 
similar to amp opt_mode=O2\"\n            if not is_zero_supported_optimizer(basic_optimizer):\n                assert self.zero_allow_untested_optimizer(), \\\n                    'You are using an untested ZeRO Optimizer. Please add <\"zero_allow_untested_optimizer\": true> in the configuration file to use it.'\n\n                if self.global_rank == 0:\n                    logger.warning(\n                        \"**** You are using ZeRO with an untested optimizer, proceed with caution *****\"\n                    )\n            self.optimizer = self._configure_zero_optimizer(basic_optimizer)\n        elif self.amp_enabled():\n            assert not self.fp16_enabled(), \"Cannot enable both amp and (legacy) fp16 mode\"\n            amp_params = self.amp_params()\n            if self.global_rank == 0:\n                logger.info(f\"Initializing AMP with these params: {amp_params}\")\n            try:\n                logger.info(\"Initializing Apex amp from: {}\".format(amp.__path__))\n            except NameError:\n                # If apex/amp is available it will be imported above\n                raise RuntimeError(\n                    \"Unable to import apex/amp, please make sure it is installed\")\n            self.module, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params)\n            self._broadcast_model()\n        elif self.fp16_enabled():\n            self.optimizer = self._configure_fp16_optimizer(basic_optimizer)\n        else:\n            self.optimizer = basic_optimizer\n        log_dist('DeepSpeed Final Optimizer = {}'.format(self.optimizer_name()),\n                 ranks=[0])\n\n    def _configure_basic_optimizer(self, model_parameters):\n        optimizer_parameters = self.optimizer_params()\n        # print(optimizer_parameters.keys())\n        if 'max_grad_norm' in optimizer_parameters.keys():\n            raise ValueError(\n                \"'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details\"\n            )\n\n        if self.optimizer_name() in [ADAM_OPTIMIZER, ADAMW_OPTIMIZER]:\n            torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False)\n            adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT)\n\n            # Optimizer name of Adam forces AdamW logic unless adam_w_mode is explicitly set\n            effective_adam_w_mode = self.optimizer_name(\n            ) == ADAMW_OPTIMIZER or adam_w_mode\n\n            if torch_adam:\n                if not effective_adam_w_mode:\n                    optimizer = torch.optim.Adam(model_parameters,\n                                                 **optimizer_parameters)\n                else:\n                    optimizer = torch.optim.AdamW(model_parameters,\n                                                  **optimizer_parameters)\n            else:\n                if self.zero_cpu_offload():\n                    from deepspeed.ops.adam import DeepSpeedCPUAdam\n                    optimizer = DeepSpeedCPUAdam(model_parameters,\n                                                 **optimizer_parameters,\n                                                 adamw_mode=effective_adam_w_mode)\n                else:\n                    from deepspeed.ops.adam import FusedAdam\n                    optimizer = FusedAdam(model_parameters,\n                                          **optimizer_parameters,\n                                          adam_w_mode=effective_adam_w_mode)\n\n        elif self.optimizer_name() == LAMB_OPTIMIZER:\n            from deepspeed.ops.lamb import FusedLamb\n            optimizer = FusedLamb(model_parameters, **optimizer_parameters)\n        elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:\n            from deepspeed.runtime.fp16.onebit.adam import OnebitAdam\n            optimizer = OnebitAdam(model_parameters, self, **optimizer_parameters)\n            if not self.fp16_enabled():\n                logger.warning(\n                    f'Currently the convergence of 1-bit Adam is only verified under FP16'\n                )\n        else:\n            torch_optimizer = getattr(torch.optim, self.optimizer_name())\n            optimizer = torch_optimizer(model_parameters, **optimizer_parameters)\n        return optimizer\n\n    def _configure_fp16_optimizer(self, optimizer):\n        initial_dynamic_scale = self.initial_dynamic_scale()\n        dynamic_loss_args = self.dynamic_loss_scale_args()\n        clip_grad = self.gradient_clipping()\n        if isinstance(optimizer,\n                      
FusedAdam) or self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:\n if self.dynamic_loss_scale():\n log_dist('Creating fp16 optimizer with dynamic loss scale', ranks=[0])\n timers = self.timers if self.wall_clock_breakdown() else None\n optimizer = FP16_Optimizer(\n optimizer,\n dynamic_loss_scale=True,\n initial_dynamic_scale=initial_dynamic_scale,\n dynamic_loss_args=dynamic_loss_args,\n mpu=self.mpu,\n clip_grad=clip_grad,\n fused_adam_legacy=self.optimizer_legacy_fusion(),\n timers=timers)\n else:\n log_dist('Creating fp16 optimizer with static loss scale: {}'.format(\n self.loss_scale()),\n ranks=[0])\n optimizer = FP16_Optimizer(\n optimizer,\n static_loss_scale=self.loss_scale(),\n mpu=self.mpu,\n clip_grad=clip_grad,\n fused_adam_legacy=self.optimizer_legacy_fusion())\n else:\n log_dist('Creating fp16 unfused optimizer with dynamic loss scale',\n ranks=[0])\n optimizer = FP16_UnfusedOptimizer(\n optimizer,\n static_loss_scale=self.loss_scale(),\n dynamic_loss_scale=self.dynamic_loss_scale(),\n dynamic_loss_args=dynamic_loss_args,\n mpu=self.mpu,\n clip_grad=clip_grad,\n fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER)\n\n return optimizer\n\n def _configure_zero_optimizer(self, optimizer):\n zero_stage = self.zero_optimization_stage()\n log_dist('Creating fp16 ZeRO stage {} optimizer'.format(zero_stage), ranks=[0])\n assert not self.allreduce_always_fp32(), \"ZeRO does not support 'fp32_allreduce': true\"\n timers = self.timers if self.wall_clock_breakdown() else None\n\n if zero_stage == ZERO_OPTIMIZATION_OPTIMIZER_STATES:\n assert self.zero_reduce_scatter(), 'Stage 1 only supports reduce scatter mode'\n optimizer = FP16_DeepSpeedZeroOptimizer_Stage1(\n optimizer,\n static_loss_scale=self.loss_scale(),\n dynamic_loss_scale=self.dynamic_loss_scale(),\n dynamic_loss_args=self.dynamic_loss_scale_args(),\n clip_grad=self.gradient_clipping(),\n all_gather_partitions=self.zero_allgather_partitions(),\n allgather_size=self.zero_allgather_bucket_size(),\n max_elements_per_comm=self.zero_reduce_bucket_size(),\n dp_process_group=self.data_parallel_group,\n elastic_checkpoint=self.zero_elastic_checkpoint(),\n mpu=self.mpu)\n elif zero_stage == ZERO_OPTIMIZATION_GRADIENTS:\n optimizer = FP16_DeepSpeedZeroOptimizer(\n optimizer,\n timers=timers,\n static_loss_scale=self.loss_scale(),\n dynamic_loss_scale=self.dynamic_loss_scale(),\n dynamic_loss_args=self.dynamic_loss_scale_args(),\n clip_grad=self.gradient_clipping(),\n contiguous_gradients=self.zero_contiguous_gradients(),\n reduce_bucket_size=self.zero_reduce_bucket_size(),\n allgather_bucket_size=self.zero_allgather_bucket_size(),\n dp_process_group=self.data_parallel_group,\n reduce_scatter=self.zero_reduce_scatter(),\n overlap_comm=self.zero_overlap_comm(),\n cpu_offload=self.zero_cpu_offload(),\n mpu=self.mpu,\n postscale_gradients=self.postscale_gradients(),\n gradient_predivide_factor=self.gradient_predivide_factor(),\n gradient_accumulation_steps=self.gradient_accumulation_steps())\n elif zero_stage == ZERO_OPTIMIZATION_WEIGHTS:\n print(\"Initializing ZeRO Stage 3\") if dist.get_rank() == 0 else None\n from deepspeed.runtime.zero.stage3 import FP16_DeepSpeedZeroOptimizer_Stage3\n optimizer = FP16_DeepSpeedZeroOptimizer_Stage3(\n self.module,\n optimizer,\n timers=timers,\n static_loss_scale=self.loss_scale(),\n dynamic_loss_scale=self.dynamic_loss_scale(),\n dynamic_loss_args=self.dynamic_loss_scale_args(),\n clip_grad=self.gradient_clipping(),\n contiguous_gradients=self.zero_contiguous_gradients(),\n 
reduce_bucket_size=self.zero_reduce_bucket_size(),\n                prefetch_bucket_size=self.zero_prefetch_bucket_size(),\n                max_reuse_distance=self.zero_max_reuse_distance(),\n                max_live_parameters=self.zero_max_live_parameters(),\n                param_persistence_threshold=self.zero_param_persistence_threshold(),\n                dp_process_group=self.data_parallel_group,\n                reduce_scatter=self.zero_reduce_scatter(),\n                overlap_comm=self.zero_overlap_comm(),\n                offload_optimizer_config=self.zero_offload_optimizer(),\n                offload_param_config=self.zero_offload_param(),\n                sub_group_size=self.zero_sub_group_size(),\n                mpu=self.mpu,\n                postscale_gradients=self.postscale_gradients(),\n                gradient_predivide_factor=self.gradient_predivide_factor(),\n                gradient_accumulation_steps=self.gradient_accumulation_steps(),\n                aio_config=self.aio_config())\n\n        else:\n            raise NotImplementedError(\"ZeRO stage {} not implemented\".format(zero_stage))\n\n        return optimizer\n\n    def _configure_progressive_layer_drop(self):\n        pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma())\n\n        return pld\n\n    def deepspeed_io(self,\n                     dataset,\n                     batch_size=None,\n                     route=ROUTE_TRAIN,\n                     pin_memory=True,\n                     data_sampler=None,\n                     collate_fn=None,\n                     num_local_io_workers=None):\n        if not isinstance(dataset, torch.utils.data.Dataset):\n            raise ValueError(\"Training data must be a torch Dataset\")\n\n        if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL):\n            data_sampler = torch.utils.data.SequentialSampler(dataset)\n\n        if batch_size is None:\n            batch_size = self.train_micro_batch_size_per_gpu()\n\n        if collate_fn is None:\n            collate_fn = self.collate_fn\n\n        # Currently we only use timer in train route\n        deepspeed_io_timer = None\n        if route == ROUTE_TRAIN:\n            deepspeed_io_timer = self.tput_timer\n\n        # If mpu is provided, forward world size and parallel rank to sampler.\n        data_parallel_world_size = None\n        data_parallel_rank = None\n        if self.mpu is not None:\n            data_parallel_world_size = self.mpu.get_data_parallel_world_size()\n            data_parallel_rank = self.mpu.get_data_parallel_rank()\n\n        return DeepSpeedDataLoader(dataset=dataset,\n                                   batch_size=batch_size,\n                                   pin_memory=pin_memory,\n                                   collate_fn=collate_fn,\n                                   local_rank=self.local_rank,\n                                   tput_timer=deepspeed_io_timer,\n                                   num_local_io_workers=num_local_io_workers,\n                                   data_sampler=data_sampler,\n                                   data_parallel_world_size=data_parallel_world_size,\n                                   data_parallel_rank=data_parallel_rank)\n\n    def train(self, mode=True):\n        r\"\"\"Set the module in training mode.\n        \"\"\"\n\n        self.warn_unscaled_loss = True\n        self.module.train(mode)\n\n    def eval(self):\n        r\"\"\"Set the module in evaluation mode.\n        \"\"\"\n\n        self.warn_unscaled_loss = True\n        self.module.train(False)\n\n    def _scale_loss(self, prescaled_loss):\n        if isinstance(prescaled_loss, torch.Tensor):\n            scaled_loss = prescaled_loss / self.gradient_accumulation_steps()\n        elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list):\n            scaled_loss = []\n            for l in prescaled_loss:\n                if isinstance(l, torch.Tensor):\n                    scaled_loss.append(l / self.gradient_accumulation_steps())\n                else:\n                    scaled_loss.append(l)\n        else:\n            scaled_loss = prescaled_loss\n            if self.warn_unscaled_loss:\n                logger.warning(\n                    f'DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}'\n                )\n                self.warn_unscaled_loss = False\n\n        return scaled_loss\n\n    def forward(self, *inputs, **kwargs):\n        r\"\"\"Execute forward propagation\n\n        Arguments:\n            *inputs: Variable length input list\n            **kwargs: variable length keyword arguments\n        \"\"\"\n        if self.flops_profiler_enabled(\n        ) and self.global_steps == self.flops_profiler_profile_step(\n        ) and 
self.global_rank == 0:\n            self.flops_profiler = FlopsProfiler(self.module)\n            self.flops_profiler.start_profile(ignore_list=None)\n\n        if self.module.training and self.progressive_layer_drop:\n            kwargs.update(self.progressive_layer_drop.get_state())\n\n        if self.zero_optimization_partition_weights():\n            # Enable automated discovery of external parameters by indicating that\n            # we are in a forward pass.\n            for module in self.module.modules():\n                module._parameters._in_forward = True\n                pass\n\n        if self.wall_clock_breakdown():\n            self.timers('forward_microstep').start()\n            self.timers('forward').start()\n\n        if self.training_dataloader is None:\n            self.tput_timer.start()\n        loss = self.module(*inputs, **kwargs)\n\n        if self.zero_optimization_partition_weights():\n            # Reset the ZeRO-3 state if we are only doing forward passes (i.e. evaluation).\n            if not torch._C.is_grad_enabled():\n                self.optimizer.param_coordinator.reset_step()\n\n            # Disable automated discovery of external parameters\n            for module in self.module.modules():\n                module._parameters._in_forward = False\n\n        if self.wall_clock_breakdown():\n            self.timers('forward').stop()\n            self.timers('forward_microstep').stop()\n\n        if self.flops_profiler_enabled(\n        ) and self.global_steps == self.flops_profiler_profile_step(\n        ) and self.global_rank == 0:\n            self.flops_profiler.print_model_profile(\n                profile_step=self.global_steps,\n                module_depth=self.flops_profiler_module_depth(),\n                top_modules=self.flops_profiler_top_modules(),\n                detailed=self.flops_profiler_detailed())\n            self.flops_profiler.end_profile()\n\n        return loss\n\n    def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE):\n        # ZeRO stage 2 communicates during non-gradient-accumulation boundaries as well\n        if self.zero_optimization_partition_gradients():\n            self.optimizer.overlapping_partition_gradients_reduce_epilogue()\n\n        # Communicate only at gradient accumulation boundaries\n        elif self.is_gradient_accumulation_boundary():\n            if self.zero_optimization_stage() == ZERO_OPTIMIZATION_OPTIMIZER_STATES:\n                assert self.zero_reduce_scatter()\n                self.optimizer.reduce_scatter_gradients(\n                    postscale_gradients=self.postscale_gradients(),\n                    gradient_predivide_factor=self.gradient_predivide_factor(),\n                    gradient_average=self.gradient_average)\n            else:\n                self.buffered_allreduce_fallback(elements_per_buffer=bucket_size)\n\n    def backward(self, loss, allreduce_gradients=True, release_loss=False):\n        r\"\"\"Execute backward pass on the loss\n\n        Arguments:\n            loss: Torch tensor on which to execute backward propagation\n            allreduce_gradients: is deprecated, ignored, and will soon be removed\n        \"\"\"\n\n        if not allreduce_gradients:\n            logger.warning(\n                f'Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed'\n            )\n\n        # scale loss w.r.t. 
gradient accumulation if needed\n if self.gradient_accumulation_steps() > 1:\n loss = self._scale_loss(loss.float())\n\n # Log training Loss\n if self.tensorboard_enabled():\n if self.is_gradient_accumulation_boundary():\n if self.global_rank == 0:\n self.summary_events = [\n (f'Train/Samples/train_loss',\n loss.mean().item() * self.gradient_accumulation_steps(),\n self.global_samples)\n ]\n for event in self.summary_events: # write_summary_events\n self.summary_writer.add_scalar(event[0], event[1], event[2])\n self.summary_writer.flush()\n\n if self.wall_clock_breakdown():\n self.timers('backward_microstep').start()\n self.timers('backward').start()\n\n assert self.optimizer is not None, \"must provide optimizer during \" \\\n \"init in order to use backward\"\n\n if self.wall_clock_breakdown():\n self.timers('backward_inner_microstep').start()\n self.timers('backward_inner').start()\n\n if self.zero_optimization():\n self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary(\n )\n self.optimizer.backward(loss)\n elif self.amp_enabled():\n # AMP requires delaying unscale when inside gradient accumulation boundaries\n # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n delay_unscale = not self.is_gradient_accumulation_boundary()\n with amp.scale_loss(loss,\n self.optimizer,\n delay_unscale=delay_unscale) as scaled_loss:\n scaled_loss.backward()\n elif self.fp16_enabled():\n self.optimizer.backward(loss)\n else:\n loss.backward()\n\n if self.wall_clock_breakdown():\n self.timers('backward_inner').stop()\n self.timers('backward_inner_microstep').stop()\n\n if self.wall_clock_breakdown():\n self.timers('backward_allreduce_microstep').start()\n self.timers('backward_allreduce').start()\n\n if self.enable_backward_allreduce:\n self.allreduce_gradients()\n\n if self.wall_clock_breakdown():\n self.timers('backward_allreduce').stop()\n self.timers('backward_allreduce_microstep').stop()\n self.timers('backward').stop()\n self.timers('backward_microstep').stop()\n\n if release_loss:\n # loss.data = None\n pass\n\n return loss\n\n def is_gradient_accumulation_boundary(self):\n \"\"\"Query whether the current micro-batch is at the boundary of\n gradient accumulation, and thus will trigger gradient reductions and\n an optimizer step.\n\n Returns:\n bool: if the current step is a gradient accumulation boundary.\n \"\"\"\n return (self.micro_steps + 1) % \\\n self.gradient_accumulation_steps() == 0\n\n def zero_grad(self):\n \"\"\"\n Zero parameter grads.\n \"\"\"\n for param_name, param in self.module.named_parameters():\n param.grad = None\n\n def clip_fp32_gradients(self):\n torch.nn.utils.clip_grad_norm_(parameters=self.module.parameters(),\n max_norm=self.gradient_clipping())\n\n def _take_model_step(self, lr_kwargs):\n if self.gradient_clipping() > 0.0:\n if not self.fp16_enabled() and not self.amp_enabled():\n self.clip_fp32_gradients()\n elif self.amp_enabled():\n # AMP's recommended way of doing clipping\n # https://nvidia.github.io/apex/advanced.html#gradient-clipping\n master_params = amp.master_params(self.optimizer)\n torch.nn.utils.clip_grad_norm_(parameters=master_params,\n max_norm=self.gradient_clipping())\n self.optimizer.step()\n\n #zero grad in basic optimizer could be unreliable and may not exhibit\n #the behaviour that we want\n if not self.zero_optimization() and not self.fp16_enabled(\n ) and not self.amp_enabled():\n self.zero_grad()\n else:\n self.optimizer.zero_grad()\n\n report_progress = self.global_rank == 0 
if self.global_rank else True\n\n        # Check overflow here since in DS fp16 optimizer, the overflow is updated in the above step() function.\n        overflow = False\n        if hasattr(self.optimizer, 'overflow'):\n            overflow = self.optimizer.overflow\n\n        if overflow:\n            self.skipped_steps += 1\n        else:\n            if self.lr_scheduler is not None:\n                self.lr_scheduler.step(**(lr_kwargs or {}))\n\n        if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0:\n            self._report_progress(self.global_steps + 1)\n\n        self.global_steps += 1\n        self.global_samples += self.train_batch_size()\n\n    def step(self, lr_kwargs=None):\n        r\"\"\"Execute the weight update step after forward and backward propagation\n        on effective_train_batch.\n        \"\"\"\n        if self.wall_clock_breakdown():\n            self.timers('step_microstep').start()\n            self.timers('step').start()\n\n        assert self.optimizer is not None, \"must provide optimizer during \" \\\n            \"init in order to use step\"\n        report_progress = self.global_rank == 0 if self.global_rank else True\n\n        # Update the model when we reach gradient accumulation boundaries\n        if self.is_gradient_accumulation_boundary():\n            if self.progressive_layer_drop:\n                self.progressive_layer_drop.update_state(self.global_steps)\n\n            self._take_model_step(lr_kwargs)\n\n        self.tput_timer.stop(report_progress)\n\n        # Log learning rate\n        if self.tensorboard_enabled():\n            if self.is_gradient_accumulation_boundary():\n                if self.global_rank == 0:\n                    self.summary_events = [(f'Train/Samples/lr',\n                                            self.get_lr()[0],\n                                            self.global_samples)]\n                    for event in self.summary_events:  # write_summary_events\n                        self.summary_writer.add_scalar(event[0], event[1], event[2])\n                    if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):\n                        self.summary_events.append((f'Train/Samples/loss_scale',\n                                                    self.optimizer.cur_scale,\n                                                    self.global_samples))\n                    for event in self.summary_events:  # write_summary_events\n                        self.summary_writer.add_scalar(event[0], event[1], event[2])\n                    self.summary_writer.flush()\n\n        if self.wall_clock_breakdown():\n            self.timers('step').stop()\n            self.timers('step_microstep').stop()\n            timer_names = [\n                'forward_microstep',\n                'backward_microstep',\n                'backward_inner_microstep',\n                'backward_allreduce_microstep',\n                'step_microstep'\n            ]\n            self.timers.log(names=timer_names, memory_breakdown=self.memory_breakdown())\n\n            # Log timing\n            if self.is_gradient_accumulation_boundary():\n                if self.tensorboard_enabled():\n                    if self.global_rank == 0:\n                        self.summary_events = [\n                            (f'Train/Samples/elapsed_time_ms_forward',\n                             self.timers('forward').elapsed(reset=False) * 1000.0,\n                             self.global_samples),\n                            (f'Train/Samples/elapsed_time_ms_backward',\n                             self.timers('backward').elapsed(reset=False) * 1000.0,\n                             self.global_samples),\n                            (f'Train/Samples/elapsed_time_ms_backward_inner',\n                             self.timers('backward_inner').elapsed(reset=False) * 1000.0,\n                             self.global_samples),\n                            (f'Train/Samples/elapsed_time_ms_backward_allreduce',\n                             self.timers('backward_allreduce').elapsed(reset=False) *\n                             1000.0,\n                             self.global_samples),\n                            (f'Train/Samples/elapsed_time_ms_step',\n                             self.timers('step').elapsed(reset=False) * 1000.0,\n                             self.global_samples)\n                        ]\n                        for event in self.summary_events:  # write_summary_events\n                            self.summary_writer.add_scalar(event[0], event[1], event[2])\n                        self.summary_writer.flush()\n\n            if self.wall_clock_breakdown():\n                self.timers.log([\n                    'forward',\n                    'backward',\n                    'backward_inner',\n                    'backward_allreduce',\n                    'step'\n                ])\n\n        self.micro_steps += 1\n\n    def _get_optimizer_param(self, param_name):\n        result = []\n        if not self.optimizer:\n            return result\n        for group 
in self.optimizer.param_groups:\n if param_name in group:\n result.append(group[param_name])\n else:\n result.append(0.0)\n return result\n\n def get_lr(self):\n return self._get_optimizer_param('lr')\n\n def get_type(self):\n return self._get_optimizer_param('type')\n\n def get_mom(self):\n if self.optimizer_name() in ['SGD', 'RMSprop']:\n return self._get_optimizer_param('momentum')\n else:\n return self._get_optimizer_param('betas')\n\n def get_pld_theta(self):\n if self.progressive_layer_drop:\n return self.progressive_layer_drop.get_theta()\n else:\n return None\n\n def _report_progress(self, step):\n lr = self.get_lr()\n mom = self.get_mom()\n log_dist(f'step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}',\n ranks=[0])\n\n def allreduce_bucket(self, bucket):\n tensor = self.flatten(bucket)\n\n tensor_to_allreduce = tensor\n\n if self.allreduce_always_fp32():\n tensor_to_allreduce = tensor.float()\n\n if self.postscale_gradients():\n if self.gradient_predivide_factor() != 1.0:\n tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor())\n\n dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)\n\n if self.gradient_average:\n if self.gradient_predivide_factor() != self.dp_world_size:\n tensor_to_allreduce.mul_(self.gradient_predivide_factor() /\n self.dp_world_size)\n else:\n tensor_to_allreduce.div_(self.dp_world_size)\n dist.all_reduce(tensor_to_allreduce, group=self.data_parallel_group)\n\n if self.allreduce_always_fp32() and tensor is not tensor_to_allreduce:\n tensor.copy_(tensor_to_allreduce)\n\n return tensor\n\n def allreduce_and_copy(self, small_bucket):\n allreduced = self.allreduce_bucket(small_bucket)\n for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):\n buf.copy_(synced)\n\n def allreduce_no_retain(self, bucket, numel_per_bucket=500000000):\n small_bucket = []\n numel = 0\n for tensor in bucket:\n small_bucket.append(tensor)\n numel = numel + tensor.numel()\n if numel > numel_per_bucket:\n self.allreduce_and_copy(small_bucket)\n small_bucket = []\n numel = 0\n if len(small_bucket) > 0:\n self.allreduce_and_copy(small_bucket)\n\n def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000):\n grads = []\n for param_name, param in self.module.named_parameters():\n if param.grad is None:\n # In cases where there is an imbalance of empty grads across\n # ranks we must create empty grads, this will ensure that every\n # rank is reducing the same size. In some cases it may make\n # sense in the future to support the ability to average not\n # w.r.t. 
world size but with a different value.\n param.grad = torch.zeros(param.size(),\n dtype=param.dtype,\n device=param.device)\n grads.append(param.grad.data)\n else:\n grad_data = param.grad.data\n if self.sparse_gradients_enabled(\n ) and param_name in self.csr_tensor_module_names:\n grads.append(CSRTensor(grad_data))\n else:\n grads.append(grad_data)\n\n split_buckets = split_half_float_double_csr(grads)\n\n for i, bucket_tuple in enumerate(split_buckets):\n bucket_type, bucket = bucket_tuple\n if bucket_type == CSRTensor.type():\n self.csr_allreduce_no_retain(bucket)\n else:\n self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer)\n\n def csr_allreduce_no_retain(self, bucket):\n allreduced_csrs = self.csr_allreduce_bucket(bucket)\n # Densify csr tensor and copy back to original location\n for csr in allreduced_csrs:\n dense_tensor = csr.to_dense()\n csr.orig_dense_tensor.copy_(dense_tensor)\n\n def csr_allreduce_bucket(self, bucket):\n csr_list = []\n for csr in bucket:\n csr_list.append(self.csr_allreduce(csr))\n return csr_list\n\n def csr_allreduce(self, csr):\n # Pre-divide for fp16 stability\n csr.values.div_(self.dp_world_size)\n\n indices_device_list = self.csr_all_gather(csr.indices)\n values_device_list = self.csr_all_gather(csr.values)\n\n csr.indices = torch.cat(indices_device_list)\n csr.values = torch.cat(values_device_list)\n return csr\n\n def csr_all_gather(self, value):\n my_size = torch.LongTensor([value.size()[0]]).to(self.device)\n all_sizes = self.all_gather_scalar(my_size)\n max_size = torch.cat(all_sizes).max()\n fill_size = (max_size - my_size)\n\n assert value.dim() in [1, 2]\n if value.dim() == 1:\n if fill_size > 0:\n value = torch.cat([value, value.new_zeros(fill_size)])\n tensor_list = [value.new_zeros(max_size) for _ in range(self.dp_world_size)]\n else:\n if fill_size > 0:\n value = torch.cat([value, value.new_zeros(fill_size, value.size()[1])])\n tensor_list = [\n value.new_zeros(max_size,\n value.size()[1]) for _ in range(self.dp_world_size)\n ]\n\n dist.all_gather(tensor_list, value, group=self.data_parallel_group)\n tensors = []\n for dev_idx, t in enumerate(tensor_list):\n size = all_sizes[dev_idx][0]\n tensors.append(\n t.index_select(0,\n torch.LongTensor(range(size)).to(self.device)))\n\n return tensors\n\n def all_gather_scalar(self, value):\n tensor_list = [value.new_zeros(value.size()) for _ in range(self.dp_world_size)]\n dist.all_gather(tensor_list, value, group=self.data_parallel_group)\n return tensor_list\n\n def module_state_dict(self, destination=None, prefix='', keep_vars=False):\n sd = self.module.state_dict(destination, prefix, keep_vars)\n return sd\n\n def load_module_state_dict(self, state_dict, strict=True):\n self.module.load_state_dict(state_dict, strict=strict)\n\n def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank):\n filename = 'zero_pp_rank_{}'.format(dp_rank)\n zero_ckpt_name = os.path.join(\n checkpoints_path,\n str(tag),\n filename + '_mp_rank_{:02d}'.format(mp_rank) + '_optim_states.pt')\n return zero_ckpt_name\n\n def _get_zero_ckpt_name(self, checkpoints_path, tag):\n mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()\n pp_rank = torch.distributed.get_rank(group=self.optimizer.dp_process_group)\n return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank)\n\n def _get_ckpt_name(self, checkpoints_path, tag):\n mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()\n if self.zero_optimization_partition_weights():\n filename = 
'zero_pp_rank_{}'.format(\n torch.distributed.get_rank(group=self.optimizer.dp_process_group))\n ckpt_name = os.path.join(\n checkpoints_path,\n str(tag),\n filename + '_mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')\n else:\n ckpt_name = os.path.join(\n checkpoints_path,\n str(tag),\n 'mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')\n return ckpt_name\n\n def load_checkpoint(self,\n load_dir,\n tag=None,\n load_module_strict=True,\n load_optimizer_states=True,\n load_lr_scheduler_states=True):\n \"\"\"Load training checkpoint\n\n Arguments:\n load_dir: Required. Directory to load the checkpoint from\n tag: Checkpoint tag used as a unique identifier for checkpoint, if not provided will attempt to load tag in 'latest' file\n load_module_strict: Optional. Boolean to strictly enforce that the keys in state_dict of module and checkpoint match.\n load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance\n load_lr_scheduler_states: Optional. Boolean to add the learning rate scheduler states from Checkpoint.\n Returns:\n A tuple of ``load_path`` and ``client_state``.\n\n *``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed.\n\n *``client_state``: State dictionary used for loading required training states in the client code.\n \"\"\"\n\n if tag is None:\n latest_path = os.path.join(load_dir, 'latest')\n if os.path.isfile(latest_path):\n with open(latest_path, 'r') as fd:\n tag = fd.read().strip()\n else:\n logger.warning(f\"Unable to find latest file at {latest_path}, if trying to load latest \" \\\n \"checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint.\")\n return None, None\n\n load_path, client_states = self._load_checkpoint(load_dir,\n tag,\n load_module_strict=load_module_strict,\n load_optimizer_states=load_optimizer_states,\n load_lr_scheduler_states=load_lr_scheduler_states)\n\n if self.zero_optimization() and load_path is not None:\n self._load_zero_checkpoint(load_dir,\n tag,\n load_optimizer_states=load_optimizer_states)\n\n return load_path, client_states\n\n def _load_checkpoint(self,\n load_dir,\n tag,\n load_module_strict=True,\n load_optimizer_states=True,\n load_lr_scheduler_states=True):\n\n load_path = self._get_ckpt_name(load_dir, tag)\n\n if not os.path.exists(load_path):\n logger.warn(\n 'Client provided checkpoint load path: {} does not exist ... 
skip checkpoint load'\n .format(load_path))\n return None, None\n\n logger.info(f'rank: {self.global_rank} loading checkpoint: {load_path}')\n checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)\n\n if isinstance(self.module, PipelineModule):\n # Pipeline parallelism uses this to load its own checkpoint files.\n self._curr_ckpt_path = os.path.join(load_dir, tag)\n\n self.load_module_state_dict(state_dict=checkpoint['module'],\n strict=load_module_strict)\n if self.optimizer is not None and not self.zero_optimization():\n if self.fp16_enabled():\n self.optimizer.load_state_dict(\n checkpoint['optimizer'],\n load_optimizer_states=load_optimizer_states)\n elif load_optimizer_states:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if load_lr_scheduler_states and self.lr_scheduler is not None:\n self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n\n self.csr_tensor_module_names = checkpoint['csr_tensor_module_names']\n self.global_steps = checkpoint['global_steps']\n self.global_samples = checkpoint.get('global_samples',\n self.global_steps * self.train_batch_size())\n self.skipped_steps = checkpoint['skipped_steps']\n self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size']\n self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size']\n deepspeed_states = [\n 'module',\n 'optimizer',\n 'lr_scheduler',\n 'csr_tensor_module_names',\n 'skipped_steps',\n 'global_steps',\n 'dp_world_size',\n 'mp_world_size'\n ]\n client_state = {\n key: value\n for key,\n value in checkpoint.items() if not key in deepspeed_states\n }\n\n return load_path, client_state\n\n def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True):\n zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag)\n if zero_sd_list is None:\n return\n\n self.optimizer.load_state_dict(\n state_dict_list=zero_sd_list,\n load_optimizer_states=load_optimizer_states,\n load_from_fp32_weights=self.zero_load_from_fp32_weights())\n print(\n f'loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}'\n )\n\n def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size):\n zero_ckpt_names = []\n for dp_rank in range(dp_world_size):\n ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir,\n tag=tag,\n mp_rank=mp_rank,\n dp_rank=dp_rank)\n zero_ckpt_names.append(ckpt_name)\n\n return zero_ckpt_names\n\n def _get_all_zero_checkpoint_names(self,\n load_dir,\n tag,\n mp_world_size,\n dp_world_size):\n zero_ckpt_names = []\n for mp_rank in range(mp_world_size):\n mp_rank_ckpt_names = self._get_mp_rank_zero_checkpoint_names(\n load_dir=load_dir,\n tag=tag,\n mp_rank=mp_rank,\n dp_world_size=dp_world_size)\n zero_ckpt_names += mp_rank_ckpt_names\n\n return zero_ckpt_names\n\n def _get_all_zero_checkpoints(self, load_dir, tag):\n mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()\n zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(\n load_dir=load_dir,\n tag=tag,\n mp_rank=mp_rank,\n dp_world_size=self.loaded_checkpoint_dp_world_size)\n invalid_zero_ckpt_paths = []\n for i, ckpt_name in enumerate(zero_ckpt_names):\n if not os.path.exists(ckpt_name):\n # transparently handle the old file pattern for optim_states\n if 'optim_states.pt' in ckpt_name:\n ckpt_name_try = ckpt_name.replace(\"_optim_states.pt\",\n \"optim_states.pt\")\n if os.path.exists(ckpt_name_try):\n zero_ckpt_names[i] = ckpt_name_try\n continue\n invalid_zero_ckpt_paths.append(ckpt_name)\n\n if 
len(invalid_zero_ckpt_paths) > 0:\n logger.warn(\n f\"The following zero checkpoints paths are missing: {invalid_zero_ckpt_paths}\"\n )\n return None\n\n zero_sd_list = []\n for ckpt_name in zero_ckpt_names:\n zero_sd_list.append(torch.load(ckpt_name, map_location='cpu'))\n\n zero_optimizer_sd = [sd['optimizer_state_dict'] for sd in zero_sd_list]\n print(\n f\"successfully loaded {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}\"\n )\n return zero_optimizer_sd\n\n def _checkpoint_tag_validation(self, tag):\n if self.checkpoint_tag_validation_enabled():\n s_hash = hashlib.sha1(tag.encode())\n bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device)\n max_bhash = bhash.clone()\n min_bhash = bhash.clone()\n dist.all_reduce(max_bhash, op=torch.distributed.ReduceOp.MAX)\n dist.all_reduce(min_bhash, op=torch.distributed.ReduceOp.MIN)\n valid = all(min_bhash == bhash) and all(max_bhash == bhash)\n msg = f\"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across \" \\\n \"all ranks. Including rank unique information in checkpoint tag could cause issues when \" \\\n \"restoring with different world sizes.\"\n if self.checkpoint_tag_validation_fail():\n assert valid, msg\n elif not valid:\n logger.warning(msg)\n\n def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True):\n r\"\"\"Save training checkpoint\n\n Arguments:\n save_dir: Required. Directory for saving the checkpoint\n tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint, global step is\n used if not provided. Tag name must be the same across all ranks.\n client_state: Optional. State dictionary used for saving required training states in the client code.\n save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint.\n\n Important: all processes must call this method and not just the process with rank 0. It is\n because each process needs to save its master weights and scheduler+optimizer states. 
This\n method will hang waiting to synchronize with other processes if it's called just for the\n process with rank 0.\n \"\"\"\n\n if self.zero_optimization_partition_weights():\n # Prepare for state_dict() by ensuring all parameters are partitioned\n self.optimizer.save_checkpoint_prologue()\n\n # This is to make sure the checkpoint names are created without collision\n # There seems to be issue creating them in parallel\n\n # Ensure save_dir directory exists\n os.makedirs(save_dir, exist_ok=True)\n\n if tag is None:\n tag = f\"global_step{self.global_steps}\"\n\n # Ensure tag is a string\n tag = str(tag)\n\n # Ensure checkpoint tag is consistent across ranks\n self._checkpoint_tag_validation(tag)\n\n if self.save_non_zero_checkpoint:\n self._create_checkpoint_file(save_dir, tag, False)\n self._save_checkpoint(save_dir, tag, client_state=client_state)\n\n if self.save_zero_checkpoint:\n self._create_zero_checkpoint_files(save_dir, tag)\n self._save_zero_checkpoint(save_dir, tag)\n\n # Save latest checkpoint tag\n if save_latest:\n with open(os.path.join(save_dir, 'latest'), 'w') as fd:\n fd.write(tag)\n\n if self.zero_optimization_partition_weights():\n self.optimizer.save_checkpoint_epilogue()\n\n return True\n\n def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint):\n name_function = self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name\n try:\n checkpoint_name = name_function(save_dir, tag)\n ensure_directory_exists(checkpoint_name)\n except:\n logger.error(f'Failed saving model checkpoint to {save_dir} with tag {tag}')\n return False\n\n return True\n\n def _create_zero_checkpoint_files(self, save_dir, tag):\n success = True\n # zero checkpoint files are created sequentially\n for rank in range(self.world_size):\n if rank == self.global_rank:\n success = self._create_checkpoint_file(save_dir, tag, True)\n\n dist.barrier()\n\n return success\n\n def _save_checkpoint(self, save_dir, tag, client_state={}):\n\n save_path = self._get_ckpt_name(save_dir, tag)\n # A hack to save the checkpointing directory. Pipeline parallelism overrides\n # module_state_dict() and uses this path to save the model. 
module_state_dict()\n # then instead just returns None.\n self._curr_ckpt_path = os.path.join(save_dir, tag)\n\n state = dict(\n module=self.module_state_dict(),\n optimizer=self.optimizer.state_dict()\n if self.optimizer and not self.zero_optimization() else None,\n lr_scheduler=self.lr_scheduler.state_dict()\n if self.lr_scheduler is not None else None,\n csr_tensor_module_names=self.csr_tensor_module_names,\n skipped_steps=self.skipped_steps,\n global_steps=self.global_steps,\n global_samples=self.global_samples,\n dp_world_size=self.dp_world_size,\n mp_world_size=self.mp_world_size,\n )\n state.update(client_state)\n\n log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0])\n #logger.info('Saving model checkpoint: {}'.format(save_path))\n torch.save(state, save_path)\n self._curr_save_path = None\n\n def _get_param_shapes(self):\n param_shapes = OrderedDict()\n for name, param in self.module.named_parameters():\n param_shapes[name] = param.ds_shape if hasattr(param,\n \"ds_shape\") else param.shape\n # print(f\"saving param {name} {param_shapes[name]}\")\n return param_shapes\n\n def _copy_recovery_script(self, save_path):\n base_dir = os.path.dirname(os.path.dirname(__file__))\n script = \"zero_to_fp32.py\"\n src = os.path.join(base_dir, \"utils\", script)\n dst = os.path.join(save_path, script)\n logger.info(f\"creating recovery script {dst}\")\n copyfile(src, dst)\n # make executable\n os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)\n\n def _save_zero_checkpoint(self, save_path, tag):\n zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag)\n zero_sd = dict(\n optimizer_state_dict=self.optimizer.state_dict(),\n param_shapes=self._get_param_shapes(),\n )\n torch.save(zero_sd, zero_checkpoint_name)\n self._copy_recovery_script(save_path)\n logger.info('zero checkpoint saved {}'.format(zero_checkpoint_name))\n\n def _zero3_consolidated_fp16_state_dict(self):\n \"\"\"\n\n Get a full non-partitioned state_dict with fp16 weights on cpu.\n\n Important: this function must be called on all ranks and not just rank 0.\n\n This is similar to nn.Module.state_dict (modelled after _save_to_state_dict), but:\n\n 1. consolidates the weights from different partitions on gpu0\n 2. works on one layer at a time to require as little gpu0 memory as possible, by\n moving the already consolidated weights to cpu\n 3. 
takes care to keep the shared params shared when gradually copying the params to cpu\n\n Returns:\n a consolidated fp16 ``state_dict`` on cpu on rank 0, ``None`` on other ranks\n\n \"\"\"\n import deepspeed\n\n if not self.zero_optimization_partition_weights():\n raise ValueError(\"this function requires ZeRO-3 mode\")\n\n state_dict = OrderedDict() if torch.distributed.get_rank() == 0 else None\n shared_weights = {}\n\n def get_layer_state_dict(module, prefix=\"\"):\n # gather one layer at a time to be memory-efficient\n with deepspeed.zero.GatheredParameters(list(\n module.parameters(recurse=False))):\n if torch.distributed.get_rank() == 0:\n for name, param in module.named_parameters(recurse=False):\n if param is None:\n continue\n key = prefix + name\n # for shared weights we want to make sure not to unshare them when copying to cpu\n data_ptr_id = param.storage().data_ptr()\n if data_ptr_id in shared_weights:\n # shared weights\n # print(f\"`{key}` is shared with `{shared_weights[data_ptr_id]}`\")\n state_dict[key] = state_dict[shared_weights[data_ptr_id]]\n else:\n state_dict[key] = param.detach().cpu()\n shared_weights[data_ptr_id] = key\n #print(f\"param {name} {param.shape}\")\n #print(f\"param {key} {param.shape} {state_dict[key].storage().data_ptr()}\")\n\n # now buffers - not sure if need to take care of potentially shared weights here\n for name, buf in module.named_buffers(recurse=False):\n if buf is not None and name not in module._non_persistent_buffers_set:\n state_dict[prefix + name] = buf.detach().cpu()\n\n for name, child in module.named_children():\n if child is not None:\n get_layer_state_dict(child, prefix + name + \".\")\n\n see_memory_usage(\"before get_layer_state_dict\", force=False)\n get_layer_state_dict(self.module, prefix=\"\")\n see_memory_usage(\"after get_layer_state_dict\", force=False)\n\n return state_dict\n\n def save_fp16_model(self, save_dir, save_filename=\"pytorch_model.bin\"):\n r\"\"\"Save fp16 model weights\n\n This method saves the fp16 model weights at the desired destination.\n\n Arguments:\n save_dir: Required. Directory for saving the model\n save_filename: Optional. Filename to save to. Defaults to ``pytorch_model.bin``\n\n Important: all processes must call this method and not just the process with rank 0. It is\n because the processes need to work in sync to gather the weights. This method will hang\n waiting to synchronize with other processes if it's called just for the process with rank 0.\n\n \"\"\"\n\n path = os.path.join(save_dir, save_filename)\n\n if self.zero_optimization_partition_weights():\n if self.zero_gather_fp16_weights_on_model_save():\n # consolidation is expensive in time and memory and therefore isn't a default\n state_dict = self._zero3_consolidated_fp16_state_dict()\n else:\n # the model will be bogus if not consolidated so don't confuse the user by saving it\n logger.info(\n f\"Did not save the model {path} because `stage3_gather_fp16_weights_on_model_save` is False\"\n )\n return\n else:\n state_dict = self.module.state_dict()\n\n if torch.distributed.get_rank() == 0:\n os.makedirs(save_dir, exist_ok=True)\n logger.info(f\"Saving model weights to {path}\")\n torch.save(state_dict, path)\n" ]
[ [ "torch.distributed.all_gather", "torch.distributed.get_world_size", "torch.distributed.get_rank", "torch.load", "torch.distributed.broadcast", "torch.save", "torch.utils.data.SequentialSampler", "torch.device", "torch.distributed.is_initialized", "torch.distributed.new_group", "torch.distributed.barrier", "torch.optim.Adam", "torch.distributed.all_reduce", "torch.is_tensor", "torch.optim.AdamW", "torch.cat", "torch._C.is_grad_enabled", "torch.cuda.set_device" ] ]
notmatthancock/notmatthancock.github.io
[ "abcd91cc7c2653c5243fe96ba2fd681ec03930bb" ]
[ "code/py/test_statsrecorder.py" ]
[ "import numpy as np\nimport statsrecorder as sr\n\nrs = np.random.RandomState(323)\n\nmystats = sr.StatsRecorder()\n\n# Hold all observations in \"data\" to check for correctness.\nndims = 42\ndata = np.empty((0, ndims))\n\nfor i in range(1000):\n nobserv = rs.randint(10,101)\n newdata = rs.randn(nobserv, ndims)\n data = np.vstack((data, newdata))\n\n # Update stats recorder object\n mystats.update(newdata)\n\n # Check stats recorder object is doing its business right.\n assert np.allclose(mystats.mean, data.mean(axis=0))\n assert np.allclose(mystats.std, data.std(axis=0))\n" ]
[ [ "numpy.random.RandomState", "numpy.empty", "numpy.vstack" ] ]
samtygier-stfc/SScanSS-2
[ "0df2160c32fdc533f7d391735bd55d524e253f4d" ]
[ "sscanss/ui/dialogs/insert.py" ]
[ "import numpy as np\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom sscanss.config import path_for, settings\nfrom sscanss.core.math import Plane, Matrix33, Vector3, clamp, map_range, trunc, VECTOR_EPS\nfrom sscanss.core.geometry import mesh_plane_intersection\nfrom sscanss.core.util import Primitives, DockFlag, StrainComponents, PointType, PlaneOptions, Attributes\nfrom sscanss.ui.widgets import (FormGroup, FormControl, GraphicsView, GraphicsScene, create_tool_button, FormTitle,\n create_scroll_area, CompareValidator, GraphicsPointItem, Grid, create_icon)\nfrom .managers import PointManager\n\n\nclass InsertPrimitiveDialog(QtWidgets.QWidget):\n \"\"\"Provides UI for typing in measurement/fiducial points\n\n :param primitive: primitive type\n :type primitive: Primitives\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Upper\n\n def __init__(self, primitive, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent_model = self.parent.presenter.model\n self.parent.scenes.switchToSampleScene()\n self.primitive = primitive\n\n self.main_layout = QtWidgets.QVBoxLayout()\n\n self.textboxes = {}\n name = self.parent_model.uniqueKey(self.primitive.value)\n self.mesh_args = {'name': name}\n if self.primitive == Primitives.Tube:\n self.mesh_args.update({'outer_radius': 100.000, 'inner_radius': 50.000, 'height': 200.000})\n elif self.primitive == Primitives.Sphere:\n self.mesh_args.update({'radius': 100.000})\n elif self.primitive == Primitives.Cylinder:\n self.mesh_args.update({'radius': 100.000, 'height': 200.000})\n else:\n self.mesh_args.update({'width': 50.000, 'height': 100.000, 'depth': 200.000})\n\n self.createPrimitiveSwitcher()\n self.createFormInputs()\n\n button_layout = QtWidgets.QHBoxLayout()\n self.create_primitive_button = QtWidgets.QPushButton('Create')\n self.create_primitive_button.clicked.connect(self.createPrimiviteButtonClicked)\n button_layout.addWidget(self.create_primitive_button)\n button_layout.addStretch(1)\n\n self.main_layout.addLayout(button_layout)\n self.main_layout.addStretch(1)\n\n self.setLayout(self.main_layout)\n\n self.title = 'Insert {}'.format(self.primitive.value)\n self.setMinimumWidth(450)\n self.textboxes['name'].setFocus()\n\n def createPrimitiveSwitcher(self):\n switcher_layout = QtWidgets.QHBoxLayout()\n switcher = create_tool_button(style_name='MenuButton', status_tip='Open dialog for a different primitive')\n switcher.setArrowType(QtCore.Qt.DownArrow)\n switcher.setPopupMode(QtWidgets.QToolButton.InstantPopup)\n switcher.setMenu(self.parent.primitives_menu)\n switcher_layout.addStretch(1)\n switcher_layout.addWidget(switcher)\n self.main_layout.addLayout(switcher_layout)\n\n def createFormInputs(self):\n self.form_group = FormGroup()\n for key, value in self.mesh_args.items():\n pretty_label = key.replace('_', ' ').title()\n\n if key == 'name':\n control = FormControl(pretty_label, value, required=True)\n control.form_lineedit.textChanged.connect(self.nameCheck)\n else:\n control = FormControl(pretty_label, value, desc='mm', required=True, number=True)\n control.range(0, None, min_exclusive=True)\n\n self.textboxes[key] = control\n self.form_group.addControl(control)\n\n if self.primitive == Primitives.Tube:\n outer_radius = self.textboxes['outer_radius']\n inner_radius = self.textboxes['inner_radius']\n\n outer_radius.compareWith(inner_radius, CompareValidator.Operator.Greater)\n inner_radius.compareWith(outer_radius, CompareValidator.Operator.Less)\n\n 
self.main_layout.addWidget(self.form_group)\n self.form_group.groupValidation.connect(self.formValidation)\n\n def nameCheck(self, value):\n if self.parent_model.all_sample_key == value:\n self.textboxes['name'].isInvalid(f'\"{self.parent_model.all_sample_key}\" is a reserved name')\n\n def formValidation(self, is_valid):\n if is_valid:\n self.create_primitive_button.setEnabled(True)\n else:\n self.create_primitive_button.setDisabled(True)\n\n def createPrimiviteButtonClicked(self):\n for key, textbox in self.textboxes.items():\n value = textbox.value\n self.mesh_args[key] = value\n\n self.parent.presenter.addPrimitive(self.primitive, self.mesh_args)\n new_name = self.parent_model.uniqueKey(self.primitive.value)\n self.textboxes['name'].value = new_name\n\n\nclass InsertPointDialog(QtWidgets.QWidget):\n \"\"\"Provides UI for typing in measurement/fiducial points\n\n :param point_type: point type\n :type point_type: PointType\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Upper\n\n def __init__(self, point_type, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent_model = parent.presenter.model\n self.parent.scenes.switchToSampleScene()\n self.point_type = point_type\n self.title = 'Add {} Point'.format(point_type.value)\n self.main_layout = QtWidgets.QVBoxLayout()\n unit = 'mm'\n self.form_group = FormGroup()\n self.x_axis = FormControl('X', 0.0, required=True, desc=unit, number=True)\n self.y_axis = FormControl('Y', 0.0, required=True, desc=unit, number=True)\n self.z_axis = FormControl('Z', 0.0, required=True, desc=unit, number=True)\n self.form_group.addControl(self.x_axis)\n self.form_group.addControl(self.y_axis)\n self.form_group.addControl(self.z_axis)\n self.form_group.groupValidation.connect(self.formValidation)\n button_layout = QtWidgets.QHBoxLayout()\n self.execute_button = QtWidgets.QPushButton(self.title)\n self.execute_button.clicked.connect(self.executeButtonClicked)\n button_layout.addWidget(self.execute_button)\n button_layout.addStretch(1)\n\n self.main_layout.addWidget(self.form_group)\n self.main_layout.addLayout(button_layout)\n self.main_layout.addStretch(1)\n self.setLayout(self.main_layout)\n\n self.setMinimumWidth(450)\n\n def formValidation(self, is_valid):\n if is_valid:\n self.execute_button.setEnabled(True)\n else:\n self.execute_button.setDisabled(True)\n\n def executeButtonClicked(self):\n point = [self.x_axis.value, self.y_axis.value, self.z_axis.value]\n self.parent.presenter.addPoints([(point, True)], self.point_type)\n\n\nclass InsertVectorDialog(QtWidgets.QWidget):\n \"\"\"Provides UI for adding measurement vectors using a variety of methods\n\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Upper\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent_model = parent.presenter.model\n self.parent.scenes.switchToSampleScene()\n self.title = 'Add Measurement Vectors'\n self.main_layout = QtWidgets.QVBoxLayout()\n spacing = 10\n self.main_layout.addSpacing(spacing)\n self.main_layout.addWidget(QtWidgets.QLabel('Measurement Point:'))\n self.points_combobox = QtWidgets.QComboBox()\n self.points_combobox.setView(QtWidgets.QListView())\n self.main_layout.addWidget(self.points_combobox)\n self.updatePointList()\n self.main_layout.addSpacing(spacing)\n\n layout = QtWidgets.QHBoxLayout()\n alignment_layout = QtWidgets.QVBoxLayout()\n alignment_layout.addWidget(QtWidgets.QLabel('Alignment:'))\n self.alignment_combobox = 
QtWidgets.QComboBox()\n self.alignment_combobox.setView(QtWidgets.QListView())\n self.alignment_combobox.setInsertPolicy(QtWidgets.QComboBox.InsertAtCurrent)\n self.updateAlignment()\n self.alignment_combobox.activated.connect(self.addNewAlignment)\n self.alignment_combobox.currentIndexChanged.connect(self.changeRenderedAlignment)\n alignment_layout.addWidget(self.alignment_combobox)\n alignment_layout.addSpacing(spacing)\n layout.addLayout(alignment_layout)\n\n self.detector_combobox = QtWidgets.QComboBox()\n self.detector_combobox.setView(QtWidgets.QListView())\n self.detector_combobox.addItems(list(self.parent_model.instrument.detectors.keys()))\n if len(self.parent_model.instrument.detectors) > 1:\n detector_layout = QtWidgets.QVBoxLayout()\n detector_layout.addWidget(QtWidgets.QLabel('Detector:'))\n detector_layout.addWidget(self.detector_combobox)\n size = self.detector_combobox.iconSize()\n self.detector_combobox.setItemIcon(0, create_icon(settings.value(settings.Key.Vector_1_Colour), size))\n self.detector_combobox.setItemIcon(1, create_icon(settings.value(settings.Key.Vector_2_Colour), size))\n detector_layout.addSpacing(spacing)\n layout.addSpacing(spacing)\n layout.addLayout(detector_layout)\n\n self.main_layout.addLayout(layout)\n\n self.main_layout.addWidget(QtWidgets.QLabel('Strain Component:'))\n self.component_combobox = QtWidgets.QComboBox()\n self.component_combobox.setView(QtWidgets.QListView())\n strain_components = [s.value for s in StrainComponents]\n self.component_combobox.addItems(strain_components)\n self.component_combobox.currentTextChanged.connect(self.toggleKeyInBox)\n self.main_layout.addWidget(self.component_combobox)\n self.main_layout.addSpacing(spacing)\n\n button_layout = QtWidgets.QHBoxLayout()\n self.execute_button = QtWidgets.QPushButton(self.title)\n self.execute_button.clicked.connect(self.executeButtonClicked)\n button_layout.addWidget(self.execute_button)\n button_layout.addStretch(1)\n\n self.createKeyInBox()\n\n self.reverse_checkbox = QtWidgets.QCheckBox('Reverse Direction of Vector')\n self.main_layout.addWidget(self.reverse_checkbox)\n self.main_layout.addSpacing(spacing)\n\n self.main_layout.addLayout(button_layout)\n self.main_layout.addStretch(1)\n self.setLayout(self.main_layout)\n self.parent_model.measurement_points_changed.connect(self.updatePointList)\n self.parent_model.measurement_vectors_changed.connect(self.updateAlignment)\n self.parent.scenes.rendered_alignment_changed.connect(self.alignment_combobox.setCurrentIndex)\n self.setMinimumWidth(450)\n\n def updatePointList(self):\n self.points_combobox.clear()\n point_list = ['All Points']\n point_list.extend(['{}'.format(i+1) for i in range(self.parent_model.measurement_points.size)])\n self.points_combobox.addItems(point_list)\n\n def updateAlignment(self):\n align_count = self.parent_model.measurement_vectors.shape[2]\n if align_count != self.alignment_combobox.count() - 1:\n self.alignment_combobox.clear()\n alignment_list = ['{}'.format(i + 1) for i in range(align_count)]\n alignment_list.append('Add New...')\n self.alignment_combobox.addItems(alignment_list)\n\n self.alignment_combobox.setCurrentIndex(self.parent.scenes.rendered_alignment)\n\n def addNewAlignment(self, index):\n if index == self.alignment_combobox.count() - 1:\n self.alignment_combobox.insertItem(index, '{}'.format(index + 1))\n self.alignment_combobox.setCurrentIndex(index)\n\n def changeRenderedAlignment(self, index):\n align_count = self.parent_model.measurement_vectors.shape[2]\n if 0 <= index < 
align_count:\n self.parent.scenes.changeRenderedAlignment(index)\n elif index >= align_count:\n self.parent.scenes.changeVisibility(Attributes.Vectors, False)\n\n def toggleKeyInBox(self, selected_text):\n strain_component = StrainComponents(selected_text)\n if strain_component == StrainComponents.custom:\n self.key_in_box.setVisible(True)\n self.form_group.validateGroup()\n else:\n self.key_in_box.setVisible(False)\n self.execute_button.setEnabled(True)\n\n def createKeyInBox(self):\n self.key_in_box = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout()\n\n self.form_group = FormGroup(FormGroup.Layout.Horizontal)\n self.x_axis = FormControl('X', 1.0, required=True, number=True, decimals=7)\n self.x_axis.range(-1.0, 1.0)\n self.y_axis = FormControl('Y', 0.0, required=True, number=True, decimals=7)\n self.y_axis.range(-1.0, 1.0)\n self.z_axis = FormControl('Z', 0.0, required=True, number=True, decimals=7)\n self.z_axis.range(-1.0, 1.0)\n self.form_group.addControl(self.x_axis)\n self.form_group.addControl(self.y_axis)\n self.form_group.addControl(self.z_axis)\n self.form_group.groupValidation.connect(self.formValidation)\n\n layout.addWidget(self.form_group)\n self.key_in_box.setLayout(layout)\n self.main_layout.addWidget(self.key_in_box)\n self.toggleKeyInBox(self.component_combobox.currentText())\n\n def formValidation(self, is_valid):\n self.execute_button.setDisabled(True)\n if is_valid:\n if np.linalg.norm([self.x_axis.value, self.y_axis.value, self.z_axis.value]) > VECTOR_EPS:\n self.x_axis.validation_label.setText('')\n self.execute_button.setEnabled(True)\n else:\n self.x_axis.validation_label.setText('Bad Normal')\n\n def executeButtonClicked(self):\n points = self.points_combobox.currentIndex() - 1\n\n selected_text = self.component_combobox.currentText()\n strain_component = StrainComponents(selected_text)\n\n alignment = self.alignment_combobox.currentIndex()\n detector = self.detector_combobox.currentIndex()\n check_state = self.reverse_checkbox.checkState()\n reverse = True if check_state == QtCore.Qt.Checked else False\n\n if strain_component == StrainComponents.custom:\n vector = [self.x_axis.value, self.y_axis.value, self.z_axis.value]\n else:\n vector = None\n\n self.parent.presenter.addVectors(points, strain_component, alignment, detector,\n key_in=vector, reverse=reverse)\n # New vectors are drawn by the scene manager after function ends\n self.parent.scenes._rendered_alignment = alignment\n\n def closeEvent(self, event):\n self.parent.scenes.changeRenderedAlignment(0)\n event.accept()\n\n\nclass PickPointDialog(QtWidgets.QWidget):\n \"\"\"Provides UI for selecting measurement points on a cross section of the sample\n\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Full\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent_model = parent.presenter.model\n self.parent.scenes.switchToSampleScene()\n self.title = 'Add Measurement Points Graphically'\n self.setMinimumWidth(500)\n\n self.plane_offset_range = (-1., 1.)\n self.slider_range = (-10000000, 10000000)\n\n self.sample_scale = 20\n self.path_pen = QtGui.QPen(QtGui.QColor(255, 0, 0), 0)\n self.point_pen = QtGui.QPen(QtGui.QColor(200, 0, 0), 0)\n\n self.main_layout = QtWidgets.QVBoxLayout()\n self.setLayout(self.main_layout)\n button_layout = QtWidgets.QHBoxLayout()\n self.help_button = create_tool_button(tooltip='Help', style_name='ToolButton',\n status_tip='Display shortcuts for the cross-section view',\n 
icon_path=path_for('question.png'))\n self.help_button.clicked.connect(self.showHelp)\n\n self.reset_button = create_tool_button(tooltip='Reset View', style_name='ToolButton',\n status_tip='Reset camera transformation of the cross-section view',\n icon_path=path_for('refresh.png'))\n self.execute_button = QtWidgets.QPushButton('Add Points')\n self.execute_button.clicked.connect(self.addPoints)\n button_layout.addWidget(self.help_button)\n button_layout.addWidget(self.reset_button)\n button_layout.addStretch(1)\n button_layout.addWidget(self.execute_button)\n self.main_layout.addLayout(button_layout)\n\n self.splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical)\n self.splitter.setChildrenCollapsible(False)\n self.main_layout.addWidget(self.splitter)\n self.createGraphicsView()\n self.reset_button.clicked.connect(self.view.reset)\n self.createControlPanel()\n\n self.prepareMesh()\n self.parent_model.sample_changed.connect(self.prepareMesh)\n self.parent_model.measurement_points_changed.connect(self.updateCrossSection)\n self.initializing = True\n\n def showEvent(self, event):\n if self.initializing:\n self.view.fitInView(self.view.anchor, QtCore.Qt.KeepAspectRatio)\n self.initializing = False\n\n super().showEvent(event)\n\n def closeEvent(self, event):\n self.parent.scenes.removePlane()\n event.accept()\n\n def prepareMesh(self):\n self.mesh = None\n samples = self.parent_model.sample\n for _, sample in samples.items():\n if self.mesh is None:\n self.mesh = sample.copy()\n else:\n self.mesh.append(sample)\n\n self.scene.clear()\n self.tabs.setEnabled(self.mesh is not None)\n if self.mesh is not None:\n self.setPlane(self.plane_combobox.currentText())\n else:\n self.parent.scenes.removePlane()\n self.view.reset()\n\n def updateStatusBar(self, point):\n if self.view.rect().contains(point):\n transform = self.view.scene_transform.inverted()[0]\n scene_pt = transform.map(self.view.mapToScene(point)) / self.sample_scale\n world_pt = [scene_pt.x(), scene_pt.y(), -self.old_distance] @ self.matrix.transpose()\n cursor_text = f'X: {world_pt[0]:.3f} Y: {world_pt[1]:.3f} Z: {world_pt[2]:.3f}'\n self.parent.cursor_label.setText(cursor_text)\n else:\n self.parent.cursor_label.clear()\n\n def createGraphicsView(self):\n self.scene = GraphicsScene(self.sample_scale, self)\n self.view = GraphicsView(self.scene)\n self.view.mouse_moved.connect(self.updateStatusBar)\n self.view.setMinimumHeight(350)\n self.splitter.addWidget(self.view)\n\n def createControlPanel(self):\n self.tabs = QtWidgets.QTabWidget()\n self.tabs.setMinimumHeight(250)\n self.tabs.setTabPosition(QtWidgets.QTabWidget.South)\n self.splitter.addWidget(self.tabs)\n\n self.createPlaneTab()\n self.createSelectionToolsTab()\n self.createGridOptionsTab()\n point_manager = PointManager(PointType.Measurement, self.parent)\n self.tabs.addTab(create_scroll_area(point_manager), 'Point Manager')\n\n def createPlaneTab(self):\n layout = QtWidgets.QVBoxLayout()\n layout.addWidget(QtWidgets.QLabel('Specify Plane:'))\n self.plane_combobox = QtWidgets.QComboBox()\n self.plane_combobox.setView(QtWidgets.QListView())\n self.plane_combobox.addItems([p.value for p in PlaneOptions])\n self.plane_combobox.currentTextChanged.connect(self.setPlane)\n self.createCustomPlaneBox()\n layout.addWidget(self.plane_combobox)\n layout.addWidget(self.custom_plane_widget)\n layout.addSpacing(20)\n\n slider_layout = QtWidgets.QHBoxLayout()\n slider_layout.addWidget(QtWidgets.QLabel('Plane Distance from Origin (mm):'))\n self.plane_lineedit = QtWidgets.QLineEdit()\n validator = 
QtGui.QDoubleValidator(self.plane_lineedit)\n validator.setNotation(QtGui.QDoubleValidator.StandardNotation)\n validator.setDecimals(3)\n self.plane_lineedit.setValidator(validator)\n self.plane_lineedit.textEdited.connect(self.updateSlider)\n self.plane_lineedit.editingFinished.connect(self.movePlane)\n slider_layout.addStretch(1)\n slider_layout.addWidget(self.plane_lineedit)\n layout.addLayout(slider_layout)\n self.plane_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self.plane_slider.setMinimum(self.slider_range[0])\n self.plane_slider.setMaximum(self.slider_range[1])\n self.plane_slider.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.plane_slider.setSingleStep(1)\n self.plane_slider.sliderMoved.connect(self.updateLineEdit)\n self.plane_slider.sliderReleased.connect(self.movePlane)\n layout.addWidget(self.plane_slider)\n layout.addStretch(1)\n\n plane_tab = QtWidgets.QWidget()\n plane_tab.setLayout(layout)\n self.tabs.addTab(create_scroll_area(plane_tab), 'Define Plane')\n\n def createSelectionToolsTab(self):\n layout = QtWidgets.QVBoxLayout()\n selector_layout = QtWidgets.QHBoxLayout()\n selector_layout.addWidget(QtWidgets.QLabel('Select Geometry of Points: '))\n self.button_group = QtWidgets.QButtonGroup()\n self.button_group.buttonClicked[int].connect(self.changeSceneMode)\n\n self.object_selector = create_tool_button(checkable=True, checked=True, tooltip='Select Points',\n status_tip='Select movable points from the cross-section view',\n style_name='MidToolButton', icon_path=path_for('select.png'))\n self.point_selector = create_tool_button(checkable=True, tooltip='Draw a Point',\n status_tip='Draw a single point at the selected position',\n style_name='MidToolButton', icon_path=path_for('point.png'))\n self.line_selector = create_tool_button(checkable=True, tooltip='Draw Points on Line',\n status_tip='Draw equally spaced points on the selected line',\n style_name='MidToolButton', icon_path=path_for('line_tool.png'))\n self.area_selector = create_tool_button(checkable=True, tooltip='Draw Points on Area',\n status_tip='Draw a grid of points on the selected area',\n style_name='MidToolButton', icon_path=path_for('area_tool.png'))\n\n self.button_group.addButton(self.object_selector, GraphicsScene.Mode.Select.value)\n self.button_group.addButton(self.point_selector, GraphicsScene.Mode.Draw_point.value)\n self.button_group.addButton(self.line_selector, GraphicsScene.Mode.Draw_line.value)\n self.button_group.addButton(self.area_selector, GraphicsScene.Mode.Draw_area.value)\n selector_layout.addWidget(self.object_selector)\n selector_layout.addWidget(self.point_selector)\n selector_layout.addWidget(self.line_selector)\n selector_layout.addWidget(self.area_selector)\n selector_layout.addStretch(1)\n\n self.createLineToolWidget()\n self.createAreaToolWidget()\n\n layout.addLayout(selector_layout)\n layout.addWidget(self.line_tool_widget)\n layout.addWidget(self.area_tool_widget)\n layout.addStretch(1)\n\n select_tab = QtWidgets.QWidget()\n select_tab.setLayout(layout)\n self.tabs.addTab(create_scroll_area(select_tab), 'Selection Tools')\n\n def createGridOptionsTab(self):\n layout = QtWidgets.QVBoxLayout()\n self.show_grid_checkbox = QtWidgets.QCheckBox('Show Grid')\n self.show_grid_checkbox.stateChanged.connect(self.showGrid)\n self.snap_to_grid_checkbox = QtWidgets.QCheckBox('Snap Selection to Grid')\n self.snap_to_grid_checkbox.stateChanged.connect(self.snapToGrid)\n self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)\n layout.addWidget(self.show_grid_checkbox)\n 
layout.addWidget(self.snap_to_grid_checkbox)\n self.createGridWidget()\n layout.addWidget(self.grid_widget)\n layout.addStretch(1)\n\n grid_tab = QtWidgets.QWidget()\n grid_tab.setLayout(layout)\n self.tabs.addTab(create_scroll_area(grid_tab), 'Grid Options')\n\n def createCustomPlaneBox(self):\n self.custom_plane_widget = QtWidgets.QWidget(self)\n layout = QtWidgets.QVBoxLayout()\n\n self.form_group = FormGroup(FormGroup.Layout.Horizontal)\n self.x_axis = FormControl('X', 1.0, required=True, number=True)\n self.x_axis.range(-1.0, 1.0)\n self.y_axis = FormControl('Y', 0.0, required=True, number=True)\n self.y_axis.range(-1.0, 1.0)\n self.z_axis = FormControl('Z', 0.0, required=True, number=True)\n self.z_axis.range(-1.0, 1.0)\n self.form_group.addControl(self.x_axis)\n self.form_group.addControl(self.y_axis)\n self.form_group.addControl(self.z_axis)\n self.form_group.groupValidation.connect(self.setCustomPlane)\n\n layout.addWidget(self.form_group)\n self.custom_plane_widget.setLayout(layout)\n\n def createLineToolWidget(self):\n self.line_tool_widget = QtWidgets.QWidget(self)\n layout = QtWidgets.QHBoxLayout()\n layout.setContentsMargins(0, 20, 0, 0)\n layout.addWidget(QtWidgets.QLabel('Number of Points: '))\n self.line_point_count_spinbox = QtWidgets.QSpinBox()\n self.line_point_count_spinbox.setValue(self.scene.line_tool_size)\n self.line_point_count_spinbox.setRange(2, 100)\n self.line_point_count_spinbox.valueChanged.connect(self.scene.setLineToolSize)\n\n layout.addWidget(self.line_point_count_spinbox)\n self.line_tool_widget.setVisible(False)\n self.line_tool_widget.setLayout(layout)\n\n def createAreaToolWidget(self):\n self.area_tool_widget = QtWidgets.QWidget(self)\n layout = QtWidgets.QHBoxLayout()\n layout.setContentsMargins(0, 20, 0, 0)\n layout.addWidget(QtWidgets.QLabel('Number of Points: '))\n self.area_x_spinbox = QtWidgets.QSpinBox()\n self.area_x_spinbox.setValue(self.scene.area_tool_size[0])\n self.area_x_spinbox.setRange(2, 100)\n self.area_y_spinbox = QtWidgets.QSpinBox()\n self.area_y_spinbox.setValue(self.scene.area_tool_size[1])\n self.area_y_spinbox.setRange(2, 100)\n\n stretch_factor = 3\n layout.addStretch(1)\n layout.addWidget(QtWidgets.QLabel('X: '))\n self.area_x_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),\n self.area_y_spinbox.value()))\n layout.addWidget(self.area_x_spinbox, stretch_factor)\n layout.addStretch(1)\n layout.addWidget(QtWidgets.QLabel('Y: '))\n self.area_y_spinbox.valueChanged.connect(lambda: self.scene.setAreaToolSize(self.area_x_spinbox.value(),\n self.area_y_spinbox.value()))\n layout.addWidget(self.area_y_spinbox, stretch_factor)\n self.area_tool_widget.setVisible(False)\n self.area_tool_widget.setLayout(layout)\n\n def createGridWidget(self):\n self.grid_widget = QtWidgets.QWidget(self)\n main_layout = QtWidgets.QVBoxLayout()\n main_layout.setContentsMargins(0, 20, 0, 0)\n layout = QtWidgets.QHBoxLayout()\n layout.addWidget(QtWidgets.QLabel('Grid Type: '))\n grid_combobox = QtWidgets.QComboBox()\n grid_combobox.setView(QtWidgets.QListView())\n grid_combobox.addItems([g.value for g in Grid.Type])\n grid_combobox.currentTextChanged.connect(lambda value: self.setGridType(Grid.Type(value)))\n layout.addWidget(grid_combobox)\n main_layout.addLayout(layout)\n main_layout.addSpacing(20)\n\n layout = QtWidgets.QHBoxLayout()\n layout.addWidget(QtWidgets.QLabel('Grid Size: '))\n self.grid_x_label = QtWidgets.QLabel('')\n self.grid_x_spinbox = QtWidgets.QDoubleSpinBox()\n 
self.grid_x_spinbox.setDecimals(1)\n self.grid_x_spinbox.setSingleStep(0.1)\n self.grid_x_spinbox.valueChanged.connect(self.changeGridSize)\n self.grid_y_label = QtWidgets.QLabel('')\n self.grid_y_spinbox = QtWidgets.QDoubleSpinBox()\n self.grid_y_spinbox.setDecimals(1)\n self.grid_y_spinbox.setSingleStep(0.1)\n self.grid_y_spinbox.valueChanged.connect(self.changeGridSize)\n stretch_factor = 3\n layout.addStretch(1)\n layout.addWidget(self.grid_x_label)\n layout.addWidget(self.grid_x_spinbox, stretch_factor)\n layout.addStretch(1)\n layout.addWidget(self.grid_y_label)\n layout.addWidget(self.grid_y_spinbox, stretch_factor)\n main_layout.addLayout(layout)\n self.setGridType(self.view.grid.type)\n self.grid_widget.setVisible(False)\n self.grid_widget.setLayout(main_layout)\n\n def changeGridSize(self):\n if self.view.grid.type == Grid.Type.Box:\n grid_x = int(self.grid_x_spinbox.value() * self.sample_scale)\n grid_y = int(self.grid_y_spinbox.value() * self.sample_scale)\n else:\n grid_x = int(self.grid_x_spinbox.value() * self.sample_scale)\n grid_y = self.grid_y_spinbox.value()\n self.view.setGridSize((grid_x, grid_y))\n\n def setGridType(self, grid_type):\n self.view.setGridType(grid_type)\n size = self.view.grid.size\n if grid_type == Grid.Type.Box:\n self.grid_x_label.setText('X (mm): ')\n self.grid_y_label.setText('Y (mm): ')\n self.grid_x_spinbox.setValue(size[0])\n self.grid_y_spinbox.setValue(size[1])\n self.grid_x_spinbox.setRange(0.1, 1000)\n self.grid_y_spinbox.setRange(0.1, 1000)\n else:\n self.grid_x_label.setText('Radius (mm): ')\n self.grid_y_label.setText('Angle (degree): ')\n self.grid_x_spinbox.setValue(size[0])\n self.grid_y_spinbox.setValue(size[1])\n self.grid_x_spinbox.setRange(0.1, 1000)\n self.grid_y_spinbox.setRange(0.1, 360)\n\n def changeSceneMode(self, button_id):\n self.scene.mode = GraphicsScene.Mode(button_id)\n self.line_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_line)\n self.area_tool_widget.setVisible(self.scene.mode == GraphicsScene.Mode.Draw_area)\n\n def showHelp(self):\n self.view.show_help = False if self.view.has_foreground else True\n self.scene.update()\n\n def showGrid(self, state):\n self.view.show_grid = True if state == QtCore.Qt.Checked else False\n self.snap_to_grid_checkbox.setEnabled(self.view.show_grid)\n self.grid_widget.setVisible(self.view.show_grid)\n self.scene.update()\n\n def snapToGrid(self, state):\n self.view.snap_to_grid = True if state == QtCore.Qt.Checked else False\n\n def updateSlider(self, value):\n if not self.plane_lineedit.hasAcceptableInput():\n return\n\n new_distance = clamp(float(value), *self.plane_offset_range)\n slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, new_distance))\n self.plane_slider.setValue(slider_value)\n\n offset = new_distance - self.old_distance\n self.parent.scenes.movePlane(offset * self.plane.normal)\n self.old_distance = new_distance\n\n def updateLineEdit(self, value):\n new_distance = trunc(map_range(*self.slider_range, *self.plane_offset_range, value), 3)\n self.plane_lineedit.setText('{:.3f}'.format(new_distance))\n\n offset = new_distance - self.old_distance\n self.parent.scenes.movePlane(offset * self.plane.normal)\n self.old_distance = new_distance\n\n def movePlane(self):\n distance = clamp(float(self.plane_lineedit.text()), *self.plane_offset_range)\n self.plane_lineedit.setText('{:.3f}'.format(distance))\n point = distance * self.plane.normal\n self.plane = Plane(self.plane.normal, point)\n self.updateCrossSection()\n\n def 
setCustomPlane(self, is_valid):\n if is_valid:\n normal = np.array([self.x_axis.value, self.y_axis.value, self.z_axis.value])\n try:\n self.initializePlane(normal, self.mesh.bounding_box.center)\n except ValueError:\n self.x_axis.validation_label.setText('Bad Normal')\n\n def setPlane(self, selected_text):\n if selected_text == PlaneOptions.Custom.value:\n self.custom_plane_widget.setVisible(True)\n self.form_group.validateGroup()\n return\n else:\n self.custom_plane_widget.setVisible(False)\n\n if selected_text == PlaneOptions.XY.value:\n plane_normal = np.array([0., 0., 1.])\n elif selected_text == PlaneOptions.XZ.value:\n plane_normal = np.array([0., 1., 0.])\n else:\n plane_normal = np.array([1., 0., 0.])\n\n self.initializePlane(plane_normal, self.mesh.bounding_box.center)\n\n def initializePlane(self, plane_normal, plane_point):\n self.plane = Plane(plane_normal, plane_point)\n plane_size = self.mesh.bounding_box.radius\n\n self.parent.scenes.drawPlane(self.plane, 2 * plane_size, 2 * plane_size)\n distance = self.plane.distanceFromOrigin()\n self.plane_offset_range = (distance - plane_size, distance + plane_size)\n slider_value = int(map_range(*self.plane_offset_range, *self.slider_range, distance))\n self.plane_slider.setValue(slider_value)\n self.plane_lineedit.setText('{:.3f}'.format(distance))\n self.old_distance = distance\n # inverted the normal so that the y-axis is flipped\n self.matrix = self.__lookAt(-Vector3(self.plane.normal))\n self.view.resetTransform()\n self.updateCrossSection()\n\n def updateCrossSection(self):\n self.scene.clear()\n segments = mesh_plane_intersection(self.mesh, self.plane)\n if len(segments) == 0:\n return\n segments = np.array(segments)\n\n item = QtWidgets.QGraphicsPathItem()\n cross_section_path = QtGui.QPainterPath()\n rotated_segments = self.sample_scale * (segments @ self.matrix)\n for i in range(0, rotated_segments.shape[0], 2):\n start = rotated_segments[i, :]\n cross_section_path.moveTo(start[0], start[1])\n end = rotated_segments[i + 1, :]\n cross_section_path.lineTo(end[0], end[1])\n item.setPath(cross_section_path)\n item.setPen(self.path_pen)\n item.setTransform(self.view.scene_transform)\n self.scene.addItem(item)\n rect = item.boundingRect()\n anchor = rect.center()\n\n ab = self.plane.point - self.parent_model.measurement_points.points\n d = np.einsum('ij,ij->i', np.expand_dims(self.plane.normal, axis=0), ab)\n index = np.where(np.abs(d) < VECTOR_EPS)[0]\n rotated_points = self.parent_model.measurement_points.points[index, :]\n rotated_points = rotated_points @ self.matrix\n\n for i, p in zip(index, rotated_points):\n point = QtCore.QPointF(p[0], p[1]) * self.sample_scale\n point = self.view.scene_transform.map(point)\n item = GraphicsPointItem(point, size=self.scene.point_size)\n item.setToolTip(f'Point {i + 1}')\n item.fixed = True\n item.makeControllable(self.scene.mode == GraphicsScene.Mode.Select)\n item.setPen(self.point_pen)\n self.scene.addItem(item)\n rect = rect.united(item.boundingRect().translated(point))\n\n # calculate new rectangle that encloses original rect with a different anchor\n rect.united(rect.translated(anchor - rect.center()))\n self.view.setSceneRect(rect)\n self.view.fitInView(rect, QtCore.Qt.KeepAspectRatio)\n self.view.anchor = rect\n\n @staticmethod\n def __lookAt(forward):\n rot_matrix = Matrix33.identity()\n up = Vector3([0., -1., 0.]) if -VECTOR_EPS < forward[1] < VECTOR_EPS else Vector3([0., 0., 1.])\n left = up ^ forward\n left.normalize()\n up = forward ^ left\n\n rot_matrix.c1[:3] = left\n 
rot_matrix.c2[:3] = up\n rot_matrix.c3[:3] = forward\n\n return rot_matrix\n\n def addPoints(self):\n if len(self.scene.items()) < 2:\n return\n\n points_2d = []\n transform = self.view.scene_transform.inverted()[0]\n for item in self.scene.items():\n if isinstance(item, GraphicsPointItem) and not item.fixed:\n pos = transform.map(item.pos()) / self.sample_scale\n # negate distance due to inverted normal when creating matrix\n points_2d.append([pos.x(), pos.y(), -self.old_distance])\n self.scene.removeItem(item)\n\n if not points_2d:\n return\n\n points = points_2d[::-1] @ self.matrix.transpose()\n enabled = [True] * points.shape[0]\n self.parent.presenter.addPoints(list(zip(points, enabled)), PointType.Measurement, False)\n\n\nclass AlignSample(QtWidgets.QWidget):\n \"\"\"Provides UI for aligning sample on instrument with 6D pose\n\n :param parent: Main window\n :type parent: MainWindow\n \"\"\"\n dock_flag = DockFlag.Upper\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent.scenes.switchToInstrumentScene()\n self.title = 'Align Sample with 6D pose'\n self.setMinimumWidth(450)\n\n self.main_layout = QtWidgets.QVBoxLayout()\n self.setLayout(self.main_layout)\n self.main_layout.addSpacing(20)\n self.main_layout.addWidget(FormTitle('Create Transformation for Alignment'))\n self.main_layout.addSpacing(10)\n\n self.main_layout.addWidget(QtWidgets.QLabel('Translation along the X, Y, and Z axis (mm):'))\n self.position_form_group = FormGroup(FormGroup.Layout.Horizontal)\n self.x_position = FormControl('X', 0.0, required=True, number=True)\n self.y_position = FormControl('Y', 0.0, required=True, number=True)\n self.z_position = FormControl('Z', 0.0, required=True, number=True)\n self.position_form_group.addControl(self.x_position)\n self.position_form_group.addControl(self.y_position)\n self.position_form_group.addControl(self.z_position)\n self.position_form_group.groupValidation.connect(self.formValidation)\n self.main_layout.addWidget(self.position_form_group)\n\n self.main_layout.addWidget(QtWidgets.QLabel('Rotation around the X, Y, and Z axis (degrees):'))\n self.orientation_form_group = FormGroup(FormGroup.Layout.Horizontal)\n self.x_rotation = FormControl('X', 0.0, required=True, number=True)\n self.x_rotation.range(-360.0, 360.0)\n self.y_rotation = FormControl('Y', 0.0, required=True, number=True)\n self.y_rotation.range(-360.0, 360.0)\n self.z_rotation = FormControl('Z', 0.0, required=True, number=True)\n self.z_rotation.range(-360.0, 360.0)\n self.orientation_form_group.addControl(self.x_rotation)\n self.orientation_form_group.addControl(self.y_rotation)\n self.orientation_form_group.addControl(self.z_rotation)\n self.orientation_form_group.groupValidation.connect(self.formValidation)\n self.main_layout.addWidget(self.orientation_form_group)\n\n button_layout = QtWidgets.QHBoxLayout()\n self.execute_button = QtWidgets.QPushButton('Align Sample')\n self.execute_button.clicked.connect(self.executeButtonClicked)\n button_layout.addWidget(self.execute_button)\n button_layout.addStretch(1)\n self.main_layout.addLayout(button_layout)\n self.main_layout.addStretch(1)\n\n def formValidation(self):\n if self.position_form_group.valid and self.orientation_form_group.valid:\n self.execute_button.setEnabled(True)\n else:\n self.execute_button.setDisabled(True)\n\n def executeButtonClicked(self):\n pose = [self.x_position.value, self.y_position.value, self.z_position.value,\n self.z_rotation.value, self.y_rotation.value, self.x_rotation.value]\n\n 
self.parent.presenter.alignSampleWithPose(pose)\n" ]
[ [ "numpy.array", "numpy.expand_dims", "numpy.linalg.norm", "numpy.abs" ] ]
cnheider/onnx
[ "f5bb59aa0f8b18b602763abe47d1d24d0d54b197", "781545783a4e2bbbda48fc64318fb2c6d8bbb3cc" ]
[ "onnx/backend/test/case/node/batchnorm.py", "onnx/backend/test/case/base.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass BatchNormalization(Base):\n\n @staticmethod\n def export(): # type: () -> None\n def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n mean = mean.reshape(-1, *dim_ones)\n var = var.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n # input size: (1, 2, 1, 3)\n x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\n s = np.array([1.0, 1.5]).astype(np.float32)\n bias = np.array([0, 1]).astype(np.float32)\n mean = np.array([0, 3]).astype(np.float32)\n var = np.array([1, 1.5]).astype(np.float32)\n y = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\n node = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n )\n\n # output size: (1, 2, 1, 3)\n expect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_example')\n\n # input size: (2, 3, 4, 5)\n x = np.random.randn(2, 3, 4, 5).astype(np.float32)\n s = np.random.randn(3).astype(np.float32)\n bias = np.random.randn(3).astype(np.float32)\n mean = np.random.randn(3).astype(np.float32)\n var = np.random.rand(3).astype(np.float32)\n epsilon = 1e-2\n y = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\n node = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n epsilon=epsilon,\n )\n\n # output size: (2, 3, 4, 5)\n expect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_epsilon')\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nimport inspect\nfrom textwrap import dedent\nfrom typing import Dict, Text, List, Tuple, Type, Sequence, Any\n\nimport numpy as np # type: ignore\nfrom six import add_metaclass\n\n\ndef process_snippet(op_name, name, export): # type: (Text, Text, Any) -> Tuple[Text, Text]\n snippet_name = name[len('export_'):] or op_name.lower()\n source_code = dedent(inspect.getsource(export))\n # remove the function signature line\n lines = source_code.splitlines()\n assert lines[0] == '@staticmethod'\n assert lines[1].startswith('def export')\n return snippet_name, dedent(\"\\n\".join(lines[2:]))\n\n\nSnippets = defaultdict(list) # type: Dict[Text, List[Tuple[Text, Text]]]\n\n\nclass _Exporter(type):\n exports = defaultdict(list) # type: Dict[Text, List[Tuple[Text, Text]]]\n\n def __init__(cls, name, bases, dct): # type: (str, Tuple[Type[Any], ...], Dict[str, Any]) -> None\n for k, v in dct.items():\n if k.startswith('export'):\n if not isinstance(v, staticmethod):\n raise ValueError(\n 'Only staticmethods could be named as export.*')\n export = getattr(cls, k)\n Snippets[name].append(process_snippet(name, k, export))\n # export functions should call expect and so populate\n # TestCases\n np.random.seed(seed=0)\n export()\n super(_Exporter, cls).__init__(name, bases, dct)\n\n\n@add_metaclass(_Exporter)\nclass Base(object):\n pass\n" ]
[ [ "numpy.array", "numpy.random.randn", "numpy.random.rand", "numpy.sqrt" ], [ "numpy.random.seed" ] ]
nghitrampham/air_pollution_death_rate_related
[ "3fd72b9684e8362de5706ba37c1d90b844d4afe0" ]
[ "air_pollution_death_rate_related/scripts/air_pollution/predict_aqi.py" ]
[ "\"\"\"\nThis module is used to predict the Air Quality Index model for 2019 for all counties.\n\"\"\"\nimport pickle\nimport warnings\n\nimport pandas as pd\nimport numpy as np\nfrom keras.models import load_model\n\nimport helpers\n\nwarnings.filterwarnings(\"ignore\")\n\ndef main():\n\n data2019_raw = pd.read_csv(\"\"\"air_pollution_death_rate_related/data/air_pollution/\n data_air_raw/daily_aqi_by_county_2019.csv\"\"\")\n data2019 = helpers.data_cleaning(data2019_raw)\n predicted_date = \"2019-03-12\"\n\n file = open(\"temp.csv\", \"w\")\n file.write(\"date,state_county,AQI\\n\")\n\n # for county in list(data2019[\"state_county\"].unique()):\n for county in list(data2019[\"state_county\"].unique())[:5]:\n\n ## load model to predict AQI\n print(\"---> Loading model for county {} ...\".format(county))\n\n try:\n scaler_path = (\"air_pollution_death_rate_related/trained_model/min_scaler_model/\" +\n county + \"_scaler.pickle\")\n\n model_path = (\"air_pollution_death_rate_related/trained_model/county_aqi/\" +\n county + \"_model.h5\")\n\n model = load_model(model_path)\n mm_scaler = pickle.load(open(scaler_path, \"rb\"))\n\n ### feature engineering for model\n data_feature_temp = helpers.data_feature_engineering_for_test(\n data2019,\n county,\n predicted_date)\n x_test, y_test = helpers.load_test_data(data_feature_temp[\"data\"], mm_scaler)\n\n ## predicting AQI\n predictions = helpers.predict_point_by_point(model, x_test)\n # helpers.plot_results(predictions, y_test)\n\n ## keep prediction for all counties\n print(\"Predicting ....\")\n y_pred = np.append(x_test, predictions.reshape(1, 1, 1)).reshape(1, 39)\n y_scale = mm_scaler.inverse_transform(y_pred)[-1][-1]\n\n file.write(predicted_date+\",\"+county+\",\"+str(y_scale)+\"\\n\")\n\n del data_feature_temp, scaler_path,\\\n model_path, model, mm_scaler, x_test, y_test, predictions, y_pred, y_scale\n\n except Exception as exp:\n print(exp)\n exp.args += ('Path and list_year must not be empty', \"check read_raw_data function\")\n\n file.close()\n\n ## creating dataframe containing county, state, predicted AQI,\n ## predicted date for interactive visualization map\n county_code = pd.read_csv(\"\"\"air_pollution_death_rate_related/data/air_pollution/\n data_misc/county_with_code.csv\"\"\")\n df_prediction = pd.read_csv(\"temp.csv\")\n\n df_result = (pd.merge(county_code, df_prediction,\n how='inner',\n left_on=[\"state_county\"],\n right_on=[\"state_county\"])\n )\n df_result.to_csv(\"predicted_AQI\" + predicted_date + \".csv\", index=False)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv", "pandas.merge" ] ]
positivevaib/semi-supervised-imagenet-classification
[ "4fb6427f5a72951c1b866a1ddbc2599811bb5770" ]
[ "deep-clustering-conv-autoencoder/main.py" ]
[ "# import\nimport numpy as np\nimport sklearn as skl\nimport sklearn.cluster as cluster\nimport sklearn.metrics as metrics\nimport torch\nimport torch.distributions.kl as kl\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport tqdm\n\n\n# model\nclass CAE_ENC(nn.Module):\n def __init__(self):\n super().__init__()\n # self.enc = nn.Sequential(*list(model.features.children())[:-5])\n self.conv1 = nn.Conv2d(3, 32, kernel_size=5, padding=2, stride=2)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=2)\n self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=2)\n self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2)\n self.fc1 = nn.Linear(256 * 6 * 6, 1000)\n\n def forward(self, x):\n # x = self.features(x)\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = F.relu(self.conv4(x))\n x = x.view(-1, 256 * 6 * 6)\n x = self.fc1(x)\n return x\n\n\nclass CAE_DEC(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc2 = nn.Linear(1000, 256 * 6 * 6)\n self.deconv1 = nn.ConvTranspose2d(256, 128, 2, stride=2)\n self.deconv2 = nn.ConvTranspose2d(128, 64, 2, stride=2)\n self.deconv3 = nn.ConvTranspose2d(64, 32, 2, stride=2)\n self.deconv4 = nn.ConvTranspose2d(32, 3, 2, stride=2)\n self.conv5 = nn.Conv2d(3, 3, kernel_size=1) # might have to remove\n\n def forward(self, x):\n x = F.relu(self.fc2(x))\n x = x.view(128, 256, 6, 6)\n x = F.relu(self.deconv1(x))\n x = F.relu(self.deconv2(x))\n x = F.relu(self.deconv3(x))\n x = F.relu(self.deconv4(x))\n x = torch.sigmoid(self.conv5(x)) # might have to remove\n return x\n\n\nclass ClusteringLayer(nn.Module):\n def __init__(self, weights=None, alpha=1.0):\n super().__init__()\n if weights:\n self.weights = weights\n else:\n self.weights = torch.empty(1000, 1000)\n nn.init.xavier_uniform_(self.weights)\n self.alpha = alpha\n\n def forward(self, x):\n q = 1.0 / (1.0 + (torch.sum(\n (x.unsqueeze(1) - self.weights)**2, dim=2) / self.alpha))\n q **= (self.alpha + 1.0) / 2.0\n q = torch.transpose(\n torch.transpose(q, 1, 2) / torch.sum(q, dim=1), 1, 2)\n return q\n\n\ndef set_weights(module, weights):\n if isinstance(module, ClusteringLayer):\n module.weights = weights\n\n\nclass CAE(nn.Module):\n def __init__(self):\n super().__init__()\n self.enc = CAE_ENC()\n self.dec = CAE_DEC()\n self.clus = ClusteringLayer()\n\n def forward(self, x):\n h = self.enc(x)\n q = self.clus(h)\n o = self.dec(h)\n return (h, q, o)\n\n\ndef loss(q, p, o, gamma=0.1):\n mse = nn.MSELoss(o)\n kld = gamma * kl.kl_divergence(p, q)\n l = mse + kld\n return l\n\n\ndef target_distribution(q):\n weight = q**2 / torch.sum(q, dim=0)\n return torch.transpose(torch.transpose(q) / torch.sum(weight, dim=1))\n\n\n# data\ntransformations = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225),\n inplace=True)\n])\ndataset1 = datasets.ImageFolder('/beegfs/vag273/ssl_data_96/supervised/train/',\n transform=transformations)\ndataset2 = datasets.ImageFolder('/beegfs/vag273/ssl_data_96/unsupervised/',\n transform=transformations)\ndataset = data.ConcatDataset((dataset1, dataset2))\n\ntrain_ratio = 0.9\ntrain_set_size = int(train_ratio * len(dataset))\nval_set_size = len(dataset) - train_set_size\n\ntrain_data, val_data = data.random_split(dataset,\n (train_set_size, 
val_set_size))\n\ntrain_loader = data.DataLoader(train_data, batch_size=128, shuffle=True)\nval_loader = data.DataLoader(val_data, batch_size=128, shuffle=False)\n\n# training\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nmodel = CAE().to(device)\n# criterion = nn.MSELoss()\noptimizer = optim.Adam(model.parameters())\n\n# pretrain\nbest_val_loss = float('inf')\ntot_epochs = 200 # maybe lower it on one of the runs\nprint('pretrain')\nfor epoch in range(tot_epochs):\n model.train()\n\n print('epoch {} of {}'.format(epoch + 1, tot_epochs))\n\n desc = \"ITERATION - loss: {:.2f}\"\n pbar = tqdm.tqdm(desc=desc.format(0),\n total=len(train_loader),\n leave=False,\n file=None,\n initial=0)\n\n running_loss = 0\n for batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n\n optimizer.zero_grad()\n\n _, _, out = model(img)\n loss = nn.MSELoss(out, img)\n\n running_loss += loss.item()\n\n loss.backward()\n optimizer.step()\n\n pbar.desc = desc.format(loss.item())\n pbar.update()\n\n print('loss: {}'.format(running_loss / len(train_loader)))\n\n model.eval()\n with torch.no_grad():\n val_running_loss = 0\n for val_batch_idx, val_data in enumerate(val_loader):\n val_img, _ = val_data\n val_img = val_img.to(device)\n\n _, _, val_out = model(val_img)\n val_loss = nn.MSELoss(val_out, val_img)\n\n val_running_loss += val_loss.item()\n\n if val_running_loss / len(val_loader) < best_val_loss:\n torch.save(model.state_dict(), 'weights.pth')\n\n print('val loss: {}'.format(val_running_loss / len(val_loader)))\n\n pbar.close()\n\n# first cluster\nfeatures = None\nfor batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n\n if not features:\n features = model(img)\n else:\n torch.cat((features, model(img)), 0)\n\nkmeans = cluster.kMeans(n_clusters=1000, n_init=20)\nfeatures = features.view(-1)\npred_last = kmeans.fit_predict(features)\nq = kmeans.cluster_centers_\n\n# deep cluster\nprint('deep cklustering')\nupdate_interval = 140 # maybe reduce this for sake of time\nmaxiter = 20000 # maybe reduce this for sake of time\nfor ite in range(int(maxiter)):\n model.train()\n if ite % update_interval == 0:\n q = None\n for batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n\n if not features:\n _, q, _ = model(img)\n else:\n _, new_q, _ = model(img)\n torch.cat((q, new_q), 0)\n p = target_distribution(\n q) # update the auxiliary target distribution p\n\n # evaluate the clustering performance\n pred = q.argmax(1)\n\n # check stop criterion\n delta_label = np.sum(pred != pred_last).astype(\n np.float32) / pred.shape[0]\n pred_last = np.copy(pred)\n if ite > 0 and delta_label < 0.001: # 0.001 is the tolerance\n print('delta_label ', delta_label, '< tol ', 0.001) # tol\n print('Reached tolerance threshold. 
Stopping training.')\n break\n\n print('epoch {} of {}'.format(epoch + 1, tot_epochs))\n\n desc = \"ITERATION - loss: {:.2f}\"\n pbar = tqdm.tqdm(desc=desc.format(0),\n total=len(train_loader),\n leave=False,\n file=None,\n initial=0)\n\n running_loss = 0\n for batch_idx, data in enumerate(train_loader):\n img, _ = data\n img = img.to(device)\n\n optimizer.zero_grad()\n\n _, q, out = model(img)\n loss = loss(q,\n p[batch_idx * 128:batch_idx * 128 + 128, :],\n out,\n gamma=0.1)\n\n running_loss += loss.item()\n\n loss.backward()\n optimizer.step()\n\n pbar.desc = desc.format(loss.item())\n pbar.update()\n\n print('loss: {}'.format(running_loss / len(train_loader)))\n\n model.eval()\n with torch.no_grad():\n val_running_loss = 0\n for val_batch_idx, val_data in enumerate(val_loader):\n val_img, _ = val_data\n val_img = val_img.to(device)\n\n _, val_q, val_out = model(val_img)\n val_loss = loss(val_q,\n p[val_batch_idx * 128:val_batch_idx * 128 +\n 128, :],\n val_out,\n gamma=0.1)\n\n val_running_loss += val_loss.item()\n\n if val_running_loss / len(val_loader) < best_val_loss:\n torch.save(model.state_dict(), 'overall_weights.pth')\n\n print('val loss: {}'.format(val_running_loss / len(val_loader)))\n\n pbar.close()\n" ]
[ [ "torch.sum", "torch.utils.data.DataLoader", "torch.empty", "torch.nn.init.xavier_uniform_", "torch.nn.Linear", "torch.nn.MSELoss", "torch.transpose", "torch.utils.data.ConcatDataset", "numpy.sum", "torch.distributions.kl.kl_divergence", "torch.no_grad", "numpy.copy", "torch.cuda.is_available", "torch.utils.data.random_split", "torch.nn.Conv2d", "sklearn.cluster.kMeans", "torch.cat", "torch.nn.ConvTranspose2d" ] ]
yhl111/PCNN
[ "2e0967aec962d55df1eb7d149a44b91c6c751a1a" ]
[ "model/config.py" ]
[ "import os\nimport numpy as np\n\nfrom .general_utils import get_logger\nfrom .data_utils import load_vocab, get_processing_word\n\nclass Config():\n def __init__(self, load=True):\n \"\"\"Initialize hyperparameters and load vocabs\n\n Args:\n load_embeddings: (bool) if True, load embeddings into\n np array, else None\n\n \"\"\"\n # directory for training outputs\n if not os.path.exists(self.dir_output):\n os.makedirs(self.dir_output)\n\n # create instance of logger\n self.logger = get_logger(self.path_log)\n\n # load if requested (default)\n if load:\n self.load()\n\n\n def load(self):\n \"\"\"Loads vocabulary, processing functions and embeddings\n\n Supposes that build_data.py has been run successfully and that\n the corresponding files have been created (vocab and trimmed\n vectors)\n\n \"\"\"\n # 1. vocabulary\n self.vocab_words = load_vocab(self.filename_words)\n self.vocab_relations = load_vocab(self.filename_relation)\n\n self.nwords = len(self.vocab_words)\n self.nrelations = len(self.vocab_relations)\n\n # 2. get processing functions that map str -> id\n self.processing_word = get_processing_word(self.vocab_words, UNK = \"<UNK>\")\n self.processing_relation = get_processing_word(self.vocab_relations, UNK='NA')\n\n # 3. get pre-trained embeddings\n self.embeddings = (np.load(self.filename_embeddings)['vec']\n if self.use_pretrained else None)\n\n\n # general config\n dir_output = \"./results/test/\"\n graph_output = \"./graph\"\n dir_model = dir_output + \"model.weights/\" # directory to save models\n path_log = dir_output + \"log.txt\"\n restore_model = \"./results/test/model.weights/early_best.ckpt\"\n\n # embeddings\n dim_word = 50\n dim_pos = 5\n dim = dim_word + 2*dim_pos\n\n # position range in sentence\n nposition = 500\n\n # convolution\n window_size = 3\n feature_maps = 230\n\n filename_train_origin = \"./data/origin_data/train.txt\"\n filename_train = \"./data/processed_data/train.txt\"\n filename_train_wrong = \"./data/processed_data/wrong_parse_train.txt\"\n\n filename_dev = \"./data/processed_data/test.txt\"\n\n filename_test_origin = \"./data/origin_data/test.txt\"\n filename_test = \"./data/processed_data/test.txt\"\n filename_test_wrong = \"./data/processed_data/wrong_parse_test.txt\"\n\n max_iter = None # if not None, max number of examples in Dataset\n\n # vocab (created from dataset with build_data.py)\n filename_words = \"./data/processed_data/words.txt\"\n filename_embeddings = \"./data/processed_data/vectors.npz\"\n\n filename_relation_origin = \"./data/origin_data/relation2id.txt\"\n filename_relation = \"./data/processed_data/relation.txt\"\n\n # word vectors file\n filename_wordvectors = \"./data/origin_data/vec.txt\"\n\n use_pretrained = True\n\n MIL = False # if True, using multi-instances learning\n shuffle = False # if True, shuffle train dataset\n max_iter = None # if not None, max number of examples in Dataset\n\n # training\n train_word_embeddings = False\n train_pos_embeddings = True\n nepochs = 15\n dropout = 0.5\n batch_size = 50\n lr_method = \"adadelta\"\n lr = 0.001\n lr_decay = 0.9\n clip = -1 # if negative, no clipping\n nepoch_no_imprv = 3\n early_stop = True\n max_train_step = 100000\n" ]
[ [ "numpy.load" ] ]
thompson318/scikit-surgerycore
[ "22867073a5a3e87def68b4a76e70fe54d085be32" ]
[ "tests/algorithms/test_tracking_smoothing.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Tests for BARD pointer module\"\"\"\nimport math\nimport numpy as np\nimport pytest\nimport sksurgerycore.algorithms.tracking_smoothing as reg\n\n\ndef test_rvec_to_quaterion():\n \"\"\"\n Does it convert correctly\n \"\"\"\n\n #a 90 degree rotation about the x axis\n rvec = np.array([math.pi/2.0, 0.0, 0.0])\n\n quaternion = reg._rvec_to_quaternion(rvec) # pylint: disable=protected-access\n\n assert quaternion[0] == math.cos(math.pi/4.0)\n assert quaternion[1] == 1.0 * math.sin(math.pi/4.0)\n assert quaternion[2] == 0.0\n assert quaternion[3] == 0.0\n\n\ndef test_quaterion_to_matrix():\n \"\"\"\n Test conversion on a 90 degree rotation about y axis.\n \"\"\"\n quaternion = np.array([math.cos(math.pi/4.0), 0.0,\n 1.0 * math.sin(math.pi/4.0), 0.0])\n\n rot_mat = reg.quaternion_to_matrix(quaternion)\n\n rot_mat1 = np.eye(3, dtype=np.float64)\n\n rot_mat1[0, 0] = 0.0\n rot_mat1[0, 2] = 1.0\n rot_mat1[2, 0] = -1.0\n rot_mat1[2, 2] = 0.0\n\n assert np.allclose(rot_mat, rot_mat1, rtol=1e-05, atol=1e-10)\n\ndef test_rolling_mean_no_buffer():\n \"\"\"\n Try doing a rolling mean with zero buffer.\n \"\"\"\n with pytest.raises(ValueError):\n _ = reg.RollingMean(vector_size=3, buffer_size=0)\n\n\n\ndef test_rolling_mean_returns_nan():\n \"\"\"\n Tests for rolling mean class.\n \"\"\"\n\n mean_buffer = reg.RollingMean(vector_size=3, buffer_size=5)\n\n assert np.isnan(mean_buffer.getmean()).all\n\ndef test_rolling_mean_single_value():\n \"\"\"\n Test rolling mean returns vector value for single entry\n \"\"\"\n vector = [5.4, 1.2, 3.4]\n\n mean_buffer = reg.RollingMean(vector_size=3, buffer_size=5)\n\n mean_buffer.pop(vector)\n\n assert np.allclose(vector, mean_buffer.getmean(), rtol=1e-05, atol=1e-10)\n\ndef test_rolling_mean_four_values():\n \"\"\"\n Test rolling mean returns vector value for single entry\n \"\"\"\n vector0 = [5.4, 1.2, 3.4]\n vector1 = [7.4, -1.2, -1.4]\n vector2 = [-2.6, 4.2, 2.6]\n vector3 = [9.0, 3.3, 3.6]\n\n expected_answer0 = [3.4, 1.4, 1.533333]\n expected_answer1 = [4.6, 2.1, 1.6]\n\n mean_buffer = reg.RollingMean(vector_size=3, buffer_size=3)\n mean_buffer.pop(vector0)\n mean_buffer.pop(vector1)\n mean_buffer.pop(vector2)\n assert np.allclose(expected_answer0, mean_buffer.getmean(), rtol=1e-05,\n atol=1e-6)\n\n mean_buffer.pop(vector3)\n\n assert np.allclose(expected_answer1, mean_buffer.getmean(), rtol=1e-05,\n atol=1e-10)\n\n\ndef test_rolling_rotation_no_buffer():\n \"\"\"\n Try doing a rolling rotation mean with zero buffer.\n \"\"\"\n with pytest.raises(ValueError):\n _ = reg.RollingMeanRotation(buffer_size=0)\n\n\ndef test_rolling_rot_returns_nan():\n \"\"\"\n Tests for rolling mean rotation class.\n \"\"\"\n\n mean_buffer = reg.RollingMeanRotation(buffer_size=5)\n\n assert np.isnan(mean_buffer.getmean()).all\n\n\ndef test_rolling_rot_single_value():\n \"\"\"\n Test rolling mean rotation returns vector value for single entry\n \"\"\"\n\n rvec = np.array([0.0, -math.pi/2.0, 0.0])\n expected_quaternion = np.array([math.cos(math.pi/4.0), 0.0,\n -1.0 * math.sin(math.pi/4.0), 0.0])\n\n mean_buffer = reg.RollingMeanRotation(buffer_size=5)\n\n mean_buffer.pop(rvec)\n\n assert np.allclose(expected_quaternion, mean_buffer.getmean(),\n rtol=1e-05, atol=1e-10)\n\n\ndef test_r_rot_sgl_value_sgl_buff():\n \"\"\"\n Test rolling mean rotation returns vector value for single entry\n \"\"\"\n\n rvec = np.array([0.0, 0.0, -math.pi/2.0])\n expected_quaternion = np.array([math.cos(math.pi/4.0), 0.0, 0.0,\n -1.0 * math.sin(math.pi/4.0)])\n\n mean_buffer = 
reg.RollingMeanRotation(buffer_size=1)\n\n mean_buffer.pop(rvec)\n\n assert np.allclose(expected_quaternion, mean_buffer.getmean(),\n rtol=1e-05, atol=1e-10)\n\n\ndef test_rolling_rot_four_values():\n \"\"\"\n Test rolling mean returns vector value for single entry\n \"\"\"\n rvec0 = [0.0, 0.0, 0.0]\n rvec1 = [np.NaN, np.NaN, np.NaN]\n rvec2 = [0.0, 0.0, -math.pi/2.0]\n rvec3 = [0.0, math.pi/3.0, 0.0]\n\n expected_answer0 = reg._rvec_to_quaternion([0.0, 0.0, -math.pi/4.0]) # pylint: disable=protected-access\n #the next ones more of a regression test, I haven't independently\n #calculated this answer.\n expected_answer1 = [-0.87602709, 0.0, -0.27843404, 0.39376519]\n\n mean_buffer = reg.RollingMeanRotation(buffer_size=3)\n mean_buffer.pop(rvec0)\n mean_buffer.pop(rvec1)\n mean_buffer.pop(rvec2)\n assert np.allclose(expected_answer0, mean_buffer.getmean(), rtol=1e-05,\n atol=1e-6)\n\n mean_buffer.pop(rvec3)\n\n assert np.allclose(expected_answer1, mean_buffer.getmean(), rtol=1e-05,\n atol=1e-10)\n" ]
[ [ "numpy.array", "numpy.eye", "numpy.allclose" ] ]
bourov/probability
[ "1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2", "1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2", "1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2", "1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2", "1e4053a0938b4773c3425bcbb07b3f1e5d50c7e2" ]
[ "tensorflow_probability/python/distributions/deterministic.py", "tensorflow_probability/python/internal/backend/numpy/debugging.py", "tensorflow_probability/python/internal/backend/numpy/gen/linear_operator_diag.py", "tensorflow_probability/examples/grammar_vae.py", "tensorflow_probability/python/distributions/sample_test.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Deterministic distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\n# Dependency imports\nimport six\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import kullback_leibler\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n__all__ = [\n 'Deterministic',\n 'VectorDeterministic',\n]\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass _BaseDeterministic(distribution.Distribution):\n \"\"\"Base class for Deterministic distributions.\"\"\"\n\n def __init__(self,\n loc,\n atol=None,\n rtol=None,\n is_vector=False,\n validate_args=False,\n allow_nan_stats=True,\n parameters=None,\n name='_BaseDeterministic'):\n \"\"\"Initialize a batch of `_BaseDeterministic` distributions.\n\n The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`\n computations, e.g. due to floating-point error.\n\n ```\n pmf(x; loc)\n = 1, if Abs(x - loc) <= atol + rtol * Abs(loc),\n = 0, otherwise.\n ```\n\n Args:\n loc: Numeric `Tensor`. The point (or batch of points) on which this\n distribution is supported.\n atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The absolute tolerance for comparing closeness to `loc`.\n Default is `0`.\n rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The relative tolerance for comparing closeness to `loc`.\n Default is `0`.\n is_vector: Python `bool`. If `True`, this is for `VectorDeterministic`,\n else `Deterministic`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value '`NaN`' to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n parameters: Dict of locals to facilitate copy construction.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n ValueError: If `loc` is a scalar.\n \"\"\"\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([loc, atol, rtol], dtype_hint=tf.float32)\n self._loc = tensor_util.convert_nonref_to_tensor(\n loc, dtype_hint=dtype, name='loc')\n self._atol = tensor_util.convert_nonref_to_tensor(\n 0 if atol is None else atol, dtype=dtype, name='atol')\n self._rtol = tensor_util.convert_nonref_to_tensor(\n 0 if rtol is None else rtol, dtype=dtype, name='rtol')\n self._is_vector = is_vector\n\n super(_BaseDeterministic, self).__init__(\n dtype=self._loc.dtype,\n reparameterization_type=(\n reparameterization.FULLY_REPARAMETERIZED\n if dtype_util.is_floating(self._loc.dtype)\n else reparameterization.NOT_REPARAMETERIZED),\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n def _slack(self, loc):\n # Avoid using the large broadcast with self.loc if possible.\n if self.parameters['rtol'] is None:\n return self.atol\n else:\n return self.atol + self.rtol * tf.abs(loc)\n\n @property\n def loc(self):\n \"\"\"Point (or batch of points) at which this distribution is supported.\"\"\"\n return self._loc\n\n @property\n def atol(self):\n \"\"\"Absolute tolerance for comparing points to `self.loc`.\"\"\"\n return self._atol\n\n @property\n def rtol(self):\n \"\"\"Relative tolerance for comparing points to `self.loc`.\"\"\"\n return self._rtol\n\n def _entropy(self):\n return tf.zeros(self.batch_shape_tensor(), dtype=self.dtype)\n\n def _mean(self):\n return tf.identity(self.loc)\n\n def _variance(self):\n return tf.zeros_like(self.loc)\n\n def _mode(self):\n return self.mean()\n\n def _sample_n(self, n, seed=None):\n del seed # unused\n loc = tf.convert_to_tensor(self.loc)\n return tf.broadcast_to(\n loc,\n tf.concat([[n], self._batch_shape_tensor(loc=loc),\n self._event_shape_tensor(loc=loc)],\n axis=0))\n\n def _default_event_space_bijector(self):\n return\n\n def _parameter_control_dependencies(self, is_init):\n assertions = []\n\n # In init, we can always build shape and dtype checks because\n # we assume shape doesn't change for Variable backed args.\n if is_init and self._is_vector:\n msg = 'Argument `loc` must be at least rank 1.'\n if tensorshape_util.rank(self.loc.shape) is not None:\n if tensorshape_util.rank(self.loc.shape) < 1:\n raise ValueError(msg)\n elif self.validate_args:\n assertions.append(\n assert_util.assert_rank_at_least(self.loc, 1, message=msg))\n\n if not self.validate_args:\n assert not assertions # Should never happen\n return []\n\n if is_init != tensor_util.is_ref(self.atol):\n assertions.append(\n assert_util.assert_non_negative(\n self.atol, message='Argument \"atol\" must be non-negative'))\n if is_init != tensor_util.is_ref(self.rtol):\n assertions.append(\n assert_util.assert_non_negative(\n self.rtol, message='Argument \"rtol\" must be non-negative'))\n return assertions\n\n\nclass Deterministic(_BaseDeterministic):\n \"\"\"Scalar `Deterministic` distribution on the real line.\n\n The scalar `Deterministic` distribution is parameterized by a [batch] point\n `loc` on the real line. 
The distribution is supported at this point only,\n and corresponds to a random variable that is constant, equal to `loc`.\n\n See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).\n\n #### Mathematical Details\n\n The probability mass function (pmf) and cumulative distribution function (cdf)\n are\n\n ```none\n pmf(x; loc) = 1, if x == loc, else 0\n cdf(x; loc) = 1, if x >= loc, else 0\n ```\n\n #### Examples\n\n ```python\n # Initialize a single Deterministic supported at zero.\n constant = tfp.distributions.Deterministic(0.)\n constant.prob(0.)\n ==> 1.\n constant.prob(2.)\n ==> 0.\n\n # Initialize a [2, 2] batch of scalar constants.\n loc = [[0., 1.], [2., 3.]]\n x = [[0., 1.1], [1.99, 3.]]\n constant = tfp.distributions.Deterministic(loc)\n constant.prob(x)\n ==> [[1., 0.], [0., 1.]]\n ```\n\n \"\"\"\n\n def __init__(self,\n loc,\n atol=None,\n rtol=None,\n validate_args=False,\n allow_nan_stats=True,\n name='Deterministic'):\n \"\"\"Initialize a scalar `Deterministic` distribution.\n\n The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`\n computations, e.g. due to floating-point error.\n\n ```\n pmf(x; loc)\n = 1, if Abs(x - loc) <= atol + rtol * Abs(loc),\n = 0, otherwise.\n ```\n\n Args:\n loc: Numeric `Tensor` of shape `[B1, ..., Bb]`, with `b >= 0`.\n The point (or batch of points) on which this distribution is supported.\n atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The absolute tolerance for comparing closeness to `loc`.\n Default is `0`.\n rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The relative tolerance for comparing closeness to `loc`.\n Default is `0`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value '`NaN`' to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n super(Deterministic, self).__init__(\n loc,\n atol=atol,\n rtol=rtol,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(loc=0, atol=0, rtol=0)\n\n def _batch_shape_tensor(self, loc=None):\n return tf.broadcast_dynamic_shape(\n tf.shape(self.loc if loc is None else loc),\n tf.broadcast_dynamic_shape(tf.shape(self.atol), tf.shape(self.rtol)))\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(\n self.loc.shape,\n tf.broadcast_static_shape(self.atol.shape, self.rtol.shape))\n\n def _event_shape_tensor(self, loc=None):\n del loc\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _prob(self, x):\n loc = tf.convert_to_tensor(self.loc)\n # Enforces dtype of probability to be float, when self.dtype is not.\n prob_dtype = self.dtype if dtype_util.is_floating(\n self.dtype) else tf.float32\n return tf.cast(tf.abs(x - loc) <= self._slack(loc), dtype=prob_dtype)\n\n def _cdf(self, x):\n loc = tf.identity(self.loc)\n return tf.cast(x >= loc - self._slack(loc), dtype=self.dtype)\n\n\nclass VectorDeterministic(_BaseDeterministic):\n \"\"\"Vector `Deterministic` distribution on `R^k`.\n\n The `VectorDeterministic` distribution is parameterized by a [batch] point\n `loc in R^k`. The distribution is supported at this point only,\n and corresponds to a random variable that is constant, equal to `loc`.\n\n See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).\n\n #### Mathematical Details\n\n The probability mass function (pmf) is\n\n ```none\n pmf(x; loc)\n = 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],\n = 0, otherwise.\n ```\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n\n # Initialize a single VectorDeterministic supported at [0., 2.] in R^2.\n constant = tfd.Deterministic([0., 2.])\n constant.prob([0., 2.])\n ==> 1.\n constant.prob([0., 3.])\n ==> 0.\n\n # Initialize a [3] batch of constants on R^2.\n loc = [[0., 1.], [2., 3.], [4., 5.]]\n constant = tfd.VectorDeterministic(loc)\n constant.prob([[0., 1.], [1.9, 3.], [3.99, 5.]])\n ==> [1., 0., 0.]\n ```\n\n \"\"\"\n\n def __init__(self,\n loc,\n atol=None,\n rtol=None,\n validate_args=False,\n allow_nan_stats=True,\n name='VectorDeterministic'):\n \"\"\"Initialize a `VectorDeterministic` distribution on `R^k`, for `k >= 0`.\n\n Note that there is only one point in `R^0`, the 'point' `[]`. So if `k = 0`\n then `self.prob([]) == 1`.\n\n The `atol` and `rtol` parameters allow for some slack in `pmf`\n computations, e.g. due to floating-point error.\n\n ```\n pmf(x; loc)\n = 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],\n = 0, otherwise\n ```\n\n Args:\n loc: Numeric `Tensor` of shape `[B1, ..., Bb, k]`, with `b >= 0`, `k >= 0`\n The point (or batch of points) on which this distribution is supported.\n atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The absolute tolerance for comparing closeness to `loc`.\n Default is `0`.\n rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable\n shape. The relative tolerance for comparing closeness to `loc`.\n Default is `0`.\n validate_args: Python `bool`, default `False`. 
When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value '`NaN`' to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n super(VectorDeterministic, self).__init__(\n loc,\n atol=atol,\n rtol=rtol,\n is_vector=True,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(loc=1, atol=1, rtol=1)\n\n def _batch_shape_tensor(self, loc=None):\n return tf.broadcast_dynamic_shape(\n tf.shape(self.loc if loc is None else loc),\n tf.broadcast_dynamic_shape(tf.shape(self.atol),\n tf.shape(self.rtol)))[:-1]\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(\n self.loc.shape,\n tf.broadcast_static_shape(self.atol.shape, self.rtol.shape))[:-1]\n\n def _event_shape_tensor(self, loc=None):\n return tf.shape(self.loc if loc is None else loc)[-1:]\n\n def _event_shape(self):\n return self.loc.shape[-1:]\n\n def _prob(self, x):\n loc = tf.convert_to_tensor(self.loc)\n return tf.cast(\n tf.reduce_all(tf.abs(x - loc) <= self._slack(loc), axis=-1),\n dtype=self.dtype)\n\n def _sample_control_dependencies(self, x):\n assertions = []\n if not self.validate_args:\n return assertions\n assertions.append(assert_util.assert_rank_at_least(x, 1))\n assertions.append(assert_util.assert_equal(\n self.event_shape_tensor(), tf.gather(tf.shape(x), tf.rank(x) - 1),\n message=('Argument `x` not defined in the same space '\n 'R**k as this distribution')))\n return assertions\n\n\n@kullback_leibler.RegisterKL(_BaseDeterministic, distribution.Distribution)\ndef _kl_deterministic_distribution(a, b, name=None):\n \"\"\"Calculate the batched KL divergence `KL(a || b)` with `a` Deterministic.\n\n Args:\n a: instance of a Deterministic distribution object.\n b: instance of a Distribution distribution object.\n name: (optional) Name to use for created operations. 
Default is\n 'kl_deterministic_distribution'.\n\n Returns:\n Batchwise `KL(a || b)`.\n \"\"\"\n with tf.name_scope(name or 'kl_deterministic_distribution'):\n return -b.log_prob(a.loc)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Experimental Numpy backend.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\n\nfrom tensorflow_probability.python.internal.backend.numpy import _utils as utils\nfrom tensorflow_probability.python.internal.backend.numpy.ops import convert_to_tensor\nfrom tensorflow_probability.python.internal.backend.numpy.ops import is_tensor\nfrom tensorflow_probability.python.internal.backend.numpy.ops import Tensor\n\n\n__all__ = [\n 'Assert',\n 'assert_equal',\n 'assert_greater',\n 'assert_greater_equal',\n 'assert_integer',\n 'assert_less',\n 'assert_less_equal',\n 'assert_near',\n 'assert_negative',\n 'assert_non_negative',\n 'assert_non_positive',\n 'assert_none_equal',\n 'assert_positive',\n 'assert_proper_iterable',\n 'assert_rank',\n 'assert_rank_at_least',\n 'assert_rank_in',\n 'assert_scalar',\n 'check_numerics',\n]\n\nJAX_MODE = False\n\n\ndef skip_assert_for_tracers(f):\n \"\"\"Function decorator that returns None if JAX tracers are detected.\"\"\"\n if not JAX_MODE:\n return f\n from jax import core as jax_core # pylint: disable=g-import-not-at-top\n def wrapped(*args, **kwargs):\n if any(isinstance(arg, jax_core.Tracer) for arg\n in args + tuple(kwargs.values())):\n print('skip assert ' + f.__name__)\n return None\n return f(*args, **kwargs)\n return wrapped\n\n\n@skip_assert_for_tracers\ndef _assert_binary(\n x, y, comparator, sym, summarize=None, message=None, name=None):\n del summarize\n del name\n x = convert_to_tensor(x)\n y = convert_to_tensor(y)\n if not np.all(comparator(x, y)):\n raise ValueError('Condition x {} y did not hold element-wise. 
{}'.format(\n sym, message or ''))\n\n\n@skip_assert_for_tracers\ndef _assert_equal(x, y, summarize=None, message=None, name=None):\n del summarize\n del name\n x = convert_to_tensor(x)\n y = convert_to_tensor(y)\n if not np.all(np.equal(x, y)):\n raise ValueError('Expected x == y but got {} vs {} {}'.format(\n x, y, message or ''))\n\n\ndef _assert_greater(x, y, summarize=None, message=None, name=None):\n return _assert_binary(\n x, y, np.greater, '>', summarize=summarize,\n message=message, name=name)\n\n\ndef _assert_less(x, y, summarize=None, message=None, name=None):\n return _assert_binary(\n x, y, np.less, '<', summarize=summarize,\n message=message, name=name)\n\n\ndef _assert_greater_equal(\n x, y, summarize=None, message=None, name=None):\n return _assert_binary(\n x, y, np.greater_equal, '>=', summarize=summarize,\n message=message, name=name)\n\n\ndef _assert_less_equal(\n x, y, summarize=None, message=None, name=None):\n return _assert_binary(\n x, y, np.less_equal, '<=', summarize=summarize,\n message=message, name=name)\n\n\n@skip_assert_for_tracers\ndef _assert_compare_to_zero(\n x, comparator, sym, summarize=None, message=None, name=None):\n del summarize\n del name\n x = convert_to_tensor(x)\n if not np.all(comparator(x, 0)):\n raise ValueError(\n 'Condition x {} 0 did not hold element-wise; got {} {}'.format(\n sym, x, message or ''))\n\n\ndef _assert_positive(x, summarize=None, message=None, name=None):\n return _assert_compare_to_zero(\n x, np.greater, '>', summarize=summarize, message=message, name=name)\n\n\ndef _assert_negative(x, summarize=None, message=None, name=None):\n return _assert_compare_to_zero(\n x, np.less, '<', summarize=summarize, message=message, name=name)\n\n\ndef _assert_non_negative(x, summarize=None, message=None, name=None):\n return _assert_compare_to_zero(\n x, np.greater_equal, '>=',\n summarize=summarize, message=message, name=name)\n\n\ndef _assert_non_positive(x, summarize=None, message=None, name=None):\n return _assert_compare_to_zero(\n x, np.less_equal, '<=', summarize=summarize, message=message, name=name)\n\n\ndef _assert_rank(x, rank, message=None, name=None): # pylint: disable=unused-argument\n return _assert_equal(x=len(np.shape(x)), y=rank, message=message)\n\n\ndef _assert_scalar(*_, **__): # pylint: disable=unused-argument\n pass\n\n\ndef _assert_integer(*_, **__): # pylint: disable=unused-argument\n pass\n\n\n@skip_assert_for_tracers\ndef _assert_near(x, y, rtol=None, atol=None,\n message=None, summarize=None, name=None): # pylint: disable=unused-argument\n \"\"\"Raises an error if abs(x - y) > atol + rtol * abs(y).\"\"\"\n del summarize\n del name\n x = convert_to_tensor(x)\n y = convert_to_tensor(y)\n rtol = rtol if rtol else 10 * np.finfo(x.dtype).eps\n atol = atol if atol else 10 * np.finfo(x.dtype).eps\n if np.any(np.abs(x - y) > atol + rtol * np.abs(y)):\n raise ValueError('x = {} and y = {} are not equal to tolerance rtol = {}, '\n 'atol = {} {}'.format(x, y, rtol, atol, message or ''))\n\n\n@skip_assert_for_tracers\ndef _assert_none_equal(x, y, summarize=None, message=None, name=None):\n del summarize\n del name\n x = convert_to_tensor(x)\n y = convert_to_tensor(y)\n if np.any(np.equal(x, y)):\n raise ValueError('Expected x != y but got {} vs {} {}'.format(\n x, y, message or ''))\n\n\ndef _assert_proper_iterable(values):\n unintentional_iterables = (Tensor, np.ndarray, bytes, six.text_type)\n if isinstance(values, unintentional_iterables):\n raise TypeError(\n 'Expected argument \"values\" to be a \"proper\" iterable. 
Found: %s' %\n type(values))\n\n if not hasattr(values, '__iter__'):\n raise TypeError(\n 'Expected argument \"values\" to be iterable. Found: %s' % type(values))\n\n\ndef _assert_rank_at_least(x, rank, message=None, name=None):\n del name\n if len(x.shape) < rank:\n raise ValueError('Expected rank at least {} but got shape {} {}'.format(\n rank, x.shape, message or ''))\n\n\ndef _assert_rank_in(*_, **__): # pylint: disable=unused-argument\n pass\n\n\n# --- Begin Public Functions --------------------------------------------------\n\n\nAssert = utils.copy_docstring( # pylint: disable=invalid-name\n 'tf.debugging.Assert',\n lambda condition, data, summarize=None, name=None: None)\n\nassert_equal = utils.copy_docstring(\n 'tf.debugging.assert_equal',\n _assert_equal)\n\nassert_greater = utils.copy_docstring(\n 'tf.debugging.assert_greater',\n _assert_greater)\n\nassert_less = utils.copy_docstring(\n 'tf.debugging.assert_less',\n _assert_less)\n\nassert_rank = utils.copy_docstring(\n 'tf.debugging.assert_rank',\n _assert_rank)\n\nassert_scalar = utils.copy_docstring(\n 'tf.debugging.assert_scalar',\n _assert_scalar)\n\nassert_greater_equal = utils.copy_docstring(\n 'tf.debugging.assert_greater_equal',\n _assert_greater_equal)\n\nassert_integer = utils.copy_docstring(\n 'tf.debugging.assert_integer',\n _assert_integer)\n\nassert_less_equal = utils.copy_docstring(\n 'tf.debugging.assert_less_equal',\n _assert_less_equal)\n\nassert_near = utils.copy_docstring(\n 'tf.debugging.assert_near',\n _assert_near)\n\nassert_negative = utils.copy_docstring(\n 'tf.debugging.assert_negative',\n _assert_negative)\n\nassert_non_negative = utils.copy_docstring(\n 'tf.debugging.assert_non_negative',\n _assert_non_negative)\n\nassert_non_positive = utils.copy_docstring(\n 'tf.debugging.assert_non_positive',\n _assert_non_positive)\n\nassert_none_equal = utils.copy_docstring(\n 'tf.debugging.assert_none_equal',\n _assert_none_equal)\n\nassert_positive = utils.copy_docstring(\n 'tf.debugging.assert_positive',\n _assert_positive)\n\nassert_proper_iterable = utils.copy_docstring(\n 'tf.debugging.assert_proper_iterable',\n _assert_proper_iterable)\n\nassert_rank_at_least = utils.copy_docstring(\n 'tf.debugging.assert_rank_at_least',\n _assert_rank_at_least)\n\nassert_rank_in = utils.copy_docstring(\n 'tf.debugging.assert_rank_in',\n _assert_rank_in)\n\ncheck_numerics = utils.copy_docstring(\n 'tf.debugging.check_numerics',\n lambda x, *_, **__: x)\n\nis_numeric_tensor = utils.copy_docstring(\n 'tf.debugging.is_numeric_tensor',\n lambda x: is_tensor(x) and np.issubdtype(x.dtype, np.number))\n", "# Copyright 2020 The TensorFlow Probability Authors. All Rights Reserved.\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n# THIS FILE IS AUTO-GENERATED BY `gen_linear_operators.py`.\n# DO NOT MODIFY DIRECTLY.\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n# pylint: disable=g-import-not-at-top\n# pylint: disable=g-direct-tensorflow-import\n# pylint: disable=g-bad-import-order\n# pylint: disable=unused-import\n# pylint: disable=line-too-long\n# pylint: disable=reimported\n# pylint: disable=g-bool-id-comparison\n# pylint: disable=g-statement-before-imports\n# pylint: disable=bad-continuation\n# pylint: disable=useless-import-alias\n# pylint: disable=property-with-parameters\n# pylint: disable=trailing-whitespace\n\n# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"`LinearOperator` acting like a diagonal matrix.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# [internal] enable type annotations\nfrom __future__ import print_function\n\nfrom tensorflow_probability.python.internal.backend.numpy import ops\nfrom tensorflow_probability.python.internal.backend.numpy import numpy_array as array_ops\nfrom tensorflow_probability.python.internal.backend.numpy import debugging as check_ops\nfrom tensorflow_probability.python.internal.backend.numpy import numpy_math as math_ops\nfrom tensorflow_probability.python.internal.backend.numpy import linalg_impl as linalg\nfrom tensorflow_probability.python.internal.backend.numpy.gen import linear_operator\nfrom tensorflow_probability.python.internal.backend.numpy.gen import linear_operator_util\n# from tensorflow.python.util.tf_export import tf_export\n\n__all__ = [\"LinearOperatorDiag\",]\n\n\n# @tf_export(\"linalg.LinearOperatorDiag\")\nclass LinearOperatorDiag(linear_operator.LinearOperator):\n \"\"\"`LinearOperator` acting like a [batch] square diagonal matrix.\n\n This operator acts like a [batch] diagonal matrix `A` with shape\n `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a\n batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is\n an `N x N` matrix. This matrix `A` is not materialized, but for\n purposes of broadcasting this shape will be relevant.\n\n `LinearOperatorDiag` is initialized with a (batch) vector.\n\n ```python\n # Create a 2 x 2 diagonal linear operator.\n diag = [1., -1.]\n operator = LinearOperatorDiag(diag)\n\n operator.to_dense()\n ==> [[1., 0.]\n [0., -1.]]\n\n tensor_shape.TensorShape(operator.shape)\n ==> [2, 2]\n\n operator.log_abs_determinant()\n ==> scalar Tensor\n\n x = ... Shape [2, 4] Tensor\n operator.matmul(x)\n ==> Shape [2, 4] Tensor\n\n # Create a [2, 3] batch of 4 x 4 linear operators.\n diag = tf.random.normal(shape=[2, 3, 4])\n operator = LinearOperatorDiag(diag)\n\n # Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible\n # since the batch dimensions, [2, 1], are broadcast to\n # operator.batch_shape = [2, 3].\n y = tf.random.normal(shape=[2, 1, 4, 2])\n x = operator.solve(y)\n ==> operator.matmul(x) = y\n ```\n\n #### Shape compatibility\n\n This operator acts on [batch] matrix with compatible shape.\n `x` is a batch matrix with compatible shape for `matmul` and `solve` if\n\n ```\n tensor_shape.TensorShape(operator.shape) = [B1,...,Bb] + [N, N], with b >= 0\n tensor_shape.TensorShape(x.shape) = [C1,...,Cc] + [N, R],\n and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]\n ```\n\n #### Performance\n\n Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`,\n and `tensor_shape.TensorShape(x.shape) = [N, R]`. 
Then\n\n * `operator.matmul(x)` involves `N * R` multiplications.\n * `operator.solve(x)` involves `N` divisions and `N * R` multiplications.\n * `operator.determinant()` involves a size `N` `reduce_prod`.\n\n If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and\n `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning:\n\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n \"\"\"\n\n def __init__(self,\n diag,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=\"LinearOperatorDiag\"):\n r\"\"\"Initialize a `LinearOperatorDiag`.\n\n Args:\n diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.\n The diagonal of the operator. Allowed dtypes: `float16`, `float32`,\n `float64`, `complex64`, `complex128`.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. If `diag.dtype` is real, this is auto-set to `True`.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.\n\n Raises:\n TypeError: If `diag.dtype` is not an allowed type.\n ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.\n \"\"\"\n\n with ops.name_scope(name, values=[diag]):\n self._diag = linear_operator_util.convert_nonref_to_tensor(\n diag, name=\"diag\")\n self._check_diag(self._diag)\n\n # Check and auto-set hints.\n if not np.issubdtype(self._diag.dtype, np.complexfloating):\n if is_self_adjoint is False:\n raise ValueError(\"A real diagonal operator is always self adjoint.\")\n else:\n is_self_adjoint = True\n\n if is_square is False:\n raise ValueError(\"Only square diagonal operators currently supported.\")\n is_square = True\n\n super(LinearOperatorDiag, self).__init__(\n dtype=self._diag.dtype,\n graph_parents=None,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n # TODO(b/143910018) Remove graph_parents in V3.\n self._set_graph_parents([self._diag])\n\n def _check_diag(self, diag):\n \"\"\"Static check of diag.\"\"\"\n if tensor_shape.TensorShape(diag.shape).ndims is not None and tensor_shape.TensorShape(diag.shape).ndims < 1:\n raise ValueError(\"Argument diag must have at least 1 dimension. 
\"\n \"Found: %s\" % diag)\n\n def _shape(self):\n # If d_shape = [5, 3], we return [5, 3, 3].\n d_shape = tensor_shape.TensorShape(self._diag.shape)\n return d_shape.concatenate(d_shape[-1:])\n\n def _shape_tensor(self):\n d_shape = array_ops.shape(self._diag)\n k = d_shape[-1]\n return array_ops.concat((d_shape, [k]), 0)\n\n @property\n def diag(self):\n return self._diag\n\n def _assert_non_singular(self):\n return linear_operator_util.assert_no_entries_with_modulus_zero(\n self._diag,\n message=\"Singular operator: Diagonal contained zero values.\")\n\n def _assert_positive_definite(self):\n if np.issubdtype(self.dtype, np.complexfloating):\n message = (\n \"Diagonal operator had diagonal entries with non-positive real part, \"\n \"thus was not positive definite.\")\n else:\n message = (\n \"Real diagonal operator had non-positive diagonal entries, \"\n \"thus was not positive definite.\")\n\n return check_ops.assert_positive(\n math_ops.real(self._diag),\n message=message)\n\n def _assert_self_adjoint(self):\n return linear_operator_util.assert_zero_imag_part(\n self._diag,\n message=(\n \"This diagonal operator contained non-zero imaginary values. \"\n \" Thus it was not self-adjoint.\"))\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n diag_term = math_ops.conj(self._diag) if adjoint else self._diag\n x = linalg.adjoint(x) if adjoint_arg else x\n diag_mat = array_ops.expand_dims(diag_term, -1)\n return diag_mat * x\n\n def _matvec(self, x, adjoint=False):\n diag_term = math_ops.conj(self._diag) if adjoint else self._diag\n return diag_term * x\n\n def _determinant(self):\n return math_ops.reduce_prod(self._diag, axis=[-1])\n\n def _log_abs_determinant(self):\n log_det = math_ops.reduce_sum(\n math_ops.log(math_ops.abs(self._diag)), axis=[-1])\n if np.issubdtype(self.dtype, np.complexfloating):\n log_det = _ops.cast(log_det, dtype=self.dtype)\n return log_det\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n diag_term = math_ops.conj(self._diag) if adjoint else self._diag\n rhs = linalg.adjoint(rhs) if adjoint_arg else rhs\n inv_diag_mat = array_ops.expand_dims(1. 
/ diag_term, -1)\n return rhs * inv_diag_mat\n\n def _to_dense(self):\n return _linalg.diag(self._diag)\n\n def _diag_part(self):\n return self.diag\n\n def _add_to_tensor(self, x):\n x_diag = _linalg.diag_part(x)\n new_diag = self._diag + x_diag\n return _linalg.set_diag(x, new_diag)\n\n def _eigvals(self):\n return ops.convert_to_tensor(self.diag)\n\n def _cond(self):\n abs_diag = math_ops.abs(self.diag)\n return (math_ops.reduce_max(abs_diag, axis=-1) /\n math_ops.reduce_min(abs_diag, axis=-1))\n\nimport numpy as np\nfrom tensorflow_probability.python.internal.backend.numpy import linalg_impl as _linalg\nfrom tensorflow_probability.python.internal.backend.numpy import ops as _ops\nfrom tensorflow_probability.python.internal.backend.numpy.gen import tensor_shape\n\nfrom tensorflow_probability.python.internal.backend.numpy import private\ndistribution_util = private.LazyLoader(\n \"distribution_util\", globals(),\n \"tensorflow_probability.python.internal._numpy.distribution_util\")\ntensorshape_util = private.LazyLoader(\n \"tensorshape_util\", globals(),\n \"tensorflow_probability.python.internal._numpy.tensorshape_util\")\n\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Trains a grammar variational auto-encoder on synthetic data.\n\nThe grammar variational auto-encoder (VAE) [1] posits a generative model over\nproductions from a context-free grammar, and it posits an amortized variational\napproximation for efficient posterior inference. We train the grammar VAE\non synthetic data using the grammar from [1] (Figure 1). Note for real data\nanalyses, one should implement a parser to convert examples into lists of\nproduction rules.\n\nThis example showcases eager execution in order to train a model where data\npoints have a variable number of time steps (that is, without padding). However,\nnote that handling a variable number of time steps requires a batch size of 1.\nIn this example, we assume data points arrive in a stream, one at a time. Such a\nsetting has an unbounded maximum length which prevents padding.\n\nSummaries are written under the flag `model_dir`. Point TensorBoard to that\ndirectory in order to monitor progress.\n\nExample output:\n\n```none\nRandom examples from synthetic data distribution:\n222N1N21c\n1c2N2C2C12C1N\nC11C12c\n2C\nNCC\n\nStep: 0 Loss: -13.724 (0.494 sec)\nStep: 500 Loss: -0.004 (145.741 sec)\nStep: 1000 Loss: -0.000 (292.205 sec)\nStep: 1500 Loss: -0.000 (438.819 sec)\n```\n\n#### References\n\n[1]: Matt J. Kusner, Brooks Paige, and Jose Miguel Hernandez-Lobato. Grammar\n Variational Autoencoder. In _International Conference on Machine Learning_,\n 2017. 
https://arxiv.org/abs/1703.01925\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\n\n# Dependency imports\nfrom absl import flags\nimport six\nimport tensorflow.compat.v1 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability import edward2 as ed\n\nflags.DEFINE_float(\"learning_rate\",\n default=1e-4,\n help=\"Initial learning rate.\")\nflags.DEFINE_integer(\"max_steps\",\n default=5000,\n help=\"Number of training steps to run.\")\nflags.DEFINE_integer(\"latent_size\",\n default=128,\n help=\"Number of dimensions in the latent code.\")\nflags.DEFINE_integer(\"num_units\",\n default=256,\n help=\"Number of units in the generative model's LSTM.\")\nflags.DEFINE_string(\"model_dir\",\n default=os.path.join(os.getenv(\"TEST_TMPDIR\", \"/tmp\"),\n \"grammar_vae/\"),\n help=\"Directory to put the model's fit.\")\n\nFLAGS = flags.FLAGS\n\n\nclass SmilesGrammar(object):\n \"\"\"Context-free grammar for SMILES strings.\n\n A context-free grammar is a 4-tuple consisting of the following elements:\n\n + `nonterminal_symbols`: finite set of strings.\n + `alphabet`: finite set of strings (terminal symbols). It is disjoint from\n `nonterminal_symbols`.\n + `production_rules`: list of 2-tuples. The first and second elements of\n each tuple respectively denote the left-hand-side and right-hand-side of a\n production rule. All right-hand-sides are written as lists, since the\n number of right-hand-side symbols may be greater than 1.\n + `start_symbol`: string, a distinct nonterminal symbol.\n \"\"\"\n\n @property\n def nonterminal_symbols(self):\n return {\"smiles\", \"chain\", \"branched atom\", \"atom\", \"ringbond\",\n \"aromatic organic\", \"aliphatic organic\", \"digit\"}\n\n @property\n def alphabet(self):\n return {\"c\", \"C\", \"N\", \"1\", \"2\"}\n\n @property\n def production_rules(self):\n return [\n (\"smiles\", [\"chain\"]),\n (\"chain\", [\"chain\", \"branched atom\"]),\n (\"chain\", [\"branched atom\"]),\n (\"branched atom\", [\"atom\", \"ringbond\"]),\n (\"branched atom\", [\"atom\"]),\n (\"atom\", [\"aromatic organic\"]),\n (\"atom\", [\"aliphatic organic\"]),\n (\"ringbond\", [\"digit\"]),\n (\"aromatic organic\", [\"c\"]),\n (\"aliphatic organic\", [\"C\"]),\n (\"aliphatic organic\", [\"N\"]),\n (\"digit\", [\"1\"]),\n (\"digit\", [\"2\"]),\n ]\n\n @property\n def start_symbol(self):\n return \"smiles\"\n\n def convert_to_string(self, productions):\n \"\"\"Converts a sequence of productions into a string of terminal symbols.\n\n Args:\n productions: Tensor of shape [1, num_productions, num_production_rules].\n Slices along the `num_productions` dimension represent one-hot vectors.\n\n Returns:\n str that concatenates all terminal symbols from `productions`.\n\n Raises:\n ValueError: If the first production rule does not begin with\n `self.start_symbol`.\n \"\"\"\n symbols = []\n for production in tf.unstack(productions, axis=1):\n lhs, rhs = self.production_rules[\n tf.argmax(input=tf.squeeze(production), axis=-1)]\n if not symbols: # first iteration\n if lhs != self.start_symbol:\n raise ValueError(\"`productions` must begin with `self.start_symbol`.\")\n symbols = rhs\n else:\n # Greedily unroll the nonterminal symbols based on the first occurrence\n # in a linear sequence.\n index = symbols.index(lhs)\n symbols = symbols[:index] + rhs + symbols[index + 1:]\n string = \"\".join(symbols)\n return string\n\n def mask(self, symbol, on_value, off_value):\n 
\"\"\"Produces a masking tensor for (in)valid production rules.\n\n Args:\n symbol: str, a symbol in the grammar.\n on_value: Value to use for a valid production rule.\n off_value: Value to use for an invalid production rule.\n\n Returns:\n Tensor of shape [1, num_production_rules]. An element is `on_value`\n if its corresponding production rule has `symbol` on its left-hand-side;\n the element is `off_value` otherwise.\n \"\"\"\n mask_values = [on_value if lhs == symbol else off_value\n for lhs, _ in self.production_rules]\n mask_values = tf.reshape(mask_values, [1, len(self.production_rules)])\n return mask_values\n\n\nclass ProbabilisticGrammar(tf.keras.Model):\n \"\"\"Deep generative model over productions that follow a grammar.\"\"\"\n\n def __init__(self, grammar, latent_size, num_units):\n \"\"\"Constructs a probabilistic grammar.\n\n Args:\n grammar: An object representing a grammar. It has members\n `nonterminal_symbols`, `alphabet`, `production_rules`, and\n `start_symbol`, and a method `mask` determining (in)valid\n production rules given a symbol.\n latent_size: Number of dimensions in the latent code.\n num_units: Number of units in the LSTM cell.\n \"\"\"\n super(ProbabilisticGrammar, self).__init__()\n self.grammar = grammar\n self.latent_size = latent_size\n self.lstm = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units)\n self.output_layer = tf.keras.layers.Dense(len(grammar.production_rules))\n\n def __call__(self, *args, **kwargs):\n inputs = 0. # fixes a dummy variable so Model can be called without inputs\n return super(ProbabilisticGrammar, self).__call__(inputs, *args, **kwargs)\n\n def call(self, inputs):\n \"\"\"Runs the model forward to generate a sequence of productions.\n\n Args:\n inputs: Unused.\n\n Returns:\n productions: Tensor of shape [1, num_productions, num_production_rules].\n Slices along the `num_productions` dimension represent one-hot vectors.\n \"\"\"\n del inputs # unused\n latent_code = ed.MultivariateNormalDiag(loc=tf.zeros(self.latent_size),\n sample_shape=1,\n name=\"latent_code\")\n state = self.lstm.zero_state(1, dtype=tf.float32)\n t = 0\n productions = []\n stack = [self.grammar.start_symbol]\n while stack:\n symbol = stack.pop()\n net, state = self.lstm(latent_code, state)\n logits = (self.output_layer(net) +\n self.grammar.mask(symbol, on_value=0., off_value=-1e9))\n production = ed.OneHotCategorical(logits=logits,\n name=\"production_\" + str(t))\n _, rhs = self.grammar.production_rules[tf.argmax(\n input=tf.squeeze(production), axis=-1)]\n for symbol in rhs:\n if symbol in self.grammar.nonterminal_symbols:\n stack.append(symbol)\n productions.append(production)\n t += 1\n return tf.stack(productions, axis=1)\n\n\nclass ProbabilisticGrammarVariational(tf.keras.Model):\n \"\"\"Amortized variational posterior for a probabilistic grammar.\"\"\"\n\n def __init__(self, latent_size):\n \"\"\"Constructs a variational posterior for a probabilistic grammar.\n\n Args:\n latent_size: Number of dimensions in the latent code.\n \"\"\"\n super(ProbabilisticGrammarVariational, self).__init__()\n self.latent_size = latent_size\n self.encoder_net = tf.keras.Sequential([\n tf.keras.layers.Conv1D(64, 3, padding=\"SAME\"),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation(tf.nn.elu),\n tf.keras.layers.Conv1D(128, 3, padding=\"SAME\"),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation(tf.nn.elu),\n tf.keras.layers.Dropout(0.1),\n tf.keras.layers.GlobalAveragePooling1D(),\n tf.keras.layers.Dense(latent_size * 2, 
activation=None),\n ])\n\n def call(self, inputs):\n \"\"\"Runs the model forward to return a stochastic encoding.\n\n Args:\n inputs: Tensor of shape [1, num_productions, num_production_rules]. It is\n a sequence of productions of length `num_productions`. Each production\n is a one-hot vector of length `num_production_rules`: it determines\n which production rule the production corresponds to.\n\n Returns:\n latent_code_posterior: A random variable capturing a sample from the\n variational distribution, of shape [1, self.latent_size].\n \"\"\"\n net = self.encoder_net(tf.cast(inputs, tf.float32))\n return ed.MultivariateNormalDiag(\n loc=net[..., :self.latent_size],\n scale_diag=tf.nn.softplus(net[..., self.latent_size:]),\n name=\"latent_code_posterior\")\n\n\ndef main(argv):\n del argv # unused\n if tf.io.gfile.exists(FLAGS.model_dir):\n tf.compat.v1.logging.warning(\n \"Warning: deleting old log directory at {}\".format(FLAGS.model_dir))\n tf.io.gfile.rmtree(FLAGS.model_dir)\n tf.io.gfile.makedirs(FLAGS.model_dir)\n tf.compat.v1.enable_eager_execution()\n\n grammar = SmilesGrammar()\n synthetic_data_distribution = ProbabilisticGrammar(\n grammar=grammar, latent_size=FLAGS.latent_size, num_units=FLAGS.num_units)\n\n print(\"Random examples from synthetic data distribution:\")\n for _ in range(5):\n productions = synthetic_data_distribution()\n string = grammar.convert_to_string(productions)\n print(string)\n\n probabilistic_grammar = ProbabilisticGrammar(\n grammar=grammar, latent_size=FLAGS.latent_size, num_units=FLAGS.num_units)\n probabilistic_grammar_variational = ProbabilisticGrammarVariational(\n latent_size=FLAGS.latent_size)\n\n checkpoint = tf.train.Checkpoint(\n synthetic_data_distribution=synthetic_data_distribution,\n probabilistic_grammar=probabilistic_grammar,\n probabilistic_grammar_variational=probabilistic_grammar_variational)\n global_step = tf.compat.v1.train.get_or_create_global_step()\n optimizer = tf.compat.v1.train.AdamOptimizer(FLAGS.learning_rate)\n writer = tf.compat.v2.summary.create_file_writer(FLAGS.model_dir)\n writer.set_as_default()\n\n start_time = time.time()\n for step in range(FLAGS.max_steps):\n productions = synthetic_data_distribution()\n with tf.GradientTape() as tape:\n # Sample from amortized variational distribution and record its trace.\n with ed.tape() as variational_tape:\n _ = probabilistic_grammar_variational(productions)\n\n # Set model trace to take on the data's values and the sample from the\n # variational distribution.\n values = {\"latent_code\": variational_tape[\"latent_code_posterior\"]}\n values.update({\"production_\" + str(t): production for t, production\n in enumerate(tf.unstack(productions, axis=1))})\n with ed.tape() as model_tape:\n with ed.interception(ed.make_value_setter(**values)):\n _ = probabilistic_grammar()\n\n # Compute the ELBO given the variational sample, averaged over the batch\n # size and the number of time steps (number of productions). 
Although the\n # ELBO per data point sums over time steps, we average in order to have a\n # value that remains on the same scale across batches.\n log_likelihood = 0.\n for name, rv in six.iteritems(model_tape):\n if name.startswith(\"production\"):\n log_likelihood += rv.distribution.log_prob(rv.value)\n\n kl = tfp.distributions.kl_divergence(\n variational_tape[\"latent_code_posterior\"].distribution,\n model_tape[\"latent_code\"].distribution)\n\n timesteps = tf.cast(productions.shape[1], dtype=tf.float32)\n elbo = tf.reduce_mean(input_tensor=log_likelihood - kl) / timesteps\n loss = -elbo\n with tf.compat.v2.summary.record_if(\n lambda: tf.math.equal(0, global_step % 500)):\n tf.compat.v2.summary.scalar(\n \"log_likelihood\",\n tf.reduce_mean(input_tensor=log_likelihood) / timesteps,\n step=global_step)\n tf.compat.v2.summary.scalar(\n \"kl\", tf.reduce_mean(input_tensor=kl) / timesteps, step=global_step)\n tf.compat.v2.summary.scalar(\"elbo\", elbo, step=global_step)\n\n variables = (probabilistic_grammar.variables\n + probabilistic_grammar_variational.variables)\n grads = tape.gradient(loss, variables)\n grads_and_vars = list(zip(grads, variables))\n optimizer.apply_gradients(grads_and_vars, global_step)\n\n if step % 500 == 0:\n duration = time.time() - start_time\n print(\"Step: {:>3d} Loss: {:.3f} ({:.3f} sec)\".format(\n step, loss, duration))\n checkpoint.save(file_prefix=FLAGS.model_dir)\n\nif __name__ == \"__main__\":\n tf.compat.v1.app.run()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for the Sample distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import bijectors as tfb\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\n\n\n@test_util.test_all_tf_execution_regimes\nclass SampleDistributionTest(test_util.TestCase):\n\n def test_everything_scalar(self):\n s = tfd.Sample(tfd.Normal(loc=0, scale=1), 5, validate_args=True)\n x = s.sample(seed=test_util.test_seed())\n actual_lp = s.log_prob(x)\n # Sample.log_prob will reduce over event space, ie, dims [0, 2]\n # corresponding to sizes concat([[5], [2]]).\n expected_lp = tf.reduce_sum(s.distribution.log_prob(x), axis=0)\n x_, actual_lp_, expected_lp_ = self.evaluate([x, actual_lp, expected_lp])\n self.assertEqual((5,), x_.shape)\n self.assertEqual((), actual_lp_.shape)\n self.assertAllClose(expected_lp_, actual_lp_, atol=0, rtol=1e-3)\n\n def test_everything_nonscalar(self):\n s = tfd.Sample(\n tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1), 1), [5, 4],\n validate_args=True)\n x = s.sample([6, 1], seed=test_util.test_seed())\n actual_lp = s.log_prob(x)\n # 
Sample.log_prob will reduce over event space, ie, dims [2, 3, 5]\n # corresponding to sizes concat([[5, 4], [2]]).\n expected_lp = tf.reduce_sum(\n s.distribution.log_prob(tf.transpose(a=x, perm=[0, 1, 3, 4, 2, 5])),\n axis=[2, 3])\n x_, actual_lp_, expected_lp_ = self.evaluate([x, actual_lp, expected_lp])\n self.assertEqual((6, 1, 3, 5, 4, 2), x_.shape)\n self.assertEqual((6, 1, 3), actual_lp_.shape)\n self.assertAllClose(expected_lp_, actual_lp_, atol=0, rtol=1e-3)\n\n def test_mixed_scalar(self):\n s = tfd.Sample(tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1),\n 3, validate_args=False)\n x = s.sample(4, seed=test_util.test_seed())\n lp = s.log_prob(x)\n self.assertEqual((4, 3, 2), x.shape)\n self.assertEqual((4,), lp.shape)\n\n def test_kl_divergence(self):\n q_scale = 2.\n p = tfd.Sample(\n tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1), 1), [5, 4],\n validate_args=True)\n q = tfd.Sample(\n tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=2.), 1), [5, 4],\n validate_args=True)\n actual_kl = tfd.kl_divergence(p, q)\n expected_kl = ((5 * 4) *\n (0.5 * q_scale**-2. - 0.5 + np.log(q_scale)) * # Actual KL.\n np.ones([3]) * 2) # Batch, events.\n self.assertAllClose(expected_kl, self.evaluate(actual_kl))\n\n def test_transformed_affine(self):\n sample_shape = 3\n mvn = tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1)\n aff = tfb.Affine(scale_tril=[[0.75, 0.],\n [0.05, 0.5]])\n\n def expected_lp(y):\n x = aff.inverse(y) # Ie, tf.random.normal([4, 3, 2])\n fldj = aff.forward_log_det_jacobian(x, event_ndims=1)\n return tf.reduce_sum(mvn.log_prob(x) - fldj, axis=1)\n\n # Transform a Sample.\n d = tfd.TransformedDistribution(\n tfd.Sample(mvn, sample_shape, validate_args=True),\n bijector=aff)\n y = d.sample(4, seed=test_util.test_seed())\n actual_lp = d.log_prob(y)\n self.assertAllEqual((4,) + (sample_shape,) + (2,), y.shape)\n self.assertAllEqual((4,), actual_lp.shape)\n self.assertAllClose(\n *self.evaluate([expected_lp(y), actual_lp]),\n atol=0., rtol=1e-3)\n\n # Sample a Transform.\n d = tfd.Sample(\n tfd.TransformedDistribution(mvn, bijector=aff),\n sample_shape,\n validate_args=True)\n y = d.sample(4, seed=test_util.test_seed())\n actual_lp = d.log_prob(y)\n self.assertAllEqual((4,) + (sample_shape,) + (2,), y.shape)\n self.assertAllEqual((4,), actual_lp.shape)\n self.assertAllClose(\n *self.evaluate([expected_lp(y), actual_lp]),\n atol=0., rtol=1e-3)\n\n def test_transformed_exp(self):\n sample_shape = 3\n mvn = tfd.Independent(tfd.Normal(loc=[0., 0], scale=1), 1)\n exp = tfb.Exp()\n\n def expected_lp(y):\n x = exp.inverse(y) # Ie, tf.random.normal([4, 3, 2])\n fldj = exp.forward_log_det_jacobian(x, event_ndims=1)\n return tf.reduce_sum(mvn.log_prob(x) - fldj, axis=1)\n\n # Transform a Sample.\n d = tfd.TransformedDistribution(\n tfd.Sample(mvn, sample_shape, validate_args=True),\n bijector=exp)\n y = d.sample(4, seed=test_util.test_seed())\n actual_lp = d.log_prob(y)\n self.assertAllEqual((4,) + (sample_shape,) + (2,), y.shape)\n self.assertAllEqual((4,), actual_lp.shape)\n # If `TransformedDistribution` didn't scale the jacobian by\n # `_sample_distribution_size`, then `scale_fldj` would need to be `False`.\n self.assertAllClose(\n *self.evaluate([expected_lp(y), actual_lp]),\n atol=0., rtol=1e-3)\n\n # Sample a Transform.\n d = tfd.Sample(\n tfd.TransformedDistribution(mvn, bijector=exp),\n sample_shape,\n validate_args=True)\n y = d.sample(4, seed=test_util.test_seed())\n actual_lp = d.log_prob(y)\n self.assertAllEqual((4,) + (sample_shape,) + (2,), 
y.shape)\n self.assertAllEqual((4,), actual_lp.shape)\n # Regardless of whether `TransformedDistribution` scales the jacobian by\n # `_sample_distribution_size`, `scale_fldj` is `True`.\n self.assertAllClose(\n *self.evaluate([expected_lp(y), actual_lp]),\n atol=0., rtol=1e-3)\n\n @parameterized.parameters(\n 'mean',\n 'stddev',\n 'variance',\n 'mode',\n )\n def test_summary_statistic(self, attr):\n sample_shape = [5, 4]\n mvn = tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1), 1)\n d = tfd.Sample(mvn, sample_shape, validate_args=True)\n self.assertEqual((3,), d.batch_shape)\n expected_stat = (\n getattr(mvn, attr)()[:, tf.newaxis, tf.newaxis, :] *\n tf.ones([3, 5, 4, 2]))\n actual_stat = getattr(d, attr)()\n self.assertAllEqual(*self.evaluate([expected_stat, actual_stat]))\n\n def test_entropy(self):\n sample_shape = [3, 4]\n mvn = tfd.Independent(tfd.Normal(loc=0, scale=[[0.25, 0.5]]), 1)\n d = tfd.Sample(mvn, sample_shape, validate_args=True)\n expected_entropy = 12 * tf.reduce_sum(mvn.distribution.entropy(), axis=-1)\n actual_entropy = d.entropy()\n self.assertAllEqual(*self.evaluate([expected_entropy, actual_entropy]))\n\n @test_util.tf_tape_safety_test\n def test_gradients_through_params(self):\n loc = tf.Variable(tf.zeros([4, 5, 3]), shape=tf.TensorShape(None))\n scale = tf.Variable(tf.ones([]), shape=tf.TensorShape(None))\n # In real life, you'd really always want `sample_shape` to be\n # `trainable=False`.\n sample_shape = tf.Variable([1, 2], shape=tf.TensorShape(None))\n dist = tfd.Sample(\n tfd.Independent(tfd.Logistic(loc=loc, scale=scale),\n reinterpreted_batch_ndims=1),\n sample_shape=sample_shape,\n validate_args=True)\n with tf.GradientTape() as tape:\n loss = -dist.log_prob(0.)\n self.assertLen(dist.trainable_variables, 3)\n grad = tape.gradient(loss, [loc, scale, sample_shape])\n self.assertAllNotNone(grad[:-1])\n self.assertIs(grad[-1], None)\n\n @test_util.tf_tape_safety_test\n def test_variable_shape_change(self):\n loc = tf.Variable(tf.zeros([4, 5, 3]), shape=tf.TensorShape(None))\n scale = tf.Variable(tf.ones([]), shape=tf.TensorShape(None))\n # In real life, you'd really always want `sample_shape` to be\n # `trainable=False`.\n sample_shape = tf.Variable([1, 2], shape=tf.TensorShape(None))\n dist = tfd.Sample(\n tfd.Independent(tfd.Logistic(loc=loc, scale=scale),\n reinterpreted_batch_ndims=1),\n sample_shape=sample_shape,\n validate_args=True)\n self.evaluate([v.initializer for v in dist.trainable_variables])\n\n x = dist.mean()\n y = dist.sample([7, 2], seed=test_util.test_seed())\n loss_x = -dist.log_prob(x)\n loss_0 = -dist.log_prob(0.)\n batch_shape = dist.batch_shape_tensor()\n event_shape = dist.event_shape_tensor()\n [x_, y_, loss_x_, loss_0_, batch_shape_, event_shape_] = self.evaluate([\n x, y, loss_x, loss_0, batch_shape, event_shape])\n self.assertAllEqual([4, 5, 1, 2, 3], x_.shape)\n self.assertAllEqual([7, 2, 4, 5, 1, 2, 3], y_.shape)\n self.assertAllEqual([4, 5], loss_x_.shape)\n self.assertAllEqual([4, 5], loss_0_.shape)\n self.assertAllEqual([4, 5], batch_shape_)\n self.assertAllEqual([1, 2, 3], event_shape_)\n self.assertLen(dist.trainable_variables, 3)\n\n with tf.control_dependencies([\n loc.assign(tf.zeros([])),\n scale.assign(tf.ones([3, 1, 2])),\n sample_shape.assign(6),\n ]):\n x = dist.mean()\n y = dist.sample([7, 2], seed=test_util.test_seed())\n loss_x = -dist.log_prob(x)\n loss_0 = -dist.log_prob(0.)\n batch_shape = dist.batch_shape_tensor()\n event_shape = dist.event_shape_tensor()\n [x_, y_, loss_x_, loss_0_, batch_shape_, 
event_shape_] = self.evaluate([\n x, y, loss_x, loss_0, batch_shape, event_shape])\n self.assertAllEqual([3, 1, 6, 2], x_.shape)\n self.assertAllEqual([7, 2, 3, 1, 6, 2], y_.shape)\n self.assertAllEqual([3, 1], loss_x_.shape)\n self.assertAllEqual([3, 1], loss_0_.shape)\n self.assertAllEqual([3, 1], batch_shape_)\n self.assertAllEqual([6, 2], event_shape_)\n self.assertLen(dist.trainable_variables, 3)\n\n def test_variable_sample_shape_exception(self):\n loc = tf.Variable(tf.zeros([4, 5, 3]), shape=tf.TensorShape(None))\n scale = tf.Variable(tf.ones([]), shape=tf.TensorShape(None))\n sample_shape = tf.Variable([[1, 2]], shape=tf.TensorShape(None))\n with self.assertRaisesWithPredicateMatch(\n Exception,\n 'Argument `sample_shape` must be either a scalar or a vector.'):\n dist = tfd.Sample(\n tfd.Independent(tfd.Logistic(loc=loc, scale=scale),\n reinterpreted_batch_ndims=1),\n sample_shape=sample_shape,\n validate_args=True)\n self.evaluate([v.initializer for v in dist.trainable_variables])\n self.evaluate(dist.mean())\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.shape", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.identity", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.abs", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.broadcast_static_shape", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.rank", "tensorflow.compat.v2.zeros_like" ], [ "numpy.equal", "numpy.issubdtype", "numpy.abs", "numpy.shape", "numpy.finfo" ], [ "numpy.issubdtype" ], [ "tensorflow.compat.v1.compat.v1.nn.rnn_cell.LSTMCell", "tensorflow.compat.v1.nn.softplus", "tensorflow.compat.v1.unstack", "tensorflow.compat.v1.keras.layers.Activation", "tensorflow.compat.v1.keras.layers.Dense", "tensorflow.compat.v1.io.gfile.rmtree", "tensorflow.compat.v1.compat.v1.train.get_or_create_global_step", "tensorflow.compat.v1.keras.layers.Conv1D", "tensorflow.compat.v1.io.gfile.exists", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.train.Checkpoint", "tensorflow.compat.v1.io.gfile.makedirs", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.compat.v1.train.AdamOptimizer", "tensorflow.compat.v1.compat.v2.summary.create_file_writer", "tensorflow.compat.v1.keras.layers.BatchNormalization", "tensorflow.compat.v1.math.equal", "tensorflow.compat.v1.compat.v2.summary.scalar", "tensorflow.compat.v1.compat.v1.enable_eager_execution", "tensorflow.compat.v1.keras.layers.GlobalAveragePooling1D", "tensorflow.compat.v1.compat.v1.app.run", "tensorflow.compat.v1.zeros", "tensorflow.compat.v1.GradientTape", "tensorflow.compat.v1.keras.layers.Dropout", "tensorflow.compat.v1.squeeze" ], [ "tensorflow.compat.v2.transpose", "numpy.ones", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.GradientTape", "numpy.log", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.TensorShape" ] ]
danielgrassinger/yt_new_frontend
[ "5f91d2fb8721c4c5da0af543a6256ed979cd9fc9" ]
[ "yt/frontends/athena/io.py" ]
[ "\"\"\"\nThe data-file handling functions\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom yt.utilities.io_handler import \\\n BaseIOHandler\nimport numpy as np\nfrom yt.funcs import mylog, defaultdict\nfrom .data_structures import chk23\n\nfloat_size = {\"float\":np.dtype(\">f4\").itemsize,\n \"double\":np.dtype(\">f8\").itemsize}\n\naxis_list = [\"_x\",\"_y\",\"_z\"]\n\nclass IOHandlerAthena(BaseIOHandler):\n _dataset_type = \"athena\"\n _offset_string = 'data:offsets=0'\n _data_string = 'data:datatype=0'\n _read_table_offset = None\n\n def _field_dict(self,fhandle):\n keys = fhandle['field_types'].keys()\n val = fhandle['field_types'].keys()\n return dict(zip(keys,val))\n\n def _read_field_names(self,grid):\n pass\n\n def _read_chunk_data(self,chunk,fields):\n data = {}\n if len(chunk.objs) == 0: return data\n for grid in chunk.objs:\n if grid.filename is None:\n continue\n f = open(grid.filename, \"rb\")\n data[grid.id] = {}\n grid_dims = grid.ActiveDimensions\n read_dims = grid.read_dims.astype(\"int64\")\n grid_ncells = np.prod(read_dims)\n grid0_ncells = np.prod(grid.index.grids[0].read_dims)\n read_table_offset = get_read_table_offset(f)\n for field in fields:\n ftype, offsetr, dtype = grid.index._field_map[field]\n if grid_ncells != grid0_ncells:\n offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))\n if grid_ncells == grid0_ncells:\n offset = offsetr\n offset = int(offset) # Casting to be certain.\n file_offset = grid.file_offset[2]*read_dims[0]*read_dims[1]*float_size[dtype]\n xread = slice(grid.file_offset[0],grid.file_offset[0]+grid_dims[0])\n yread = slice(grid.file_offset[1],grid.file_offset[1]+grid_dims[1])\n f.seek(read_table_offset+offset+file_offset)\n if dtype == 'float':\n dt = '>f4'\n elif dtype == 'double':\n dt = '>f8'\n if ftype == 'scalar':\n f.seek(read_table_offset+offset+file_offset)\n v = np.fromfile(f, dtype=dt,\n count=grid_ncells).reshape(read_dims,order='F')\n if ftype == 'vector':\n vec_offset = axis_list.index(field[-1][-2:])\n f.seek(read_table_offset+offset+3*file_offset)\n v = np.fromfile(f, dtype=dt, count=3*grid_ncells)\n v = v[vec_offset::3].reshape(read_dims,order='F')\n if grid.ds.field_ordering == 1:\n data[grid.id][field] = v[xread,yread,:].T.astype(\"float64\")\n else:\n data[grid.id][field] = v[xread,yread,:].astype(\"float64\")\n f.close()\n return data\n \n def _read_data_slice(self, grid, field, axis, coord):\n sl = [slice(None), slice(None), slice(None)]\n sl[axis] = slice(coord, coord + 1)\n if grid.ds.field_ordering == 1:\n sl.reverse()\n return self._read_data_set(grid, field)[sl]\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n chunks = list(chunks)\n if any((ftype != \"athena\" for ftype, fname in fields)):\n raise NotImplementedError\n rv = {}\n for field in fields:\n rv[field] = np.empty(size, dtype=\"float64\")\n ng = sum(len(c.objs) for c in chunks)\n mylog.debug(\"Reading %s cells of %s fields in %s grids\",\n size, [f2 for f1, f2 in fields], ng)\n ind = 0\n for chunk in chunks:\n data = self._read_chunk_data(chunk, fields)\n for g in chunk.objs:\n for field in fields:\n ftype, fname = field\n ds = data[g.id].pop(field)\n nd = g.select(selector, ds, rv[field], ind) # 
caches\n ind += nd\n data.pop(g.id)\n return rv\n\ndef get_read_table_offset(f):\n line = f.readline()\n while True:\n splitup = line.strip().split()\n chkc = chk23('CELL_DATA')\n chkp = chk23('POINT_DATA')\n if chkc in splitup or chkp in splitup:\n f.readline()\n read_table_offset = f.tell()\n break\n line = f.readline()\n return read_table_offset\n\n\n" ]
[ [ "numpy.dtype", "numpy.fromfile", "numpy.empty", "numpy.prod" ] ]
nbingo/sMOOth
[ "aacdc5d24b931e534e984681923ec74f1103ca2f" ]
[ "src/configs/adult/adult_mlp_weighted.py" ]
[ "\"\"\"\nAn example config file to train a ImageNet classifier with detectron2.\nModel and dataloader both come from torchvision.\nThis shows how to use detectron2 as a general engine for any new models and tasks.\n\nTo run, use the following command:\n\npython tools/lazyconfig_train_net.py --config-file configs/Misc/torchvision_imagenet_R_50.py \\\n --num-gpus 8 dataloader.train.dataset.root=/path/to/imagenet/\n\n\"\"\"\n\nimport yaml\nimport torch\nfrom omegaconf import OmegaConf\nfrom fvcore.common.param_scheduler import CosineParamScheduler\n\nfrom detectron2.solver import WarmupParamScheduler\nfrom detectron2.solver.build import get_default_optimizer_params\nfrom detectron2.config import LazyConfig, LazyCall as L\nfrom detectron2.evaluation import DatasetEvaluators\n\nfrom src.configs.common.utils import build_data_loader\nfrom src.models.adult_mlp import IncomeClassifier\nfrom src.loaders.adult_loader import FeatDataset\nfrom src.metrics.evaluators import ClassificationAcc, BinaryEqualizedOddsViolation\nfrom src.metrics.losses import cross_entropy_loss, equalized_odds_violation, MultiObjectiveLoss\nfrom src.harnesses.harnesses import MultiProcessHarness, SimpleHarness\n\ndataloader = OmegaConf.create()\ndataloader.train = L(build_data_loader)(\n dataset=L(FeatDataset)(\n subset='train',\n income_const=yaml.load(open('/lfs/local/0/nomir/sMOOth/data/Adult/income.yml'), Loader=yaml.FullLoader)\n ),\n batch_size=256,\n num_workers=4,\n training=True,\n)\n\ndataloader.test = L(build_data_loader)(\n dataset=L(FeatDataset)(\n subset='val',\n income_const=yaml.load(open('/lfs/local/0/nomir/sMOOth/data/Adult/income.yml'), Loader=yaml.FullLoader)\n ),\n batch_size=256,\n num_workers=4,\n training=False,\n)\n\n# Can also be list of DatasetEvaluators\ndataloader.evaluator = L(DatasetEvaluators)(evaluators=(ClassificationAcc(), BinaryEqualizedOddsViolation()))\n\ntrain = LazyConfig.load(\"/lfs/local/0/nomir/sMOOth/src/configs/common/train.py\").train\ntrain.init_checkpoint = None\n# max_iter = number epochs * (train dataset size / batch size)\ntrain.max_iter = 50 * 30162 // 256\ntrain.eval_period = 30162 // 256\ntrain.loss_fn = L(MultiObjectiveLoss)(losses=[cross_entropy_loss, equalized_odds_violation])\ntrain.loss_tradeoff = torch.Tensor([0.5, 0.5])\n# Arguments for multiprocess training\ntrain.harness = SimpleHarness\ntrain.num_workers = 1\ntrain.gpus = [0] # TODO: Eventually want this to be a commandline arg\ntrain.process_over_key = 'model.loss_fn'\ntrain.process_over_vals = [cross_entropy_loss]\n\nmodel = L(IncomeClassifier)(\n in_dim=105,\n hidden_dim=105,\n num_hidden_blocks=2,\n drop_prob=0.2,\n out_dim=2,\n loss_fn=train.loss_fn,\n device=train.device,\n)\n\noptimizer = L(torch.optim.Adam)(\n params=L(get_default_optimizer_params)(),\n lr=1e-3,\n weight_decay=1e-4,\n)\n\nlr_multiplier = L(WarmupParamScheduler)(\n scheduler=L(CosineParamScheduler)(\n start_value=0.1,\n end_value=1e-4,\n ),\n warmup_length=1 / 100,\n warmup_factor=0.1,\n)\n" ]
[ [ "torch.Tensor" ] ]
FujitsuResearch/automatic_pruning
[ "b3bb525b736ca3e465cb6fb87f134748424a0fe5" ]
[ "examples/resnet34_imagenet/resnet34.py" ]
[ "# resnet34.py COPYRIGHT Fujitsu Limited 2022\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef zero_padding(x1, x2):\n num_ch1 = x1.size()[1]\n num_ch2 = x2.size()[1]\n ch_diff = num_ch1 - num_ch2\n # path1 < path2 : zero padding to path1 tensor\n if num_ch1 < num_ch2:\n ch_diff = -1 * ch_diff\n if ch_diff%2 ==0:\n x1 = F.pad(x1[:, :, :, :], (0, 0, 0, 0, ch_diff//2, ch_diff//2), \"constant\", 0)\n else:\n x1 = F.pad(x1[:, :, :, :], (0, 0, 0, 0, ch_diff//2, (ch_diff//2)+1), \"constant\", 0)\n # path1 > path2 : zero padding to path2 tensor\n elif num_ch1 > num_ch2:\n if ch_diff%2 ==0:\n x2 = F.pad(x2[:, :, :, :], (0, 0, 0, 0, ch_diff//2, ch_diff//2), \"constant\", 0)\n else:\n x2 = F.pad(x2[:, :, :, :], (0, 0, 0, 0, ch_diff//2, (ch_diff//2)+1), \"constant\", 0)\n return x1, x2\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation,\n )\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(\n self,\n inplanes,\n planes,\n stride=1,\n downsample=None,\n groups=1,\n base_width=64,\n dilation=1,\n norm_layer=None,\n n_in_channels=None,\n n_channels1=None,\n n_channels2=None,\n ):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError(\"BasicBlock only supports groups=1 and base_width=64\")\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(n_in_channels, n_channels1, stride)\n self.bn1 = norm_layer(n_channels1)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(n_channels1, n_channels2)\n self.bn2 = norm_layer(n_channels2)\n self.downsample = downsample #if dawnsample else downsample(n_in_channels, n_channels3)\n self.stride = stride\n\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out, identity = zero_padding(out, identity) # zero padding\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet34(nn.Module):\n def __init__(\n self,\n block=BasicBlock,\n layers=[3, 4, 6, 3],\n num_classes=1000,\n zero_init_residual=False,\n groups=1,\n width_per_group=64,\n replace_stride_with_dilation=None,\n norm_layer=None,\n ch_conv1=64,\n\n ch_l10_1=64,\n ch_l10_2=64,\n ch_l11_1=64,\n ch_l11_2=64,\n ch_l12_1=64,\n ch_l12_2=64,\n\n ch_l20_1=128,\n ch_l20_2=128,\n ch_l20_ds=128,\n ch_l21_1=128,\n ch_l21_2=128,\n ch_l22_1=128,\n ch_l22_2=128,\n ch_l23_1=128,\n ch_l23_2=128,\n\n ch_l30_1=256,\n ch_l30_2=256,\n ch_l30_ds=256,\n ch_l31_1=256,\n ch_l31_2=256,\n ch_l32_1=256,\n ch_l32_2=256,\n ch_l33_1=256,\n ch_l33_2=256,\n ch_l34_1=256,\n ch_l34_2=256,\n ch_l35_1=256,\n ch_l35_2=256,\n\n ch_l40_1=512,\n ch_l40_2=512,\n ch_l40_ds=512,\n ch_l41_1=512,\n ch_l41_2=512,\n ch_l42_1=512,\n ch_l42_2=512,\n ):\n super(ResNet34, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n 
if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\n \"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation)\n )\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, ch_conv1, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = norm_layer(ch_conv1)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n in_ch_l11 = max(ch_conv1, ch_l10_2)\n in_ch_l12 = max(in_ch_l11, ch_l11_2)\n self.layer1 = self._make_layer_3(block=block, planes=64, blocks=layers[0],\n n_in_channels0=ch_conv1,\n n_channels00=ch_l10_1,\n n_channels01=ch_l10_2,\n n_channels_ds=None,\n n_in_channels1=in_ch_l11,\n n_channels10=ch_l11_1,\n n_channels11=ch_l11_2,\n n_in_channels2=in_ch_l12,\n n_channels20=ch_l12_1,\n n_channels21=ch_l12_2,\n )\n\n in_ch_l20 = max(in_ch_l12, ch_l12_2)\n in_ch_l21 = max(ch_l20_ds, ch_l20_2)\n in_ch_l22 = max(in_ch_l21, ch_l21_2)\n in_ch_l23 = max(in_ch_l22, ch_l22_2) \n self.layer2 = self._make_layer_4(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0],\n n_in_channels0=in_ch_l20,\n n_channels00=ch_l20_1,\n n_channels01=ch_l20_2,\n n_channels_ds=ch_l20_ds,\n n_in_channels1=in_ch_l21,\n n_channels10=ch_l21_1,\n n_channels11=ch_l21_2,\n n_in_channels2=in_ch_l22,\n n_channels20=ch_l22_1,\n n_channels21=ch_l22_2,\n n_in_channels3=in_ch_l23,\n n_channels30=ch_l23_1,\n n_channels31=ch_l23_2,\n )\n\n in_ch_l30 = max(in_ch_l23, ch_l23_2)\n in_ch_l31 = max(ch_l30_ds, ch_l30_2)\n in_ch_l32 = max(in_ch_l31, ch_l31_2)\n in_ch_l33 = max(in_ch_l32, ch_l32_2)\n in_ch_l34 = max(in_ch_l33, ch_l33_2)\n in_ch_l35 = max(in_ch_l34, ch_l34_2)\n self.layer3 = self._make_layer_6(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1],\n n_in_channels0=in_ch_l30,\n n_channels00=ch_l30_1,\n n_channels01=ch_l30_2,\n n_channels_ds=ch_l30_ds,\n n_in_channels1=in_ch_l31,\n n_channels10=ch_l31_1,\n n_channels11=ch_l31_2,\n n_in_channels2=in_ch_l32,\n n_channels20=ch_l32_1,\n n_channels21=ch_l32_2,\n n_in_channels3=in_ch_l33,\n n_channels30=ch_l33_1,\n n_channels31=ch_l33_2,\n n_in_channels4=in_ch_l34,\n n_channels40=ch_l34_1,\n n_channels41=ch_l34_2,\n n_in_channels5=in_ch_l35,\n n_channels50=ch_l35_1,\n n_channels51=ch_l35_2,\n )\n\n in_ch_l40 = max(in_ch_l35, ch_l35_2)\n in_ch_l41 = max(ch_l40_ds, ch_l40_2)\n in_ch_l42 = max(in_ch_l41, ch_l41_2)\n self.layer4 = self._make_layer_3(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2],\n n_in_channels0=in_ch_l40,\n n_channels00=ch_l40_1,\n n_channels01=ch_l40_2,\n n_channels_ds=ch_l40_ds,\n n_in_channels1=in_ch_l41,\n n_channels10=ch_l41_1,\n n_channels11=ch_l41_2,\n n_in_channels2=in_ch_l42,\n n_channels20=ch_l42_1,\n n_channels21=ch_l42_2,\n )\n\n in_ch_fc = max(in_ch_l42, ch_l42_2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(in_ch_fc, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each 
residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer_3(self, block, planes, blocks, stride=1, dilate=False,\n n_in_channels0=None,\n n_channels00=None, n_channels01=None,\n n_channels_ds=None,\n n_in_channels1=None,\n n_channels10=None, n_channels11=None,\n n_in_channels2=None,\n n_channels20=None, n_channels21=None,\n ):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential( conv1x1(n_in_channels0, n_channels_ds, stride), norm_layer(n_channels_ds) )\n\n self.inplanes = planes * block.expansion\n layers = []\n\n # layer_0\n layers.append(\n block(\n self.inplanes,\n planes,\n stride,\n downsample,\n self.groups,\n self.base_width,\n previous_dilation,\n norm_layer,\n n_in_channels=n_in_channels0,\n n_channels1=n_channels00,\n n_channels2=n_channels01,\n )\n )\n # layer_1\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels1,\n n_channels1=n_channels10,\n n_channels2=n_channels11,\n )\n )\n # layer_2\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels2,\n n_channels1=n_channels20,\n n_channels2=n_channels21,\n )\n )\n return nn.Sequential(*layers)\n\n\n def _make_layer_4(self, block, planes, blocks, stride=1, dilate=False,\n n_in_channels0=None,\n n_channels00=None, n_channels01=None,\n n_channels_ds=None,\n n_in_channels1=None,\n n_channels10=None, n_channels11=None,\n n_in_channels2=None,\n n_channels20=None, n_channels21=None,\n n_in_channels3=None,\n n_channels30=None, n_channels31=None,\n ):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential( conv1x1(n_in_channels0, n_channels_ds, stride), norm_layer(n_channels_ds) )\n\n self.inplanes = planes * block.expansion\n layers = []\n\n # layer_0\n layers.append(\n block(\n self.inplanes,\n planes,\n stride,\n downsample,\n self.groups,\n self.base_width,\n previous_dilation,\n norm_layer,\n n_in_channels=n_in_channels0,\n n_channels1=n_channels00,\n n_channels2=n_channels01,\n )\n )\n # layer_1\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels1,\n n_channels1=n_channels10,\n n_channels2=n_channels11,\n )\n )\n # layer_2\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels2,\n n_channels1=n_channels20,\n n_channels2=n_channels21,\n )\n )\n # layer_3\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels3,\n n_channels1=n_channels30,\n 
n_channels2=n_channels31,\n )\n )\n return nn.Sequential(*layers)\n\n\n def _make_layer_6(self, block, planes, blocks, stride=1, dilate=False,\n n_in_channels0=None,\n n_channels00=None, n_channels01=None,\n n_channels_ds=None,\n n_in_channels1=None,\n n_channels10=None, n_channels11=None,\n n_in_channels2=None,\n n_channels20=None, n_channels21=None,\n n_in_channels3=None,\n n_channels30=None, n_channels31=None,\n n_in_channels4=None,\n n_channels40=None, n_channels41=None,\n n_in_channels5=None,\n n_channels50=None, n_channels51=None,\n ):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential( conv1x1(n_in_channels0, n_channels_ds, stride), norm_layer(n_channels_ds) )\n\n self.inplanes = planes * block.expansion\n layers = []\n\n # layer_0\n layers.append(\n block(\n self.inplanes,\n planes,\n stride,\n downsample,\n self.groups,\n self.base_width,\n previous_dilation,\n norm_layer,\n n_in_channels=n_in_channels0,\n n_channels1=n_channels00,\n n_channels2=n_channels01,\n )\n )\n # layer_1\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels1,\n n_channels1=n_channels10,\n n_channels2=n_channels11,\n )\n )\n # layer_2\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels2,\n n_channels1=n_channels20,\n n_channels2=n_channels21,\n )\n )\n # layer_3\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels3,\n n_channels1=n_channels30,\n n_channels2=n_channels31,\n )\n )\n # layer_4\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels4,\n n_channels1=n_channels40,\n n_channels2=n_channels41,\n )\n )\n # layer_5\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n n_in_channels=n_in_channels5,\n n_channels1=n_channels50,\n n_channels2=n_channels51,\n )\n )\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.reshape(x.size(0), -1)\n x = self.fc(x)\n\n return x\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.init.kaiming_normal_", "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.pad", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU" ] ]
hobinkwak/Stock-Movements-Classification
[ "dac2e90d9ef2294f5c4dc8f6605b9051c71b3f45" ]
[ "utils/dataload.py" ]
[ "from itertools import combinations\nimport pandas as pd\n\nfrom utils.utils import *\n\n\ndef load_etf():\n etf_data = pd.read_csv(\n \"data/etf_data.csv\", encoding=\"euc_kr\", parse_dates=[\"tdate\"]\n )\n etf_ohlcv = etf_data.set_index([\"tdate\", \"etf_code\", \"data_name\"])[\n \"value\"\n ].unstack()\n etf_close = etf_ohlcv[\"종가\"].unstack()\n return etf_close\n\ndef load_macro_data():\n macro_data = pd.read_csv('외부데이터/macro_final.csv', index_col='Item Name').iloc[1:, :]\n macro_data.index = pd.to_datetime(macro_data.index)\n macro_data = macro_data.fillna(method='ffill')\n macro_data = (macro_data.resample('m').last() / macro_data.resample('m').first())\n\n macro_data.columns = ['FOMC정책금리', '한국정책금리', '중국정책금리', '미국국채_1m', '미국국채_3m', '미국국채_6m', '미국국채_1y', '미국국채_5y',\n '미국국채_10y', '리보_달러_1m', '리보_달러_1y', '리보_달러_3m', '리보_달러_6m', '리보_달러_1w',\n 'DDR4 16G (2G*8) 2666 MHZ', 'NAND 16Gb 2Gx8 SLC', 'DDR4 16G (2G*8) eTT MHZ',\n 'DDR3 4Gb 512Mx8 1600/1866Mbps', 'DDR3 4Gb 512Mx8 eTT',\n 'NAND 8Gb 1Gx8 SLC', 'NAND 64Gb 8Gx8 MLC', 'WTI_1M', 'BRENT_1M', 'DUBAI_ASIA1M',\n '난방유_선물_NYMEX', '천연가스_선물_NYMEX', '가스오일_선물_IPE', '천연가스_선물_IPE', '금_선물', '은_선물', '알루미늄_선물',\n '전기동_선물', '납_선물', '니켈_선물', '주석_선물', '아연_선물', '10YR BEI', 'T10Y2Y', 'DFF',\n 'HY Ef Yield', 'Trade DI', 'VIX', 'USDKRW', 'Eco Policy Uncertainty']\n\n macro_data = macro_data[\n ['FOMC정책금리', '한국정책금리', '중국정책금리', '미국국채_1m', '미국국채_3m', '미국국채_6m', '미국국채_1y', '미국국채_5y', '미국국채_10y', '리보_달러_1m',\n '리보_달러_1y', '리보_달러_3m', '리보_달러_6m', '리보_달러_1w', 'DDR3 4Gb 512Mx8 eTT',\n 'NAND 8Gb 1Gx8 SLC', 'WTI_1M', 'BRENT_1M', 'DUBAI_ASIA1M', '난방유_선물_NYMEX', '천연가스_선물_NYMEX', '가스오일_선물_IPE',\n '천연가스_선물_IPE', '금_선물', '은_선물', '알루미늄_선물', '전기동_선물', '납_선물', '니켈_선물', '주석_선물', '아연_선물', '10YR BEI', 'T10Y2Y',\n 'HY Ef Yield', 'Trade DI', 'VIX', 'USDKRW', 'Eco Policy Uncertainty']]\n return macro_data\n\n\n\ndef load_wics_data():\n WICS대_exposure = process_wics_data(\"./외부데이터/ETF별 업종 exposure.csv\")\n WICS업종 = process_wics_data(\"./외부데이터/WICS 업종별 투자정보 데이터.csv\")\n WICS대 = WICS업종[\n [\n \"에너지\",\n \"소재\",\n \"산업재\",\n \"경기관련소비재\",\n \"필수소비재\",\n \"건강관리\",\n \"금융\",\n \"IT\",\n \"커뮤니케이션서비스\",\n \"유틸리티\",\n ]\n ]\n WICS대 = WICS대.T.drop_duplicates().T\n return WICS대, WICS대_exposure\n\n\n\ndef features_from_wics(wics):\n \"\"\"\n wics : WICS대 (from load_wics_data())\n \"\"\"\n wics_price = wics.xs(\"종가지수\", level=1, axis=1)\n momentums = get_moving_features(wics_price, type='price')\n\n wics_trd_volume = wics.xs(\"거래대금\", level=1, axis=1)\n trd_volumes = get_moving_features(wics_trd_volume, type='volume')\n wics_retail_volume = wics.xs(\"개인 순매수대금(일간)\", level=1, axis=1).fillna(0)\n retail_volumes = get_moving_features(wics_retail_volume, type='volume')\n wics_for_volume = wics.xs(\"외국인총합계순매수대금(일간)\", level=1, axis=1).fillna(0)\n for_volumes = get_moving_features(wics_for_volume, type='volume')\n wics_inst_volume = wics.xs(\"기관 순매수대금(일간)\", level=1,axis=1).fillna(0)\n inst_volumes = get_moving_features(wics_inst_volume, type='volume')\n\n wics_pe = wics.xs(\"P/E(FY0)\", level=1,axis=1)\n pe_scale = wics_pe.resample('M').last().apply(lambda X: minmaxscale(X), axis=1)\n\n wics_fwd_pe = wics.xs(\"P/E(Fwd.12M)\", level=1,axis=1)\n fwd_pe_changes = get_moving_features(wics_fwd_pe, type='fwd')\n wics_fwd_eps = wics.xs(\"EPS(Fwd.12M, 지배)\", level=1,axis=1)\n fwd_eps_changes =get_moving_features(wics_fwd_eps, type='fwd')\n\n size_ = wics.xs(\"시가총액\", level=1,axis=1).resample('M').last()\n\n features = {\n \"macro\": load_macro_data(),\n \"size\": size_,\n \"mom_1m\": momentums[0],\n 
\"mom_3m\": momentums[1],\n \"mom_6m\": momentums[2],\n \"mom_1y\": momentums[3],\n \"trd_1m\": trd_volumes[0],\n \"trd_3m\": trd_volumes[1],\n \"trd_6m\": trd_volumes[2],\n \"trd_1y\": trd_volumes[3],\n \"retail_trd_1m\": retail_volumes[0],\n \"retail_trd_3m\": retail_volumes[1],\n \"retail_trd_6m\": retail_volumes[2],\n \"retail_trd_1y\": retail_volumes[3],\n \"for_trd_1m\": for_volumes[0],\n \"for_trd_3m\": for_volumes[1],\n \"for_trd_6m\": for_volumes[2],\n \"for_trd_1y\": for_volumes[3],\n \"inst_trd_1m\": inst_volumes[0],\n \"inst_trd_3m\": inst_volumes[1],\n \"inst_trd_6m\": inst_volumes[2],\n \"inst_trd_1y\": inst_volumes[3],\n \"fwd_pe_1m\": fwd_pe_changes[0],\n \"fwd_pe_3m\": fwd_pe_changes[1],\n \"fwd_eps_1m\": fwd_eps_changes[0],\n \"fwd_eps_3m\": fwd_eps_changes[1],\n \"pe\": pe_scale,\n }\n\n return wics_price, features\n\n\ndef combination_set(pair, start, end, price, features):\n \"\"\"\n :param pair: WICS대분류 pair\n :param start: 기간\n :param end: 기간\n :param price: wics_prices (from features_from_wics())\n :param features: features (from features_from_wics())\n \"\"\"\n comb_price = price[list(pair)]\n comb_ret = (comb_price.resample('m').last() / comb_price.resample('m').first()).loc[start:end]\n\n feature_table = features['macro'].loc[start:end]\n for key in list(features.keys())[1:6]:\n feature_table[key] = features[key].apply(lambda x: (x[pair[0]] / x[pair[1]]), axis=1).loc[start:end]\n for key in list(features.keys())[6:]:\n feature_table[key] = features[key].apply(lambda x: (x[pair[0]] - x[pair[1]]), axis=1).loc[start:end]\n\n comb_ret['winner'] = comb_ret.apply(\n lambda x: comb_ret.columns[0] if (x[comb_ret.columns[0]] > x[comb_ret.columns[1]]) else comb_ret.columns[1],\n axis=1)\n\n feature_table = feature_table.replace([-np.inf, np.inf], np.nan).fillna(method='ffill')\n comb_ret = comb_ret.replace([-np.inf, np.inf], np.nan).fillna(method='ffill')\n\n feature_table = feature_table.shift(1).iloc[1:]\n comb_ret = comb_ret.iloc[1:]\n\n X_data = feature_table\n y_data = comb_ret[['winner']].astype('category')\n\n return X_data, y_data\n\ndef load_dataset():\n WICS대,_ = load_wics_data()\n price, features = features_from_wics(WICS대)\n columns = ['에너지', '소재', '산업재', '경기관련소비재', '필수소비재', '건강관리', '금융', 'IT', '커뮤니케이션서비스', '유틸리티']\n pairs = list(combinations(columns, 2))\n total_dataset = {pair : combination_set(pair,'2011-12','2021-05', price, features) for pair in pairs}\n return total_dataset\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
haideraltahan/datasets
[ "aad5c7ea705949d20817fcc49a892bb2a21532f0" ]
[ "tensorflow_datasets/testing/starcraft.py" ]
[ "# coding=utf-8\n# Copyright 2019 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tool for preparing test example of Starcraft dataset.\n\n\n./starcraft --resolution=64 --output_file=test.tfrecords\n./starcraft --resolution=64 --output_file=train_0.tfrecords\n./starcraft --resolution=64 --output_file=train_1.tfrecords\n./starcraft --resolution=64 --output_file=valid.tfrecords\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport png\nimport six\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\"resolution\", 64, \"Resolution of the video.\")\nflags.DEFINE_string(\"output_file\", None, \"Path to the output file.\")\n\n\ndef main(argv):\n if len(argv) > 1:\n raise tf.app.UsageError(\"Too many command-line arguments.\")\n\n writer = tf.io.TFRecordWriter(FLAGS.output_file)\n\n feature_list = {}\n frame_list = []\n for _ in range(20):\n # generate 20 frames.\n png_image = six.StringIO()\n png.from_array(\n np.random.randint(\n low=0,\n high=255,\n size=(FLAGS.resolution, FLAGS.resolution, 3),\n dtype=np.uint8), \"RGB\").save(png_image)\n frame_list.append(\n tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[png_image.getvalue()])))\n png_image.close()\n\n feature_list[\"rgb_screen\"] = tf.train.FeatureList(feature=frame_list)\n\n context_feature = {}\n context_feature[\"game_duration_loops\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[20]))\n context_feature[\"game_duration_seconds\"] = tf.train.Feature(\n float_list=tf.train.FloatList(value=[20.0]))\n context_feature[\"n_steps\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[20]))\n context_feature[\"screen_size\"] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[FLAGS.resolution, FLAGS.resolution]))\n\n example = tf.train.SequenceExample(\n feature_lists=tf.train.FeatureLists(feature_list=feature_list),\n context=tf.train.Features(feature=context_feature))\n writer.write(example.SerializeToString())\n writer.close()\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "tensorflow.train.FeatureLists", "tensorflow.io.TFRecordWriter", "tensorflow.app.UsageError", "tensorflow.train.Int64List", "tensorflow.train.FloatList", "tensorflow.train.Features", "numpy.random.randint", "tensorflow.train.FeatureList" ] ]
zhao-david/ACORE-LFI
[ "91de88b77f0be110e42ed91bbb7a50b7ca83319a" ]
[ "acore/classifier_cov_pow_toy_pvalue.py" ]
[ "from warnings import simplefilter\nsimplefilter(action='ignore', category=FutureWarning)\n\nimport numpy as np\nimport argparse\nimport pandas as pd\nfrom tqdm.auto import tqdm\nfrom datetime import datetime\nfrom sklearn.metrics import log_loss\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom utils.functions import train_clf, compute_statistics_single_t0, clf_prob_value, compute_bayesfactor_single_t0, \\\n odds_ratio_loss, train_pvalue_clf\nfrom models.toy_poisson import ToyPoissonLoader\nfrom models.toy_gmm import ToyGMMLoader\nfrom models.toy_gamma import ToyGammaLoader\nfrom or_classifiers.toy_example_list import classifier_dict, classifier_dict_mlpcomp, classifier_pvalue_dict\n\nmodel_dict = {\n 'poisson': ToyPoissonLoader,\n 'gmm': ToyGMMLoader,\n 'gamma': ToyGammaLoader\n}\n\n\ndef main(run, rep, b, b_prime, alpha, t0_val, sample_size_obs, test_statistic, mlp_comp=False,\n monte_carlo_samples=500, debug=False, seed=7, size_check=1000, verbose=False, marginal=False,\n size_marginal=1000, guided_sim=False, guided_sample=1000, empirical_marginal=True):\n\n # Changing values if debugging\n b = b if not debug else 100\n b_prime = b_prime if not debug else 100\n size_check = size_check if not debug else 100\n rep = rep if not debug else 2\n model_obj = model_dict[run](marginal=marginal, size_marginal=size_marginal, empirical_marginal=empirical_marginal)\n classifier_dict_run = classifier_dict_mlpcomp if mlp_comp else classifier_dict\n\n # Get the correct functions\n msnh_sampling_func = model_obj.sample_msnh_algo5\n grid_param = model_obj.grid\n gen_obs_func = model_obj.sample_sim\n gen_sample_func = model_obj.generate_sample\n gen_param_fun = model_obj.sample_param_values\n t0_grid = model_obj.pred_grid\n tp_func = model_obj.compute_exact_prob\n\n # Creating sample to check entropy about\n np.random.seed(seed)\n sample_check = gen_sample_func(sample_size=size_check, marginal=marginal)\n theta_vec = sample_check[:, :model_obj.d]\n x_vec = sample_check[:, (model_obj.d + 1):]\n bern_vec = sample_check[:, model_obj.d]\n\n true_prob_vec = tp_func(theta_vec=theta_vec, x_vec=x_vec)\n entropy_est = -np.average([np.log(true_prob_vec[kk]) if el == 1\n else np.log(1 - true_prob_vec[kk])\n for kk, el in enumerate(bern_vec)])\n\n # Loop over repetitions and classifiers\n # Each time we train the different classifiers, we build the intervals and we record\n # whether the point is in or not.\n out_val = []\n out_cols = ['test_statistic', 'b_prime', 'b', 'classifier', 'classifier_pvalue', 'run', 'rep', 'sample_size_obs',\n 'cross_entropy_loss', 'cross_entropy_loss_pvalue', 't0_true_val', 'theta_0_current', 'on_true_t0',\n 'estimated_pvalue', 'in_confint', 'out_confint', 'size_CI', 'true_entropy', 'or_loss_value',\n 'monte_carlo_samples', 'guided_sim', 'empirical_marginal', 'guided_sample']\n pbar = tqdm(total=rep, desc='Toy Example for Simulations, n=%s, b=%s' % (sample_size_obs, b))\n rep_counter = 0\n not_update_flag = False\n while rep_counter < rep:\n # Generates samples for each t0 values, so to be able to check both coverage and power\n x_obs = gen_obs_func(sample_size=sample_size_obs, true_param=t0_val)\n\n # Train the classifier for the odds\n clf_odds_fitted = {}\n clf_pvalue_fitted = {}\n for clf_name, clf_model in sorted(classifier_dict_run.items(), key=lambda x: x[0]):\n clf_odds = train_clf(sample_size=b, clf_model=clf_model, gen_function=gen_sample_func,\n clf_name=clf_name, nn_square_root=True)\n if verbose:\n print('----- %s Trained' % clf_name)\n\n if test_statistic == 
'acore':\n tau_obs = np.array([\n compute_statistics_single_t0(\n clf=clf_odds, obs_sample=x_obs, t0=theta_0, grid_param_t1=grid_param,\n d=model_obj.d, d_obs=model_obj.d_obs) for theta_0 in t0_grid])\n elif test_statistic == 'avgacore':\n tau_obs = np.array([\n compute_bayesfactor_single_t0(\n clf=clf_odds, obs_sample=x_obs, t0=theta_0, gen_param_fun=gen_param_fun,\n d=model_obj.d, d_obs=model_obj.d_obs, log_out=False) for theta_0 in t0_grid])\n elif test_statistic == 'logavgacore':\n tau_obs = np.array([\n compute_bayesfactor_single_t0(\n clf=clf_odds, obs_sample=x_obs, t0=theta_0, gen_param_fun=gen_param_fun,\n d=model_obj.d, d_obs=model_obj.d_obs, log_out=True) for theta_0 in t0_grid])\n else:\n raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'\n ' Currently %s' % test_statistic)\n\n # Calculating cross-entropy\n est_prob_vec = clf_prob_value(clf=clf_odds, x_vec=x_vec, theta_vec=theta_vec, d=model_obj.d,\n d_obs=model_obj.d_obs)\n loss_value = log_loss(y_true=bern_vec, y_pred=est_prob_vec)\n\n # Calculating or loss\n or_loss_value = odds_ratio_loss(clf=clf_odds, x_vec=x_vec, theta_vec=theta_vec,\n bern_vec=bern_vec, d=1, d_obs=1)\n clf_odds_fitted[clf_name] = (tau_obs, loss_value, or_loss_value)\n\n # Train the P-value regression algorithm for confidence levels\n\n if guided_sim:\n # Commenting the above -- we now sample a set of thetas from the parameter (of size guided_sample)\n # budget, then resample them according to the odds values, fit a gaussian and then sample the\n # datasets from that.\n theta_mat_sample = gen_param_fun(sample_size=guided_sample)\n\n if test_statistic == 'acore':\n stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,\n func1d=lambda row: compute_statistics_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row,\n grid_param_t1=grid_param,\n d=model_obj.d,\n d_obs=model_obj.d_obs\n ))\n elif test_statistic == 'avgacore':\n stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row,\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples\n ))\n elif test_statistic == 'logavgacore':\n stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row,\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples,\n log_out=True\n ))\n else:\n raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'\n ' Currently %s' % test_statistic)\n\n # If there are log-odds, then some of the values might be negative, so we need to exponentiate them\n # so to make sure that the large negative numbers are counted correctly (i.e. 
as very low probability,\n # not probabilities with large magnitudes).\n if test_statistic in ['acore', 'logavgacore']:\n stats_sample = np.exp(stats_sample)\n stats_sample = stats_sample/np.sum(stats_sample)\n theta_mat_gaussian_fit = np.random.choice(a=theta_mat_sample, p=stats_sample.reshape(-1, ),\n size=guided_sample)\n std_gaussian_fit = np.std(theta_mat_gaussian_fit) if np.std(theta_mat_gaussian_fit) == 0.0 else 1.0\n theta_mat = np.clip(\n a=np.random.normal(size=b_prime, loc=np.mean(theta_mat_gaussian_fit),\n scale=std_gaussian_fit),\n a_min=model_obj.low_int, a_max=model_obj.high_int)\n sample_mat = np.apply_along_axis(arr=theta_mat.reshape(-1, 1), axis=1,\n func1d=lambda row: gen_obs_func(sample_size=sample_size_obs,\n true_param=row))\n else:\n # Generate a matrix with values for both the sampled thetas as the actual samples\n theta_mat, sample_mat = msnh_sampling_func(b_prime=b_prime, sample_size=sample_size_obs)\n\n full_mat = np.hstack((theta_mat.reshape(-1, 1), sample_mat))\n if test_statistic == 'acore':\n stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_statistics_single_t0(\n clf=clf_odds,\n obs_sample=row[model_obj.d:],\n t0=row[:model_obj.d],\n grid_param_t1=grid_param,\n d=model_obj.d,\n d_obs=model_obj.d_obs\n ))\n stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_statistics_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row[:model_obj.d],\n grid_param_t1=grid_param,\n d=model_obj.d,\n d_obs=model_obj.d_obs\n ))\n elif test_statistic == 'avgacore':\n stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=row[model_obj.d:],\n t0=row[:model_obj.d],\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples\n ))\n stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row[:model_obj.d],\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples\n ))\n elif test_statistic == 'logavgacore':\n stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=row[model_obj.d:],\n t0=row[:model_obj.d],\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples,\n log_out=True\n ))\n stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,\n func1d=lambda row: compute_bayesfactor_single_t0(\n clf=clf_odds,\n obs_sample=x_obs,\n t0=row[:model_obj.d],\n gen_param_fun=gen_param_fun,\n d=model_obj.d,\n d_obs=model_obj.d_obs,\n monte_carlo_samples=monte_carlo_samples,\n log_out=True\n ))\n else:\n raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'\n ' Currently %s' % test_statistic)\n\n if np.any(np.isnan(stats_mat_generated)) or not np.all(np.isfinite(stats_mat_generated)) or \\\n np.any(np.isnan(stats_mat_observed)) or not np.all(np.isfinite(stats_mat_observed)):\n not_update_flag = True\n break\n\n # Comparing the two vectors of values\n clf_pvalue_fitted[clf_name] = {}\n indicator_vec = np.greater(stats_mat_observed, stats_mat_generated).astype(int)\n for clf_name_pvalue, clf_model_pvalue in sorted(classifier_pvalue_dict.items(), key=lambda x: x[0]):\n\n # If there the indicator_vec is either all 0 or all 1, 
do not fit a classifier or sklearn will throw\n # an error out. Just return the class.\n if sum(indicator_vec) <= 1 or sum(indicator_vec) >= len(indicator_vec) - 1:\n pval_pred = np.repeat(sum(indicator_vec) / len(indicator_vec), b_prime)\n loss_value_pval = np.nan\n else:\n clf_pvalue = train_pvalue_clf(clf_model=clf_model_pvalue, X=theta_mat.reshape(-1, model_obj.d),\n y=indicator_vec.reshape(-1, ), clf_name=clf_name_pvalue,\n nn_square_root=True)\n pval_pred = clf_pvalue.predict_proba(t0_grid.reshape(-1, model_obj.d))[:, 1]\n theta_mat_pred = clf_pvalue.predict_proba(theta_mat.reshape(-1, model_obj.d))[:, 1]\n loss_value_pval = log_loss(y_true=indicator_vec, y_pred=theta_mat_pred)\n clf_pvalue_fitted[clf_name][clf_name_pvalue] = (pval_pred, loss_value_pval)\n\n # If there were some problems in calculating the statistics, get out of the loop\n if not_update_flag:\n not_update_flag = False\n continue\n\n # At this point all it's left is to record\n for clf_name, (tau_obs_val, cross_ent_loss, or_loss_value) in clf_odds_fitted.items():\n for clf_name_qr, (pvalue_val, pvalue_celoss_val) in clf_pvalue_fitted[clf_name].items():\n size_temp = np.mean((pvalue_val > alpha).astype(int))\n for kk, theta_0_current in enumerate(t0_grid):\n out_val.append([\n test_statistic, b_prime, b, clf_name, clf_name_qr, run, rep_counter, sample_size_obs,\n cross_ent_loss, pvalue_celoss_val, t0_val, theta_0_current, int(t0_val == theta_0_current),\n pvalue_val[kk], int(pvalue_val[kk] > alpha),\n int(pvalue_val[kk] <= alpha), size_temp, entropy_est, or_loss_value,\n monte_carlo_samples, int(guided_sim), int(empirical_marginal), guided_sample\n ])\n pbar.update(1)\n rep_counter += 1\n\n # Saving the results\n out_df = pd.DataFrame.from_records(data=out_val, index=range(len(out_val)), columns=out_cols)\n out_dir = 'sims/classifier_cov_pow_toy/'\n out_filename = 'classifier_reps_cov_pow_toy_pvalues_%steststats_%s_%sB_%sBprime_%s_%srep_alpha%s_sampleobs%s_t0val%s%s_%s.csv' % (\n test_statistic, 'mlp_comp' if mlp_comp else 'toyclassifiers', b, b_prime, run, rep,\n str(alpha).replace('.', '-'), sample_size_obs,\n str(t0_val).replace('.', '-'),\n '_empirmarg' if empirical_marginal else '',\n datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')\n )\n out_df.to_csv(out_dir + out_filename)\n\n # Print results\n cov_df = out_df[out_df['on_true_t0'] == 1][['classifier', 'classifier_pvalue', 'in_confint',\n 'cross_entropy_loss', 'cross_entropy_loss_pvalue', 'size_CI']]\n print(cov_df.groupby(['classifier', 'classifier_pvalue']).agg({'in_confint': [np.average],\n 'size_CI': [np.average, np.std],\n 'cross_entropy_loss': [np.average],\n 'cross_entropy_loss_pvalue': [np.average]}))\n\n # Power plots\n out_df['class_combo'] = out_df[['classifier', 'classifier_pvalue']].apply(lambda x: x[0] + '---' + x[1], axis = 1)\n plot_df = out_df[['class_combo', 'theta_0_current', 'out_confint']].groupby(\n ['class_combo', 'theta_0_current']).mean().reset_index()\n fig = plt.figure(figsize=(20, 10))\n sns.lineplot(x='theta_0_current', y='out_confint', hue='class_combo', data=plot_df, palette='cubehelix')\n plt.legend(loc='best', fontsize=25)\n plt.xlabel(r'$\\theta$', fontsize=25)\n plt.ylabel('Power', fontsize=25)\n plt.title(\"Power of Hypothesis Test, B=%s, B'=%s, n=%s, %s\" % (\n b, b_prime, sample_size_obs, run.title()), fontsize=25)\n out_dir = 'images/classifier_cov_pow_toy/'\n outfile_name = 'power_classifier_reps_pvalue_%steststats_%sB_%sBprime_%s_%srep_alpha%s_sampleobs%s_t0val%s_%s.pdf' % (\n test_statistic, b, b_prime, run, rep, 
str(alpha).replace('.', '-'), sample_size_obs,\n str(t0_val).replace('.', '-'),\n datetime.strftime(datetime.today(), '%Y-%m-%d')\n )\n plt.tight_layout()\n plt.savefig(out_dir + outfile_name)\n plt.close()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--seed', action=\"store\", type=int, default=7,\n help='Random State')\n parser.add_argument('--rep', action=\"store\", type=int, default=10,\n help='Number of Repetitions for calculating the Pinball loss')\n parser.add_argument('--b', action=\"store\", type=int, default=5000,\n help='Sample size to train the classifier for calculating odds')\n parser.add_argument('--b_prime', action=\"store\", type=int, default=1000,\n help='Sample size to train the quantile regression algorithm')\n parser.add_argument('--marginal', action='store_true', default=False,\n help='Whether we are using a parametric approximation of the marginal or'\n 'the baseline reference G')\n parser.add_argument('--alpha', action=\"store\", type=float, default=0.1,\n help='Statistical confidence level')\n parser.add_argument('--run', action=\"store\", type=str, default='poisson',\n help='Problem to run')\n parser.add_argument('--debug', action='store_true', default=False,\n help='If true, a very small value for the sample sizes is fit to make sure the'\n 'file can run quickly for debugging purposes')\n parser.add_argument('--verbose', action='store_true', default=False,\n help='If true, logs are printed to the terminal')\n parser.add_argument('--sample_size_obs', action=\"store\", type=int, default=10,\n help='Sample size of the actual observed data.')\n parser.add_argument('--t0_val', action=\"store\", type=float, default=10.0,\n help='True parameter which generates the observed dataset')\n parser.add_argument('--size_marginal', action=\"store\", type=int, default=1000,\n help='Sample size of the actual marginal distribution, if marginal is True.')\n parser.add_argument('--monte_carlo_samples', action=\"store\", type=int, default=500,\n help='Sample size for the calculation of the avgacore and logavgacore statistic.')\n parser.add_argument('--test_statistic', action=\"store\", type=str, default='acore',\n help='Test statistic to compute confidence intervals. Can be acore|avgacore|logavgacore')\n parser.add_argument('--mlp_comp', action='store_true', default=False,\n help='If true, we compare different MLP training algorithm.')\n parser.add_argument('--empirical_marginal', action='store_true', default=False,\n help='Whether we are sampling directly from the empirical marginal for G')\n parser.add_argument('--guided_sim', action='store_true', default=False,\n help='If true, we guided the sampling for the B prime in order to get meaningful results.')\n parser.add_argument('--guided_sample', action=\"store\", type=int, default=2500,\n help='The sample size to be used for the guided simulation. 
Only used if guided_sim is True.')\n argument_parsed = parser.parse_args()\n\n # b_vec = [100, 500, 1000]\n # for b_val in b_vec:\n main(\n run=argument_parsed.run,\n rep=argument_parsed.rep,\n marginal=argument_parsed.marginal,\n b=argument_parsed.b,\n b_prime=argument_parsed.b_prime,\n alpha=argument_parsed.alpha,\n debug=argument_parsed.debug,\n sample_size_obs=argument_parsed.sample_size_obs,\n t0_val=argument_parsed.t0_val,\n seed=argument_parsed.seed,\n verbose=argument_parsed.verbose,\n size_marginal=argument_parsed.size_marginal,\n monte_carlo_samples=argument_parsed.monte_carlo_samples,\n test_statistic=argument_parsed.test_statistic,\n mlp_comp=argument_parsed.mlp_comp,\n empirical_marginal=argument_parsed.empirical_marginal,\n guided_sim=argument_parsed.guided_sim,\n guided_sample=argument_parsed.guided_sample\n )\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.savefig", "numpy.random.seed", "numpy.greater", "numpy.exp", "numpy.log", "matplotlib.pyplot.ylabel", "sklearn.metrics.log_loss", "matplotlib.pyplot.close", "numpy.isnan", "numpy.mean", "numpy.std", "matplotlib.pyplot.xlabel", "numpy.isfinite" ] ]
onlyrico/AliceMind
[ "a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded" ]
[ "StructVBERT/tasks/vqa.py" ]
[ "# coding=utf-8\n# Copyleft 2019 project LXRT.\n\nimport os\nimport collections\n\nimport torch\nimport torch.nn as nn\nimport logging\nfrom torch.utils.data.dataloader import DataLoader\nfrom tqdm import tqdm\n\nfrom param import args\nfrom lxrt.qa_answer_table import load_lxmert_qa\nfrom tasks.vqa_model import VQAModel\nfrom tasks.vqa_data import VQADataset, VQATorchDataset, VQAEvaluator\n\nDataTuple = collections.namedtuple(\"DataTuple\", 'dataset loader evaluator')\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef get_data_tuple(splits: str, bs:int, shuffle=False, drop_last=False) -> DataTuple:\n dset = VQADataset(splits)\n tset = VQATorchDataset(dset)\n evaluator = VQAEvaluator(dset)\n data_loader = DataLoader(\n tset, batch_size=bs,\n shuffle=shuffle, num_workers=args.num_workers,\n drop_last=drop_last, pin_memory=True\n )\n\n return DataTuple(dataset=dset, loader=data_loader, evaluator=evaluator)\n\nclass WarmupOptimizer(object):\n def __init__(self, _lr_base, optimizer, _data_size, _batch_size):\n self.optimizer = optimizer\n self._step = 0\n self._lr_base = _lr_base\n self._rate = 0\n self._data_size = _data_size\n self._batch_size = _batch_size\n\n def step(self):\n self._step += 1\n rate = self.rate()\n for p in self.optimizer.param_groups:\n p['lr'] = rate\n self._rate = rate\n self.optimizer.step()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def rate(self, step=None):\n if step is None:\n step = self._step\n if step <= int(self._data_size / self._batch_size * 1):\n r = self._lr_base * 1/4.\n elif step <= int(self._data_size / self._batch_size * 2):\n r = self._lr_base * 2/4.\n elif step <= int(self._data_size / self._batch_size * 3):\n r = self._lr_base * 3/4.\n else:\n r = self._lr_base\n return r\n\ndef adjust_learning_rate(optimizer, decay_rate):\n optimizer._lr_base *= decay_rate\n\nclass VQA:\n def __init__(self):\n # Datasets\n self.train_tuple = get_data_tuple(\n args.train, bs=args.batch_size, shuffle=True, drop_last=True\n )\n if args.valid != \"\":\n self.valid_tuple = get_data_tuple(\n args.valid, bs=256, # for large model\n shuffle=False, drop_last=False\n )\n else:\n self.valid_tuple = None\n \n # Model\n self.model = VQAModel(self.train_tuple.dataset.num_answers)\n self._lr_decay_epoch_list = [8, 10]\n self._lr_decay_rate = 0.2\n\n # Load pre-trained weights\n if args.load_lxmert is not None:\n self.model.lxrt_encoder.load(args.load_lxmert)\n if args.load_lxmert_qa is not None:\n load_lxmert_qa(args.load_lxmert_qa, self.model,\n label2ans=self.train_tuple.dataset.label2ans)\n if args.fix_language_bert:\n assert args.patial_load\n state_dict = torch.load(args.patial_load)\n for k in state_dict.copy():\n if not k.startswith('bert.'):\n state_dict['bert.' 
+ k.replace('gamma', 'weight').replace('beta', 'bias')] = state_dict.pop(k)\n\n # fix bert parameters\n for name, param in self.model.lxrt_encoder.model.named_parameters():\n # if 'pooler' in name: # pooler not fixed\n # continue\n if name in state_dict:\n logger.info('fix param for: {}'.format(name))\n param.requires_grad = False\n\n # GPU options\n self.model = self.model.cuda()\n\n # Loss and Optimizer\n self.bce_loss = nn.BCEWithLogitsLoss()\n if 'bert' in args.optim:\n batch_per_epoch = len(self.train_tuple.loader)\n t_total = int(batch_per_epoch * args.epochs)\n logger.info(\"BertAdam Total Iters: %d\" % t_total)\n from lxrt.optimization import BertAdam\n self.optim = BertAdam(list(self.model.parameters()),\n lr=args.lr,\n warmup=0.1,\n t_total=t_total)\n elif 'adam' in args.optim:\n batch_per_epoch = len(self.train_tuple.loader)\n optim = args.optimizer(filter(lambda p: p.requires_grad, self.model.parameters()), lr=0, betas=(0.9, 0.98), eps=1e-9)\n self.optim = WarmupOptimizer(args.lr, optim, batch_per_epoch * args.batch_size, args.batch_size)\n else:\n self.optim = args.optimizer(self.model.parameters(), args.lr)\n\n if args.amp_type is not None:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to run this example.\")\n self.model, self.optim = amp.initialize(self.model, self.optim, opt_level=args.amp_type)\n\n if args.multiGPU:\n self.model.lxrt_encoder.multi_gpu()\n # Output Directory\n self.output = args.output\n os.makedirs(self.output, exist_ok=True)\n\n def train(self, train_tuple, eval_tuple):\n dset, loader, evaluator = train_tuple\n iter_wrapper = (lambda x: tqdm(x, total=len(loader))) if args.tqdm else (lambda x: x)\n\n best_valid = 0.\n for epoch in range(args.epochs):\n quesid2ans = {}\n if 'adam' in args.optim and epoch in self._lr_decay_epoch_list:\n adjust_learning_rate(self.optim, self._lr_decay_rate)\n for i, (ques_id, feats, boxes, sent, target) in iter_wrapper(enumerate(loader)):\n\n self.model.train()\n self.optim.zero_grad()\n\n feats, boxes, target = feats.cuda(), boxes.cuda(), target.cuda()\n\n logit = self.model(feats, boxes, sent)\n assert logit.dim() == target.dim() == 2\n loss = self.bce_loss(logit, target)\n loss = loss * logit.size(1)\n if args.multiGPU:\n loss = loss.mean() # mean() to average on multi-gpu.\n\n if args.amp_type is not None:\n from apex import amp\n with amp.scale_loss(loss, self.optim) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), args.clip_norm)\n self.optim.step()\n\n score, label = logit.max(1)\n for qid, l in zip(ques_id, label.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = ans\n\n log_str = \"\\nEpoch %d: Train %0.2f\\n\" % (epoch, evaluator.evaluate(quesid2ans) * 100.)\n\n if self.valid_tuple is not None: # Do Validation\n valid_score = self.evaluate(eval_tuple)\n if valid_score > best_valid:\n best_valid = valid_score\n self.save(\"BEST\")\n\n log_str += \"Epoch %d: Valid %0.2f\\n\" % (epoch, valid_score * 100.) 
+ \\\n \"Epoch %d: Best %0.2f\\n\" % (epoch, best_valid * 100.)\n\n logger.info(log_str)\n\n with open(self.output + \"/log.log\", 'a') as f:\n f.write(log_str)\n f.flush()\n\n self.save(\"LAST\")\n\n def predict(self, eval_tuple: DataTuple, dump=None):\n \"\"\"\n Predict the answers to questions in a data split.\n\n :param eval_tuple: The data tuple to be evaluated.\n :param dump: The path of saved file to dump results.\n :return: A dict of question_id to answer.\n \"\"\"\n self.model.eval()\n dset, loader, evaluator = eval_tuple\n quesid2ans = {}\n for i, datum_tuple in enumerate(loader):\n ques_id, feats, boxes, sent = datum_tuple[:4] # Avoid seeing ground truth\n with torch.no_grad():\n feats, boxes = feats.cuda(), boxes.cuda()\n logit = self.model(feats, boxes, sent)\n if args.with_score:\n logit = nn.Softmax(dim=1)(logit)\n score, label = logit.max(1)\n if args.with_score:\n for qid, l, s in zip(ques_id, label.cpu().numpy(), score.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = (ans, str(s))\n else:\n for qid, l in zip(ques_id, label.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = ans\n if dump is not None:\n evaluator.dump_result(quesid2ans, dump)\n return quesid2ans\n\n def evaluate(self, eval_tuple: DataTuple, dump=None):\n \"\"\"Evaluate all data in data_tuple.\"\"\"\n quesid2ans = self.predict(eval_tuple, dump)\n return eval_tuple.evaluator.evaluate(quesid2ans)\n\n @staticmethod\n def oracle_score(data_tuple):\n dset, loader, evaluator = data_tuple\n quesid2ans = {}\n for i, (ques_id, feats, boxes, sent, target) in enumerate(loader):\n _, label = target.max(1)\n for qid, l in zip(ques_id, label.cpu().numpy()):\n ans = dset.label2ans[l]\n quesid2ans[qid.item()] = ans\n return evaluator.evaluate(quesid2ans)\n\n def save(self, name):\n torch.save(self.model.state_dict(),\n os.path.join(self.output, \"%s.pth\" % name))\n\n def load(self, path):\n logger.info(\"Load model from %s\" % path)\n state_dict = torch.load(\"%s.pth\" % path)\n self.model.load_state_dict(state_dict)\n\n\nif __name__ == \"__main__\":\n # Build Class\n vqa = VQA()\n\n # Load VQA model weights\n if args.load is not None:\n vqa.load(args.load)\n\n # Test or Train\n if args.test is not None:\n args.fast = args.tiny = False # Always loading all data in test\n if 'test' in args.test:\n vqa.predict(\n get_data_tuple(args.test, bs=950,\n shuffle=False, drop_last=False),\n dump=os.path.join(args.output, 'test_predict.json')\n )\n elif 'val' in args.test: \n # Since part of valididation data are used in pre-training/fine-tuning,\n # only validate on the minival set.\n result = vqa.evaluate(\n get_data_tuple('minival', bs=950,\n shuffle=False, drop_last=False),\n dump=os.path.join(args.output, 'minival_predict.json')\n )\n logger.info(result)\n else:\n assert False, \"No such test option for %s\" % args.test\n else:\n # print('Splits in Train data:', vqa.train_tuple.dataset.splits)\n logger.info('Splits in Train data: {}'.format(vqa.train_tuple.dataset.splits))\n if vqa.valid_tuple is not None:\n logger.info('Splits in Valid data: {}'.format(vqa.valid_tuple.dataset.splits))\n logger.info(\"Valid Oracle: %0.2f\" % (vqa.oracle_score(vqa.valid_tuple) * 100))\n else:\n logger.info(\"DO NOT USE VALIDATION\")\n vqa.train(vqa.train_tuple, vqa.valid_tuple)\n\n\n" ]
[ [ "torch.load", "torch.nn.Softmax", "torch.no_grad", "torch.utils.data.dataloader.DataLoader", "torch.nn.BCEWithLogitsLoss" ] ]
ziniuwan/maed
[ "9e1f1c37eba81da86c8d9c62dc9be41a01abff5b" ]
[ "lib/models/spin.py" ]
[ "\"\"\"\nThis script is brought from https://github.com/nkolot/SPIN\nAdhere to their licence to use this script\n\"\"\"\n\nimport math\nimport torch\nimport numpy as np\nimport os.path as osp\nimport torch.nn as nn\n\nfrom lib.core.config import DATA_DIR\nfrom lib.utils.geometry import rotation_matrix_to_angle_axis, rot6d_to_rotmat\nfrom lib.models.smpl import SMPL, SMPL_MODEL_DIR, H36M_TO_J17, SMPL_MEAN_PARAMS\n\n\nclass Regressor(nn.Module):\n def __init__(self, smpl_mean_params=SMPL_MEAN_PARAMS, feat_dim=2048, hidden_dim=1024, **kwargs):\n super(Regressor, self).__init__()\n\n self.smpl = SMPL(\n SMPL_MODEL_DIR,\n create_transl=False,\n create_global_orient=False,\n create_body_pose=False,\n create_betas=False,\n )\n npose = 24 * 6\n nshape = 10\n\n self.fc1 = nn.Linear(feat_dim + npose + nshape + 3, hidden_dim)\n self.drop1 = nn.Dropout()\n self.fc2 = nn.Linear(hidden_dim, hidden_dim)\n self.drop2 = nn.Dropout()\n self.decpose = nn.Linear(hidden_dim, npose)\n self.decshape = nn.Linear(hidden_dim, nshape)\n self.deccam = nn.Linear(hidden_dim, 3)\n nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)\n nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)\n nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)\n\n mean_params = np.load(smpl_mean_params)\n init_pose = torch.from_numpy(mean_params['pose'][:]).unsqueeze(0)\n init_shape = torch.from_numpy(mean_params['shape'][:].astype('float32')).unsqueeze(0)\n init_cam = torch.from_numpy(mean_params['cam']).unsqueeze(0)\n self.register_buffer('init_pose', init_pose)\n self.register_buffer('init_shape', init_shape)\n self.register_buffer('init_cam', init_cam)\n\n\n def iterative_regress(self, x, init_pose=None, init_shape=None, init_cam=None, n_iter=3):\n nt = x.shape[0]\n\n if init_pose is None:\n init_pose = self.init_pose.expand(nt, -1)\n if init_shape is None:\n init_shape = self.init_shape.expand(nt, -1)\n if init_cam is None:\n init_cam = self.init_cam.expand(nt, -1)\n\n pred_pose = init_pose\n pred_shape = init_shape\n pred_cam = init_cam\n for i in range(n_iter):\n xc = torch.cat([x, pred_pose, pred_shape, pred_cam], 1)\n xc = self.fc1(xc)\n xc = self.drop1(xc)\n xc = self.fc2(xc)\n xc = self.drop2(xc)\n pred_pose = self.decpose(xc) + pred_pose\n pred_shape = self.decshape(xc) + pred_shape\n pred_cam = self.deccam(xc) + pred_cam\n\n return pred_pose, pred_shape, pred_cam\n\n def forward(self, x, seqlen, J_regressor=None,\n init_pose=None, init_shape=None, init_cam=None, n_iter=3, **kwargs):\n nt = x.shape[0]\n N = nt//seqlen\n\n pred_pose, pred_shape, pred_cam = self.iterative_regress(x, init_pose, init_shape, init_cam, n_iter=3)\n output_regress = self.get_output(pred_pose, pred_shape, pred_cam, J_regressor)\n\n return output_regress\n\n\n def get_output(self, pred_pose, pred_shape, pred_cam, J_regressor):\n output = {}\n nt = pred_pose.shape[0]\n pred_rotmat = rot6d_to_rotmat(pred_pose).reshape(nt, -1, 3, 3)\n \n pred_output = self.smpl(\n betas=pred_shape,\n body_pose=pred_rotmat[:, 1:],\n global_orient=pred_rotmat[:, 0].unsqueeze(1),\n pose2rot=False\n )\n pred_vertices = pred_output.vertices[:nt]\n pred_joints = pred_output.joints[:nt]\n if J_regressor is not None:\n J_regressor_batch = J_regressor[None, :].expand(pred_vertices.shape[0], -1, -1).to(pred_vertices.device)\n pred_joints = torch.matmul(J_regressor_batch, pred_vertices)\n pred_keypoints_2d = projection(pred_joints, pred_cam)\n pose = rotation_matrix_to_angle_axis(pred_rotmat.reshape(-1, 3, 3)).reshape(nt, -1)\n output['theta'] = torch.cat([pred_cam, pose, 
pred_shape], dim=1)\n output['verts'] = pred_vertices\n output['kp_2d'] = pred_keypoints_2d\n output['kp_3d'] = pred_joints\n output['rotmat'] = pred_rotmat\n return output\n\n\ndef projection(pred_joints, pred_camera):\n pred_cam_t = torch.stack([pred_camera[:, 1],\n pred_camera[:, 2],\n 2 * 5000. / (224. * pred_camera[:, 0] + 1e-9)], dim=-1)\n batch_size = pred_joints.shape[0]\n camera_center = torch.zeros(batch_size, 2)\n pred_keypoints_2d = perspective_projection(pred_joints,\n rotation=torch.eye(3).unsqueeze(0).expand(batch_size, -1, -1).to(pred_joints.device),\n translation=pred_cam_t,\n focal_length=5000.,\n camera_center=camera_center)\n # Normalize keypoints to [-1,1]\n pred_keypoints_2d = pred_keypoints_2d / (224. / 2.)\n return pred_keypoints_2d\n\n\ndef perspective_projection(points, rotation, translation,\n focal_length, camera_center):\n \"\"\"\n This function computes the perspective projection of a set of points.\n Input:\n points (bs, N, 3): 3D points\n rotation (bs, 3, 3): Camera rotation\n translation (bs, 3): Camera translation\n focal_length (bs,) or scalar: Focal length\n camera_center (bs, 2): Camera center\n \"\"\"\n batch_size = points.shape[0]\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:,0,0] = focal_length\n K[:,1,1] = focal_length\n K[:,2,2] = 1.\n K[:,:-1, -1] = camera_center\n\n # Transform points\n points = torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n\n # Apply perspective distortion\n projected_points = points / points[:,:,-1].unsqueeze(-1)\n\n # Apply camera intrinsics\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n\n return projected_points[:, :, :-1]\n" ]
[ [ "numpy.load", "torch.nn.init.xavier_uniform_", "torch.stack", "torch.nn.Linear", "torch.from_numpy", "torch.zeros", "torch.einsum", "torch.eye", "torch.cat", "torch.nn.Dropout", "torch.matmul" ] ]
soma2000-lang/colour
[ "bb7ee23ac65e09613af78bd18dd98dffb1a2904a", "bb7ee23ac65e09613af78bd18dd98dffb1a2904a" ]
[ "colour/models/rgb/transfer_functions/canon_log.py", "colour/models/rgb/datasets/p3_d65.py" ]
[ "\"\"\"\nCanon Log Encodings\n===================\n\nDefines the *Canon Log* encodings:\n\n- :func:`colour.models.log_encoding_CanonLog`\n- :func:`colour.models.log_decoding_CanonLog`\n- :func:`colour.models.log_encoding_CanonLog2`\n- :func:`colour.models.log_decoding_CanonLog2`\n- :func:`colour.models.log_encoding_CanonLog3`\n- :func:`colour.models.log_decoding_CanonLog3`\n\nNotes\n-----\n- :cite:`Canona` is available as a *Drivers & Downloads* *Software* for\n Windows 10 (x64) *Operating System*, a copy of the archive is hosted at\n this url: https://drive.google.com/open?id=0B_IQZQdc4Vy8ZGYyY29pMEVwZU0\n\nReferences\n----------\n- :cite:`Canona` : Canon. (2016). EOS C300 Mark II - EOS C300 Mark II Input\n Transform Version 2.0 (for Cinema Gamut / BT.2020). Retrieved August 23,\n 2016, from\n https://www.usa.canon.com/internet/portal/us/home/support/details/cameras/cinema-eos/eos-c300-mark-ii\n- :cite:`Thorpe2012a` : Thorpe, L. (2012). CANON-LOG TRANSFER CHARACTERISTIC.\n Retrieved September 25, 2014, from\n http://downloads.canon.com/CDLC/Canon-Log_Transfer_Characteristic_6-20-2012.pdf\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\n\nfrom colour.hints import (\n Boolean,\n FloatingOrArrayLike,\n FloatingOrNDArray,\n Integer,\n)\nfrom colour.models.rgb.transfer_functions import full_to_legal, legal_to_full\nfrom colour.utilities import (\n as_float,\n domain_range_scale,\n from_range_1,\n to_domain_1,\n)\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright (C) 2013-2022 - Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"log_encoding_CanonLog\",\n \"log_decoding_CanonLog\",\n \"log_encoding_CanonLog2\",\n \"log_decoding_CanonLog2\",\n \"log_encoding_CanonLog3\",\n \"log_decoding_CanonLog3\",\n]\n\n\ndef log_encoding_CanonLog(\n x: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n out_normalised_code_value: Boolean = True,\n in_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log* log encoding curve / opto-electronic transfer\n function.\n\n Parameters\n ----------\n x\n Linear data :math:`x`.\n bit_depth\n Bit depth used for conversion.\n out_normalised_code_value\n Whether the *Canon Log* non-linear data is encoded as normalised code\n values.\n in_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n *Canon Log* non-linear data.\n\n References\n ----------\n :cite:`Thorpe2012a`\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n Examples\n --------\n >>> log_encoding_CanonLog(0.18) * 100 # doctest: +ELLIPSIS\n 34.3389651...\n\n The values of *Table 2 Canon-Log Code Values* table in :cite:`Thorpe2012a`\n are obtained as follows:\n\n >>> x = np.array([0, 2, 18, 90, 720]) / 100\n >>> np.around(log_encoding_CanonLog(x) * (2 ** 10 - 1)).astype(np.int)\n array([ 128, 169, 
351, 614, 1016])\n >>> np.around(log_encoding_CanonLog(x, 10, False) * 100, 1)\n array([ 7.3, 12. , 32.8, 62.7, 108.7])\n \"\"\"\n\n x = to_domain_1(x)\n\n if in_reflection:\n x = x / 0.9\n\n with domain_range_scale(\"ignore\"):\n clog = np.where(\n x < log_decoding_CanonLog(0.0730597, bit_depth, False),\n -(0.529136 * (np.log10(-x * 10.1596 + 1)) - 0.0730597),\n 0.529136 * np.log10(10.1596 * x + 1) + 0.0730597,\n )\n\n clog_cv = (\n full_to_legal(clog, bit_depth) if out_normalised_code_value else clog\n )\n\n return as_float(from_range_1(clog_cv))\n\n\ndef log_decoding_CanonLog(\n clog: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n in_normalised_code_value: Boolean = True,\n out_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log* log decoding curve / electro-optical transfer\n function.\n\n Parameters\n ----------\n clog\n *Canon Log* non-linear data.\n bit_depth\n Bit depth used for conversion.\n in_normalised_code_value\n Whether the *Canon Log* non-linear data is encoded with normalised\n code values.\n out_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n Linear data :math:`x`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Thorpe2012a`\n\n Examples\n --------\n >>> log_decoding_CanonLog(34.338965172606912 / 100) # doctest: +ELLIPSIS\n 0.17999999...\n \"\"\"\n\n clog = to_domain_1(clog)\n\n clog = legal_to_full(clog, bit_depth) if in_normalised_code_value else clog\n\n x = np.where(\n clog < 0.0730597,\n -(10 ** ((0.0730597 - clog) / 0.529136) - 1) / 10.1596,\n (10 ** ((clog - 0.0730597) / 0.529136) - 1) / 10.1596,\n )\n\n if out_reflection:\n x = x * 0.9\n\n return as_float(from_range_1(x))\n\n\ndef log_encoding_CanonLog2(\n x: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n out_normalised_code_value: Boolean = True,\n in_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log 2* log encoding curve / opto-electronic transfer\n function.\n\n Parameters\n ----------\n x\n Linear data :math:`x`.\n bit_depth\n Bit depth used for conversion.\n out_normalised_code_value\n Whether the *Canon Log 2* non-linear data is encoded as normalised\n code values.\n in_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n *Canon Log 2* non-linear data.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog2`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n 
:cite:`Canona`\n\n Examples\n --------\n >>> log_encoding_CanonLog2(0.18) * 100 # doctest: +ELLIPSIS\n 39.8254694...\n \"\"\"\n\n x = to_domain_1(x)\n\n if in_reflection:\n x = x / 0.9\n\n with domain_range_scale(\"ignore\"):\n clog2 = np.where(\n x < log_decoding_CanonLog2(0.035388128, bit_depth, False),\n -(0.281863093 * (np.log10(-x * 87.09937546 + 1)) - 0.035388128),\n 0.281863093 * np.log10(x * 87.09937546 + 1) + 0.035388128,\n )\n\n clog2_cv = (\n full_to_legal(clog2, bit_depth) if out_normalised_code_value else clog2\n )\n\n return as_float(from_range_1(clog2_cv))\n\n\ndef log_decoding_CanonLog2(\n clog2: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n in_normalised_code_value: Boolean = True,\n out_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log 2* log decoding curve / electro-optical transfer\n function.\n\n Parameters\n ----------\n clog2\n *Canon Log 2* non-linear data.\n bit_depth\n Bit depth used for conversion.\n in_normalised_code_value\n Whether the *Canon Log 2* non-linear data is encoded with normalised\n code values.\n out_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n Linear data :math:`x`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog2`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Canona`\n\n Examples\n --------\n >>> log_decoding_CanonLog2(39.825469498316735 / 100) # doctest: +ELLIPSIS\n 0.1799999...\n \"\"\"\n\n clog2 = to_domain_1(clog2)\n\n clog2 = (\n legal_to_full(clog2, bit_depth) if in_normalised_code_value else clog2\n )\n\n x = np.where(\n clog2 < 0.035388128,\n -(10 ** ((0.035388128 - clog2) / 0.281863093) - 1) / 87.09937546,\n (10 ** ((clog2 - 0.035388128) / 0.281863093) - 1) / 87.09937546,\n )\n\n if out_reflection:\n x = x * 0.9\n\n return as_float(from_range_1(x))\n\n\ndef log_encoding_CanonLog3(\n x: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n out_normalised_code_value: Boolean = True,\n in_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log 3* log encoding curve / opto-electronic transfer\n function.\n\n Parameters\n ----------\n x\n Linear data :math:`x`.\n bit_depth\n Bit depth used for conversion.\n out_normalised_code_value\n Whether the *Canon Log 3* non-linear data is encoded as normalised code\n values.\n in_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n *Canon Log 3* non-linear data.\n\n Notes\n -----\n - Introspection of the grafting points by Shaw, N. 
(2018) shows that the\n *Canon Log 3* IDT was likely derived from its encoding curve as the\n later is grafted at *+/-0.014*::\n\n >>> clog3 = 0.04076162\n >>> (clog3 - 0.073059361) / 2.3069815\n -0.014000000000000002\n >>> clog3 = 0.105357102\n >>> (clog3 - 0.073059361) / 2.3069815\n 0.013999999999999997\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog3`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Canona`\n\n Examples\n --------\n >>> log_encoding_CanonLog3(0.18) * 100 # doctest: +ELLIPSIS\n 34.3389369...\n \"\"\"\n\n x = to_domain_1(x)\n\n if in_reflection:\n x = x / 0.9\n\n with domain_range_scale(\"ignore\"):\n clog3 = np.select(\n (\n x\n < log_decoding_CanonLog3(0.04076162, bit_depth, False, False),\n x\n <= log_decoding_CanonLog3(\n 0.105357102, bit_depth, False, False\n ),\n x\n > log_decoding_CanonLog3(0.105357102, bit_depth, False, False),\n ),\n (\n -0.42889912 * np.log10(-x * 14.98325 + 1) + 0.07623209,\n 2.3069815 * x + 0.073059361,\n 0.42889912 * np.log10(x * 14.98325 + 1) + 0.069886632,\n ),\n )\n\n clog3_cv = (\n full_to_legal(clog3, bit_depth) if out_normalised_code_value else clog3\n )\n\n return as_float(from_range_1(clog3_cv))\n\n\ndef log_decoding_CanonLog3(\n clog3: FloatingOrArrayLike,\n bit_depth: Integer = 10,\n in_normalised_code_value: Boolean = True,\n out_reflection: Boolean = True,\n) -> FloatingOrNDArray:\n \"\"\"\n Defines the *Canon Log 3* log decoding curve / electro-optical transfer\n function.\n\n Parameters\n ----------\n clog3\n *Canon Log 3* non-linear data.\n bit_depth\n Bit depth used for conversion.\n in_normalised_code_value\n Whether the *Canon Log 3* non-linear data is encoded with normalised\n code values.\n out_reflection\n Whether the light level :math:`x` to a camera is reflection.\n\n Returns\n -------\n :class:`numpy.floating` or :class:`numpy.ndarray`\n Linear data :math:`x`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``clog3`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``x`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Canona`\n\n Examples\n --------\n >>> log_decoding_CanonLog3(34.338936938868677 / 100) # doctest: +ELLIPSIS\n 0.1800000...\n \"\"\"\n\n clog3 = to_domain_1(clog3)\n\n clog3 = (\n legal_to_full(clog3, bit_depth) if in_normalised_code_value else clog3\n )\n\n x = np.select(\n (clog3 < 0.04076162, clog3 <= 0.105357102, clog3 > 0.105357102),\n (\n -(10 ** ((0.07623209 - clog3) / 0.42889912) - 1) / 14.98325,\n (clog3 - 0.073059361) / 2.3069815,\n (10 ** ((clog3 - 0.069886632) / 0.42889912) - 1) / 14.98325,\n ),\n )\n\n if out_reflection:\n x = x * 0.9\n\n return as_float(from_range_1(x))\n", "\"\"\"\nP3-D65 Colourspace\n==================\n\nDefines 
the *P3-D65* colourspace:\n\n- :attr:`colour.models.RGB_COLOURSPACE_P3_D65`.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport numpy as np\nfrom functools import partial\n\nfrom colour.colorimetry import CCS_ILLUMINANTS\nfrom colour.hints import NDArray\nfrom colour.models.rgb import (\n RGB_Colourspace,\n gamma_function,\n normalised_primary_matrix,\n)\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright (C) 2013-2022 - Colour Developers\"\n__license__ = \"New BSD License - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n__all__ = [\n \"PRIMARIES_P3_D65\",\n \"WHITEPOINT_NAME_P3_D65\",\n \"CCS_WHITEPOINT_P3_D65\",\n \"MATRIX_P3_D65_TO_XYZ\",\n \"MATRIX_XYZ_TO_P3_D65\",\n \"RGB_COLOURSPACE_P3_D65\",\n]\n\nPRIMARIES_P3_D65: NDArray = np.array(\n [\n [0.6800, 0.3200],\n [0.2650, 0.6900],\n [0.1500, 0.0600],\n ]\n)\n\"\"\"\n*P3-D65* colourspace primaries.\n\"\"\"\n\nWHITEPOINT_NAME_P3_D65: str = \"D65\"\n\"\"\"\n*P3-D65* colourspace whitepoint name.\n\"\"\"\n\nCCS_WHITEPOINT_P3_D65: NDArray = CCS_ILLUMINANTS[\n \"CIE 1931 2 Degree Standard Observer\"\n][WHITEPOINT_NAME_P3_D65]\n\"\"\"\n*P3-D65* colourspace whitepoint chromaticity coordinates.\n\"\"\"\n\nMATRIX_P3_D65_TO_XYZ: NDArray = normalised_primary_matrix(\n PRIMARIES_P3_D65, CCS_WHITEPOINT_P3_D65\n)\n\"\"\"\n*P3-D65* colourspace to *CIE XYZ* tristimulus values matrix.\n\"\"\"\n\nMATRIX_XYZ_TO_P3_D65: NDArray = np.linalg.inv(MATRIX_P3_D65_TO_XYZ)\n\"\"\"\n*CIE XYZ* tristimulus values to *P3-D65* colourspace matrix.\n\"\"\"\n\nRGB_COLOURSPACE_P3_D65: RGB_Colourspace = RGB_Colourspace(\n \"P3-D65\",\n PRIMARIES_P3_D65,\n CCS_WHITEPOINT_P3_D65,\n WHITEPOINT_NAME_P3_D65,\n MATRIX_P3_D65_TO_XYZ,\n MATRIX_XYZ_TO_P3_D65,\n partial(gamma_function, exponent=1 / 2.6),\n partial(gamma_function, exponent=2.6),\n)\nRGB_COLOURSPACE_P3_D65.__doc__ = \"\"\"\n*P3-D65* colourspace.\n\"\"\"\n" ]
[ [ "numpy.where", "numpy.log10", "numpy.select" ], [ "numpy.array", "numpy.linalg.inv" ] ]
chaitanyamalaviya/NeuralFactorGraph
[ "6cd664b7edc43d56c6f1165baa7e7625eb0f7cd8" ]
[ "utils.py" ]
[ "from __future__ import division, print_function\nfrom conllu.parser import parse, parse_tree\nfrom tags import Tags, Tag, Label\n\nimport os\nimport re\nimport math\nimport numpy as np\nimport itertools\nimport pdb\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nnp.set_printoptions(threshold=np.nan)\n\n\nFROZEN_TAG = \"__frozen__\"\n\ndef freeze_dict(obj):\n if isinstance(obj, dict):\n dict_items = list(obj.items())\n dict_items.append((FROZEN_TAG, True))\n return tuple([(k, freeze_dict(v)) for k, v in dict_items])\n return obj\n\ndef unfreeze_dict(obj):\n if isinstance(obj, tuple):\n if (FROZEN_TAG, True) in obj:\n out = dict((k, unfreeze_dict(v)) for k, v in obj)\n del out[FROZEN_TAG]\n return out\n return obj\n\n\ndef get_lang_code_dicts():\n \"\"\"\n Returns lang_to_code, code_to_lang dictionaries\n\n \"\"\"\n lang_to_code = {}\n code_to_lang = {}\n bad_chars = \",''\"\n rgx = re.compile('[%s]' % bad_chars)\n\n with open(\"data/lang_codes.txt\") as f:\n data = f.read()\n lines = data.split(\"\\n\")\n split_line = [line.split() for line in lines]\n for line in split_line[:-2]:\n lang = rgx.sub('', line[0])\n code = rgx.sub('', line[2]) \n lang_to_code[lang] = code\n code_to_lang = {v: k for k, v in lang_to_code.iteritems()}\n return lang_to_code, code_to_lang\n\n\ndef read_conll(treebank_path, langs, code_to_lang, train_or_dev, tgt_size=None, test=False):\n \n \"\"\"\n Reads conll formatted file\n\n langs: list of languages\n train: read training data\n returns: dict with data for each language\n as list of tuples of sentences and morph-tags\n \"\"\"\n\n annot_sents = {}\n unique = []\n for lang in langs:\n\n train = train_or_dev if not test else \"test\"\n\n if not test:\n for file in os.listdir(treebank_path + \"UD_\" + code_to_lang[lang]):\n if file.endswith(\"train.conllu\"):\n filepath = os.path.join(treebank_path + \"UD_\" + code_to_lang[lang], file)\n break\n else:\n for file in os.listdir(treebank_path + \"UD_\" + code_to_lang[lang]):\n if file.endswith(\"dev.conllu\"):\n filepath = os.path.join(treebank_path+ \"UD_\" + code_to_lang[lang], file)\n break\n\n with open(filepath) as f:\n data = f.readlines()[:-1]\n data = [line for line in data if line[0]!='#']\n split_data = \" \".join(data).split(\"\\n \\n\")\n ud = [parse(sent)[0] for sent in split_data]\n\n all_text = []\n all_tags = []\n if langs[-1]==lang and tgt_size:\n tgt_size = min(tgt_size, len(ud))\n ud = ud[:tgt_size]\n for sent in ud:\n sent_text = []\n sent_tags = []\n for word in sent:\n word_tags = {}\n if word['feats']:\n word_tags = dict(word['feats'])\n if word['upostag']:\n if word_tags:\n word_tags.update({'POS':word['upostag']})\n else:\n word_tags = {'POS':word['upostag']}\n \n if word_tags:\n word_tags = freeze_dict(word_tags)\n if word_tags not in unique:\n unique.append(word_tags)\n\n sent_text.append(word['form'])\n sent_tags.append(freeze_dict(word_tags))\n\n all_text.append(sent_text)\n all_tags.append(sent_tags)\n\n annot_sents[lang] = [(w, m) for w, m in zip(all_text, all_tags)]\n\n return annot_sents, unique\n\n\ndef addNullLabels(annot_sents, langs, unique_tags):\n\n for lang in langs:\n i = 0\n for w, m in annot_sents[lang]:\n new_tags = []\n for tags in m:\n tag_dict = unfreeze_dict(tags)\n for tag in unique_tags:\n if tag.name not in tag_dict:\n tag_dict[tag.name] = \"NULL\"\n new_tags.append(freeze_dict(tag_dict))\n\n annot_sents[lang][i] = (w, new_tags)\n i += 
1\n\n return annot_sents\n\n\ndef sortbylength(data, lang_ids, maxlen=500):\n \"\"\"\n :param data: List of tuples of source sentences and morph tags\n :param lang_ids: List of lang IDs for each sentence\n :param maxlen: Maximum sentence length permitted\n :return: Sorted data and sorted langIDs\n \"\"\"\n src = [elem[0] for elem in data]\n tgt = [elem[1] for elem in data]\n indexed_src = [(i,src[i]) for i in range(len(src))]\n sorted_indexed_src = sorted(indexed_src, key=lambda x: -len(x[1]))\n sorted_src = [item[1] for item in sorted_indexed_src if len(item[1])<maxlen]\n sort_order = [item[0] for item in sorted_indexed_src if len(item[1])<maxlen]\n sorted_tgt = [tgt[i] for i in sort_order]\n sorted_lang_ids = [lang_ids[i] for i in sort_order]\n sorted_data = [(src, tgt) for src, tgt in zip(sorted_src, sorted_tgt)]\n\n return sorted_data, sorted_lang_ids\n\n\ndef get_train_order(training_data, batch_size, startIdx=0):\n \"\"\"\n :param data: List of tuples of source sentences and morph tags\n :return: start idxs of batches\n \"\"\"\n\n lengths = [len(elem[0]) for elem in training_data]\n start_idxs = []\n end_idxs = []\n prev_length=-1\n batch_counter = 0\n\n for i, length in enumerate(lengths, start=startIdx):\n \n if length!=prev_length or batch_counter>batch_size:\n start_idxs.append(i)\n if prev_length!=-1:\n end_idxs.append(i-1)\n batch_counter = 1\n\n batch_counter += 1 \n prev_length = length\n\n end_idxs.append(startIdx + len(lengths)-1)\n\n return [(s,e) for s,e in zip(start_idxs, end_idxs)]\n\ndef find_unique_tags(train_data_tags, null_label=False):\n\n unique_tags = Tags()\n\n for tags in train_data_tags:\n for tag, label in unfreeze_dict(tags).items():\n if not unique_tags.tagExists(tag):\n unique_tags.addTag(tag)\n \n curTag = unique_tags.getTagbyName(tag)\n\n if not curTag.labelExists(label):\n curTag.addLabel(label)\n\n # Add null labels to unseen tags in each tag set\n if null_label:\n for tag in unique_tags:\n tag.addLabel(\"NULL\")\n\n return unique_tags\n\n\ndef plot_heatmap(uniqueTags, weights, kind):\n\n font = {'family' : 'normal',\n 'size' : 14,\n 'weight' : 'bold'}\n\n matplotlib.rc('font', **font)\n\n pairs = list(itertools.combinations(range(uniqueTags.size()), 2))\n\n # weights is a ParameterList\n for k, weight in enumerate(weights):\n if kind==\"pair\":\n i, j = pairs[k]\n tag1 = uniqueTags.getTagbyIdx(i)\n tag2 = uniqueTags.getTagbyIdx(j)\n tag1_labels = [label.name for label in tag1.labels]\n tag2_labels = [label.name for label in tag2.labels]\n \n plt.figure(figsize=(20, 18), dpi=80)\n plt.xticks(range(0, len(tag2_labels)), tag2_labels)\n plt.yticks(range(0, len(tag1_labels)), tag1_labels)\n plt.tick_params(labelsize=25)\n plt.xlabel(tag2.name, fontsize=40)\n plt.ylabel(tag1.name, fontsize=50)\n plt.imshow(weight.data.cpu().numpy(), cmap='Reds', interpolation='nearest')\n plt.savefig(\"figures/\" + tag1.name + \"_\" + tag2.name + \".png\", bbox_inches='tight')\n plt.close()\n \n elif kind==\"trans\":\n tag = uniqueTags.getTagbyIdx(k)\n tag_labels = [label.name for label in tag.labels]\n\n plt.figure(figsize=(20, 18), dpi=80)\n plt.xticks(range(0, len(tag_labels)), tag_labels, rotation=45)\n plt.yticks(range(0, len(tag_labels)), tag_labels)\n plt.tick_params(labelsize=40)\n plt.xlabel(tag.name, fontsize=50)\n plt.ylabel(tag.name, fontsize=50)\n plt.imshow(weight.data.cpu().numpy(), cmap='Greys', interpolation='nearest')\n plt.savefig(\"figures/\" + tag.name + \"_\" + tag.name + \".png\", bbox_inches='tight')\n plt.close()\n\n\ndef get_var(x, gpu=False, 
volatile=False):\n x = Variable(x, volatile=volatile)\n if gpu:\n x = x.cuda()\n return x\n\ndef prepare_sequence(seq, to_ix, gpu=False):\n if isinstance(to_ix, dict):\n idxs = [to_ix[w] if w in to_ix else to_ix[\"UNK\"] for w in seq]\n elif isinstance(to_ix, list):\n idxs = [to_ix.index(w) if w in to_ix else to_ix.index(\"UNK\") for w in seq]\n tensor = torch.LongTensor(idxs)\n return get_var(tensor, gpu)\n\ndef to_scalar(var):\n # returns a python float\n return var.view(-1).data.tolist()[0]\n\ndef argmax(vec):\n # return the argmax as a python int\n _, idx = torch.max(vec, 1)\n return to_scalar(idx)\n\ndef logSumExp(a, b):\n maxi = np.maximum(a, b)\n aexp = a - maxi\n bexp = b - maxi\n sumOfExp = np.exp(aexp) + np.exp(bexp)\n return maxi + np.log(sumOfExp)\n\ndef logSumExpTensor(vec):\n # vec -> 16, tag_size\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast), 1))\n\ndef logSumExpTensors(a, b):\n\n maxi = torch.max(a, b)\n aexp = a - maxi\n bexp = b - maxi\n sumOfExp = torch.exp(aexp) + torch.exp(bexp)\n return maxi + torch.log(sumOfExp)\n\ndef logDot(a, b, redAxis=None):\n\n if redAxis==1:\n b = b.transpose()\n\n max_a = np.amax(a)\n max_b = np.amax(b)\n\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n # else:\n # np.log(C + 1e-300, out=C)\n\n C += max_a + max_b\n\n return C.transpose() if redAxis==1 else C\n\n\ndef logMax(a, b, redAxis=None):\n\n if redAxis==1:\n b = b.transpose()\n\n max_a = np.amax(a)\n max_b = np.amax(b)\n\n C = np.max(np.exp(a[:, :, None]-max_a) * np.exp(b[None, :, :]-max_b), axis=1)\n\n # if np.isfinite(C).all():\n np.log(C, out=C)\n # else:\n # np.log(C + 1e-300, out=C)\n\n C += max_a + max_b\n\n return C.transpose() if redAxis==1 else C\n\ndef logNormalize(a):\n\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose()- denom).transpose()\n\ndef logNormalizeTensor(a):\n\n denom = logSumExpTensor(a)\n if len(a.size())==2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size())==3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])\n\n return (a-denom)\n\ndef computeF1(hyps, golds, prefix, labels_to_ix=None, baseline=False, write_results=False):\n \"\"\"\n hyps: List of dicts for predicted morphological tags\n golds: List of dicts for gold morphological tags\n \"\"\"\n\n f1_precision_scores = {}\n f1_precision_total = {}\n f1_recall_scores = {}\n f1_recall_total = {}\n f1_average = 0.0\n \n if baseline:\n hyps = [unfreeze_dict(h) for h in hyps]\n golds = [unfreeze_dict(t) for t in golds]\n\n # calculate precision\n for i, word_tags in enumerate(hyps, start=0):\n for k, v in word_tags.items():\n if v==\"NULL\":\n continue\n if k not in f1_precision_scores:\n f1_precision_scores[k] = 0\n f1_precision_total[k] = 0\n if k in golds[i]:\n if v==golds[i][k]:\n f1_precision_scores[k] += 1\n f1_precision_total[k] += 1\n \n f1_micro_precision = sum(f1_precision_scores.values())/sum(f1_precision_total.values())\n\n for k in f1_precision_scores.keys():\n f1_precision_scores[k] = f1_precision_scores[k]/f1_precision_total[k]\n \n # calculate recall\n for i, word_tags in enumerate(golds, start=0):\n for k, v in word_tags.items():\n if v==\"NULL\":\n continue\n if k not in f1_recall_scores:\n f1_recall_scores[k] = 0\n f1_recall_total[k] = 0\n if k in hyps[i]:\n if v==hyps[i][k]:\n f1_recall_scores[k] += 1\n 
f1_recall_total[k] += 1\n\n f1_micro_recall = sum(f1_recall_scores.values())/sum(f1_recall_total.values())\n\n f1_scores = {}\n for k in f1_recall_scores.keys():\n f1_recall_scores[k] = f1_recall_scores[k]/f1_recall_total[k]\n \n if f1_recall_scores[k]==0 or k not in f1_precision_scores:\n f1_scores[k] = 0\n else:\n f1_scores[k] = 2 * (f1_precision_scores[k] * f1_recall_scores[k]) / (f1_precision_scores[k] + f1_recall_scores[k])\n\n f1_average += f1_recall_total[k] * f1_scores[k]\n\n f1_average /= sum(f1_recall_total.values())\n f1_micro_score = 2 * (f1_micro_precision * f1_micro_recall) / (f1_micro_precision + f1_micro_recall)\n\n\n if write_results:\n print(\"Writing F1 scores...\")\n with open(prefix + '_results_f1.txt', 'ab') as file:\n file.write(pickle.dumps(f1_scores))\n file.write(\"\\nMacro-averaged F1 Score: \" + str(f1_average))\n file.write(\"\\nMicro-averaged F1 Score: \" + str(f1_micro_score))\n\n\n return f1_average, f1_micro_score\n\n\ndef getCorrectCount(golds, hyps):\n\n correct = 0\n\n for i, word_tags in enumerate(golds, start=0):\n allCorrect = True\n for k, v in word_tags.items():\n if k in hyps[i]:\n if v!=hyps[i][k]:\n allCorrect = False\n break\n\n if allCorrect==True:\n correct += 1\n\n return correct\n" ]
[ [ "numpy.log", "torch.log", "matplotlib.pyplot.ylabel", "torch.max", "numpy.amax", "numpy.logaddexp.reduce", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "torch.autograd.Variable", "numpy.set_printoptions", "matplotlib.rc", "matplotlib.use", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.close", "numpy.maximum", "numpy.exp", "torch.exp", "torch.LongTensor", "matplotlib.pyplot.xlabel" ] ]
bwingert/ProDy
[ "7377a20b4a4841ec59dccaa93fa58e2ee0fe89bc" ]
[ "prody/utilities/catchall.py" ]
[ "\"\"\"This module defines miscellaneous utility functions that is public to users.\"\"\"\n\nimport numpy as np\nfrom numpy import unique, linalg, diag, sqrt, dot\n\nfrom Bio.Phylo.BaseTree import Tree, Clade\n\nfrom prody import PY3K\nfrom .misctools import addEnds, interpY, index, isListLike\nfrom .checkers import checkCoords\nfrom .logger import LOGGER\n\n\n__all__ = ['calcTree', 'clusterMatrix', 'showLines', 'showMatrix', \n 'reorderMatrix', 'findSubgroups', 'getCoords', \n 'getLinkage', 'getTreeFromLinkage', 'clusterSubfamilies']\n\nclass LinkageError(Exception):\n pass\n\ndef clusterSubfamilies(similarities, n_clusters=0, linkage='all', method='tsne', cutoff=0.0, **kwargs):\n \"\"\"Perform clustering based on members of the *ensemble* projected into lower a reduced\n dimension.\n \n :arg similarities: a matrix of similarities for each structure in the ensemble, such as\n RMSD-matrix, dynamics-based spectral overlap, sequence similarity\n :type similarities: :class:`~numpy.ndarray`\n\n :arg n_clusters: the number of clusters to generate. If **0**, will scan a range of \n number of clusters and return the best one based on highest\n silhouette score. Default is **0**.\n :type n_clusters: int\n\n :arg linkage: if **all**, will test all linkage types (ward, average, complete,\n single). Otherwise will use only the one(s) given as input. Default is\n **all**.\n :type linkage: str, list, tuple, :class:`~numpy.ndarray`\n\n :arg method: if set to **spectral**, will generate a Kirchoff matrix based on the \n cutoff value given and use that as input as clustering instead of\n the values themselves. Default is **tsne**.\n :type method: str\n\n :arg cutoff: only used if *method* is set to **spectral**. This value is used for \n generating the Kirchoff matrix to use for generating clusters when\n doing spectral clustering. Default is **0.0**.\n :type cutoff: float\n \"\"\"\n\n # Import necessary packages\n try:\n from sklearn.manifold import SpectralEmbedding\n from sklearn.cluster import AgglomerativeClustering\n from sklearn.metrics import silhouette_score\n from sklearn.manifold import TSNE\n except ImportError:\n raise ImportError('need sklearn module')\n '''\n try: \n import Bio \n except ImportError:\n raise ImportError('Phylo module could not be imported. 
'\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n '''\n \n\n # Check inputs to make sure are of valid types/values\n if not isinstance(similarities, np.ndarray):\n raise TypeError('similarities should be a numpy ndarray')\n\n dim = similarities.shape\n if dim[0] != dim[1]:\n raise ValueError('similarities must be a square matrix')\n\n if n_clusters != 0:\n if not isinstance(n_clusters, int):\n raise TypeError('clusters must be an instance of int')\n if n_clusters < 1:\n raise ValueError('clusters must be a positive integer')\n elif n_clusters > similarities.shape[0]:\n raise ValueError('clusters can\\'t be longer than similarities matrix')\n nclusts = range(n_clusters,n_clusters+1)\n else:\n nclusts = range(2,10,1)\n\n if linkage != 'all':\n # Check if given input for linkage is list-like\n if isListLike(linkage):\n for val in linkage:\n if val.lower() not in ['ward', 'average', 'complete', 'single']:\n raise ValueError('linkage must be one or more of: \\'ward\\', \\'average\\', \\'complete\\', or \\'single\\'')\n if len(linkage) > 4:\n raise ValueError('linkage must be one or more of: \\'ward\\', \\'average\\', \\'complete\\', or \\'single\\'')\n linkages = [ x.lower() for x in linkage ]\n\n # If not, check if it is a valid string and method name\n else:\n if not isinstance(linkage, str):\n raise TypeError('linkage must be an instance of str or list-like of strs')\n\n if linkage not in ['ward', 'average', 'complete', 'single']:\n raise ValueError('linkage must one or more of: \\'ward\\', \\'average\\', \\'complete\\', or \\'single\\'')\n\n linkages = [linkage]\n else:\n linkages = ['ward', 'average', 'complete', 'single']\n\n if method != 'tsne':\n if not isinstance(method, str):\n raise TypeError('method must be an instance of str')\n if method != 'spectral':\n raise ValueError('method must be either \\'tsne\\' or \\'spectral\\'')\n\n if not isinstance(cutoff, float):\n raise TypeError('cutoff must be an instance of float')\n\n best_score = -1\n best_nclust = 0\n best_link = ''\n best_labels = []\n\n # Scan over range of clusters\n for x in nclusts:\n if method == 'tsne':\n embedding = TSNE(n_components=2)\n transform = embedding.fit_transform(similarities)\n\n else:\n kirchhoff = np.where(similarities > cutoff, 0, -1)\n embedding = SpectralEmbedding(n_components=2)\n transform = embedding.fit_transform(kirchhoff)\n\n for link in linkages:\n clustering = AgglomerativeClustering(linkage=link, n_clusters=x)\n clustering.fit(transform)\n\n silhouette_avg = silhouette_score(transform, clustering.labels_)\n \n if silhouette_avg > best_score:\n best_score = silhouette_avg\n best_nclust = x\n best_link = link\n best_labels = clustering.labels_\n\n\n return best_labels\n\ndef getCoords(data):\n\n try:\n data = (data._getCoords() if hasattr(data, '_getCoords') else\n data.getCoords())\n except AttributeError:\n try:\n checkCoords(data)\n except TypeError:\n raise TypeError('data must be a Numpy array or an object '\n 'with `getCoords` method')\n\n return data\n\ndef getLinkage(names, tree):\n \"\"\" Obtain the :func:`~scipy.cluster.hierarchy.linkage` matrix encoding \n ``tree``. 
\n \n :arg names: a list of names, the order determines the values in the \n linkage matrix\n :type names: list, :class:`~numpy.ndarray`\n\n :arg tree: tree to be converted\n :type tree: :class:`~Bio.Phylo.BaseTree.Tree`\n \"\"\"\n\n tree_terminals = tree.get_terminals()\n\n if len(tree_terminals) != len(names):\n raise ValueError('inconsistent number of terminals in tree and names')\n \n terminals = [None] * len(names)\n for clade in tree_terminals:\n i = index(names, clade.name)\n terminals[i] = clade\n\n n = len(terminals)\n nonterminals = [c for c in reversed(tree.get_nonterminals())]\n if len(nonterminals) != n-1:\n raise LinkageError('wrong number of terminal clades')\n\n Z = np.zeros((n-1, 4))\n\n root = tree.root\n\n def _indexOfClade(clade):\n if clade.is_terminal():\n i = index(terminals, clade)\n else:\n i = index(nonterminals, clade) + n\n return i\n\n def _height_of(clade):\n if clade.is_terminal():\n height = 0 \n else:\n height = max(_height_of(c) + c.branch_length for c in clade.clades)\n\n return height\n\n def _dfs(clade):\n if clade.is_terminal():\n return\n\n i = _indexOfClade(clade)\n clade_a = clade.clades[0]\n clade_b = clade.clades[1]\n\n a = _indexOfClade(clade_a)\n b = _indexOfClade(clade_b) \n\n l = min(a, b)\n r = max(a, b)\n\n Z[i-n, 0] = l\n Z[i-n, 1] = r\n Z[i-n, 2] = _height_of(clade) * 2.\n Z[i-n, 3] = clade.count_terminals()\n\n _dfs(clade_a)\n _dfs(clade_b)\n \n _dfs(root)\n\n return Z\n\ndef getTreeFromLinkage(names, linkage):\n \"\"\" Obtain the tree encoded by ``linkage``. \n \n :arg names: a list of names, the order should correspond to the values in \n linkage\n :type names: list, :class:`~numpy.ndarray`\n\n :arg linkage: linkage matrix\n :type linkage: :class:`~numpy.ndarray`\n \"\"\"\n try: \n import Bio \n except ImportError:\n raise ImportError('Phylo module could not be imported. '\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n\n from Bio.Phylo.BaseTree import Tree, Clade\n \n if not isinstance(linkage, np.ndarray):\n raise TypeError('linkage must be a numpy.ndarray instance')\n\n if linkage.ndim != 2:\n raise LinkageError('linkage must be a 2-dimensional matrix')\n\n if linkage.shape[1] != 4:\n raise LinkageError('linkage must have exactly 4 columns')\n\n n_terms = len(names)\n if linkage.shape[0] != n_terms-1:\n raise LinkageError('linkage must have exactly len(names)-1 rows')\n \n clades = []\n heights = []\n for name in names:\n clade = Clade(None, name)\n clades.append(clade)\n heights.append(0.)\n\n for link in linkage:\n l = int(link[0])\n r = int(link[1])\n height = link[2]\n\n left = clades[l]\n right = clades[r]\n\n lh = heights[l]\n rh = heights[r]\n\n left.branch_length = height - lh\n right.branch_length = height - rh\n\n clade = Clade(None, None)\n clade.clades.append(left)\n clade.clades.append(right)\n\n clades.append(clade)\n heights.append(height)\n\n return Tree(clade)\n\ndef calcTree(names, distance_matrix, method='upgma', linkage=False):\n \"\"\" Given a distance matrix, it creates an returns a tree structure.\n\n :arg names: a list of names\n :type names: list, :class:`~numpy.ndarray`\n\n :arg distance_matrix: a square matrix with length of ensemble. If numbers does not match *names*\n it will raise an error\n :type distance_matrix: :class:`~numpy.ndarray`\n\n :arg method: method used for constructing the tree. Acceptable options are ``\"upgma\"``, ``\"nj\"``, \n or methods supported by :func:`~scipy.cluster.hierarchy.linkage` such as ``\"single\"``, \n ``\"average\"``, ``\"ward\"``, etc. 
Default is ``\"upgma\"``\n :type method: str\n\n :arg linkage: whether the linkage matrix is returned. Note that NJ trees do not support linkage\n :type linkage: bool\n \"\"\"\n try: \n import Bio \n except ImportError:\n raise ImportError('Phylo module could not be imported. '\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n \n from .TreeConstruction import DistanceMatrix, DistanceTreeConstructor\n \n if len(names) != distance_matrix.shape[0] or len(names) != distance_matrix.shape[1]:\n raise ValueError(\"Mismatch between the sizes of matrix and names.\")\n \n method = method.lower().strip()\n\n if method in ['ward', 'single', 'average', 'weighted', 'centroid', 'median']:\n from scipy.cluster.hierarchy import linkage as hlinkage\n from scipy.spatial.distance import squareform\n \n Z = hlinkage(squareform(distance_matrix), method=method)\n tree = getTreeFromLinkage(names, Z)\n else:\n matrix = []\n k = 1\n Z = None\n for row in distance_matrix:\n matrix.append(list(row[:k]))\n k = k + 1\n \n if isinstance(names, np.ndarray):\n names = names.tolist()\n dm = DistanceMatrix(names, matrix)\n constructor = DistanceTreeConstructor()\n\n method = method.strip().lower()\n if method == 'nj':\n tree = constructor.nj(dm)\n elif method == 'upgma':\n tree = constructor.upgma(dm)\n if linkage:\n Z = getLinkage(names, tree)\n else:\n raise ValueError('Method can be only either \"nj\", \"upgma\" or '\n 'hierarchical clustering such as \"single\", \"average\", etc.')\n\n for node in tree.get_nonterminals():\n node.name = None\n\n if linkage:\n return tree, Z\n else:\n return tree\n\ndef writeTree(filename, tree, format_str='newick'):\n \"\"\" Write a tree to file using Biopython.\n\n :arg filename: name for output file\n :type filename: str\n\n :arg tree: a square matrix with length of ensemble. If numbers does not match *names*\n it will raise an error\n :type tree: :class:`~Bio.Phylo.BaseTree.Tree`\n\n :arg format_str: a string specifying the format for the tree\n :type format_str: str\n \"\"\"\n try: \n from Bio import Phylo\n except ImportError:\n raise ImportError('Phylo module could not be imported. '\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n\n if not isinstance(filename, str):\n raise TypeError('filename should be a string')\n\n if not isinstance(tree, Phylo.BaseTree.Tree):\n raise TypeError('tree should be a Biopython.Phylo Tree object')\n\n if not isinstance(format_str, str):\n raise TypeError('format_str should be a string')\n\n Phylo.write(tree, filename, format_str)\n\n\ndef clusterMatrix(distance_matrix=None, similarity_matrix=None, labels=None, return_linkage=None, **kwargs):\n \"\"\"\n Cluster a distance matrix using scipy.cluster.hierarchy and \n return the sorted matrix, indices used for sorting, sorted labels (if **labels** are passed), \n and linkage matrix (if **return_linkage** is **True**). Set ``similarity=True`` for clustering a similarity matrix\n \n :arg distance_matrix: an N-by-N matrix containing some measure of distance \n such as 1. 
- seqid_matrix, rmsds, or distances in PCA space\n :type similarity_matrix: :class:`~numpy.ndarray`\n\n :arg similarity_matrix: an N-by-N matrix containing some measure of similarity \n such as sequence identity, mode-mode overlap, or spectral overlap\n :type similarity_matrix: :class:`~numpy.ndarray`\n \n :arg labels: labels for each matrix row that can be returned sorted\n :type labels: list\n\n :arg no_plot: if **True**, don't plot the dendrogram.\n default is **True**\n :type no_plot: bool\n \n :arg reversed: if set to **True**, then the sorting indices will be reversed.\n :type reversed: bool\n\n Other arguments for :func:`~scipy.hierarchy.linkage` and :func:`~scipy.hierarchy.dendrogram`\n can also be provided and will be taken as **kwargs**.\n \"\"\"\n\n import scipy.cluster.hierarchy as sch\n from scipy import spatial\n if similarity_matrix is None and distance_matrix is None:\n raise ValueError('Please provide a distance matrix or a similarity matrix')\n \n orientation = kwargs.pop('orientiation', 'right')\n reversed = kwargs.pop('reversed', False)\n no_plot = kwargs.pop('no_plot', True)\n\n if distance_matrix is None:\n matrix = similarity_matrix\n distance_matrix = 1. - similarity_matrix\n else:\n matrix = distance_matrix\n \n formatted_distance_matrix = spatial.distance.squareform(distance_matrix)\n linkage_matrix = sch.linkage(formatted_distance_matrix, **kwargs)\n sorting_dendrogram = sch.dendrogram(linkage_matrix, orientation=orientation, labels=labels, no_plot=no_plot)\n\n indices = sorting_dendrogram['leaves']\n sorted_labels = sorting_dendrogram['ivl']\n\n if reversed:\n indices = indices[::-1]\n sorted_labels = sorted_labels[::-1]\n \n sorted_matrix = matrix[indices, :]\n sorted_matrix = sorted_matrix[:, indices]\n \n return_vals = [sorted_matrix, indices]\n\n if labels is not None:\n return_vals.append(sorted_labels)\n if return_linkage:\n return_vals.append(linkage_matrix)\n return tuple(return_vals) # convert to tuple to avoid [pylint] E0632:Possible unbalanced tuple unpacking\n\ndef showLines(*args, **kwargs):\n \"\"\"\n Show 1-D data using :func:`~matplotlib.axes.Axes.plot`. \n \n :arg x: (optional) x coordinates. *x* can be an 1-D array or a 2-D matrix of \n column vectors.\n :type x: :class:`~numpy.ndarray`\n\n :arg y: data array. *y* can be an 1-D array or a 2-D matrix of \n column vectors.\n :type y: :class:`~numpy.ndarray`\n\n :arg dy: an array of variances of *y* which will be plotted as a \n band along *y*. It should have the same shape with *y*.\n :type dy: :class:`~numpy.ndarray`\n\n :arg lower: an array of lower bounds which will be plotted as a \n band along *y*. It should have the same shape with *y* and should be \n paired with *upper*.\n :type lower: :class:`~numpy.ndarray`\n\n :arg upper: an array of upper bounds which will be plotted as a \n band along *y*. It should have the same shape with *y* and should be \n paired with *lower*.\n :type upper: :class:`~numpy.ndarray`\n\n :arg alpha: the transparency of the band(s) for plotting *dy*.\n :type alpha: float\n\n :arg beta: the transparency of the band(s) for plotting *miny* and *maxy*.\n :type beta: float\n\n :arg ticklabels: user-defined tick labels for x-axis.\n :type ticklabels: list\n \"\"\"\n \n # note for developers: this function serves as a low-level \n # plotting function which provides basic utilities for other \n # plotting functions. 
Therefore showFigure is not handled \n # in this function as it should be already handled in the caller.\n\n ticklabels = kwargs.pop('ticklabels', None)\n dy = kwargs.pop('dy', None)\n miny = kwargs.pop('lower', None)\n maxy = kwargs.pop('upper', None)\n alpha = kwargs.pop('alpha', 0.5)\n beta = kwargs.pop('beta', 0.25)\n gap = kwargs.pop('gap', False)\n labels = kwargs.pop('label', None)\n\n from matplotlib import cm, ticker\n from matplotlib.pyplot import figure, gca, xlim\n\n ax = gca()\n lines = ax.plot(*args, **kwargs)\n\n polys = []\n \n for i, line in enumerate(lines):\n color = line.get_color()\n x, y = line.get_data()\n \n if gap:\n x_new, y_new = addEnds(x, y)\n line.set_data(x_new, y_new)\n else:\n x_new, y_new = x, y\n \n if labels is not None:\n if np.isscalar(labels):\n line.set_label(labels)\n else:\n try:\n line.set_label(labels[i])\n except IndexError:\n raise ValueError('The number of labels ({0}) and that of y ({1}) do not match.'\n .format(len(labels), len(line)))\n \n # the following function needs to be here so that line exists\n def sub_array(a, i, tag='a'):\n ndim = 0\n if a is not None:\n if np.isscalar(a[0]):\n ndim = 1 # a plain list (array)\n else:\n ndim = 2 # a nested list (array)\n else:\n return None\n\n if ndim == 1:\n _a = a\n else:\n try:\n _a = a[i]\n except IndexError:\n raise ValueError('The number of {2} ({0}) and that of y ({1}) do not match.'\n .format(len(miny), len(line), tag))\n\n if len(_a) != len(y):\n raise ValueError('The shapes of {2} ({0}) and y ({1}) do not match.'\n .format(len(_miny), len(y), tag))\n return _a\n\n if miny is not None and maxy is not None:\n _miny = sub_array(miny, i)\n _maxy = sub_array(maxy, i)\n\n if gap:\n _, _miny = addEnds(x, _miny)\n _, _maxy = addEnds(x, _maxy)\n \n poly = ax.fill_between(x_new, _miny, _maxy,\n alpha=beta, facecolor=color, edgecolor=None,\n linewidth=1, antialiased=True)\n polys.append(poly)\n\n if dy is not None:\n _dy = sub_array(dy, i)\n\n if gap:\n _, _dy = addEnds(x, _dy)\n \n poly = ax.fill_between(x_new, y_new-_dy, y_new+_dy,\n alpha=alpha, facecolor=color, edgecolor=None,\n linewidth=1, antialiased=True)\n polys.append(poly)\n\n ax.margins(x=0)\n if ticklabels is not None:\n if callable(ticklabels):\n ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(ticklabels))\n else:\n ax.get_xaxis().set_major_formatter(ticker.IndexFormatter(ticklabels))\n \n ax.xaxis.set_major_locator(ticker.AutoLocator())\n ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n\n return lines, polys\n\ndef showMatrix(matrix, x_array=None, y_array=None, **kwargs):\n \"\"\"Show a matrix using :meth:`~matplotlib.axes.Axes.imshow`. Curves on x- and y-axis can be added.\n\n :arg matrix: matrix to be displayed\n :type matrix: :class:`~numpy.ndarray`\n\n :arg x_array: data to be plotted above the matrix\n :type x_array: :class:`~numpy.ndarray`\n\n :arg y_array: data to be plotted on the left side of the matrix\n :type y_array: :class:`~numpy.ndarray`\n\n :arg percentile: a percentile threshold to remove outliers, i.e. 
only showing data within *p*-th \n to *100-p*-th percentile\n :type percentile: float\n\n :arg interactive: turn on or off the interactive options\n :type interactive: bool\n\n :arg xtickrotation: how much to rotate the xticklabels in degrees\n default is 0\n :type xtickrotation: float\n \"\"\"\n\n from matplotlib import ticker\n from matplotlib.gridspec import GridSpec\n from matplotlib.collections import LineCollection\n from matplotlib.pyplot import gca, sca, sci, colorbar, subplot\n\n from .drawtools import drawTree\n\n p = kwargs.pop('percentile', None)\n vmin = vmax = None\n if p is not None:\n vmin = np.percentile(matrix, p)\n vmax = np.percentile(matrix, 100-p)\n \n vmin = kwargs.pop('vmin', vmin)\n vmax = kwargs.pop('vmax', vmax)\n vcenter = kwargs.pop('vcenter', None)\n norm = kwargs.pop('norm', None)\n\n if vcenter is not None and norm is None:\n if PY3K:\n try:\n from matplotlib.colors import DivergingNorm\n except ImportError:\n from matplotlib.colors import TwoSlopeNorm as DivergingNorm\n\n norm = DivergingNorm(vmin=vmin, vcenter=0., vmax=vmax)\n else:\n LOGGER.warn('vcenter cannot be used in Python 2 so norm remains None')\n\n lw = kwargs.pop('linewidth', 1)\n \n W = H = kwargs.pop('ratio', 6)\n\n ticklabels = kwargs.pop('ticklabels', None)\n xticklabels = kwargs.pop('xticklabels', ticklabels)\n yticklabels = kwargs.pop('yticklabels', ticklabels)\n\n xtickrotation = kwargs.pop('xtickrotation', 0.)\n\n show_colorbar = kwargs.pop('colorbar', True)\n cb_extend = kwargs.pop('cb_extend', 'neither')\n allticks = kwargs.pop('allticks', False) # this argument is temporary and will be replaced by better implementation\n interactive = kwargs.pop('interactive', True)\n\n cmap = kwargs.pop('cmap', 'jet')\n origin = kwargs.pop('origin', 'lower')\n\n try: \n from Bio import Phylo\n except ImportError:\n raise ImportError('Phylo module could not be imported. 
'\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n tree_mode_y = isinstance(y_array, Phylo.BaseTree.Tree)\n tree_mode_x = isinstance(x_array, Phylo.BaseTree.Tree)\n\n if x_array is not None and y_array is not None:\n nrow = 2; ncol = 2\n i = 1; j = 1\n width_ratios = [1, W]\n height_ratios = [1, H]\n aspect = 'auto'\n elif x_array is not None and y_array is None:\n nrow = 2; ncol = 1\n i = 1; j = 0\n width_ratios = [W]\n height_ratios = [1, H]\n aspect = 'auto'\n elif x_array is None and y_array is not None:\n nrow = 1; ncol = 2\n i = 0; j = 1\n width_ratios = [1, W]\n height_ratios = [H]\n aspect = 'auto'\n else:\n nrow = 1; ncol = 1\n i = 0; j = 0\n width_ratios = [W]\n height_ratios = [H]\n aspect = kwargs.pop('aspect', None)\n\n main_index = (i, j)\n upper_index = (i-1, j)\n left_index = (i, j-1)\n\n complex_layout = nrow > 1 or ncol > 1\n\n ax1 = ax2 = ax3 = None\n\n if complex_layout:\n gs = GridSpec(nrow, ncol, width_ratios=width_ratios, \n height_ratios=height_ratios, hspace=0., wspace=0.)\n\n ## draw matrix\n if complex_layout:\n ax3 = subplot(gs[main_index])\n else:\n ax3 = gca()\n \n im = ax3.imshow(matrix, aspect=aspect, vmin=vmin, vmax=vmax, \n norm=norm, cmap=cmap, origin=origin, **kwargs)\n \n #ax3.set_xlim([-0.5, matrix.shape[0]+0.5])\n #ax3.set_ylim([-0.5, matrix.shape[1]+0.5])\n\n if xticklabels is not None:\n ax3.xaxis.set_major_formatter(ticker.IndexFormatter(xticklabels))\n if yticklabels is not None and ncol == 1:\n ax3.yaxis.set_major_formatter(ticker.IndexFormatter(yticklabels))\n\n if allticks:\n ax3.xaxis.set_major_locator(ticker.IndexLocator(offset=0.5, base=1.))\n ax3.yaxis.set_major_locator(ticker.IndexLocator(offset=0.5, base=1.))\n else:\n locator = ticker.AutoLocator()\n locator.set_params(integer=True)\n minor_locator = ticker.AutoMinorLocator()\n\n ax3.xaxis.set_major_locator(locator)\n ax3.xaxis.set_minor_locator(minor_locator)\n\n locator = ticker.AutoLocator()\n locator.set_params(integer=True)\n minor_locator = ticker.AutoMinorLocator()\n \n ax3.yaxis.set_major_locator(locator)\n ax3.yaxis.set_minor_locator(minor_locator)\n\n if ncol > 1:\n ax3.yaxis.set_major_formatter(ticker.NullFormatter())\n \n ## draw x_ and y_array\n lines = []\n\n if nrow > 1:\n ax1 = subplot(gs[upper_index])\n\n if tree_mode_x:\n Y, X = drawTree(x_array, label_func=None, orientation='vertical', \n inverted=True)\n miny = min(Y.values())\n maxy = max(Y.values())\n\n minx = min(X.values())\n maxx = max(X.values())\n\n ax1.set_xlim(minx-.5, maxx+.5)\n ax1.set_ylim(miny, 1.05*maxy)\n else:\n ax1.set_xticklabels([])\n \n y = x_array\n xp, yp = interpY(y)\n points = np.array([xp, yp]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lcy = LineCollection(segments, array=yp, linewidths=lw, cmap=cmap)\n lines.append(lcy)\n ax1.add_collection(lcy)\n\n ax1.set_xlim(xp.min()-.5, xp.max()+.5)\n ax1.set_ylim(yp.min(), yp.max())\n\n if ax3.xaxis_inverted():\n ax1.invert_xaxis()\n\n ax1.axis('off')\n\n if ncol > 1:\n ax2 = subplot(gs[left_index])\n \n if tree_mode_y:\n X, Y = drawTree(y_array, label_func=None, inverted=True)\n miny = min(Y.values())\n maxy = max(Y.values())\n\n minx = min(X.values())\n maxx = max(X.values())\n\n ax2.set_ylim(miny-.5, maxy+.5)\n ax2.set_xlim(minx, 1.05*maxx)\n else:\n ax2.set_xticklabels([])\n \n y = y_array\n xp, yp = interpY(y)\n points = np.array([yp, xp]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lcx = LineCollection(segments, array=yp, linewidths=lw, 
cmap=cmap)\n lines.append(lcx)\n ax2.add_collection(lcx)\n ax2.set_xlim(yp.min(), yp.max())\n ax2.set_ylim(xp.min()-.5, xp.max()+.5)\n \n ax2.invert_xaxis()\n\n if ax3.yaxis_inverted():\n ax2.invert_yaxis()\n\n ax2.axis('off')\n\n ## draw colorbar\n sca(ax3)\n cb = None\n if show_colorbar:\n if nrow > 1:\n axes = [ax1, ax2, ax3]\n while None in axes:\n axes.remove(None)\n s = H / (H + 1.)\n cb = colorbar(mappable=im, ax=axes, anchor=(0, 0), shrink=s, extend=cb_extend)\n else:\n cb = colorbar(mappable=im, extend=cb_extend)\n\n sca(ax3)\n sci(im)\n\n if interactive:\n from prody.utilities import ImageCursor\n from matplotlib.pyplot import connect\n cursor = ImageCursor(ax3, im)\n connect('button_press_event', cursor.onClick)\n\n ax3.tick_params(axis='x', rotation=xtickrotation)\n\n return im, lines, cb\n\ndef reorderMatrix(names, matrix, tree, axis=None):\n \"\"\"\n Reorder a matrix based on a tree and return the reordered matrix \n and indices for reordering other things.\n\n :arg names: a list of names associated with the rows of the matrix\n These names must match the ones used to generate the tree\n :type names: list\n\n :arg matrix: any square matrix\n :type matrix: :class:`~numpy.ndarray`\n\n :arg tree: any tree from :func:`calcTree`\n :type tree: :class:`~Bio.Phylo.BaseTree.Tree`\n\n :arg axis: along which axis the matrix should be reordered. \n Default is **None** which reorder along all the axes\n :type axis: int\n \"\"\"\n\n try:\n from Bio import Phylo\n except ImportError:\n raise ImportError('Phylo module could not be imported. '\n 'Reinstall ProDy or install Biopython '\n 'to solve the problem.')\n\n try:\n if matrix.ndim != 2:\n raise ValueError('matrix should be a 2D matrix.')\n except AttributeError:\n raise TypeError('matrix should be a numpy array.')\n\n if np.shape(matrix)[0] != np.shape(matrix)[1]:\n raise ValueError('matrix should be a square matrix')\n \n names = np.asarray(names)\n\n if np.isscalar(names):\n raise TypeError('names should be list-like')\n \n if not len(names):\n raise TypeError('names is empty')\n\n if not isinstance(tree, Phylo.BaseTree.Tree):\n raise TypeError('tree should be a BioPython Tree')\n\n if len(names) != len(matrix):\n raise ValueError('names should have entries for each matrix row/column')\n \n terminals = tree.get_terminals()\n if len(names) != len(terminals):\n raise ValueError('names should have entries for each tree terminal')\n\n if len(terminals) != len(matrix):\n raise ValueError('matrix should have a row for each tree terminal')\n\n indices = []\n for terminal in terminals:\n name = terminal.name\n locs = np.where(names == name)[0]\n if not len(locs):\n raise ValueError('inconsistent names and tree: %s not in names'%name)\n\n if len(locs) > 1:\n raise ValueError('inconsistent names and tree: duplicate name %s in names'%name)\n indices.append(locs[0])\n\n # rmatrix = matrix[:, indices]\n # rmatrix = rmatrix[indices, :]\n\n if axis is not None:\n I = [np.arange(s) for s in matrix.shape] \n axes = [axis] if np.isscalar(axis) else axis\n for ax in axes:\n I[ax] = indices\n else:\n I = [indices] * matrix.ndim\n \n rmatrix = matrix[np.ix_(*I)]\n \n return rmatrix, indices\n\ndef findSubgroups(tree, c, method='naive', **kwargs):\n \"\"\"\n Divide a tree into subgroups using a criterion and a cutoff.\n Returns a list of lists with labels divided into subgroups.\n \"\"\"\n\n method = method.lower().strip()\n terminals = tree.get_terminals()\n names = [clade.name for clade in terminals]\n Z = None\n\n if method != 'naive':\n try:\n Z = 
getLinkage(names, tree)\n except LinkageError:\n print('Failed to build linkage; falling back to naive criterion')\n method = 'naive'\n \n if method == 'naive':\n subgroups = [[names[0]]]\n for i in range(len(terminals)-1):\n curr_clade = terminals[i]\n next_clade = terminals[i + 1]\n d = tree.distance(curr_clade, next_clade)\n if d > c:\n subgroups.append([])\n subgroups[-1].append(next_clade.name)\n else:\n from scipy.cluster.hierarchy import fcluster\n \n T = fcluster(Z, c, criterion=method, **kwargs)\n labels = np.unique(T)\n subgroups = [[] for _ in range(len(labels))]\n\n for i, t in enumerate(T):\n subgroups[t-1].append(names[i])\n\n return subgroups\n" ]
[ [ "matplotlib.ticker.IndexLocator", "matplotlib.pyplot.sci", "matplotlib.ticker.FuncFormatter", "numpy.asarray", "matplotlib.pyplot.connect", "numpy.isscalar", "scipy.cluster.hierarchy.dendrogram", "matplotlib.pyplot.gca", "scipy.cluster.hierarchy.linkage", "sklearn.manifold.TSNE", "sklearn.metrics.silhouette_score", "matplotlib.ticker.IndexFormatter", "numpy.where", "matplotlib.gridspec.GridSpec", "sklearn.manifold.SpectralEmbedding", "numpy.unique", "matplotlib.colors.TwoSlopeNorm", "matplotlib.ticker.AutoLocator", "numpy.zeros", "numpy.arange", "matplotlib.pyplot.colorbar", "matplotlib.collections.LineCollection", "numpy.percentile", "scipy.spatial.distance.squareform", "numpy.ix_", "matplotlib.ticker.NullFormatter", "scipy.cluster.hierarchy.fcluster", "matplotlib.pyplot.sca", "matplotlib.pyplot.subplot", "numpy.shape", "numpy.array", "numpy.concatenate", "matplotlib.ticker.AutoMinorLocator", "sklearn.cluster.AgglomerativeClustering" ] ]
ZHANG-CAIQI/COMP1001
[ "abfad8101b4b58697dfbc8599eebf466beebb9ec" ]
[ "Assessments 1-8/Ass8/Q2_b_1.py" ]
[ "import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef stockUp(priceFile):\r\n\r\n # read the file\r\n infile = open(priceFile, \"r\")\r\n date = []\r\n stock = []\r\n\r\n # store only the dates and closing price\r\n day = 1\r\n firstLine = True\r\n for line in infile:\r\n if firstLine:\r\n firstLine = False\r\n else:\r\n count_item = 0\r\n for item in line.split(\",\"):\r\n if count_item == 0:\r\n date.append(day)\r\n elif count_item == 4:\r\n stock.append(float(item))\r\n count_item += 1\r\n day += 1\r\n\r\n infile.close()\r\n\r\n # Compute the up periods\r\n up = len(date)*[0]\r\n for k in range(1,len(stock)): # skip the heading\r\n i = k # i = k = 1\r\n while ((i>0) and float(stock[k])>=float(stock[i])):\r\n up[k] += 1\r\n i -= 1\r\n\r\n\r\n fig, ax1 = plt.subplots()\r\n\r\n color = 'tab:red'\r\n ax1.set_xlabel('Days started from 11/13/2017 and end on 11/12/2018')\r\n ax1.set_ylabel('Stock prices', color=color)\r\n ax1.plot(date, stock, color=color)\r\n ax1.tick_params(axis='y', labelcolor=color)\r\n\r\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\r\n\r\n color = 'tab:blue'\r\n ax2.set_ylabel('Up periods', color=color) # we already handled the x-label with ax1\r\n ax2.plot(date, up, color=color)\r\n ax2.tick_params(axis='y', labelcolor=color)\r\n\r\n fig.tight_layout() # otherwise the right y-label is slightly clipped\r\n plt.show()\r\n\r\n return\r\n\r\n\"\"\"\r\n plt.plot(date, up, marker='x')\r\n plt.plot(date, stock, marker='o')\r\n plt.title('The up periods for 11/13/2017-11/12/2018')\r\n plt.xlabel('Days started from 11/13/2017 and end on 11/12/2018')\r\n plt.ylabel('The up periods of GOOGL at closing')\r\n plt.show()\r\n\"\"\" \r\n\r\n\r\nstockUp(\"GOOGL.csv\")\r\n\r\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
steffi7574/lbann
[ "665797a112dc96d15bd1d958de61f48bf5d3d21f" ]
[ "bamboo/unit_tests/test_unit_layer_gather.py" ]
[ "import functools\nimport operator\nimport os\nimport os.path\nimport sys\nimport numpy as np\n\n# Bamboo utilities\ncurrent_file = os.path.realpath(__file__)\ncurrent_dir = os.path.dirname(current_file)\nsys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))\nimport tools\n\n# ==============================================\n# Objects for Python data reader\n# ==============================================\n# Note: The Python data reader imports this file as a module and calls\n# the functions below to ingest data.\n\n# Data\ninput_size = 23\noutput_size = 15\nseed = 202101280\n\n# Sample access functions\ndef get_sample(index):\n np.random.seed(seed+index)\n values = [np.random.normal() for _ in range(input_size)]\n indices = [\n np.random.uniform(-1, input_size+1)\n for _ in range(output_size)\n ]\n return values + indices\ndef num_samples():\n return 25\ndef sample_dims():\n return (input_size+output_size,)\n\n# ==============================================\n# Setup LBANN experiment\n# ==============================================\n\ndef setup_experiment(lbann):\n \"\"\"Construct LBANN experiment.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n mini_batch_size = num_samples() // 2\n trainer = lbann.Trainer(mini_batch_size)\n model = construct_model(lbann)\n data_reader = construct_data_reader(lbann)\n optimizer = lbann.NoOptimizer()\n return trainer, model, data_reader, optimizer\n\ndef construct_model(lbann):\n \"\"\"Construct LBANN model.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Input data\n # Note: Sum with a weights layer so that gradient checking will\n # verify that error signals are correct.\n x = lbann.Identity(lbann.Input())\n x_slice = lbann.Slice(\n x,\n slice_points=tools.str_list([0,input_size,input_size+output_size]),\n )\n x0_weights = lbann.Weights(\n optimizer=lbann.SGD(),\n initializer=lbann.ConstantInitializer(value=0.0),\n name='input_weights',\n )\n x0 = lbann.Sum(\n lbann.Identity(x_slice),\n lbann.WeightsLayer(weights=x0_weights, dims=tools.str_list(input_size)),\n )\n x1 = lbann.Identity(x_slice)\n\n # Apply gather\n y0 = lbann.Gather(x0, x1)\n y1 = lbann.Concatenation([\n lbann.Constant(value=i+1, num_neurons='1')\n for i in range(output_size)\n ])\n y = lbann.Multiply(y0, y1)\n z = lbann.L2Norm2(y)\n\n # Objects for LBANN model\n layers = list(lbann.traverse_layer_graph(x))\n metric = lbann.Metric(z, name='obj')\n obj = lbann.ObjectiveFunction(z)\n callbacks = []\n\n # Compute expected metric value\n vals = []\n for i in range(num_samples()):\n x = get_sample(i)\n x0 = x[:input_size]\n x1 = x[input_size:]\n y0 = np.zeros(output_size)\n for i in range(output_size):\n if 0 <= x1[i] < input_size:\n y0[i] = x0[int(x1[i])]\n z = 0\n for i in range(output_size):\n z += ((i+1)*y0[i]) ** 2\n vals.append(z)\n val = np.mean(vals)\n tol = 8 * val * np.finfo(np.float32).eps\n callbacks.append(lbann.CallbackCheckMetric(\n metric=metric.name,\n lower_bound=val-tol,\n upper_bound=val+tol,\n error_on_failure=True,\n execution_modes='test'))\n\n # Gradient checking\n callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))\n\n # Construct model\n num_epochs = 0\n return lbann.Model(num_epochs,\n layers=layers,\n objective_function=obj,\n metrics=[metric],\n callbacks=callbacks)\n\ndef construct_data_reader(lbann):\n \"\"\"Construct Protobuf message for Python data reader.\n\n The Python data reader will import the current Python file to\n access the sample access 
functions.\n\n Args:\n lbann (module): Module for LBANN Python frontend\n\n \"\"\"\n\n # Note: The training data reader should be removed when\n # https://github.com/LLNL/lbann/issues/1098 is resolved.\n message = lbann.reader_pb2.DataReader()\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'train'\n )\n ])\n message.reader.extend([\n tools.create_python_data_reader(\n lbann,\n current_file,\n 'get_sample',\n 'num_samples',\n 'sample_dims',\n 'test'\n )\n ])\n return message\n\n# ==============================================\n# Setup PyTest\n# ==============================================\n\n# Create test functions that can interact with PyTest\nfor _test_func in tools.create_tests(setup_experiment, __file__):\n globals()[_test_func.__name__] = _test_func\n" ]
[ [ "numpy.random.uniform", "numpy.zeros", "numpy.random.seed", "numpy.random.normal", "numpy.finfo", "numpy.mean" ] ]
bigvideoresearch/SCC
[ "f26cdb6aaf248b5112812dbdac1f1b5086aebccc" ]
[ "datasets/imagename_dataset.py" ]
[ "from runner_master import runner\nimport os\nimport io\nimport torch\nimport logging\nfrom PIL import Image, ImageFile\nfrom runner_master.runner.data import datasets\n# to fix \"OSError: image file is truncated\"\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nclass ImagenameDataset(datasets.ImglistDatasetV2):\n\n def getitem(self, index):\n line = self.imglist[index].strip('\\n')\n tokens = line.split(' ', maxsplit=1)\n #if len(tokens) != 2:\n # raise RuntimeError('split tokens < 2')\n\n image_name, extra_str = tokens[0], tokens[1]\n if self.root != '' and image_name.startswith('/'):\n raise RuntimeError('root not empty but image_name starts with \"/\"')\n path = os.path.join(self.root, image_name)\n sample = dict()\n sample['image_name'] = image_name\n try:\n if not self.dummy_read:\n filebytes = self.reader(path)\n buff = io.BytesIO(filebytes)\n if self.dummy_size is not None:\n sample['data'] = torch.rand(self.dummy_size)\n else:\n image = Image.open(buff)\n sample['data'] = self.transform_image(image)\n for key, value in self.transform_extra(extra_str).items():\n sample[key] = value\n except Exception as e:\n logging.error('[{}] broken'.format(path))\n raise e\n return sample\n\nrunner.patch_dataset('ImagenameDataset', ImagenameDataset)\n" ]
[ [ "torch.rand" ] ]
hlzhang109/OpenPrompt
[ "8a1ec1ceac3805a11b09dda9b96ad7406d222f26" ]
[ "openprompt/prompts/one2one_verbalizer.py" ]
[ "import json\nfrom transformers.tokenization_utils import PreTrainedTokenizer\nfrom yacs.config import CfgNode\nfrom openprompt.data_utils.data_utils import InputFeatures\nimport re\nfrom openprompt import Verbalizer\nfrom typing import *\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom openprompt.utils.logging import logger\n\n\n\nclass One2oneVerbalizer(Verbalizer):\n r\"\"\"\n The basic manually defined verbalizer class, this class is inherited from the :obj:`Verbalizer` class.\n This class restrict the use of label words to one words per label. For a verbalzer with less constraints,\n please use Basic ManualVerbalizer.\n\n Args: \n tokenizer (:obj:`PreTrainedTokenizer`): The tokenizer of the current pre-trained model to point out the vocabulary.\n classes (:obj:`classes`): The classes (or labels) of the current task.\n num_classes (:obj:`int`): Optional. The number of classes of the verbalizer. Only one of `classes` and `num_classes` should be used.\n label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.\n prefix (:obj:`str`, optional): The prefix string of the verbalizer. (used in PLMs like RoBERTa, which is sensitive to prefix space)\n multi_token_handler (:obj:`str`, optional): The handling strategy for multiple tokens produced by the tokenizer.\n \"\"\"\n def __init__(self, \n tokenizer: PreTrainedTokenizer,\n num_classes: Optional[int] = None,\n classes: Optional[List] = None,\n label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,\n prefix: Optional[str] = \" \",\n multi_token_handler: Optional[str] = \"first\",\n ):\n super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)\n self.prefix = prefix\n self.multi_token_handler = multi_token_handler\n self.label_words = label_words\n\n def on_label_words_set(self):\n super().on_label_words_set()\n self.label_words = self.add_prefix(self.label_words, self.prefix)\n self.generate_parameters()\n \n @staticmethod\n def add_prefix(label_words, prefix):\n r\"\"\"Add prefix to label words. For example, if a label words is in the middle of a template,\n the prefix should be ``' '``.\n\n Args:\n label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.\n prefix (:obj:`str`, optional): The prefix string of the verbalizer.\n \n Returns:\n :obj:`Sequence[str]`: New label words with prefix.\n \"\"\"\n new_label_words = []\n if isinstance(label_words[0], list):\n assert max([len(w) for w in label_words]) == 1, \"Providing multiple label words, you should use other verbalizers instead.\"\n label_words = [w[0] for w in label_words] \n\n for word in label_words:\n if word.startswith(\"<!>\"):\n new_label_words.append(word.split(\"<!>\")[1])\n else:\n new_label_words.append(prefix + word)\n\n return new_label_words\n \n def generate_parameters(self) -> List:\n r\"\"\"In basic manual template, the parameters are generated from label words directly.\n In this implementation, the label_words should not be tokenized into more than one token. \n \"\"\"\n words_ids = []\n for word in self.label_words:\n word_ids = self.tokenizer.encode(word, add_special_tokens=False)\n if len(word_ids) > 1:\n logger.warning(\"Word {} is split into multiple tokens: {}. 
\\\n If this is not what you expect, try using another word for this verbalizer\" \\\n .format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))\n words_ids.append(word_ids)\n \n \n max_len = max([len(ids) for ids in words_ids])\n words_ids_mask = [[1]*len(ids) + [0]*(max_len-len(ids)) for ids in words_ids]\n words_ids = [ids+[0]*(max_len-len(ids)) for ids in words_ids]\n \n words_ids_tensor = torch.tensor(words_ids)\n words_ids_mask = torch.tensor(words_ids_mask)\n self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)\n self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)\n\n def project(self,\n logits: torch.Tensor,\n **kwargs,\n ) -> torch.Tensor:\n r\"\"\"\n Project the logits to the label words; the return value is the logits of the label words (normalization happens in ``normalize``). \n \n Args:\n logits (:obj:`torch.Tensor`): The original logits of label words.\n \n Returns:\n :obj:`torch.Tensor`: The logits of the label words\n \"\"\"\n label_words_logits = logits[:, self.label_words_ids]\n label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)\n return label_words_logits\n\n def process_logits(self, logits: torch.Tensor, **kwargs):\n r\"\"\"A whole framework to process the original logits over the vocabulary, which contains three steps: \n (1) Project the logits into logits of label words\n (2) Normalize over all label words\n (3) Calibrate (optional)\n Args:\n logits (:obj:`torch.Tensor`): The original logits.\n \n Returns:\n (:obj:`torch.Tensor`): The final processed logits over the label words set.\n \"\"\"\n # project\n label_words_logits = self.project(logits, **kwargs) #Output: (batch_size, num_classes) or (batch_size, num_classes, num_label_words_per_label)\n\n # normalize\n label_words_probs = self.normalize(label_words_logits)\n\n # calibrate\n if hasattr(self, \"_calibrate_logits\") and self._calibrate_logits is not None:\n label_words_probs = self.calibrate(label_words_probs=label_words_probs)\n\n # convert to logits\n label_words_logits = torch.log(label_words_probs+1e-15)\n return label_words_logits\n \n def normalize(self, logits: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Given logits regarding the entire vocabulary, return the probs over the label words set.\n \n Args:\n logits (:obj:`Tensor`): The logits over the entire vocabulary.\n\n Returns:\n :obj:`Tensor`: The logits over the label words set.\n \n \"\"\"\n batch_size = logits.shape[0]\n return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)\n\n\n def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:\n r\"\"\"\n \n Args:\n label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]\n \n Returns:\n :obj:`torch.Tensor`: The calibrated probability of label words.\n \"\"\"\n shape = label_words_probs.shape\n assert self._calibrate_logits.dim() == 1, \"self._calibrate_logits are not 1-d tensor\"\n calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))\n assert calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:] \\\n and calibrate_label_words_probs.shape[0]==1, \"shape not match\"\n label_words_probs /= (calibrate_label_words_probs+1e-15)\n # normalize # TODO Test the performance\n norm = label_words_probs.reshape(shape[0], -1).sum(dim=-1,keepdim=True) # TODO Test the performance of detaching()\n label_words_probs /= norm\n return 
label_words_probs\n \n\n \n\n\n \n \n" ]
[ [ "torch.log", "torch.tensor", "torch.nn.Parameter" ] ]
Ascend/modelzoo
[ "f018cfed33dbb1cc2110b9ea2e233333f71cc509", "f018cfed33dbb1cc2110b9ea2e233333f71cc509", "f018cfed33dbb1cc2110b9ea2e233333f71cc509", "f018cfed33dbb1cc2110b9ea2e233333f71cc509", "f018cfed33dbb1cc2110b9ea2e233333f71cc509", "f018cfed33dbb1cc2110b9ea2e233333f71cc509", "f018cfed33dbb1cc2110b9ea2e233333f71cc509" ]
[ "built-in/PyTorch/Official/cv/image_classification/Gluon_ResNet50_v1d_for_PyTorch/timm/optim/radam.py", "built-in/ACL_TensorFlow/Official/recommendation/DCN_for_ACL/scripts/eval.py", "built-in/ACL_TensorFlow/Official/recommendation/KGAT_for_ACL/Model/offline_inference/xacl_inference.py", "built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/tools/publish_model.py", "built-in/PyTorch/Official/cv/image_object_detection/YoloV3_ID1790_for_PyTorch/mmcv_need/distributed.py", "built-in/PyTorch/Official/cv/scene_text_detection/PSENet_for_PyTorch/NPU/test/test_npu.py", "contrib/PyTorch/Official/cv/image_classification/SPNASNet_100_for_PyTorch/timm/models/layers/test_time_pool.py" ]
[ "# Copyright [yyyy] [name of copyright owner]\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"RAdam Optimizer.\nImplementation lifted from: https://github.com/LiyuanLucasLiu/RAdam\nPaper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265\n\"\"\"\nimport math\nimport torch\nfrom torch.optim.optimizer import Optimizer, required\n\n\nclass RAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n self.buffer = [[None, None, None] for ind in range(10)]\n super(RAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(RAdam, self).__setstate__(state)\n\n def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n buffered = self.buffer[int(state['step'] % 10)]\n if state['step'] == buffered[0]:\n N_sma, step_size = buffered[1], buffered[2]\n else:\n buffered[0] = state['step']\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n buffered[1] = N_sma\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = group['lr'] * math.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n N_sma_max - 2)) / (1 - beta1 ** state['step'])\n else:\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n buffered[2] = step_size\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n else:\n p_data_fp32.add_(-step_size, exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n\n\nclass PlainRAdam(Optimizer):\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n\n super(PlainRAdam, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(PlainRAdam, self).__setstate__(state)\n\n 
def step(self, closure=None):\n\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data.float()\n if grad.is_sparse:\n raise RuntimeError('RAdam does not support sparse gradients')\n\n p_data_fp32 = p.data.float()\n\n state = self.state[p]\n\n if len(state) == 0:\n state['step'] = 0\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\n state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n else:\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n state['step'] += 1\n beta2_t = beta2 ** state['step']\n N_sma_max = 2 / (1 - beta2) - 1\n N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n\n if group['weight_decay'] != 0:\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n # more conservative since it's an approximated value\n if N_sma >= 5:\n step_size = group['lr'] * math.sqrt(\n (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n N_sma_max - 2)) / (1 - beta1 ** state['step'])\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n p_data_fp32.addcdiv_(-step_size, exp_avg, denom)\n else:\n step_size = group['lr'] / (1 - beta1 ** state['step'])\n p_data_fp32.add_(-step_size, exp_avg)\n\n p.data.copy_(p_data_fp32)\n\n return loss\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom sklearn.metrics import average_precision_score, roc_auc_score\nimport numpy as np\nimport sys\n\ndef aucPerformance(mse, labels):\n \"\"\"\n :param mse:\n :param labels:\n :return:\n \"\"\"\n roc_auc = roc_auc_score(labels, mse)\n ap = average_precision_score(labels, mse)\n print(\"AUC-ROC: %.4f, AUC-PR: %.4f\" % (roc_auc, ap))\n return roc_auc, ap\n\n\ndef eval_om(label_dir, om_output_dir):\n \"\"\"\n :param label_dir:\n :param om_output_dir:\n :return:\n \"\"\"\n label, score = read_directory(label_dir, om_output_dir)\n aucPerformance(score, label)\n\n\ndef read_directory(label_dir, om_output_dir):\n \"\"\"\n 
:param label_dir:\n :param om_output_dir:\n :return:\n \"\"\"\n # get label bin files\n labels = os.listdir(label_dir)\n labels.sort()\n labels_data = list()\n\n # get om output files\n outputs = os.listdir(om_output_dir)\n outputs.sort()\n outputs_data = list()\n\n for i in range(len(outputs)):\n label_data = np.fromfile(os.path.join(label_dir, labels[i]), dtype=np.int32)\n labels_data.extend(label_data)\n output_data = np.fromfile(os.path.join(om_output_dir, outputs[i]), dtype=np.float32)\n outputs_data.extend(output_data)\n return labels_data, outputs_data\n\ngt_dir = sys.argv[1]\npredict_dir = sys.argv[2]\neval_om(gt_dir, predict_dir)\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport numpy as np\nimport multiprocessing\nimport sys\nsys.path.append('./')\nfrom utility.batch_test import cores, Ks, BATCH_SIZE, ITEM_NUM, data_generator, test_one_user\nfrom utility.parser import parse_args\n\n\ndef xaclPath(output_path, inference_path, model_path):\n \"\"\"\n Run inference using folders of input bin files.\n \"\"\"\n if os.path.isdir(inference_path):\n os.system(\"rm -rf \" + inference_path)\n os.makedirs(inference_path)\n output_path = output_path if output_path[-1] == \"/\" else output_path + \"/\"\n output_path_lst = [output_path + \"input1\", output_path + \"input2\", output_path + \"input3\"]\n output_paths = ','.join(output_path_lst)\n print(\"xacl_fmk -m \" + model_path + \" -i \" + output_paths +\n \" -o \" + inference_path + '/kgat_output_bin')\n os.system(\"xacl_fmk -m \" + model_path + \" -i \" +\n output_paths + \" -o \" + inference_path + '/kgat_output_bin')\n print(inference_path)\n print(\"[INFO] Inference results generated\")\n\n\ndef inference_files(inference_path):\n result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),\n 'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}\n\n pool = multiprocessing.Pool(cores)\n\n u_batch_size = BATCH_SIZE * 2\n\n users_to_test = list(data_generator.test_user_dict.keys())\n test_users = users_to_test\n n_test_users = len(test_users)\n\n files = sorted(os.listdir(inference_path))\n files = [inference_path + '/' + i for i in files]\n\n for u_batch_id, f in enumerate(files):\n if f.endswith(\".bin\"):\n rate_batch = np.fromfile(f, dtype='float32')\n rate_batch = rate_batch.reshape((-1, ITEM_NUM))\n\n start = 
u_batch_id * u_batch_size\n end = (u_batch_id + 1) * u_batch_size\n\n user_batch = test_users[start: end]\n\n user_batch_rating_uid = zip(rate_batch, user_batch)\n batch_result = pool.map(test_one_user, user_batch_rating_uid)\n\n for re in batch_result:\n result['precision'] += re['precision'] / n_test_users\n result['recall'] += re['recall'] / n_test_users\n result['ndcg'] += re['ndcg'] / n_test_users\n result['hit_ratio'] += re['hit_ratio'] / n_test_users\n result['auc'] += re['auc'] / n_test_users\n pool.close()\n print(result)\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n output_path = args.output_path\n inference_path = args.inference_path\n model_path = args.model_path\n\n xaclPath(output_path, inference_path, model_path)\n inference_files(inference_path)\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport subprocess\n\nimport torch\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Process a checkpoint to be published')\n parser.add_argument('in_file', help='input checkpoint filename')\n parser.add_argument('out_file', help='output checkpoint filename')\n args = parser.parse_args()\n return args\n\n\ndef process_checkpoint(in_file, out_file):\n checkpoint = torch.load(in_file, map_location='cpu')\n # remove optimizer for smaller file size\n if 'optimizer' in checkpoint:\n del checkpoint['optimizer']\n # if it is necessary to remove some sensitive data in checkpoint['meta'],\n # add the code here.\n torch.save(checkpoint, out_file)\n sha = subprocess.check_output(['sha256sum', out_file]).decode()\n if out_file.endswith('.pth'):\n out_file_name = out_file[:-4]\n else:\n out_file_name = out_file\n final_file = out_file_name + f'-{sha[:8]}.pth'\n subprocess.Popen(['mv', out_file, final_file])\n\n\ndef main():\n args = parse_args()\n process_checkpoint(args.in_file, args.out_file)\n\n\nif __name__ == '__main__':\n main()\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.nn.parallel.distributed import (DistributedDataParallel,\n _find_tensors)\n\nfrom mmcv import print_log\nfrom mmcv.utils import TORCH_VERSION\nfrom .scatter_gather import scatter_kwargs\n\n\nclass MMDistributedDataParallel(DistributedDataParallel):\n \"\"\"The DDP module that supports DataContainer.\n\n MMDDP has two main differences with PyTorch DDP:\n\n - It supports a custom type :class:`DataContainer` which allows more\n flexible 
control of input data.\n - It implements two APIs ``train_step()`` and ``val_step()``.\n \"\"\"\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)\n\n def train_step(self, *inputs, **kwargs):\n \"\"\"train_step() API for module wrapped by DistributedDataParallel.\n\n This method is basically the same as\n ``DistributedDataParallel.forward()``, while replacing\n ``self.module.forward()`` with ``self.module.train_step()``.\n It is compatible with PyTorch 1.1 - 1.5.\n \"\"\"\n\n # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the\n # end of backward to the beginning of forward.\n if (TORCH_VERSION >= '1.7' and 'parrots'\n not in TORCH_VERSION) and self.reducer._rebuild_buckets():\n print_log(\n 'Reducer buckets have been rebuilt in this iteration.',\n logger='mmcv')\n\n if getattr(self, 'require_forward_param_sync', True):\n self._sync_params()\n if self.device_ids and False:\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n output = self.module.train_step(*inputs[0], **kwargs[0])\n else:\n outputs = self.parallel_apply(\n self._module_copies[:len(inputs)], inputs, kwargs)\n output = self.gather(outputs, self.output_device)\n else:\n inputs, kwargs = self.scatter(inputs, kwargs, [-1])\n output = self.module.train_step(*inputs[0], **kwargs[0])\n\n if torch.is_grad_enabled() and getattr(\n self, 'require_backward_grad_sync', True):\n if self.find_unused_parameters:\n self.reducer.prepare_for_backward(list(_find_tensors(output)))\n else:\n self.reducer.prepare_for_backward([])\n else:\n if TORCH_VERSION > '1.2':\n self.require_forward_param_sync = False\n return output\n\n def val_step(self, *inputs, **kwargs):\n \"\"\"val_step() API for module wrapped by DistributedDataParallel.\n\n This method is basically the same as\n ``DistributedDataParallel.forward()``, while replacing\n ``self.module.forward()`` with ``self.module.val_step()``.\n It is compatible with PyTorch 1.1 - 1.5.\n \"\"\"\n # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the\n # end of backward to the beginning of forward.\n if (TORCH_VERSION >= '1.7' and 'parrots'\n not in TORCH_VERSION) and self.reducer._rebuild_buckets():\n print_log(\n 'Reducer buckets have been rebuilt in this iteration.',\n logger='mmcv')\n\n if getattr(self, 'require_forward_param_sync', True):\n self._sync_params()\n if self.device_ids:\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n output = self.module.val_step(*inputs[0], **kwargs[0])\n else:\n outputs = self.parallel_apply(\n self._module_copies[:len(inputs)], inputs, kwargs)\n output = self.gather(outputs, self.output_device)\n else:\n output = self.module.val_step(*inputs, **kwargs)\n\n if torch.is_grad_enabled() and getattr(\n self, 'require_backward_grad_sync', True):\n if self.find_unused_parameters:\n self.reducer.prepare_for_backward(list(_find_tensors(output)))\n else:\n self.reducer.prepare_for_backward([])\n else:\n if TORCH_VERSION > '1.2':\n self.require_forward_param_sync = False\n return output\n", "# Copyright [yyyy] [name of copyright owner]\n# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport collections\nimport os\nimport sys\nimport time\n\nimport cv2\nimport numpy as np\nimport Polygon as plg # Polygon3 package; needed by polygon_from_points below\nimport torch\nfrom torch.utils import data\n\nimport models\nimport util\nfrom dataset import IC15TestLoader\n# c++ version pse based on opencv 3+\n# from pse import pse\n# python pse\nfrom pypse import pse as pypse\n\n\ndef extend_3c(img):\n img = img.reshape(img.shape[0], img.shape[1], 1)\n img = np.concatenate((img, img, img), axis=2)\n return img\n\n\ndef debug(idx, img_paths, imgs, output_root):\n if not os.path.exists(output_root):\n os.makedirs(output_root)\n\n col = []\n for i in range(len(imgs)):\n row = []\n for j in range(len(imgs[i])):\n # img = cv2.copyMakeBorder(imgs[i][j], 3, 3, 3, 3, cv2.BORDER_CONSTANT, value=[255, 0, 0])\n row.append(imgs[i][j])\n res = np.concatenate(row, axis=1)\n col.append(res)\n res = np.concatenate(col, axis=0)\n img_name = img_paths[idx].split('/')[-1]\n print(idx, '/', len(img_paths), img_name)\n cv2.imwrite(output_root + img_name, res)\n\n\ndef write_result_as_txt(image_name, bboxes, path):\n if not os.path.exists(path):\n os.mkdir(path)\n filename = util.io.join_path(path, 'res_%s.txt' % (image_name))\n lines = []\n for b_idx, bbox in enumerate(bboxes):\n values = [int(v) for v in bbox]\n line = \"%d, %d, %d, %d, %d, %d, %d, %d\\n\" % tuple(values)\n lines.append(line)\n util.io.write_lines(filename, lines)\n\n\ndef polygon_from_points(points):\n \"\"\"\n Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4\n \"\"\"\n resBoxes = np.empty([1, 8], dtype='int32')\n resBoxes[0, 0] = int(points[0])\n resBoxes[0, 4] = int(points[1])\n resBoxes[0, 1] = int(points[2])\n resBoxes[0, 5] = int(points[3])\n resBoxes[0, 2] = int(points[4])\n resBoxes[0, 6] = int(points[5])\n resBoxes[0, 3] = int(points[6])\n resBoxes[0, 7] = int(points[7])\n pointMat = resBoxes[0].reshape([2, 4]).T\n return plg.Polygon(pointMat)\n\n\n@torch.no_grad()\ndef test(args):\n data_loader = IC15TestLoader(long_size=args.long_size, datadir=args.data_dir)\n test_loader = torch.utils.data.DataLoader(\n data_loader,\n batch_size=1,\n shuffle=False,\n num_workers=1,\n drop_last=True)\n\n # Setup Model\n if args.arch == \"resnet50\":\n model = models.resnet50(pretrained=True, num_classes=7, scale=args.scale)\n elif args.arch == \"resnet101\":\n model = models.resnet101(pretrained=True, num_classes=7, scale=args.scale)\n elif args.arch == \"resnet152\":\n model = models.resnet152(pretrained=True, num_classes=7, scale=args.scale)\n\n for param in model.parameters():\n param.requires_grad = False\n\n model = model.to(CALCULATE_DEVICE)\n\n if args.resume is not None:\n if os.path.isfile(args.resume):\n print(\"Loading model and optimizer from checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume, map_location=CALCULATE_DEVICE)\n\n # model.load_state_dict(checkpoint['state_dict'])\n d = collections.OrderedDict()\n for key, value in checkpoint['state_dict'].items():\n if key.startswith('module.'):\n tmp = key[7:]\n else:\n tmp = key\n d[tmp] = value\n model.load_state_dict(d)\n\n print(\"Loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n sys.stdout.flush()\n else:\n print(\"No checkpoint found at 
'{}'\".format(args.resume))\n sys.stdout.flush()\n\n model.eval()\n total_frame = 0.0\n total_time = 0.0\n for idx, (org_img, img) in enumerate(test_loader):\n print('progress: %d / %d' % (idx, len(test_loader)))\n sys.stdout.flush()\n\n img = img.to(CALCULATE_DEVICE)\n org_img = org_img.numpy().astype('uint8')[0]\n text_box = org_img.copy()\n\n torch.npu.synchronize()\n start = time.time()\n outputs = model(img)\n\n score = torch.sigmoid(outputs[:, 0, :, :])\n outputs = (torch.sign(outputs - args.binary_th) + 1) / 2\n\n text = outputs[:, 0, :, :]\n kernels = outputs[:, 0:args.kernel_num, :, :] * text\n\n score = score.data.cpu().numpy()[0].astype(np.float32)\n text = text.data.cpu().numpy()[0].astype(np.uint8)\n kernels = kernels.data.cpu().numpy()[0].astype(np.uint8)\n\n # c++ version pse\n # pred = pse(kernels, args.min_kernel_area / (args.scale * args.scale))\n # python version pse\n pred = pypse(kernels, args.min_kernel_area / (args.scale * args.scale))\n print(pred.shape)\n\n # scale = (org_img.shape[0] * 1.0 / pred.shape[0], org_img.shape[1] * 1.0 / pred.shape[1])\n scale = (org_img.shape[1] * 1.0 / pred.shape[1], org_img.shape[0] * 1.0 / pred.shape[0])\n label = pred\n label_num = np.max(label) + 1\n bboxes = []\n for i in range(1, label_num):\n points = np.array(np.where(label == i)).transpose((1, 0))[:, ::-1]\n\n if points.shape[0] < args.min_area / (args.scale * args.scale):\n continue\n\n score_i = np.mean(score[label == i])\n if score_i < args.min_score:\n continue\n\n rect = cv2.minAreaRect(points)\n bbox = cv2.boxPoints(rect) * scale\n bbox = bbox.astype('int32')\n bboxes.append(bbox.reshape(-1))\n\n torch.npu.synchronize()\n end = time.time()\n total_frame += 1\n total_time += (end - start)\n print('fps: %.2f' % (total_frame / total_time))\n sys.stdout.flush()\n\n for bbox in bboxes:\n cv2.drawContours(text_box, [bbox.reshape(4, 2)], -1, (0, 255, 0), 2)\n\n image_name = data_loader.img_paths[idx].split('/')[-1].split('.')[0]\n write_result_as_txt(image_name, bboxes, 'outputs/' + args.output_file)\n\n # text_box = cv2.resize(text_box, (text.shape[1], text.shape[0]))\n # debug(idx, data_loader.img_paths, [[text_box]], 'outputs/vis_ic15/')\n\n cmd = 'cd %s;zip -j %s %s/*' % ('./outputs/', args.output_file + '.zip', args.output_file)\n print(cmd)\n sys.stdout.flush()\n util.cmd.cmd(cmd)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Hyperparams')\n parser.add_argument('--arch', nargs='?', type=str, default='resnet50')\n parser.add_argument('--resume', nargs='?', type=str, default=None,\n help='Path to previous saved model to restart from')\n parser.add_argument('--binary_th', nargs='?', type=float, default=1.0,\n help='Path to previous saved model to restart from')\n parser.add_argument('--kernel_num', nargs='?', type=int, default=7,\n help='Path to previous saved model to restart from')\n parser.add_argument('--scale', nargs='?', type=int, default=1,\n help='Path to previous saved model to restart from')\n parser.add_argument('--long_size', nargs='?', type=int, default=2240,\n help='Path to previous saved model to restart from')\n parser.add_argument('--min_kernel_area', nargs='?', type=float, default=5.0,\n help='min kernel area')\n parser.add_argument('--min_area', nargs='?', type=float, default=800.0,\n help='min area')\n parser.add_argument('--min_score', nargs='?', type=float, default=0.93,\n help='min score')\n parser.add_argument('--npu', default=None, type=int,\n help='NPU id to use.')\n parser.add_argument('--data_dir', 
default='./data/ICDAR/Challenge/', type=str)\n parser.add_argument('--output_file', default='submit_ic15', type=str)\n args = parser.parse_args()\n CALCULATE_DEVICE = f\"npu:{args.npu}\"\n PRINT_DEVICE = \"cpu\"\n torch.npu.set_device(CALCULATE_DEVICE)\n test(args)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" Test Time Pooling (Average-Max Pool)\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\n\nimport logging\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom .adaptive_avgmax_pool import adaptive_avgmax_pool2d\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass TestTimePoolHead(nn.Module):\n def __init__(self, base, original_pool=7):\n super(TestTimePoolHead, self).__init__()\n self.base = base\n self.original_pool = original_pool\n base_fc = self.base.get_classifier()\n if isinstance(base_fc, nn.Conv2d):\n self.fc = base_fc\n else:\n self.fc = nn.Conv2d(\n self.base.num_features, self.base.num_classes, kernel_size=1, bias=True)\n self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size()))\n self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size()))\n self.base.reset_classifier(0) # delete original fc layer\n\n def forward(self, x):\n x = self.base.forward_features(x)\n x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1)\n x = self.fc(x)\n x = adaptive_avgmax_pool2d(x, 1)\n return x.view(x.size(0), -1)\n\n\ndef apply_test_time_pool(model, config, use_test_size=True):\n test_time_pool = False\n if not hasattr(model, 'default_cfg') or not model.default_cfg:\n return model, False\n if use_test_size and 'test_input_size' in model.default_cfg:\n df_input_size = model.default_cfg['test_input_size']\n else:\n df_input_size = model.default_cfg['input_size']\n if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]:\n _logger.info('Target input size %s > pretrained default %s, using test time pooling' %\n (str(config['input_size'][-2:]), str(df_input_size[-2:])))\n model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size'])\n test_time_pool = True\n return model, test_time_pool\n" ]
[ [ "torch.zeros_like" ], [ "sklearn.metrics.roc_auc_score", "sklearn.metrics.average_precision_score" ], [ "numpy.fromfile" ], [ "torch.save", "torch.load" ], [ "torch.is_grad_enabled", "torch.nn.parallel.distributed._find_tensors" ], [ "torch.utils.data.DataLoader", "numpy.empty", "torch.load", "numpy.concatenate", "torch.no_grad", "numpy.where", "torch.sign", "numpy.max", "torch.npu.synchronize", "torch.sigmoid", "torch.npu.set_device", "numpy.mean" ], [ "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d" ] ]
anglixjtu/MeshCNN_
[ "83826e66d8989ed4967047c2ed6d099568c5781c" ]
[ "src/util/losses.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass ChamferLoss(nn.Module):\n\n def __init__(self):\n super(ChamferLoss, self).__init__()\n self.use_cuda = torch.cuda.is_available()\n\n def forward(self, preds, gts, reverse=True, bidirectional=True):\n def compute_loss(preds, gts):\n P = self.batch_pairwise_dist(gts, preds)\n mins, _ = torch.min(P, 1)\n loss_1 = torch.sum(mins)\n mins, _ = torch.min(P, 2)\n loss_2 = torch.sum(mins)\n return loss_1 + loss_2\n\n if bidirectional or reverse:\n backward_loss = compute_loss(gts, preds)\n if reverse:\n return backward_loss\n else:\n forward_loss = compute_loss(preds, gts)\n return forward_loss + backward_loss\n else:\n forward_loss = compute_loss(preds, gts)\n return forward_loss\n\n def batch_pairwise_dist(self, x, y):\n bs, num_points_x, points_dim = x.size()\n _, num_points_y, _ = y.size()\n xx = torch.bmm(x, x.transpose(2, 1))\n yy = torch.bmm(y, y.transpose(2, 1))\n zz = torch.bmm(x, y.transpose(2, 1))\n if self.use_cuda:\n dtype = torch.cuda.LongTensor\n else:\n dtype = torch.LongTensor\n diag_ind_x = torch.arange(0, num_points_x).type(dtype)\n diag_ind_y = torch.arange(0, num_points_y).type(dtype)\n rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(\n zz.transpose(2, 1))\n ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)\n P = rx.transpose(2, 1) + ry - 2 * zz\n return P" ]
[ [ "torch.sum", "torch.min", "torch.cuda.is_available", "torch.arange" ] ]
morkovka1337/openvino_training_extensions
[ "846db45c264d6b061505213f51763520b9432ba9", "846db45c264d6b061505213f51763520b9432ba9", "846db45c264d6b061505213f51763520b9432ba9" ]
[ "pytorch_toolkit/nncf/examples/object_detection/layers/modules/multibox_loss.py", "tensorflow_toolkit/text_recognition/text_recognition/model.py", "pytorch_toolkit/nncf/tests/modules/test_rnn.py" ]
[ "\"\"\"\n Copyright (c) 2019 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..box_utils import match, log_sum_exp\n\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, cfg, num_classes, overlap_thresh, prior_for_matching,\n bkg_label, neg_mining, neg_pos, neg_overlap, encode_target, device=None):\n super(MultiBoxLoss, self).__init__()\n self.device = device\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n\n def forward(self, predictions, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,4)\n priors shape: torch.size(num_priors,4)\n\n ground_truth (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,5] (last idx is the label).\n \"\"\"\n loc_data, conf_data, priors = predictions\n batch = loc_data.size(0)\n num_priors = loc_data.size(1)\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.Tensor(batch, num_priors, 4).to(self.device)\n conf_t = torch.LongTensor(batch, num_priors).to(self.device)\n for idx in range(batch):\n truths = targets[idx][:, :-1].data\n labels = targets[idx][:, -1].data\n defaults = priors.data\n match(self.threshold, truths, defaults[0], labels, loc_t, conf_t, idx)\n pos = conf_t > 0\n num_pos = pos.sum(dim=1, keepdim=True)\n\n # Localization Loss (Smooth L1)\n # Shape: [batch,num_priors,4]\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n loc_p = loc_data[pos_idx].view(-1, 4)\n loc_t = loc_t[pos_idx].view(-1, 4)\n loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')\n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes)\n\n loss_c = 
log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))\n\n # Hard Negative Mining\n loss_c = loss_c.view(batch, -1)\n loss_c[pos] = 0 # filter out pos boxes for now\n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)\n neg = idx_rank < num_neg.expand_as(idx_rank)\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos + neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')\n\n # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n\n N = num_pos.data.sum().to(torch.float)\n loss_l /= N\n loss_c /= N\n return loss_l, loss_c\n", "# Copyright (C) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\n\"\"\" This module contains architecture of Text Recognition model.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport tensorflow.contrib.slim as slim\n\n\nclass TextRecognition:\n \"\"\" Text recognition model definition. \"\"\"\n\n def __init__(self, is_training, num_classes, backbone_dropout=0.0):\n self.is_training = is_training\n self.lstm_dim = 256\n self.num_classes = num_classes\n self.backbone_dropout = backbone_dropout\n\n def __call__(self, inputdata):\n with tf.variable_scope('shadow'):\n features = self.feature_extractor(inputdata=inputdata)\n logits = self.encoder_decoder(inputdata=tf.squeeze(features, axis=1))\n\n return logits\n\n # pylint: disable=too-many-locals\n def feature_extractor(self, inputdata):\n \"\"\" Extracts features from input text image. 
\"\"\"\n\n with slim.arg_scope([slim.conv2d], padding='SAME',\n weights_initializer=tf.contrib.layers.variance_scaling_initializer(),\n weights_regularizer=slim.l2_regularizer(0.00025),\n biases_initializer=None, activation_fn=None):\n with slim.arg_scope([slim.batch_norm], updates_collections=None):\n bn0 = slim.batch_norm(inputdata, 0.9, scale=True, is_training=self.is_training,\n activation_fn=None)\n\n dropout1 = slim.dropout(bn0, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv1 = slim.conv2d(dropout1, num_outputs=64, kernel_size=3)\n bn1 = slim.batch_norm(conv1, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n pool1 = slim.max_pool2d(bn1, kernel_size=2, stride=2)\n\n dropout2 = slim.dropout(pool1, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv2 = slim.conv2d(dropout2, num_outputs=128, kernel_size=3)\n bn2 = slim.batch_norm(conv2, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n pool2 = slim.max_pool2d(bn2, kernel_size=2, stride=2)\n\n dropout3 = slim.dropout(pool2, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv3 = slim.conv2d(dropout3, num_outputs=256, kernel_size=3)\n bn3 = slim.batch_norm(conv3, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n\n dropout4 = slim.dropout(bn3, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv4 = slim.conv2d(dropout4, num_outputs=256, kernel_size=3)\n bn4 = slim.batch_norm(conv4, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n pool4 = slim.max_pool2d(bn4, kernel_size=[2, 1], stride=[2, 1])\n\n dropout5 = slim.dropout(pool4, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv5 = slim.conv2d(dropout5, num_outputs=512, kernel_size=3)\n bn5 = slim.batch_norm(conv5, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n\n dropout6 = slim.dropout(bn5, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv6 = slim.conv2d(dropout6, num_outputs=512, kernel_size=3)\n bn6 = slim.batch_norm(conv6, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n pool6 = slim.max_pool2d(bn6, kernel_size=[2, 1], stride=[2, 1])\n\n dropout7 = slim.dropout(pool6, keep_prob=1.0 - self.backbone_dropout,\n is_training=self.is_training)\n conv7 = slim.conv2d(dropout7, num_outputs=512, kernel_size=2, stride=[2, 1])\n bn7 = slim.batch_norm(conv7, 0.9, scale=True, is_training=self.is_training,\n activation_fn=tf.nn.relu)\n\n return bn7\n\n def encoder_decoder(self, inputdata):\n \"\"\" LSTM-based encoder-decoder module. 
\"\"\"\n\n with tf.variable_scope('LSTMLayers'):\n [batch_size, width, _] = inputdata.get_shape().as_list()\n\n with tf.variable_scope('encoder'):\n forward_cells = []\n backward_cells = []\n\n for _ in range(2):\n forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))\n backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))\n\n encoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(\n forward_cells, backward_cells, inputdata, dtype=tf.float32)\n\n with tf.variable_scope('decoder'):\n forward_cells = []\n backward_cells = []\n\n for _ in range(2):\n forward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))\n backward_cells.append(tf.nn.rnn_cell.LSTMCell(self.lstm_dim))\n\n decoder_layer, _, _ = rnn.stack_bidirectional_dynamic_rnn(\n forward_cells, backward_cells, encoder_layer, dtype=tf.float32)\n\n rnn_reshaped = tf.reshape(decoder_layer, [batch_size * width, -1])\n\n logits = slim.fully_connected(rnn_reshaped, self.num_classes, activation_fn=None)\n logits = tf.reshape(logits, [batch_size, width, self.num_classes])\n rnn_out = tf.transpose(logits, (1, 0, 2))\n\n return rnn_out\n", "\"\"\"\n Copyright (c) 2019-2020 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport logging\nimport sys\nfrom collections import namedtuple\nfrom typing import List, Tuple\n\nimport copy\nimport onnx\nimport os\nimport pytest\nimport torch\nimport torch.nn.functional as F\nfrom functools import partial\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import PackedSequence\n\nfrom nncf.dynamic_graph.context import TracingContext\nfrom nncf.dynamic_graph.transform_graph import replace_modules\nfrom nncf.model_creation import create_compressed_model\nfrom nncf.layers import LSTMCellNNCF, NNCF_RNN, ITERATION_MODULES\nfrom tests.modules.seq2seq.gnmt import GNMT\nfrom tests.test_helpers import get_empty_config, get_grads, create_compressed_model_and_algo_for_test\n\n\ndef replace_lstm(model):\n def replace_fn(module_):\n if not isinstance(module_, nn.LSTM):\n return module_\n device = next(module_.parameters()).device\n custom_lstm = NNCF_RNN('LSTM', input_size=module_.input_size, hidden_size=module_.hidden_size,\n num_layers=module_.num_layers, bidirectional=module_.bidirectional,\n batch_first=module_.batch_first, dropout=module_.dropout,\n bias=module_.bias)\n\n def get_param_names(bias):\n # type: (bool) -> List[str]\n suffixes = ['ih', 'hh']\n names = ['weight_' + suffix for suffix in suffixes]\n if bias:\n names += ['bias_' + suffix for suffix in suffixes]\n return names\n\n for l in range(custom_lstm.num_layers):\n for d in range(custom_lstm.num_directions):\n for name in get_param_names(custom_lstm.bias):\n suffix = '_reverse' if d == 1 else ''\n param_name = name + '_l{}{}'.format(l, suffix)\n param = getattr(module_, param_name)\n getattr(custom_lstm, param_name).data.copy_(param.data)\n custom_lstm.to(device)\n return custom_lstm\n\n if isinstance(model, nn.LSTM):\n return replace_fn(model)\n affected_scopes = []\n return 
replace_modules(model, replace_fn, affected_scopes)[0]\n\ndef clone_test_data(data_list):\n # type: (LSTMTestData) -> List[torch.Tensor]\n results = []\n x = data_list[0]\n result = x if isinstance(x, PackedSequence) else x.clone()\n results.append(result)\n for tensor_list in data_list[1:]:\n result = ()\n for tensor in tensor_list:\n if isinstance(tensor, Variable):\n sub_result = tensor.data.clone()\n sub_result = Variable(sub_result, requires_grad=True)\n else:\n sub_result = tensor.clone()\n result += (sub_result,)\n results.append(result)\n return results\n\n\nLSTMTestSizes = namedtuple('LSTMTestSizes', ['input_size', 'hidden_size', 'batch', 'seq_length'])\nLSTMTestData = namedtuple('LSTMTestData', ['x', 'h0', 'c0', 'weight_ih', 'weight_hh', 'bias_ih', 'bias_hh'])\n\n\n@pytest.mark.parametrize('sizes',\n [LSTMTestSizes(512, 768, 128, 50),\n LSTMTestSizes(3, 3, 3, 3),\n LSTMTestSizes(1, 1, 1, 1)], ids=lambda val: '[{}]'.format('-'.join([str(v) for v in val])))\nclass TestLSTMCell:\n @staticmethod\n def generate_lstm_data(p, num_layers=1, num_directions=1, variable_length=False, sorted_=True, batch_first=True,\n is_cuda=False, bias=True, empty_initial=False, is_backward=False):\n # type: (LSTMTestSizes, int, int, bool, bool, bool, bool, bool, bool, bool) -> LSTMTestData\n num_chunks = 4\n seq_list = []\n if variable_length:\n seq_lens = torch.IntTensor(p.batch).random_(1, p.seq_length + 1)\n if sorted_:\n seq_lens = torch.sort(seq_lens, descending=True).values\n for seq_size in seq_lens:\n seq_list.append(torch.randn(seq_size.item(), p.input_size))\n padded_seq_batch = torch.nn.utils.rnn.pad_sequence(seq_list, batch_first=batch_first)\n x_data = torch.nn.utils.rnn.pack_padded_sequence(padded_seq_batch, lengths=seq_lens,\n batch_first=batch_first, enforce_sorted=sorted_)\n\n else:\n size = (p.seq_length, p.batch, p.input_size)\n if batch_first:\n size = (p.batch, p.seq_length, p.input_size)\n x_data = torch.randn(*size)\n\n def wrap_tensor(tensor):\n wrapped = tensor\n if is_cuda:\n wrapped = wrapped.cuda()\n if is_backward:\n wrapped = Variable(wrapped, requires_grad=True)\n return wrapped\n\n if is_cuda:\n x_data = x_data.cuda()\n h0, c0, wih, whh, bih, bhh = ([] for _ in range(6))\n for layer_ in range(num_layers):\n for _ in range(num_directions):\n layer_input_size = p.input_size if layer_ == 0 else p.hidden_size * num_directions\n if not empty_initial:\n h0.append(wrap_tensor(torch.randn(p.batch, p.hidden_size)))\n c0.append(wrap_tensor(torch.randn(p.batch, p.hidden_size)))\n wih.append(wrap_tensor(torch.rand(num_chunks * p.hidden_size, layer_input_size)))\n whh.append(wrap_tensor(torch.rand(num_chunks * p.hidden_size, p.hidden_size)))\n if bias:\n bih.append(wrap_tensor(torch.rand(num_chunks * p.hidden_size)))\n bhh.append(wrap_tensor(torch.rand(num_chunks * p.hidden_size)))\n result = LSTMTestData(x_data, h0, c0, wih, whh, bih, bhh)\n return result\n\n @staticmethod\n def set_weights(cell, data):\n # type: (nn.LSTMCell, LSTMTestData) -> None\n for name in TestLSTM.get_param_names(bias=True):\n param = getattr(data, name)\n if param:\n getattr(cell, name).data.copy_(param[0].data)\n\n def test_forward_lstm_cell(self, sizes, _seed):\n p = sizes\n ref_data = TestLSTMCell.generate_lstm_data(p, batch_first=False)\n test_data = LSTMTestData(*clone_test_data(ref_data))\n\n ref_rnn = nn.LSTMCell(p.input_size, p.hidden_size)\n TestLSTMCell.set_weights(ref_rnn, ref_data)\n test_rnn = LSTMCellNNCF(p.input_size, p.hidden_size)\n TestLSTMCell.set_weights(test_rnn, test_data)\n\n for i in 
range(p.seq_length):\n ref_result = ref_rnn(ref_data.x[i], (ref_data.h0[0], ref_data.c0[0]))\n test_result = test_rnn(test_data.x[i], (test_data.h0[0], test_data.c0[0]))\n for (ref, test) in list(zip(ref_result, test_result)):\n torch.testing.assert_allclose(test, ref)\n\n def test_backward_lstm_cell(self, sizes, _seed):\n p = sizes\n ref_data = TestLSTMCell.generate_lstm_data(p, batch_first=False, is_backward=True)\n with torch.no_grad():\n test_data = LSTMTestData(*clone_test_data(ref_data))\n\n ref_rnn = nn.LSTMCell(p.input_size, p.hidden_size)\n TestLSTMCell.set_weights(ref_rnn, ref_data)\n test_rnn = LSTMCellNNCF(p.input_size, p.hidden_size)\n TestLSTMCell.set_weights(test_rnn, test_data)\n\n for i in range(p.seq_length):\n ref_result = ref_rnn(ref_data.x[i], (ref_data.h0[0], ref_data.c0[0]))\n test_result = test_rnn(test_data.x[i], (test_data.h0[0], test_data.c0[0]))\n ref_result[0].sum().backward()\n test_result[0].sum().backward()\n ref_grads = get_grads([ref_data.h0[0], ref_data.c0[0]])\n ref_grads += get_grads([ref_rnn.weight_ih, ref_rnn.weight_hh, ref_rnn.bias_ih, ref_rnn.bias_hh])\n test_grads = get_grads([test_data.h0[0], test_data.c0[0]])\n test_grads += get_grads([test_rnn.weight_ih, test_rnn.weight_hh, test_rnn.bias_ih, test_rnn.bias_hh])\n for (ref, test) in list(zip(test_grads, ref_grads)):\n torch.testing.assert_allclose(test, ref)\n\n\ndef test_export_lstm_cell(tmp_path):\n config = get_empty_config(model_size=1, input_sample_size=(1, 1))\n config['compression'] = {'algorithm': 'quantization'}\n\n model, algo = create_compressed_model_and_algo_for_test(LSTMCellNNCF(1, 1), config)\n\n test_path = str(tmp_path.joinpath('test.onnx'))\n algo.export_model(test_path)\n assert os.path.exists(test_path)\n\n onnx_num = 0\n model = onnx.load(test_path)\n # pylint: disable=no-member\n for node in model.graph.node:\n if node.op_type == 'FakeQuantize':\n onnx_num += 1\n assert onnx_num == 12\n\n\n@pytest.mark.parametrize('sizes',\n [LSTMTestSizes(512, 324, 128, 50),\n LSTMTestSizes(3, 3, 3, 3),\n LSTMTestSizes(1, 1, 1, 1)], ids=lambda val: '[{}]'.format('-'.join([str(v) for v in val])))\n@pytest.mark.parametrize('bidirectional', (True, False), ids=('bi', 'uni'))\n@pytest.mark.parametrize(\"bias\", [True, False], ids=['bias', 'no_bias'])\n@pytest.mark.parametrize('num_layers', [1, 2], ids=['single_layer', 'stacked'])\n@pytest.mark.parametrize('batch_first', [True, False], ids=['batch_first', 'seq_first'])\n@pytest.mark.parametrize(('variable_length', 'sorted_'),\n ([True, True],\n [True, False],\n [False, False]), ids=['packed_sorted', 'packed_unsorted', 'not_packed'])\n@pytest.mark.parametrize('is_cuda', [True, False], ids=['cuda', 'cpu'])\n@pytest.mark.parametrize('empty_initial', [True, False], ids=['no_initial', 'with_initial'])\n# TODO: dropout gives different result. 
Looks like different random seed on CPU\n# @pytest.mark.parametrize('dropout', [0, 0.9], ids=['no_dropout', 'with_dropout'])\n@pytest.mark.parametrize('dropout', [0], ids=['no_dropout'])\nclass TestLSTM:\n def test_forward_lstm(self, sizes, bidirectional, num_layers, bias, batch_first, variable_length, sorted_, is_cuda,\n empty_initial, dropout, _seed):\n num_directions = 2 if bidirectional else 1\n p = sizes\n\n ref_data = TestLSTMCell.generate_lstm_data(p, num_layers, num_directions, variable_length, sorted_, batch_first,\n is_cuda, bias, empty_initial)\n\n ref_rnn = nn.LSTM(input_size=p.input_size, hidden_size=p.hidden_size, num_layers=num_layers,\n bidirectional=bidirectional, batch_first=batch_first, bias=bias, dropout=dropout)\n self.set_ref_lstm_weights(ref_data, ref_rnn, num_layers, num_directions, bias)\n ref_hidden = None if empty_initial else self.get_ref_lstm_hidden(ref_data)\n\n test_data = LSTMTestData(*clone_test_data(ref_data))\n\n class ModelWrapper(nn.Module):\n def __init__(self, lstm):\n super().__init__()\n self.lstm = lstm\n\n def forward(self, *input_):\n return self.lstm(*input_)\n\n wrapped_ref_rnn = ModelWrapper(ref_rnn)\n wrapped_test_rnn = replace_lstm(copy.deepcopy(wrapped_ref_rnn))\n test_rnn = wrapped_test_rnn.lstm\n test_hidden = None if empty_initial else self.get_test_lstm_hidden(test_data)\n\n if is_cuda:\n ref_rnn.cuda()\n test_rnn.cuda()\n ref_output, (ref_hn, ref_cn) = ref_rnn(ref_data.x, ref_hidden)\n test_output, (test_hn, test_cn) = test_rnn(test_data.x, test_hidden)\n\n torch.testing.assert_allclose(test_hn[0], ref_hn[0], rtol=1e-3, atol=1e-4)\n torch.testing.assert_allclose(test_cn[0], ref_cn[0], rtol=1e-3, atol=1e-4)\n if variable_length:\n torch.testing.assert_allclose(test_output.batch_sizes, ref_output.batch_sizes)\n torch.testing.assert_allclose(test_output.data, ref_output.data, rtol=1e-2, atol=1e-3)\n if not sorted_:\n torch.testing.assert_allclose(test_output.sorted_indices, ref_output.sorted_indices)\n torch.testing.assert_allclose(test_output.unsorted_indices, ref_output.unsorted_indices)\n else:\n torch.testing.assert_allclose(test_output, ref_output, rtol=1e-2, atol=1e-3)\n\n def test_backward_lstm(self, sizes, bidirectional, num_layers, bias, batch_first, variable_length, sorted_, is_cuda,\n empty_initial, dropout, _seed):\n\n num_directions = 2 if bidirectional else 1\n\n p = sizes\n\n ref_data = TestLSTMCell.generate_lstm_data(p, num_layers, num_directions, variable_length, sorted_, batch_first,\n is_cuda, bias, empty_initial, True)\n\n ref_rnn = nn.LSTM(input_size=p.input_size, hidden_size=p.hidden_size, num_layers=num_layers,\n bidirectional=bidirectional, batch_first=batch_first, bias=bias, dropout=dropout)\n self.set_ref_lstm_weights(ref_data, ref_rnn, num_layers, num_directions, bias)\n ref_hidden = None if empty_initial else self.get_ref_lstm_hidden(ref_data)\n\n test_data = LSTMTestData(*clone_test_data(ref_data))\n test_rnn = replace_lstm(copy.deepcopy(ref_rnn))\n test_hidden = None if empty_initial else self.get_test_lstm_hidden(test_data)\n\n if is_cuda:\n ref_rnn.cuda()\n test_rnn.cuda()\n\n ref_output, _ = ref_rnn(ref_data.x, ref_hidden)\n test_output, _ = test_rnn(test_data.x, test_hidden)\n\n ref_output[0].sum().backward()\n test_output[0].sum().backward()\n\n ref_grads = get_grads(self.flatten_nested_lists(ref_rnn.all_weights))\n test_grads = get_grads(self.flatten_nested_lists(test_rnn.all_weights))\n if not empty_initial:\n # TODO: compare gradient of all hidden\n ref_grads += get_grads([ref_data.h0[0], ref_data.c0[0]])\n 
test_grads += get_grads([test_hidden[0][0], test_hidden[1][0]])\n for (ref, test) in list(zip(test_grads, ref_grads)):\n torch.testing.assert_allclose(test, ref, rtol=1e-1, atol=1e-1)\n\n @classmethod\n def flatten_nested_lists(cls, nested_list):\n # type: (List) -> List[torch.Tensor]\n return [tensor for tensor_tuple in nested_list for tensor in tensor_tuple]\n\n @classmethod\n def get_test_lstm_hidden(cls, data):\n # type: (LSTMTestData) -> List[Tuple[torch.Tensor, ...]]\n result = []\n hidden_names = ['h0', 'c0']\n for name in hidden_names:\n hidden_list = getattr(data, name)\n element = ()\n num_hidden = len(hidden_list)\n for i in range(num_hidden):\n element += (hidden_list[i],)\n result.append(element)\n return result\n\n @classmethod\n def get_ref_lstm_hidden(cls, data):\n # type: (LSTMTestData) -> Tuple[torch.Tensor, torch.Tensor]\n hidden = cls.get_test_lstm_hidden(data)\n hidden_states = [torch.unsqueeze(tensor, dim=0) for tensor in hidden[0]]\n cell_states = [torch.unsqueeze(tensor, dim=0) for tensor in hidden[1]]\n return (\n torch.cat(hidden_states, dim=0),\n torch.cat(cell_states, dim=0)\n )\n\n @classmethod\n def set_ref_lstm_weights(cls, data, nn_lstm, num_layers, num_directions, bias):\n # type: (LSTMTestData, nn.LSTM, int, int, bool) -> None\n for l in range(num_layers):\n for d in range(num_directions):\n i = l * num_directions + d\n for name in cls.get_param_names(bias):\n suffix = '_reverse' if d == 1 else ''\n param = getattr(data, name)\n param_name = name + '_l{}{}'.format(l, suffix)\n getattr(nn_lstm, param_name).data.copy_(param[i].data)\n\n @classmethod\n def get_param_names(cls, bias):\n # type: (bool) -> List[str]\n suffixes = ['ih', 'hh']\n names = ['weight_' + suffix for suffix in suffixes]\n if bias:\n names += ['bias_' + suffix for suffix in suffixes]\n return names\n\n\ndef test_export_stacked_bi_lstm(tmp_path):\n p = LSTMTestSizes(3, 3, 3, 3)\n config = get_empty_config(input_sample_size=(1, p.hidden_size, p.input_size))\n config['compression'] = {'algorithm': 'quantization'}\n\n # TODO: batch_first=True fails with building graph: ambiguous call to mul or sigmoid\n test_rnn = NNCF_RNN('LSTM', input_size=p.input_size, hidden_size=p.hidden_size, num_layers=2, bidirectional=True,\n batch_first=False)\n model, algo = create_compressed_model_and_algo_for_test(test_rnn, config)\n\n test_path = str(tmp_path.joinpath('test.onnx'))\n algo.export_model(test_path)\n assert os.path.exists(test_path)\n\n onnx_num = 0\n model = onnx.load(test_path)\n # pylint: disable=no-member\n for node in model.graph.node:\n if node.op_type == 'FakeQuantize':\n onnx_num += 1\n assert onnx_num == 50\n\n\nclass TestNumberOfNodes:\n logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\n def test_number_of_calling_fq_for_lstm(self):\n p = LSTMTestSizes(1, 1, 1, 5)\n num_layers = 2\n bidirectional = True\n num_directions = 2 if bidirectional else 1\n bias = True\n batch_first = False\n config = get_empty_config(input_sample_size=(p.seq_length, p.batch, p.input_size))\n config['compression'] = {'algorithm': 'quantization', 'quantize_inputs': True}\n\n test_data = TestLSTMCell.generate_lstm_data(p, num_layers, num_directions, bias=bias, batch_first=batch_first)\n\n test_rnn = NNCF_RNN('LSTM', input_size=p.input_size, hidden_size=p.hidden_size, num_layers=num_layers,\n bidirectional=bidirectional, bias=bias, batch_first=batch_first)\n TestLSTM.set_ref_lstm_weights(test_data, test_rnn, num_layers, num_directions, bias)\n test_hidden = TestLSTM.get_test_lstm_hidden(test_data)\n\n 
model, algo = create_compressed_model_and_algo_for_test(test_rnn, config)\n\n class Counter:\n def __init__(self):\n self.count = 0\n\n def next(self):\n self.count += 1\n\n def hook(model, input_, counter):\n counter.next()\n\n counters = {}\n for name, quantizer in algo.all_quantizations.items():\n counter = Counter()\n counters[name] = counter\n quantizer.register_forward_pre_hook(partial(hook, counter=counter))\n _ = model(test_data.x, test_hidden)\n assert model.get_graph().get_nodes_count() == 107 # NB: may always fail in debug due to superfluous 'cat' nodes\n assert len(counters) == 50\n for counter in counters.values():\n assert counter.count == p.seq_length\n\n def test_number_of_calling_fq_for_gnmt(self):\n torch.cuda.set_device(0)\n device = torch.device('cuda')\n batch_first = False\n vocab_size = 32000\n model_config = {'hidden_size': 100,\n 'vocab_size': vocab_size,\n 'num_layers': 4,\n 'dropout': 0.2,\n 'batch_first': batch_first,\n 'share_embedding': True,\n }\n batch_size = 128\n sequence_size = 50\n input_sample_size = (batch_size, sequence_size) if batch_first else (sequence_size, batch_size)\n config = get_empty_config(input_sample_size=input_sample_size)\n config['compression'] = \\\n {'algorithm': 'quantization',\n 'quantize_inputs': True,\n 'quantizable_subgraph_patterns': [[\"linear\", \"__add__\"],\n [\"sigmoid\", \"__mul__\", \"__add__\"],\n [\"__add__\", \"tanh\", \"__mul__\"],\n [\"sigmoid\", \"__mul__\"]],\n 'disable_function_quantization_hooks': True}\n config['scopes_without_shape_matching'] = \\\n ['GNMT/ResidualRecurrentDecoder[decoder]/RecurrentAttention[att_rnn]/BahdanauAttention[attn]', ]\n\n model = GNMT(**model_config)\n model = replace_lstm(model)\n model.to(device)\n\n def dummy_forward_fn(model, seq_len=sequence_size):\n def gen_packed_sequence():\n seq_list = []\n seq_lens = torch.LongTensor(batch_size).random_(1, seq_len + 1)\n seq_lens = torch.sort(seq_lens, descending=True).values\n for seq_size in seq_lens:\n seq_list.append(torch.LongTensor(seq_size.item()).random_(1, vocab_size).to(device))\n padded_seq_batch = torch.nn.utils.rnn.pad_sequence(seq_list, batch_first=batch_first)\n return padded_seq_batch, seq_lens\n\n x_data, seq_lens = gen_packed_sequence()\n input_encoder = x_data\n input_enc_len = seq_lens.to(device)\n input_decoder = gen_packed_sequence()[0]\n model(input_encoder, input_enc_len, input_decoder)\n\n algo, model = create_compressed_model(model, config, dummy_forward_fn, dump_graphs=False)\n model.to(device)\n\n class Counter:\n def __init__(self):\n self.count = 0\n\n def next(self):\n self.count += 1\n\n def hook(model, input_, counter):\n counter.next()\n\n counters = {}\n for name, quantizer in algo.all_quantizations.items():\n counter = Counter()\n counters[str(name)] = counter\n quantizer.register_forward_pre_hook(partial(hook, counter=counter))\n dummy_forward_fn(model)\n assert model.get_graph().get_nodes_count() == 230 # NB: may always fail in debug due to superfluous 'cat' nodes\n assert len(counters) == 55\n for name, counter in counters.items():\n if 'cell' in name or \"LSTMCellForwardNNCF\" in name:\n assert counter.count == sequence_size, name\n else:\n assert counter.count == 1, name\n new_seq_len = int(sequence_size / 2)\n dummy_forward_fn(model, new_seq_len)\n assert model.get_graph().get_nodes_count() == 230 # NB: may always fail in debug due to superfluous 'cat' nodes\n assert len(counters) == 55\n for name, counter in counters.items():\n if 'cell' in name or \"LSTMCellForwardNNCF\" in name:\n assert 
counter.count == sequence_size + new_seq_len, name\n else:\n assert counter.count == 2, name\n\n def test_number_of_nodes_for_module_in_loop(self):\n num_iter = 5\n\n class LoopModule(nn.Module):\n @ITERATION_MODULES.register('Inner')\n class Inner(nn.Module):\n def __init__(self):\n super().__init__()\n self.operator1 = torch.sigmoid\n self.operator2 = torch.tanh\n\n def forward(self, x):\n s = self.operator1(x)\n t = self.operator2(x)\n result = t + s\n return result\n\n @staticmethod\n def nodes_number():\n return 3\n\n def __init__(self):\n super().__init__()\n self.inner = self.Inner()\n\n def forward(self, x):\n for _ in range(num_iter):\n x = self.inner(x)\n return x\n\n def nodes_number(self):\n return self.inner.nodes_number()\n\n test_module = LoopModule()\n context = TracingContext()\n with context as ctx:\n _ = test_module(torch.zeros(1))\n assert ctx.graph.get_nodes_count() == test_module.nodes_number()\n\n def test_number_of_nodes_for_module_in_loop__not_input_node(self):\n num_iter = 5\n\n class LoopModule(nn.Module):\n class Inner(nn.Module):\n def forward(self, x):\n s = F.sigmoid(x)\n t = F.tanh(x)\n result = F.sigmoid(x) * t + F.tanh(x) * s\n return result\n\n @staticmethod\n def nodes_number():\n return 7\n\n def __init__(self):\n super().__init__()\n self.inner = self.Inner()\n\n def forward(self, x):\n for _ in range(num_iter):\n x = self.inner(F.relu(x))\n return x\n\n def nodes_number(self):\n return self.inner.nodes_number() + num_iter\n\n test_module = LoopModule()\n context = TracingContext()\n with context as ctx:\n _ = test_module(torch.zeros(1))\n assert ctx.graph.get_nodes_count() == test_module.nodes_number()\n\n def test_number_of_nodes_for_module_with_nested_loops(self):\n num_iter = 5\n\n class TestIterModule(nn.Module):\n @ITERATION_MODULES.register()\n class TestIterModule_ResetPoint(nn.Module):\n def __init__(self, loop_module):\n super().__init__()\n self.loop_module = loop_module\n\n def forward(self, x):\n return self.loop_module(F.relu(x))\n\n def __init__(self):\n super().__init__()\n self.loop_module = self.LoopModule2()\n self.reset_point = self.TestIterModule_ResetPoint(self.loop_module)\n\n def forward(self, x):\n for _ in range(num_iter):\n x = self.reset_point(x)\n return x\n\n class LoopModule2(nn.Module):\n\n @ITERATION_MODULES.register()\n class LoopModule2_ResetPoint(nn.Module):\n def __init__(self, inner):\n super().__init__()\n self.inner = inner\n\n def forward(self, x):\n return self.inner(F.relu(x))\n\n def __init__(self):\n super().__init__()\n self.inner = self.Inner()\n self.reset_helper = self.LoopModule2_ResetPoint(self.inner)\n\n def forward(self, x):\n for _ in range(num_iter):\n self.reset_helper(x)\n return x\n\n class Inner(nn.Module):\n def forward(self, x):\n s = F.sigmoid(x)\n t = F.tanh(x)\n result = t + s\n return result\n\n test_module = TestIterModule()\n context = TracingContext()\n with context as ctx:\n _ = test_module(torch.zeros(1))\n assert ctx.graph.get_nodes_count() == num_iter\n\n def test_number_of_nodes_for_repeated_module(self):\n\n class LoopModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.operator = F.relu\n self.layers = nn.ModuleList([\n nn.Conv2d(1, 1, 1),\n nn.Conv2d(1, 1, 1)\n ])\n\n def forward(self, x):\n for layer in self.layers:\n x = F.relu(layer(x))\n return x\n\n test_module = LoopModule()\n context = TracingContext()\n with context as ctx:\n x = test_module(torch.zeros(1, 1, 1, 1))\n assert ctx.graph.get_nodes_count() == 4 # NB: may always fail in debug due to 
superfluous 'cat' nodes\n _ = test_module(x)\n assert ctx.graph.get_nodes_count() == 8 # NB: may always fail in debug due to superfluous 'cat' nodes\n" ]
[ [ "torch.nn.functional.cross_entropy", "torch.LongTensor", "torch.Tensor", "torch.nn.functional.smooth_l1_loss" ], [ "tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn", "tensorflow.reshape", "tensorflow.contrib.slim.batch_norm", "tensorflow.contrib.slim.fully_connected", "tensorflow.contrib.slim.conv2d", "tensorflow.contrib.slim.l2_regularizer", "tensorflow.contrib.slim.dropout", "tensorflow.variable_scope", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.contrib.slim.arg_scope", "tensorflow.squeeze", "tensorflow.transpose", "tensorflow.contrib.slim.max_pool2d", "tensorflow.contrib.layers.variance_scaling_initializer" ], [ "torch.rand", "torch.no_grad", "torch.nn.Conv2d", "torch.cat", "torch.nn.functional.sigmoid", "torch.testing.assert_allclose", "torch.randn", "torch.autograd.Variable", "torch.device", "torch.sort", "torch.cuda.set_device", "torch.unsqueeze", "torch.nn.utils.rnn.pad_sequence", "torch.nn.LSTM", "torch.nn.functional.tanh", "torch.IntTensor", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.functional.relu", "torch.nn.LSTMCell", "torch.zeros", "torch.LongTensor" ] ]
CORAL-CMU/kalibr
[ "ebd759286944f156c3ae6202c27fe47667929744" ]
[ "aslam_offline_calibration/kalibr/python/kalibr_camera_calibration/CameraIntializers.py" ]
[ "import sm\nimport aslam_backend as aopt\nimport aslam_cv as cv\nimport numpy as np\n\ndef addPoseDesignVariable(problem, T0=sm.Transformation()):\n q_Dv = aopt.RotationQuaternionDv( T0.q() )\n q_Dv.setActive( True )\n problem.addDesignVariable(q_Dv)\n t_Dv = aopt.EuclideanPointDv( T0.t() )\n t_Dv.setActive( True )\n problem.addDesignVariable(t_Dv)\n return aopt.TransformationBasicDv( q_Dv.toExpression(), t_Dv.toExpression() )\n\ndef stereoCalibrate(camL_geometry, camH_geometry, obslist, distortionActive=False, baseline=None):\n #####################################################\n ## find initial guess as median of all pnp solutions\n #####################################################\n if baseline is None:\n r=[]; t=[]\n for obsL, obsH in obslist:\n #if we have observations for both camss\n if obsL is not None and obsH is not None:\n success, T_L = camL_geometry.geometry.estimateTransformation(obsL)\n success, T_H = camH_geometry.geometry.estimateTransformation(obsH)\n \n baseline = T_H.inverse()*T_L\n t.append(baseline.t())\n rv=sm.RotationVector()\n r.append(rv.rotationMatrixToParameters( baseline.C() ))\n \n r_median = np.median(np.asmatrix(r), axis=0).flatten().T\n R_median = rv.parametersToRotationMatrix(r_median)\n t_median = np.median(np.asmatrix(t), axis=0).flatten().T\n \n baseline_HL = sm.Transformation( sm.rt2Transform(R_median, t_median) )\n else:\n baseline_HL = baseline\n \n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n dL = camL_geometry.geometry.projection().distortion().getParameters().flatten()\n pL = camL_geometry.geometry.projection().getParameters().flatten()\n dH = camH_geometry.geometry.projection().distortion().getParameters().flatten()\n pH = camH_geometry.geometry.projection().getParameters().flatten()\n sm.logDebug(\"initial guess for stereo calib: {0}\".format(baseline_HL.T()))\n sm.logDebug(\"initial guess for intrinsics camL: {0}\".format(pL))\n sm.logDebug(\"initial guess for intrinsics camH: {0}\".format(pH))\n sm.logDebug(\"initial guess for distortion camL: {0}\".format(dL))\n sm.logDebug(\"initial guess for distortion camH: {0}\".format(dH)) \n \n ############################################\n ## solve the bundle adjustment\n ############################################\n problem = aopt.OptimizationProblem()\n\n #baseline design variable \n baseline_dv = addPoseDesignVariable(problem, baseline_HL)\n \n #target pose dv for all target views (=T_camL_w)\n target_pose_dvs = list()\n for obsL, obsH in obslist:\n if obsL is not None: #use camL if we have an obs for this one\n success, T_t_cL = camL_geometry.geometry.estimateTransformation(obsL)\n else:\n success, T_t_cH = camH_geometry.geometry.estimateTransformation(obsH)\n T_t_cL = T_t_cH*baseline_HL #apply baseline for the second camera\n \n target_pose_dv = addPoseDesignVariable(problem, T_t_cL)\n target_pose_dvs.append(target_pose_dv)\n \n #add camera dvs\n camL_geometry.setDvActiveStatus(camL_geometry.projectionActive, distortionActive or camL_geometry.distortionActive, False)\n camH_geometry.setDvActiveStatus(camH_geometry.projectionActive, distortionActive or camH_geometry.distortionActive, False)\n problem.addDesignVariable(camL_geometry.dv.distortionDesignVariable())\n problem.addDesignVariable(camL_geometry.dv.projectionDesignVariable())\n problem.addDesignVariable(camL_geometry.dv.shutterDesignVariable())\n problem.addDesignVariable(camH_geometry.dv.distortionDesignVariable())\n problem.addDesignVariable(camH_geometry.dv.projectionDesignVariable())\n 
problem.addDesignVariable(camH_geometry.dv.shutterDesignVariable())\n \n ############################################\n ## add error terms\n ############################################\n \n #corner uncertainty\n # \\todo pass in the detector uncertainty somehow.\n cornerUncertainty = 1.0\n R = np.eye(2) * cornerUncertainty * cornerUncertainty\n invR = np.linalg.inv(R)\n \n #Add reprojection error terms for both cameras\n reprojectionErrors0 = []; reprojectionErrors1 = []\n \n for cidx, cam in enumerate([camL_geometry, camH_geometry]):\n sm.logDebug(\"stereoCalibration: adding camera error terms for {0} calibration targets\".format(len(obslist)))\n\n #get the image and target points corresponding to the frame\n target = cam.ctarget.detector.target()\n \n #add error terms for all observations\n for view_id, obstuple in enumerate(obslist):\n \n #add error terms if we have an observation for this cam\n obs=obstuple[cidx]\n if obs is not None:\n T_cam_w = target_pose_dvs[view_id].toExpression().inverse()\n \n #add the baseline for the second camera\n if cidx!=0:\n T_cam_w = baseline_dv.toExpression() * T_cam_w\n \n for i in range(0, target.size()):\n p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));\n valid, y = obs.imagePoint(i)\n if valid:\n # Create an error term.\n rerr = cam.model.reprojectionError(y, invR, T_cam_w * p_target, cam.dv)\n rerr.idx = i\n problem.addErrorTerm(rerr)\n \n if cidx==0:\n reprojectionErrors0.append(rerr)\n else:\n reprojectionErrors1.append(rerr)\n \n sm.logDebug(\"stereoCalibrate: added {0} camera error terms\".format( len(reprojectionErrors0)+len(reprojectionErrors1) ))\n \n ############################################\n ## solve\n ############################################ \n options = aopt.Optimizer2Options()\n options.verbose = True if sm.getLoggingLevel()==sm.LoggingLevel.Debug else False\n options.nThreads = 4\n options.convergenceDeltaX = 1e-3\n options.convergenceDeltaJ = 1\n options.maxIterations = 200\n options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)\n\n optimizer = aopt.Optimizer2(options)\n optimizer.setProblem(problem)\n\n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n sm.logDebug(\"Before optimization:\")\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors0 ])\n sm.logDebug( \" Reprojection error squarred (camL): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors1 ])\n sm.logDebug( \" Reprojection error squarred (camH): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n \n sm.logDebug(\"baseline={0}\".format(baseline_dv.toTransformationMatrix()))\n \n try: \n retval = optimizer.optimize()\n if retval.linearSolverFailure:\n sm.logError(\"stereoCalibrate: Optimization failed!\")\n success = not retval.linearSolverFailure\n except:\n sm.logError(\"stereoCalibrate: Optimization failed!\")\n success = False\n \n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n sm.logDebug(\"After optimization:\")\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors0 ])\n sm.logDebug( \" Reprojection error squarred (camL): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors1 ])\n sm.logDebug( \" Reprojection error squarred (camH): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n \n #verbose output\n if 
sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n dL = camL_geometry.geometry.projection().distortion().getParameters().flatten()\n pL = camL_geometry.geometry.projection().getParameters().flatten()\n dH = camH_geometry.geometry.projection().distortion().getParameters().flatten()\n pH = camH_geometry.geometry.projection().getParameters().flatten()\n sm.logDebug(\"guess for intrinsics camL: {0}\".format(pL))\n sm.logDebug(\"guess for intrinsics camH: {0}\".format(pH))\n sm.logDebug(\"guess for distortion camL: {0}\".format(dL))\n sm.logDebug(\"guess for distortion camH: {0}\".format(dH)) \n \n if success:\n baseline_HL = sm.Transformation(baseline_dv.toTransformationMatrix())\n return success, baseline_HL\n else:\n #return the intiial guess if we fail\n return success, baseline_HL\n\n\ndef calibrateIntrinsics(cam_geometry, obslist, distortionActive=True, intrinsicsActive=True):\n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n d = cam_geometry.geometry.projection().distortion().getParameters().flatten()\n p = cam_geometry.geometry.projection().getParameters().flatten()\n sm.logDebug(\"calibrateIntrinsics: intrinsics guess: {0}\".format(p))\n sm.logDebug(\"calibrateIntrinsics: distortion guess: {0}\".format(d))\n \n ############################################\n ## solve the bundle adjustment\n ############################################\n problem = aopt.OptimizationProblem()\n \n #add camera dvs\n cam_geometry.setDvActiveStatus(intrinsicsActive, distortionActive, False)\n problem.addDesignVariable(cam_geometry.dv.distortionDesignVariable())\n problem.addDesignVariable(cam_geometry.dv.projectionDesignVariable())\n problem.addDesignVariable(cam_geometry.dv.shutterDesignVariable())\n \n #corner uncertainty\n cornerUncertainty = 1.0\n R = np.eye(2) * cornerUncertainty * cornerUncertainty\n invR = np.linalg.inv(R)\n \n #get the image and target points corresponding to the frame\n target = cam_geometry.ctarget.detector.target()\n \n #target pose dv for all target views (=T_camL_w)\n reprojectionErrors = []; \n sm.logDebug(\"calibrateIntrinsics: adding camera error terms for {0} calibration targets\".format(len(obslist)))\n target_pose_dvs=list()\n for obs in obslist: \n success, T_t_c = cam_geometry.geometry.estimateTransformation(obs)\n target_pose_dv = addPoseDesignVariable(problem, T_t_c)\n target_pose_dvs.append(target_pose_dv)\n \n T_cam_w = target_pose_dv.toExpression().inverse()\n \n ## add error terms\n for i in range(0, target.size()):\n p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));\n valid, y = obs.imagePoint(i)\n if valid:\n rerr = cam_geometry.model.reprojectionError(y, invR, T_cam_w * p_target, cam_geometry.dv)\n problem.addErrorTerm(rerr)\n reprojectionErrors.append(rerr)\n \n sm.logDebug(\"calibrateIntrinsics: added {0} camera error terms\".format(len(reprojectionErrors)))\n \n ############################################\n ## solve\n ############################################ \n options = aopt.Optimizer2Options()\n options.verbose = True if sm.getLoggingLevel()==sm.LoggingLevel.Debug else False\n options.nThreads = 4\n options.convergenceDeltaX = 1e-3\n options.convergenceDeltaJ = 1\n options.maxIterations = 200\n options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)\n\n optimizer = aopt.Optimizer2(options)\n optimizer.setProblem(problem)\n\n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n sm.logDebug(\"Before optimization:\")\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors ])\n 
sm.logDebug( \" Reprojection error squarred (camL): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n \n #run intrinsic calibration\n try: \n retval = optimizer.optimize()\n if retval.linearSolverFailure:\n sm.logError(\"calibrateIntrinsics: Optimization failed!\")\n success = not retval.linearSolverFailure\n\n except:\n sm.logError(\"calibrateIntrinsics: Optimization failed!\")\n success = False\n \n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n d = cam_geometry.geometry.projection().distortion().getParameters().flatten()\n p = cam_geometry.geometry.projection().getParameters().flatten()\n sm.logDebug(\"calibrateIntrinsics: guess for intrinsics cam: {0}\".format(p))\n sm.logDebug(\"calibrateIntrinsics: guess for distortion cam: {0}\".format(d))\n \n return success\n\n\ndef solveFullBatch(cameras, baseline_guesses, graph): \n ############################################\n ## solve the bundle adjustment\n ############################################\n problem = aopt.OptimizationProblem()\n \n #add camera dvs\n for cam in cameras:\n cam.setDvActiveStatus(cam.projectionActive, cam.distortionActive, False)\n problem.addDesignVariable(cam.dv.distortionDesignVariable())\n problem.addDesignVariable(cam.dv.projectionDesignVariable())\n problem.addDesignVariable(cam.dv.shutterDesignVariable())\n \n baseline_dvs = list()\n for baseline_idx in range(0, len(cameras)-1): \n baseline_dv = aopt.TransformationDv(baseline_guesses[baseline_idx])\n \n for i in range(0, baseline_dv.numDesignVariables()):\n problem.addDesignVariable(baseline_dv.getDesignVariable(i))\n \n baseline_dvs.append( baseline_dv )\n \n #corner uncertainty\n cornerUncertainty = 1.0\n R = np.eye(2) * cornerUncertainty * cornerUncertainty\n invR = np.linalg.inv(R)\n \n #get the target\n target = cameras[0].ctarget.detector.target()\n\n #Add calibration target reprojection error terms for all camera in chain\n target_pose_dvs = list()\n \n #shuffle the views\n reprojectionErrors = []; \n timestamps = graph.obs_db.getAllViewTimestamps()\n for view_id, timestamp in enumerate(timestamps):\n \n #get all observations for all cams at this time\n obs_tuple = graph.obs_db.getAllObsAtTimestamp(timestamp)\n\n #create a target pose dv for all target views (= T_cam0_w)\n T0 = graph.getTargetPoseGuess(timestamp, cameras, baseline_guesses)\n target_pose_dv = addPoseDesignVariable(problem, T0)\n target_pose_dvs.append(target_pose_dv)\n \n\n for cidx, obs in obs_tuple:\n cam = cameras[cidx]\n \n #calibration target coords to camera X coords\n T_cam0_calib = target_pose_dv.toExpression().inverse()\n\n #build pose chain (target->cam0->baselines->camN)\n T_camN_calib = T_cam0_calib\n for idx in range(0, cidx):\n T_camN_calib = baseline_dvs[idx].toExpression() * T_camN_calib\n \n \n ## add error terms\n for i in range(0, target.size()):\n p_target = aopt.HomogeneousExpression(sm.toHomogeneous(target.point(i)));\n valid, y = obs.imagePoint(i)\n if valid:\n rerr = cameras[cidx].model.reprojectionError(y, invR, T_camN_calib * p_target, cameras[cidx].dv)\n problem.addErrorTerm(rerr)\n reprojectionErrors.append(rerr)\n \n sm.logDebug(\"solveFullBatch: added {0} camera error terms\".format(len(reprojectionErrors)))\n \n ############################################\n ## solve\n ############################################ \n options = aopt.Optimizer2Options()\n options.verbose = True if sm.getLoggingLevel()==sm.LoggingLevel.Debug else False\n options.nThreads = 4\n options.convergenceDeltaX = 1e-3\n 
options.convergenceDeltaJ = 1\n options.maxIterations = 250\n options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(10)\n\n optimizer = aopt.Optimizer2(options)\n optimizer.setProblem(problem)\n\n #verbose output\n if sm.getLoggingLevel()==sm.LoggingLevel.Debug:\n sm.logDebug(\"Before optimization:\")\n e2 = np.array([ e.evaluateError() for e in reprojectionErrors ])\n sm.logDebug( \" Reprojection error squarred (camL): mean {0}, median {1}, std: {2}\".format(np.mean(e2), np.median(e2), np.std(e2) ) )\n \n #run full batch calibration\n try:\n retval = optimizer.optimize()\n if retval.linearSolverFailure:\n sm.logError(\"solveFullBatch: Optimization failed!\")\n success = not retval.linearSolverFailure\n\n except:\n sm.logError(\"solveFullBatch: Optimization failed!\")\n success = False\n\n baselines=list()\n for baseline_dv in baseline_dvs:\n baselines.append( sm.Transformation(baseline_dv.T()) )\n \n return success, baselines\n\n" ]
[ [ "numpy.eye", "numpy.linalg.inv", "numpy.median", "numpy.asmatrix", "numpy.std", "numpy.mean" ] ]
ymaxgit/mxnet
[ "01ae629c6593e0352fd30979bccd0196854ef882" ]
[ "tests/python/unittest/test_gluon_rnn.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport mxnet as mx\nfrom mxnet import gluon\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport unittest\nfrom mxnet.test_utils import almost_equal\n\n\ndef test_rnn():\n cell = gluon.rnn.RNNCell(100, prefix='rnn_')\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']\n assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef test_lstm():\n cell = gluon.rnn.LSTMCell(100, prefix='rnn_')\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']\n assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef test_lstm_forget_bias():\n forget_bias = 2.0\n stack = gluon.rnn.SequentialRNNCell()\n stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l0_'))\n stack.add(gluon.rnn.LSTMCell(100, i2h_bias_initializer=mx.init.LSTMBias(forget_bias), prefix='l1_'))\n\n dshape = (32, 1, 200)\n data = mx.sym.Variable('data')\n\n sym, _ = stack.unroll(1, data, merge_outputs=True)\n mod = mx.mod.Module(sym, label_names=None, context=mx.cpu(0))\n mod.bind(data_shapes=[('data', dshape)], label_shapes=None)\n\n mod.init_params()\n\n bias_argument = next(x for x in sym.list_arguments() if x.endswith('i2h_bias'))\n expected_bias = np.hstack([np.zeros((100,)),\n forget_bias * np.ones(100, ), np.zeros((2 * 100,))])\n assert_allclose(mod.get_params()[0][bias_argument].asnumpy(), expected_bias)\n\n\ndef test_gru():\n cell = gluon.rnn.GRUCell(100, prefix='rnn_')\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']\n assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef 
test_residual():\n cell = gluon.rnn.ResidualCell(gluon.rnn.GRUCell(50, prefix='rnn_'))\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]\n outputs, _ = cell.unroll(2, inputs)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == \\\n ['rnn_h2h_bias', 'rnn_h2h_weight', 'rnn_i2h_bias', 'rnn_i2h_weight']\n # assert outputs.list_outputs() == \\\n # ['rnn_t0_out_plus_residual_output', 'rnn_t1_out_plus_residual_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))\n assert outs == [(10, 50), (10, 50)]\n outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50)),\n rnn_t1_data=mx.nd.ones((10, 50)),\n rnn_i2h_weight=mx.nd.zeros((150, 50)),\n rnn_i2h_bias=mx.nd.zeros((150,)),\n rnn_h2h_weight=mx.nd.zeros((150, 50)),\n rnn_h2h_bias=mx.nd.zeros((150,)))\n expected_outputs = np.ones((10, 50))\n assert np.array_equal(outputs[0].asnumpy(), expected_outputs)\n assert np.array_equal(outputs[1].asnumpy(), expected_outputs)\n\n\ndef test_residual_bidirectional():\n cell = gluon.rnn.ResidualCell(\n gluon.rnn.BidirectionalCell(\n gluon.rnn.GRUCell(25, prefix='rnn_l_'),\n gluon.rnn.GRUCell(25, prefix='rnn_r_')))\n\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]\n outputs, _ = cell.unroll(2, inputs, merge_outputs=False)\n outputs = mx.sym.Group(outputs)\n assert sorted(cell.collect_params().keys()) == \\\n ['rnn_l_h2h_bias', 'rnn_l_h2h_weight', 'rnn_l_i2h_bias', 'rnn_l_i2h_weight',\n 'rnn_r_h2h_bias', 'rnn_r_h2h_weight', 'rnn_r_i2h_bias', 'rnn_r_i2h_weight']\n # assert outputs.list_outputs() == \\\n # ['bi_t0_plus_residual_output', 'bi_t1_plus_residual_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))\n assert outs == [(10, 50), (10, 50)]\n outputs = outputs.eval(rnn_t0_data=mx.nd.ones((10, 50))+5,\n rnn_t1_data=mx.nd.ones((10, 50))+5,\n rnn_l_i2h_weight=mx.nd.zeros((75, 50)),\n rnn_l_i2h_bias=mx.nd.zeros((75,)),\n rnn_l_h2h_weight=mx.nd.zeros((75, 25)),\n rnn_l_h2h_bias=mx.nd.zeros((75,)),\n rnn_r_i2h_weight=mx.nd.zeros((75, 50)),\n rnn_r_i2h_bias=mx.nd.zeros((75,)),\n rnn_r_h2h_weight=mx.nd.zeros((75, 25)),\n rnn_r_h2h_bias=mx.nd.zeros((75,)))\n expected_outputs = np.ones((10, 50))+5\n assert np.array_equal(outputs[0].asnumpy(), expected_outputs)\n assert np.array_equal(outputs[1].asnumpy(), expected_outputs)\n\n\ndef test_stack():\n cell = gluon.rnn.SequentialRNNCell()\n for i in range(5):\n if i == 1:\n cell.add(gluon.rnn.ResidualCell(gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_' % i)))\n else:\n cell.add(gluon.rnn.LSTMCell(100, prefix='rnn_stack%d_'%i))\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n keys = sorted(cell.collect_params().keys())\n for i in range(5):\n assert 'rnn_stack%d_h2h_weight'%i in keys\n assert 'rnn_stack%d_h2h_bias'%i in keys\n assert 'rnn_stack%d_i2h_weight'%i in keys\n assert 'rnn_stack%d_i2h_bias'%i in keys\n assert outputs.list_outputs() == ['rnn_stack4_t0_out_output', 'rnn_stack4_t1_out_output', 'rnn_stack4_t2_out_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef test_bidirectional():\n cell = gluon.rnn.BidirectionalCell(\n gluon.rnn.LSTMCell(100, prefix='rnn_l0_'),\n gluon.rnn.LSTMCell(100, prefix='rnn_r0_'),\n output_prefix='rnn_bi_')\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = 
cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n assert outputs.list_outputs() == ['rnn_bi_t0_output', 'rnn_bi_t1_output', 'rnn_bi_t2_output']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 200), (10, 200), (10, 200)]\n\n\ndef test_zoneout():\n cell = gluon.rnn.ZoneoutCell(gluon.rnn.RNNCell(100, prefix='rnn_'), zoneout_outputs=0.5,\n zoneout_states=0.5)\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(3)]\n outputs, _ = cell.unroll(3, inputs)\n outputs = mx.sym.Group(outputs)\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10,50), rnn_t1_data=(10,50), rnn_t2_data=(10,50))\n assert outs == [(10, 100), (10, 100), (10, 100)]\n\n\ndef check_rnn_forward(layer, inputs, deterministic=True):\n inputs.attach_grad()\n layer.collect_params().initialize()\n with mx.autograd.record():\n out = layer.unroll(3, inputs, merge_outputs=False)[0]\n mx.autograd.backward(out)\n out = layer.unroll(3, inputs, merge_outputs=True)[0]\n out.backward()\n\n np_out = out.asnumpy()\n np_dx = inputs.grad.asnumpy()\n\n layer.hybridize()\n\n with mx.autograd.record():\n out = layer.unroll(3, inputs, merge_outputs=False)[0]\n mx.autograd.backward(out)\n out = layer.unroll(3, inputs, merge_outputs=True)[0]\n out.backward()\n\n if deterministic:\n mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)\n mx.test_utils.assert_almost_equal(np_dx, inputs.grad.asnumpy(), rtol=1e-3, atol=1e-5)\n\n\ndef test_rnn_cells():\n check_rnn_forward(gluon.rnn.LSTMCell(100, input_size=200), mx.nd.ones((8, 3, 200)))\n check_rnn_forward(gluon.rnn.RNNCell(100, input_size=200), mx.nd.ones((8, 3, 200)))\n check_rnn_forward(gluon.rnn.GRUCell(100, input_size=200), mx.nd.ones((8, 3, 200)))\n\n bilayer = gluon.rnn.BidirectionalCell(gluon.rnn.LSTMCell(100, input_size=200),\n gluon.rnn.LSTMCell(100, input_size=200))\n check_rnn_forward(bilayer, mx.nd.ones((8, 3, 200)))\n\n check_rnn_forward(gluon.rnn.DropoutCell(0.5), mx.nd.ones((8, 3, 200)), False)\n\n check_rnn_forward(gluon.rnn.ZoneoutCell(gluon.rnn.LSTMCell(100, input_size=200),\n 0.5, 0.2),\n mx.nd.ones((8, 3, 200)), False)\n\n net = gluon.rnn.SequentialRNNCell()\n net.add(gluon.rnn.LSTMCell(100, input_size=200))\n net.add(gluon.rnn.RNNCell(100, input_size=100))\n net.add(gluon.rnn.GRUCell(100, input_size=100))\n check_rnn_forward(net, mx.nd.ones((8, 3, 200)))\n\ndef check_rnn_layer_forward(layer, inputs, states=None):\n layer.collect_params().initialize()\n inputs.attach_grad()\n with mx.autograd.record():\n out = layer(inputs, states)\n if states is not None:\n assert isinstance(out, tuple) and len(out) == 2\n out = out[0]\n else:\n assert isinstance(out, mx.nd.NDArray)\n out.backward()\n\n np_out = out.asnumpy()\n np_dx = inputs.grad.asnumpy()\n\n layer.hybridize()\n\n with mx.autograd.record():\n out = layer(inputs, states)\n if states is not None:\n assert isinstance(out, tuple) and len(out) == 2\n out = out[0]\n else:\n assert isinstance(out, mx.nd.NDArray)\n out.backward()\n\n mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-3, atol=1e-5)\n mx.test_utils.assert_almost_equal(np_dx, inputs.grad.asnumpy(), rtol=1e-3, atol=1e-5)\n\ndef test_rnn_layers():\n check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)))\n check_rnn_layer_forward(gluon.rnn.RNN(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), mx.nd.ones((8, 3, 20)))\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2), 
mx.nd.ones((8, 3, 20)), [mx.nd.ones((2, 3, 10)), mx.nd.ones((2, 3, 10))])\n check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)))\n check_rnn_layer_forward(gluon.rnn.GRU(10, 2), mx.nd.ones((8, 3, 20)), mx.nd.ones((2, 3, 10)))\n\n net = gluon.nn.Sequential()\n net.add(gluon.rnn.LSTM(10, 2, bidirectional=True))\n net.add(gluon.nn.BatchNorm(axis=2))\n net.add(gluon.nn.Flatten())\n net.add(gluon.nn.Dense(3, activation='relu'))\n net.collect_params().initialize()\n with mx.autograd.record():\n net(mx.nd.ones((2, 3, 10))).backward()\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n" ]
[ [ "numpy.ones", "numpy.zeros" ] ]
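The record above closes out a suite of MXNet gluon RNN unit tests (unroll shape checks, zoneout, hybridized forward/backward). As a hedged aside, and not part of the record itself, here is a minimal sketch of the forward/backward pattern those tests rely on, assuming the MXNet 1.x gluon API:

import mxnet as mx
from mxnet import gluon

# Minimal sketch, assuming MXNet 1.x: run one LSTM layer forward and backward.
layer = gluon.rnn.LSTM(10, 2)            # hidden size 10, 2 stacked layers
layer.collect_params().initialize()      # randomly initialize all weights

inputs = mx.nd.ones((8, 3, 20))          # default layout is (seq_len, batch, features)
inputs.attach_grad()                     # ask autograd to track input gradients
with mx.autograd.record():
    out = layer(inputs)                  # output shape: (8, 3, 10)
out.backward()
print(out.shape, inputs.grad.shape)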
lamsoa729/FoXlink
[ "3c061b02968cdab1def752d5c145a6df4615504b" ]
[ "foxlink/me_zrl_bound_evolvers.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"@package docstring\nFile: me_zrl_bound_evolvers.py\nAuthor: Adam Lamson\nEmail: [email protected]\nDescription:\n\"\"\"\n\nimport numpy as np\n# from scipy.integrate import dblquad\nfrom .me_helpers import dr_dt, convert_sol_to_geom\nfrom .me_zrl_odes import (rod_geom_derivs_zrl, calc_moment_derivs_zrl,\n                          calc_moment_derivs_zrl_B_terms,\n                          calc_boundary_derivs_zrl)\nfrom .me_zrl_helpers import (avg_force_zrl,\n                             prep_zrl_bound_evolver,\n                             get_zrl_moments_and_boundary_terms)\nfrom .rod_steric_forces import calc_wca_force_torque\nfrom .me_zrl_evolvers import prep_zrl_evolver\n\n\ndef evolver_zrl_bound(sol, fric_coeff, params):\n    \"\"\"!Calculate all time derivatives necessary to solve the moment expansion\n    evolution of the Fokker-Planck equation of zero rest length (zrl)\n    crosslinkers bound to moving rods. d<var> is the time derivative of the\n    corresponding variable.\n\n    @param sol: Solution vector to solve_ivp\n    @param fric_coeff: friction coefficients of rod\n    @param params: Constant parameters of the simulation\n    @return: Time-derivatives of all time varying quantities in a flattened\n    array\n    \"\"\"\n    # Define useful parameters for functions\n    hL_i, hL_j = (.5 * params['L_i'], .5 * params['L_j'])\n    ks = params['ks']\n    r_i, r_j, u_i, u_j = convert_sol_to_geom(sol)\n    r_ij = r_j - r_i\n\n    (scalar_geom, q_arr, Q_arr) = prep_zrl_bound_evolver(sol, params)\n    (mu_kl, B_terms) = get_zrl_moments_and_boundary_terms(sol)\n    if mu_kl[0] < 0.:\n        mu_kl[0] = 0.\n    if mu_kl[4] < 0.:\n        mu_kl[4] = 0.\n    if mu_kl[5] < 0.:\n        mu_kl[5] = 0.\n\n    # Get average force of crosslinkers on rod2\n    f_ij = avg_force_zrl(r_ij, u_i, u_j, mu_kl[0], mu_kl[1], mu_kl[2], ks)\n    # Evolution of rod positions\n    dgeom = rod_geom_derivs_zrl(f_ij, r_ij, u_i, u_j, scalar_geom,\n                                mu_kl, fric_coeff, ks)\n\n    # Evolution of moments\n    dmu_kl = calc_moment_derivs_zrl_B_terms(mu_kl, scalar_geom,\n                                            q_arr, B_terms, params)\n\n    # Evolution of boundary conditions\n    dB_terms = calc_boundary_derivs_zrl(B_terms, scalar_geom, Q_arr, params)\n    # np.concatenate expects a single sequence of arrays; its second\n    # positional argument is the concatenation axis.\n    dsol = np.concatenate((dgeom, dmu_kl, dB_terms))\n    return dsol\n\n##########################################\n" ]
[ [ "numpy.concatenate" ] ]
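The evolver above assembles its flattened derivative vector with numpy.concatenate, which takes a single sequence of arrays (the second positional argument is the axis, so passing the arrays as separate arguments raises a TypeError). A standalone sketch with made-up, illustrative array sizes:

import numpy as np

dgeom = np.zeros(12)     # rod position/orientation derivatives (illustrative size)
dmu_kl = np.zeros(6)     # moment derivatives (illustrative size)
dB_terms = np.zeros(8)   # boundary-term derivatives (illustrative size)

# One tuple in, one flat vector out -- the assembly pattern used above.
dsol = np.concatenate((dgeom, dmu_kl, dB_terms))
assert dsol.shape == (26,)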
shuishoudage/music_generator
[ "7c17ef5bb3a5d872bff5ac8e1664f57f5b4ea08f" ]
[ "data_clean/preprocessing.py" ]
[ "from typing import List, Tuple, Dict, Any\nfrom collections import Counter\nimport pretty_midi\nimport matplotlib.pyplot as plt\nimport librosa.display\nimport os\nfrom os import listdir, walk\nfrom os.path import isfile, isdir, join\nfrom sys import argv\nimport traceback\nimport logging\nimport numpy as np\nfrom shutil import copyfile\nimport shutil\n\n\n# Ideas behind the preprocessing class\n#\n# 1. Only use MIDI files with one tempo and one key, since some MIDI music\n# has key and tempo changes inside, which can produce unpredictable results.\n#\n# 2. List the key distribution of the corpus and only select the most\n# frequently appearing keys (different keys may increase training difficulty).\n#\n# 3. Only select music with similar tempos, based on the mean and std of the\n# tempos; a simple rule is left boundary = mean - std, right boundary = mean + std.\n#\n# 4. Find the mean of the highest and lowest pitches in the corpus and filter\n# out files outside that range. Pitch values span 0-128; there is no point in\n# covering the two extremes.\nclass FileReport(object):\n    \"\"\"\n    This class is mainly for generating meta information for our report\n    \"\"\"\n\n    def __init__(self,\n                 tempos: List[float],\n                 freq_key: Dict[int, int],\n                 min_pitch: List[int],\n                 max_pitch: List[int]):\n        self.tempos = tempos\n        self.freq_key = freq_key\n        self.min_pitch = min_pitch\n        self.max_pitch = max_pitch\n\n    def aggregation_report(self):\n        \"\"\"\n        Two important variables are min_pitch and max_pitch,\n        since they will be used to decode from pitch back to audio\n        \"\"\"\n        temp_mean = np.array(self.tempos).mean()\n        temp_std = np.array(self.tempos).std()\n        most_freq_key = self.getMostFreqValue(self.freq_key)\n        min_pitch = int(np.array(self.min_pitch).mean())\n        max_pitch = int(np.array(self.max_pitch).mean())\n        return temp_mean, temp_std, most_freq_key, min_pitch, max_pitch\n\n    def plots(self):\n        # implement later on\n        pass\n\n    def getMostFreqValue(self, keys: Dict[int, int], reversed=True) -> int:\n        return sorted(keys.items(), key=lambda kv: kv[1], reverse=reversed)[0][0]\n\n\nclass Preprocess(object):\n    def __init__(self, path: str):\n        self.path = path\n        self.fileFilter()\n\n    def generateMidiFileReport(self) -> FileReport:\n        \"\"\"\n        Meta information like tempos, keys and pitches will be generated for\n        filtering the midi files\n        \"\"\"\n        tempos = []\n        keys = []\n        max_pitchs = []\n        min_pitchs = []\n        for pm in self.pms:\n            try:\n                tempos.append(pm.estimate_tempo())\n                key = pm.key_signature_changes[0].key_number\n                keys.append(key)\n                min_pitch, max_pitch = self.getMinMaxPitch(pm)\n                max_pitchs.append(max_pitch)\n                min_pitchs.append(min_pitch)\n            except:\n                pass\n        self.report = FileReport(tempos, dict(\n            Counter(keys)), min_pitchs, max_pitchs)\n        return self.report\n\n    def getMinMaxPitch(self, pm: pretty_midi.PrettyMIDI):\n        \"\"\"\n        Find the min and max pitch inside a midi file\n        \"\"\"\n        notes = [\n            note.pitch for instrument in pm.instruments for note in instrument.notes\n        ]\n        return min(notes), max(notes)\n\n    def SaveFilterMIDIfiles(self):\n        \"\"\"\n        Use the generated meta information to filter out files not in range\n        \"\"\"\n        report = self.generateMidiFileReport()\n        temp_mean, temp_std, key, left_boundary, right_boundary = report.aggregation_report()\n        piano_roll_paths = []\n        for pm, path in zip(self.pms, self.paths):\n            try:\n                tempo = pm.estimate_tempo()\n                min_pitch, max_pitch = self.getMinMaxPitch(pm)\n                if self.isTempoInRange(tempo, temp_mean, temp_std) \\\n                        and self.isPitchInRange(min_pitch, max_pitch, left_boundary, right_boundary) \\\n                        and self.isKeyMatch(pm.key_signature_changes[0].key_number, key):\n                    savedPath = os.path.join(os.getcwd(), 'filterData')\n                    if not os.path.exists(savedPath):\n                        os.makedirs(savedPath, exist_ok=True)\n                    shutil.move(\n                        path, os.path.join(os.getcwd(), 'filterData', os.path.basename(path)))\n            except:\n                pass\n\n    def isTempoInRange(self, tempo: float, mean: float, std: float) -> bool:\n        \"\"\"\n        A helper function that checks whether a midi file's tempo is in range\n        \"\"\"\n        if tempo > (mean - std) and tempo < (mean + std):\n            return True\n        return False\n\n    def isKeyMatch(self, key: int, grand_truth_key: int) -> bool:\n        if key == grand_truth_key:\n            return True\n        return False\n\n    def isPitchInRange(self, low_pitch: int,\n                       high_pitch: int,\n                       left_boundary: int,\n                       right_boundary: int) -> bool:\n        if low_pitch >= left_boundary and high_pitch <= right_boundary:\n            return True\n        return False\n\n    def fileFilter(self):\n        \"\"\"\n        First filtering pass: only keep files with one key signature and one\n        time signature\n        \"\"\"\n        self.pms: List[pretty_midi.PrettyMIDI] = []\n        self.paths: List[str] = []\n        for (dirPath, _, files) in walk(self.path):  # type: ignore\n            for file in files:\n                # get the absolute path of the file\n                path = join(dirPath, file)\n                try:\n                    pm = pretty_midi.PrettyMIDI(path)\n                    # only handle files that contain one key signature\n                    # and one time signature\n                    if len(pm.key_signature_changes) == 1 \\\n                            and len(pm.time_signature_changes) == 1:\n                        self.pms.append(pm)\n                        self.paths.append(path)\n                except:  # skip all parsing exceptions\n                    pass\n\n\ndef cliArgParser(argv) -> Any:\n    if len(argv) != 2:\n        raise ValueError(\"path of folder must be provided\")\n    if isdir(argv[1]):\n        path = os.path.abspath(argv[1])\n        return path\n    else:\n        raise ValueError(\"provided path is not a folder\")\n\n\nif __name__ == \"__main__\":\n    try:\n        path = cliArgParser(argv)\n        p = Preprocess(path)\n        p.SaveFilterMIDIfiles()\n    except Exception as err:\n        print(traceback.format_exc())\n        exit(1)\n" ]
[ [ "numpy.array" ] ]
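The Preprocess class above keys all of its filtering off pretty_midi metadata. A hedged, self-contained sketch of the same per-file checks; 'song.mid' is a hypothetical path, and the calls shown (estimate_tempo, key_signature_changes, time_signature_changes) are the ones the record relies on:

import pretty_midi

pm = pretty_midi.PrettyMIDI('song.mid')  # hypothetical input file
# Keep only files with a single key signature and a single time signature,
# then collect the metadata the report aggregates.
if len(pm.key_signature_changes) == 1 and len(pm.time_signature_changes) == 1:
    tempo = pm.estimate_tempo()
    key = pm.key_signature_changes[0].key_number
    pitches = [note.pitch for inst in pm.instruments for note in inst.notes]
    print(tempo, key, min(pitches), max(pitches))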
kushalj001/transformers
[ "0538820737bd8fb9ba1eb3a772412c6bbe2433ab" ]
[ "src/transformers/modeling_t5.py" ]
[ "# coding=utf-8\n# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch T5 model. \"\"\"\n\n\nimport copy\nimport math\nimport os\nimport warnings\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom .configuration_t5 import T5Config\nfrom .file_utils import (\n DUMMY_INPUTS,\n DUMMY_MASK,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput\nfrom .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer\nfrom .utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"T5Config\"\n_TOKENIZER_FOR_DOC = \"T5Tokenizer\"\n\n####################################################\n# This dict contains shortcut names and associated url\n# for the pretrained weights provided with the models\n####################################################\nT5_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n # See all T5 models at https://huggingface.co/models?filter=t5\n]\n\n\n####################################################\n# This is a conversion method from TF 1.0 to PyTorch\n# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28\n####################################################\ndef load_tf_weights_in_t5(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n tf_weights = {}\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n tf_weights[name] = array\n\n for txt_name in names:\n name = txt_name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n if \"_slot_\" in name[-1]:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n tf_weights.pop(txt_name, None)\n continue\n pointer = model\n array = tf_weights[txt_name]\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n # elif scope_names[0] == 'scale':\n # pointer = getattr(pointer, 'weight')\n # elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':\n # pointer = getattr(pointer, 'bias')\n # elif scope_names[0] == 'squad':\n # pointer = getattr(pointer, 'classifier')\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if scope_names[0] not in [\"kernel\", \"scale\", \"embedding\"]:\n pointer = getattr(pointer, \"weight\")\n if scope_names[0] != \"embedding\":\n logger.info(\"Transposing numpy weight of shape {} for {}\".format(array.shape, name))\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array.astype(np.float32))\n tf_weights.pop(txt_name, None)\n\n logger.info(\"Weights not copied to PyTorch model: {}\".format(\", \".join(tf_weights.keys())))\n # logger.info(\"Weights not copied to PyTorch model: {}\".format(', '.join(tf_weights.keys())))\n return model\n\n\n####################################################\n# PyTorch Models are constructed by sub-classing\n# - torch.nn.Module for the layers and\n# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)\n####################################################\n\n\nclass T5LayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-6):\n \"\"\"\n Construct a layernorm module in the T5 style No bias and no subtraction of mean.\n \"\"\"\n super().__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n # layer norm should always be calculated in float32\n variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)\n x = x / torch.sqrt(variance + self.variance_epsilon)\n\n if self.weight.dtype == 
torch.float16:\n x = x.to(torch.float16)\n return self.weight * x\n\n\nclass T5DenseReluDense(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)\n self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states):\n h = self.wi(hidden_states)\n h = F.relu(h)\n h = self.dropout(h)\n h = self.wo(h)\n return h\n\n\nclass T5LayerFF(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.DenseReluDense = T5DenseReluDense(config)\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(self, hidden_states):\n norm_x = self.layer_norm(hidden_states)\n y = self.DenseReluDense(norm_x)\n layer_output = hidden_states + self.dropout(y)\n return layer_output\n\n\nclass T5Attention(nn.Module):\n def __init__(self, config: T5Config, has_relative_attention_bias=False, is_bidirectional=False):\n super().__init__()\n self.is_bidirectional = is_bidirectional\n self.is_decoder = config.is_decoder\n self.has_relative_attention_bias = has_relative_attention_bias\n\n self.relative_attention_num_buckets = config.relative_attention_num_buckets\n self.d_model = config.d_model\n self.d_kv = config.d_kv\n self.n_heads = config.num_heads\n self.dropout = config.dropout_rate\n self.inner_dim = self.n_heads * self.d_kv\n\n # Mesh TensorFlow initialization to avoid scaling before softmax\n self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)\n\n if self.has_relative_attention_bias:\n self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.d_kv, self.pruned_heads)\n # Prune linear layers\n self.q = prune_linear_layer(self.q, index)\n self.k = prune_linear_layer(self.k, index)\n self.v = prune_linear_layer(self.v, index)\n self.o = prune_linear_layer(self.o, index, dim=1)\n # Update hyper params\n self.n_heads = self.n_heads - len(heads)\n self.inner_dim = self.d_kv * self.n_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n @staticmethod\n def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):\n \"\"\"\n Adapted from Mesh Tensorflow:\n https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593\n\n Translate relative position to a bucket number for relative attention. The relative position is defined as\n memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to\n position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for\n small absolute relative_position and larger buckets for larger absolute relative_positions. All relative\n positions >=max_distance map to the same bucket. 
All relative positions <=-max_distance map to the same bucket.\n This should allow for more graceful generalization to longer sequences than the model has been trained on\n\n Args:\n relative_position: an int32 Tensor\n bidirectional: a boolean - whether the attention is bidirectional\n num_buckets: an integer\n max_distance: an integer\n\n Returns:\n a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)\n \"\"\"\n ret = 0\n n = -relative_position\n if bidirectional:\n num_buckets //= 2\n ret += (n < 0).to(torch.long) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets\n n = torch.abs(n)\n else:\n n = torch.max(n, torch.zeros_like(n))\n # now n is in the range [0, inf)\n\n # half of the buckets are for exact increments in positions\n max_exact = num_buckets // 2\n is_small = n < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance\n val_if_large = max_exact + (\n torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)\n ).to(torch.long)\n val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))\n\n ret += torch.where(is_small, n, val_if_large)\n return ret\n\n def compute_bias(self, qlen, klen):\n \"\"\" Compute binned relative position bias \"\"\"\n context_position = torch.arange(qlen, dtype=torch.long)[:, None]\n memory_position = torch.arange(klen, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position # shape (qlen, klen)\n rp_bucket = self._relative_position_bucket(\n relative_position, # shape (qlen, klen)\n bidirectional=self.is_bidirectional,\n num_buckets=self.relative_attention_num_buckets,\n )\n rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)\n values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)\n values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen)\n return values\n\n def forward(\n self,\n input,\n mask=None,\n kv=None,\n position_bias=None,\n past_key_value=None,\n head_mask=None,\n query_length=None,\n use_cache=False,\n output_attentions=False,\n ):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n # past_key_value[0] is (bs, n_heads, q_len - 1, dim_per_head)\n bs, qlen, dim = input.size()\n\n if past_key_value is not None:\n assert self.is_decoder is True, \"Encoder cannot cache past key value states\"\n assert (\n len(past_key_value) == 2\n ), \"past_key_value should have 2 past states: keys and values. 
Got {} past states\".format(\n len(past_key_value)\n )\n real_qlen = qlen + past_key_value[0].shape[2] if query_length is None else query_length\n else:\n real_qlen = qlen\n\n if kv is None:\n klen = real_qlen\n else:\n klen = kv.size(1)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)\n\n q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)\n\n if kv is None:\n k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head)\n elif past_key_value is None:\n k = v = kv\n k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if past_key_value is not None:\n if kv is None:\n k_, v_ = past_key_value\n k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)\n v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = past_key_value\n\n if self.is_decoder and use_cache is True:\n present_key_value_state = ((k, v),)\n else:\n present_key_value_state = (None,)\n\n # (bs, n_heads, qlen, klen)\n scores = torch.matmul(\n q, k.transpose(3, 2)\n ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", q, k), compatible with onnx op>9\n\n if position_bias is None:\n if not self.has_relative_attention_bias:\n raise ValueError(\"No position_bias provided and no weights to compute position_bias\")\n position_bias = self.compute_bias(real_qlen, klen)\n\n # if key and values are already calculated\n # we want only the last query position bias\n if past_key_value is not None:\n position_bias = position_bias[:, :, -qlen:, :]\n\n if mask is not None:\n position_bias = position_bias + mask # (bs, n_heads, qlen, klen)\n\n scores += position_bias\n weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)\n weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n\n context = self.o(context)\n\n outputs = (context,) + present_key_value_state\n\n if output_attentions:\n outputs = outputs + (weights,)\n if self.has_relative_attention_bias:\n outputs = outputs + (position_bias,)\n return outputs\n\n\nclass T5LayerSelfAttention(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super().__init__()\n self.SelfAttention = T5Attention(\n config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=not config.is_decoder\n )\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n head_mask=None,\n past_key_value=None,\n use_cache=False,\n output_attentions=False,\n ):\n norm_x = self.layer_norm(hidden_states)\n attention_output = self.SelfAttention(\n norm_x,\n mask=attention_mask,\n position_bias=position_bias,\n head_mask=head_mask,\n past_key_value=past_key_value,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return 
outputs\n\n\nclass T5LayerCrossAttention(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super().__init__()\n self.EncDecAttention = T5Attention(\n config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=True\n )\n self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n def forward(\n self,\n hidden_states,\n kv,\n attention_mask=None,\n position_bias=None,\n head_mask=None,\n past_key_value=None,\n use_cache=False,\n query_length=None,\n output_attentions=False,\n ):\n norm_x = self.layer_norm(hidden_states)\n attention_output = self.EncDecAttention(\n norm_x,\n mask=attention_mask,\n kv=kv,\n position_bias=position_bias,\n head_mask=head_mask,\n past_key_value=past_key_value,\n use_cache=use_cache,\n query_length=query_length,\n output_attentions=output_attentions,\n )\n y = attention_output[0]\n layer_output = hidden_states + self.dropout(y)\n outputs = (layer_output,) + attention_output[1:] # add attentions if we output them\n return outputs\n\n\nclass T5Block(nn.Module):\n def __init__(self, config, has_relative_attention_bias=False):\n super().__init__()\n self.is_decoder = config.is_decoder\n self.layer = nn.ModuleList()\n self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))\n if self.is_decoder:\n self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias))\n\n self.layer.append(T5LayerFF(config))\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n encoder_decoder_position_bias=None,\n head_mask=None,\n past_key_value=None,\n use_cache=False,\n output_attentions=False,\n ):\n\n if past_key_value is not None:\n assert self.is_decoder, \"Only decoder can use `past_key_values`\"\n expected_num_past_key_values = 2 if encoder_hidden_states is None else 4\n\n error_message = \"There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states\".format(\n expected_num_past_key_values,\n \"2 (past / key) for cross attention\" if expected_num_past_key_values == 4 else \"\",\n len(past_key_value),\n )\n assert len(past_key_value) == expected_num_past_key_values, error_message\n\n self_attn_past_key_value = past_key_value[:2]\n cross_attn_past_key_value = past_key_value[2:]\n else:\n self_attn_past_key_value, cross_attn_past_key_value = None, None\n\n self_attention_outputs = self.layer[0](\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n head_mask=head_mask,\n past_key_value=self_attn_past_key_value,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n hidden_states, present_key_value_state = self_attention_outputs[:2]\n attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights\n\n if self.is_decoder and encoder_hidden_states is not None:\n # the actual query length is unknown for cross attention\n # if using past key value states. 
Need to inject it here\n if present_key_value_state is not None:\n query_length = present_key_value_state[0].shape[2]\n else:\n query_length = None\n\n cross_attention_outputs = self.layer[1](\n hidden_states,\n kv=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n position_bias=encoder_decoder_position_bias,\n head_mask=head_mask,\n past_key_value=cross_attn_past_key_value,\n query_length=query_length,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n hidden_states = cross_attention_outputs[0]\n # Combine self attn and cross attn key value states\n if present_key_value_state is not None:\n present_key_value_state = present_key_value_state + cross_attention_outputs[1]\n\n # Keep cross-attention outputs and relative position weights\n attention_outputs = attention_outputs + cross_attention_outputs[2:]\n\n # Apply Feed Forward layer\n hidden_states = self.layer[-1](hidden_states)\n outputs = (hidden_states,)\n\n # Add attentions if we output them\n outputs = outputs + (present_key_value_state,) + attention_outputs\n return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n\n\nclass T5PreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = T5Config\n load_tf_weights = load_tf_weights_in_t5\n base_model_prefix = \"transformer\"\n\n @property\n def dummy_inputs(self):\n input_ids = torch.tensor(DUMMY_INPUTS)\n input_mask = torch.tensor(DUMMY_MASK)\n dummy_inputs = {\n \"decoder_input_ids\": input_ids,\n \"input_ids\": input_ids,\n \"decoder_attention_mask\": input_mask,\n }\n return dummy_inputs\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n factor = self.config.initializer_factor # Used for testing weights initialization\n if isinstance(module, T5LayerNorm):\n module.weight.data.fill_(factor * 1.0)\n elif isinstance(module, (T5Model, T5ForConditionalGeneration)):\n # Mesh TensorFlow embeddings initialization\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624\n module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)\n elif isinstance(module, T5DenseReluDense):\n # Mesh TensorFlow FF initialization\n # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56\n # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89\n module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))\n if hasattr(module.wi, \"bias\") and module.wi.bias is not None:\n module.wi.bias.data.zero_()\n module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))\n if hasattr(module.wo, \"bias\") and module.wo.bias is not None:\n module.wo.bias.data.zero_()\n elif isinstance(module, T5Attention):\n # Mesh TensorFlow attention initialization to avoid scaling before softmax\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136\n d_model = self.config.d_model\n d_kv = self.config.d_kv\n n_heads = self.config.num_heads\n module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5))\n module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))\n 
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))\n module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5))\n if module.has_relative_attention_bias:\n module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))\n\n def _shift_right(self, input_ids):\n decoder_start_token_id = self.config.decoder_start_token_id\n pad_token_id = self.config.pad_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information\"\n\n # shift inputs to the right\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()\n shifted_input_ids[..., 0] = decoder_start_token_id\n\n assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids` has only positive values\"\n\n return shifted_input_ids\n\n\nclass T5Stack(T5PreTrainedModel):\n def __init__(self, config, embed_tokens=None):\n super().__init__(config)\n\n self.embed_tokens = embed_tokens\n self.is_decoder = config.is_decoder\n\n self.block = nn.ModuleList(\n [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]\n )\n self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = nn.Dropout(config.dropout_rate)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def get_output_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, new_embeddings):\n self.embed_tokens = new_embeddings\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n inputs_embeds=None,\n head_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n err_msg_prefix = \"decoder_\" if self.is_decoder else \"\"\n raise ValueError(\n f\"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n err_msg_prefix = \"decoder_\" if self.is_decoder else \"\"\n raise ValueError(f\"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds\")\n\n if inputs_embeds is None:\n assert self.embed_tokens is not None, \"You have to initialize the model with valid token embeddings\"\n inputs_embeds = self.embed_tokens(input_ids)\n\n batch_size, seq_length = input_shape\n\n # required mask seq length can be calculated via length of past\n mask_seq_length = past_key_values[0][0].shape[2] + 
seq_length if past_key_values is not None else seq_length\n\n if use_cache is True:\n assert self.is_decoder, \":obj:`use_cache` can only be set to `True` if {} is used as a decoder\".format(\n self\n )\n\n if attention_mask is None:\n attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)\n if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:\n encoder_seq_length = encoder_hidden_states.shape[1]\n encoder_attention_mask = torch.ones(\n batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long\n )\n\n # initialize past_key_values with `None` if past does not exist\n if past_key_values is None:\n past_key_values = [None] * len(self.block)\n\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)\n\n if self.is_decoder and encoder_attention_mask is not None:\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n head_mask = self.get_head_mask(head_mask, self.config.num_layers)\n present_key_value_states = () if use_cache else None\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n position_bias = None\n encoder_decoder_position_bias = None\n\n hidden_states = self.dropout(inputs_embeds)\n\n for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask=extended_attention_mask,\n position_bias=position_bias,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n encoder_decoder_position_bias=encoder_decoder_position_bias,\n head_mask=head_mask[i],\n past_key_value=past_key_value,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n # layer_outputs is a tuple with:\n # hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n hidden_states, present_key_value_state = layer_outputs[:2]\n\n if i == 0:\n # We share the position biases between the layers - the first layer store them\n # layer_outputs = hidden-states, key-value-states (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)\n position_bias = layer_outputs[3 if output_attentions else 2]\n if self.is_decoder and encoder_hidden_states is not None:\n encoder_decoder_position_bias = layer_outputs[5 if output_attentions else 3]\n # append next layer key value states\n if use_cache:\n present_key_value_states = present_key_value_states + (present_key_value_state,)\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[2],) # We keep only self-attention weights for now\n\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions]\n if v is not None\n )\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n 
past_key_values=present_key_value_states,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n )\n\n\nT5_START_DOCSTRING = r\"\"\"\n\n The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer\n <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,\n Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text\n denoising generative setting.\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nT5_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you\n should be able to pad the inputs on both the right and the left.\n\n Indices can be obtained using :class:`~transformers.T5Tokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n detail.\n\n To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training\n <./t5.html#training>`__.\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Provide for sequence to sequence training. T5 uses the :obj:`pad_token_id` as the starting token for\n :obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last\n :obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`).\n\n To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at `T5 Training\n <./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset,\n :obj:`decoder_input_ids` takes the value of :obj:`input_ids`.\n decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):\n Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. 
Causal mask will\n also be used by default.\n encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):\n Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`:\n `attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a\n sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of\n the decoder.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded\n representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`\n have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert\n :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`\n takes the value of :obj:`inputs_embeds`.\n\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare T5 Model transformer outputting raw hidden-states\" \"without any specific head on top.\",\n T5_START_DOCSTRING,\n)\nclass T5Model(T5PreTrainedModel):\n def __init__(self, config: T5Config):\n super().__init__(config)\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\n\n encoder_config = copy.deepcopy(config)\n encoder_config.use_cache = False\n encoder_config.is_encoder_decoder = False\n self.encoder = T5Stack(encoder_config, self.shared)\n\n decoder_config = copy.deepcopy(config)\n decoder_config.is_decoder = True\n decoder_config.is_encoder_decoder = False\n decoder_config.num_layers = config.num_decoder_layers\n self.decoder = T5Stack(decoder_config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared = new_embeddings\n self.encoder.set_input_embeddings(new_embeddings)\n self.decoder.set_input_embeddings(new_embeddings)\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n head_mask=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n Returns:\n\n Example::\n\n >>> from transformers import T5Tokenizer, T5Model\n\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\n >>> model = T5Model.from_pretrained('t5-small')\n\n >>> input_ids = tokenizer(\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\").input_ids # Batch size 1\n >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1\n >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True)\n\n >>> last_hidden_states = outputs.last_hidden_state\n \"\"\"\n if \"decoder_past_key_value_states\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_value_states\")\n if \"decoder_past_key_values\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_values\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # Encode if 
needed (training, first prediction pass)\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_values=past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"T5 Model with a `language modeling` head on top. \"\"\", T5_START_DOCSTRING)\nclass T5ForConditionalGeneration(T5PreTrainedModel):\n authorized_missing_keys = [r\"encoder\\.embed_tokens\\.weight\", r\"decoder\\.embed_tokens\\.weight\", r\"lm_head\\.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model_dim = config.d_model\n\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\n\n encoder_config = copy.deepcopy(config)\n encoder_config.use_cache = False\n encoder_config.is_encoder_decoder = False\n self.encoder = T5Stack(encoder_config, self.shared)\n\n decoder_config = copy.deepcopy(config)\n decoder_config.is_decoder = True\n decoder_config.is_encoder_decoder = False\n decoder_config.num_layers = config.num_decoder_layers\n self.decoder = T5Stack(decoder_config, self.shared)\n\n self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared = new_embeddings\n self.encoder.set_input_embeddings(new_embeddings)\n self.decoder.set_input_embeddings(new_embeddings)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n head_mask=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels 
for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,\n config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for\n labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n\n Examples::\n\n >>> from transformers import T5Tokenizer, T5ForConditionalGeneration\n\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\n >>> model = T5ForConditionalGeneration.from_pretrained('t5-small', return_dict=True)\n\n >>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids\n labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids\n >>> outputs = model(input_ids=input_ids, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n\n >>> input_ids = tokenizer(\"summarize: studies have shown that owning a dog is good for you \", return_tensors=\"pt\").input_ids # Batch size 1\n >>> outputs = model.generate(input_ids)\n \"\"\"\n\n if \"lm_labels\" in kwargs:\n warnings.warn(\n \"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"lm_labels\")\n if \"decoder_past_key_value_states\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_value_states\")\n if \"decoder_past_key_values\" in kwargs:\n warnings.warn(\n \"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n past_key_values = kwargs.pop(\"decoder_past_key_values\")\n assert kwargs == {}, f\"Unexpected keyword arguments: {list(kwargs.keys())}.\"\n\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # Encode if needed (training, first prediction pass)\n if encoder_outputs is None:\n # Convert encoder inputs in embeddings if needed\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\n # get decoder inputs from shifting lm labels to the right\n decoder_input_ids = self._shift_right(labels)\n\n # If decoding with past key value states, only the last tokens\n # should be given as an input\n if past_key_values is not None:\n assert labels is None, \"Decoder should not use cached key value states when training.\"\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n if decoder_inputs_embeds is not None:\n decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]\n\n # Decode\n decoder_outputs = 
self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_values=past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = decoder_outputs[0]\n # Rescale output before projecting on vocab\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\n sequence_output = sequence_output * (self.model_dim ** -0.5)\n lm_logits = self.lm_head(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-100)\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))\n # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\n\n if not return_dict:\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=loss,\n logits=lm_logits,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs):\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\n \"decoder_input_ids\": input_ids,\n \"past_key_values\": past,\n \"encoder_outputs\": encoder_outputs,\n \"attention_mask\": attention_mask,\n \"use_cache\": use_cache,\n }\n\n def _reorder_cache(self, past, beam_idx):\n # if decoder past is not included in output\n # speedy decoding is disabled and no need to reorder\n if past is None:\n logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\")\n return past\n\n reordered_decoder_past = ()\n for layer_past_states in past:\n # get the correct batch idx from layer past batch dim\n # batch dim of `past` is at 2nd position\n reordered_layer_past_states = ()\n for layer_past_state in layer_past_states:\n # need to set correct `past` for each of the four key / value states\n reordered_layer_past_states = reordered_layer_past_states + (\n layer_past_state.index_select(0, beam_idx),\n )\n\n assert reordered_layer_past_states[0].shape == layer_past_states[0].shape\n assert len(reordered_layer_past_states) == len(layer_past_states)\n\n reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)\n return reordered_decoder_past\n" ]
[ [ "torch.sqrt", "torch.nn.ModuleList", "tensorflow.train.load_variable", "torch.cat", "torch.nn.Dropout", "numpy.transpose", "torch.nn.functional.dropout", "torch.all", "torch.arange", "torch.ones", "tensorflow.train.list_variables", "torch.tensor", "torch.nn.Linear", "torch.zeros_like", "torch.nn.Embedding", "torch.nn.CrossEntropyLoss", "torch.nn.functional.relu", "torch.where", "torch.abs", "torch.full_like", "torch.matmul" ] ]
sepam/machine-learning-engineering-for-production-public
[ "cd6053459eee9b7f30bf86da63104b3f1381383a" ]
[ "course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py" ]
[ "import pickle\nimport numpy as np\nfrom typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, conlist\n\n\n\napp = FastAPI(title=\"Predicting Wine Class with batching\")\n\n# Open classifier in global scope\nwith open(\"models/wine-95-fixed.pkl\", \"rb\") as file:\n clf = pickle.load(file)\n\n\nclass Wine(BaseModel):\n batches: List[conlist(item_type=float, min_items=13, max_items=13)]\n\n\n# make predictions on this endpoint\[email protected](\"/predict\")\ndef predict(wine: Wine):\n batches = wine.batches\n np_batches = np.array(batches)\n pred = clf.predict(np_batches).tolist()\n return {\"Prediction\": pred}\n" ]
[ [ "numpy.array" ] ]
GLaDO8/pytorch_playground
[ "3623de18881a37ce413c92d8a63ea9ba1cc401a5" ]
[ "nnwordembed.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\ntorch.manual_seed(1)\n\nword_to_ix = {\"hello\": 0, \"world\": 1}\n#first argument is the size of the embedded matrix. The second argument is the dimension of each word embedding. \nembeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings\nlookup_tensor = torch.tensor([word_to_ix[\"hello\"], word_to_ix[\"world\"]], dtype=torch.long)\nhello_embed = embeds(lookup_tensor)\nprint(hello_embed)" ]
[ [ "torch.manual_seed", "torch.tensor", "torch.nn.Embedding" ] ]
zactodd/mmdetection
[ "68532eb6f4643ddf0179a4384c8c9e004a2c1d07" ]
[ "mmdet/models/dense_heads/pisa_retinanet_head.py" ]
[ "import torch\n\nfrom mmdet.core import force_fp32, images_to_levels\nfrom ..builder import HEADS\nfrom ..losses import carl_loss, isr_p\nfrom .retina_head import RetinaHead\n\n\[email protected]_module()\nclass PISARetinaHead(RetinaHead):\n \"\"\"PISA Retinanet Head.\n\n The head owns the same structure with Retinanet Head, but differs in two\n aspects:\n 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to\n change the positive loss weights.\n 2. Classification-aware regression loss is adopted as a third loss.\n \"\"\"\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n def loss(self,\n cls_scores,\n bbox_preds,\n gt_bboxes,\n gt_labels,\n img_metas,\n gt_bboxes_ignore=None):\n \"\"\"Compute losses of the head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W)\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W)\n gt_bboxes (list[Tensor]): Ground truth bboxes of each image\n with shape (num_obj, 4).\n gt_labels (list[Tensor]): Ground truth labels of each image\n with shape (num_obj, 4).\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.\n Default: None.\n\n Returns:\n dict: Loss dict, comprise classification loss, regression loss and\n carl loss.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == self.anchor_generator.num_levels\n\n device = cls_scores[0].device\n\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, img_metas, device=device)\n label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n cls_reg_targets = self.get_targets(\n anchor_list,\n valid_flag_list,\n gt_bboxes,\n img_metas,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n label_channels=label_channels,\n return_sampling_results=True)\n if cls_reg_targets is None:\n return None\n (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets\n num_total_samples = (\n num_total_pos + num_total_neg if self.sampling else num_total_pos)\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n # concat all level anchors and flags to a single tensor\n concat_anchor_list = []\n for i in range(len(anchor_list)):\n concat_anchor_list.append(torch.cat(anchor_list[i]))\n all_anchor_list = images_to_levels(concat_anchor_list,\n num_level_anchors)\n\n num_imgs = len(img_metas)\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)\n for cls_score in cls_scores\n ]\n flatten_cls_scores = torch.cat(\n flatten_cls_scores, dim=1).reshape(-1,\n flatten_cls_scores[0].size(-1))\n flatten_bbox_preds = [\n bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n for bbox_pred in bbox_preds\n ]\n flatten_bbox_preds = torch.cat(\n flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))\n flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)\n flatten_label_weights = torch.cat(\n label_weights_list, dim=1).reshape(-1)\n flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)\n flatten_bbox_targets = torch.cat(\n bbox_targets_list, dim=1).reshape(-1, 4)\n flatten_bbox_weights = torch.cat(\n bbox_weights_list, dim=1).reshape(-1, 4)\n\n # Apply ISR-P\n isr_cfg = 
self.train_cfg.get('isr', None)\n if isr_cfg is not None:\n all_targets = (flatten_labels, flatten_label_weights,\n flatten_bbox_targets, flatten_bbox_weights)\n with torch.no_grad():\n all_targets = isr_p(\n flatten_cls_scores,\n flatten_bbox_preds,\n all_targets,\n flatten_anchors,\n sampling_results_list,\n bbox_coder=self.bbox_coder,\n loss_cls=self.loss_cls,\n num_class=self.num_classes,\n **self.train_cfg.isr)\n (flatten_labels, flatten_label_weights, flatten_bbox_targets,\n flatten_bbox_weights) = all_targets\n\n # For convenience we compute the loss once instead of separating it by\n # FPN level, so that we don't need to separate the weights by level again.\n # The result should be the same\n losses_cls = self.loss_cls(\n flatten_cls_scores,\n flatten_labels,\n flatten_label_weights,\n avg_factor=num_total_samples)\n losses_bbox = self.loss_bbox(\n flatten_bbox_preds,\n flatten_bbox_targets,\n flatten_bbox_weights,\n avg_factor=num_total_samples)\n loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n # CARL Loss\n carl_cfg = self.train_cfg.get('carl', None)\n if carl_cfg is not None:\n loss_carl = carl_loss(\n flatten_cls_scores,\n flatten_labels,\n flatten_bbox_preds,\n flatten_bbox_targets,\n self.loss_bbox,\n **self.train_cfg.carl,\n avg_factor=num_total_pos,\n sigmoid=True,\n num_class=self.num_classes)\n loss_dict.update(loss_carl)\n\n return loss_dict\n" ]
[ [ "torch.no_grad", "torch.cat" ] ]
microsoft/iclr2019-learning-to-represent-edits
[ "e5777d6aa6cdeda500cf076646177c48d1cb4622" ]
[ "diff_representation/model/edit_encoder/bag_of_edits_change_encoder.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\nfrom itertools import chain\n\nimport numpy as np\nimport torch\nfrom torch import nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom tqdm import tqdm\nimport sys\n\nfrom diff_representation.change_entry import ChangeExample\nfrom diff_representation.model import nn_utils\nfrom diff_representation.model.embedder import EmbeddingTable\n\n\nclass BagOfEditsChangeEncoder(nn.Module):\n \"\"\"project a CodeChange instance into distributed vectors\"\"\"\n\n def __init__(self, token_embedder, vocab, **kwargs):\n super(BagOfEditsChangeEncoder, self).__init__()\n\n self.token_embedder = token_embedder\n self.token_embedding_size = self.token_embedder.weight.size(1)\n self.vocab = vocab\n self.change_vector_size = self.token_embedding_size * 2\n\n @property\n def device(self):\n return self.token_embedder.device\n\n def forward(self, code_changes, *args, **kwargs):\n \"\"\"\n given the token encodings of the previous and updated code,\n and the diff information (alignment between the tokens between the\n previous and updated code), generate the diff representation\n \"\"\"\n\n added_tokens = []\n added_token_batch_ids = []\n deled_tokens = []\n deled_token_batch_ids = []\n for e_id, example in enumerate(code_changes):\n for entry in example.change_seq:\n tag, token = entry\n if tag == 'ADD':\n token_id = self.vocab[token]\n added_tokens.append(token_id)\n added_token_batch_ids.append(e_id)\n elif tag == 'DEL':\n token_id = self.vocab[token]\n deled_tokens.append(token_id)\n deled_token_batch_ids.append(e_id)\n elif tag == 'REPLACE':\n added_token_id = self.vocab[token[1]]\n deled_token_id = self.vocab[token[0]]\n\n added_tokens.append(added_token_id)\n deled_tokens.append(deled_token_id)\n\n added_token_batch_ids.append(e_id)\n deled_token_batch_ids.append(e_id)\n\n changed_token_ids = added_tokens + deled_tokens\n changed_token_ids = torch.tensor(changed_token_ids, dtype=torch.long, device=self.device)\n # (token_num, embed_size)\n changed_token_embeds = self.token_embedder.weight[changed_token_ids]\n\n added_token_embeds = changed_token_embeds[:len(added_tokens)]\n deled_token_embeds = changed_token_embeds[len(added_tokens):]\n\n added_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,\n device=self.device)\n if added_token_batch_ids:\n added_change_embeds = added_change_embeds.scatter_add_(0,\n torch.tensor(added_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(added_token_embeds),\n added_token_embeds)\n\n deled_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,\n device=self.device)\n if deled_token_batch_ids:\n deled_change_embeds = deled_change_embeds.scatter_add_(0,\n torch.tensor(deled_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(deled_token_embeds),\n deled_token_embeds)\n\n change_vectors = torch.cat([added_change_embeds, deled_change_embeds], dim=-1)\n\n return change_vectors\n\n def encode_code_change(self, prev_code_tokens, updated_code_tokens, code_encoder):\n example = ChangeExample(prev_code_tokens, updated_code_tokens, context=None)\n\n change_vec = self.forward([example]).data.cpu().numpy()[0]\n\n return change_vec\n\n def encode_code_changes(self, examples, code_encoder, batch_size=32):\n \"\"\"encode each change in the list `code_changes`,\n return a 2D numpy array of shape (len(code_changes), 
code_change_embed_dim)\"\"\"\n\n change_vecs = []\n\n for batch_examples in tqdm(nn_utils.batch_iter(examples, batch_size), file=sys.stdout, total=len(examples)):\n batch_change_vecs = self.forward(batch_examples).data.cpu().numpy()\n change_vecs.append(batch_change_vecs)\n\n change_vecs = np.concatenate(change_vecs, axis=0)\n\n return change_vecs\n" ]
[ [ "numpy.concatenate", "torch.tensor", "torch.cat" ] ]
junarwohn/tvm
[ "96c2e06cd063a695b3b485f2bdf8875df55fff1a" ]
[ "tvm_test/run_simple_mod_op2_pth.py" ]
[ "import tvm\nfrom tvm import relay\nfrom tvm import relay\nfrom tvm.runtime.vm import VirtualMachine\nfrom tvm.contrib.download import download_testdata\nfrom SimpleModel import Net\nimport numpy as np\nimport cv2\n\n# PyTorch imports\nimport torch\nimport torchvision\n\n# Time library for speed check\nimport time\n\nin_size = 32\n\ninput_shape = (1, 3, in_size, in_size)\n\n\ndef do_trace(model, inp):\n model_trace = torch.jit.trace(model, inp)\n model_trace.eval()\n return model_trace\n\n\n# model_func = torchvision.models.detection.maskrcnn_resnet50_fpn\n# model = TraceWrapper(model_func(pretrained=True))\n\nmodel = Net()\nmodel.load_state_dict(torch.load('./simple_mod.pth'))\n\nmodel.eval()\ninp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))\n\nwith torch.no_grad():\n out = model(inp)\n script_module = do_trace(model, inp)\n \n\nimg_url = (\n \"https://raw.githubusercontent.com/dmlc/web-data/\" \"master/gluoncv/detection/street_small.jpg\"\n)\nimg_path = download_testdata(img_url, \"test_street_small.jpg\", module=\"data\")\n\nimg = cv2.imread(img_path).astype(\"float32\")\nimg = cv2.resize(img, (in_size, in_size))\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nimg = np.transpose(img / 255.0, [2, 0, 1])\nimg = np.expand_dims(img, axis=0)\n\ninput_name = \"input0\"\nshape_list = [(input_name, input_shape)]\nmod, params = relay.frontend.from_pytorch(script_module, shape_list)\n\ntarget = \"llvm\"\n\nwith tvm.transform.PassContext(opt_level=2, disabled_pass=[\"FoldScaleAxis\"]):\n vm_exec = relay.vm.compile(mod, target=target, params=params)\n\n# dev = tvm.cuda()\ndev = tvm.cpu()\nvm = VirtualMachine(vm_exec, dev)\nvm.set_input(\"main\", **{input_name: img})\ninference_start = time.time()\ntvm_res = vm.run()\ninference_end = time.time()\ninference_time_tvm = inference_end - inference_start\nprint(\"Infernece Time : {}\".format(inference_time_tvm))\n\n\n" ]
[ [ "numpy.random.uniform", "numpy.transpose", "torch.load", "torch.no_grad", "numpy.expand_dims", "torch.jit.trace" ] ]
rogerfitz/tutorials
[ "dae6470bad63b71e755caaff0b69893f5c9a1d63" ]
[ "travel_time_visualization/server.py" ]
[ "from flask import Flask, jsonify,render_template,request\nfrom config import API_KEY\nimport datetime\nfrom collections import defaultdict\nimport requests\nimport pandas as pd\nimport sys\nimport logging\nfrom itertools import repeat\n\napp = Flask(__name__)\ngunicorn_error_logger = logging.getLogger('gunicorn.error')\napp.logger.handlers.extend(gunicorn_error_logger.handlers)\napp.logger.setLevel(logging.DEBUG)\n\nfrom multiprocessing.dummy import Pool as ThreadPool \npool = ThreadPool(20) \nBASE_URL=\"https://maps.googleapis.com/maps/api/\"\napp.logger.debug(datetime.datetime.fromtimestamp(1498924020))\n\nclass GAPIError(Exception):\n status_code = 31337\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\ndef makeRequest(url, API_KEY):\n url+=\"&key=%s\"%API_KEY\n return requests.get(url).json()['rows'][0]['elements'][0]['duration_in_traffic']['value']\ndef getDistanceMatrix(origin,destination,mode,departure_time,traffic_model, API_KEY):\n #UTC Time\n url=BASE_URL+\"distancematrix/json?\"\n params=\"origins=%s&destinations=%s&mode=%s&departure_time=%s&traffic_model=%s\"%(origin,destination,mode,departure_time,traffic_model)\n return makeRequest(url+params, API_KEY)\n\ndef getNearest(dt,offset):\n return dt + (datetime.datetime.min - dt) % datetime.timedelta(minutes=offset)\n\ndef getChartData(starting_address,destination_address, leave_after, hours_to_grab,API_KEY,OFFSET=15):\n start_date=getNearest(leave_after,15)\n request_times=defaultdict(dict)\n dts=[int(leave_after.timestamp())]\n \n for dt in (start_date + datetime.timedelta(minutes=offset) for offset in range(0,60*hours_to_grab,OFFSET)):\n dts.append(int(dt.timestamp()))\n \n request_times={}\n for traffic_model in [\"best_guess\",\"pessimistic\",\"optimistic\"]:\n results=pool.starmap(\n getDistanceMatrix, zip(repeat(starting_address),repeat(destination_address),repeat(\"car\"),dts,repeat(traffic_model), repeat(API_KEY))\n )\n request_times[traffic_model]=results\n request_times[\"index\"]=dts\n travel_times=pd.DataFrame.from_dict(request_times).set_index(\"index\")/60\n viz_df=travel_times.reset_index()\n viz_df['x']=viz_df['index']*1000#Add milliseconds for JS datetime\n del viz_df['index']\n viz_json=viz_df.to_dict(orient=\"list\")\n #to c3 Columns\n columns=[]\n for col,vals in viz_json.items():\n if col!=\"x\":\n vals=[round(x) for x in vals]\n columns.append([col]+vals)\n return columns\n\[email protected](\"/\")\ndef index():\n return render_template('index.html', API_KEY=API_KEY)\n \[email protected]('/data')\ndef data():\n app.logger.debug(request.args) \n leaveAfter=request.args.get(\"leaveAfter\")\n leaveAfter=datetime.datetime.fromtimestamp(int(leaveAfter)/1000)\n USERS_API_KEY=request.args.get(\"API_KEY\",default=API_KEY)\n now=datetime.datetime.now()\n if leaveAfter<now:\n leaveAfter=now\n try:\n response=getChartData(request.args.get(\"startingAddress\"),request.args.get(\"destinationAddress\"),leaveAfter,8, USERS_API_KEY)\n return jsonify(response)\n except:\n raise GAPIError(\"API Key no longer valid\", status_code=31337)\n \n \[email protected](GAPIError)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n \nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
mommy79/AuDi-GIT-turtlebot3_autorace
[ "fd1382246f1ee74ee70857006563184d672a6666" ]
[ "src/mission_node/src/intersection_detector.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\nimport math\n\n\nclass IntersectionDetector:\n def __init__(self):\n self.lower_blue = np.array([85, 90, 120], np.uint8)\n self.upper_blue = np.array([115, 255, 255], np.uint8)\n\n def fn_find_intersection_line(self, img_trans):\n # ROI 영역에 맞게 자른 이미지\n pers_height, pers_width = img_trans.shape[:2] # shape is w384 x h240\n img_gray = cv2.cvtColor(img_trans[:int(pers_height * 1/ 2), :].copy(), cv2.COLOR_RGB2GRAY)\n _, img_intersection = cv2.threshold(img_gray, 180, 255, 0)\n img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))\n img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))\n img_debug = cv2.merge((img_intersection, img_intersection, img_intersection)).copy()\n\n _, list_intersection_contour, _ = cv2.findContours(img_intersection, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n intersection_check = False\n\n for intersection_contour in list_intersection_contour:\n cv2.drawContours(img_debug, [intersection_contour], 0, (0, 0, 255), 2)\n x_stop, y_stop, w_stop, h_stop = cv2.boundingRect(intersection_contour)\n cv2.putText(img_debug, 'w: {}, h: {}'.format(w_stop, h_stop), (intersection_contour[0][0][0]+10, intersection_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))\n if 330 < w_stop:\n cv2.drawContours(img_debug, [intersection_contour], 0, (0, 255, 0), 2)\n intersection_check = True\n\n return intersection_check, img_debug\n\n def fn_find_exit_line(self, img_trans, direction='left'):\n # ROI 영역에 맞게 자른 이미지\n pers_height, pers_width = img_trans.shape[:2] # shape is w384 x h240\n if direction == 'left':\n img_gray = cv2.cvtColor(img_trans[:, int(pers_width * 1/ 2):].copy(), cv2.COLOR_RGB2GRAY)\n else:\n img_gray = cv2.cvtColor(img_trans[:, :int(pers_width * 1/ 2)].copy(), cv2.COLOR_RGB2GRAY)\n _, img_exit = cv2.threshold(img_gray, 190, 255, 0)\n img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))\n img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))\n img_debug = cv2.merge((img_exit, img_exit, img_exit)).copy()\n\n _, list_exit_contour, _ = cv2.findContours(img_exit, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n exit_check = False\n exit_pos = (0, 0)\n\n for exit_contour in list_exit_contour:\n cv2.drawContours(img_debug, [exit_contour], 0, (0, 0, 255), 2)\n x_exit, y_exit, w_exit, h_exit = cv2.boundingRect(exit_contour)\n bottom_most_pos = tuple(exit_contour[exit_contour[:, :, 1].argmax()][0])\n val_height = h_exit\n for pos_y in range(pers_height-1, 0, -1):\n if img_gray[pos_y, bottom_most_pos[0]] != 0:\n val_height = pos_y\n break\n\n cv2.putText(img_debug, 'w: {}, h: {}, length: {}'.format(w_exit, h_exit, val_height), (exit_contour[0][0][0]+10, exit_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))\n\n if h_exit > val_height * 4/5 and h_exit > pers_height/2:\n cv2.drawContours(img_debug, [exit_contour], 0, (0, 255, 0), 2)\n exit_pos = exit_contour[0][0]\n exit_check = True\n\n return exit_check, exit_pos, img_debug\n\n def fn_find_direction_sign(self, img_ori):\n left_sign_detect = False\n right_sign_detect = False\n\n img_height, img_width = img_ori.shape[:2]\n img_roi = img_ori[:int(img_height*1 / 2), :].copy()\n img_hsv = cv2.cvtColor(img_roi, cv2.COLOR_BGR2HSV)\n\n # Hsv fillter - Blue color\n img_mask_b = cv2.inRange(img_hsv, self.lower_blue, self.upper_blue)\n img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_OPEN, 
np.ones((7, 7), np.uint8))\n img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))\n #_, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n _, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n img_blue = cv2.bitwise_and(img_roi, img_roi, mask=img_mask_b)\n img_debug = img_roi.copy()\n\n list_obj = []\n\n for obj_contour in list_obj_contour:\n #cv2.drawContours(img_blue, [contour], 0, (0, 0, 255), 2)\n x, y, w, h = cv2.boundingRect(obj_contour)\n area = cv2.contourArea(obj_contour)\n aspect_ratio = float(w) / h\n area_ratio = float(area) / (w*h)\n cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 0, 255), 2)\n cv2.putText(img_debug, 'w: {}, h: {}, aspect_ratio: {:.2f}, area_ratio: {:.2f}'.format(w, h, aspect_ratio, area_ratio), (x+10, y+10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 127, 0))\n\n if (50 < w < 150) and (50 < h < 150) and (0.8 < aspect_ratio < 2.5) and (area_ratio > 0.5):\n cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 255, 255), 2)\n list_obj.append((img_roi[y:y+h, x:x+w].copy(), (x, y, w, h)))\n\n for (img_obj, (obj_x, obj_y, obj_w, obj_h)) in list_obj:\n img_obj_gray = cv2.cvtColor(img_obj, cv2.COLOR_BGR2GRAY)\n _, img_obj_binary = cv2.threshold(img_obj_gray, 180, 255, cv2.THRESH_BINARY)\n img_obj_binary = cv2.morphologyEx(img_obj_binary, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))\n _, list_arrow_contour, _ = cv2.findContours(img_obj_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n obj_x_mid = int(obj_w / 2)\n obj_y_mid = int(obj_h / 2)\n\n min_val_dis = 30\n bottom_most_pos = None\n\n for arrow_contour in list_arrow_contour:\n mask_arrow = np.zeros(img_obj_gray.shape, np.uint8)\n cv2.drawContours(mask_arrow, [arrow_contour], 0, 255, -1)\n arrow_x, arrow_y, arrow_w, arrow_h = cv2.boundingRect(arrow_contour)\n cv2.rectangle(img_debug, (obj_x + arrow_x, obj_y + arrow_y), (obj_x + arrow_x + arrow_w, arrow_y + obj_y + arrow_h), (255, 255, 0), 1)\n arrow_area = cv2.contourArea(arrow_contour)\n arrow_aspect_ratio = float(arrow_w) / arrow_h\n arrow_area_ratio = float(arrow_area) / (arrow_w * arrow_h)\n\n arrow_x_mid = int(arrow_x + arrow_w / 2)\n arrow_y_mid = int(arrow_y + arrow_h / 2)\n\n if (0.4 * obj_w < arrow_w) and (0.4 * obj_h < arrow_h) and (0.5 < arrow_aspect_ratio < 2) and (arrow_area_ratio > 0.3):\n val_dis = math.sqrt((arrow_x_mid - obj_x_mid) ** 2 + (arrow_y_mid - obj_y_mid) ** 2)\n if val_dis < min_val_dis:\n min_val_dis = val_dis\n\n #left_most_pos = tuple(obj_contour[obj_contour[:, :, 0].argmin()][0])\n #right_most_pos = tuple(obj_contour[obj_contour[:, :, 0].argmax()][0])\n #top_most_pos = tuple(obj_contour[obj_contour[:, :, 1].argmin()][0])\n bottom_most_pos = tuple(arrow_contour[arrow_contour[:, :, 1].argmax()][0])\n\n if bottom_most_pos is not None:\n cv2.circle(img_debug, (obj_x + bottom_most_pos[0], obj_y + bottom_most_pos[1]), 4, (0, 0, 255), -1)\n cv2.line(img_debug, (obj_x + obj_x_mid, obj_y), (obj_x + obj_x_mid, obj_y + obj_h), (255, 0, 255), 2)\n if bottom_most_pos[0] > obj_x_mid:\n left_sign_detect = True\n cv2.putText(img_debug, 'LEFT', (obj_x+10, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))\n cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (255, 0, 0), 2)\n else:\n right_sign_detect = True\n cv2.putText(img_debug, 'RIGHT', (obj_x+3, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))\n cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (0, 255, 0), 2)\n\n return 
left_sign_detect, right_sign_detect, np.vstack((img_debug, img_blue))\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.zeros", "numpy.vstack" ] ]
ahoho/numpyro
[ "64e94e346c51a6c0c1ba51aa7b608e73513f158f" ]
[ "numpyro/distributions/transforms.py" ]
[ "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom jax import lax, ops, tree_flatten, tree_map, vmap\nfrom jax.flatten_util import ravel_pytree\nfrom jax.nn import softplus\nimport jax.numpy as jnp\nfrom jax.scipy.linalg import solve_triangular\nfrom jax.scipy.special import expit, logit\n\nfrom numpyro.distributions import constraints\nfrom numpyro.distributions.util import matrix_to_tril_vec, signed_stick_breaking_tril, sum_rightmost, vec_to_tril_matrix\nfrom numpyro.util import not_jax_tracer\n\n__all__ = [\n 'biject_to',\n 'AbsTransform',\n 'AffineTransform',\n 'CholeskyTransform',\n 'ComposeTransform',\n 'CorrCholeskyTransform',\n 'CorrMatrixCholeskyTransform',\n 'ExpTransform',\n 'SoftplusTransform',\n 'IdentityTransform',\n 'InvCholeskyTransform',\n 'LowerCholeskyTransform',\n 'LowerCholeskyAffine',\n 'PermuteTransform',\n 'PowerTransform',\n 'SigmoidTransform',\n 'SoftplusTransform',\n 'SoftplusLowerCholeskyTransform',\n 'StickBreakingTransform',\n 'Transform',\n 'UnpackTransform',\n]\n\n\ndef _clipped_expit(x):\n finfo = jnp.finfo(jnp.result_type(x))\n return jnp.clip(expit(x), a_min=finfo.tiny, a_max=1. - finfo.eps)\n\n\nclass Transform(object):\n domain = constraints.real\n codomain = constraints.real\n _inv = None\n\n @property\n def event_dim(self):\n warnings.warn(\"transform.event_dim is deprecated. Please use Transform.domain.event_dim to \"\n \"get input event dim or Transform.codomain.event_dim to get output event dim.\",\n FutureWarning)\n return self.domain.event_dim\n\n @property\n def inv(self):\n inv = None\n if self._inv is not None:\n inv = self._inv()\n if inv is None:\n inv = _InverseTransform(self)\n self._inv = weakref.ref(inv)\n return inv\n\n def __call__(self, x):\n return NotImplementedError\n\n def _inverse(self, y):\n raise NotImplementedError\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n raise NotImplementedError\n\n def call_with_intermediates(self, x):\n return self(x), None\n\n def forward_shape(self, shape):\n \"\"\"\n Infers the shape of the forward computation, given the input shape.\n Defaults to preserving shape.\n \"\"\"\n return shape\n\n def inverse_shape(self, shape):\n \"\"\"\n Infers the shapes of the inverse computation, given the output shape.\n Defaults to preserving shape.\n \"\"\"\n return shape\n\n\nclass _InverseTransform(Transform):\n def __init__(self, transform):\n super().__init__()\n self._inv = transform\n\n @property\n def domain(self):\n return self._inv.codomain\n\n @property\n def codomain(self):\n return self._inv.domain\n\n @property\n def inv(self):\n return self._inv\n\n def __call__(self, x):\n return self._inv._inverse(x)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # NB: we don't use intermediates for inverse transform\n return -self._inv.log_abs_det_jacobian(y, x, None)\n\n def forward_shape(self, shape):\n return self._inv.inverse_shape(shape)\n\n def inverse_shape(self, shape):\n return self._inv.forward_shape(shape)\n\n\nclass AbsTransform(Transform):\n domain = constraints.real\n codomain = constraints.positive\n\n def __eq__(self, other):\n return isinstance(other, AbsTransform)\n\n def __call__(self, x):\n return jnp.abs(x)\n\n def _inverse(self, y):\n return y\n\n\nclass AffineTransform(Transform):\n \"\"\"\n .. 
note:: When `scale` is a JAX tracer, we always assume that `scale > 0`\n when calculating `codomain`.\n \"\"\"\n def __init__(self, loc, scale, domain=constraints.real):\n self.loc = loc\n self.scale = scale\n self.domain = domain\n\n @property\n def codomain(self):\n if self.domain is constraints.real:\n return constraints.real\n elif isinstance(self.domain, constraints.greater_than):\n if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):\n return constraints.less_than(self(self.domain.lower_bound))\n # we suppose scale > 0 for any tracer\n else:\n return constraints.greater_than(self(self.domain.lower_bound))\n elif isinstance(self.domain, constraints.less_than):\n if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):\n return constraints.greater_than(self(self.domain.upper_bound))\n # we suppose scale > 0 for any tracer\n else:\n return constraints.less_than(self(self.domain.upper_bound))\n elif isinstance(self.domain, constraints.interval):\n if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):\n return constraints.interval(self(self.domain.upper_bound),\n self(self.domain.lower_bound))\n else:\n return constraints.interval(self(self.domain.lower_bound),\n self(self.domain.upper_bound))\n else:\n raise NotImplementedError\n\n def __call__(self, x):\n return self.loc + self.scale * x\n\n def _inverse(self, y):\n return (y - self.loc) / self.scale\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.broadcast_to(jnp.log(jnp.abs(self.scale)), jnp.shape(x))\n\n def forward_shape(self, shape):\n return lax.broadcast_shapes(shape,\n getattr(self.loc, \"shape\", ()),\n getattr(self.scale, \"shape\", ()))\n\n def inverse_shape(self, shape):\n return lax.broadcast_shapes(shape,\n getattr(self.loc, \"shape\", ()),\n getattr(self.scale, \"shape\", ()))\n\n\ndef _get_compose_transform_input_event_dim(parts):\n input_event_dim = parts[-1].domain.event_dim\n for part in parts[len(parts) - 1::-1]:\n input_event_dim = part.domain.event_dim + max(input_event_dim - part.codomain.event_dim, 0)\n return input_event_dim\n\n\ndef _get_compose_transform_output_event_dim(parts):\n output_event_dim = parts[0].codomain.event_dim\n for part in parts[1:]:\n output_event_dim = part.codomain.event_dim + max(output_event_dim - part.domain.event_dim, 0)\n return output_event_dim\n\n\nclass ComposeTransform(Transform):\n def __init__(self, parts):\n self.parts = parts\n\n @property\n def domain(self):\n input_event_dim = _get_compose_transform_input_event_dim(self.parts)\n first_input_event_dim = self.parts[0].domain.event_dim\n assert input_event_dim >= first_input_event_dim\n if input_event_dim == first_input_event_dim:\n return self.parts[0].domain\n else:\n return constraints.independent(self.parts[0].domain, input_event_dim - first_input_event_dim)\n\n @property\n def codomain(self):\n output_event_dim = _get_compose_transform_output_event_dim(self.parts)\n last_output_event_dim = self.parts[-1].codomain.event_dim\n assert output_event_dim >= last_output_event_dim\n if output_event_dim == last_output_event_dim:\n return self.parts[-1].codomain\n else:\n return constraints.independent(self.parts[-1].codomain, output_event_dim - last_output_event_dim)\n\n def __call__(self, x):\n for part in self.parts:\n x = part(x)\n return x\n\n def _inverse(self, y):\n for part in self.parts[::-1]:\n y = part.inv(y)\n return y\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is not None:\n if len(intermediates) != len(self.parts):\n 
raise ValueError('Intermediates array has length = {}. Expected = {}.'\n .format(len(intermediates), len(self.parts)))\n\n result = 0.\n input_event_dim = self.domain.event_dim\n for i, part in enumerate(self.parts[:-1]):\n y_tmp = part(x) if intermediates is None else intermediates[i][0]\n inter = None if intermediates is None else intermediates[i][1]\n logdet = part.log_abs_det_jacobian(x, y_tmp, intermediates=inter)\n batch_ndim = input_event_dim - part.domain.event_dim\n result = result + sum_rightmost(logdet, batch_ndim)\n input_event_dim = part.codomain.event_dim + batch_ndim\n x = y_tmp\n # account for the last transform, where y is available\n inter = None if intermediates is None else intermediates[-1]\n part = self.parts[-1]\n logdet = part.log_abs_det_jacobian(x, y, intermediates=inter)\n result = result + sum_rightmost(logdet, input_event_dim - part.domain.event_dim)\n return result\n\n def call_with_intermediates(self, x):\n intermediates = []\n for part in self.parts[:-1]:\n x, inter = part.call_with_intermediates(x)\n intermediates.append([x, inter])\n # NB: we don't need to hold the last output value in `intermediates`\n x, inter = self.parts[-1].call_with_intermediates(x)\n intermediates.append(inter)\n return x, intermediates\n\n def forward_shape(self, shape):\n for part in self.parts:\n shape = part.forward_shape(shape)\n return shape\n\n def inverse_shape(self, shape):\n for part in reversed(self.parts):\n shape = part.inverse_shape(shape)\n return shape\n\n\ndef _matrix_forward_shape(shape, offset=0):\n # Reshape from (..., N) to (..., D, D).\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions in input\")\n N = shape[-1]\n D = round((0.25 + 2 * N) ** 0.5 - 0.5)\n if D * (D + 1) // 2 != N:\n raise ValueError(\"Input is not a flattened lower-diagonal number\")\n D = D - offset\n return shape[:-1] + (D, D)\n\n\ndef _matrix_inverse_shape(shape, offset=0):\n # Reshape from (..., D, D) to (..., N).\n if len(shape) < 2:\n raise ValueError(\"Too few dimensions on input\")\n if shape[-2] != shape[-1]:\n raise ValueError(\"Input is not square\")\n D = shape[-1] + offset\n N = D * (D + 1) // 2\n return shape[:-2] + (N,)\n\n\nclass CholeskyTransform(Transform):\n r\"\"\"\n Transform via the mapping :math:`y = cholesky(x)`, where `x` is a\n positive definite matrix.\n \"\"\"\n domain = constraints.positive_definite\n codomain = constraints.lower_cholesky\n\n def __call__(self, x):\n return jnp.linalg.cholesky(x)\n\n def _inverse(self, y):\n return jnp.matmul(y, jnp.swapaxes(y, -2, -1))\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13\n n = jnp.shape(x)[-1]\n order = -jnp.arange(n, 0, -1)\n return -n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)\n\n\nclass CorrCholeskyTransform(Transform):\n r\"\"\"\n Transforms an unconstrained real vector :math:`x` with length :math:`D*(D-1)/2` into the\n Cholesky factor of a D-dimensional correlation matrix. This Cholesky factor is a lower\n triangular matrix with positive diagonals and unit Euclidean norm for each row.\n The transform is processed as follows:\n\n 1. First we convert :math:`x` into a lower triangular matrix with the following order:\n\n .. math::\n \\begin{bmatrix}\n 1 & 0 & 0 & 0 \\\\\n x_0 & 1 & 0 & 0 \\\\\n x_1 & x_2 & 1 & 0 \\\\\n x_3 & x_4 & x_5 & 1\n \\end{bmatrix}\n\n 2. 
For each row :math:`X_i` of the lower triangular part, we apply a *signed* version of\n class :class:`StickBreakingTransform` to transform :math:`X_i` into a\n unit Euclidean length vector using the following steps:\n\n a. Scales into the interval :math:`(-1, 1)` domain: :math:`r_i = \\tanh(X_i)`.\n b. Transforms into an unsigned domain: :math:`z_i = r_i^2`.\n c. Applies :math:`s_i = StickBreakingTransform(z_i)`.\n d. Transforms back into signed domain: :math:`y_i = (sign(r_i), 1) * \\sqrt{s_i}`.\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.corr_cholesky\n\n def __call__(self, x):\n # we interchange step 1 and step 2.a for a better performance\n t = jnp.tanh(x)\n return signed_stick_breaking_tril(t)\n\n def _inverse(self, y):\n # inverse stick-breaking\n z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)\n pad_width = [(0, 0)] * y.ndim\n pad_width[-1] = (1, 0)\n z1m_cumprod_shifted = jnp.pad(z1m_cumprod[..., :-1], pad_width,\n mode=\"constant\", constant_values=1.)\n t = matrix_to_tril_vec(y, diagonal=-1) / jnp.sqrt(\n matrix_to_tril_vec(z1m_cumprod_shifted, diagonal=-1))\n # inverse of tanh\n x = jnp.log((1 + t) / (1 - t)) / 2\n return x\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # NB: because domain and codomain are two spaces with different dimensions, determinant of\n # Jacobian is not well-defined. Here we return `log_abs_det_jacobian` of `x` and the\n # flatten lower triangular part of `y`.\n\n # stick_breaking_logdet = log(y / r) = log(z_cumprod) (modulo right shifted)\n z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)\n # by taking diagonal=-2, we don't need to shift z_cumprod to the right\n # NB: diagonal=-2 works fine for (2 x 2) matrix, where we get an empty array\n z1m_cumprod_tril = matrix_to_tril_vec(z1m_cumprod, diagonal=-2)\n stick_breaking_logdet = 0.5 * jnp.sum(jnp.log(z1m_cumprod_tril), axis=-1)\n\n tanh_logdet = -2 * jnp.sum(x + softplus(-2 * x) - jnp.log(2.), axis=-1)\n return stick_breaking_logdet + tanh_logdet\n\n def forward_shape(self, shape):\n return _matrix_forward_shape(shape, offset=-1)\n\n def inverse_shape(self, shape):\n return _matrix_inverse_shape(shape, offset=-1)\n\n\nclass CorrMatrixCholeskyTransform(CholeskyTransform):\n r\"\"\"\n Transform via the mapping :math:`y = cholesky(x)`, where `x` is a\n correlation matrix.\n \"\"\"\n domain = constraints.corr_matrix\n codomain = constraints.corr_cholesky\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # NB: see derivation in LKJCholesky implementation\n n = jnp.shape(x)[-1]\n order = -jnp.arange(n - 1, -1, -1)\n return jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)\n\n\nclass ExpTransform(Transform):\n # TODO: refine domain/codomain logic through setters, especially when\n # transforms for inverses are supported\n def __init__(self, domain=constraints.real):\n self.domain = domain\n\n @property\n def codomain(self):\n if self.domain is constraints.real:\n return constraints.positive\n elif isinstance(self.domain, constraints.greater_than):\n return constraints.greater_than(self.__call__(self.domain.lower_bound))\n elif isinstance(self.domain, constraints.interval):\n return constraints.interval(self.__call__(self.domain.lower_bound),\n self.__call__(self.domain.upper_bound))\n else:\n raise NotImplementedError\n\n def __call__(self, x):\n # XXX consider to clamp from below for stability if necessary\n return jnp.exp(x)\n\n def _inverse(self, y):\n return jnp.log(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n 
return x\n\n\nclass IdentityTransform(Transform):\n\n def __call__(self, x):\n return x\n\n def _inverse(self, y):\n return y\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.zeros_like(x)\n\n\nclass IndependentTransform(Transform):\n \"\"\"\n Wraps a transform by aggregating over ``reinterpreted_batch_ndims``-many\n dims in :meth:`check`, so that an event is valid only if all its\n independent entries are valid.\n \"\"\"\n def __init__(self, base_transform, reinterpreted_batch_ndims):\n assert isinstance(base_transform, Transform)\n assert isinstance(reinterpreted_batch_ndims, int)\n assert reinterpreted_batch_ndims >= 0\n self.base_transform = base_transform\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n super().__init__()\n\n @property\n def domain(self):\n return constraints.independent(self.base_transform.domain, self.reinterpreted_batch_ndims)\n\n @property\n def codomain(self):\n return constraints.independent(self.base_transform.codomain, self.reinterpreted_batch_ndims)\n\n def __call__(self, x):\n return self.base_transform(x)\n\n def _inverse(self, y):\n return self.base_transform._inverse(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n result = self.base_transform.log_abs_det_jacobian(x, y, intermediates=intermediates)\n if jnp.ndim(result) < self.reinterpreted_batch_ndims:\n expected = self.domain.event_dim\n raise ValueError(f\"Expected x.dim() >= {expected} but got {jnp.ndim(x)}\")\n return sum_rightmost(result, self.reinterpreted_batch_ndims)\n\n def call_with_intermediates(self, x):\n return self.base_transform.call_with_intermediates(x)\n\n def forward_shape(self, shape):\n return self.base_transform.forward_shape(shape)\n\n def inverse_shape(self, shape):\n return self.base_transform.inverse_shape(shape)\n\n\nclass InvCholeskyTransform(Transform):\n r\"\"\"\n Transform via the mapping :math:`y = x @ x.T`, where `x` is a lower\n triangular matrix with positive diagonal.\n \"\"\"\n\n def __init__(self, domain=constraints.lower_cholesky):\n warnings.warn(\"InvCholeskyTransform is deprecated. 
Please use CholeskyTransform\"\n \" or CorrMatrixCholeskyTransform instead.\", FutureWarning)\n assert domain in [constraints.lower_cholesky, constraints.corr_cholesky]\n self.domain = domain\n\n @property\n def codomain(self):\n if self.domain is constraints.lower_cholesky:\n return constraints.positive_definite\n elif self.domain is constraints.corr_cholesky:\n return constraints.corr_matrix\n\n def __call__(self, x):\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n\n def _inverse(self, y):\n return jnp.linalg.cholesky(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n if self.domain is constraints.lower_cholesky:\n # Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13\n n = jnp.shape(x)[-1]\n order = jnp.arange(n, 0, -1)\n return n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)\n else:\n # NB: see derivation in LKJCholesky implementation\n n = jnp.shape(x)[-1]\n order = jnp.arange(n - 1, -1, -1)\n return jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)\n\n\nclass LowerCholeskyAffine(Transform):\n r\"\"\"\n Transform via the mapping :math:`y = loc + scale\\_tril\\ @\\ x`.\n\n :param loc: a real vector.\n :param scale_tril: a lower triangular matrix with positive diagonal.\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.real_vector\n\n def __init__(self, loc, scale_tril):\n if jnp.ndim(scale_tril) != 2:\n raise ValueError(\"Only support 2-dimensional scale_tril matrix. \"\n \"Please make a feature request if you need to \"\n \"use this transform with batched scale_tril.\")\n self.loc = loc\n self.scale_tril = scale_tril\n\n def __call__(self, x):\n return self.loc + jnp.squeeze(jnp.matmul(self.scale_tril, x[..., jnp.newaxis]), axis=-1)\n\n def _inverse(self, y):\n y = y - self.loc\n original_shape = jnp.shape(y)\n yt = jnp.reshape(y, (-1, original_shape[-1])).T\n xt = solve_triangular(self.scale_tril, yt, lower=True)\n return jnp.reshape(xt.T, original_shape)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.broadcast_to(jnp.log(jnp.diagonal(self.scale_tril, axis1=-2, axis2=-1)).sum(-1),\n jnp.shape(x)[:-1])\n\n def forward_shape(self, shape):\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions on input\")\n return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])\n\n def inverse_shape(self, shape):\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions on input\")\n return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])\n\n\nclass LowerCholeskyTransform(Transform):\n domain = constraints.real_vector\n codomain = constraints.lower_cholesky\n\n def __call__(self, x):\n n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)\n z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)\n diag = jnp.exp(x[..., -n:])\n return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)\n\n def _inverse(self, y):\n z = matrix_to_tril_vec(y, diagonal=-1)\n return jnp.concatenate([z, jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1))], axis=-1)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # the jacobian is diagonal, so logdet is the sum of diagonal `exp` transform\n n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)\n return x[..., -n:].sum(-1)\n\n def forward_shape(self, shape):\n return _matrix_forward_shape(shape)\n\n def inverse_shape(self, shape):\n return _matrix_inverse_shape(shape)\n\n\nclass OrderedTransform(Transform):\n \"\"\"\n Transform a real vector to an ordered vector.\n\n **References:**\n\n 
1. *Stan Reference Manual v2.20, section 10.6*,\n Stan Development Team\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.ordered_vector\n\n def __call__(self, x):\n z = jnp.concatenate([x[..., :1], jnp.exp(x[..., 1:])], axis=-1)\n return jnp.cumsum(z, axis=-1)\n\n def _inverse(self, y):\n x = jnp.log(y[..., 1:] - y[..., :-1])\n return jnp.concatenate([y[..., :1], x], axis=-1)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.sum(x[..., 1:], -1)\n\n\nclass PermuteTransform(Transform):\n domain = constraints.real_vector\n codomain = constraints.real_vector\n\n def __init__(self, permutation):\n self.permutation = permutation\n\n def __call__(self, x):\n return x[..., self.permutation]\n\n def _inverse(self, y):\n size = self.permutation.size\n permutation_inv = ops.index_update(jnp.zeros(size, dtype=jnp.result_type(int)),\n self.permutation,\n jnp.arange(size))\n return y[..., permutation_inv]\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.full(jnp.shape(x)[:-1], 0.)\n\n\nclass PowerTransform(Transform):\n domain = constraints.positive\n codomain = constraints.positive\n\n def __init__(self, exponent):\n self.exponent = exponent\n\n def __call__(self, x):\n return jnp.power(x, self.exponent)\n\n def _inverse(self, y):\n return jnp.power(y, 1 / self.exponent)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.log(jnp.abs(self.exponent * y / x))\n\n def forward_shape(self, shape):\n return lax.broadcast_shapes(shape, getattr(self.exponent, \"shape\", ()))\n\n def inverse_shape(self, shape):\n return lax.broadcast_shapes(shape, getattr(self.exponent, \"shape\", ()))\n\n\nclass SigmoidTransform(Transform):\n codomain = constraints.unit_interval\n\n def __call__(self, x):\n return _clipped_expit(x)\n\n def _inverse(self, y):\n return logit(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n x_abs = jnp.abs(x)\n return -x_abs - 2 * jnp.log1p(jnp.exp(-x_abs))\n\n\ndef _softplus_inv(y):\n return jnp.log(-jnp.expm1(-y)) + y\n\n\nclass SoftplusTransform(Transform):\n r\"\"\"\n Transform from unconstrained space to positive domain via softplus :math:`y = \\log(1 + \\exp(x))`.\n The inverse is computed as :math:`x = \\log(\\exp(y) - 1)`.\n \"\"\"\n domain = constraints.real\n codomain = constraints.softplus_positive\n\n def __call__(self, x):\n return softplus(x)\n\n def _inverse(self, y):\n return _softplus_inv(y)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return -softplus(-x)\n\n\nclass SoftplusLowerCholeskyTransform(Transform):\n \"\"\"\n Transform from unconstrained vector to lower-triangular matrices with\n nonnegative diagonal entries. 
This is useful for parameterizing positive\n definite matrices in terms of their Cholesky factorization.\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.softplus_lower_cholesky\n\n def __call__(self, x):\n n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)\n z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)\n diag = softplus(x[..., -n:])\n return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)\n\n def _inverse(self, y):\n z = matrix_to_tril_vec(y, diagonal=-1)\n diag = _softplus_inv(jnp.diagonal(y, axis1=-2, axis2=-1))\n return jnp.concatenate([z, diag], axis=-1)\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # the jacobian is diagonal, so logdet is the sum of diagonal `exp` transform\n n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)\n return -softplus(-x[..., -n:]).sum(-1)\n\n def forward_shape(self, shape):\n return _matrix_forward_shape(shape)\n\n def inverse_shape(self, shape):\n return _matrix_inverse_shape(shape)\n\n\nclass StickBreakingTransform(Transform):\n domain = constraints.real_vector\n codomain = constraints.simplex\n\n def __call__(self, x):\n # we shift x to obtain a balanced mapping (0, 0, ..., 0) -> (1/K, 1/K, ..., 1/K)\n x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))\n # convert to probabilities (relative to the remaining) of each fraction of the stick\n z = _clipped_expit(x)\n z1m_cumprod = jnp.cumprod(1 - z, axis=-1)\n pad_width = [(0, 0)] * x.ndim\n pad_width[-1] = (0, 1)\n z_padded = jnp.pad(z, pad_width, mode=\"constant\", constant_values=1.)\n pad_width = [(0, 0)] * x.ndim\n pad_width[-1] = (1, 0)\n z1m_cumprod_shifted = jnp.pad(z1m_cumprod, pad_width, mode=\"constant\", constant_values=1.)\n return z_padded * z1m_cumprod_shifted\n\n def _inverse(self, y):\n y_crop = y[..., :-1]\n z1m_cumprod = jnp.clip(1 - jnp.cumsum(y_crop, axis=-1), a_min=jnp.finfo(y.dtype).tiny)\n # hence x = logit(z) = log(z / (1 - z)) = y[::-1] / z1m_cumprod\n x = jnp.log(y_crop / z1m_cumprod)\n return x + jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n # Ref: https://mc-stan.org/docs/2_19/reference-manual/simplex-transform-section.html\n # |det|(J) = Product(y * (1 - z))\n x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))\n z = jnp.clip(expit(x), a_min=jnp.finfo(x.dtype).tiny)\n # XXX we use the identity 1 - z = z * exp(-x) to not worry about\n # the case z ~ 1\n return jnp.sum(jnp.log(y[..., :-1] * z) - x, axis=-1)\n\n def forward_shape(self, shape):\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions on input\")\n return shape[:-1] + (shape[-1] + 1,)\n\n def inverse_shape(self, shape):\n if len(shape) < 1:\n raise ValueError(\"Too few dimensions on input\")\n return shape[:-1] + (shape[-1] - 1,)\n\n\nclass UnpackTransform(Transform):\n \"\"\"\n Transforms a contiguous array to a pytree of subarrays.\n\n :param unpack_fn: callable used to unpack a contiguous array.\n \"\"\"\n domain = constraints.real_vector\n codomain = constraints.dependent\n\n def __init__(self, unpack_fn):\n self.unpack_fn = unpack_fn\n\n def __call__(self, x):\n batch_shape = x.shape[:-1]\n if batch_shape:\n unpacked = vmap(self.unpack_fn)(x.reshape((-1,) + x.shape[-1:]))\n return tree_map(lambda z: jnp.reshape(z, batch_shape + z.shape[1:]), unpacked)\n else:\n return self.unpack_fn(x)\n\n def _inverse(self, y):\n leading_dims = [v.shape[0] if jnp.ndim(v) > 0 else 0\n for v in tree_flatten(y)[0]]\n d0 = leading_dims[0]\n not_scalar = d0 > 0 or len(leading_dims) > 1\n if not_scalar and 
all(d == d0 for d in leading_dims[1:]):\n warnings.warn(\"UnpackTransform.inv might lead to an unexpected behavior because it\"\n \" cannot transform a batch of unpacked arrays.\")\n return ravel_pytree(y)[0]\n\n def log_abs_det_jacobian(self, x, y, intermediates=None):\n return jnp.zeros(jnp.shape(x)[:-1])\n\n def forward_shape(self, shape):\n raise NotImplementedError\n\n def inverse_shape(self, shape):\n raise NotImplementedError\n\n\n##########################################################\n# CONSTRAINT_REGISTRY\n##########################################################\n\nclass ConstraintRegistry(object):\n def __init__(self):\n self._registry = {}\n\n def register(self, constraint, factory=None):\n if factory is None:\n return lambda factory: self.register(constraint, factory)\n\n if isinstance(constraint, constraints.Constraint):\n constraint = type(constraint)\n\n self._registry[constraint] = factory\n\n def __call__(self, constraint):\n try:\n factory = self._registry[type(constraint)]\n except KeyError as e:\n raise NotImplementedError from e\n\n return factory(constraint)\n\n\nbiject_to = ConstraintRegistry()\n\n\n@biject_to.register(constraints.corr_cholesky)\ndef _transform_to_corr_cholesky(constraint):\n return CorrCholeskyTransform()\n\n\n@biject_to.register(constraints.corr_matrix)\ndef _transform_to_corr_matrix(constraint):\n return ComposeTransform([CorrCholeskyTransform(),\n CorrMatrixCholeskyTransform().inv])\n\n\n@biject_to.register(constraints.greater_than)\ndef _transform_to_greater_than(constraint):\n if constraint is constraints.positive:\n return ExpTransform()\n return ComposeTransform([ExpTransform(),\n AffineTransform(constraint.lower_bound, 1,\n domain=constraints.positive)])\n\n\n@biject_to.register(constraints.less_than)\ndef _transform_to_less_than(constraint):\n return ComposeTransform([ExpTransform(),\n AffineTransform(constraint.upper_bound, -1,\n domain=constraints.positive)])\n\n\n@biject_to.register(constraints.independent)\ndef _biject_to_independent(constraint):\n return IndependentTransform(biject_to(constraint.base_constraint),\n constraint.reinterpreted_batch_ndims)\n\n\n@biject_to.register(constraints.interval)\ndef _transform_to_interval(constraint):\n if constraint is constraints.unit_interval:\n return SigmoidTransform()\n scale = constraint.upper_bound - constraint.lower_bound\n return ComposeTransform([SigmoidTransform(),\n AffineTransform(constraint.lower_bound, scale,\n domain=constraints.unit_interval)])\n\n\n@biject_to.register(constraints.lower_cholesky)\ndef _transform_to_lower_cholesky(constraint):\n return LowerCholeskyTransform()\n\n\n@biject_to.register(constraints.ordered_vector)\ndef _transform_to_ordered_vector(constraint):\n return OrderedTransform()\n\n\n@biject_to.register(constraints.positive_definite)\ndef _transform_to_positive_definite(constraint):\n return ComposeTransform([LowerCholeskyTransform(), CholeskyTransform().inv])\n\n\n@biject_to.register(constraints.positive_ordered_vector)\ndef _transform_to_positive_ordered_vector(constraint):\n return ComposeTransform([OrderedTransform(), ExpTransform()])\n\n\n@biject_to.register(constraints.real)\ndef _transform_to_real(constraint):\n return IdentityTransform()\n\n\n@biject_to.register(constraints.softplus_positive)\ndef _transform_to_softplus_positive(constraint):\n return SoftplusTransform()\n\n\n@biject_to.register(constraints.softplus_lower_cholesky)\ndef _transform_to_softplus_lower_cholesky(constraint):\n return 
SoftplusLowerCholeskyTransform()\n\n\n@biject_to.register(constraints.simplex)\ndef _transform_to_simplex(constraint):\n return StickBreakingTransform()\n" ]
[ [ "numpy.less" ] ]
tvieijra/netket
[ "ef3ff32b242f25b6a6ae0f08db1aada85775a2ea" ]
[ "Test/Machine/rbm.py" ]
[ "# Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport netket\nimport numpy as _np\n\n__all__ = [\"PyRbm\"]\n\n\nclass PyRbm(netket.machine.CxxMachine):\n \"\"\"\n __Do not use me in production code!__\n\n A proof of concept implementation of a complex-valued RBM in pure Python.\n This is an example of how to subclass `CxxMachine` so that the machine will\n be usable with NetKet's C++ core.\n\n This class can be used as a drop-in replacement for `RbmSpin`.\n \"\"\"\n\n def __init__(\n self, hilbert, alpha=None, use_visible_bias=True, use_hidden_bias=True\n ):\n r\"\"\"Constructs a new RBM.\n\n Args:\n hilbert: Hilbert space.\n alpha: `alpha * hilbert.size` is the number of hidden spins.\n use_visible_bias: specifies whether to use a bias for visible\n spins.\n use_hidden_bias: specifies whether to use a bias for hidden spins.\n \"\"\"\n # NOTE: The following call to __init__ is important!\n super(PyRbm, self).__init__(hilbert)\n n = hilbert.size\n if alpha < 0:\n raise ValueError(\"`alpha` should be non-negative\")\n m = int(round(alpha * n))\n self._w = _np.empty([m, n], dtype=_np.complex128)\n self._a = _np.empty(n, dtype=_np.complex128) if use_visible_bias else None\n self._b = _np.empty(m, dtype=_np.complex128) if use_hidden_bias else None\n\n def _number_parameters(self):\n r\"\"\"Returns the number of parameters in the machine. 
We just sum the\n sizes of all the tensors we hold.\n \"\"\"\n return (\n self._w.size\n + (self._a.size if self._a is not None else 0)\n + (self._b.size if self._b is not None else 0)\n )\n\n def _number_visible(self):\n r\"\"\"Returns the number of visible units.\n \"\"\"\n return self._w.shape[1]\n\n def _get_parameters(self):\n r\"\"\"Returns the parameters as a 1D tensor.\n\n This function tries to order parameters in the exact same way as\n ``RbmSpin`` does so that we can do stuff like\n\n >>> import netket\n >>> import numpy\n >>> hilbert = netket.hilbert.Spin(\n graph=netket.graph.Hypercube(length=100, n_dim=1),\n s=1/2.\n )\n >>> cxx_rbm = netket.machine.RbmSpin(hilbert, alpha=3)\n >>> py_rbm = netket.machine.PyRbm(hilbert, alpha=3)\n >>> cxx_rbm.init_random_parameters()\n >>> # Order of parameters is the same, so we can assign one to the\n >>> # other\n >>> py_rbm.parameters = cxx_rbm.parameters\n >>> x = np.array(hilbert.local_states, size=hilbert.size)\n >>> assert numpy.isclose(py_rbm.log_val(x), cxx_rbm.log_val(x))\n \"\"\"\n params = tuple()\n if self._a is not None:\n params += (self._a,)\n if self._b is not None:\n params += (self._b,)\n params += (self._w.reshape(-1, order=\"C\"),)\n return _np.concatenate(params)\n\n def _set_parameters(self, p):\n r\"\"\"Sets parameters from a 1D tensor.\n\n ``self._set_parameters(self._get_parameters())`` is an identity.\n \"\"\"\n i = 0\n if self._a is not None:\n self._a[:] = p[i : i + self._a.size]\n i += self._a.size\n if self._b is not None:\n self._b[:] = p[i : i + self._b.size]\n i += self._b.size\n\n self._w[:] = p[i : i + self._w.size].reshape(self._w.shape, order=\"C\")\n\n def log_val(self, x):\n r\"\"\"Computes the logarithm of the wave function given a spin\n configuration ``x``.\n \"\"\"\n r = _np.dot(self._w, x)\n if self._b is not None:\n r += self._b\n r = _np.sum(PyRbm._log_cosh(r))\n if self._a is not None:\n r += _np.dot(self._a, x)\n # Officially, we should return\n # self._w.shape[0] * 0.6931471805599453 + r\n # but the C++ implementation ignores the \"constant factor\"\n return r\n\n def der_log(self, x):\n r\"\"\"Computes the gradient of the logarithm of the wave function\n given a spin configuration ``x``.\n \"\"\"\n grad = _np.empty(self.n_par, dtype=_np.complex128)\n i = 0\n\n if self._a is not None:\n grad[i : i + self._a.size] = x\n i += self._a.size\n\n tanh_stuff = _np.dot(self._w, x)\n if self._b is not None:\n tanh_stuff += self._b\n tanh_stuff = _np.tanh(tanh_stuff, out=tanh_stuff)\n\n if self._b is not None:\n grad[i : i + self._b.size] = tanh_stuff\n i += self._b.size\n\n out = grad[i : i + self._w.size]\n out.shape = (tanh_stuff.size, x.size)\n _np.outer(tanh_stuff, x, out=out)\n\n return grad\n\n def _is_holomorphic(self):\n r\"\"\"Complex valued RBM a holomorphic function.\n \"\"\"\n return True\n\n def save(self, filename):\n r\"\"\"Saves machine weights to ``filename`` using ``pickle``.\n \"\"\"\n import pickle\n\n with open(filename, \"wb\") as output_file:\n pickle.dump((self._w, self._a, self._b), output_file)\n\n def load(self, filename):\n r\"\"\"Loads machine weights from ``filename`` using ``pickle``.\n \"\"\"\n import pickle\n\n with open(filename, \"rb\") as input_file:\n self._w, self._a, self._b = pickle.load(input_file)\n\n @staticmethod\n def _log_cosh(x):\n # TODO: Handle big numbers properly\n return _np.log(_np.cosh(x))\n" ]
[ [ "numpy.cosh", "numpy.empty", "numpy.dot", "numpy.concatenate", "numpy.outer", "numpy.tanh" ] ]
jameszhou-gl/CBRE
[ "53c952e0afc74518fc4223f0f20881336df20f95" ]
[ "cbre/cbre_net.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom cbre.util import *\n\n\nclass CBRENet(object):\n \"\"\"\n cbre_net implements the cycly-balanced representation learning for counterfactual inference\n\n The network is implemented as a tensorflow graph. The class constructor\n creates an object containing relevant TF nodes as member variables.\n \"\"\"\n\n def __init__(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):\n \"\"\"\n x The varibales of data\n t The treatment applied to x, t.shape[1]==1\n y_ The true outcome\n p_t The treatment probability in all observations\n z_norm todo unknown\n flags The arg params\n r_alpha The coefficient of reconstruction and cycle loss\n r_lambda The coefficient of regularization of prediction network\n r_beta The coefficient of gradient penalty in GAN\n do_in The val of dropout_in\n do_out The val of dropout_out\n data_x_dim The dim of varibale x\n \"\"\"\n self.variables = {}\n # wd_loss: regularization l2 loss\n self.wd_loss = 0\n\n if flags.nonlin.lower() == 'elu':\n self.nonlin = tf.nn.elu\n else:\n self.nonlin = tf.nn.relu\n\n self._build_graph(x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim)\n\n def _add_variable(self, var, name):\n \"\"\"\n Adds variables to the internal track-keeper\n \"\"\"\n basename = name\n i = 0\n while name in self.variables:\n name = '%s_%d' % (basename, i) # @TODO: not consistent with TF internally if changed\n i += 1\n\n self.variables[name] = var\n\n def _create_variable(self, var, name):\n \"\"\" Create and adds variables to the internal track-keeper \"\"\"\n # tf.get_variable(name=name, initializer=var)\n var = tf.Variable(var, name=name)\n self._add_variable(var, name)\n return var\n\n def _create_variable_with_weight_decay(self, initializer, name, wd):\n \"\"\" Create and adds variables to the internal track-keeper\n and adds it to the list of weight decayed variables \"\"\"\n var = self._create_variable(initializer, name)\n self.wd_loss += wd * tf.nn.l2_loss(var)\n return var\n\n def _build_graph(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):\n \"\"\"\n Constructs a TensorFlow subgraph for causal effect inference.\n Sets the following member variables (to TF nodes):\n\n self.output The output prediction \"y\"\n self.tot_loss The total objective to minimize\n self.pred_loss The prediction term of the objective\n self.weights_in The input/representation layer weights\n self.weights_out The output/post-representation layer weights\n self.weights_pred The (linear) prediction layer weights\n self.h_rep The layer of the penalized representation\n \"\"\"\n self.x = x\n self.t = t\n self.y_ = y_\n self.p_t = p_t\n self.r_alpha = r_alpha\n self.r_lambda = r_lambda\n self.r_beta = r_beta\n self.do_in = do_in\n self.do_out = do_out\n self.z_norm = z_norm\n\n self.encoder_dim = flags.encoder_dim\n encoder_dim = flags.encoder_dim\n self.decoder_dim = flags.decoder_dim\n self.predictor_dim = flags.predictor_dim\n predictor_dim = flags.predictor_dim\n mi_estimator_dim = flags.mi_estimator_dim\n self.discriminator_dim = flags.discriminator_dim\n discriminator_dim = flags.discriminator_dim\n\n \"\"\"\n Network Components\n \"\"\"\n '''\n 1. Encoder Network\n '''\n # Construct Encoder network layers, four layers with size 200\n h_rep, h_rep_norm, weights_in = self._build_encoder(x, data_x_dim, flags)\n\n '''\n 2. 
GAN\n '''\n d0, d1, dp, weights_dis, weights_discore = self._build_adversarial_graph(h_rep_norm, t, encoder_dim,\n discriminator_dim, do_out,\n flags)\n # discriminator\n # with sigmoid\n # discriminator_loss = tf.reduce_mean(tf.nn.softplus(-d0)) + tf.reduce_mean(tf.nn.softplus(-d1) + d1) + dp\n # without sigmoid\n discriminator_loss = -tf.reduce_mean(d0) + tf.reduce_mean(d1) + r_beta * dp\n # encoder\n # with sigmoid\n # rep_loss = tf.reduce_mean(tf.nn.softplus(-d1))\n # without sigmoid\n # todo rep_loss in paper: rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)\n rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)\n # rep_loss = -tf.reduce_mean(d1)\n\n '''\n 3. Reconstruction \n '''\n # graph for reconstruction loss\n x0, recons_x_0, x1, recons_x_1 = self._build_reconstruct_graph(x, t, data_x_dim, flags)\n recons_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - recons_x_0)) + 1.0e-12) + tf.sqrt(\n tf.reduce_mean(tf.square(x1 - recons_x_1)) + 1.0e-12)\n\n '''\n 4. Cycle \n '''\n x0, cycle_x0, x1, cycle_x1 = self._build_cycle_graph(x, t, data_x_dim, flags)\n cycle_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - cycle_x0)) + 1.0e-12) + tf.sqrt(\n tf.reduce_mean(tf.square(x1 - cycle_x1)) + 1.0e-12)\n\n '''\n Predict Networks\n '''\n y, weights_out, weights_pred = self._build_output_graph(h_rep_norm, t, encoder_dim, predictor_dim, do_out,\n flags)\n\n \"\"\" Compute sample reweighting \"\"\"\n if flags.reweight_sample:\n w_t = t / (2 * p_t)\n w_c = (1 - t) / (2 * 1 - p_t)\n sample_weight = w_t + w_c\n else:\n sample_weight = 1.0\n\n self.sample_weight = sample_weight\n\n risk = tf.reduce_mean(sample_weight * tf.square(y_ - y))\n pred_error = tf.sqrt(tf.reduce_mean(tf.square(y_ - y)) + 1.0e-12)\n\n \"\"\" Regularization \"\"\"\n if flags.p_lambda > 0 and flags.rep_weight_decay:\n for i in range(0, flags.layer_num_encoder):\n if not (flags.varsel and i == 0): # No penalty on W in variable selection\n self.wd_loss += tf.nn.l2_loss(weights_in[i])\n\n \"\"\" Total error \"\"\"\n tot_error = risk\n\n if flags.p_lambda > 0:\n tot_error = tot_error + r_lambda * self.wd_loss + recons_loss + cycle_loss\n if flags.coef_recons > 0:\n tot_error += flags.coef_recons * recons_loss\n if flags.coef_cycle:\n tot_error += flags.coef_cycle * cycle_loss\n if flags.coef_d:\n tot_error += flags.coef_d * discriminator_loss\n\n if flags.varsel:\n self.w_proj = tf.placeholder(\"float\", shape=[data_x_dim], name='w_proj')\n self.projection = weights_in[0].assign(self.w_proj)\n\n self.output = y\n self.tot_loss = tot_error\n self.discriminator_loss = discriminator_loss\n self.rep_loss = rep_loss\n self.rec_loss = recons_loss\n self.cycle_loss = cycle_loss\n self.recons_cycle_loss = recons_loss + cycle_loss\n self.pred_loss = pred_error\n self.weights_in = weights_in\n self.weights_out = weights_out\n self.weights_dis = weights_dis\n self.weights_discore = weights_discore\n self.weights_pred = weights_pred\n self.h_rep = h_rep\n self.h_rep_norm = h_rep_norm\n self.dp = dp\n\n def _build_output_0(self, h_input, encoder_dim, predictor_dim, do_out, flags):\n h_out = [h_input]\n dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)\n with tf.variable_scope('pred_0') as scope:\n weights_out = []\n biases_out = []\n\n for i in range(0, flags.layer_num_predictor):\n wo = tf.get_variable(name='w_{}'.format(i),\n initializer=tf.random_normal([dims[i], dims[i + 1]],\n stddev=flags.weight_init / np.sqrt(dims[i])))\n\n weights_out.append(wo)\n\n # biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))\n 
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))\n z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]\n\n h_out.append(self.nonlin(z))\n h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)\n\n weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],\n stddev=flags.weight_init / np.sqrt(predictor_dim)),\n 'w_pred')\n weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],\n stddev=flags.weight_init / np.sqrt(\n predictor_dim)))\n bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')\n\n if flags.varsel or flags.layer_num_predictor == 0:\n self.wd_loss += tf.nn.l2_loss(\n tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient\n else:\n self.wd_loss += tf.nn.l2_loss(weights_pred)\n\n \"\"\" Construct linear classifier \"\"\"\n h_pred = h_out[-1]\n y = tf.matmul(h_pred, weights_pred) + bias_pred\n\n return y, weights_out, weights_pred\n\n def _build_output_1(self, h_input, encoder_dim, predictor_dim, do_out, flags):\n h_out = [h_input]\n dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)\n with tf.variable_scope('pred_1') as scope:\n weights_out = []\n biases_out = []\n\n for i in range(0, flags.layer_num_predictor):\n wo = tf.get_variable(name='w_{}'.format(i),\n initializer=tf.random_normal([dims[i], dims[i + 1]],\n stddev=flags.weight_init / np.sqrt(dims[i])))\n\n weights_out.append(wo)\n\n # biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))\n biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))\n z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]\n\n h_out.append(self.nonlin(z))\n h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)\n\n weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],\n stddev=flags.weight_init / np.sqrt(predictor_dim)),\n 'w_pred')\n weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],\n stddev=flags.weight_init / np.sqrt(\n predictor_dim)))\n bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')\n\n if flags.varsel or flags.layer_num_predictor == 0:\n self.wd_loss += tf.nn.l2_loss(\n tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient\n else:\n self.wd_loss += tf.nn.l2_loss(weights_pred)\n\n \"\"\" Construct linear classifier \"\"\"\n h_pred = h_out[-1]\n y = tf.matmul(h_pred, weights_pred) + bias_pred\n\n return y, weights_out, weights_pred\n\n def _build_output_graph(self, rep, t, encoder_dim, predictor_dim, do_out, flags):\n \"\"\" Construct output/regression layers \"\"\"\n\n if flags.split_output:\n\n i0 = tf.to_int32(tf.where(t < 1)[:, 0])\n i1 = tf.to_int32(tf.where(t > 0)[:, 0])\n\n rep0 = tf.gather(rep, i0)\n rep1 = tf.gather(rep, i1)\n\n y0, weights_out0, weights_pred0 = self._build_output_0(rep0, encoder_dim, predictor_dim, do_out, flags)\n y1, weights_out1, weights_pred1 = self._build_output_1(rep1, encoder_dim, predictor_dim, do_out, flags)\n\n y = tf.dynamic_stitch([i0, i1], [y0, y1])\n weights_out = weights_out0 + weights_out1\n weights_pred = weights_pred0 + weights_pred1\n else:\n h_input = tf.concat(1, [rep, t])\n # y, weights_out, weights_pred = self._build_output(h_input, encoder_dim + 1, predictor_dim, do_out, flags)\n y, weights_out, weights_pred = None, None, None\n\n return y, weights_out, weights_pred\n\n def _build_encoder(self, x, data_x_dim, flags):\n with 
tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope:\n weights_in = []\n biases_in = []\n\n if flags.batch_norm:\n bn_biases = []\n bn_scales = []\n\n h_in = [x]\n\n for i in range(0, flags.layer_num_encoder):\n if i == 0:\n \"\"\" If using variable selection, first layer is just rescaling\"\"\"\n if flags.varsel:\n weights_in.append(tf.get_variable(name='wg_{}'.format(i),\n initializer=1.0 / data_x_dim * tf.ones([data_x_dim])))\n else:\n wg = tf.get_variable(name='wg_{}'.format(i),\n initializer=tf.random_normal([data_x_dim, self.encoder_dim],\n stddev=flags.weight_init / np.sqrt(\n data_x_dim)))\n weights_in.append(wg)\n else:\n wg = tf.get_variable(name='wg_{}'.format(i),\n initializer=tf.random_normal([self.encoder_dim, self.encoder_dim],\n stddev=flags.weight_init / np.sqrt(\n self.encoder_dim)))\n weights_in.append(wg)\n\n biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, self.encoder_dim])))\n # z equals outcome of each layer in Encoder Network.\n z = tf.matmul(h_in[i], weights_in[i]) + biases_in[i]\n\n if flags.batch_norm:\n batch_mean, batch_var = tf.nn.moments(z, [0])\n\n if flags.normalization == 'bn_fixed':\n z = tf.nn.batch_normalization(z, batch_mean, batch_var, 0, 1, 1e-3)\n else:\n # bn_biases.append(tf.Variable(tf.zeros([self.encoder_dim])))\n bn_biases.append(\n tf.get_variable(name='bn_b_{}'.format(i), initializer=tf.zeros([self.encoder_dim])))\n # bn_scales.append(tf.Variable(tf.ones([self.encoder_dim])))\n bn_scales.append(\n tf.get_variable(name='bn_s_{}'.format(i), initializer=tf.ones([self.encoder_dim])))\n z = tf.nn.batch_normalization(z, batch_mean, batch_var, bn_biases[-1], bn_scales[-1], 1e-3)\n\n h_in.append(self.nonlin(z))\n h_in[i + 1] = tf.nn.dropout(h_in[i + 1], self.do_in)\n\n h_rep = h_in[-1]\n\n # todo normalization meaning?\n if flags.normalization == 'divide':\n h_rep_norm = h_rep / safe_sqrt(tf.reduce_sum(tf.square(h_rep), axis=1, keep_dims=True) + 1.0e-12)\n else:\n h_rep_norm = 1.0 * h_rep\n return h_rep, h_rep_norm, weights_in\n\n def _build_decoder(self, h_rep, data_x_dim, flags, suffix='0'):\n with tf.variable_scope('decoder_' + suffix, reuse=tf.AUTO_REUSE) as scope:\n weights_in = []\n biases_in = []\n recons_x = [h_rep]\n decoder_dim = flags.decoder_dim\n for i in range(0, flags.layer_num_decoder):\n if i == 0:\n weights_in.append(tf.get_variable(name='wg_{}'.format(i),\n initializer=tf.random_normal([flags.encoder_dim, decoder_dim],\n stddev=flags.weight_init / np.sqrt(\n flags.encoder_dim))))\n biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))\n elif i == flags.layer_num_decoder - 1:\n weights_in.append(\n tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, data_x_dim],\n stddev=flags.weight_init / np.sqrt(\n decoder_dim))))\n biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, data_x_dim])))\n\n else:\n weights_in.append(\n tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, decoder_dim],\n stddev=flags.weight_init / np.sqrt(\n decoder_dim))))\n biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))\n\n # z equals outcome of each layer in Encoder Network.\n z = tf.matmul(recons_x[i], weights_in[i]) + biases_in[i]\n\n recons_x.append(self.nonlin(z))\n recons_x[i + 1] = tf.nn.dropout(recons_x[i + 1], self.do_in)\n\n recons_x = recons_x[-1]\n return recons_x, weights_in\n\n def _build_discriminator_graph_mine(self, x, hrep, 
data_x_dim, encoder_dim, mi_estimator_dim, flags):\n \"\"\" Construct MI estimation layers \"\"\"\n # two layers with size 200\n with tf.variable_scope('gmi') as scope:\n input_num = tf.shape(x)[0]\n x_shuffle = tf.random_shuffle(x)\n x_conc = tf.concat([x, x_shuffle], axis=0)\n y_conc = tf.concat([hrep, hrep], axis=0)\n\n # forward\n # [25, 200]\n weights_mi_x = self._create_variable(tf.random_normal([data_x_dim, mi_estimator_dim],\n stddev=flags.weight_init / np.sqrt(data_x_dim)),\n 'weights_mi_x')\n biases_mi_x = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_x')\n # [, 200]\n lin_x = tf.matmul(x_conc, weights_mi_x) + biases_mi_x\n # [200, 200]\n weights_mi_y = self._create_variable(tf.random_normal([encoder_dim, mi_estimator_dim],\n stddev=flags.weight_init / np.sqrt(encoder_dim)),\n 'weights_mi_y')\n biases_mi_y = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_y')\n # [, 200]\n lin_y = tf.matmul(y_conc, weights_mi_y) + biases_mi_y\n\n # lin_conc = tf.nn.relu(lin_x + lin_y)\n lin_conc = self.nonlin(lin_x + lin_y)\n\n weights_mi_pred = self._create_variable(tf.random_normal([mi_estimator_dim, 1],\n stddev=flags.weight_init / np.sqrt(\n mi_estimator_dim)),\n 'gmi_p')\n biases_mi_pred = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_pred')\n gmi_output = tf.matmul(lin_conc, weights_mi_pred) + biases_mi_pred\n # real estimator outcome: shape=[input_num, 1]\n real_estimate = gmi_output[:input_num]\n # fake estimator outcome: shape=[input_num, 1]\n fake_estimate = gmi_output[input_num:]\n\n return real_estimate, fake_estimate, weights_mi_x, weights_mi_y, weights_mi_pred\n\n def _build_discriminator_adversarial(self, hrep, encoder_dim, discriminator_dim, do_out, flags):\n \"\"\" Construct adversarial discriminator layers \"\"\"\n with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope:\n h_dis = [hrep]\n\n weights_dis = []\n biases_dis = []\n for i in range(0, flags.layer_num_discriminator):\n\n if i == 0:\n weights_dis.append(tf.get_variable(name='wg_{}'.format(i),\n initializer=tf.random_normal([encoder_dim, discriminator_dim],\n stddev=flags.weight_init / np.sqrt(\n encoder_dim))))\n else:\n weights_dis.append(tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal(\n [discriminator_dim, discriminator_dim],\n stddev=flags.weight_init / np.sqrt(\n discriminator_dim))))\n biases_dis.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, discriminator_dim])))\n z = tf.matmul(h_dis[i], weights_dis[i]) + biases_dis[i]\n h_dis.append(self.nonlin(z))\n h_dis[i + 1] = tf.nn.dropout(h_dis[i + 1], do_out)\n\n weights_discore = tf.get_variable(initializer=tf.random_normal([discriminator_dim, 1],\n stddev=flags.weight_init / np.sqrt(\n discriminator_dim)), name='dc_p')\n bias_dc = tf.get_variable(initializer=tf.zeros([1]), name='dc_b_p')\n\n h_score = h_dis[-1]\n dis_score = tf.matmul(h_score, weights_discore) + bias_dc\n\n return dis_score, weights_dis, weights_discore\n\n def _build_adversarial_graph(self, rep, t, encoder_dim, discriminator_dim, do_out, flags):\n \"\"\"\n Construct adversarial discriminator\n \"\"\"\n # three layers with size 200\n\n i0 = tf.to_int32(tf.where(t < 1)[:, 0])\n i1 = tf.to_int32(tf.where(t > 0)[:, 0])\n\n rep0 = tf.gather(rep, i0)\n rep1 = tf.gather(rep, i1)\n\n z_rep0 = tf.reduce_max(rep0, axis=0, keep_dims=True)\n z_rep1 = tf.reduce_max(rep1, axis=0, keep_dims=True)\n\n z_rep0_conc = tf.concat([z_rep0, self.z_norm], axis=1)\n z_rep1_conc = tf.concat([z_rep1, 
self.z_norm], axis=1)\n\n d0, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep0_conc, encoder_dim + encoder_dim,\n discriminator_dim,\n do_out, flags)\n d1, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep1_conc, encoder_dim + encoder_dim,\n discriminator_dim,\n do_out, flags)\n\n # gradient penalty\n alpha_dist = tf.contrib.distributions.Uniform(low=0., high=1.)\n alpha = alpha_dist.sample((1, 1))\n interpolated = z_rep1 + alpha * (z_rep0 - z_rep1)\n interpolated_conc = tf.concat([interpolated, self.z_norm], axis=1)\n inte_logit, weights_dis, weights_discore = self._build_discriminator_adversarial(interpolated_conc,\n encoder_dim + encoder_dim,\n discriminator_dim, do_out,\n flags)\n gradients = tf.gradients(inte_logit, [interpolated])[0]\n grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]) + 1.0e-12)\n gradient_penalty = tf.reduce_mean(tf.square(grad_l2 - 1.0))\n\n return d0, d1, gradient_penalty, weights_dis, weights_discore\n\n def _build_reconstruct_graph(self, x, t, data_x_dim, flags):\n \"\"\" construct graph for later computing reconstruction loss easily\n\n Parameters:\n x The varibales of data\n t The treatment applied to x\n\n Returns:\n x0 x[t=0]\n reconstruct_x reconstruct x when pass encoder and decoder networks\n \"\"\"\n i0 = tf.to_int32(tf.where(t < 1)[:, 0])\n i1 = tf.to_int32(tf.where(t > 0)[:, 0])\n\n x0 = tf.gather(x, i0)\n x1 = tf.gather(x, i1)\n h_rep_0, h_rep_norm_0, weights_in_0 = self._build_encoder(x0, data_x_dim, flags)\n h_rep_1, h_rep_norm_1, weights_in_1 = self._build_encoder(x1, data_x_dim, flags)\n\n recons_x_0, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='0')\n recons_x_1, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='1')\n return x0, recons_x_0, x1, recons_x_1\n\n def _build_cycle_graph(self, x, t, data_x_dim, flags):\n \"\"\" construct graph for later computing cycle loss easily\n\n Parameters:\n x The varibales of data\n t The treatment applied to x\n\n Returns:\n x0 x[t=0]\n reconstruct_x reconstruct x when pass encoder and decoder networks\n \"\"\"\n i0 = tf.to_int32(tf.where(t < 1)[:, 0])\n i1 = tf.to_int32(tf.where(t > 0)[:, 0])\n\n x0 = tf.gather(x, i0)\n x1 = tf.gather(x, i1)\n # cycle x0-x1'-x0\n _, h_rep_norm_0, _ = self._build_encoder(x0, data_x_dim, flags)\n temp_x_0_in_1, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='1')\n _, cyc_h_rep_norm_0, _ = self._build_encoder(temp_x_0_in_1, data_x_dim, flags)\n cycle_x0, _ = self._build_decoder(cyc_h_rep_norm_0, data_x_dim, flags, suffix='0')\n\n # cycle x1-x0'-x1\n _, h_rep_norm_1, _ = self._build_encoder(x1, data_x_dim, flags)\n temp_x_1_in_0, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='0')\n _, cyc_h_rep_norm_1, _ = self._build_encoder(temp_x_1_in_0, data_x_dim, flags)\n cycle_x1, _ = self._build_decoder(cyc_h_rep_norm_1, data_x_dim, flags, suffix='1')\n\n return x0, cycle_x0, x1, cycle_x1\n" ]
[ [ "tensorflow.reduce_max", "tensorflow.ones", "tensorflow.variable_scope", "tensorflow.nn.l2_loss", "tensorflow.contrib.distributions.Uniform", "tensorflow.matmul", "tensorflow.concat", "tensorflow.Variable", "tensorflow.slice", "tensorflow.nn.dropout", "tensorflow.shape", "tensorflow.nn.batch_normalization", "tensorflow.random_shuffle", "tensorflow.gradients", "tensorflow.dynamic_stitch", "tensorflow.nn.moments", "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.reduce_mean", "tensorflow.square", "tensorflow.where", "numpy.sqrt", "tensorflow.gather" ] ]
caltech-netlab/gym-acnportal
[ "cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46" ]
[ "gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py" ]
[ "# coding=utf-8\n\"\"\" Tests for SimAction and action space functions. \"\"\"\nimport unittest\nfrom typing import Callable, Dict, List, Any\nfrom unittest.mock import create_autospec\n\nimport numpy as np\nfrom gym import Space\n\nfrom ..action_spaces import (\n SimAction,\n single_charging_schedule,\n zero_centered_single_charging_schedule,\n)\nfrom ...interfaces import GymTrainedInterface\n\n\nclass TestSimAction(unittest.TestCase):\n # noinspection PyMissingOrEmptyDocstring\n @classmethod\n def setUpClass(cls) -> None:\n # The type here is Any as space_function is actually a Mock\n # object, but there's no Mock type in the typing library.\n cls.space_function: Any = create_autospec(lambda interface: Space())\n cls.to_schedule: Callable[\n [GymTrainedInterface, np.ndarray], Dict[str, List[float]]\n ] = lambda interface, array: {\"a\": [0]}\n cls.name: str = \"stub_action\"\n cls.sim_action: SimAction = SimAction(\n cls.space_function, cls.to_schedule, cls.name\n )\n cls.interface: GymTrainedInterface = create_autospec(GymTrainedInterface)\n\n def test_correct_on_init_sim_action_name(self) -> None:\n self.assertEqual(self.sim_action.name, self.name)\n\n def test_get_space(self) -> None:\n self.sim_action.get_space(self.interface)\n self.space_function.assert_called_once()\n\n def test_get_schedule(self) -> None:\n array: np.ndarray = np.array([[1, 0], [0, 1]])\n self.assertEqual(\n self.sim_action.get_schedule(self.interface, array), {\"a\": [0]}\n )\n\n\nclass TestSingleChargingSchedule(unittest.TestCase):\n # Some class variables are defined outside of setUpClass so that\n # the code inspector knows that inherited classes have these\n # attributes.\n max_rate: float = 16.0\n min_rate: float = 0.0\n negative_rate: float = -4.0\n deadband_rate: float = 6.0\n\n # noinspection PyMissingOrEmptyDocstring\n @classmethod\n def setUpClass(cls) -> None:\n cls.sim_action: SimAction = single_charging_schedule()\n cls.station_ids: List[str] = [\"T1\", \"T2\"]\n cls.offset: float = 0.5\n\n def _interface_builder(interface: Any, min_rate: float) -> Any:\n interface.station_ids = cls.station_ids\n interface.max_pilot_signal = lambda station_id: cls.max_rate\n interface.min_pilot_signal = lambda station_id: (\n min_rate if station_id == cls.station_ids[1] else cls.min_rate\n )\n return interface\n\n cls.interface: Any = _interface_builder(\n create_autospec(GymTrainedInterface), cls.min_rate\n )\n cls.interface_negative_min: Any = _interface_builder(\n create_autospec(GymTrainedInterface), cls.negative_rate\n )\n cls.interface_deadband_min: Any = _interface_builder(\n create_autospec(GymTrainedInterface), cls.deadband_rate\n )\n\n def test_correct_on_init_single_name(self) -> None:\n self.assertEqual(self.sim_action.name, \"single schedule\")\n\n def _test_space_function_helper(\n self, interface: GymTrainedInterface, min_rate: float, max_rate: float\n ) -> None:\n out_space: Space = self.sim_action.get_space(interface)\n self.assertEqual(out_space.shape, (len(self.station_ids),))\n np.testing.assert_equal(out_space.low, 2 * [min_rate])\n np.testing.assert_equal(out_space.high, 2 * [max_rate])\n self.assertEqual(out_space.dtype, \"float\")\n\n def test_single_space_function(self) -> None:\n self._test_space_function_helper(self.interface, self.min_rate, self.max_rate)\n\n def test_single_space_function_negative_min(self) -> None:\n self._test_space_function_helper(\n self.interface_negative_min, self.negative_rate, self.max_rate\n )\n\n def test_single_space_function_deadband_min(self) -> None:\n 
self._test_space_function_helper(\n self.interface_deadband_min, self.min_rate, self.max_rate\n )\n\n def test_single_to_schedule(self) -> None:\n good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(\n self.interface,\n np.array(\n [self.min_rate + self.offset, (self.max_rate - self.min_rate) / 2]\n ),\n )\n self.assertEqual(\n good_schedule,\n {\n self.station_ids[0]: [self.min_rate + self.offset],\n self.station_ids[1]: [(self.max_rate - self.min_rate) / 2],\n },\n )\n\n def test_single_to_bad_schedule(self) -> None:\n # The get_schedule function does not test if the input schedule\n # array is within the action space.\n bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(\n self.interface,\n np.array([self.min_rate - self.offset, self.max_rate + self.offset]),\n )\n self.assertEqual(\n bad_schedule,\n {\n self.station_ids[0]: [self.min_rate - self.offset],\n self.station_ids[1]: [self.max_rate + self.offset],\n },\n )\n\n def test_single_error_schedule(self) -> None:\n with self.assertRaises(TypeError):\n _ = self.sim_action.get_schedule(\n self.interface,\n np.array(\n [[self.min_rate - self.offset], [self.max_rate + self.offset]]\n ),\n )\n\n\nclass TestZeroCenteredSingleChargingSchedule(TestSingleChargingSchedule):\n # noinspection PyMissingOrEmptyDocstring\n @classmethod\n def setUpClass(cls) -> None:\n super().setUpClass()\n cls.sim_action: SimAction = zero_centered_single_charging_schedule()\n cls.shifted_max = cls.max_rate - (cls.max_rate + cls.min_rate) / 2\n cls.shifted_minimums = [\n cls.min_rate - (cls.max_rate + cls.min_rate) / 2,\n cls.negative_rate - (cls.max_rate + cls.negative_rate) / 2,\n cls.min_rate - (cls.max_rate + cls.deadband_rate) / 2,\n ]\n cls.negative_max_shift = cls.max_rate - (cls.max_rate + cls.negative_rate) / 2\n\n def test_correct_on_init_single_name(self) -> None:\n self.assertEqual(self.sim_action.name, \"zero-centered single schedule\")\n\n def test_single_space_function(self) -> None:\n self._test_space_function_helper(\n self.interface, self.shifted_minimums[0], self.shifted_max\n )\n\n def test_single_space_function_negative_min(self) -> None:\n self._test_space_function_helper(\n self.interface_negative_min,\n self.shifted_minimums[1],\n self.negative_max_shift,\n )\n\n def test_single_space_function_deadband_min(self) -> None:\n self._test_space_function_helper(\n self.interface_deadband_min, self.shifted_minimums[2], self.shifted_max\n )\n\n def test_single_to_bad_schedule(self) -> None:\n # The get_schedule function does not test if the input schedule\n # array is within the action space.\n bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(\n self.interface,\n np.array([self.min_rate - self.offset, self.max_rate + self.offset]),\n )\n self.assertEqual(\n bad_schedule,\n {\n self.station_ids[0]: [\n self.min_rate - self.offset + (self.max_rate + self.min_rate) / 2\n ],\n self.station_ids[1]: [\n self.max_rate + self.offset + (self.max_rate + self.min_rate) / 2\n ],\n },\n )\n\n def test_single_to_schedule(self) -> None:\n good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(\n self.interface,\n np.array(\n [\n self.min_rate - (self.max_rate + self.min_rate) / 2,\n self.max_rate - (self.max_rate + self.min_rate) / 2,\n ]\n ),\n )\n self.assertEqual(\n good_schedule,\n {\n self.station_ids[0]: [self.min_rate],\n self.station_ids[1]: [self.max_rate],\n },\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.testing.assert_equal" ] ]
ckxy/part-of-hitogata
[ "76402d48a336fcd964d0e64bb01d959e8f07f296" ]
[ "datasets/readers/ccpd.py" ]
[ "import os\nimport numpy as np\nfrom addict import Dict\nfrom PIL import Image\nfrom .reader import Reader\nfrom .builder import READER\n\n\n__all__ = ['CCPD2019FolderReader']\n\n\[email protected]_module()\nclass CCPD2019FolderReader(Reader):\n def __init__(self, root, **kwargs):\n super(CCPD2019FolderReader, self).__init__(**kwargs)\n\n self.root = root\n self.chars = ('京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',\n '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',\n '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',\n '新', \n '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',\n 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'I', 'O', '-')\n self.img_paths = sorted(os.listdir(kwargs['root']))\n assert len(self.img_paths) > 0\n\n def get_dataset_info(self):\n return range(len(self.img_paths)), Dict({'chars': self.chars})\n\n def get_data_info(self, index):\n img = Image.open(self.img_paths[index][0])\n w, h = img.size\n return dict(h=h, w=w)\n\n def __call__(self, index):\n # index = data_dict\n # img = Image.open(os.path.join(self.root, self.img_paths[index])).convert('RGB')\n img = self.read_image(os.path.join(self.root, self.img_paths[index]))\n w, h = img.size\n path = os.path.join(self.root, self.img_paths[index])\n\n base_name = os.path.basename(self.img_paths[index])\n img_name, suffix = os.path.splitext(base_name)\n img_name = img_name.split(\"-\")[0].split(\"_\")[0]\n\n # if len(img_name) == 8:\n # print(path, 'a')\n # if img_name[2] != 'D' and img_name[2] != 'F' and img_name[-1] != 'D' and img_name[-1] != 'F':\n # print(path)\n # raise ValueError\n\n words = []\n for c in img_name:\n words.append(self.chars.index(c))\n\n # return {'image': img, 'ori_size': np.array([h, w]).astype(np.float32), 'path': path, 'seq': words, 'seq_length': len(words)}\n return dict(\n image=img,\n ori_size=np.array([h, w]).astype(np.float32),\n path=path,\n seq=words,\n seq_length=len(words)\n )\n\n def __repr__(self):\n return 'CCPD2019FolderReader(root={}, {})'.format(self.root, super(CCPD2019FolderReader, self).__repr__())\n" ]
[ [ "numpy.array" ] ]
YeeCY/PASF
[ "95e548d365ea5da482c56408539d9a1514ef246b" ]
[ "rlkit/samplers/data_collector/path_collector.py" ]
[ "from collections import deque, OrderedDict\nfrom functools import partial\n\nimport numpy as np\n\nfrom rlkit.core.eval_util import create_stats_ordered_dict\nfrom rlkit.samplers.data_collector.base import PathCollector\nfrom rlkit.samplers.rollout_functions import rollout\n\n\nclass ActionAgent():\n def __init__(self):\n\n self._actions = None\n self._step = 0\n\n def reset(self):\n self._step = 0\n\n def set_action(self, actions):\n self._actions = actions\n\n def get_action(self, *args, **kwargs):\n action = self._actions[self._step]\n self._step += 1\n return action, []\n\n\nclass MdpPathCollector(PathCollector):\n def __init__(\n self,\n env,\n policy,\n max_num_epoch_paths_saved=None,\n render=False,\n render_kwargs=None,\n rollout_fn=rollout,\n save_env_in_snapshot=True,\n ):\n if render_kwargs is None:\n render_kwargs = {}\n self._env = env\n self._policy = policy\n self._max_num_epoch_paths_saved = max_num_epoch_paths_saved\n self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)\n self._render = render\n self._render_kwargs = render_kwargs\n self._rollout_fn = rollout_fn\n self._action_agent = ActionAgent()\n\n self._num_steps_total = 0\n self._num_paths_total = 0\n\n self._save_env_in_snapshot = save_env_in_snapshot\n\n def collect_new_paths(\n self,\n max_path_length,\n num_steps,\n discard_incomplete_paths,\n ):\n paths = []\n num_steps_collected = 0\n while num_steps_collected < num_steps:\n max_path_length_this_loop = min( # Do not go over num_steps\n max_path_length,\n num_steps - num_steps_collected,\n )\n path = self._rollout_fn(\n self._env,\n self._policy,\n max_path_length=max_path_length_this_loop,\n render=self._render,\n render_kwargs=self._render_kwargs,\n )\n path_len = len(path['actions'])\n if (\n path_len != max_path_length\n and not path['terminals'][-1]\n and discard_incomplete_paths\n ):\n break\n num_steps_collected += path_len\n paths.append(path)\n self._num_paths_total += len(paths)\n self._num_steps_total += num_steps_collected\n self._epoch_paths.extend(paths)\n return paths\n\n def collect_aligned_paths(self, path_actions, discard_incomplete_paths=True):\n paths = []\n num_steps_collected = 0\n for p in path_actions:\n max_path_length = len(p)\n self._action_agent.set_action(p)\n path = self._rollout_fn(\n self._env,\n self._action_agent,\n max_path_length=max_path_length,\n render=self._render,\n render_kwargs=self._render_kwargs,\n )\n path_len = len(path['actions'])\n if (\n path_len != max_path_length\n and not path['terminals'][-1]\n and discard_incomplete_paths\n ):\n break\n num_steps_collected += path_len\n paths.append(path)\n self._num_paths_total += len(paths)\n self._num_steps_total += num_steps_collected\n return paths\n\n def get_epoch_paths(self):\n return self._epoch_paths\n\n def end_epoch(self, epoch):\n self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)\n\n def get_diagnostics(self):\n path_lens = [len(path['actions']) for path in self._epoch_paths]\n stats = OrderedDict([\n ('num steps total', self._num_steps_total),\n ('num paths total', self._num_paths_total),\n ])\n stats.update(create_stats_ordered_dict(\n \"path length\",\n path_lens,\n always_show_all_stats=True,\n ))\n return stats\n\n def get_snapshot(self):\n snapshot_dict = dict(\n policy=self._policy,\n num_steps_total=self._num_steps_total,\n num_paths_total=self._num_paths_total,\n )\n if self._save_env_in_snapshot:\n snapshot_dict['env'] = self._env\n return snapshot_dict\n\n def load_from_snapshot(self, snapshot):\n self._policy = 
snapshot['policy']\n self._num_steps_total = snapshot['num_steps_total']\n self._num_paths_total = snapshot['num_paths_total']\n if self._save_env_in_snapshot:\n assert 'env' in snapshot\n if hasattr(self._env, '_custom_goal_sampler'):\n snapshot['env']._custom_goal_sampler = self._env._custom_goal_sampler\n self._env = snapshot['env']\n\n\nclass GoalConditionedPathCollector(MdpPathCollector):\n def __init__(\n self,\n *args,\n observation_key='observation',\n desired_goal_key='desired_goal',\n goal_sampling_mode=None,\n **kwargs\n ):\n def obs_processor(o):\n return np.hstack((o[observation_key], o[desired_goal_key]))\n\n rollout_fn = partial(\n rollout,\n preprocess_obs_for_policy_fn=obs_processor,\n )\n super().__init__(*args, rollout_fn=rollout_fn, **kwargs)\n self._observation_key = observation_key\n self._desired_goal_key = desired_goal_key\n self._goal_sampling_mode = goal_sampling_mode\n\n def collect_new_paths(self, *args, **kwargs):\n self._env.goal_sampling_mode = self._goal_sampling_mode\n return super().collect_new_paths(*args, **kwargs)\n\n def get_snapshot(self):\n snapshot = super().get_snapshot()\n snapshot.update(\n observation_key=self._observation_key,\n desired_goal_key=self._desired_goal_key,\n )\n return snapshot\n\n def load_from_snapshot(self, snapshot):\n super().load_from_snapshot(snapshot)\n self._observation_key = snapshot['observation_key']\n self._desired_goal_key = snapshot['desired_goal_key']\n\n\nclass ObsDictPathCollector(MdpPathCollector):\n def __init__(\n self,\n *args,\n observation_key='observation',\n **kwargs\n ):\n def obs_processor(obs):\n return obs[observation_key]\n\n rollout_fn = partial(\n rollout,\n preprocess_obs_for_policy_fn=obs_processor,\n )\n super().__init__(*args, rollout_fn=rollout_fn, **kwargs)\n self._observation_key = observation_key\n\n def get_snapshot(self):\n snapshot = super().get_snapshot()\n snapshot.update(\n observation_key=self._observation_key,\n )\n return snapshot\n\n\nclass VAEWrappedEnvPathCollector(GoalConditionedPathCollector):\n def __init__(\n self,\n env,\n policy,\n decode_goals=False,\n **kwargs\n ):\n \"\"\"Expects env is VAEWrappedEnv\"\"\"\n super().__init__(env, policy, **kwargs)\n self._decode_goals = decode_goals\n\n def collect_new_paths(self, *args, **kwargs):\n self._env.decode_goals = self._decode_goals\n return super().collect_new_paths(*args, **kwargs)\n" ]
[ [ "numpy.hstack" ] ]
charliec443/plaid-rl
[ "2e8fbf389af9efecd41361df80e40e0bf932056d" ]
[ "plaidrl/exploration_strategies/ou_strategy.py" ]
[ "import numpy as np\nimport numpy.random as nr\n\nfrom plaidrl.exploration_strategies.base import RawExplorationStrategy\n\n\nclass OUStrategy(RawExplorationStrategy):\n \"\"\"\n This strategy implements the Ornstein-Uhlenbeck process, which adds\n time-correlated noise to the actions taken by the deterministic policy.\n The OU process satisfies the following stochastic differential equation:\n dxt = theta*(mu - xt)*dt + sigma*dWt\n where Wt denotes the Wiener process\n\n Based on the rllab implementation.\n \"\"\"\n\n def __init__(\n self,\n action_space,\n mu=0,\n theta=0.15,\n max_sigma=0.3,\n min_sigma=None,\n decay_period=100000,\n ):\n if min_sigma is None:\n min_sigma = max_sigma\n self.mu = mu\n self.theta = theta\n self.sigma = max_sigma\n self._max_sigma = max_sigma\n if min_sigma is None:\n min_sigma = max_sigma\n self._min_sigma = min_sigma\n self._decay_period = decay_period\n self.dim = np.prod(action_space.low.shape)\n self.low = action_space.low\n self.high = action_space.high\n self.state = np.ones(self.dim) * self.mu\n self.reset()\n\n def reset(self):\n self.state = np.ones(self.dim) * self.mu\n\n def evolve_state(self):\n x = self.state\n dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))\n self.state = x + dx\n return self.state\n\n def get_action_from_raw_action(self, action, t=0, **kwargs):\n ou_state = self.evolve_state()\n self.sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(\n 1.0, t * 1.0 / self._decay_period\n )\n return np.clip(action + ou_state, self.low, self.high)\n" ]
[ [ "numpy.ones", "numpy.clip", "numpy.prod" ] ]
sdwivedi/LightGBM
[ "f5ec54fbaca8bd5f72cdecbf755216c6278aafe3" ]
[ "examples/python-guide/advanced_example.py" ]
[ "# coding: utf-8\nimport json\nimport lightgbm as lgb\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\n\ntry:\n import cPickle as pickle\nexcept BaseException:\n import pickle\n\nprint('Loading data...')\n# load or create your dataset\ndf_train = pd.read_csv('../binary_classification/binary.train', header=None, sep='\\t')\ndf_test = pd.read_csv('../binary_classification/binary.test', header=None, sep='\\t')\nW_train = pd.read_csv('../binary_classification/binary.train.weight', header=None)[0]\nW_test = pd.read_csv('../binary_classification/binary.test.weight', header=None)[0]\n\ny_train = df_train[0]\ny_test = df_test[0]\nX_train = df_train.drop(0, axis=1)\nX_test = df_test.drop(0, axis=1)\n\nnum_train, num_feature = X_train.shape\n\n# create dataset for lightgbm\n# if you want to re-use data, remember to set free_raw_data=False\nlgb_train = lgb.Dataset(X_train, y_train,\n weight=W_train, free_raw_data=False)\nlgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,\n weight=W_test, free_raw_data=False)\n\n# specify your configurations as a dict\nparams = {\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'metric': 'binary_logloss',\n 'num_leaves': 31,\n 'learning_rate': 0.05,\n 'feature_fraction': 0.9,\n 'bagging_fraction': 0.8,\n 'bagging_freq': 5,\n 'verbose': 0\n}\n\n# generate feature names\nfeature_name = ['feature_' + str(col) for col in range(num_feature)]\n\nprint('Starting training...')\n# feature_name and categorical_feature\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n valid_sets=lgb_train, # eval training data\n feature_name=feature_name,\n categorical_feature=[21])\n\nprint('Finished first 10 rounds...')\n# check feature name\nprint('7th feature name is:', lgb_train.feature_name[6])\n\nprint('Saving model...')\n# save model to file\ngbm.save_model('model.txt')\n\nprint('Dumping model to JSON...')\n# dump model to JSON (and save to file)\nmodel_json = gbm.dump_model()\n\nwith open('model.json', 'w+') as f:\n json.dump(model_json, f, indent=4)\n\n# feature names\nprint('Feature names:', gbm.feature_name())\n\n# feature importances\nprint('Feature importances:', list(gbm.feature_importance()))\n\nprint('Loading model to predict...')\n# load model to predict\nbst = lgb.Booster(model_file='model.txt')\n# can only predict with the best iteration (or the saving iteration)\ny_pred = bst.predict(X_test)\n# eval with loaded model\nprint(\"The rmse of loaded model's prediction is:\", mean_squared_error(y_test, y_pred) ** 0.5)\n\nprint('Dumping and loading model with pickle...')\n# dump model with pickle\nwith open('model.pkl', 'wb') as fout:\n pickle.dump(gbm, fout)\n# load model with pickle to predict\nwith open('model.pkl', 'rb') as fin:\n pkl_bst = pickle.load(fin)\n# can predict with any iteration when loaded in pickle way\ny_pred = pkl_bst.predict(X_test, num_iteration=7)\n# eval with loaded model\nprint(\"The rmse of pickled model's prediction is:\", mean_squared_error(y_test, y_pred) ** 0.5)\n\n# continue training\n# init_model accepts:\n# 1. model file name\n# 2. Booster()\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model='model.txt',\n valid_sets=lgb_eval)\n\nprint('Finished 10 - 20 rounds with model file...')\n\n# decay learning rates\n# learning_rates accepts:\n# 1. list/tuple with length = num_boost_round\n# 2. 
function(curr_iter)\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model=gbm,\n learning_rates=lambda iter: 0.05 * (0.99 ** iter),\n valid_sets=lgb_eval)\n\nprint('Finished 20 - 30 rounds with decay learning rates...')\n\n# change other parameters during training\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model=gbm,\n valid_sets=lgb_eval,\n callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)])\n\nprint('Finished 30 - 40 rounds with changing bagging_fraction...')\n\n\n# self-defined objective function\n# f(preds: array, train_data: Dataset) -> grad: array, hess: array\n# log likelihood loss\ndef loglikelihood(preds, train_data):\n labels = train_data.get_label()\n preds = 1. / (1. + np.exp(-preds))\n grad = preds - labels\n hess = preds * (1. - preds)\n return grad, hess\n\n\n# self-defined eval metric\n# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool\n# binary error\n# NOTE: when you do customized loss function, the default prediction value is margin\n# This may make built-in evalution metric calculate wrong results\n# For example, we are doing log likelihood loss, the prediction is score before logistic transformation\n# Keep this in mind when you use the customization\ndef binary_error(preds, train_data):\n labels = train_data.get_label()\n preds = 1. / (1. + np.exp(-preds))\n return 'error', np.mean(labels != (preds > 0.5)), False\n\n\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model=gbm,\n fobj=loglikelihood,\n feval=binary_error,\n valid_sets=lgb_eval)\n\nprint('Finished 40 - 50 rounds with self-defined objective function and eval metric...')\n\n\n# another self-defined eval metric\n# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool\n# accuracy\n# NOTE: when you do customized loss function, the default prediction value is margin\n# This may make built-in evalution metric calculate wrong results\n# For example, we are doing log likelihood loss, the prediction is score before logistic transformation\n# Keep this in mind when you use the customization\ndef accuracy(preds, train_data):\n labels = train_data.get_label()\n preds = 1. / (1. + np.exp(-preds))\n return 'accuracy', np.mean(labels == (preds > 0.5)), True\n\n\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n init_model=gbm,\n fobj=loglikelihood,\n feval=lambda preds, train_data: [binary_error(preds, train_data),\n accuracy(preds, train_data)],\n valid_sets=lgb_eval)\n\nprint('Finished 50 - 60 rounds with self-defined objective function '\n 'and multiple self-defined eval metrics...')\n\nprint('Starting a new training job...')\n\n\n# callback\ndef reset_metrics():\n def callback(env):\n lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train)\n if env.iteration - env.begin_iteration == 5:\n print('Add a new valid dataset at iteration 5...')\n env.model.add_valid(lgb_eval_new, 'new_valid')\n callback.before_iteration = True\n callback.order = 0\n return callback\n\n\ngbm = lgb.train(params,\n lgb_train,\n num_boost_round=10,\n valid_sets=lgb_train,\n callbacks=[reset_metrics()])\n\nprint('Finished first 10 rounds with callback function...')\n" ]
[ [ "pandas.read_csv", "numpy.exp", "sklearn.metrics.mean_squared_error", "numpy.mean" ] ]
afonchikk/Audio-Classification
[ "6acc7015ec847a64338f6300dca608a0752ba554" ]
[ "predict.py" ]
[ "from tensorflow.keras.models import load_model\nfrom clean import downsample_mono, envelope\nfrom kapre.time_frequency import STFT, Magnitude, ApplyFilterbank, MagnitudeToDecibel\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\nfrom glob import glob\nimport argparse\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef make_prediction(args):\n # load the model\n model = load_model(args.model_fn,\n custom_objects={'STFT': STFT,\n 'Magnitude': Magnitude,\n 'ApplyFilterbank': ApplyFilterbank,\n 'MagnitudeToDecibel': MagnitudeToDecibel})\n\n # find the sound data\n wav_paths = glob('{}/**'.format(args.src_dir), recursive=True)\n wav_paths = sorted([x.replace(os.sep, '/') for x in wav_paths if '.wav' in x])\n classes = sorted(os.listdir(args.src_dir))\n labels = [os.path.split(x)[0].split('/')[-1] for x in wav_paths]\n le = LabelEncoder()\n y_true = le.fit_transform(labels)\n results = []\n\n for z, wav_fn in tqdm(enumerate(wav_paths), total=len(wav_paths)):\n rate, wav = downsample_mono(wav_fn, args.sr)\n mask, env = envelope(wav, rate, threshold=args.threshold)\n clean_wav = wav[mask]\n step = int(args.sr * args.dt)\n batch = []\n\n for i in range(0, clean_wav.shape[0], step):\n sample = clean_wav[i:i + step]\n sample = sample.reshape(-1, 1)\n if sample.shape[0] < step:\n tmp = np.zeros(shape=(step, 1), dtype=np.float32)\n tmp[:sample.shape[0], :] = sample.flatten().reshape(-1, 1)\n sample = tmp\n batch.append(sample)\n X_batch = np.array(batch, dtype=np.float32)\n y_pred = model.predict(X_batch)\n y_mean = np.mean(y_pred, axis=0)\n y_pred = np.argmax(y_mean)\n real_class = os.path.dirname(wav_fn).split('/')[-1]\n print('Actual class: {}, Predicted class: {}'.format(real_class, classes[y_pred]))\n results.append(y_mean)\n\n np.save(os.path.join('logs', args.pred_fn), np.array(results))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Audio Classification Training')\n parser.add_argument('--model_fn', type=str, default='models/lstm.h5',\n help='model file to make predictions')\n parser.add_argument('--pred_fn', type=str, default='y_pred',\n help='fn to write predictions in logs dir')\n parser.add_argument('--src_dir', type=str, default='wavfiles',\n help='directory containing wavfiles to predict')\n parser.add_argument('--dt', type=float, default=1.0,\n help='time in seconds to sample audio')\n parser.add_argument('--sr', type=int, default=16000,\n help='sample rate of clean audio')\n parser.add_argument('--threshold', type=str, default=20,\n help='threshold magnitude for np.int16 dtype')\n args, _ = parser.parse_known_args()\n\n make_prediction(args)\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.zeros", "numpy.argmax", "sklearn.preprocessing.LabelEncoder", "numpy.array", "numpy.mean" ] ]
electr0de/APControllerProjectGit
[ "141ac08e716d6ac8cebe7b144b744744024d8939" ]
[ "simglucose/controller/PaperController.py" ]
[ "from functools import partial\nfrom pprint import pprint\nimport matplotlib.pyplot as plt\n\n# import test2\nfrom simglucose.controller.base import Controller\n#from datetime import datetime, timedelta, time\nimport numpy as np\nimport math\n\npercent_value = 0.05\n\nsign = lambda x: math.copysign(1, x)\n\nnormalize_f = lambda x: (x - 39) / (600 - 39)\n\n\nclass PaperRLController(Controller):\n\n def __init__(self, a_hyper=1, a_hypo=10, current_breakfast_bolus=0.0, current_lunch_bolus=0.0,\n current_dinner_bolus=0.0, current_basal_rate=0.0, current_snack_bolus=0.0, init_state=None):\n super().__init__(init_state)\n np.random.seed(1)\n\n self.a_hyper = a_hyper\n self.hypo = a_hypo\n self.GL = normalize_f(90)\n self.GH = normalize_f(150)\n self.current_basal_rate = current_basal_rate\n self.current_breakfast_bolus = current_breakfast_bolus # bolus means IC ratio\n self.current_lunch_bolus = current_lunch_bolus\n self.current_dinner_bolus = current_dinner_bolus\n # self.current_snack_bolus = current_snack_bolus\n self.basal_theta = []\n self.bolus_theta = []\n # np.random.seed(2)\n # self.bolus_theta = np.random.rand(2).tolist()\n self.h = 0.5\n self.c_sigma = 0.05\n self.m = 0.5\n self.previous_basal_rate = 0.0\n np.random.seed(55)\n self.w = (np.random.rand(2) * 2 - 1).tolist()\n self._lambda = 0.5\n self.gamma = 0.9\n self.z = [0.0, 0.0]\n self.a = 0.5\n self.beta = 0.5\n self.beta_basal = 0.5\n self.value_factor = 10\n # self.time_array = []\n # self.theta_array_1 = []\n # self.theta_array_2 = []\n # self.bolus_time_array = []\n # self.F_1_array = []\n # self.F_2_array = []\n # plt.figure(200)\n # self.fig, self.axis = plt.subplots(4)\n # plt.show()\n # self.axis[0].set_title(\" Hyper feature for basal\")\n # self.axis[1].set_title(\" Hypo feature for basal\")\n # self.axis[2].set_title(\"Hyper theta for basal\")\n # self.axis[3].set_title(\" Hypo theta for basal\")\n\n self.previous_state_basal = None\n self.previous_state_breakfast = None\n self.previous_state_lunch = None\n self.previous_state_dinner = None\n\n def extract_features(self, array):\n M_hyper = []\n M_hypo = []\n\n for element in array:\n if element > 150:\n M_hyper.append(normalize_f(element))\n elif element < 90:\n M_hypo.append(normalize_f(element))\n\n F_hyper = sum([element - self.GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0\n\n F_hypo = sum([self.GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0\n\n return (F_hyper, F_hypo)\n\n def calculate_basal(self, previous_state, basal_array, time):\n F_hyper, F_hypo = self.extract_features(basal_array)\n F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)\n\n #\n # self.F_1_array.append(F_hyper)\n # self.F_2_array.append(F_hypo)\n # self.time_array.append(time)\n #\n # self.axis[0].plot(self.time_array, self.F_1_array)\n #\n # self.axis[1].plot(self.time_array, self.F_2_array)\n #\n # plt.pause(0.001)\n\n Ps = None\n if F_hypo == 0.0:\n Ps = 0\n elif F_hypo > 0.0 and F_hyper == 0.0:\n Ps = -0.1 * F_hypo\n elif F_hypo > 0.0 and F_hyper > 0.0:\n Ps = -0.05 * F_hypo\n\n assert Ps is not None, \"No conditions matched\"\n\n P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), True)\n\n self.previous_basal_rate = self.current_basal_rate\n\n br_change = self.m * P * self.current_basal_rate\n\n # uncomment to enable 5 % change\n # percent_value = 0\n if abs(br_change / self.current_basal_rate) > percent_value:\n self.current_basal_rate += self.current_basal_rate * percent_value * sign(br_change)\n 
print(\" used % changed\")\n else:\n self.current_basal_rate += br_change\n print(\" didn't use % changed\")\n return self.current_basal_rate\n\n def calculate_bolus(self, previous_state, next_state, food_counter, time):\n F_hyper, F_hypo = self.extract_features(next_state)\n\n F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)\n\n #\n # self.F_1_array.append(F_hyper)\n # self.F_2_array.append(F_hypo)\n # self.bolus_time_array.append(time)\n #\n # self.axis[0].plot(self.bolus_time_array, self.F_1_array)\n # self.axis[1].plot(self.bolus_time_array, self.F_2_array)\n\n Ps = None\n if F_hypo == 0.0:\n Ps = 0\n elif F_hypo > 0.0 and F_hyper == 0.0:\n Ps = +0.1 * F_hypo\n elif F_hypo > 0.0 and F_hyper > 0.0:\n Ps = +0.05 * F_hypo\n\n assert Ps is not None, \"No conditions matched\"\n\n P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), False, food_counter)\n\n if food_counter == 0:\n self.current_breakfast_bolus = self.update_bolus(self.current_breakfast_bolus, P)\n return self.current_breakfast_bolus\n\n if food_counter == 1:\n self.current_lunch_bolus = self.update_bolus(self.current_lunch_bolus, P)\n return self.current_lunch_bolus\n\n if food_counter == 2:\n self.current_dinner_bolus = self.update_bolus(self.current_dinner_bolus, P)\n return self.current_dinner_bolus\n # if food_counter == 3:\n # self.current_snack_bolus = self.update_bolus(self.current_snack_bolus, P)\n # return self.current_snack_bolus\n return 0.0\n\n def perform_update(self, Ps, F_old, F, coming_from, food_counter=None):\n\n if coming_from:\n theta = self.basal_theta\n previous_state = self.previous_state_basal\n else:\n theta = self.bolus_theta\n if food_counter == 0:\n previous_state = self.previous_state_breakfast\n elif food_counter == 1:\n previous_state = self.previous_state_lunch\n elif food_counter == 2:\n previous_state = self.previous_state_dinner\n else:\n return 0\n\n # theta = self.theta\n\n print(f\"theta: {theta}\")\n\n Pa = sum([element1 * element2 for element1, element2 in zip(F, theta)])\n\n Pd = self.h * Pa + (1 - self.h) * Ps\n\n sigma = self.c_sigma * (F[0] ** 2 + F[1] ** 2)\n\n Pe = Pd + np.random.normal(0, sigma)\n\n cost = 1 * F[0] + self.value_factor * F[1]\n\n if not previous_state:\n previous_state = sum(\n [((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F_old, self.w)])\n\n next_value = sum(\n [((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F, self.w)])\n d = cost + self.gamma * next_value - previous_state\n\n self.w = [element1 + self.a * d * element2 for element1, element2 in zip(self.w, self.z)]\n\n self.z = [self._lambda * element1 + element2 for element1, element2 in zip(self.z, F)]\n\n if coming_from:\n self.basal_theta = [element1 - self.beta_basal * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for\n element1, element2 in zip(self.basal_theta, F)]\n self.previous_state_basal = next_value\n else:\n self.bolus_theta = [element1 - self.beta * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for\n element1, element2 in zip(self.bolus_theta, F)]\n\n if food_counter == 0:\n self.previous_state_breakfast = next_value\n elif food_counter == 1:\n self.previous_state_lunch = next_value\n else:\n self.previous_state_dinner = next_value\n\n assert sigma > 0.0000001, \"sigma is too low\"\n # self.theta_array_1.append(self.theta[0])\n # self.theta_array_2.append(self.theta[1])\n # self.axis[2].plot(self.time_array, self.theta_array_1)\n # self.axis[3].plot(self.time_array, 
self.theta_array_2)\n\n return Pe\n\n def update_bolus(self, old_bolus, P):\n fusion_rate = old_bolus + self.m * P * old_bolus\n\n l = 1 if (self.current_basal_rate > self.previous_basal_rate and fusion_rate < old_bolus) or (\n self.current_basal_rate < self.previous_basal_rate and fusion_rate > old_bolus) else 0\n\n # fusion_rate = l * old_bolus + (1 - l) * fusion_rate\n bl_change = fusion_rate - old_bolus\n\n if abs(bl_change / old_bolus) > percent_value:\n old_bolus += sign(bl_change) * old_bolus * percent_value\n print(\" used % changed\")\n else:\n old_bolus += bl_change\n print(\" didn't use % changed\")\n return old_bolus\n\n# if __name__ == '__main__':\n#\n# GL = normalize_f(90)\n# GH = normalize_f(150)\n#\n# def extract_features(array):\n# M_hyper = []\n# M_hypo = []\n#\n# for element in array:\n# if element > 150:\n# M_hyper.append(normalize_f(element))\n# elif element < 90:\n# M_hypo.append(normalize_f(element))\n#\n# F_hyper = sum([element - GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0\n#\n# F_hypo = sum([GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0\n#\n# return (F_hyper, F_hypo)\n#\n# array = test2.array\n# print(extract_features(array))\n" ]
[ [ "numpy.random.normal", "numpy.random.seed", "numpy.random.rand" ] ]
0Miquel/LIIF-temporal
[ "b992cb87cb9bdeba6d4c9bc3960b36ba52a1ba75" ]
[ "models/rdn.py" ]
[ "# Residual Dense Network for Image Super-Resolution\r\n# https://arxiv.org/abs/1802.08797\r\n# modified from: https://github.com/thstkdgus35/EDSR-PyTorch\r\n\r\nfrom argparse import Namespace\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom models import register\r\n\r\n\r\nclass RDB_Conv(nn.Module):\r\n def __init__(self, inChannels, growRate, kSize=3):\r\n super(RDB_Conv, self).__init__()\r\n Cin = inChannels\r\n G = growRate\r\n self.conv = nn.Sequential(*[\r\n #nn.Conv2d(Cin, G, kSize, padding=(kSize-1)//2, stride=1),\r\n nn.Conv3d(Cin, G, kSize, padding=(kSize - 1) // 2, stride=1),\r\n nn.ReLU()\r\n ])\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n return torch.cat((x, out), 1)\r\n\r\nclass RDB(nn.Module):\r\n def __init__(self, growRate0, growRate, nConvLayers, kSize=3):\r\n super(RDB, self).__init__()\r\n G0 = growRate0\r\n G = growRate\r\n C = nConvLayers\r\n\r\n convs = []\r\n for c in range(C):\r\n convs.append(RDB_Conv(G0 + c*G, G))\r\n self.convs = nn.Sequential(*convs)\r\n\r\n # Local Feature Fusion\r\n self.LFF = nn.Conv3d(G0 + C * G, G0, 1, padding=0, stride=1)\r\n #self.LFF = nn.Conv2d(G0 + C*G, G0, 1, padding=0, stride=1)\r\n\r\n def forward(self, x):\r\n return self.LFF(self.convs(x)) + x\r\n\r\nclass RDN(nn.Module):\r\n def __init__(self, args):\r\n super(RDN, self).__init__()\r\n self.args = args\r\n r = args.scale[0]\r\n G0 = args.G0\r\n kSize = args.RDNkSize\r\n\r\n # number of RDB blocks, conv layers, out channels\r\n self.D, C, G = {\r\n 'A': (20, 6, 32),\r\n 'B': (16, 8, 64),\r\n }[args.RDNconfig]\r\n\r\n # Shallow feature extraction net\r\n #self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n #self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n self.SFENet1 = nn.Conv3d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n self.SFENet2 = nn.Conv3d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n\r\n # Redidual dense blocks and dense feature fusion\r\n self.RDBs = nn.ModuleList()\r\n for i in range(self.D):\r\n self.RDBs.append(\r\n RDB(growRate0 = G0, growRate = G, nConvLayers = C)\r\n )\r\n\r\n # Global Feature Fusion\r\n self.GFF = nn.Sequential(*[\r\n #nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),\r\n #nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)\r\n nn.Conv3d(self.D * G0, G0, 1, padding=0, stride=1),\r\n nn.Conv3d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)\r\n ])\r\n\r\n if args.no_upsampling:\r\n self.out_dim = G0\r\n else:\r\n self.out_dim = args.n_colors\r\n # Up-sampling net\r\n if r == 2 or r == 3:\r\n self.UPNet = nn.Sequential(*[\r\n nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),\r\n nn.PixelShuffle(r),\r\n nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)\r\n ])\r\n elif r == 4:\r\n self.UPNet = nn.Sequential(*[\r\n nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),\r\n nn.PixelShuffle(2),\r\n nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),\r\n nn.PixelShuffle(2),\r\n nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)\r\n ])\r\n else:\r\n raise ValueError(\"scale must be 2 or 3 or 4.\")\r\n\r\n def forward(self, x):\r\n f__1 = self.SFENet1(x)\r\n x = self.SFENet2(f__1)\r\n\r\n RDBs_out = []\r\n for i in range(self.D):\r\n x = self.RDBs[i](x)\r\n RDBs_out.append(x)\r\n\r\n x = self.GFF(torch.cat(RDBs_out,1))\r\n x += f__1\r\n\r\n if self.args.no_upsampling:\r\n return x\r\n else:\r\n return self.UPNet(x)\r\n\r\n\r\n@register('rdn')\r\ndef make_rdn(G0=64, RDNkSize=3, 
RDNconfig='B',\r\n scale=2, no_upsampling=False):\r\n args = Namespace()\r\n args.G0 = G0\r\n args.RDNkSize = RDNkSize\r\n args.RDNconfig = RDNconfig\r\n\r\n args.scale = [scale]\r\n args.no_upsampling = no_upsampling\r\n\r\n args.n_colors = 3\r\n return RDN(args)\r\n" ]
[ [ "torch.nn.PixelShuffle", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.Conv3d", "torch.nn.ReLU", "torch.cat" ] ]
danagi/tianshou
[ "c97aa4065ee8464bd5897bb86f1f81abd8e2cff9" ]
[ "tianshou/policy/modelfree/discrete_sac.py" ]
[ "import torch\nimport numpy as np\nfrom torch.distributions import Categorical\nfrom typing import Any, Dict, Tuple, Union, Optional\n\nfrom tianshou.policy import SACPolicy\nfrom tianshou.data import Batch, ReplayBuffer, to_torch\n\n\nclass DiscreteSACPolicy(SACPolicy):\n \"\"\"Implementation of SAC for Discrete Action Settings. arXiv:1910.07207.\n\n :param torch.nn.Module actor: the actor network following the rules in\n :class:`~tianshou.policy.BasePolicy`. (s -> logits)\n :param torch.optim.Optimizer actor_optim: the optimizer for actor network.\n :param torch.nn.Module critic1: the first critic network. (s -> Q(s))\n :param torch.optim.Optimizer critic1_optim: the optimizer for the first\n critic network.\n :param torch.nn.Module critic2: the second critic network. (s -> Q(s))\n :param torch.optim.Optimizer critic2_optim: the optimizer for the second\n critic network.\n :param float tau: param for soft update of the target network, defaults to\n 0.005.\n :param float gamma: discount factor, in [0, 1], defaults to 0.99.\n :param (float, torch.Tensor, torch.optim.Optimizer) or float alpha: entropy\n regularization coefficient, default to 0.2.\n If a tuple (target_entropy, log_alpha, alpha_optim) is provided, then\n alpha is automatatically tuned.\n :param bool reward_normalization: normalize the reward to Normal(0, 1),\n defaults to ``False``.\n :param bool ignore_done: ignore the done flag while training the policy,\n defaults to ``False``.\n\n .. seealso::\n\n Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed\n explanation.\n \"\"\"\n\n def __init__(\n self,\n actor: torch.nn.Module,\n actor_optim: torch.optim.Optimizer,\n critic1: torch.nn.Module,\n critic1_optim: torch.optim.Optimizer,\n critic2: torch.nn.Module,\n critic2_optim: torch.optim.Optimizer,\n tau: float = 0.005,\n gamma: float = 0.99,\n alpha: Union[\n float, Tuple[float, torch.Tensor, torch.optim.Optimizer]\n ] = 0.2,\n reward_normalization: bool = False,\n ignore_done: bool = False,\n estimation_step: int = 1,\n **kwargs: Any,\n ) -> None:\n super().__init__(actor, actor_optim, critic1, critic1_optim, critic2,\n critic2_optim, (-np.inf, np.inf), tau, gamma, alpha,\n reward_normalization, ignore_done, estimation_step,\n **kwargs)\n self._alpha: Union[float, torch.Tensor]\n\n def forward( # type: ignore\n self,\n batch: Batch,\n state: Optional[Union[dict, Batch, np.ndarray]] = None,\n input: str = \"obs\",\n **kwargs: Any,\n ) -> Batch:\n obs = batch[input]\n logits, h = self.actor(obs, state=state, info=batch.info)\n dist = Categorical(logits=logits)\n act = dist.sample()\n return Batch(logits=logits, act=act, state=h, dist=dist)\n\n def _target_q(\n self, buffer: ReplayBuffer, indice: np.ndarray\n ) -> torch.Tensor:\n batch = buffer[indice] # batch.obs: s_{t+n}\n with torch.no_grad():\n obs_next_result = self(batch, input=\"obs_next\")\n dist = obs_next_result.dist\n target_q = dist.probs * torch.min(\n self.critic1_old(batch.obs_next),\n self.critic2_old(batch.obs_next),\n )\n target_q = target_q.sum(dim=-1) + self._alpha * dist.entropy()\n return target_q\n\n def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:\n weight = batch.pop(\"weight\", 1.0)\n target_q = batch.returns.flatten()\n act = to_torch(\n batch.act[:, np.newaxis], device=target_q.device, dtype=torch.long)\n\n # critic 1\n current_q1 = self.critic1(batch.obs).gather(1, act).flatten()\n td1 = current_q1 - target_q\n critic1_loss = (td1.pow(2) * weight).mean()\n\n self.critic1_optim.zero_grad()\n 
critic1_loss.backward()\n self.critic1_optim.step()\n\n # critic 2\n current_q2 = self.critic2(batch.obs).gather(1, act).flatten()\n td2 = current_q2 - target_q\n critic2_loss = (td2.pow(2) * weight).mean()\n\n self.critic2_optim.zero_grad()\n critic2_loss.backward()\n self.critic2_optim.step()\n batch.weight = (td1 + td2) / 2.0 # prio-buffer\n\n # actor\n dist = self(batch).dist\n entropy = dist.entropy()\n with torch.no_grad():\n current_q1a = self.critic1(batch.obs)\n current_q2a = self.critic2(batch.obs)\n q = torch.min(current_q1a, current_q2a)\n actor_loss = -(self._alpha * entropy\n + (dist.probs * q).sum(dim=-1)).mean()\n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n\n if self._is_auto_alpha:\n log_prob = -entropy.detach() + self._target_entropy\n alpha_loss = -(self._log_alpha * log_prob).mean()\n self._alpha_optim.zero_grad()\n alpha_loss.backward()\n self._alpha_optim.step()\n self._alpha = self._log_alpha.detach().exp()\n\n self.sync_weight()\n\n result = {\n \"loss/actor\": actor_loss.item(),\n \"loss/critic1\": critic1_loss.item(),\n \"loss/critic2\": critic2_loss.item(),\n }\n if self._is_auto_alpha:\n result[\"loss/alpha\"] = alpha_loss.item()\n result[\"alpha\"] = self._alpha.item() # type: ignore\n\n return result\n" ]
[ [ "torch.min", "torch.no_grad", "torch.distributions.Categorical" ] ]
mayanks888/second.pytorch
[ "02d37885a543ee46516648dcab7db8f5d677a179" ]
[ "second/mayank_scripts/infer_ros_melodic_pretained_same_frame.py" ]
[ "#!/usr/bin/env python\n# ROS node libs\n\nimport time\n\nimport numpy as np\nimport rospy\nimport torch\n# from geometry_msgs.msg import Quaternion, Pose, Point, Vector3\nfrom pyquaternion import Quaternion\nfrom google.protobuf import text_format\nfrom sensor_msgs.msg import PointCloud2\nfrom std_msgs.msg import Header, ColorRGBA\n# from cv_bridge import CvBridge, CvBridgeError\nfrom visualization_msgs.msg import Marker, MarkerArray\n\nfrom second.protos import pipeline_pb2\n# from second.utils import simplevis\nfrom second.pytorch.train import build_network\nfrom second.utils import config_tool\nfrom std_msgs.msg import Int16, Float32MultiArray\nfrom jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray\n# import ros_numpy\n\n\n# GPU settings: Select GPUs to use. Coment it to let the system decide\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nclass ros_tensorflow_obj():\n def __init__(self):\n # ## Initial msg\n rospy.loginfo(' ## Starting ROS interface ##')\n # ## Load a (frozen) Tensorflow model into memory.\n print(\"ready to process----------------------------------------------------------\")\n ####################################################################################333\n # config_path = \"../configs/nuscenes/all.pp.largea.config\"\n # config_path = \"/home/mayank_sati/codebase/python/lidar/second.pytorch/second/configs/pointpillars/car/xyres_28.config\"\n config_path = \"/home/mayank_sati/codebase/python/lidar/second.pytorch/second/configs/pointpillars/car/xyres_24.config\"\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n # config_tool.change_detection_range(model_cfg, [-50, -50, 50, 50])\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # ckpt_path = \"../checkpoint/voxelnet-140670.tckpt\"\n ckpt_path=\"/home/mayank_sati/Downloads/pretrained_models_v1.5/pp_model_for_nuscenes_pretrain/voxelnet-296960.tckpt\"\n net = build_network(model_cfg).to(device).eval()\n net.load_state_dict(torch.load(ckpt_path))\n target_assigner = net.target_assigner\n self.voxel_generator = net.voxel_generator\n\n class_names = target_assigner.classes\n\n grid_size = self.voxel_generator.grid_size\n feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(model_cfg)\n feature_map_size = [*feature_map_size, 1][::-1]\n anchors = target_assigner.generate_anchors(feature_map_size)[\"anchors\"]\n anchors = torch.tensor(anchors, dtype=torch.float32, device=device)\n anchors = anchors.view(1, -1, 7)\n # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n feature_map_size = [1, 50, 50]\n ret = target_assigner.generate_anchors(feature_map_size)\n class_names = target_assigner.classes\n anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)\n anchors_list = []\n for k, v in anchors_dict.items():\n anchors_list.append(v[\"anchors\"])\n\n # anchors = ret[\"anchors\"]\n anchors = np.concatenate(anchors_list, axis=0)\n anchors = anchors.reshape([-1, target_assigner.box_ndim])\n assert np.allclose(anchors, ret[\"anchors\"].reshape(-1, target_assigner.box_ndim))\n matched_thresholds = ret[\"matched_thresholds\"]\n unmatched_thresholds = ret[\"unmatched_thresholds\"]\n # anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])\n anchors_bv = 2\n anchor_cache = {\n \"anchors\": anchors,\n \"anchors_bv\": anchors_bv,\n 
\"matched_thresholds\": matched_thresholds,\n \"unmatched_thresholds\": unmatched_thresholds,\n \"anchors_dict\": anchors_dict,\n }\n anchors = torch.tensor(anchors, dtype=torch.float32, device=device)\n self.anchors = anchors.view(1, -1, 7)\n self.net = net\n self.device = device\n ##########################################################################################\n # self.marker_publisher = rospy.Publisher('visualization_marker', MarkerArray, queue_size=5)\n self.pcl_publisher = rospy.Publisher('result_pcl', PointCloud2, queue_size=1)\n ############\n # [print(n.name) for n in tf.get_default_graph().as_graph_def().node]\n # ROS environment setup\n # ## Define subscribers\n self.subscribers_def()\n # ## Define publishers\n self.publishers_def()\n self.now = rospy.Time.now()\n\n # Define subscribers\n def subscribers_def(self):\n # subs_topic = '/kitti/velo/pointcloud'\n #subs_topic = '/apollo/sensor/velodyne64/compensator/PointCloud2'\n # subs_topic = '/velodyne64_points'\n\n # subs_topic = '/apollo/sensor/velodyne64/PointCloud2'\n # subs_topic = '/points_raw'\n # subs_topic = '/livox/lidar'\n # subs_topic = '/apollo/sensor/velodyne32C/compensator/PointCloud2'\n subs_topic = '/lidar_top'\n self._sub = rospy.Subscriber(subs_topic, PointCloud2, self.lidar_callback, queue_size=10, buff_size=2 ** 24)\n # mydata = rospy.Subscriber( subs_topic , PointCloud2, self.lidar_callback, queue_size=1, buff_size=2**24)\n # print(mydata)\n\n # self._sub = rospy.Subscriber( subs_topic , Image, self.lidar_callback, queue_size=1, buff_size=100)\n\n # Define publishers\n def publishers_def(self):\n self._pub = rospy.Publisher('pc_bbox_topic', Float32MultiArray, queue_size=1)\n self.pub_arr_bbox = rospy.Publisher(\"Detections\", BoundingBoxArray, queue_size=1)\n\n # Camera image callback\n def lidar_callback(self, point_cl_msg):\n arr_bbox = BoundingBoxArray()\n ############################################################################3\n # lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)\n # points = lidar.reshape(-1, 4)\n # print('gotit\"')\n # pc = ros_numpy.numpify(point_cl_msg)\n # points = np.zeros((pc.shape[0], 4))\n # points[:, 0] = pc['x']\n # points[:, 1] = pc['y']\n # points[:, 2] = pc['z']\n # points[:, 3] = pc['intensity']\n # points[:, 3] /= 255\n #########################################################333\n lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)\n points = lidar.reshape(-1, 4)\n points[:, 3] /= 255\n\n #######################################################################\n res = self.voxel_generator.generate(points, max_voxels=30000)\n voxels = res[\"voxels\"]\n coords = res[\"coordinates\"]\n num_points = res[\"num_points_per_voxel\"]\n num_voxels = np.array([voxels.shape[0]], dtype=np.int64)\n # print(\"voxel_generator_time\",(time.time() - t)*1000)\n ###############################################################\n # print(voxels.shape)\n # add batch idx to coords\n coords = np.pad(coords, ((0, 0), (1, 0)), mode='constant', constant_values=0)\n voxels = torch.tensor(voxels, dtype=torch.float32, device=self.device)\n coords = torch.tensor(coords, dtype=torch.int32, device=self.device)\n num_points = torch.tensor(num_points, dtype=torch.int32, device=self.device)\n # print(\"conversion time\",(time.time() - t)*1000)\n example = {\"anchors\": self.anchors, \"voxels\": voxels, \"num_points\": num_points, \"coordinates\": coords, }\n t2 = time.time()\n pred = self.net(example)[0]\n # print(pred)\n # print(\"prediction\",(time.time() - t2)*1000)\n # 
print(\"total_time\",(time.time() - t)*1000)\n boxes_lidar = pred[\"box3d_lidar\"].detach().cpu().numpy()\n scores_lidar = pred[\"scores\"].detach().cpu().numpy()\n labels_lidar = pred[\"label_preds\"].detach().cpu().numpy()\n ##############################3333\n threshold = 0.2\n keep = np.where((scores_lidar >= threshold))[0]\n scores_lidar = scores_lidar[keep]\n print(scores_lidar)\n boxes_lidar = boxes_lidar[keep]\n labels_lidar = labels_lidar[keep]\n # sco\n # print(scores_lidar)\n ################################################################################\n # self.show_text_in_rviz_mullti_cube(boxes_lidar,point_cl_msg)\n # self.show_text_in_rviz_mullti_sphere(boxes_lidar,point_cl_msg)\n ##################################################################################\n # apollo integration\n # numboxes = np.squeeze(scores_lidar)\n numboxes = len(scores_lidar)\n tl_bbox = Float32MultiArray()\n iLen = boxes_lidar.shape[0]\n lidar_bbox = Float32MultiArray()\n print('Processing no of object:', iLen)\n\n if (numboxes) >= 1:\n tmp = -np.ones(10 * (numboxes) + 1)\n for i in range(0, int(numboxes)):\n try:\n score = float((scores_lidar)[i])\n if (boxes_lidar.shape[0]) == 1:\n bboxes = [float(v) for v in (boxes_lidar)[i]]\n else:\n bboxes = [float(v) for v in np.squeeze(boxes_lidar)[i]]\n tmp[0] = numboxes\n tmp[10 * i + 1] = score\n tmp[10 * i + 2] = bboxes[0]\n tmp[10 * i + 3] = bboxes[1]\n tmp[10 * i + 4] = bboxes[2]\n tmp[10 * i + 5] = bboxes[3]\n tmp[10 * i + 6] = bboxes[4]\n tmp[10 * i + 7] = bboxes[5]\n tmp[10 * i + 8] = bboxes[6]\n tmp[10 * i + 9] = 0\n tmp[10 * i + 10] = 0\n bbox = BoundingBox()\n # bbox.header.frame_id = point_cl_msg.header.frame_id\n # bbox.header.frame_id = 'livox_frame'\n bbox.header.frame_id = 'lidar_top'\n q = Quaternion(axis=(0, 0, 1), radians=-1.0 * float(boxes_lidar[i][6]))\n bbox.pose.orientation.x = q.x\n bbox.pose.orientation.y = q.y\n bbox.pose.orientation.z = q.z\n bbox.pose.orientation.w = q.w\n bbox.pose.position.x = float(boxes_lidar[i][0])\n bbox.pose.position.y = float(boxes_lidar[i][1])\n bbox.pose.position.z = float(boxes_lidar[i][2])\n bbox.dimensions.x = float(boxes_lidar[i][3])\n bbox.dimensions.y = float(boxes_lidar[i][4])\n bbox.dimensions.z = float(boxes_lidar[i][5])\n arr_bbox.boxes.append(bbox)\n\n except:\n print(\"I am here\")\n # here data for publishing\n tl_bbox.data = tmp\n self._pub.publish(tl_bbox)\n arr_bbox.header.frame_id = point_cl_msg.header.frame_id\n self.pub_arr_bbox.publish(arr_bbox)\n\n point_cl_msg.header.frame_id = point_cl_msg.header.frame_id\n self.pcl_publisher.publish(point_cl_msg)\n arr_bbox.boxes.clear()\n\n\ndef spin(self):\n rospy.spin()\n\n\ndef main():\n rospy.init_node('LIDAR_NODE', anonymous=True)\n tf_ob = ros_tensorflow_obj()\n\n # tf_ob.subscribers_def\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.ones", "torch.load", "numpy.squeeze", "torch.tensor", "numpy.pad", "numpy.where", "torch.cuda.is_available", "numpy.array", "numpy.concatenate", "numpy.fromstring" ] ]
MaxSchambach/colour
[ "3f3685d616fda4be58cec20bc1e16194805d7e2d", "3f3685d616fda4be58cec20bc1e16194805d7e2d", "3f3685d616fda4be58cec20bc1e16194805d7e2d" ]
[ "colour/corresponding/datasets/breneman1987.py", "colour/models/rgb/transfer_functions/itur_bt_1886.py", "colour/volume/tests/test_mesh.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nBreneman Corresponding Chromaticities Dataset\n=============================================\n\nDefines *Breneman (1987)* results for corresponding chromaticities experiments.\n\nSee Also\n--------\n`Corresponding Chromaticities Prediction Jupyter Notebook\n<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\\\nblob/master/notebooks/corresponding/prediction.ipynb>`_\n\nReferences\n----------\n- :cite:`Breneman1987b` : Breneman, E. J. (1987). Corresponding\n chromaticities for different states of adaptation to complex visual fields.\n Journal of the Optical Society of America A, 4(6), 1115.\n doi:10.1364/JOSAA.4.001115\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\n\nfrom collections import namedtuple\n\nfrom colour.utilities.documentation import DocstringDict\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'BrenemanExperimentResult', 'PrimariesChromaticityCoordinates',\n 'BRENEMAN_EXPERIMENT_1_RESULTS', 'BRENEMAN_EXPERIMENT_2_RESULTS',\n 'BRENEMAN_EXPERIMENT_3_RESULTS', 'BRENEMAN_EXPERIMENT_4_RESULTS',\n 'BRENEMAN_EXPERIMENT_5_RESULTS', 'BRENEMAN_EXPERIMENT_6_RESULTS',\n 'BRENEMAN_EXPERIMENT_7_RESULTS', 'BRENEMAN_EXPERIMENT_10_RESULTS',\n 'BRENEMAN_EXPERIMENT_8_RESULTS', 'BRENEMAN_EXPERIMENT_9_RESULTS',\n 'BRENEMAN_EXPERIMENT_11_RESULTS', 'BRENEMAN_EXPERIMENT_12_RESULTS',\n 'BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES', 'BRENEMAN_EXPERIMENTS'\n]\n\n\nclass BrenemanExperimentResult(\n namedtuple('BrenemanExperimentResult',\n ('name', 'uv_t', 'uv_m', 's_uv', 'd_uv_i', 'd_uv_g'))):\n \"\"\"\n Experiment result.\n\n Parameters\n ----------\n name : unicode\n Test colour name.\n uv_t : numeric\n Chromaticity coordinates :math:`uv_t^p` of test colour.\n uv_m : array_like, (2,)\n Chromaticity coordinates :math:`uv_m^p` of matching colour.\n s_uv : array_like, (2,), optional\n Interobserver variation (:math:`x10^3`) :math:`\\\\sigma_uv^p`.\n d_uv_i : array_like, (2,), optional\n Deviation of individual linear transformation (:math:`x10^3`)\n :math:`\\\\delta_uv_i^p`.\n d_uv_g : array_like, (2,), optional\n Deviation of individual linear transformation (:math:`x10^3`)\n :math:`\\\\delta_uv_g^p`.\n \"\"\"\n\n def __new__(cls, name, uv_t, uv_m, s_uv=None, d_uv_i=None, d_uv_g=None):\n \"\"\"\n Returns a new instance of the\n :class:`colour.corresponding.datasets.corresponding_chromaticities.\\\nBrenemanExperimentResult` class.\n \"\"\"\n\n return super(BrenemanExperimentResult, cls).__new__(\n cls, name, np.array(uv_t), np.array(uv_m), np.array(s_uv),\n np.array(d_uv_i), np.array(d_uv_g))\n\n\nclass PrimariesChromaticityCoordinates(\n namedtuple(\n 'PrimariesChromaticityCoordinates',\n ('experiment', 'illuminants', 'Y', 'P_uvp', 'D_uvp', 'T_uvp'))):\n \"\"\"\n Chromaticity coordinates of primaries.\n\n Parameters\n ----------\n experiment : integer\n Experiment.\n illuminants : array_like, (2,)\n Chromaticity coordinates :math:`uv_t^p` of test colour.\n Y : numeric\n White luminance :math:`Y` in :math:`cd/m^2`.\n P_uvp : numeric\n Chromaticity coordinates :math:`uv^p` of primary :math:`P`.\n D_uvp : numeric\n Chromaticity coordinates :math:`uv^p` of primary :math:`D`.\n T_uvp : numeric\n Chromaticity coordinates :math:`uv^p` of primary :math:`T`.\n \"\"\"\n\n def 
__new__(cls,\n experiment,\n illuminants,\n Y,\n P_uvp=None,\n D_uvp=None,\n T_uvp=None):\n \"\"\"\n Returns a new instance of the\n :class:`colour.corresponding.datasets.corresponding_chromaticities.\\\nPrimariesChromaticityCoordinates` class.\n \"\"\"\n\n return super(PrimariesChromaticityCoordinates, cls).__new__(\n cls, experiment, np.array(illuminants), np.array(Y),\n np.array(P_uvp), np.array(D_uvp), np.array(T_uvp))\n\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_1_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.259, 0.526), (0.200, 0.475)),\n BrenemanExperimentResult(\n 'Gray',\n (0.259, 0.524), (0.199, 0.487), (4, 4), (2, 3), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.459, 0.522), (0.420, 0.509), (19, 4), (-10, -7), (-19, -3)),\n BrenemanExperimentResult(\n 'Skin',\n (0.307, 0.526), (0.249, 0.497), (7, 4), (-1, 1), (-6, -1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.360, 0.544), (0.302, 0.548), (12, 1), (1, -2), (-7, -6)),\n BrenemanExperimentResult(\n 'Brown',\n (0.350, 0.541), (0.290, 0.537), (11, 4), (3, 0), (-5, -3)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.318, 0.550), (0.257, 0.554), (8, 2), (0, 2), (-5, -5)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.258, 0.542), (0.192, 0.529), (4, 6), (3, 2), (3, -6)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.542), (0.129, 0.521), (7, 5), (3, 2), (9, -7)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.180, 0.516), (0.133, 0.469), (4, 6), (-3, -2), (2, -5)),\n BrenemanExperimentResult(\n 'Blue',\n (0.186, 0.445), (0.158, 0.340), (13, 33), (2, 7), (1, 13)),\n BrenemanExperimentResult(\n 'Sky',\n (0.226, 0.491), (0.178, 0.426), (3, 14), (1, -3), (0, -1)),\n BrenemanExperimentResult(\n 'Purple',\n (0.278, 0.456), (0.231, 0.365), (4, 25), (0, 2), (-5, 7)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 1 results.\n\nBRENEMAN_EXPERIMENT_1_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D65*\n- White Luminance : 1500 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_2_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.222, 0.521), (0.204, 0.479)),\n BrenemanExperimentResult(\n 'Gray',\n (0.227, 0.517), (0.207, 0.486), (2, 5), (-1, 0), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.464, 0.520), (0.449, 0.511), (22, 3), (-8, -8), (-7, -2)),\n BrenemanExperimentResult(\n 'Skin',\n (0.286, 0.526), (0.263, 0.505), (7, 2), (0, -1), (0, -1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.348, 0.546), (0.322, 0.545), (13, 3), (3, -1), (3, -2)),\n BrenemanExperimentResult(\n 'Brown',\n (0.340, 0.543), (0.316, 0.537), (11, 3), (1, 1), (0, 0)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.288, 0.554), (0.265, 0.553), (5, 2), (-2, 2), (-1, -2)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.244, 0.547), (0.221, 0.538), (4, 3), (-2, 1), (0, -3)),\n BrenemanExperimentResult(\n 'Green',\n (0.156, 0.548), (0.135, 0.532), (4, 3), (-1, 3), (3, -4)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.159, 0.511), (0.145, 0.472), (9, 7), (-1, 2), (2, 1)),\n BrenemanExperimentResult(\n 'Blue',\n (0.160, 0.406), (0.163, 0.331), (23, 31), (2, -3), (-1, 3)),\n BrenemanExperimentResult(\n 'Sky',\n (0.190, 0.481), (0.176, 0.431), (5, 24), (2, -2), (2, 0)),\n BrenemanExperimentResult(\n 'Purple',\n (0.258, 0.431), (0.244, 0.349), (4, 19), (-3, 13), (-4, 19)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 2 results.\n\nBRENEMAN_EXPERIMENT_2_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *Projector*, *D55*\n- White Luminance : 1500 :math:`cd/m^2`\n- Observers 
Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_3_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.223, 0.521), (0.206, 0.478)),\n BrenemanExperimentResult(\n 'Gray',\n (0.228, 0.517), (0.211, 0.494), (1, 3), (0, 2), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.462, 0.519), (0.448, 0.505), (11, 4), (-3, 6), (-4, 6)),\n BrenemanExperimentResult(\n 'Skin',\n (0.285, 0.524), (0.267, 0.507), (6, 3), (-1, 1), (-2, 1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.346, 0.546), (0.325, 0.541), (11, 3), (1, -2), (2, 3)),\n BrenemanExperimentResult(\n 'Brown',\n (0.338, 0.543), (0.321, 0.532), (9, 6), (-3, 2), (-3, 7)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.287, 0.554), (0.267, 0.548), (4, 5), (1, -2), (0, 5)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.244, 0.547), (0.226, 0.531), (3, 6), (-1, 3), (-2, 8)),\n BrenemanExperimentResult(\n 'Green',\n (0.157, 0.548), (0.141, 0.528), (9, 6), (2, 2), (0, 6)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.160, 0.510), (0.151, 0.486), (8, 5), (-2, -1), (-2, -5)),\n BrenemanExperimentResult(\n 'Blue',\n (0.162, 0.407), (0.158, 0.375), (6, 7), (1, -6), (4, -23)),\n BrenemanExperimentResult(\n 'Sky',\n (0.191, 0.482), (0.179, 0.452), (4, 5), (0, 1), (1, -7)),\n BrenemanExperimentResult(\n 'Purple',\n (0.258, 0.432), (0.238, 0.396), (4, 8), (5, 3), (4, -11)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 3 results.\n\nBRENEMAN_EXPERIMENT_3_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *Projector*, *D55*\n- White Luminance : 75 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_4_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.258, 0.523), (0.199, 0.467)),\n BrenemanExperimentResult(\n 'Gray',\n (0.257, 0.524), (0.205, 0.495), (2, 2), (0, 4), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.460, 0.521), (0.416, 0.501), (11, 6), (-6, 4), (-6, 9)),\n BrenemanExperimentResult(\n 'Skin',\n (0.308, 0.526), (0.253, 0.503), (7, 3), (-1, 1), (-1, 0)),\n BrenemanExperimentResult(\n 'Orange',\n (0.360, 0.544), (0.303, 0.541), (14, 5), (1, -4), (1, 2)),\n BrenemanExperimentResult(\n 'Brown',\n (0.350, 0.541), (0.296, 0.527), (11, 7), (-2, 4), (-3, 9)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.317, 0.550), (0.260, 0.547), (9, 5), (1, -3), (0, 3)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.258, 0.543), (0.203, 0.520), (4, 6), (0, 8), (0, 9)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.543), (0.142, 0.516), (6, 9), (3, 8), (2, 6)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.180, 0.516), (0.140, 0.484), (9, 5), (-2, -1), (-1, -9)),\n BrenemanExperimentResult(\n 'Blue',\n (0.185, 0.445), (0.151, 0.394), (8, 10), (2, -8), (8, -24)),\n BrenemanExperimentResult(\n 'Sky',\n (0.225, 0.490), (0.180, 0.448), (4, 8), (1, -1), (3, -11)),\n BrenemanExperimentResult(\n 'Purple',\n (0.278, 0.455), (0.229, 0.388), (6, 14), (1, 12), (3, 0)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 4 results.\n\nBRENEMAN_EXPERIMENT_4_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D65*\n- White Luminance : 75 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_5_RESULTS = (\n BrenemanExperimentResult(\n 'Gray',\n (0.028, 0.480), (0.212, 0.491), (2, 2)),\n BrenemanExperimentResult(\n 'Red',\n (0.449, 0.512), (0.408, 0.514), (11, 5)),\n BrenemanExperimentResult(\n 'Skin',\n (0.269, 0.505), (0.262, 0.511), (4, 2)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.548), (0.303, 0.545), (4, 3)),\n BrenemanExperimentResult(\n 
'Brown',\n (0.322, 0.541), (0.303, 0.538), (4, 4)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.268, 0.555), (0.264, 0.550), (3, 2)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.224, 0.538), (0.227, 0.535), (3, 3)),\n BrenemanExperimentResult(\n 'Green',\n (0.134, 0.531), (0.159, 0.530), (9, 3)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.145, 0.474), (0.165, 0.490), (8, 3)),\n BrenemanExperimentResult(\n 'Blue',\n (0.163, 0.329), (0.173, 0.378), (7, 12)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.438), (0.189, 0.462), (5, 4)),\n BrenemanExperimentResult(\n 'Purple',\n (0.245, 0.364), (0.239, 0.401), (4, 16)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 5 results.\n\nBRENEMAN_EXPERIMENT_5_RESULTS : tuple\n\nNotes\n-----\n- Effective White Levels : 130 and 2120 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_6_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.257, 0.525), (0.201, 0.482)),\n BrenemanExperimentResult(\n 'Gray',\n (0.267, 0.521), (0.207, 0.485), (5, 3), (-1, 0), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.457, 0.521), (0.398, 0.516), (9, 4), (-2, -5), (1, -9)),\n BrenemanExperimentResult(\n 'Skin',\n (0.316, 0.526), (0.253, 0.503), (5, 3), (-3, -2), (-1, -3)),\n BrenemanExperimentResult(\n 'Orange',\n (0.358, 0.545), (0.287, 0.550), (7, 3), (3, 0), (7, -6)),\n BrenemanExperimentResult(\n 'Brown',\n (0.350, 0.541), (0.282, 0.540), (6, 3), (-1, 0), (2, -5)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.318, 0.551), (0.249, 0.556), (7, 2), (-1, 1), (2, -5)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.256, 0.547), (0.188, 0.537), (5, 4), (3, 1), (4, -2)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.542), (0.133, 0.520), (13, 3), (5, -2), (5, -4)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.180, 0.516), (0.137, 0.466), (12, 10), (0, 0), (-2, 2)),\n BrenemanExperimentResult(\n 'Blue',\n (0.186, 0.445), (0.156, 0.353), (12, 45), (6, 1), (2, 6)),\n BrenemanExperimentResult(\n 'Sky',\n (0.225, 0.492), (0.178, 0.428), (6, 14), (1, -1), (-1, 3)),\n BrenemanExperimentResult(\n 'Purple',\n (0.276, 0.456), (0.227, 0.369), (6, 27), (-2, 4), (-3, 9)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 6 results.\n\nBRENEMAN_EXPERIMENT_6_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D55*\n- White Luminance : 11100 :math:`cd/m^2`\n- Observers Count : 8\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_7_RESULTS = (\n BrenemanExperimentResult(\n 'Gray',\n (0.208, 0.481), (0.211, 0.486), (2, 3)),\n BrenemanExperimentResult(\n 'Red',\n (0.448, 0.512), (0.409, 0.516), (9, 2)),\n BrenemanExperimentResult(\n 'Skin',\n (0.269, 0.505), (0.256, 0.506), (4, 3)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.549), (0.305, 0.547), (5, 4)),\n BrenemanExperimentResult(\n 'Brown',\n (0.322, 0.541), (0.301, 0.539), (5, 2)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.268, 0.555), (0.257, 0.552), (3, 4)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.225, 0.538), (0.222, 0.536), (3, 2)),\n BrenemanExperimentResult(\n 'Green',\n (0.135, 0.531), (0.153, 0.529), (8, 2)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.145, 0.475), (0.160, 0.484), (3, 5)),\n BrenemanExperimentResult(\n 'Blue',\n (0.163, 0.331), (0.171, 0.379), (4, 11)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.438), (0.187, 0.452), (4, 7)),\n BrenemanExperimentResult(\n 'Purple',\n (0.245, 0.365), (0.240, 0.398), (4, 10)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 7 results.\n\nBRENEMAN_EXPERIMENT_7_RESULTS : 
tuple\n\nNotes\n-----\n- Effective White Levels : 850 and 11100 :math:`cd/m^2`\n- Observers Count : 8\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_8_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.258, 0.524), (0.195, 0.469)),\n BrenemanExperimentResult(\n 'Gray',\n (0.257, 0.525), (0.200, 0.494), (2, 3), (1, 2), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.458, 0.522), (0.410, 0.508), (12, 4), (-3, 5), (-7, 2)),\n BrenemanExperimentResult(\n 'Skin',\n (0.308, 0.526), (0.249, 0.502), (6, 2), (-1, 1), (-3, -1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.359, 0.545), (0.299, 0.545), (12, 4), (0, -2), (-3, 0)),\n BrenemanExperimentResult(\n 'Brown',\n (0.349, 0.540), (0.289, 0.532), (10, 4), (0, 1), (-2, 2)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.317, 0.550), (0.256, 0.549), (9, 5), (0, -3), (-3, 1)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.260, 0.545), (0.198, 0.529), (5, 5), (3, 1), (0, 3)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.543), (0.137, 0.520), (9, 5), (3, 0), (2, 1)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.182, 0.516), (0.139, 0.477), (9, 4), (-3, 0), (-2, -4)),\n BrenemanExperimentResult(\n 'Blue',\n (0.184, 0.444), (0.150, 0.387), (5, 11), (3, -10), (6, -22)),\n BrenemanExperimentResult(\n 'Sky',\n (0.224, 0.489), (0.177, 0.439), (5, 6), (1, 1), (1, -7)),\n BrenemanExperimentResult(\n 'Purple',\n (0.277, 0.454), (0.226, 0.389), (4, 10), (1, 4), (1, -8)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 8 results.\n\nBRENEMAN_EXPERIMENT_8_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D65*\n- White Luminance : 350 :math:`cd/m^2`\n- Observers Count : 8\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_9_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.254, 0.525), (0.195, 0.465)),\n BrenemanExperimentResult(\n 'Gray',\n (0.256, 0.524), (0.207, 0.496), (4, 6), (3, 2), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.459, 0.521), (0.415, 0.489), (20, 14), (2, 12), (-2, 21)),\n BrenemanExperimentResult(\n 'Skin',\n (0.307, 0.525), (0.261, 0.500), (7, 7), (0, 1), (-5, 2)),\n BrenemanExperimentResult(\n 'Orange',\n (0.359, 0.545), (0.313, 0.532), (7, 5), (-2, -3), (-6, 13)),\n BrenemanExperimentResult(\n 'Brown',\n (0.349, 0.540), (0.302, 0.510), (11, 15), (0, 12), (-5, 24)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.317, 0.550), (0.268, 0.538), (7, 10), (1, -4), (-4, 12)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.259, 0.544), (0.212, 0.510), (10, 11), (0, 14), (-4, 22)),\n BrenemanExperimentResult(\n 'Green',\n (0.193, 0.542), (0.150, 0.506), (6, 10), (-1, 13), (-2, 15)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.181, 0.517), (0.144, 0.487), (9, 6), (-3, 0), (-1, -9)),\n BrenemanExperimentResult(\n 'Blue',\n (0.184, 0.444), (0.155, 0.407), (4, 11), (-2, -6), (6, -36)),\n BrenemanExperimentResult(\n 'Sky',\n (0.225, 0.490), (0.183, 0.458), (5, 8), (1, -3), (2, -19)),\n BrenemanExperimentResult(\n 'Purple',\n (0.276, 0.454), (0.233, 0.404), (7, 12), (2, 9), (0, -16)),\n BrenemanExperimentResult(\n '(Gray)h',\n (0.256, 0.525), (0.208, 0.498)),\n BrenemanExperimentResult(\n '(Red)h',\n (0.456, 0.521), (0.416, 0.501), (15, 7), None, (-6, -9)),\n BrenemanExperimentResult(\n '(Brown)h',\n (0.349, 0.539), (0.306, 0.526), (11, 8), None, (-8, 7)),\n BrenemanExperimentResult(\n '(Foliage)h',\n (0.260, 0.545), (0.213, 0.528), (7, 9), None, (-4, 5)),\n BrenemanExperimentResult(\n '(Green)h',\n (0.193, 0.543), (0.149, 0.525), (10, 8), None, (-1, -1)),\n BrenemanExperimentResult(\n '(Blue)h',\n (0.184, 
0.444), (0.156, 0.419), (7, 8), None, (4, -45)),\n BrenemanExperimentResult(\n '(Purple)h',\n (0.277, 0.456), (0.236, 0.422), (6, 11), None, (-2, -29)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 9 results.\n\nBRENEMAN_EXPERIMENT_9_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *A*, *D65*\n- White Luminance : 15 :math:`cd/m^2`\n- Observers Count : 8\n- The colors indicated by (.)h are the darker colors presented at the higher\n luminescence level of the lighter colors.\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_10_RESULTS = (\n BrenemanExperimentResult(\n 'Gray',\n (0.208, 0.482), (0.213, 0.494), (3, 3)),\n BrenemanExperimentResult(\n 'Red',\n (0.447, 0.512), (0.411, 0.506), (15, 7)),\n BrenemanExperimentResult(\n 'Skin',\n (0.269, 0.505), (0.269, 0.511), (4, 3)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.549), (0.315, 0.536), (7, 8)),\n BrenemanExperimentResult(\n 'Brown',\n (0.323, 0.542), (0.310, 0.526), (6, 8)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.268, 0.556), (0.268, 0.541), (3, 6)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.226, 0.538), (0.230, 0.525), (4, 8)),\n BrenemanExperimentResult(\n 'Green',\n (0.135, 0.531), (0.158, 0.524), (6, 3)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.145, 0.476), (0.161, 0.491), (4, 4)),\n BrenemanExperimentResult(\n 'Blue',\n (0.163, 0.330), (0.171, 0.377), (6, 19)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.439), (0.187, 0.465), (5, 5)),\n BrenemanExperimentResult(\n 'Purple',\n (0.245, 0.366), (0.240, 0.402), (3, 12)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 10 results.\n\nBRENEMAN_EXPERIMENT_10_RESULTS : tuple\n\nNotes\n-----\n- Effective White Levels : 15 and 270 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_11_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.208, 0.482), (0.174, 0.520)),\n BrenemanExperimentResult(\n 'Gray',\n (0.209, 0.483), (0.176, 0.513), (3, 4), (2, 2), (0, 0)),\n BrenemanExperimentResult(\n 'Red',\n (0.450, 0.512), (0.419, 0.524), (10, 2), (3, 2), (8, -1)),\n BrenemanExperimentResult(\n 'Skin',\n (0.268, 0.506), (0.240, 0.528), (6, 2), (-4, 0), (-3, 0)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.547), (0.293, 0.553), (6, 2), (3, -1), (5, 1)),\n BrenemanExperimentResult(\n 'Brown',\n (0.323, 0.542), (0.290, 0.552), (5, 2), (-1, -3), (0, -1)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.266, 0.549), (0.236, 0.557), (4, 2), (-3, -2), (-4, 2)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.227, 0.538), (0.194, 0.552), (4, 2), (2, -3), (-1, 1)),\n BrenemanExperimentResult(\n 'Green',\n (0.146, 0.534), (0.118, 0.551), (8, 3), (4, -2), (-6, 3)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.160, 0.475), (0.130, 0.513), (9, 4), (1, -1), (-4, -3)),\n BrenemanExperimentResult(\n 'Blue',\n (0.177, 0.340), (0.133, 0.427), (6, 14), (4, -17), (11, -29)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.438), (0.146, 0.482), (6, 10), (1, 4), (0, -1)),\n BrenemanExperimentResult(\n 'Purple',\n (0.245, 0.366), (0.216, 0.419), (4, 13), (-3, 8), (4, -2)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 1 results.\n\nBRENEMAN_EXPERIMENT_11_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *green*, *D65*\n- White Luminance : 1560 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENT_12_RESULTS = (\n BrenemanExperimentResult(\n 'Illuminant',\n (0.205, 0.482), (0.174, 0.519)),\n BrenemanExperimentResult(\n 'Gray',\n (0.208, 0.482), (0.181, 0.507), (4, 3), (0, 1), (0, 0)),\n 
BrenemanExperimentResult(\n 'Red',\n (0.451, 0.512), (0.422, 0.526), (20, 3), (0, -5), (10, -5)),\n BrenemanExperimentResult(\n 'Skin',\n (0.268, 0.506), (0.244, 0.525), (5, 2), (-6, 0), (-2, -1)),\n BrenemanExperimentResult(\n 'Orange',\n (0.331, 0.548), (0.292, 0.553), (10, 2), (5, 2), (11, 1)),\n BrenemanExperimentResult(\n 'Brown',\n (0.324, 0.542), (0.286, 0.554), (8, 1), (5, -3), (10, -4)),\n BrenemanExperimentResult(\n 'Yellow',\n (0.266, 0.548), (0.238, 0.558), (6, 2), (-3, -1), (-1, -2)),\n BrenemanExperimentResult(\n 'Foliage',\n (0.227, 0.538), (0.196, 0.555), (6, 3), (3, -4), (2, -5)),\n BrenemanExperimentResult(\n 'Green',\n (0.145, 0.534), (0.124, 0.551), (8, 6), (1, -1), (-8, -1)),\n BrenemanExperimentResult(\n 'Blue-green',\n (0.160, 0.474), (0.135, 0.505), (5, 2), (1, -1), (-4, -3)),\n BrenemanExperimentResult(\n 'Blue',\n (0.178, 0.339), (0.149, 0.392), (4, 20), (-1, -5), (3, -7)),\n BrenemanExperimentResult(\n 'Sky',\n (0.179, 0.440), (0.150, 0.473), (4, 8), (3, 2), (2, 0)),\n BrenemanExperimentResult(\n 'Purple',\n (0.246, 0.366), (0.222, 0.404), (5, 15), (-4, 2), (4, 2)))\n# yapf: enable\n\"\"\"\n*Breneman (1987)* experiment 12 results.\n\nBRENEMAN_EXPERIMENT_12_RESULTS : tuple\n\nNotes\n-----\n- Illuminants : *D55*, *green*\n- White Luminance : 75 :math:`cd/m^2`\n- Observers Count : 7\n\"\"\"\n\n# yapf: disable\nBRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES = DocstringDict({\n 1: PrimariesChromaticityCoordinates(\n 1, ('A', 'D65'), 1500,\n (0.671, 0.519), (-0.586, 0.627), (0.253, 0.016)),\n 2: PrimariesChromaticityCoordinates(\n 2, ('Projector', 'D55'), 1500,\n (0.675, 0.523), (-0.466, 0.617), (0.255, 0.018)),\n 3: PrimariesChromaticityCoordinates(\n 3, ('Projector', 'D55'), 75,\n (0.664, 0.510), (-0.256, 0.729), (0.244, 0.003)),\n 4: PrimariesChromaticityCoordinates(\n 4, ('A', 'D65'), 75,\n (0.674, 0.524), (-0.172, 0.628), (0.218, -0.026)),\n 6: PrimariesChromaticityCoordinates(\n 6, ('A', 'D55'), 11100,\n (0.659, 0.506), (-0.141, 0.615), (0.249, 0.009)),\n 8: PrimariesChromaticityCoordinates(\n 8, ('A', 'D65'), 350,\n (0.659, 0.505), (-0.246, 0.672), (0.235, -0.006)),\n 9: PrimariesChromaticityCoordinates(\n 9, ('A', 'D65'), 15,\n (0.693, 0.546), (-0.446, 0.773), (0.221, -0.023)),\n 11: PrimariesChromaticityCoordinates(\n 11, ('D55', 'green'), 1560,\n (0.680, 0.529), (0.018, 0.576), (0.307, 0.080)),\n 12: PrimariesChromaticityCoordinates(\n 12, ('D55', 'green'), 75,\n (0.661, 0.505), (0.039, 0.598), (0.345, 0.127))})\n# yapf: enable\nBRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES.__doc__ = \"\"\"\n*Breneman (1987)* experiments primaries chromaticities.\n\nReferences\n----------\n:cite:`Breneman1987b`\n\nBRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES : dict\n\"\"\"\n\nBRENEMAN_EXPERIMENTS = DocstringDict({\n 1: BRENEMAN_EXPERIMENT_1_RESULTS,\n 2: BRENEMAN_EXPERIMENT_2_RESULTS,\n 3: BRENEMAN_EXPERIMENT_3_RESULTS,\n 4: BRENEMAN_EXPERIMENT_4_RESULTS,\n 5: BRENEMAN_EXPERIMENT_5_RESULTS,\n 6: BRENEMAN_EXPERIMENT_6_RESULTS,\n 7: BRENEMAN_EXPERIMENT_7_RESULTS,\n 8: BRENEMAN_EXPERIMENT_8_RESULTS,\n 9: BRENEMAN_EXPERIMENT_9_RESULTS,\n 10: BRENEMAN_EXPERIMENT_10_RESULTS,\n 11: BRENEMAN_EXPERIMENT_11_RESULTS,\n 12: BRENEMAN_EXPERIMENT_12_RESULTS\n})\nBRENEMAN_EXPERIMENTS.__doc__ = \"\"\"\n*Breneman (1987)* experiments.\n\nReferences\n----------\n:cite:`Breneman1987b`\n\nBRENEMAN_EXPERIMENTS : dict\n\"\"\"\n", "# -*- coding: utf-8 -*-\n\"\"\"\nITU-R BT.1886\n=============\n\nDefines *Recommendation ITU-R BT.1886* electro-optical transfer function\n(EOTF / EOCF) and its 
inverse:\n\n- :func:`colour.models.eotf_inverse_BT1886`\n- :func:`colour.models.eotf_BT1886`\n\nSee Also\n--------\n`RGB Colourspaces Jupyter Notebook\n<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\\\nblob/master/notebooks/models/rgb.ipynb>`_\n\nReferences\n----------\n- :cite:`InternationalTelecommunicationUnion2011h` : International\n Telecommunication Union. (2011). Recommendation ITU-R BT.1886 - Reference\n electro-optical transfer function for flat panel displays used in HDTV\n studio production BT Series Broadcasting service. Retrieved from\n https://www.itu.int/dms_pubrec/itu-r/rec/bt/\\\nR-REC-BT.1886-0-201103-I!!PDF-E.pdf\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\n\nfrom colour.utilities import from_range_1, to_domain_1\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['eotf_inverse_BT1886', 'eotf_BT1886']\n\n\ndef eotf_inverse_BT1886(L, L_B=0, L_W=1):\n \"\"\"\n Defines *Recommendation ITU-R BT.1886* inverse electro-optical transfer\n function (EOTF / EOCF).\n\n Parameters\n ----------\n L : numeric or array_like\n Screen luminance in :math:`cd/m^2`.\n L_B : numeric, optional\n Screen luminance for black.\n L_W : numeric, optional\n Screen luminance for white.\n\n Returns\n -------\n numeric or ndarray\n Input video signal level (normalised, black at :math:`V = 0`, to white\n at :math:`V = 1`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``L`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``V`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`InternationalTelecommunicationUnion2011h`\n\n Examples\n --------\n >>> eotf_inverse_BT1886(0.11699185725296059) # doctest: +ELLIPSIS\n 0.4090077...\n \"\"\"\n\n L = to_domain_1(L)\n\n gamma = 2.40\n gamma_d = 1 / gamma\n\n n = L_W ** gamma_d - L_B ** gamma_d\n a = n ** gamma\n b = L_B ** gamma_d / n\n\n V = (L / a) ** gamma_d - b\n\n return from_range_1(V)\n\n\ndef eotf_BT1886(V, L_B=0, L_W=1):\n \"\"\"\n Defines *Recommendation ITU-R BT.1886* electro-optical transfer function\n (EOTF / EOCF).\n\n Parameters\n ----------\n V : numeric or array_like\n Input video signal level (normalised, black at :math:`V = 0`, to white\n at :math:`V = 1`. 
For content mastered per\n *Recommendation ITU-R BT.709*, 10-bit digital code values :math:`D` map\n into values of :math:`V` per the following equation:\n :math:`V = (D-64)/876`\n L_B : numeric, optional\n Screen luminance for black.\n L_W : numeric, optional\n Screen luminance for white.\n\n Returns\n -------\n numeric or ndarray\n Screen luminance in :math:`cd/m^2`.\n\n Notes\n -----\n\n +------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``V`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n +------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +============+=======================+===============+\n | ``L`` | [0, 1] | [0, 1] |\n +------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`InternationalTelecommunicationUnion2011h`\n\n Examples\n --------\n >>> eotf_BT1886(0.409007728864150) # doctest: +ELLIPSIS\n 0.1169918...\n \"\"\"\n\n V = to_domain_1(V)\n\n gamma = 2.40\n gamma_d = 1 / gamma\n\n n = L_W ** gamma_d - L_B ** gamma_d\n a = n ** gamma\n b = L_B ** gamma_d / n\n L = a * np.maximum(V + b, 0) ** gamma\n\n return from_range_1(L)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.volume.mesh` module.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\nfrom itertools import permutations\n\nfrom colour.volume import is_within_mesh_volume\nfrom colour.utilities import ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestIsWithinMeshVolume']\n\n\nclass TestIsWithinMeshVolume(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.volume.mesh.is_within_mesh_volume` definition unit\n tests methods.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialises common tests attributes.\n \"\"\"\n\n self._mesh = np.array([\n [-1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [0.0, 1.0, 0.0],\n ])\n\n def test_is_within_mesh_volume(self):\n \"\"\"\n Tests :func:`colour.volume.mesh.is_within_mesh_volume` definition.\n \"\"\"\n\n self.assertTrue(\n is_within_mesh_volume(\n np.array([0.0005, 0.0031, 0.0010]), self._mesh))\n\n self.assertFalse(\n is_within_mesh_volume(\n np.array([0.3205, 0.4131, 0.5100]), self._mesh))\n\n self.assertTrue(\n is_within_mesh_volume(\n np.array([0.0025, 0.0088, 0.0340]), self._mesh))\n\n self.assertFalse(\n is_within_mesh_volume(\n np.array([0.4325, 0.3788, 0.1034]), self._mesh))\n\n def test_n_dimensional_is_within_mesh_volume(self):\n \"\"\"\n Tests :func:`colour.volume.mesh.is_within_mesh_volume` definition\n n-dimensional arrays support.\n \"\"\"\n\n a = np.array([0.0005, 0.0031, 0.0010])\n b = is_within_mesh_volume(a, self._mesh)\n\n a = np.tile(a, (6, 1))\n b = np.tile(b, 6)\n np.testing.assert_almost_equal(is_within_mesh_volume(a, self._mesh), b)\n\n a = np.reshape(a, (2, 3, 3))\n b = np.reshape(b, (2, 3))\n np.testing.assert_almost_equal(is_within_mesh_volume(a, self._mesh), b)\n\n @ignore_numpy_errors\n def test_nan_is_within_mesh_volume(self):\n \"\"\"\n Tests :func:`colour.volume.mesh.is_within_mesh_volume` definition nan\n support.\n \"\"\"\n\n cases = 
[-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n is_within_mesh_volume(case, self._mesh)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array" ], [ "numpy.maximum" ], [ "numpy.array", "numpy.tile", "numpy.reshape" ] ]
NestLakerJasonLIN/pipedream
[ "cad624f79a71f44ba79099f0c38321347b13e5c2" ]
[ "profiler/torchmodules/torchlogger/activation_gradient_logger.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport os\nimport pickle\nimport torch\n\n\nclass ActivationAndGradientLogger:\n def __init__(self, directory):\n self.directory = directory\n try:\n os.mkdir(self.directory)\n except:\n pass\n self.iteration = 0\n self.forward_counter = 0\n self.backward_counter = 0\n\n def reset_counters(self):\n self.forward_counter = 0\n self.backward_counter = 0\n\n def hook_modules(self, module, iteration):\n self.iteration = iteration\n sub_directory = os.path.join(self.directory, str(iteration))\n try:\n os.mkdir(sub_directory)\n except:\n pass\n self.hook_modules_helper(module, sub_directory)\n\n def hook_modules_helper(self, module, sub_directory):\n sub_modules = module.__dict__['_modules']\n\n for name, sub_module in sub_modules.items():\n if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:\n break\n\n sub_sub_modules = sub_module.__dict__['_modules']\n if len(sub_sub_modules) > 0:\n # Recursively visit this module's descendants.\n self.hook_modules_helper(sub_module, sub_directory)\n else:\n def forward_hook(*args):\n activation = args[2]\n filename = os.path.join(sub_directory, 'activations.%d.pkl' % self.forward_counter)\n with open(filename, 'wb') as f:\n torch.save(activation, f)\n self.forward_counter += 1\n\n def backward_hook(*args):\n gradient = args[2]\n filename = os.path.join(sub_directory, 'gradients.%d.pkl' % self.backward_counter)\n with open(filename, 'wb') as f:\n torch.save(gradient, f)\n self.backward_counter += 1\n\n sub_module.register_forward_hook(forward_hook)\n sub_module.register_backward_hook(backward_hook)\n\n def unhook_modules(self, module):\n self.unhook_modules_helper(module)\n self.reset_counters()\n\n def unhook_modules_helper(self, module):\n sub_modules = module.__dict__['_modules']\n\n for name, sub_module in sub_modules.items():\n if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:\n break\n\n sub_sub_modules = sub_module.__dict__['_modules']\n if len(sub_sub_modules) > 0:\n # Recursively visit this module's descendants.\n self.unhook_modules_helper(sub_module)\n else:\n sub_module.reset_hooks()\n" ]
[ [ "torch.save" ] ]
PriyamvadaKumar/AWS_BioActive_Classification
[ "b6a4413618586712ca4dc196f2dfaa3ceca804fb" ]
[ "bioactive_lab.py" ]
[ "import os, sys\ndirpath = os.getcwd()\nsys.path.insert(0, dirpath + '/goal_tether_functions')\nsys.path.insert(0, dirpath + '/predictive_modelers')\nsys.path.insert(0, dirpath + '/predictive_modelers/assessment_resources')\nsys.path.insert(0, dirpath + '/active_learners')\nsys.path.insert(0, dirpath + '/data_acquisition')\nsys.path.insert(0, dirpath + '/diagnostics')\nfrom createCampaign_battleship import main as createCampaign\n# from createImageCampaign_Bria import main as createCampaign\nfrom runCampaign2 import main as runCampaign\nfrom database import *\nimport outputManager\nimport time\nimport boto3\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.cluster import KMeans\n\n\n# Part 1 Plotting Function\ndef plot_simulation_accuracy(acc, title, mul_accuracy=False):\n fig, ax = plt.subplots()\n ax.set_ylabel(\"Accuracy (%)\")\n ax.set_xlabel(\"Iterations\")\n ax.set_title(title)\n if mul_accuracy:\n ax.plot(np.arange(len(acc[0])), acc[0], label=\"Full Space\")\n ax.plot(np.arange(len(acc[1])), acc[1], label=\"Forward Modeling\")\n ax.plot(np.arange(len(acc[2])), acc[2], label=\"Prediction Only\")\n else:\n ax.plot(np.arange(len(acc)), acc)\n ax.legend()\n plt.show()\n\n\ndef average_arrays(mat):\n array = []\n for i in range(25):\n avg = 0\n for m in range(len(mat)):\n if len(mat[m]) < i:\n continue\n avg += mat[m][i]\n avg = avg/len(mat)\n array.append(avg)\n return array\n\nwd =os.getcwd()\nprint(\"Current Working Directory: \", wd)\nprint()\n\nif path.exists(\"data/data.csv\") is False:\n print(\"Retrieving Data from S3\")\n\n# read data from S3\ns3 = boto3.resource('s3')\ns3.Bucket('whatyouknowaboutmybucket').download_file('data.csv', wd + '/data/data.csv')\n\nif path.exists(\"data/data.csv\") is False:\n print(\"Retrieving Data from S3\")\n time.sleep(5)\n\ndata = pd.read_csv(\"data/data.csv\").dropna().to_numpy()\nfeatures = data[:, 4:]\nlabels = data[:, 2]\n\nl = LabelEncoder()\nlabels = l.fit_transform(labels)\nprint(l.classes_)\n\ns = KMeans(n_clusters=5)\n# s.decision_function(features[:1000])\ns.fit_transform(features[:1500])\nprint(s.score(features[1500:]))\n\nd = np.zeros((20,20))\n\n# create groundTruth\nfor i in range(len(data)):\n if data[i][0] - 1 >= len(d) or data[i][1] >= len(d[0]):\n continue\n d[data[i][0]-1][data[i][1]-1] = s.predict(features[i].reshape(1,-1))\n\nprint(d)\n\n\nnp.savetxt('data_acquisition/project.txt', d)\n\n\nprint(labels)\n\n\n\n\n\n\n# exit()\n'''\ncampaign = createCampaign()\nrunCampaign(campaign)\nacc = [np.array(campaign.accuracy_full), np.array(campaign.accuracy_forwardModeling),\n np.array(campaign.accuracy_onlyPredictions)]\n\nplot_simulation_accuracy(acc, \"Model Accuracies for a Single Simulation\", mul_accuracy=True)\n'''\n\n# Part 2 of Assignment - 2 independent variables (0-20) and 1 dependent variable (0-10) for 20 simulations\n\nacc = []\nfor i in range(1):\n campaign = createCampaign()\n campaign.randoseed = 2\n # campaign.ESS.iVars = [('int', 0, 9), ('int', 0, 9)]\n # campaign.ESS.dVars = [('int', 0, 2)]\n campaign.groundtruthData = 'data_acquisition/project.txt'\n campaign.simsFlag = True\n runCampaign(campaign)\n acc = [campaign.accuracy_full, campaign.accuracy_forwardModeling, campaign.accuracy_onlyPredictions]\n# acc = average_arrays(acc)\nplot_simulation_accuracy(acc, \"Three Accuracies for the Experimental Space\", mul_accuracy=True)\n\n\n# Part 3 of Assignment -\n# acc1, acc2, acc3, acc4 = [], [], [], []\n# for i in range(5):\n# 
campaign = createCampaign()\n# campaign.ESS.high_homogeneity = True\n# campaign.ESS.h_num = 2\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 2)]\n# campaign.ESS.dimarr = [20,20]\n# runCampaign(campaign)\n# acc = campaign.accuracy_onlyPredictions\n# acc1.append(acc)\n#\n# for i in range(5):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.h_num = 2\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 2)]\n# runCampaign(campaign)\n# acc = campaign.accuracy_onlyPredictions\n# acc2.append(acc)\n#\n# for i in range(5):\n# campaign = createCampaign()\n# campaign.ESS.high_homogeneity = True\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20,20]\n# runCampaign(campaign)\n# acc = campaign.accuracy_onlyPredictions\n# acc3.append(acc)\n#\n# for i in range(5):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20,20]\n# runCampaign(campaign)\n# acc = campaign.accuracy_onlyPredictions\n# acc4.append(acc)\n#\n# acc1, acc2, acc3, acc4 = average_arrays(acc1), average_arrays(acc2), average_arrays(acc3), average_arrays(acc4)\n#\n# plt.plot([i+1 for i in range(len(acc1))], acc1, label=\"H-2\", color=\"blue\")\n# plt.plot([i+1 for i in range(len(acc2))], acc2, label=\"L-2\", color=\"green\")\n# plt.plot([i+1 for i in range(len(acc3))], acc3, label=\"H-10\", color=\"red\")\n# plt.plot([i+1 for i in range(len(acc4))], acc4, label=\"L-10\", color=\"black\")\n# plt.ylabel(\"Accuracy (%)\")\n# plt.xlabel(\"Iterations\")\n# plt.title(\"Different Homogeneity within Experimental Spaces\")\n# plt.legend()\n# plt.show()\n\n\n# Part 4 of Assignment -\n\n# acc1, acc2, acc3, acc4 = [], [], [], []\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0\n# campaign.randoseed= 45\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc = campaign.accuracy_onlyPredictions\n# acc1.append(acc)\n#\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.randoseed = 1\n# campaign.ESS.error = 0.1\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc = campaign.accuracy_onlyPredictions\n# acc2.append(acc)\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.5\n# campaign.randoseed = 2\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc = campaign.accuracy_onlyPredictions\n# acc3.append(acc)\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 1.0\n# campaign.randoseed=3\n# campaign.ESS.h_num = 10\n# 
campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc = campaign.accuracy_onlyPredictions\n# acc4.append(acc)\n#\n# acc1, acc2, acc3, acc4 = average_arrays(acc1), average_arrays(acc2), average_arrays(acc3), average_arrays(acc4)\n#\n# plt.plot([i+1 for i in range(len(acc1))], acc1, label=\"0.0\", color=\"blue\")\n# plt.plot([i+1 for i in range(len(acc2))], acc2, label=\"0.1\", color=\"green\")\n# plt.plot([i+1 for i in range(len(acc3))], acc3, label=\"0.5\", color=\"red\")\n# plt.plot([i+1 for i in range(len(acc4))], acc4, label=\"1.0\", color=\"black\")\n# plt.ylabel(\"Accuracy (%)\")\n# plt.xlabel(\"Iterations\")\n# plt.title(\"Different Error Rates within Experimental Spaces\")\n# plt.legend()\n# plt.show()\n\n\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0\n# campaign.randoseed = 53\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc1 = campaign.accuracy_onlyPredictions\n#\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0\n# campaign.randoseed = 39\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc2 = campaign.accuracy_onlyPredictions\n#\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.1\n# campaign.randoseed = 32\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc3 = campaign.accuracy_onlyPredictions\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.1\n# campaign.randoseed = 17\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc4 = campaign.accuracy_onlyPredictions\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.5\n# campaign.randoseed = 3\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc5 = campaign.accuracy_onlyPredictions\n#\n# for i in range(1):\n# campaign = createCampaign()\n# campaign.ESS.low_homogeneity = True\n# campaign.ESS.error = True\n# campaign.ESS.error = 0.5\n# campaign.randoseed = 15\n# campaign.ESS.h_num = 10\n# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]\n# campaign.ESS.dVars = [('int', 0, 9)]\n# campaign.ESS.dimarr = [20, 20]\n# runCampaign(campaign)\n# print(campaign.groundTruth)\n# acc6 = campaign.accuracy_onlyPredictions\n#\n#\n# plt.plot([i+1 for i in range(len(acc1))], acc1, label=\"0.0 - 
B\", color=\"blue\")\n# plt.plot([i+1 for i in range(len(acc2))], acc2, label=\"0.0 - N\", color=\"green\")\n# plt.plot([i+1 for i in range(len(acc3))], acc3, label=\"0.1 - B\", color=\"red\")\n# plt.plot([i+1 for i in range(len(acc4))], acc4, label=\"0.1 - N\", color=\"black\")\n# plt.plot([i+1 for i in range(len(acc5))], acc5, label=\"0.5 - B\", color=\"yellow\")\n# plt.plot([i+1 for i in range(len(acc6))], acc6, label=\"0.5 - N\", color=\"cyan\")\n# plt.ylabel(\"Accuracy (%)\")\n# plt.xlabel(\"Iterations\")\n# plt.title(\"Different Categorical Models within Experimental Spaces\")\n# plt.legend()\n# plt.show()\n" ]
[ [ "numpy.zeros", "pandas.read_csv", "numpy.savetxt", "matplotlib.pyplot.subplots", "sklearn.cluster.KMeans", "matplotlib.pyplot.show", "sklearn.preprocessing.LabelEncoder" ] ]
polewczakp/pyAudioAnalysis
[ "7dc2d8e18da1ca2f2485a402bb7399b43bbb2b24" ]
[ "pyAudioAnalysis/audioSegmentation.py" ]
[ "from __future__ import print_function\nimport os\nimport csv\nimport glob\nimport scipy\nimport sklearn\nimport numpy as np\nimport hmmlearn.hmm\nimport sklearn.cluster\nimport pickle as cpickle\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\nimport sklearn.discriminant_analysis\nfrom pyAudioAnalysis import audioBasicIO\nfrom pyAudioAnalysis import audioTrainTest as at\nfrom pyAudioAnalysis import MidTermFeatures as mtf\nfrom pyAudioAnalysis import ShortTermFeatures as stf\n\n\"\"\" General utility functions \"\"\"\n\n\ndef smooth_moving_avg(signal, window=11):\n window = int(window)\n if signal.ndim != 1:\n raise ValueError(\"\")\n if signal.size < window:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n if window < 3:\n return signal\n s = np.r_[2 * signal[0] - signal[window - 1::-1],\n signal, 2 * signal[-1] - signal[-1:-window:-1]]\n w = np.ones(window, 'd')\n y = np.convolve(w/w.sum(), s, mode='same')\n return y[window:-window + 1]\n\n\ndef self_similarity_matrix(feature_vectors):\n \"\"\"\n This function computes the self-similarity matrix for a sequence\n of feature vectors.\n ARGUMENTS:\n - feature_vectors: a np matrix (nDims x nVectors) whose i-th column\n corresponds to the i-th feature vector\n\n RETURNS:\n - sim_matrix: the self-similarity matrix (nVectors x nVectors)\n \"\"\"\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix\n\n\ndef labels_to_segments(labels, window):\n \"\"\"\n ARGUMENTS:\n - labels: a sequence of class labels (per time window)\n - window: window duration (in seconds)\n\n RETURNS:\n - segments: a sequence of segment's limits: segs[i, 0] is start and\n segs[i, 1] are start and end point of segment i\n - classes: a sequence of class flags: class[i] is the class ID of\n the i-th segment\n \"\"\"\n\n if len(labels)==1:\n segs = [0, window]\n classes = labels\n return segs, classes\n\n\n num_segs = 0\n index = 0\n classes = []\n segment_list = []\n cur_label = labels[index]\n while index < len(labels) - 1:\n previous_value = cur_label\n while True:\n index += 1\n compare_flag = labels[index]\n if (compare_flag != cur_label) | (index == len(labels) - 1):\n num_segs += 1\n cur_label = labels[index]\n segment_list.append((index * window))\n classes.append(previous_value)\n break\n segments = np.zeros((len(segment_list), 2))\n\n for i in range(len(segment_list)):\n if i > 0:\n segments[i, 0] = segment_list[i-1]\n segments[i, 1] = segment_list[i]\n return segments, classes\n\n\ndef segments_to_labels(start_times, end_times, labels, window):\n \"\"\"\n This function converts segment endpoints and respective segment\n labels to fix-sized class labels.\n ARGUMENTS:\n - start_times: segment start points (in seconds)\n - end_times: segment endpoints (in seconds)\n - labels: segment labels\n - window: fix-sized window (in seconds)\n RETURNS:\n - flags: np array of class indices\n - class_names: list of classnames (strings)\n \"\"\"\n flags = []\n class_names = list(set(labels))\n index = window / 2.0\n while index < end_times[-1]:\n for i in range(len(start_times)):\n if start_times[i] < index <= end_times[i]:\n break\n flags.append(class_names.index(labels[i]))\n index += window\n return np.array(flags), class_names\n\n\ndef compute_metrics(confusion_matrix, class_names):\n \"\"\"\n This function computes the precision, recall and 
f1 measures,\n given a confusion matrix\n \"\"\"\n f1 = []\n recall = []\n precision = []\n n_classes = confusion_matrix.shape[0]\n if len(class_names) != n_classes:\n print(\"Error in computePreRec! Confusion matrix and class_names \"\n \"list must be of the same size!\")\n else:\n for i, c in enumerate(class_names):\n precision.append(confusion_matrix[i, i] /\n np.sum(confusion_matrix[:, i]))\n recall.append(confusion_matrix[i, i] /\n np.sum(confusion_matrix[i, :]))\n f1.append(2 * precision[-1] * recall[-1] /\n (precision[-1] + recall[-1]))\n return recall, precision, f1\n\n\ndef read_segmentation_gt(gt_file):\n \"\"\"\n This function reads a segmentation ground truth file,\n following a simple CSV format with the following columns:\n <segment start>,<segment end>,<class label>\n\n ARGUMENTS:\n - gt_file: the path of the CSV segment file\n RETURNS:\n - seg_start: a np array of segments' start positions\n - seg_end: a np array of segments' ending positions\n - seg_label: a list of respective class labels (strings)\n \"\"\"\n with open(gt_file, 'rt') as f_handle:\n reader = csv.reader(f_handle, delimiter='\\t')\n start_times = []\n end_times = []\n labels = []\n for row in reader:\n if len(row) == 3:\n start_times.append(float(row[0]))\n end_times.append(float(row[1]))\n labels.append((row[2]))\n return np.array(start_times), np.array(end_times), labels\n\n\ndef plot_segmentation_results(flags_ind, flags_ind_gt, class_names, mt_step,\n evaluate_only=False):\n \"\"\"\n This function plots statistics on the classification-segmentation results \n produced either by the fix-sized supervised method or the HMM method.\n It also computes the overall accuracy achieved by the respective method \n if ground-truth is available.\n \"\"\"\n \n flags = [class_names[int(f)] for f in flags_ind]\n segments, classes = labels_to_segments(flags, mt_step)\n min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0]) \n if min_len > 0:\n accuracy = np.sum(flags_ind[0:min_len] ==\n flags_ind_gt[0:min_len]) / float(min_len)\n else:\n accuracy = -1\n\n if not evaluate_only:\n duration = segments[-1, 1]\n s_percentages = np.zeros((len(class_names), ))\n percentages = np.zeros((len(class_names), ))\n av_durations = np.zeros((len(class_names), ))\n\n for i_seg in range(segments.shape[0]):\n s_percentages[class_names.index(classes[i_seg])] += \\\n (segments[i_seg, 1]-segments[i_seg, 0])\n\n for i in range(s_percentages.shape[0]):\n percentages[i] = 100.0 * s_percentages[i] / duration\n class_sum = sum(1 for c in classes if c == class_names[i])\n if class_sum > 0:\n av_durations[i] = s_percentages[i] / class_sum\n else:\n av_durations[i] = 0.0\n\n for i in range(percentages.shape[0]):\n print(class_names[i], percentages[i], av_durations[i])\n\n font = {'size': 10}\n plt.rc('font', **font)\n\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n ax1.set_yticks(np.array(range(len(class_names))))\n ax1.axis((0, duration, -1, len(class_names)))\n ax1.set_yticklabels(class_names)\n ax1.plot(np.array(range(len(flags_ind))) * mt_step +\n mt_step / 2.0, flags_ind)\n if flags_ind_gt.shape[0] > 0:\n ax1.plot(np.array(range(len(flags_ind_gt))) * mt_step +\n mt_step / 2.0, flags_ind_gt + 0.05, '--r')\n plt.xlabel(\"time (seconds)\")\n if accuracy >= 0:\n plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))\n\n ax2 = fig.add_subplot(223)\n plt.title(\"Classes percentage durations\")\n ax2.axis((0, len(class_names) + 1, 0, 100))\n ax2.set_xticks(np.array(range(len(class_names) + 1)))\n ax2.set_xticklabels([\" \"] + class_names)\n 
print(np.array(range(len(class_names))), percentages)\n ax2.bar(np.array(range(len(class_names))) + 0.5, percentages)\n\n ax3 = fig.add_subplot(224)\n plt.title(\"Segment average duration per class\")\n ax3.axis((0, len(class_names)+1, 0, av_durations.max()))\n ax3.set_xticks(np.array(range(len(class_names) + 1)))\n ax3.set_xticklabels([\" \"] + class_names)\n ax3.bar(np.array(range(len(class_names))) + 0.5, av_durations)\n fig.tight_layout()\n plt.show()\n return accuracy\n\n\ndef evaluate_speaker_diarization(labels, labels_gt):\n\n min_len = min(labels.shape[0], labels_gt.shape[0])\n labels = labels[0:min_len]\n labels_gt = labels_gt[0:min_len]\n\n unique_flags = np.unique(labels)\n unique_flags_gt = np.unique(labels_gt)\n\n # compute contigency table:\n contigency_matrix = np.zeros((unique_flags.shape[0],\n unique_flags_gt.shape[0]))\n for i in range(min_len):\n contigency_matrix[int(np.nonzero(unique_flags == labels[i])[0]),\n int(np.nonzero(unique_flags_gt == labels_gt[i])[0])] += 1.0\n\n columns, rows = contigency_matrix.shape\n row_sum = np.sum(contigency_matrix, axis=0)\n column_sum = np.sum(contigency_matrix, axis=1)\n matrix_sum = np.sum(contigency_matrix)\n\n purity_clust = np.zeros((columns, ))\n purity_speak = np.zeros((rows, ))\n # compute cluster purity:\n for i in range(columns):\n purity_clust[i] = np.max((contigency_matrix[i, :])) / (column_sum[i])\n\n for j in range(rows):\n purity_speak[j] = np.max((contigency_matrix[:, j])) / (row_sum[j])\n\n purity_cluster_m = np.sum(purity_clust * column_sum) / matrix_sum\n purity_speaker_m = np.sum(purity_speak * row_sum) / matrix_sum\n\n return purity_cluster_m, purity_speaker_m\n\n\ndef train_hmm_compute_statistics(features, labels):\n \"\"\"\n This function computes the statistics used to train\n an HMM joint segmentation-classification model\n using a sequence of sequential features and respective labels\n\n ARGUMENTS:\n - features: a np matrix of feature vectors (numOfDimensions x n_wins)\n - labels: a np array of class indices (n_wins x 1)\n RETURNS:\n - class_priors: matrix of prior class probabilities\n (n_classes x 1)\n - transmutation_matrix: transition matrix (n_classes x n_classes)\n - means: means matrix (numOfDimensions x 1)\n - cov: deviation matrix (numOfDimensions x 1)\n \"\"\"\n unique_labels = np.unique(labels)\n n_comps = len(unique_labels)\n\n n_feats = features.shape[0]\n\n if features.shape[1] < labels.shape[0]:\n print(\"trainHMM warning: number of short-term feature vectors \"\n \"must be greater or equal to the labels length!\")\n labels = labels[0:features.shape[1]]\n\n # compute prior probabilities:\n class_priors = np.zeros((n_comps,))\n for i, u_label in enumerate(unique_labels):\n class_priors[i] = np.count_nonzero(labels == u_label)\n # normalize prior probabilities\n class_priors = class_priors / class_priors.sum()\n\n # compute transition matrix:\n transmutation_matrix = np.zeros((n_comps, n_comps))\n for i in range(labels.shape[0]-1):\n transmutation_matrix[int(labels[i]), int(labels[i + 1])] += 1\n # normalize rows of transition matrix:\n for i in range(n_comps):\n transmutation_matrix[i, :] /= transmutation_matrix[i, :].sum()\n\n means = np.zeros((n_comps, n_feats))\n for i in range(n_comps):\n means[i, :] = \\\n np.array(features[:,\n np.nonzero(labels == unique_labels[i])[0]].mean(axis=1))\n\n cov = np.zeros((n_comps, n_feats))\n for i in range(n_comps):\n \"\"\"\n cov[i, :, :] = np.cov(features[:, np.nonzero(labels == u_labels[i])[0]])\n \"\"\"\n # use line above if HMM using full gaussian 
distributions are to be used\n cov[i, :] = np.std(features[:,\n np.nonzero(labels == unique_labels[i])[0]],\n axis=1)\n\n return class_priors, transmutation_matrix, means, cov\n\n\ndef train_hmm_from_file(wav_file, gt_file, hmm_model_name, mid_window, mid_step):\n \"\"\"\n This function trains a HMM model for segmentation-classification\n using a single annotated audio file\n ARGUMENTS:\n - wav_file: the path of the audio filename\n - gt_file: the path of the ground truth filename\n (a csv file of the form <segment start in seconds>,\n <segment end in seconds>,<segment label> in each row)\n - hmm_model_name: the name of the HMM model to be stored\n - mid_window: mid-term window size\n - mid_step: mid-term window step\n RETURNS:\n - hmm: an object to the resulting HMM\n - class_names: a list of class_names\n\n After training, hmm, class_names, along with the mid_window and mid_step\n values are stored in the hmm_model_name file\n \"\"\"\n\n seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)\n flags, class_names = segments_to_labels(seg_start, seg_end, seg_labs, mid_step)\n sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)\n features, _, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mid_window * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * 0.050),\n round(sampling_rate * 0.050))\n class_priors, transmutation_matrix, means, cov = \\\n train_hmm_compute_statistics(features, flags)\n hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], \"diag\")\n\n hmm.covars_ = cov\n hmm.means_ = means\n hmm.startprob_ = class_priors\n hmm.transmat_ = transmutation_matrix\n\n save_hmm(hmm_model_name, hmm, class_names, mid_window, mid_step)\n\n return hmm, class_names\n\n\ndef train_hmm_from_directory(folder_path, hmm_model_name, mid_window, mid_step):\n \"\"\"\n This function trains a HMM model for segmentation-classification using\n a directory where WAV files and .segments (ground-truth) files are stored\n ARGUMENTS:\n - folder_path: the path of the data directory\n - hmm_model_name: the name of the HMM model to be stored\n - mid_window: mid-term window size\n - mid_step: mid-term window step\n RETURNS:\n - hmm: an object to the resulting HMM\n - class_names: a list of class_names\n\n After training, hmm, class_names, along with the mid_window\n and mid_step values are stored in the hmm_model_name file\n \"\"\"\n\n flags_all = np.array([])\n class_names_all = []\n for i, f in enumerate(glob.glob(folder_path + os.sep + '*.wav')):\n # for each WAV file\n wav_file = f\n gt_file = f.replace('.wav', '.segments')\n if os.path.isfile(gt_file):\n seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)\n flags, class_names = \\\n segments_to_labels(seg_start, seg_end, seg_labs, mid_step)\n for c in class_names:\n # update class names:\n if c not in class_names_all:\n class_names_all.append(c)\n sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)\n feature_vector, _, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mid_window * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * 0.050),\n round(sampling_rate * 0.050))\n\n flag_len = len(flags)\n feat_cols = feature_vector.shape[1]\n min_sm = min(feat_cols, flag_len)\n feature_vector = feature_vector[:, 0:min_sm]\n flags = flags[0:min_sm]\n\n flags_new = []\n # append features and labels (map per-file class indices to the\n # global class_names_all index space)\n for j, fl in enumerate(flags):\n flags_new.append(class_names_all.index(class_names[flags[j]]))\n\n flags_all = np.append(flags_all, np.array(flags_new))\n\n if i == 0:\n f_all = feature_vector\n else:\n f_all 
= np.concatenate((f_all, feature_vector), axis=1)\n\n # compute HMM statistics\n class_priors, transmutation_matrix, means, cov = \\\n train_hmm_compute_statistics(f_all, flags_all)\n # train the HMM\n hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], \"diag\")\n hmm.covars_ = cov\n hmm.means_ = means\n hmm.startprob_ = class_priors\n hmm.transmat_ = transmutation_matrix\n\n save_hmm(hmm_model_name, hmm, class_names_all, mid_window, mid_step)\n\n return hmm, class_names_all\n\n\ndef save_hmm(hmm_model_name, model, classes, mid_window, mid_step):\n \"\"\"Save HMM model\"\"\"\n with open(hmm_model_name, \"wb\") as f_handle:\n cpickle.dump(model, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)\n cpickle.dump(classes, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)\n cpickle.dump(mid_window, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)\n cpickle.dump(mid_step, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)\n\n\ndef hmm_segmentation(audio_file, hmm_model_name, plot_results=False,\n gt_file=\"\"):\n sampling_rate, signal = audioBasicIO.read_audio_file(audio_file)\n\n with open(hmm_model_name, \"rb\") as f_handle:\n hmm = cpickle.load(f_handle)\n class_names = cpickle.load(f_handle)\n mid_window = cpickle.load(f_handle)\n mid_step = cpickle.load(f_handle)\n\n features, _, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mid_window * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * 0.050),\n round(sampling_rate * 0.050))\n\n # apply model\n labels = hmm.predict(features.T)\n labels_gt, class_names_gt, accuracy, cm = \\\n load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)\n return labels, class_names, accuracy, cm\n\n\ndef load_ground_truth_segments(gt_file, mt_step):\n seg_start, seg_end, seg_labels = read_segmentation_gt(gt_file)\n labels, class_names = segments_to_labels(seg_start, seg_end, seg_labels,\n mt_step)\n labels_temp = []\n for index, label in enumerate(labels):\n # \"align\" labels with GT\n if class_names[labels[index]] in class_names:\n labels_temp.append(class_names.index(class_names[\n labels[index]]))\n else:\n labels_temp.append(-1)\n labels = np.array(labels_temp)\n return labels, class_names\n\n\ndef calculate_confusion_matrix(predictions, ground_truth, classes):\n cm = np.zeros((len(classes), len(classes)))\n for index in range(min(predictions.shape[0], ground_truth.shape[0])):\n cm[int(ground_truth[index]), int(predictions[index])] += 1\n return cm\n\n\ndef mid_term_file_classification(input_file, model_name, model_type,\n plot_results=False, gt_file=\"\"):\n \"\"\"\n This function performs mid-term classification of an audio stream.\n Towards this end, supervised knowledge is used,\n i.e. 
a pre-trained classifier.\n ARGUMENTS:\n - input_file: path of the input WAV file\n - model_name: name of the classification model\n - model_type: svm or knn depending on the classifier type\n - plot_results: True if results are to be plotted using\n matplotlib along with a set of statistics\n\n RETURNS:\n - segs: a sequence of segment's endpoints: segs[i] is the\n endpoint of the i-th segment (in seconds)\n - classes: a sequence of class flags: class[i] is the\n class ID of the i-th segment\n \"\"\"\n labels = []\n accuracy = 0.0\n class_names = []\n cm = np.array([])\n if not os.path.isfile(model_name):\n print(\"mtFileClassificationError: input model_type not found!\")\n return labels, class_names, accuracy, cm\n\n # Load classifier:\n if model_type == \"knn\":\n classifier, mean, std, class_names, mt_win, mid_step, st_win, \\\n st_step, compute_beat = at.load_model_knn(model_name)\n else:\n classifier, mean, std, class_names, mt_win, mid_step, st_win, \\\n st_step, compute_beat = at.load_model(model_name)\n if compute_beat:\n print(\"Model \" + model_name + \" contains long-term music features \"\n \"(beat etc) and cannot be used in \"\n \"segmentation\")\n return labels, class_names, accuracy, cm\n # load input file\n sampling_rate, signal = audioBasicIO.read_audio_file(input_file)\n\n # could not read file\n if sampling_rate == 0:\n return labels, class_names, accuracy, cm\n\n # convert stereo (if) to mono\n signal = audioBasicIO.stereo_to_mono(signal)\n\n # mid-term feature extraction:\n mt_feats, _, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mt_win * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * st_win),\n round(sampling_rate * st_step))\n posterior_matrix = []\n\n # for each feature vector (i.e. for each fix-sized segment):\n for col_index in range(mt_feats.shape[1]):\n # normalize current feature v\n feature_vector = (mt_feats[:, col_index] - mean) / std\n\n # classify vector:\n label_predicted, posterior = \\\n at.classifier_wrapper(classifier, model_type, feature_vector)\n labels.append(label_predicted)\n\n # update probability matrix\n posterior_matrix.append(np.max(posterior))\n labels = np.array(labels)\n\n # convert fix-sized flags to segments and classes\n segs, classes = labels_to_segments(labels, mid_step)\n segs[-1] = len(signal) / float(sampling_rate)\n # Load grount-truth:\n labels_gt, class_names_gt, accuracy, cm = \\\n load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)\n\n return labels, class_names, accuracy, cm\n\n\ndef load_ground_truth(gt_file, labels, class_names, mid_step, plot_results):\n accuracy = 0\n cm = np.array([])\n labels_gt = np.array([])\n if os.path.isfile(gt_file):\n # load ground truth and class names\n labels_gt, class_names_gt = load_ground_truth_segments(gt_file,\n mid_step)\n # map predicted labels to ground truth class names\n # Note: if a predicted label does not belong to the ground truth\n # classes --> -1\n labels_new = []\n for il, l in enumerate(labels):\n if class_names[int(l)] in class_names_gt:\n labels_new.append(class_names_gt.index(class_names[int(l)]))\n else:\n labels_new.append(-1)\n labels_new = np.array(labels_new)\n cm = calculate_confusion_matrix(labels_new, labels_gt, class_names_gt)\n\n accuracy = plot_segmentation_results(labels_new, labels_gt,\n class_names, mid_step, not plot_results)\n if accuracy >= 0:\n print(\"Overall Accuracy: {0:.2f}\".format(accuracy))\n\n return labels_gt, class_names, accuracy, cm\n\n\ndef 
evaluate_segmentation_classification_dir(dir_name, model_name, method_name):\n\n accuracies = []\n class_names = []\n cm_total = np.array([])\n for index, wav_file in enumerate(glob.glob(dir_name + os.sep + '*.wav')):\n print(wav_file)\n\n gt_file = wav_file.replace('.wav', '.segments')\n\n if method_name.lower() in [\"svm\", \"svm_rbf\", \"knn\", \"randomforest\",\n \"gradientboosting\", \"extratrees\"]:\n flags_ind, class_names, accuracy, cm_temp = \\\n mid_term_file_classification(wav_file, model_name, method_name,\n False, gt_file)\n else:\n flags_ind, class_names, accuracy, cm_temp = \\\n hmm_segmentation(wav_file, model_name, False, gt_file)\n if accuracy > 0:\n if not index:\n cm_total = np.copy(cm_temp)\n else:\n cm_total = cm_total + cm_temp\n accuracies.append(accuracy)\n print(cm_temp, class_names)\n print(cm_total)\n\n if len(cm_total.shape) > 1:\n cm_total = cm_total / np.sum(cm_total)\n rec, pre, f1 = compute_metrics(cm_total, class_names)\n\n print(\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \")\n print(\"Average Accuracy: {0:.1f}\".\n format(100.0*np.array(accuracies).mean()))\n print(\"Average recall: {0:.1f}\".format(100.0*np.array(rec).mean()))\n print(\"Average precision: {0:.1f}\".format(100.0*np.array(pre).mean()))\n print(\"Average f1: {0:.1f}\".format(100.0*np.array(f1).mean()))\n print(\"Median Accuracy: {0:.1f}\".\n format(100.0*np.median(np.array(accuracies))))\n print(\"Min Accuracy: {0:.1f}\".format(100.0*np.array(accuracies).min()))\n print(\"Max Accuracy: {0:.1f}\".format(100.0*np.array(accuracies).max()))\n else:\n print(\"Confusion matrix was empty, accuracy for every file was 0\")\n\n\ndef silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,\n weight=0.5, plot=False):\n \"\"\"\n Event Detection (silence removal)\n ARGUMENTS:\n - signal: the input audio signal\n - sampling_rate: sampling freq\n - st_win, st_step: window size and step in seconds\n - smooth_window: (optional) smooth window (in seconds)\n - weight: (optional) weight factor (0 < weight < 1)\n the higher, the more strict\n - plot: (optional) True if results are to be plotted\n RETURNS:\n - seg_limits: list of segment limits in seconds (e.g. [[0.1, 0.9],\n [1.4, 3.0]] means that\n the resulting segments are (0.1 - 0.9) seconds\n and (1.4, 3.0) seconds\n \"\"\"\n\n if weight >= 1:\n weight = 0.99\n if weight <= 0:\n weight = 0.01\n\n # Step 1: feature extraction\n signal = audioBasicIO.stereo_to_mono(signal)\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n st_win * sampling_rate,\n st_step * sampling_rate)\n\n # Step 2: train binary svm classifier of low vs high energy frames\n # keep only the energy short-term sequence (2nd feature)\n st_energy = st_feats[1, :]\n en = np.sort(st_energy)\n # number of 10% of the total short-term windows\n st_windows_fraction = int(len(en) / 10)\n\n # compute \"lower\" 10% energy threshold\n low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15\n\n # compute \"higher\" 10% energy threshold\n high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15\n\n # get all features that correspond to low energy\n low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]\n\n # get all features that correspond to high energy\n high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]\n\n # form the binary classification task and ...\n features = [low_energy.T, high_energy.T]\n # normalize and train the respective svm probabilistic model\n\n # (ONSET vs SILENCE)\n features_norm, mean, std = 
at.normalize_features(features)\n svm = at.train_svm(features_norm, 1.0)\n\n # Step 3: compute onset probability based on the trained svm\n prob_on_set = []\n for index in range(st_feats.shape[1]):\n # for each frame\n cur_fv = (st_feats[:, index] - mean) / std\n # get svm probability (that it belongs to the ONSET class)\n prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])\n prob_on_set = np.array(prob_on_set)\n\n # smooth probability:\n prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)\n\n # Step 4A: detect onset frame indices:\n prog_on_set_sort = np.sort(prob_on_set)\n\n # find probability Threshold as a weighted average\n # of top 10% and lower 10% of the values\n nt = int(prog_on_set_sort.shape[0] / 10)\n threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +\n weight * np.mean(prog_on_set_sort[-nt::]))\n\n max_indices = np.where(prob_on_set > threshold)[0]\n # get the indices of the frames that satisfy the thresholding\n index = 0\n seg_limits = []\n time_clusters = []\n\n # Step 4B: group frame indices to onset segments\n while index < len(max_indices):\n # for each of the detected onset indices\n cur_cluster = [max_indices[index]]\n if index == len(max_indices)-1:\n break\n while max_indices[index+1] - cur_cluster[-1] <= 2:\n cur_cluster.append(max_indices[index+1])\n index += 1\n if index == len(max_indices)-1:\n break\n index += 1\n time_clusters.append(cur_cluster)\n seg_limits.append([cur_cluster[0] * st_step,\n cur_cluster[-1] * st_step])\n\n # Step 5: Post process: remove very small segments:\n min_duration = 0.2\n seg_limits_2 = []\n for s_lim in seg_limits:\n if s_lim[1] - s_lim[0] > min_duration:\n seg_limits_2.append(s_lim)\n seg_limits = seg_limits_2\n\n if plot:\n time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /\n sampling_rate)\n\n plt.subplot(2, 1, 1)\n plt.plot(time_x, signal)\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step), \n prob_on_set)\n plt.title('Signal')\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.title('svm Probability')\n plt.show()\n\n return seg_limits\n\n\ndef speaker_diarization(filename, n_speakers, mid_window=2.0, mid_step=0.2,\n short_window=0.05, lda_dim=35, plot_res=False):\n \"\"\"\n ARGUMENTS:\n - filename: the name of the WAV file to be analyzed\n - n_speakers the number of speakers (clusters) in\n the recording (<=0 for unknown)\n - mid_window (opt) mid-term window size\n - mid_step (opt) mid-term window step\n - short_window (opt) short-term window size\n - lda_dim (opt LDA dimension (0 for no LDA)\n - plot_res (opt) 0 for not plotting the results 1 for plotting\n \"\"\"\n sampling_rate, signal = audioBasicIO.read_audio_file(filename)\n signal = audioBasicIO.stereo_to_mono(signal)\n duration = len(signal) / sampling_rate\n\n base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"data/models\")\n\n classifier_all, mean_all, std_all, class_names_all, _, _, _, _, _ = \\\n at.load_model_knn(os.path.join(base_dir, \"knn_speaker_10\"))\n classifier_fm, mean_fm, std_fm, class_names_fm, _, _, _, _, _ = \\\n at.load_model_knn(os.path.join(base_dir, \"knn_speaker_male_female\"))\n\n mid_feats, st_feats, _ = \\\n mtf.mid_feature_extraction(signal, sampling_rate,\n mid_window * sampling_rate,\n mid_step * sampling_rate,\n round(sampling_rate * short_window),\n 
round(sampling_rate * short_window * 0.5))\n\n mid_term_features = np.zeros((mid_feats.shape[0] + len(class_names_all) +\n len(class_names_fm), mid_feats.shape[1]))\n\n for index in range(mid_feats.shape[1]):\n feature_norm_all = (mid_feats[:, index] - mean_all) / std_all\n feature_norm_fm = (mid_feats[:, index] - mean_fm) / std_fm\n _, p1 = at.classifier_wrapper(classifier_all, \"knn\", feature_norm_all)\n _, p2 = at.classifier_wrapper(classifier_fm, \"knn\", feature_norm_fm)\n start = mid_feats.shape[0]\n end = mid_feats.shape[0] + len(class_names_all)\n mid_term_features[0:mid_feats.shape[0], index] = mid_feats[:, index]\n mid_term_features[start:end, index] = p1 + 1e-4\n mid_term_features[end::, index] = p2 + 1e-4\n\n mid_feats = mid_term_features # TODO\n feature_selected = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,\n 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]\n\n mid_feats = mid_feats[feature_selected, :]\n\n mid_feats_norm, mean, std = at.normalize_features([mid_feats.T])\n mid_feats_norm = mid_feats_norm[0].T\n n_wins = mid_feats.shape[1]\n\n # remove outliers:\n dist_all = np.sum(distance.squareform(distance.pdist(mid_feats_norm.T)),\n axis=0)\n m_dist_all = np.mean(dist_all)\n i_non_outliers = np.nonzero(dist_all < 1.2 * m_dist_all)[0]\n\n # TODO: Combine energy threshold for outlier removal:\n # EnergyMin = np.min(mt_feats[1,:])\n # EnergyMean = np.mean(mt_feats[1,:])\n # Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0\n # i_non_outliers = np.nonzero(mt_feats[1,:] > Thres)[0]\n # print i_non_outliers\n\n mt_feats_norm_or = mid_feats_norm\n mid_feats_norm = mid_feats_norm[:, i_non_outliers]\n\n # LDA dimensionality reduction:\n if lda_dim > 0:\n\n # extract mid-term features with minimum step:\n window_ratio = int(round(mid_window / short_window))\n step_ratio = int(round(short_window / short_window))\n mt_feats_to_red = []\n num_of_features = len(st_feats)\n num_of_stats = 2\n for index in range(num_of_stats * num_of_features):\n mt_feats_to_red.append([])\n\n # for each of the short-term features:\n for index in range(num_of_features):\n cur_pos = 0\n feat_len = len(st_feats[index])\n while cur_pos < feat_len:\n n1 = cur_pos\n n2 = cur_pos + window_ratio\n if n2 > feat_len:\n n2 = feat_len\n short_features = st_feats[index][n1:n2]\n mt_feats_to_red[index].append(np.mean(short_features))\n mt_feats_to_red[index + num_of_features].\\\n append(np.std(short_features))\n cur_pos += step_ratio\n mt_feats_to_red = np.array(mt_feats_to_red)\n mt_feats_to_red_2 = np.zeros((mt_feats_to_red.shape[0] +\n len(class_names_all) +\n len(class_names_fm),\n mt_feats_to_red.shape[1]))\n limit = mt_feats_to_red.shape[0] + len(class_names_all)\n for index in range(mt_feats_to_red.shape[1]):\n feature_norm_all = (mt_feats_to_red[:, index] - mean_all) / std_all\n feature_norm_fm = (mt_feats_to_red[:, index] - mean_fm) / std_fm\n _, p1 = at.classifier_wrapper(classifier_all, \"knn\",\n feature_norm_all)\n _, p2 = at.classifier_wrapper(classifier_fm, \"knn\", feature_norm_fm)\n mt_feats_to_red_2[0:mt_feats_to_red.shape[0], index] = \\\n mt_feats_to_red[:, index]\n mt_feats_to_red_2[mt_feats_to_red.shape[0]:limit, index] = p1 + 1e-4\n mt_feats_to_red_2[limit::, index] = p2 + 1e-4\n mt_feats_to_red = mt_feats_to_red_2\n mt_feats_to_red = mt_feats_to_red[feature_selected, :]\n mt_feats_to_red, mean, std = at.normalize_features([mt_feats_to_red.T])\n mt_feats_to_red = mt_feats_to_red[0].T\n labels = np.zeros((mt_feats_to_red.shape[1], ))\n lda_step = 1.0\n lda_step_ratio = lda_step / 
short_window\n for index in range(labels.shape[0]):\n labels[index] = int(index * short_window / lda_step_ratio)\n clf = sklearn.discriminant_analysis.\\\n LinearDiscriminantAnalysis(n_components=lda_dim)\n clf.fit(mt_feats_to_red.T, labels)\n mid_feats_norm = (clf.transform(mid_feats_norm.T)).T\n\n if n_speakers <= 0:\n s_range = range(2, 10)\n else:\n s_range = [n_speakers]\n cluster_labels = []\n sil_all = []\n cluster_centers = []\n \n for speakers in s_range:\n k_means = sklearn.cluster.KMeans(n_clusters=speakers)\n k_means.fit(mid_feats_norm.T)\n cls = k_means.labels_ \n means = k_means.cluster_centers_\n\n cluster_labels.append(cls)\n cluster_centers.append(means)\n sil_1, sil_2 = [], []\n for c in range(speakers):\n # for each speaker (i.e. for each extracted cluster)\n clust_per_cent = np.nonzero(cls == c)[0].shape[0] / float(len(cls))\n if clust_per_cent < 0.020:\n sil_1.append(0.0)\n sil_2.append(0.0)\n else:\n # get subset of feature vectors\n mt_feats_norm_temp = mid_feats_norm[:, cls == c]\n # compute average distance between samples\n # that belong to the cluster (a values)\n dist = distance.pdist(mt_feats_norm_temp.T)\n sil_1.append(np.mean(dist)*clust_per_cent)\n sil_temp = []\n for c2 in range(speakers):\n # compute distances from samples of other clusters\n if c2 != c:\n clust_per_cent_2 = np.nonzero(cls == c2)[0].shape[0] /\\\n float(len(cls))\n mid_features_temp = mid_feats_norm[:, cls == c2]\n dist = distance.cdist(mt_feats_norm_temp.T,\n mid_features_temp.T)\n sil_temp.append(np.mean(dist)*(clust_per_cent\n + clust_per_cent_2)/2.0)\n sil_temp = np.array(sil_temp)\n # ... and keep the minimum value (i.e.\n # the distance from the \"nearest\" cluster)\n sil_2.append(min(sil_temp))\n sil_1 = np.array(sil_1)\n sil_2 = np.array(sil_2)\n sil = []\n for c in range(speakers):\n # for each cluster (speaker) compute silhouette\n sil.append((sil_2[c] - sil_1[c]) / (max(sil_2[c], sil_1[c]) + 1e-5))\n # keep the AVERAGE SILLOUETTE\n sil_all.append(np.mean(sil))\n\n imax = int(np.argmax(sil_all))\n # optimal number of clusters\n num_speakers = s_range[imax]\n\n # generate the final set of cluster labels\n # (important: need to retrieve the outlier windows:\n # this is achieved by giving them the value of their\n # nearest non-outlier window)\n cls = np.zeros((n_wins,))\n for index in range(n_wins):\n j = np.argmin(np.abs(index-i_non_outliers))\n cls[index] = cluster_labels[imax][j]\n \n # Post-process method 1: hmm smoothing\n for index in range(1):\n # hmm training\n start_prob, transmat, means, cov = \\\n train_hmm_compute_statistics(mt_feats_norm_or, cls)\n hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], \"diag\")\n hmm.startprob_ = start_prob\n hmm.transmat_ = transmat \n hmm.means_ = means\n hmm.covars_ = cov\n cls = hmm.predict(mt_feats_norm_or.T) \n \n # Post-process method 2: median filtering:\n cls = scipy.signal.medfilt(cls, 13)\n cls = scipy.signal.medfilt(cls, 11)\n\n class_names = [\"speaker{0:d}\".format(c) for c in range(num_speakers)]\n\n # load ground-truth if available\n gt_file = filename.replace('.wav', '.segments')\n # if groundtruth exists\n if os.path.isfile(gt_file):\n seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)\n flags_gt, class_names_gt = segments_to_labels(seg_start, seg_end,\n seg_labs, mid_step)\n\n if plot_res:\n fig = plt.figure() \n if n_speakers > 0:\n ax1 = fig.add_subplot(111)\n else:\n ax1 = fig.add_subplot(211)\n ax1.set_yticks(np.array(range(len(class_names))))\n ax1.axis((0, duration, -1, len(class_names)))\n 
ax1.set_yticklabels(class_names)\n ax1.plot(np.array(range(len(cls))) * mid_step + mid_step / 2.0, cls)\n\n if os.path.isfile(gt_file):\n if plot_res:\n ax1.plot(np.array(range(len(flags_gt))) *\n mid_step + mid_step / 2.0, flags_gt, 'r')\n purity_cluster_m, purity_speaker_m = \\\n evaluate_speaker_diarization(cls, flags_gt)\n print(\"{0:.1f}\\t{1:.1f}\".format(100 * purity_cluster_m,\n 100 * purity_speaker_m))\n if plot_res:\n plt.title(\"Cluster purity: {0:.1f}% - \"\n \"Speaker purity: {1:.1f}%\".format(100 * purity_cluster_m,\n 100 * purity_speaker_m))\n if plot_res:\n plt.xlabel(\"time (seconds)\")\n if n_speakers <= 0:\n plt.subplot(212)\n plt.plot(s_range, sil_all)\n plt.xlabel(\"number of clusters\")\n plt.ylabel(\"average clustering's silhouette\")\n plt.show()\n return cls\n\n\ndef speaker_diarization_evaluation(folder_name, lda_dimensions):\n \"\"\"\n This function prints the cluster purity and speaker purity for\n each WAV file stored in a provided directory (.SEGMENT files\n are needed as ground-truth)\n ARGUMENTS:\n - folder_name: the full path of the folder where the WAV and\n segment (ground-truth) files are stored\n - lda_dimensions: a list of LDA dimensions (0 for no LDA)\n \"\"\"\n types = ('*.wav', )\n wav_files = []\n for files in types:\n wav_files.extend(glob.glob(os.path.join(folder_name, files)))\n \n wav_files = sorted(wav_files)\n\n # get number of unique speakers per file (from ground-truth) \n num_speakers = []\n for wav_file in wav_files:\n gt_file = wav_file.replace('.wav', '.segments')\n if os.path.isfile(gt_file):\n _, _, seg_labs = read_segmentation_gt(gt_file)\n num_speakers.append(len(list(set(seg_labs))))\n else:\n num_speakers.append(-1)\n \n for dim in lda_dimensions:\n print(\"LDA = {0:d}\".format(dim))\n for i, wav_file in enumerate(wav_files):\n speaker_diarization(wav_file, num_speakers[i], 2.0, 0.2, 0.05, dim,\n plot_res=False)\n\n\ndef music_thumbnailing(signal, sampling_rate, short_window=1.0, short_step=0.5,\n thumb_size=10.0, limit_1=0, limit_2=1):\n \"\"\"\n This function detects instances of the most representative part of a\n music recording, also called \"music thumbnails\".\n A technique similar to the one proposed in [1] is used; however, a wider\n set of audio features is used instead of chroma features.\n In particular the following steps are followed:\n - Extract short-term audio features. Typical short-term window size: 1\n second\n - Compute the self-similarity matrix, i.e. all pairwise similarities\n between feature vectors\n - Apply a diagonal mask as a moving average filter on the values of the\n self-similarity matrix.\n The size of the mask is equal to the desirable thumbnail length.\n - Find the position of the maximum value of the new (filtered)\n self-similarity matrix. The audio segments that correspond to the\n diagonal around that position are the selected thumbnails\n \n\n ARGUMENTS:\n - signal: input signal\n - sampling_rate: sampling frequency\n - short_window: window size (in seconds)\n - short_step: window step (in seconds)\n - thumb_size: desired thumbnail size (in seconds)\n \n RETURNS:\n - A1: beginning of 1st thumbnail (in seconds)\n - A2: ending of 1st thumbnail (in seconds)\n - B1: beginning of 2nd thumbnail (in seconds)\n - B2: ending of 2nd thumbnail (in seconds)\n\n USAGE EXAMPLE:\n import audioFeatureExtraction as aF\n [fs, x] = basicIO.readAudioFile(input_file)\n [A1, A2, B1, B2] = musicThumbnailing(x, fs)\n\n [1] Bartsch, M. A., & Wakefield, G. H. (2005). 
Audio thumbnailing\n of popular music using chroma-based representations.\n Multimedia, IEEE Transactions on, 7(1), 96-104.\n \"\"\"\n signal = audioBasicIO.stereo_to_mono(signal)\n # feature extraction:\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n sampling_rate * short_window,\n sampling_rate * short_step)\n\n # self-similarity matrix\n sim_matrix = self_similarity_matrix(st_feats)\n\n # moving filter:\n m_filter = int(round(thumb_size / short_step))\n diagonal = np.eye(m_filter, m_filter)\n sim_matrix = scipy.signal.convolve2d(sim_matrix, diagonal, 'valid')\n\n # post-processing (remove main diagonal elements)\n min_sm = np.min(sim_matrix)\n for i in range(sim_matrix.shape[0]):\n for j in range(sim_matrix.shape[1]):\n if abs(i-j) < 5.0 / short_step or i > j:\n sim_matrix[i, j] = min_sm\n\n # find max position:\n sim_matrix[0:int(limit_1 * sim_matrix.shape[0]), :] = min_sm\n sim_matrix[:, 0:int(limit_1 * sim_matrix.shape[0])] = min_sm\n sim_matrix[int(limit_2 * sim_matrix.shape[0])::, :] = min_sm\n sim_matrix[:, int(limit_2 * sim_matrix.shape[0])::] = min_sm\n\n rows, cols = np.unravel_index(sim_matrix.argmax(), sim_matrix.shape)\n i1 = rows\n i2 = rows\n j1 = cols\n j2 = cols\n\n while i2-i1 < m_filter:\n if i1 <= 0 or j1 <= 0 or i2 >= sim_matrix.shape[0]-2 or \\\n j2 >= sim_matrix.shape[1]-2:\n break\n if sim_matrix[i1-1, j1-1] > sim_matrix[i2 + 1, j2 + 1]:\n i1 -= 1\n j1 -= 1 \n else: \n i2 += 1\n j2 += 1 \n\n return short_step * i1, short_step * i2, short_step * j1, short_step * j2, \\\n sim_matrix\n\n\n\n" ]
[ [ "numpy.ones", "numpy.sum", "scipy.spatial.distance.pdist", "scipy.spatial.distance.cdist", "numpy.copy", "sklearn.cluster.KMeans", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "numpy.abs", "matplotlib.pyplot.title", "numpy.where", "numpy.nonzero", "numpy.unique", "numpy.mean", "numpy.eye", "matplotlib.pyplot.axvline", "scipy.signal.medfilt", "numpy.zeros", "matplotlib.pyplot.rc", "numpy.argmax", "numpy.count_nonzero", "numpy.arange", "numpy.max", "numpy.min", "numpy.sort", "numpy.std", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "scipy.signal.convolve2d", "numpy.array", "numpy.concatenate", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "matplotlib.pyplot.xlabel" ] ]
BwCai/DCAA-UDA
[ "359c2122060aebfbe4384c918768c261fe2dc9c7" ]
[ "models/adaptation_model_stage1.py" ]
[ "from models.base_model import BaseModel\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport os, sys\nimport torch\nimport numpy as np\nimport itertools\n\nfrom torch.autograd import Variable\nfrom optimizers import get_optimizer\nfrom schedulers import get_scheduler\nfrom models.sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback\nfrom models.deeplab_multimodal import DeepLab\nfrom models.decoder import Decoder\nfrom models.aspp import ASPP\nfrom models.discriminator import FCDiscriminator, FCDiscriminator_low, FCDiscriminator_out, FCDiscriminator_class\nfrom loss import get_loss_function\nfrom .utils import freeze_bn, GradReverse, normalisation_pooling\nfrom metrics import runningScore\nimport pdb\n\ndef multimodal_merger(multi_modal_data, is_upsample=False, up_size=None):\n \"\"\"\n [Func Handler] multimodal_merger:\n @Input Params:\n multi_modal_data: dict.\n examples: {\n \"feat_cls\": feat_cls,\n \"output\": output,\n }\n @Reture:\n merge_out: dict.\n examples: {\n \"feat_cls\": feat_cls,\n \"output_comb\": output_comb,\n \"output\": output,\n }\n \"\"\"\n feat_cls = multi_modal_data['feat_cls']\n # merge class features\n feat_cls_cat = torch.cat(feat_cls, 1) # concat \n # merge output pred\n output = multi_modal_data['output']\n output_comb = 0\n for _i in range(len(output)):\n if is_upsample:\n output[_i] = F.interpolate(output[_i], size=up_size, mode='bilinear', align_corners=True)\n output_comb += output[_i]\n\n merge_out = {\n 'feat_cls': feat_cls,\n 'feat_cls_cat': feat_cls_cat,\n 'output_comb': output_comb,\n 'output': output,\n }\n return merge_out\n\nclass CustomMetricsMultimodalMerger():\n \"\"\"\n [Func Handler] objective_vectors_multimodal_merger:\n @Input Params:\n multi_modal_data: dict.\n examples: {\n \"class_threshold_group\": [model.class_threshold_group[modal_idx][i], ...]\n \"objective_vectors_group\": [model.objective_vectors_group[modal_idx][i], ...],\n }\n cate_idx: int. 
0 ~ 18\n modal_ids: list.\n examples: [0, 1] or [0,]\n @Reture:\n merge_out: dict.\n examples: {\n \"class_threshold\": class_threshold,\n \"objective_vectors\": objective_vectors,\n }\n \"\"\"\n\n def __init__(self, modal_num, category_num, model):\n self.modal_num = modal_num\n self.category_num = category_num\n self._model = model\n\n def initialize_model(model):\n self._model = model\n\n def merge_class_threshold(self, modal_ids=[]):\n assert self._model is not None, \"[ERROR] Deeplab Model not initialize before using!\"\n _class_threshold_group = self._model.class_threshold_group[modal_ids]\n return torch.mean(_class_threshold_group, dim=0) # modal_num x 19 --> 19\n\n def merge_clu_threshold(self, clu_threshold, modal_ids=[]):\n _clu_threshold_group = clu_threshold[modal_ids]\n return torch.mean(_clu_threshold_group, dim=0)\n\n def merge_objective_vectors(self, modal_ids=[]):\n assert self._model is not None, \"[ERROR] Deeplab Model not initialize before using!\"\n _modal_num, _cate_num, _feat_dim = self._model.objective_vectors_group.size()\n _objective_vectors = self._model.objective_vectors_group[modal_ids]\n # modal_num x 19 x 256 --> 19 x modal_num x 256 --> 19 x (modal_num x 256)\n assert _objective_vectors.dim() == 4, \"objective_vector dimension != 4\"\n _objective_vectors = _objective_vectors.permute(1, 0, 2).contiguous()\n\n return _objective_vectors.view(_cate_num, -1)\n\nclass CustomMetrics():\n def __init__(self, numbers=19, modal_num=3, model=None):\n self.class_numbers = numbers\n self.classes_recall_thr = np.zeros([19, 3])\n self.classes_recall_thr_num = np.zeros([19])\n self.classes_recall_clu = np.zeros([19, 3])\n self.classes_recall_clu_num = np.zeros([19])\n self.running_metrics_val_threshold = runningScore(self.class_numbers)\n self.running_metrics_val_clusters = runningScore(self.class_numbers)\n self.clu_threshold = torch.full((modal_num + 1, 19), 2.5).cuda()\n self.multimodal_merger = CustomMetricsMultimodalMerger(\n modal_num=modal_num + 1, category_num=numbers, model=model\n )\n \n def update(self, feat_cls, outputs, labels, modal_ids=[0,]): \n '''calculate accuracy. 
caring about recall but not IoU'''\n batch, width, height = labels.shape\n labels = labels.reshape([batch, 1, width, height]).float()\n labels = F.interpolate(labels, size=feat_cls.size()[2:], mode='nearest')\n outputs_threshold = outputs.clone()\n outputs_threshold = F.softmax(outputs_threshold, dim=1)\n #self.running_metrics_val_threshold.update(labels.cpu().numpy(), outputs_threshold.argmax(1).cpu().numpy())\n self.running_metrics_val_threshold.update(labels, outputs_threshold.argmax(1))\n\n _class_threshold_set = self.multimodal_merger.merge_class_threshold(modal_ids=modal_idx)\n for i in range(19):\n outputs_threshold[:, i, :, :] = torch.where(outputs_threshold[:, i, :, :] > _class_threshold_set[i], torch.Tensor([1]).cuda(), torch.Tensor([0]).cuda())\n\n _batch, _channel, _w, _h = outputs_threshold.shape\n _tmp = torch.full([_batch, 1, _w, _h], 0.2,).cuda()\n _tmp = torch.cat((outputs_threshold, _tmp), 1)\n threshold_arg = _tmp.argmax(1, keepdim=True)\n threshold_arg[threshold_arg == 19] = 250 #ignore index\n truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), threshold_arg.cpu().int().numpy())\n self.classes_recall_thr[:, 0] += truth\n self.classes_recall_thr[:, 2] += pred_all\n self.classes_recall_thr[:, 1] += truth_all\n\n outputs_cluster = outputs.clone()\n _objective_vectors_set = self.multimodal_merger.merge_objective_vectors(modal_ids=modal_idx)\n\n for i in range(19):\n outputs_cluster[:, i, :, :] = torch.norm( _objective_vectors_set[i].reshape(-1,1,1).expand(-1,128,256) - feat_cls, 2, dim=1,)\n\n outputs_cluster_min, outputs_cluster_arg = outputs_cluster.min(dim=1, keepdim=True)\n outputs_cluster_second = outputs_cluster.scatter_(1, outputs_cluster_arg, 100)\n if torch.unique(outputs_cluster_second.argmax(1) - outputs_cluster_arg.squeeze()).squeeze().item() != 0:\n raise NotImplementedError('wrong when computing L2 norm!!')\n outputs_cluster_secondmin, outputs_cluster_secondarg = outputs_cluster_second.min(dim=1, keepdim=True)\n #self.running_metrics_val_clusters.update(labels.cpu().numpy(), outputs_cluster_arg.cpu().numpy())\n self.running_metrics_val_clusters.update(labels, outputs_cluster_arg)\n \n tmp_arg = outputs_cluster_arg.clone()\n pdb.set_trace()\n _clu_thresholds = self.multimodal_merger.merge_clu_threshold(self.clu_threshold, modal_ids=modal_ids)\n\n outputs_cluster_arg[(outputs_cluster_secondmin - outputs_cluster_min) < _clu_thresholds] = 250\n truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), outputs_cluster_arg.cpu().int().numpy())\n self.classes_recall_clu[:, 0] += truth\n self.classes_recall_clu[:, 2] += pred_all\n self.classes_recall_clu[:, 1] += truth_all\n return threshold_arg, outputs_cluster_arg\n\n def calc_recall(self, gt, argmax):\n truth = np.zeros([self.class_numbers])\n pred_all = np.zeros([self.class_numbers])\n truth_all = np.zeros([self.class_numbers])\n for i in range(self.class_numbers):\n truth[i] = (gt == i)[argmax == i].sum()\n pred_all[i] = (argmax == i).sum()\n truth_all[i] = (gt == i).sum()\n pass\n return truth, pred_all, truth_all\n \n def calc_mean_Clu_recall(self, ):\n return np.mean(self.classes_recall_clu[:, 0] / self.classes_recall_clu[:, 1])\n \n def calc_mean_Thr_recall(self, ):\n return np.mean(self.classes_recall_thr[:, 0] / self.classes_recall_thr[:, 1])\n\n def reset(self, ):\n self.running_metrics_val_clusters.reset()\n self.running_metrics_val_threshold.reset()\n self.classes_recall_clu = np.zeros([19, 3])\n self.classes_recall_thr = np.zeros([19, 3])\n\nclass CustomModel():\n 
def __init__(self, cfg, writer, logger, use_pseudo_label=False, modal_num=3):\n self.cfg = cfg\n self.writer = writer\n self.class_numbers = 19\n self.logger = logger\n cfg_model = cfg['model']\n self.cfg_model = cfg_model\n self.best_iou = -100\n self.iter = 0\n self.nets = []\n self.split_gpu = 0\n self.default_gpu = cfg['model']['default_gpu']\n self.PredNet_Dir = None\n self.valid_classes = cfg['training']['valid_classes']\n self.G_train = True\n self.cls_feature_weight = cfg['training']['cls_feature_weight']\n self.use_pseudo_label = use_pseudo_label\n self.modal_num = modal_num\n\n # cluster vectors & cuda initialization\n self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()\n self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()\n self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()\n self.class_threshold_group = torch.full([self.modal_num + 1, 19], 0.95).cuda()\n\n #self.metrics = CustomMetrics(self.class_numbers)\n self.metrics = CustomMetrics(self.class_numbers, modal_num=self.modal_num, model=self)\n\n bn = cfg_model['bn']\n if bn == 'sync_bn':\n BatchNorm = SynchronizedBatchNorm2d\n elif bn == 'bn':\n BatchNorm = nn.BatchNorm2d\n elif bn == 'gn':\n BatchNorm = nn.GroupNorm\n else:\n raise NotImplementedError('batch norm choice {} is not implemented'.format(bn))\n if use_pseudo_label:\n self.PredNet = DeepLab(\n num_classes=19,\n backbone=cfg_model['basenet']['version'],\n output_stride=16,\n bn=cfg_model['bn'],\n freeze_bn=True,\n modal_num=self.modal_num\n ).cuda()\n self.load_PredNet(cfg, writer, logger, dir=None, net=self.PredNet)\n self.PredNet_DP = self.init_device(self.PredNet, gpu_id=self.default_gpu, whether_DP=True) \n self.PredNet.eval()\n self.PredNet_num = 0\n\n self.BaseNet = DeepLab(\n num_classes=19,\n backbone=cfg_model['basenet']['version'],\n output_stride=16,\n bn=cfg_model['bn'],\n freeze_bn=True,\n modal_num=self.modal_num\n )\n\n logger.info('the backbone is {}'.format(cfg_model['basenet']['version']))\n\n self.BaseNet_DP = self.init_device(self.BaseNet, gpu_id=self.default_gpu, whether_DP=True)\n self.nets.extend([self.BaseNet])\n self.nets_DP = [self.BaseNet_DP]\n\n # Discriminator\n self.SOURCE_LABEL = 0\n self.TARGET_LABEL = 1\n self.DNets = []\n self.DNets_DP = []\n for _ in range(self.modal_num+1):\n _net_d = FCDiscriminator(inplanes=19)\n self.DNets.append(_net_d)\n _net_d_DP = self.init_device(_net_d, gpu_id=self.default_gpu, whether_DP=True)\n self.DNets_DP.append(_net_d_DP)\n\n self.nets.extend(self.DNets)\n self.nets_DP.extend(self.DNets_DP)\n\n self.optimizers = []\n self.schedulers = [] \n optimizer_cls = torch.optim.SGD\n optimizer_params = {k:v for k, v in cfg['training']['optimizer'].items() \n if k != 'name'}\n\n optimizer_cls_D = torch.optim.Adam\n optimizer_params_D = {k:v for k, v in cfg['training']['optimizer_D'].items() \n if k != 'name'}\n\n if self.use_pseudo_label:\n self.BaseOpti = optimizer_cls(self.BaseNet.parameters(), **optimizer_params)\n else:\n self.BaseOpti = optimizer_cls(self.BaseNet.optim_parameters(cfg['training']['optimizer']['lr']), **optimizer_params)\n self.optimizers.extend([self.BaseOpti])\n\n self.DiscOptis = []\n for _d_net in self.DNets: \n self.DiscOptis.append(\n optimizer_cls_D(_d_net.parameters(), **optimizer_params_D)\n )\n self.optimizers.extend(self.DiscOptis)\n\n self.schedulers = [] \n if self.use_pseudo_label:\n self.BaseSchedule = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])\n 
self.schedulers.extend([self.BaseSchedule])\n else:\n \"\"\"BaseSchedule detail see FUNC: scheduler_step()\"\"\"\n self.learning_rate = cfg['training']['optimizer']['lr']\n self.gamma = cfg['training']['lr_schedule']['gamma']\n self.num_steps = cfg['training']['lr_schedule']['max_iter']\n self._BaseSchedule_nouse = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])\n self.schedulers.extend([self._BaseSchedule_nouse])\n\n self.DiscSchedules = []\n for _disc_opt in self.DiscOptis:\n self.DiscSchedules.append(\n get_scheduler(_disc_opt, cfg['training']['lr_schedule'])\n )\n self.schedulers.extend(self.DiscSchedules)\n\n self.setup(cfg, writer, logger)\n\n self.adv_source_label = 0\n self.adv_target_label = 1\n self.bceloss = nn.BCEWithLogitsLoss(reduction='none')\n self.loss_fn = get_loss_function(cfg)\n self.mseloss = nn.MSELoss()\n self.l1loss = nn.L1Loss()\n self.smoothloss = nn.SmoothL1Loss()\n self.triplet_loss = nn.TripletMarginLoss()\n\n def create_PredNet(self,):\n ss = DeepLab(\n num_classes=19,\n backbone=self.cfg_model['basenet']['version'],\n output_stride=16,\n bn=self.cfg_model['bn'],\n freeze_bn=True,\n modal_num=self.modal_num,\n ).cuda()\n ss.eval()\n return ss\n\n def setup(self, cfg, writer, logger):\n '''\n set optimizer and load pretrained model\n '''\n for net in self.nets:\n # name = net.__class__.__name__\n self.init_weights(cfg['model']['init'], logger, net)\n print(\"Initialization completed\")\n if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:\n print(\"loading pretrained model for {}\".format(net.__class__.__name__))\n net._load_pretrained_model()\n '''load pretrained model\n '''\n if cfg['training']['resume_flag']:\n self.load_nets(cfg, writer, logger)\n\n def lr_poly(self):\n return self.learning_rate * ((1 - float(self.iter) / self.num_steps) ** (self.gamma))\n\n def adjust_basenet_learning_rate(self):\n lr = self.lr_poly()\n self.BaseOpti.param_groups[0]['lr'] = lr\n if len(self.BaseOpti.param_groups) > 1:\n self.BaseOpti.param_groups[1]['lr'] = lr * 10\n\n def forward(self, input):\n feat, feat_low, att_mask, feat_cls, output = self.BaseNet_DP(input)\n\n return feat, feat_low, feat_cls, output\n\n def forward_Up(self, input):\n feat, feat_low, feat_cls, outputs = self.forward(input)\n output = F.interpolate(outputs[-1], size=input.size()[2:], mode='bilinear', align_corners=True)\n return feat, feat_low, feat_cls, output\n\n def PredNet_Forward(self, input):\n with torch.no_grad():\n _, _, att_mask, feat_cls, output_result = self.PredNet_DP(input)\n return _, _, feat_cls, output_result\n\n def calculate_mean_vector(self, feat_cls, outputs, labels, ):\n outputs_softmax = F.softmax(outputs, dim=1)\n outputs_argmax = outputs_softmax.argmax(dim=1, keepdim=True)\n outputs_argmax = self.process_label(outputs_argmax.float())\n labels_expanded = self.process_label(labels)\n outputs_pred = labels_expanded * outputs_argmax\n scale_factor = F.adaptive_avg_pool2d(outputs_pred, 1)\n vectors = []\n ids = []\n for n in range(feat_cls.size()[0]):\n for t in range(self.class_numbers):\n if scale_factor[n][t].item()==0:\n continue\n if (outputs_pred[n][t] > 0).sum() < 10:\n continue\n s = feat_cls[n] * outputs_pred[n][t]\n scale = torch.sum(outputs_pred[n][t]) / labels.shape[2] / labels.shape[3] * 2\n s = normalisation_pooling()(s, scale)\n s = F.adaptive_avg_pool2d(s, 1) / scale_factor[n][t]\n vectors.append(s)\n ids.append(t)\n return vectors, ids\n\n def step(self, source_x, source_label, source_modal_ids, target_x, target_label, 
target_modal_ids, use_pseudo_loss=False):\n assert len(source_modal_ids) == source_x.size(0), \"modal_ids' batchsize != source_x's batchsize\"\n _, _, source_feat_cls, source_output = self.forward(input=source_x) \n\n \"\"\"source_output: [B x 19 x W x H, ...]\n select modal-branch output in each batchsize\n Specific-modal output\n \"\"\"\n source_output_modal_k = torch.stack(\n [\n source_output[_modal_i][_batch_i]\n for _batch_i, _modal_i in enumerate(source_modal_ids)\n ], \n dim=0,\n )\n # attention output & specific-modal output\n source_output_comb = torch.cat([source_output_modal_k, source_output[-1]], dim=0)\n\n source_label_comb = torch.cat([source_label, source_label.clone()], dim=0)\n\n source_outputUp = F.interpolate(source_output_comb, size=source_x.size()[-2:], mode='bilinear', align_corners=True)\n\n loss_GTA = self.loss_fn(input=source_outputUp, target=source_label_comb)\n #self.PredNet.eval()\n\n # adversarial loss\n # -----------------------------\n \"\"\"Generator (segmentation)\"\"\"\n # -----------------------------\n # On Source Domain \n loss_adv = torch.Tensor([0]).cuda()\n _batch_size = 0\n \n _, _, _, target_output = self.forward(target_x)\n\n target_modal_ids_tensor = torch.Tensor(target_modal_ids).cuda()\n for t_out, _d_net_DP, _d_net, modal_idx in zip(target_output, self.DNets_DP, self.DNets, range(len(target_output))):\n # set grad false\n self.set_requires_grad(self.logger, _d_net, requires_grad = False)\n # true/false discriminator\n t_D_out = _d_net_DP(F.softmax(t_out, dim=1))\n #source_modal_ids\n loss_temp = torch.mean(self.bceloss(\n t_D_out,\n torch.FloatTensor(t_D_out.data.size()).fill_(1.0).cuda()\n ), [1,2,3])\n\n if modal_idx >= self.modal_num:\n loss_adv += torch.mean(loss_temp)\n elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:\n loss_adv += 0.0\n else:\n loss_adv += torch.mean(torch.masked_select(loss_temp, target_modal_ids_tensor==modal_idx))\n\n _batch_size += t_out.size(0)\n\n #loss_adv /= _batch_size\n loss_adv *= self.cfg['training']['loss_adv_lambda']\n\n loss_G = torch.Tensor([0]).cuda()\n loss_G = loss_G + loss_GTA + loss_adv\n\n self.BaseOpti.zero_grad()\n if loss_G.item() != 0:\n loss_G.backward()\n self.BaseOpti.step()\n\n # -----------------------------\n \"\"\"Discriminator \"\"\"\n # -----------------------------\n \n _batch_size = 0\n loss_D_comb = torch.Tensor([0]).cuda()\n source_modal_ids_tensor = torch.Tensor(source_modal_ids).cuda()\n for s_out, t_out, _d_net_DP, _d_net, _disc_opt, modal_idx in zip(source_output, target_output, self.DNets_DP, self.DNets, self.DiscOptis, range(len(source_output))):\n self.set_requires_grad(self.logger, _d_net, requires_grad = True)\n\n _batch_size = 0\n loss_D = torch.Tensor([0]).cuda()\n # source domain\n s_D_out = _d_net_DP(F.softmax(s_out.detach(), dim=1))\n\n loss_temp_s = torch.mean(self.bceloss(\n s_D_out,\n torch.FloatTensor(s_D_out.data.size()).fill_(1.0).cuda()\n ), [1,2,3])\n\n if modal_idx >= self.modal_num:\n loss_D += torch.mean(loss_temp_s)\n elif torch.mean(torch.as_tensor((modal_idx==source_modal_ids_tensor), dtype=torch.float32)) == 0:\n loss_D += 0.0\n else:\n loss_D += torch.mean(torch.masked_select(loss_temp_s, source_modal_ids_tensor==modal_idx))\n\n # target domain\n _batch_size += (s_out.size(0) + t_out.size(0))\n \n t_D_out = _d_net_DP(F.softmax(t_out.detach(), dim=1))\n loss_temp_t = torch.mean(self.bceloss(\n t_D_out,\n torch.FloatTensor(t_D_out.data.size()).fill_(0.0).cuda()\n ), [1,2,3])\n\n if modal_idx >= self.modal_num:\n loss_D += 
torch.mean(loss_temp_t)\n elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:\n loss_D += 0.0\n else:\n loss_D += torch.mean(torch.masked_select(loss_temp_t, target_modal_ids_tensor==modal_idx))\n\n loss_D *= self.cfg['training']['loss_adv_lambda']*0.5\n\n loss_D_comb += loss_D\n \n _disc_opt.zero_grad()\n if loss_D_comb.item() != 0:\n loss_D_comb.backward()\n _disc_opt.step()\n\n return loss_GTA, loss_adv, loss_D_comb\n\n\n def process_label(self, label):\n batch, channel, w, h = label.size()\n pred1 = torch.zeros(batch, 20, w, h).cuda()\n id = torch.where(label < 19, label, torch.Tensor([19]).cuda())\n pred1 = pred1.scatter_(1, id.long(), 1)\n return pred1\n\n def class_vectors_alignment(self, ids, vectors, modal_ids=[0,]):\n #loss = torch.Tensor([0]).cuda(self.default_gpu)\n loss = torch.Tensor([0]).cuda()\n\n \"\"\"construct category objective vectors\"\"\"\n # objective_vectors_group 2 x 19 x 256 --> 19 x 512\n _objective_vectors_set = self.metrics.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)\n\n for i in range(len(ids)):\n if ids[i] not in self.valid_classes:\n continue\n new_loss = self.smoothloss(vectors[i].squeeze().cuda(), _objective_vectors_set[ids[i]])\n while (new_loss.item() > 5):\n new_loss = new_loss / 10\n loss = loss + new_loss\n loss = loss / len(ids) * 10\n return loss\n\n def freeze_bn_apply(self):\n for net in self.nets:\n net.apply(freeze_bn)\n for net in self.nets_DP:\n net.apply(freeze_bn)\n\n def scheduler_step(self):\n if self.use_pseudo_label:\n for scheduler in self.schedulers:\n scheduler.step()\n else:\n \"\"\"skipped _BaseScheduler_nouse\"\"\"\n for scheduler in self.schedulers[1:]:\n scheduler.step()\n # baseNet scheduler\n self.adjust_basenet_learning_rate()\n \n def optimizer_zerograd(self):\n for optimizer in self.optimizers:\n optimizer.zero_grad()\n \n def optimizer_step(self):\n for opt in self.optimizers:\n opt.step()\n\n def init_device(self, net, gpu_id=None, whether_DP=False):\n gpu_id = gpu_id or self.default_gpu\n device = torch.device(\"cuda:{}\".format(gpu_id) if torch.cuda.is_available() else 'cpu')\n net = net.to(device)\n # if torch.cuda.is_available():\n if whether_DP:\n net = DataParallelWithCallback(net, device_ids=range(torch.cuda.device_count()))\n return net\n \n def eval(self, net=None, logger=None):\n \"\"\"Make specific models eval mode during test time\"\"\"\n if net is None:\n for net in self.nets:\n net.eval()\n for net in self.nets_DP:\n net.eval()\n if logger is not None: \n logger.info(\"Successfully set the model eval mode\") \n else:\n net.eval()\n if logger is not None: \n logger.info(\"Successfully set {} eval mode\".format(net.__class__.__name__))\n return\n\n def train(self, net=None, logger=None):\n if net is None:\n for net in self.nets:\n net.train()\n for net in self.nets_DP:\n net.train()\n else:\n net.train()\n return\n\n def set_requires_grad(self, logger, net, requires_grad = False):\n \"\"\"Set requires_grad=False for all the networks to avoid unnecessary computations\n Parameters:\n net (BaseModel) -- the network which will be operated on\n requires_grad (bool) -- whether the networks require gradients or not\n \"\"\"\n for parameter in net.parameters():\n parameter.requires_grad = requires_grad\n \n def set_requires_grad_layer(self, logger, net, layer_type='batchnorm', requires_grad=False): \n ''' set whether specific types of layers require grad\n '''\n\n # print('Warning: all the BatchNorm params are fixed!')\n # logger.info('Warning: all the BatchNorm params are 
fixed!')\n for net in self.nets:\n for _i in net.modules():\n if _i.__class__.__name__.lower().find(layer_type.lower()) != -1:\n _i.weight.requires_grad = requires_grad\n return\n\n def init_weights(self, cfg, logger, net, init_type='normal', init_gain=0.02):\n \"\"\"Initialize network weights.\n\n Parameters:\n net (network) -- network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n\n We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\n work better for some applications. Feel free to try yourself.\n \"\"\"\n init_type = cfg.get('init_type', init_type)\n init_gain = cfg.get('init_gain', init_gain)\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n nn.init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n nn.init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n nn.init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias.data, 0.0)\n elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 \\\n or isinstance(m, nn.GroupNorm):\n m.weight.data.fill_(1)\n m.bias.data.zero_() # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n\n\n print('initialize {} with {}'.format(net.__class__.__name__, init_type))\n logger.info('initialize {} with {}'.format(net.__class__.__name__, init_type))\n net.apply(init_func) # apply the initialization function <init_func>\n\n def adaptive_load_nets(self, net, model_weight):\n model_dict = net.state_dict()\n pretrained_dict = {k : v for k, v in model_weight.items() if k in model_dict}\n \n # print(\"[INFO] Pretrained dict:\", pretrained_dict.keys())\n model_dict.update(pretrained_dict)\n net.load_state_dict(model_dict)\n\n def load_nets(self, cfg, writer, logger): # load pretrained weights on the net\n if os.path.isfile(cfg['training']['resume']):\n logger.info(\n \"Loading model and optimizer from checkpoint '{}'\".format(cfg['training']['resume'])\n )\n checkpoint = torch.load(cfg['training']['resume'])\n _k = -1\n net_state_no = {}\n for net in self.nets:\n name = net.__class__.__name__\n if name not in net_state_no:\n net_state_no[name] = 0\n else:\n net_state_no[name] += 1\n _k += 1\n if checkpoint.get(name) is None:\n continue\n if name.find('FCDiscriminator') != -1 and not cfg['training']['gan_resume']:\n continue\n if isinstance(checkpoint[name], list):\n self.adaptive_load_nets(net, checkpoint[name][net_state_no[name]][\"model_state\"])\n else:\n print(\"*****************************************\")\n print(\"[WARNING] Using deprecated load version! 
Model {}\".format(name))\n print(\"*****************************************\")\n self.adaptive_load_nets(net, checkpoint[name][\"model_state\"])\n if cfg['training']['optimizer_resume']:\n if isinstance(checkpoint[name], list):\n self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][net_state_no[name]][\"optimizer_state\"])\n self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][net_state_no[name]][\"scheduler_state\"])\n else:\n self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][\"optimizer_state\"])\n self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][\"scheduler_state\"])\n self.iter = checkpoint[\"iter\"]\n #self.best_iou = checkpoint['best_iou']\n logger.info(\n \"Loaded checkpoint '{}' (iter {})\".format(\n cfg['training']['resume'], checkpoint[\"iter\"]\n )\n )\n else:\n raise Exception(\"No checkpoint found at '{}'\".format(cfg['training']['resume']))\n\n\n def load_PredNet(self, cfg, writer, logger, dir=None, net=None): # load pretrained weights on the net\n dir = dir or cfg['training']['Pred_resume']\n best_iou = 0\n if os.path.isfile(dir):\n logger.info(\n \"Loading model and optimizer from checkpoint '{}'\".format(dir)\n )\n checkpoint = torch.load(dir)\n name = net.__class__.__name__\n if checkpoint.get(name) == None:\n return\n if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:\n return\n if isinstance(checkpoint[name], list):\n self.adaptive_load_nets(net, checkpoint[name][0][\"model_state\"])\n else:\n self.adaptive_load_nets(net, checkpoint[name][\"model_state\"])\n iter = checkpoint[\"iter\"]\n best_iou = checkpoint['best_iou']\n logger.info(\n \"Loaded checkpoint '{}' (iter {}) (best iou {}) for PredNet\".format(\n dir, checkpoint[\"iter\"], best_iou\n )\n )\n else:\n raise Exception(\"No checkpoint found at '{}'\".format(dir))\n if hasattr(net, 'best_iou'):\n #net.best_iou = best_iou\n pass\n return best_iou\n\n\n def set_optimizer(self, optimizer): #set optimizer to all nets\n pass\n\n def reset_objective_SingleVector(self,):\n self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()\n self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()\n self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()\n\n def update_objective_SingleVector(self, vectors, vectors_num, name='moving_average'):\n #vector = vector.squeeze().detach()\n if torch.sum(vectors) == 0:\n return\n if name == 'moving_average':\n self.objective_vectors_group = self.objective_vectors_group * 0.9999 + 0.0001 * vectors\n self.objective_vectors_num_group += vectors_num\n self.objective_vectors_num_group = min(self.objective_vectors_num_group, 3000)\n elif name == 'mean':\n self.objective_vectors_group = self.objective_vectors_group * self.objective_vectors_num_group + vectors\n self.objective_vectors_num_group += vectors_num\n self.objective_vectors_group = self.objective_vectors_group / self.objective_vectors_num_group\n self.objective_vectors_num_group = min(self.objective_vectors_num_group, 3000)\n else:\n raise NotImplementedError('no such updating way of objective vectors {}'.format(name))\n\ndef grad_reverse(x):\n return GradReverse()(x)\n\n" ]
[ [ "torch.nn.SmoothL1Loss", "torch.as_tensor", "torch.nn.functional.softmax", "torch.nn.L1Loss", "torch.no_grad", "torch.cuda.is_available", "torch.cat", "torch.nn.init.kaiming_normal_", "torch.nn.init.xavier_normal_", "torch.nn.init.normal_", "torch.cuda.device_count", "torch.nn.TripletMarginLoss", "torch.nn.functional.adaptive_avg_pool2d", "torch.Tensor", "torch.mean", "numpy.mean", "torch.load", "numpy.zeros", "torch.masked_select", "torch.sum", "torch.nn.MSELoss", "torch.nn.init.constant_", "torch.full", "torch.nn.BCEWithLogitsLoss", "torch.zeros", "torch.nn.init.orthogonal_", "torch.nn.functional.interpolate" ] ]
OSUrobotics/KinovaGrasping
[ "f22af60d3683fdc4ffecf49ccff179fbc6750748" ]
[ "gym-kinova-gripper/plotting_code/other_plots.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\n## Extra plotting functions that can be called for quick analysis\n\ndef plot_timestep_distribution(success_timesteps=None, fail_timesteps=None, all_timesteps=None, expert_saving_dir=None):\n \"\"\" Plot the distribution of time steps over successful and failed episodes \"\"\"\n if all_timesteps is None:\n success_timesteps = np.load(expert_saving_dir + \"/success_timesteps.npy\")\n fail_timesteps = np.load(expert_saving_dir + \"/fail_timesteps.npy\")\n all_timesteps = np.load(expert_saving_dir + \"/all_timesteps.npy\")\n\n n_bins = 40\n # We can set the number of bins with the `bins` kwarg\n plt.hist(all_timesteps, bins=n_bins, color=\"g\")\n plt.title(\"Total time steps distribution for all episodes (3x speed)\", weight='bold')\n plt.xlabel('# of time steps per episode')\n plt.ylabel('# of episodes with the time step count')\n plt.xlim(0, 800)\n plt.savefig(expert_saving_dir + \"/total_timestep_distribution\")\n plt.clf()\n\n plt.hist(success_timesteps, bins=n_bins, color=\"b\")\n plt.title(\"Time steps distribution for Successful episodes (3x speed)\", weight='bold')\n plt.xlabel('# of time steps per episode')\n plt.ylabel('# of episodes with the time step count')\n plt.savefig(expert_saving_dir + \"/success_timestep_distribution\")\n plt.clf()\n\n plt.hist(fail_timesteps, bins=n_bins, color=\"r\")\n plt.title(\"Time steps distribution for Failed episodes (3x speed)\", weight='bold')\n plt.xlabel('# of time steps per episode')\n plt.ylabel('# of episodes with the time step count')\n plt.savefig(expert_saving_dir + \"/fail_timestep_distribution\")\n plt.clf()\n\n\n'''\n# Plot the average velocity over an episode\ndef plot_average_velocity(replay_buffer,num_timesteps):\n \"\"\" Plot the average velocity over a certain number of episodes \"\"\"\n velocity_dir = \"./expert_average_velocity\"\n if not os.path.isdir(velocity_dir):\n os.mkdir(velocity_dir)\n\n #num_episodes = len(f1_vels)\n\n #plt.plot(np.arrange(len(f1_vels)), f1_vels)\n\n max_timesteps = 30\n timestep_vel_count = np.zeros(max_timesteps)\n wrist_avg_vels = np.zeros(max_timesteps)\n f1_avg_vels = np.zeros(max_timesteps)\n f2_avg_vels = np.zeros(max_timesteps)\n f3_avg_vels = np.zeros(max_timesteps)\n\n for episode_actions in replay_buffer.action:\n for timestep_idx in range(len(episode_actions)):\n timestep_vel_count[timestep_idx] += 1\n wrist_avg_vels[timestep_idx] = (wrist_avg_vels[timestep_idx] + episode_actions[timestep_idx][0]) / timestep_vel_count[timestep_idx]\n f1_avg_vels[timestep_idx] = (f1_avg_vels[timestep_idx] + episode_actions[timestep_idx][1]) / \\\n timestep_vel_count[timestep_idx]\n f2_avg_vels[timestep_idx] = (f2_avg_vels[timestep_idx] + episode_actions[timestep_idx][2]) / \\\n timestep_vel_count[timestep_idx]\n f3_avg_vels[timestep_idx] = (f3_avg_vels[timestep_idx] + episode_actions[timestep_idx][3]) / \\\n timestep_vel_count[timestep_idx]\n\n num_episodes = len(replay_buffer.action)\n print(\"replay_buffer.action: \",replay_buffer.action)\n print(\"f1_avg_vels: \",f1_avg_vels)\n plt.plot(np.arange(num_timesteps), f1_avg_vels, color=\"r\", label=\"Finger1\")\n plt.plot(np.arange(num_timesteps), f2_avg_vels, color=\"b\", label=\"Finger2\")\n plt.plot(np.arange(num_timesteps), f3_avg_vels, color=\"g\", label=\"Finger3\")\n plt.plot(np.arange(num_timesteps), wrist_avg_vels, color=\"y\", label=\"Wrist\")\n plt.legend()\n\n plt.title(\"Average velocity over \"+str(num_episodes)+\" episodes\", weight='bold')\n plt.xlabel('Timestep within an episode')\n 
plt.ylabel('Average Velocity at Timestep')\n #plt.savefig(velocity_dir + \"/velocity_plot\")\n #plt.clf()\n plt.show()\n'''" ]
[ [ "numpy.load", "matplotlib.pyplot.savefig", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.hist", "matplotlib.pyplot.xlabel" ] ]
wenyuC94/LogConcComp
[ "b17d6ba6a102ba83a8415774b0e6da27a362bd5d" ]
[ "src/utils.py" ]
[ "import os\nimport numpy as np\nimport numba as nb\n\ndef create_folder(storage_path):\n if not os.path.isdir(storage_path):\n os.makedirs(storage_path,exist_ok=True)\n lsdir = os.listdir(storage_path)\n for item in [\"info\",\"hist\",\"soln\",\"figs\"]:\n if item not in lsdir:\n os.makedirs(storage_path+item+\"/\",exist_ok=True)\n if item == \"figs\":\n lsdir_figs = os.listdir(storage_path+item+\"/\")\n for item1 in [\"crop\",\"raw\"]:\n if item1 not in lsdir_figs:\n os.makedirs(storage_path+item+\"/\"+item1+\"/\",exist_ok=True)\n \n \ndef time_to_string(runtime):\n seconds = runtime%60\n runmins = (runtime-seconds)/60\n mins = int(runmins%60)\n runhrs = (runmins-mins)/60\n hrs = int(runhrs)\n return \"%.2d:%.2d:%05.2f\"%(hrs,mins,seconds)\n\ndef multivariate_laplace(n,d,rng=None, random_state=None):\n rng = rng if rng is not None else np.random.RandomState(random_state)\n X = rng.randn(n,d)\n Z = rng.exponential(size=(n,1))\n return X*np.sqrt(Z)\n\n\[email protected](cache=True)\ndef np_apply_along_axis(func1d, axis, arr):\n assert arr.ndim == 2\n assert axis in [0, 1]\n if axis == 0:\n result = np.empty(arr.shape[1])\n for i in range(len(result)):\n result[i] = func1d(arr[:, i])\n else:\n result = np.empty(arr.shape[0])\n for i in range(len(result)):\n result[i] = func1d(arr[i, :])\n return result\n\n\[email protected](cache=True)\ndef np_apply_along_axis_kd(funckd, axis, arr, k = -1):\n assert arr.ndim == 2\n assert axis in [0, 1]\n if axis == 0:\n k = k if k > 0 else arr.shape[0]\n result = np.empty((k,arr.shape[1]))\n for i in range(arr.shape[1]):\n result[:, i] = funckd(arr[:, i])\n else:\n k = k if k > 0 else arr.shape[1]\n result = np.empty((arr.shape[0],k))\n for i in range(arr.shape[0]):\n result[i, :] = funckd(arr[i, :])\n return result\n\[email protected](cache=True)\ndef split(n, B):\n sep = n//B\n rem = n%B\n indices = []\n last = 0\n cur = 0\n for i in range(B):\n cur = last + sep + (i < rem)\n indices.append(cur)\n last = cur\n return indices\n\n" ]
[ [ "numpy.random.RandomState", "numpy.sqrt", "numpy.empty" ] ]
1suancaiyu/STEP
[ "54195112990feaee137f5137775c736d07c2d26f" ]
[ "classifier_stgcn_real_only/utils/temp.py" ]
[ "import h5py\nimport os\nimport numpy as np\n\nbase_path = os.path.dirname(os.path.realpath(__file__))\nfeature_file = '/media/uttaran/FCE1-7BF3/Gamma/Gait/classifier_stgcn/model_classifier_stgcn/featuresCombineddeep_features.txt'\nf = np.loadtxt(feature_file)\nfCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/featuresCombined.h5', 'r')\nfkeys = fCombined.keys()\ndfCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/deepFeaturesCombined.h5', 'w')\nfor i, fkey in enumerate(fkeys):\n fname = [fkey][0]\n feature = f[i, :]\n dfCombined.create_dataset(fname, data=feature)\ndfCombined.close()\n" ]
[ [ "numpy.loadtxt" ] ]