repo_name (string, lengths 6-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
fepegar/resseg-ijcars | [
"963e5548fb02c777038ef550c969149377071cfc"
] | [
"datasets.py"
] | [
"import hashlib\nfrom pathlib import Path\n\nimport torch\nimport pandas as pd\nimport torchio as tio\nfrom tqdm import tqdm\nfrom resector import RandomResection\nfrom sklearn.model_selection import KFold\n\nfrom utils import sglob, get_stem\n\n\nclass DataModule:\n def __init__(\n self,\n datasets_dir,\n train_batch_size,\n num_workers,\n ):\n self.train_batch_size = train_batch_size\n self.num_workers = num_workers\n self.datasets_dir = Path(datasets_dir).expanduser()\n\n def get_train_loader(self, dataset):\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.train_batch_size,\n num_workers=self.num_workers,\n pin_memory=True,\n shuffle=True,\n )\n\n def get_val_loader(self, dataset):\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.train_batch_size,\n num_workers=self.num_workers,\n pin_memory=True,\n shuffle=False,\n )\n\n def get_train_transform(self, resect=True):\n return get_train_transform(self.landmarks_path, resection_params=self.resection_params)\n\n def print_lengths(self, test=True):\n f = print if self.log is None else self.log.info\n f(f'{len(self.train_dataset):4} training instances')\n f(f'{len(self.train_loader):4} training batches')\n f(f'{len(self.val_dataset):4} validation instances')\n f(f'{len(self.val_loader):4} validation batches')\n if not test:\n return\n f(f'{len(self.test_dataset):4} test instances')\n f(f'{len(self.test_loader):4} test batches')\n\n def get_public_subjects(self):\n public_dataset_names = (\n 'IXI',\n 'ADNI1_15T',\n 'ADNI1_3T',\n 'ADNI2',\n 'OASIS_download',\n )\n all_subjects = []\n for name in public_dataset_names:\n subjects = get_subjects_list_from_dir(self.datasets_dir / name)\n all_subjects.extend(subjects)\n return all_subjects\n\n\nclass DataModulePublic(DataModule):\n def __init__(\n self,\n datasets_dir,\n real_dataset_dir,\n resection_params,\n train_batch_size,\n num_workers,\n pseudo_dir=None,\n split_ratio=0.9,\n split_seed=42,\n debug_ratio=0.02,\n log=None,\n debug=False,\n augment=True,\n verbose=False,\n cache_validation_set=True,\n histogram_standardization=True,\n ):\n super().__init__(datasets_dir, train_batch_size, num_workers)\n self.resection_params = resection_params\n\n # Precomputed from 90% of the public training data\n if histogram_standardization:\n self.landmarks_path = Path(__file__).parent / 'landmarks' / 'histogram_landmarks_default.npy'\n else:\n self.landmarks_path = None\n\n public_subjects = self.get_public_subjects()\n train_public, val_public = self.split_subjects(public_subjects, split_ratio, split_seed)\n\n train_transform = self.get_train_transform() if augment else self.get_val_transform()\n self.train_dataset = tio.SubjectsDataset(train_public, transform=train_transform)\n self.val_dataset = tio.SubjectsDataset(val_public, transform=train_transform)\n if cache_validation_set:\n self.val_dataset = cache(self.val_dataset, resection_params, augment=augment)\n test_transform = get_test_transform(self.landmarks_path)\n self.test_dataset = get_real_resection_dataset(real_dataset_dir, transform=test_transform)\n if debug:\n self.train_dataset = reduce_dataset(self.train_dataset, debug_ratio)\n self.val_dataset = reduce_dataset(self.val_dataset, debug_ratio)\n self.test_dataset = reduce_dataset(self.test_dataset, debug_ratio)\n\n self.train_loader = self.get_train_loader(self.train_dataset)\n self.val_loader = self.get_val_loader(self.val_dataset)\n self.test_loader = self.get_val_loader(self.test_dataset)\n\n self.log = log\n\n if verbose:\n self.print_lengths()\n\n 
@staticmethod\n def split_subjects(subjects, ratio, seed):\n len_subjects = len(subjects)\n len_training = int(len_subjects * ratio)\n len_validation = len_subjects - len_training\n lengths = len_training, len_validation\n with torch.random.fork_rng([]):\n torch.manual_seed(seed)\n train, val = torch.utils.data.random_split(subjects, lengths)\n return train, val\n\n def get_val_transform(self):\n return tio.Compose((get_simulation_transform(self.resection_params), get_test_transform(self.landmarks_path)))\n\n\nclass DataModuleCV(DataModule):\n def __init__(\n self,\n fold,\n num_folds,\n datasets_dir,\n dataset_name,\n train_batch_size,\n num_workers,\n use_public_landmarks=False,\n pseudo_dirname=None,\n split_seed=42,\n log=None,\n verbose=True,\n ):\n super().__init__(datasets_dir, train_batch_size, num_workers)\n self.resection_params = None\n real_dataset_dir = self.datasets_dir / 'real' / dataset_name\n real_subjects = get_real_resection_subjects(real_dataset_dir)\n train_subjects, val_subjects = self.split_subjects(real_subjects, fold, num_folds, split_seed)\n self.train_dataset = tio.SubjectsDataset(train_subjects)\n if use_public_landmarks:\n self.landmarks_path = get_landmarks_path()\n else:\n self.landmarks_path = get_landmarks_path(dataset=self.train_dataset)\n train_transform = self.get_train_transform(resect=False)\n self.train_dataset.set_transform(train_transform)\n test_transform = get_test_transform(self.landmarks_path)\n self.val_dataset = tio.SubjectsDataset(val_subjects, transform=test_transform)\n\n if pseudo_dirname is not None:\n pseudo_dir = self.datasets_dir / 'real' / pseudo_dirname\n pseudo_dataset = get_real_resection_dataset(pseudo_dir, transform=train_transform)\n self.train_dataset = torch.utils.data.ConcatDataset((self.train_dataset, pseudo_dataset))\n\n self.train_loader = self.get_train_loader(self.train_dataset)\n self.val_loader = self.test_loader = self.get_val_loader(self.val_dataset)\n\n self.log = log\n if verbose:\n self.print_lengths(test=False)\n\n @staticmethod\n def split_subjects(real_subjects, fold, num_folds, split_seed):\n kf = KFold(n_splits=num_folds, shuffle=True, random_state=split_seed)\n folds = list(kf.split(real_subjects))\n train_indices, val_indices = folds[fold]\n train_subjects = [real_subjects[i] for i in train_indices]\n val_subjects = [real_subjects[i] for i in val_indices]\n return train_subjects, val_subjects\n\n\ndef get_train_transform(landmarks_path, resection_params=None):\n spatial_transform = tio.Compose((\n tio.OneOf({\n tio.RandomAffine(): 0.9,\n tio.RandomElasticDeformation(): 0.1,\n }),\n tio.RandomFlip(),\n ))\n resolution_transform = tio.OneOf((\n tio.RandomAnisotropy(),\n tio.RandomBlur(),\n ),\n p=0.75,\n )\n transforms = []\n if resection_params is not None:\n transforms.append(get_simulation_transform(resection_params))\n if landmarks_path is not None:\n transforms.append(tio.HistogramStandardization({'image': landmarks_path}))\n transforms.extend([\n # tio.RandomGamma(p=0.2),\n resolution_transform,\n tio.RandomGhosting(p=0.2),\n tio.RandomSpike(p=0.2),\n tio.RandomMotion(p=0.2),\n tio.RandomBiasField(p=0.5),\n tio.ZNormalization(masking_method=tio.ZNormalization.mean),\n tio.RandomNoise(p=0.75), # always after ZNorm and after blur!\n spatial_transform,\n get_tight_crop(),\n ])\n return tio.Compose(transforms)\n\n\ndef get_subjects_list_from_dir(dataset_dir):\n dataset_dir = Path(dataset_dir)\n mni_dir = dataset_dir / 'mni'\n resection_dir = dataset_dir / 'resection'\n noise_paths = sglob(resection_dir, 
'*noise*')\n subjects_list = []\n for noise_path in noise_paths:\n stem = noise_path.stem.split('_noise')[0]\n image_path = mni_dir / f'{stem}_on_mni.nii.gz'\n gml_path = resection_dir / f'{stem}_gray_matter_left_seg.nii.gz'\n gmr_path = resection_dir / f'{stem}_gray_matter_right_seg.nii.gz'\n rl_path = resection_dir / f'{stem}_resectable_left_seg.nii.gz'\n rr_path = resection_dir / f'{stem}_resectable_right_seg.nii.gz'\n subject = tio.Subject(\n image=tio.ScalarImage(image_path),\n resection_noise=tio.ScalarImage(noise_path),\n resection_gray_matter_left=tio.LabelMap(gml_path),\n resection_gray_matter_right=tio.LabelMap(gmr_path),\n resection_resectable_left=tio.LabelMap(rl_path),\n resection_resectable_right=tio.LabelMap(rr_path),\n )\n subjects_list.append(subject)\n return subjects_list\n\n\ndef get_landmarks_path(dataset=None):\n landmarks_dir = Path(__file__).parent / 'landmarks'\n landmarks_dir.mkdir(exist_ok=True)\n if dataset is None: # get precomputed landmarks from public data\n landmarks_path = landmarks_dir / 'histogram_landmarks_default.npy'\n else:\n filename = f'histogram_landmarks_{get_stems_hash(dataset)}.npy'\n landmarks_path = landmarks_dir / filename\n if not landmarks_path.is_file():\n from torchio.transforms import train_histogram\n images_paths = [subject.image.path for subject in dataset.subjects]\n print('Training histogram landmarks:', landmarks_path)\n train_histogram(images_paths, output_path=landmarks_path)\n return landmarks_path\n\n\ndef get_stems_hash(dataset):\n # https://stackoverflow.com/a/27522708/3956024\n stems_string = ','.join(get_stem(subject.image.path) for subject in dataset.subjects)\n return hashlib.md5(stems_string.encode()).hexdigest()\n\n\ndef get_tight_crop():\n # Crop from (193, 229, 193) to (176, 216, 160)\n crop = tio.Crop((9, 8, 7, 6, 17, 16))\n return crop\n\n\ndef get_real_resection_subjects(dataset_dir):\n dataset_dir = Path(dataset_dir)\n image_dir = dataset_dir / 'image'\n label_dir = dataset_dir / 'label'\n image_paths = sglob(image_dir)\n label_paths = sglob(label_dir)\n assert len(image_paths) == len(label_paths)\n subjects = []\n for image_path, label_path in zip(image_paths, label_paths):\n subject = tio.Subject(\n image=tio.ScalarImage(image_path),\n label=tio.LabelMap(label_path),\n )\n subjects.append(subject)\n return subjects\n\n\ndef get_real_resection_dataset(dataset_dir, transform=None):\n subjects = get_real_resection_subjects(dataset_dir)\n return tio.SubjectsDataset(subjects, transform=transform)\n\n\ndef reduce_dataset(dataset, ratio):\n n = int(len(dataset) * ratio)\n return torch.utils.data.Subset(dataset, list(range(n)))\n\n\ndef cache(dataset, resection_params, augment=True, caches_dir='/tmp/val_set_cache', num_workers=12):\n caches_dir = Path(caches_dir)\n wm_lesion_p = resection_params['wm_lesion_p']\n clot_p = resection_params['clot_p']\n shape = resection_params['shape']\n texture = resection_params['texture']\n augment_string = '_no_augmentation' if not augment else ''\n dir_name = f'wm_{wm_lesion_p}_clot_{clot_p}_{shape}_{texture}{augment_string}'\n cache_dir = caches_dir / dir_name\n image_dir = cache_dir / 'image'\n label_dir = cache_dir / 'label'\n if not cache_dir.is_dir():\n print('Caching validation set')\n image_dir.mkdir(parents=True)\n label_dir.mkdir(parents=True)\n loader = torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n collate_fn=lambda x: x[0],\n )\n for subject in tqdm(loader):\n image_path = image_dir / subject.image.path.name\n label_path = label_dir / 
subject.image.path.name # label has no path because it was created not loaded\n subject.image.save(image_path)\n subject.label.save(label_path)\n\n subjects = []\n for im_path, label_path in zip(sglob(image_dir), sglob(label_dir)):\n subject = tio.Subject(\n image=tio.ScalarImage(im_path),\n label=tio.LabelMap(label_path),\n )\n subjects.append(subject)\n return tio.SubjectsDataset(subjects)\n\n\ndef get_test_transform(landmarks_path):\n transforms = []\n if landmarks_path is not None:\n transforms.append(tio.HistogramStandardization({'image': landmarks_path}))\n transforms.extend([\n tio.ZNormalization(masking_method=tio.ZNormalization.mean),\n get_tight_crop(),\n ])\n return tio.Compose(transforms)\n\n\ndef get_simulation_transform(resection_params):\n transform = RandomResection(\n volumes_range=(844, 83757), # percentiles 1 and 99 of volumes in labeled EPISURG\n wm_lesion_p=resection_params['wm_lesion_p'],\n clot_p=resection_params['clot_p'],\n shape=resection_params['shape'],\n texture=resection_params['texture'],\n )\n return transform\n\n\ndef get_pseudo_loader(\n threshold,\n percentile,\n metric,\n summary_path,\n dataset_name,\n num_workers,\n batch_size=2,\n remove_zero_volume=False,\n ):\n subjects = []\n subject_ids = get_certain_subjects(\n threshold,\n percentile,\n metric,\n summary_path,\n remove_zero_volume=remove_zero_volume,\n )\n dataset_dir = Path('/home/fernando/datasets/real/') / dataset_name\n assert dataset_dir.is_dir()\n image_dir = dataset_dir / 'image'\n label_dir = dataset_dir / 'label'\n for subject_id in subject_ids:\n image_path = list(image_dir.glob(f'{subject_id}_*'))[0]\n label_path = list(label_dir.glob(f'{subject_id}_*'))[0]\n subject = tio.Subject(\n image=tio.ScalarImage(image_path),\n label=tio.LabelMap(label_path),\n )\n subjects.append(subject)\n transform = get_train_transform(get_landmarks_path())\n dataset = tio.SubjectsDataset(subjects, transform=transform)\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n pin_memory=True,\n shuffle=True,\n num_workers=num_workers,\n )\n return loader\n\n\ndef get_certain_subjects(\n threshold,\n percentile,\n metric,\n summary_path,\n remove_zero_volume=False,\n ):\n df = pd.read_csv(summary_path, index_col=0, dtype={'Subject': str})\n if remove_zero_volume:\n df = df[df.Volume > 0]\n column = df[metric]\n assert not (threshold is None and percentile is None)\n assert not (threshold is not None and percentile is not None)\n if percentile is not None:\n df = df[column < column.quantile(percentile / 100)]\n elif threshold is not None:\n df = df[column < threshold]\n return df.Subject.values\n"
] | [
[
"pandas.read_csv",
"torch.random.fork_rng",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"sklearn.model_selection.KFold",
"torch.utils.data.random_split",
"torch.utils.data.ConcatDataset"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rxjx/autogluon | [
"648c19b8b76a6d663a2a8b42b9f3463e60c63e2c"
] | [
"text/src/autogluon/text/text_prediction/mx/models.py"
] | [
"import numpy as np\nimport scipy.special\nimport os\nimport math\nimport logging\nimport pandas as pd\nimport warnings\nimport time\nimport json\nimport pickle\nimport functools\nimport tqdm\nfrom typing import Tuple\n\nfrom autogluon.core.scheduler.scheduler_factory import scheduler_factory\nfrom autogluon.core.utils import set_logger_verbosity\nfrom sklearn.preprocessing import LabelEncoder\nimport mxnet as mx\nfrom mxnet.util import use_np\nfrom mxnet.lr_scheduler import PolyScheduler, CosineScheduler\nfrom mxnet.gluon.data import DataLoader\nfrom autogluon_contrib_nlp.models import get_backbone\nfrom autogluon_contrib_nlp.lr_scheduler import InverseSquareRootScheduler\nfrom autogluon_contrib_nlp.utils.config import CfgNode\nfrom autogluon_contrib_nlp.utils.misc import grouper, \\\n count_parameters, repeat, get_mxnet_available_ctx\nfrom autogluon_contrib_nlp.utils.parameter import move_to_ctx, clip_grad_global_norm\n\nfrom autogluon.core import args, space\nfrom autogluon.core.utils import in_ipynb, verbosity2loglevel\nfrom autogluon.core.utils.utils import get_cpu_count, get_gpu_count\nfrom autogluon.core.utils.loaders import load_pkl, load_pd\nfrom autogluon.core.task.base import compile_scheduler_options_v2\nfrom autogluon.core.task.base.base_task import schedulers\nfrom autogluon.core.metrics import get_metric, Scorer\nfrom autogluon.core.utils.multiprocessing_utils import force_forkserver\nfrom autogluon.core.dataset import TabularDataset\nfrom autogluon.core.decorator import sample_config\nfrom autogluon.core.constants import BINARY, MULTICLASS, REGRESSION\nfrom autogluon.core.scheduler.reporter import FakeReporter\n\nfrom .modules import MultiModalWithPretrainedTextNN\nfrom .preprocessing import MultiModalTextFeatureProcessor, base_preprocess_cfg,\\\n MultiModalTextBatchify, get_stats_string, auto_shrink_max_length, get_cls_sep_id\nfrom .utils import average_checkpoints, set_seed\nfrom .. import constants as _C\nfrom ..utils import logging_config\nfrom ..presets import ag_text_presets\nfrom ... 
import version\n\nlogger = logging.getLogger(__name__) # return logger\n\n\n@use_np\ndef get_optimizer(cfg, updates_per_epoch):\n \"\"\"\n\n Parameters\n ----------\n cfg\n Configuration\n updates_per_epoch\n The number of updates per training epoch\n\n Returns\n -------\n optimizer\n The optimizer\n optimizer_params\n Optimization parameters\n max_update\n Maximum update\n \"\"\"\n max_update = max(int(np.ceil(updates_per_epoch * cfg.num_train_epochs)), 3)\n warmup_steps = int(np.ceil(updates_per_epoch * cfg.num_train_epochs * cfg.warmup_portion))\n if cfg.lr_scheduler == 'triangular':\n lr_scheduler = PolyScheduler(max_update=max_update,\n base_lr=cfg.lr,\n warmup_begin_lr=cfg.begin_lr,\n pwr=1,\n final_lr=cfg.final_lr,\n warmup_steps=warmup_steps,\n warmup_mode='linear')\n elif cfg.lr_scheduler == 'inv_sqrt':\n lr_scheduler = InverseSquareRootScheduler(warmup_steps=warmup_steps,\n base_lr=cfg.lr,\n warmup_init_lr=cfg.begin_lr)\n elif cfg.lr_scheduler == 'constant':\n lr_scheduler = None\n elif cfg.lr_scheduler == 'cosine':\n lr_scheduler = CosineScheduler(max_update=max_update,\n base_lr=cfg.lr,\n final_lr=cfg.final_lr,\n warmup_steps=warmup_steps,\n warmup_begin_lr=cfg.begin_lr)\n else:\n raise ValueError('Unsupported lr_scheduler=\"{}\"'\n .format(cfg.lr_scheduler))\n optimizer_params = {'learning_rate': cfg.lr,\n 'wd': cfg.wd,\n 'lr_scheduler': lr_scheduler}\n optimizer = cfg.optimizer\n additional_params = {key: value for key, value in cfg.optimizer_params}\n optimizer_params.update(additional_params)\n return optimizer, optimizer_params, max_update\n\n\n@use_np\ndef apply_layerwise_decay(model, layerwise_decay, backbone_name, not_included=None):\n \"\"\"Apply the layer-wise gradient decay\n\n .. math::\n lr = lr * layerwise_decay^(max_depth - layer_depth)\n\n Parameters:\n ----------\n model\n The backbone model\n layerwise_decay: int\n layer-wise decay power\n not_included: list of str\n A list or parameter names that not included in the layer-wise decay\n \"\"\"\n if not_included is None:\n not_included = []\n # consider the task specific fine-tuning layer as the last layer, following with pooler\n # In addition, the embedding parameters have the smaller learning rate based on this setting.\n if 'albert' in backbone_name:\n # Skip if it is the ALBERT model.\n return\n if 'electra' in backbone_name:\n # For ELECTRA, it's called all_encoder_layers\n all_layers = model.encoder.all_encoder_layers\n else:\n # For other models, it's called all_layers\n all_layers = model.encoder.all_layers\n max_depth = len(all_layers) + 2\n for key, value in model.collect_params().items():\n if 'scores' in key:\n value.lr_mult = layerwise_decay ** 0\n if 'pool' in key:\n value.lr_mult = layerwise_decay ** 1\n if 'embed' in key:\n value.lr_mult = layerwise_decay ** max_depth\n\n for (layer_depth, layer) in enumerate(all_layers):\n layer_params = layer.collect_params()\n for key, value in layer_params.items():\n for pn in not_included:\n if pn in key:\n continue\n value.lr_mult = layerwise_decay ** (max_depth - (layer_depth + 1))\n\n\n@use_np\ndef freeze_layers(model, backbone_name, num_trainable_layers):\n if 'albert' in backbone_name:\n # Skip if it is the ALBERT model.\n return\n if 'electra' in backbone_name:\n # For ELECTRA, it's called all_encoder_layers\n all_layers = model.encoder.all_encoder_layers\n else:\n # For other models, it's called all_layers\n all_layers = model.encoder.all_layers\n if num_trainable_layers < 0:\n return\n assert num_trainable_layers <= len(all_layers)\n for i in 
range(len(all_layers) - num_trainable_layers):\n for p in all_layers[i].collect_params().values():\n p.grad_req = 'null'\n return\n\n\ndef base_optimization_config():\n \"\"\"The basic optimization phase\"\"\"\n cfg = CfgNode()\n cfg.lr_scheduler = 'triangular'\n cfg.optimizer = 'adamw'\n cfg.early_stopping_patience = 20 # Stop if we cannot find a better checkpoint\n cfg.optimizer_params = [('beta1', 0.9),\n ('beta2', 0.999),\n ('epsilon', 1e-6),\n ('correct_bias', False)]\n cfg.begin_lr = 0.0\n cfg.batch_size = 128\n cfg.nbest = 1 # Keep the top K performed models\n cfg.per_device_batch_size = 16 # Per-device batch-size\n cfg.auto_per_device_batch_size = True # Whether to automatically determine the runnable\n # per-device batch_size.\n cfg.val_batch_size_mult = 2 # By default, we 2X the batch size for validation\n cfg.lr = 1E-4\n cfg.final_lr = 0.0\n cfg.num_train_epochs = 10\n cfg.warmup_portion = 0.1\n cfg.layerwise_lr_decay = 0.8 # The layer_wise decay\n cfg.wd = 0.01 # Weight Decay\n cfg.max_grad_norm = 1.0 # Maximum Gradient Norm\n # The validation frequency = validation frequency * num_updates_in_an_epoch\n cfg.valid_frequency = 0.2\n # Logging frequency = log frequency * num_updates_in_an_epoch\n cfg.log_frequency = 0.05\n return cfg\n\n\ndef base_model_config():\n cfg = CfgNode()\n cfg.backbone = CfgNode()\n cfg.backbone.name = 'google_electra_base'\n cfg.network = MultiModalWithPretrainedTextNN.get_cfg()\n cfg.num_trainable_layers = -1 # Use a negative number to indicate that all layers are trainable.\n cfg.insert_sep = True # Whether to insert sep tokens between columns\n cfg.train_stochastic_chunk = False # Whether to sample a stochastic chunk from the training text\n cfg.test_stochastic_chunk = False # Whether to use stochastic chunk in testing\n cfg.use_avg_nbest = False # Whether to average the top performed models and use that as the final model.\n # This will usually give us better performance.\n cfg._disable_update = False # This is a hack for trying to disable the update. Should not be used usually\n cfg.inference_num_repeat = 1 # Whether to turn on randomness and repeat the inference for multiple times.\n return cfg\n\n\ndef base_misc_config():\n cfg = CfgNode()\n cfg.seed = 123\n cfg.exp_dir = './autonlp'\n return cfg\n\n\ndef base_cfg():\n cfg = CfgNode()\n cfg.version = 1\n cfg.optimization = base_optimization_config()\n cfg.preprocessing = base_preprocess_cfg()\n cfg.model = base_model_config()\n cfg.misc = base_misc_config()\n cfg.freeze()\n return cfg\n\n\n@use_np\ndef _classification_regression_predict(net, dataloader, problem_type, label_scaler,\n has_label=True, extract_embedding=False,\n num_repeat=1):\n \"\"\"\n\n Parameters\n ----------\n net\n The network\n dataloader\n The dataloader\n problem_type\n Types of the labels\n label_scaler\n Label scaler. 
We will reverse the centering process for regression problem\n has_label\n Whether label is used\n extract_embedding\n Whether to extract the embedding\n num_repeat\n The number of repeats to get the prediction.\n If it is larger than 1, we will average the predictions.\n If it is a regression problem, we will directly average the outputs.\n If it is a classification problem, we will average the logits\n\n Returns\n -------\n predictions\n The predictions\n \"\"\"\n import warnings\n # Filter mxnet warnings\n warnings.filterwarnings('ignore', module='mxnet')\n\n predictions = [[] for _ in range(num_repeat)]\n use_logits = num_repeat > 1 and (problem_type == MULTICLASS or problem_type == BINARY)\\\n and not extract_embedding\n if use_logits:\n logits = [[] for _ in range(num_repeat)]\n ctx_l = net.collect_params().list_ctx()\n for i in range(num_repeat):\n for sample_l in grouper(dataloader, len(ctx_l)):\n iter_pred_l = []\n if use_logits:\n iter_logits_l = []\n for sample, ctx in zip(sample_l, ctx_l):\n if sample is None:\n continue\n if has_label:\n batch_feature, batch_label = sample\n else:\n batch_feature = sample\n batch_feature = move_to_ctx(batch_feature, ctx)\n if extract_embedding:\n _, embeddings = net(batch_feature)\n iter_pred_l.append(embeddings)\n else:\n pred = net(batch_feature)\n if problem_type == MULTICLASS or problem_type == BINARY:\n if num_repeat > 1:\n iter_logits_l.append(pred)\n pred = mx.npx.softmax(pred, axis=-1)\n iter_pred_l.append(pred)\n for pred in iter_pred_l:\n predictions[i].append(pred.asnumpy())\n if use_logits:\n for ele in iter_logits_l:\n logits[i].append(ele.asnumpy())\n predictions[i] = np.concatenate(predictions[i], axis=0)\n if problem_type == REGRESSION and not extract_embedding:\n predictions[i] = label_scaler.inverse_transform(predictions[i])[:, 0]\n if use_logits:\n logits[i] = np.concatenate(logits[i], axis=0)\n if num_repeat == 1:\n return predictions[0]\n else:\n if use_logits:\n logits = np.stack(logits, axis=0).mean(axis=0)\n return scipy.special.softmax(logits, axis=-1)\n else:\n return np.stack(predictions, axis=0).mean(axis=0)\n\n\ndef calculate_metric(scorer, ground_truth, predictions, problem_type):\n if problem_type == BINARY and scorer.name == 'roc_auc':\n # For ROC_AUC, we need to feed in the probability of positive class to the scorer.\n return scorer._sign * scorer(ground_truth, predictions[:, 1])\n else:\n return scorer._sign * scorer(ground_truth, predictions)\n\n\n@use_np\ndef train_function(args, reporter, train_df_path, tuning_df_path,\n time_limit, time_start, base_config,\n problem_type, column_types,\n feature_columns, label_column,\n log_metrics, eval_metric, ngpus_per_trial,\n console_log, seed=None, verbosity=2):\n \"\"\"\n\n Parameters\n ----------\n args\n The arguments\n reporter\n Reporter of the HPO scheduler.\n If it is set to None, we won't use the reporter and will just run a single trial.\n train_df_path\n Path of the training dataframe\n tuning_df_path\n Path of the tuning dataframe\n time_limit\n The time limit of calling this function\n time_start\n The starting timestamp of the experiment\n base_config\n Basic configuration\n problem_type\n Type of the problem.\n column_types\n Type of columns\n feature_columns\n The feature columns\n label_column\n Label column\n log_metrics\n Metrics for logging\n eval_metric\n The stopping metric\n ngpus_per_trial\n The number of GPUs to use per each trial\n console_log\n Whether to log it to console\n seed\n The random seed\n verbosity\n The verbosity\n\n \"\"\"\n 
import warnings\n warnings.filterwarnings('ignore', module='mxnet')\n warnings.filterwarnings('ignore', module='sklearn')\n set_seed(seed)\n is_fake_reporter = isinstance(reporter, FakeReporter)\n if time_limit is not None:\n start_train_tick = time.time()\n time_left = time_limit - (start_train_tick - time_start)\n if time_left <= 0:\n if not is_fake_reporter:\n reporter.terminate()\n return\n if is_fake_reporter:\n search_space = args.rand\n task_id = 0\n else:\n search_space = args['search_space']\n task_id = args.task_id\n # Get the log metric scorers\n if isinstance(log_metrics, str):\n log_metrics = [log_metrics]\n # Load the training and tuning data from the parquet file\n train_data = pd.read_pickle(train_df_path)\n tuning_data = pd.read_pickle(tuning_df_path)\n log_metric_scorers = [get_metric(ele) for ele in log_metrics]\n eval_metric_scorer = get_metric(eval_metric)\n greater_is_better = eval_metric_scorer.greater_is_better\n cfg = base_config.clone()\n specified_values = []\n for key in search_space.keys():\n specified_values.append(key)\n specified_values.append(search_space[key])\n cfg.merge_from_list(specified_values)\n exp_dir = cfg.misc.exp_dir\n exp_dir = os.path.join(exp_dir, 'task{}'.format(task_id))\n os.makedirs(exp_dir, exist_ok=True)\n cfg.defrost()\n cfg.misc.exp_dir = exp_dir\n cfg.freeze()\n logger = logging.getLogger()\n set_logger_verbosity(verbosity, logger)\n logging_config(folder=exp_dir, name='training', logger=logger, console=console_log,\n level=logging.DEBUG,\n console_level=verbosity2loglevel(verbosity))\n logger.log(10, cfg)\n\n # Load backbone model\n backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \\\n = get_backbone(cfg.model.backbone.name)\n if 'roberta' in cfg.model.backbone.name:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)\n else:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg)\n # Build Preprocessor + Preprocess the training dataset + Inference problem type\n # TODO Dynamically cache the preprocessor that has been fitted.\n if problem_type == MULTICLASS or problem_type == BINARY:\n label_generator = LabelEncoder()\n label_generator.fit(pd.concat([train_data[label_column], tuning_data[label_column]]))\n else:\n label_generator = None\n preprocessor = MultiModalTextFeatureProcessor(column_types=column_types,\n label_column=label_column,\n tokenizer_name=cfg.model.backbone.name,\n label_generator=label_generator,\n cfg=cfg.preprocessing)\n logger.info('Fitting and transforming the train data...')\n train_dataset = preprocessor.fit_transform(train_data[feature_columns],\n train_data[label_column])\n with open(os.path.join(exp_dir, 'preprocessor.pkl'), 'wb') as of:\n pickle.dump(preprocessor, of)\n logger.info(f'Done! 
Preprocessor saved to {os.path.join(exp_dir, \"preprocessor.pkl\")}')\n logger.log(10, 'Train Data')\n logger.log(10, get_stats_string(preprocessor, train_dataset, is_train=True))\n logger.info('Process dev set...')\n tuning_dataset = preprocessor.transform(tuning_data[feature_columns],\n tuning_data[label_column])\n logger.info('Done!')\n # Auto Max Length\n if cfg.preprocessing.text.auto_max_length:\n max_length = auto_shrink_max_length(\n train_dataset,\n insert_sep=cfg.model.insert_sep,\n num_text_features=len(preprocessor.text_feature_names),\n auto_max_length_quantile=cfg.preprocessing.text.auto_max_length_quantile,\n round_to=cfg.preprocessing.text.auto_max_length_round_to,\n max_length=cfg.preprocessing.text.max_length)\n else:\n max_length = cfg.preprocessing.text.max_length\n train_stochastic_chunk = cfg.model.train_stochastic_chunk\n test_stochastic_chunk = cfg.model.test_stochastic_chunk\n inference_num_repeat = cfg.model.inference_num_repeat\n if max_length < cfg.preprocessing.text.max_length:\n inference_num_repeat = 1\n cfg.defrost()\n cfg.preprocessing.text.max_length = max_length\n cfg.model.inference_num_repeat = inference_num_repeat\n cfg.freeze()\n with open(os.path.join(exp_dir, 'cfg.yml'), 'w') as f:\n f.write(str(cfg))\n logger.info(f'Max length for chunking text: {max_length}, '\n f'Stochastic chunk: Train-{train_stochastic_chunk}/Test-{test_stochastic_chunk}, '\n f'Test #repeat: {inference_num_repeat}.')\n cls_id, sep_id = get_cls_sep_id(tokenizer)\n train_batchify_fn = MultiModalTextBatchify(\n num_text_inputs=len(preprocessor.text_feature_names),\n num_categorical_inputs=len(preprocessor.categorical_feature_names),\n num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,\n cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,\n mode='train', stochastic_chunk=train_stochastic_chunk,\n insert_sep=cfg.model.insert_sep)\n test_batchify_fn = MultiModalTextBatchify(\n num_text_inputs=len(preprocessor.text_feature_names),\n num_categorical_inputs=len(preprocessor.categorical_feature_names),\n num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,\n cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,\n mode='test', stochastic_chunk=test_stochastic_chunk,\n insert_sep=cfg.model.insert_sep)\n\n # Get the ground-truth dev labels\n gt_dev_labels = np.array([ele[-1] for ele in tuning_dataset])\n if problem_type == REGRESSION:\n gt_dev_labels = preprocessor.label_scaler.inverse_transform(np.expand_dims(gt_dev_labels,\n axis=-1))[:, 0]\n ctx_l = get_mxnet_available_ctx()\n if ngpus_per_trial == 0:\n ctx_l = [mx.cpu()]\n else:\n ctx_l = ctx_l[:ngpus_per_trial]\n base_batch_size = cfg.optimization.per_device_batch_size\n num_accumulated = int(np.ceil(cfg.optimization.batch_size / (base_batch_size * len(ctx_l))))\n inference_base_batch_size = base_batch_size * cfg.optimization.val_batch_size_mult\n train_dataloader = DataLoader(train_dataset,\n batch_size=base_batch_size,\n shuffle=True,\n batchify_fn=train_batchify_fn)\n dev_dataloader = DataLoader(tuning_dataset,\n batch_size=inference_base_batch_size,\n shuffle=False,\n batchify_fn=test_batchify_fn)\n if problem_type == REGRESSION:\n out_shape = 1\n elif problem_type == MULTICLASS:\n out_shape = len(label_generator.classes_)\n elif problem_type == BINARY:\n assert len(label_generator.classes_) == 2\n out_shape = 2\n else:\n raise NotImplementedError\n net = MultiModalWithPretrainedTextNN(\n text_backbone=text_backbone,\n num_text_features=1,\n 
num_categorical_features=len(preprocessor.categorical_feature_names),\n num_numerical_features=len(preprocessor.numerical_feature_names) > 0,\n numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0 else len(preprocessor.numerical_feature_names),\n num_categories=preprocessor.categorical_num_categories,\n get_embedding=False,\n cfg=cfg.model.network,\n out_shape=out_shape)\n net.initialize_with_pretrained_backbone(backbone_params_path, ctx=ctx_l)\n net.hybridize()\n num_total_params, num_total_fixed_params = count_parameters(net.collect_params())\n logger.info('#Total Params/Fixed Params={}/{}'.format(num_total_params,\n num_total_fixed_params))\n # Initialize the optimizer\n updates_per_epoch = int(np.ceil(len(train_dataloader) / (num_accumulated * len(ctx_l))))\n optimizer, optimizer_params, max_update \\\n = get_optimizer(cfg.optimization,\n updates_per_epoch=updates_per_epoch)\n valid_interval = int(math.ceil(cfg.optimization.valid_frequency * updates_per_epoch))\n train_log_interval = int(math.ceil(cfg.optimization.log_frequency * updates_per_epoch))\n\n if 0 < cfg.optimization.layerwise_lr_decay < 1:\n apply_layerwise_decay(net.text_backbone,\n cfg.optimization.layerwise_lr_decay,\n backbone_name=cfg.model.backbone.name)\n freeze_layers(net.text_backbone,\n backbone_name=cfg.model.backbone.name,\n num_trainable_layers=cfg.model.num_trainable_layers)\n\n # Do not apply weight decay to all the LayerNorm and bias\n for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():\n v.wd_mult = 0.0\n params = [p for p in net.collect_params().values() if p.grad_req != 'null']\n trainer = mx.gluon.Trainer(params,\n optimizer, optimizer_params,\n update_on_kvstore=False)\n # Set grad_req if gradient accumulation is required\n if num_accumulated > 1:\n logger.log(15, 'Using gradient accumulation.'\n ' Global batch size = {}'.format(cfg.optimization.batch_size))\n for p in params:\n p.grad_req = 'add'\n net.collect_params().zero_grad()\n train_loop_dataloader = grouper(repeat(train_dataloader), len(ctx_l))\n log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]\n log_num_samples_l = [0 for _ in ctx_l]\n logging_start_tick = time.time()\n nbest = cfg.optimization.nbest\n best_performance_score = [] # Stores the best performing checkpoints\n best_performance_update_idx = [] # Stores the update index that reached the best validation performance\n best_score = None\n mx.npx.waitall()\n no_better_rounds = 0\n report_idx = 0\n start_tick = time.time()\n if time_limit is not None:\n time_limit -= start_tick - time_start\n if time_limit <= 0:\n if not is_fake_reporter:\n reporter.terminate()\n return\n best_report_items = None\n report_local_jsonl_f = open(os.path.join(exp_dir, 'results_local.jsonl'), 'w')\n logger.info(f'Local training results will be saved to '\n f'{os.path.join(exp_dir, \"results_local.jsonl\")}.')\n for update_idx in range(max_update):\n for accum_idx in range(num_accumulated):\n sample_l = next(train_loop_dataloader)\n loss_l = []\n for i, (sample, ctx) in enumerate(zip(sample_l, ctx_l)):\n feature_batch, label_batch = sample\n feature_batch = move_to_ctx(feature_batch, ctx)\n label_batch = move_to_ctx(label_batch, ctx)\n with mx.autograd.record():\n pred = net(feature_batch)\n if problem_type == MULTICLASS or problem_type == BINARY:\n logits = mx.npx.log_softmax(pred, axis=-1)\n loss = - mx.npx.pick(logits,\n mx.np.expand_dims(label_batch, axis=-1))\n elif problem_type == REGRESSION:\n loss = mx.np.square(pred - 
mx.np.expand_dims(label_batch, axis=-1))\n loss_l.append(loss.mean() / len(ctx_l) / num_accumulated)\n log_loss_l[i] += loss_l[i] * len(ctx_l) * loss.shape[0] * num_accumulated\n log_num_samples_l[i] += loss.shape[0]\n for loss in loss_l:\n loss.backward()\n # Begin to update\n trainer.allreduce_grads()\n total_norm, ratio, is_finite = clip_grad_global_norm(params, cfg.optimization.max_grad_norm)\n if not cfg.model._disable_update:\n trainer.update(1.0, ignore_stale_grad=True)\n\n # Clear after update\n if num_accumulated > 1:\n net.collect_params().zero_grad()\n if (update_idx + 1) % train_log_interval == 0:\n log_loss = sum([ele.as_in_ctx(ctx_l[0]) for ele in log_loss_l]).asnumpy()\n log_num_samples = sum(log_num_samples_l)\n logger.log(15,\n '[Iter {}/{}, Epoch {}] train loss={:0.2e}, gnorm={:0.2e}, lr={:0.2e}, #samples processed={},'\n ' #sample per second={:.2f}. ETA={:.2f}min'\n .format(update_idx + 1, max_update,\n int(update_idx / updates_per_epoch),\n log_loss / log_num_samples, total_norm, trainer.learning_rate,\n log_num_samples,\n log_num_samples / (time.time() - logging_start_tick),\n (time.time() - start_tick) / (update_idx + 1)\n * (max_update - update_idx - 1) / 60))\n logging_start_tick = time.time()\n log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]\n log_num_samples_l = [0 for _ in ctx_l]\n if (update_idx + 1) % valid_interval == 0 or (update_idx + 1) == max_update:\n valid_start_tick = time.time()\n dev_predictions = \\\n _classification_regression_predict(net,\n dataloader=dev_dataloader,\n problem_type=problem_type,\n label_scaler=preprocessor.label_scaler,\n has_label=False,\n num_repeat=inference_num_repeat)\n log_scores = [calculate_metric(scorer, gt_dev_labels,\n dev_predictions,\n problem_type)\n for scorer in log_metric_scorers]\n dev_score = calculate_metric(eval_metric_scorer, gt_dev_labels,\n dev_predictions,\n problem_type)\n valid_time_spent = time.time() - valid_start_tick\n find_better = False\n find_topn_better = False\n if len(best_performance_score) < nbest:\n best_performance_score.append(dev_score)\n best_performance_update_idx.append(update_idx + 1)\n net.save_parameters(\n os.path.join(exp_dir,\n f'nbest_model{len(best_performance_score) - 1}.params'))\n find_topn_better = True\n if best_score is None or greater_is_better and dev_score >= best_score\\\n or (not greater_is_better and dev_score <= best_score):\n find_better = True\n net.save_parameters(os.path.join(exp_dir, f'best_model.params'))\n best_score = dev_score\n else:\n # First try to update the top-K\n if greater_is_better:\n if dev_score >= min(best_performance_score):\n find_topn_better = True\n replace_idx = np.argmin(best_performance_score)\n best_performance_score[replace_idx] = dev_score\n best_performance_update_idx[replace_idx] = update_idx + 1\n net.save_parameters(\n os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))\n if dev_score >= best_score:\n find_better = True\n net.save_parameters(os.path.join(exp_dir, f'best_model.params'))\n best_score = dev_score\n\n else:\n if dev_score <= max(best_performance_score):\n find_topn_better = True\n replace_idx = np.argmax(best_performance_score)\n best_performance_score[replace_idx] = dev_score\n best_performance_update_idx[replace_idx] = update_idx + 1\n net.save_parameters(\n os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))\n if dev_score <= best_score:\n find_better = True\n net.save_parameters(os.path.join(exp_dir, f'best_model.params'))\n best_score = dev_score\n if not find_better:\n 
no_better_rounds += 1\n else:\n no_better_rounds = 0\n mx.npx.waitall()\n loss_string = ', '.join(['{}={:0.4e}'.format(metric.name, score)\n for score, metric in zip(log_scores, log_metric_scorers)])\n logger.log(25, '[Iter {}/{}, Epoch {}] valid {}, time spent={:.3f}s,'\n ' total time spent={:.2f}min. Find new best={}, Find new top-{}={}'.format(\n update_idx + 1, max_update, int(update_idx / updates_per_epoch),\n loss_string, valid_time_spent, (time.time() - start_tick) / 60,\n find_better, nbest, find_topn_better))\n if reporter is not None:\n report_items = [('iteration', update_idx + 1),\n ('report_idx', report_idx + 1),\n ('epoch', int(update_idx / updates_per_epoch))] + \\\n [(metric.name, score)\n for score, metric in zip(log_scores, log_metric_scorers)] + \\\n [('find_better', find_better),\n ('find_new_topn', find_topn_better),\n ('nbest_stat', json.dumps([best_performance_score,\n best_performance_update_idx])),\n ('elapsed_time', int(time.time() - start_tick))]\n if eval_metric_scorer._sign < 0:\n report_items.append(('reward_attr', -dev_score))\n else:\n report_items.append(('reward_attr', dev_score))\n report_items.append(('eval_metric', eval_metric_scorer.name))\n report_items.append(('exp_dir', exp_dir))\n if find_better:\n best_report_items = report_items\n reporter(**dict(report_items))\n report_local_jsonl_f.write(json.dumps(dict(report_items)) + '\\n')\n report_local_jsonl_f.flush()\n report_idx += 1\n if no_better_rounds >= cfg.optimization.early_stopping_patience:\n logger.info('Early stopping patience reached!')\n break\n total_time_spent = time.time() - start_tick\n if time_limit is not None and total_time_spent > time_limit:\n break\n # Average checkpoints\n best_report_items_dict = dict(best_report_items)\n best_report_items_dict['report_idx'] = report_idx + 1\n reporter(**best_report_items_dict)\n report_local_jsonl_f.write(json.dumps(best_report_items_dict) + '\\n')\n report_local_jsonl_f.close()\n\n\ndef get_recommended_resource(nthreads_per_trial=None,\n ngpus_per_trial=None) -> Tuple[int, int]:\n \"\"\"Get the recommended resources.\n\n Internally, we will try to use GPU whenever it's possible. 
That means, we will use\n a single GPU for finetuning.\n\n Parameters\n ----------\n nthreads_per_trial\n The number of threads per trial provided by the user.\n ngpus_per_trial\n The number of GPUs per trial provided by the user.\n\n Returns\n -------\n nthreads_per_trial\n The recommended resource.\n ngpus_per_trial\n \"\"\"\n if nthreads_per_trial is None and ngpus_per_trial is None:\n nthreads_per_trial = get_cpu_count()\n ngpus_per_trial = 1\n elif nthreads_per_trial is not None and ngpus_per_trial is None:\n ngpus_per_trial = 1\n elif nthreads_per_trial is None and ngpus_per_trial is not None:\n if ngpus_per_trial != 0:\n num_parallel_jobs = get_gpu_count() // ngpus_per_trial\n nthreads_per_trial = max(get_cpu_count() // num_parallel_jobs, 1)\n else:\n nthreads_per_trial = get_cpu_count()\n nthreads_per_trial = min(nthreads_per_trial, get_cpu_count())\n ngpus_per_trial = min(ngpus_per_trial, get_gpu_count())\n assert nthreads_per_trial > 0 and ngpus_per_trial >= 0,\\\n 'Invalid number of threads and number of GPUs.'\n return nthreads_per_trial, ngpus_per_trial\n\n\n@use_np\nclass MultiModalTextModel:\n \"\"\"Learner of the multimodal text data.\n\n It will be called if the user call `fit()` in TextPredictor.\n\n It is used for making predictions on new data and viewing information about\n models trained during `fit()`.\n \"\"\"\n\n def __init__(self, column_types,\n feature_columns,\n label_columns,\n problem_type,\n eval_metric,\n log_metrics,\n output_directory=None):\n \"\"\"Creates model object.\n\n Parameters\n ----------\n column_types\n The column types.\n feature_columns\n Name of the feature columns\n label_columns\n Name of the label columns.\n problem_type\n Type of the problem\n eval_metric\n The evaluation metric\n log_metrics\n The metrics for logging\n output_directory\n The output directory to save the model\n logger\n The logger\n \"\"\"\n super(MultiModalTextModel, self).__init__()\n self._base_config = base_cfg()\n self._base_config.defrost()\n if output_directory is not None:\n self._output_directory = self._base_config.misc.exp_dir = output_directory\n self._base_config.misc.exp_dir = os.path.abspath(self._base_config.misc.exp_dir)\n self._base_config.freeze()\n self._output_directory = self._base_config.misc.exp_dir\n self._column_types = column_types\n self._eval_metric = eval_metric\n self._log_metrics = log_metrics\n\n self._label_columns = label_columns\n self._feature_columns = feature_columns\n self._problem_type = problem_type\n\n # Need to be set in the train call\n self._net = None # Network for training and inference\n self._embed_net = None # Network for extract the embedding\n self._config = None\n self._results = None\n self._preprocessor = None\n\n @property\n def results(self):\n return self._results\n\n @property\n def preprocessor(self):\n return self._preprocessor\n\n @property\n def output_directory(self):\n \"\"\" Get the output directory. The trained model and the training logs\n will be saved to this folder \"\"\"\n return self._output_directory\n\n @property\n def label_columns(self):\n \"\"\"Name of the label columns\"\"\"\n return self._label_columns\n\n @property\n def problem_type(self):\n \"\"\"Types of the problem\"\"\"\n return self._problem_type\n\n @property\n def feature_columns(self):\n \"\"\"Name of the features\"\"\"\n return self._feature_columns\n\n @property\n def base_config(self):\n \"\"\"The basic configuration. 
Internally, we will fill values in the base config by values\n in the search space.\"\"\"\n return self._base_config\n\n @property\n def results(self):\n \"\"\"Results of the final model\"\"\"\n return self._results\n\n @property\n def config(self):\n \"\"\"The configuration of the final trained model.\"\"\"\n return self._config\n\n @property\n def net(self):\n return self._net\n\n def train(self, train_data, tuning_data,\n num_cpus=None,\n num_gpus=None,\n time_limit=None,\n tune_kwargs=None,\n search_space=None,\n plot_results=False,\n console_log=True,\n seed=None,\n verbosity=2):\n \"\"\"The train function.\n\n Parameters\n ----------\n train_data\n The training data\n tuning_data\n The tuning data\n num_cpus\n Number of CPUs for each trial\n num_gpus\n Number of GPUs for each trial\n time_limit\n The time limits\n tune_kwargs\n Parameters of the HPO algorithms. For example, the scheduling\n algorithm, scheduling backend, HPO algorithm.\n search_space\n The search space options\n plot_results\n Whether to plot results or not\n console_log\n Whether to log into the console\n seed\n The seed\n verbosity\n Verbosity\n \"\"\"\n set_seed(seed)\n set_logger_verbosity(verbosity, logger)\n start_tick = time.time()\n assert len(self._label_columns) == 1, 'Currently, we only support single label.'\n # TODO(sxjscience) Try to support S3\n os.makedirs(self._output_directory, exist_ok=True)\n if search_space is None:\n search_space = \\\n ag_text_presets.create('default')['models']['MultimodalTextModel']['search_space']\n search_space_reg = args(search_space=space.Dict(**search_space))\n # Scheduler and searcher for HPO\n if tune_kwargs is None:\n tune_kwargs = ag_text_presets.create('default')['tune_kwargs']\n scheduler_options = tune_kwargs['scheduler_options']\n num_cpus, num_gpus = get_recommended_resource(num_cpus, num_gpus)\n if num_gpus == 0:\n if 'AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU' in os.environ:\n use_warning = int(os.environ['AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU'])\n else:\n use_warning = False\n if use_warning:\n warnings.warn('No GPU is detected in the machine and we will recommend you to '\n 'use TextPredictor on a GPU-enabled instance. Currently, '\n 'training on CPU is slow.')\n else:\n raise RuntimeError('No GPU is detected in the machine and we will '\n 'not proceed to run TextPredictor because they will train '\n 'too slowly with only CPU. You may try to set `ngpus_per_trial` '\n 'to a number larger than 0 when calling `.fit()`. '\n 'Also, you can set the environment variable '\n '\"AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU=1\" to force the model to '\n 'use CPU for training.')\n logger.info(f\"The GluonNLP V0 backend is used. \"\n f\"We will use {num_cpus} cpus and \"\n f\"{num_gpus} gpus to train each trial.\")\n if scheduler_options is None:\n scheduler_options = dict()\n if plot_results is None:\n if in_ipynb():\n plot_results = True\n else:\n plot_results = False\n scheduler_options = compile_scheduler_options_v2(\n scheduler_options=scheduler_options,\n scheduler=tune_kwargs['search_strategy'],\n search_strategy=tune_kwargs['searcher'],\n search_options=tune_kwargs['search_options'],\n nthreads_per_trial=num_cpus,\n ngpus_per_trial=num_gpus,\n checkpoint=os.path.join(self._output_directory, 'checkpoint.ag'),\n num_trials=tune_kwargs['num_trials'],\n time_out=time_limit,\n resume=False,\n visualizer=scheduler_options.get('visualizer'),\n time_attr='report_idx',\n reward_attr='reward_attr',\n dist_ip_addrs=scheduler_options.get('dist_ip_addrs'))\n # Create a temporary cache file. 
The internal train function will load the\n # temporary cache.\n os.makedirs(os.path.join(self._output_directory, 'data_cache'), exist_ok=True)\n train_df_path = os.path.join(self._output_directory, 'data_cache',\n 'cache_train_dataframe.pd.pkl')\n tuning_df_path = os.path.join(self._output_directory, 'data_cache',\n 'cache_tuning_dataframe.pd.pkl')\n train_data.to_pickle(train_df_path)\n tuning_data.to_pickle(tuning_df_path)\n train_fn = search_space_reg(functools.partial(train_function,\n train_df_path=train_df_path,\n time_limit=time_limit,\n time_start=start_tick,\n tuning_df_path=tuning_df_path,\n base_config=self.base_config,\n problem_type=self.problem_type,\n column_types=self._column_types,\n feature_columns=self._feature_columns,\n label_column=self._label_columns[0],\n log_metrics=self._log_metrics,\n eval_metric=self._eval_metric,\n ngpus_per_trial=scheduler_options['resource']['num_gpus'],\n console_log=console_log,\n verbosity=verbosity))\n no_job_finished_err_msg =\\\n 'No training job has been completed! '\\\n 'There are two possibilities: '\\\n '1) The time_limit is too small, '\\\n 'or 2) There are some internal errors in AutoGluon. '\\\n 'For the first case, you can increase the time_limit or set it to '\\\n 'None, e.g., setting \"predictor.fit(..., time_limit=None). To '\\\n 'further investigate the root cause, you can also try to set the '\\\n '\"verbosity=3\" and try again, i.e., predictor.set_verbosity(3).'\n if scheduler_options['num_trials'] == 1:\n train_fn(train_fn.args['search_space'],\n train_fn.args['_default_config'])\n best_model_saved_dir_path = os.path.join(self._output_directory, 'task0')\n cfg_path = os.path.join(self._output_directory, 'task0', 'cfg.yml')\n\n # Check whether the job has finished\n if not os.path.exists(cfg_path)\\\n or not os.path.exists(os.path.join(self._output_directory,\n 'task0', 'best_model.params')):\n raise RuntimeError(no_job_finished_err_msg)\n cfg = self.base_config.clone_merge(cfg_path)\n local_results = pd.read_json(os.path.join(self._output_directory, 'task0',\n 'results_local.jsonl'), lines=True)\n if plot_results:\n plot_training_curves = os.path.join(self._output_directory,\n 'plot_training_curves.png')\n import matplotlib.pyplot as plt\n plt.ylabel(self._eval_metric)\n plt.xlabel('report_idx')\n plt.title(\"Performance vs Training-Time\")\n plt.plot(local_results['report_idx'].iloc[:-1],\n local_results[local_results['eval_metric'][0]].iloc[:-1], label=f'task0')\n plt.legend(loc='best')\n plt.savefig(plot_training_curves)\n plt.show()\n self._results = local_results\n else:\n if tune_kwargs['search_strategy'] != 'local':\n # Force forkserver if it's not using the local sequential HPO\n force_forkserver()\n scheduler_cls, scheduler_params = scheduler_factory(scheduler_options)\n # Create scheduler, run HPO experiment\n scheduler = scheduler_cls(train_fn, **scheduler_options)\n scheduler.run()\n scheduler.join_jobs()\n if len(scheduler.config_history) == 0:\n raise RuntimeError(no_job_finished_err_msg)\n best_config = scheduler.get_best_config()\n logger.info('Results=', scheduler.searcher._results)\n logger.info('Best_config={}'.format(best_config))\n best_task_id = scheduler.get_best_task_id()\n best_model_saved_dir_path = os.path.join(self._output_directory,\n 'task{}'.format(best_task_id))\n best_cfg_path = os.path.join(best_model_saved_dir_path, 'cfg.yml')\n cfg = self.base_config.clone_merge(best_cfg_path)\n if plot_results:\n plot_training_curves = os.path.join(self._output_directory,\n 
'plot_training_curves.png')\n scheduler.get_training_curves(filename=plot_training_curves,\n plot=plot_results,\n use_legend=True)\n self._results = dict()\n self._results.update(best_reward=scheduler.get_best_reward(),\n best_config=scheduler.get_best_config(),\n total_time=time.time() - start_tick,\n metadata=scheduler.metadata,\n training_history=scheduler.training_history,\n config_history=scheduler.config_history,\n reward_attr=scheduler._reward_attr,\n config=cfg)\n # Consider to move this to a separate predictor\n self._config = cfg\n # Average parameters\n # TODO(sxjscience) Clean up the temporary spaces used to store the intermediate checkpoints.\n if cfg.model.use_avg_nbest:\n nbest_path_l = []\n for best_id in range(cfg.optimization.nbest):\n nbest_path = os.path.join(best_model_saved_dir_path, f'nbest_model{best_id}.params')\n if os.path.exists(nbest_path):\n nbest_path_l.append(nbest_path)\n avg_nbest_path = os.path.join(best_model_saved_dir_path, 'nbest_model_avg.params')\n average_checkpoints(nbest_path_l, avg_nbest_path)\n with open(os.path.join(best_model_saved_dir_path, 'preprocessor.pkl'), 'rb') as in_f:\n self._preprocessor = pickle.load(in_f)\n backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \\\n = get_backbone(cfg.model.backbone.name)\n if 'roberta' in cfg.model.backbone.name:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)\n else:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg)\n if self._problem_type == REGRESSION:\n out_shape = 1\n elif self._problem_type == MULTICLASS:\n out_shape = len(self._preprocessor.label_generator.classes_)\n elif self._problem_type == BINARY:\n assert len(self._preprocessor.label_generator.classes_) == 2\n out_shape = 2\n else:\n raise NotImplementedError\n net = MultiModalWithPretrainedTextNN(\n text_backbone=text_backbone,\n num_text_features=1,\n num_categorical_features=len(self._preprocessor.categorical_feature_names),\n num_numerical_features=len(self._preprocessor.numerical_feature_names) > 0,\n numerical_input_units=None if len(self._preprocessor.numerical_feature_names) == 0 else len(\n self._preprocessor.numerical_feature_names),\n num_categories=self._preprocessor.categorical_num_categories,\n get_embedding=False,\n cfg=cfg.model.network,\n out_shape=out_shape)\n net.hybridize()\n if cfg.model.use_avg_nbest:\n net.load_parameters(avg_nbest_path, ctx=mx.cpu())\n else:\n net.load_parameters(os.path.join(best_model_saved_dir_path, 'best_model.params'),\n ctx=mx.cpu())\n self._net = net\n mx.npx.waitall()\n\n def evaluate(self, data, metrics=None, stochastic_chunk=None, num_repeat=None):\n \"\"\" Report the predictive performance evaluated for a given dataset.\n\n Parameters\n ----------\n data : str or :class:`TabularDataset` or `pandas.DataFrame`\n This Dataset must also contain the label-column with the same column-name as specified during `fit()`.\n If str is passed, `valid_data` will be loaded using the str value as the file path.\n metrics : str or List[str] or None\n Name of metric or a list of names of metrics to report.\n If it is not given, we will return the score of the stored eval_metric.\n stochastic_chunk\n Whether to use stochastic chunk\n num_repeat\n The number of repeats\n\n Returns\n -------\n ret : single number or a dict of metric --> metric scores\n Output\n \"\"\"\n if isinstance(metrics, str):\n metrics = [metrics]\n elif metrics is None:\n metrics = [self._eval_metric]\n assert self.net is not None\n # We will always use all resources 
that are available for evaluation\n ctx_l = get_mxnet_available_ctx()\n self.net.collect_params().reset_ctx(ctx_l)\n\n if not isinstance(data, pd.DataFrame):\n if isinstance(data, (list, dict)):\n data = pd.DataFrame(data)\n elif isinstance(data, str):\n data = load_pd.load(data)\n else:\n raise NotImplementedError(f'The format of data is not understood. '\n f'We have type(data)=\"{type(data)}\"')\n data = data[self._feature_columns + self._label_columns]\n if self._problem_type == MULTICLASS or self._problem_type == BINARY:\n ground_truth = self.preprocessor.label_generator.transform(\n data[self._label_columns[0]])\n predictions = self.predict_proba(data,\n stochastic_chunk=stochastic_chunk,\n num_repeat=num_repeat)\n else:\n ground_truth = pd.to_numeric(data[self._label_columns[0]]).to_numpy().astype(np.float32)\n predictions = self.predict(data,\n stochastic_chunk=stochastic_chunk,\n num_repeat=num_repeat)\n metric_scores = [calculate_metric(get_metric(metric),\n ground_truth, predictions, self._problem_type)\n for metric in metrics]\n\n # Once the inference is completed, we will cache all parameters back\n # to CPU to avoid memory overflow.\n self.net.collect_params().reset_ctx(mx.cpu())\n if len(metric_scores) == 1:\n return metric_scores[0]\n else:\n return {metric: score for metric, score in zip(metrics, metric_scores)}\n\n def _internal_predict(self, data, get_original_labels=True, get_probabilities=False,\n stochastic_chunk=None, num_repeat=None):\n assert self.net is not None\n assert self.config is not None\n # We will always use all resources that are available for evaluation\n ctx_l = get_mxnet_available_ctx()\n self.net.collect_params().reset_ctx(ctx_l)\n\n if not isinstance(data, pd.DataFrame):\n if isinstance(data, (list, dict)):\n data = pd.DataFrame(data)\n elif isinstance(data, str):\n data = load_pd.load(data)\n else:\n raise NotImplementedError(f'The format of data is not understood. 
'\n f'We have type(data)=\"{type(data)}\"')\n dataset = self.preprocessor.transform(data[self._feature_columns])\n inference_batch_size = self.config.optimization.per_device_batch_size \\\n * self.config.optimization.val_batch_size_mult\n cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)\n if stochastic_chunk is None:\n stochastic_chunk = self.config.model.test_stochastic_chunk\n batchify_fn = MultiModalTextBatchify(\n num_text_inputs=len(self.preprocessor.text_feature_names),\n num_categorical_inputs=len(self.preprocessor.categorical_feature_names),\n num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,\n cls_token_id=cls_id, sep_token_id=sep_id,\n max_length=self.config.preprocessing.text.max_length,\n mode='test',\n stochastic_chunk=stochastic_chunk,\n insert_sep=self.config.model.insert_sep)\n dataloader = DataLoader(dataset,\n batch_size=inference_batch_size,\n shuffle=False,\n batchify_fn=batchify_fn)\n if num_repeat is None:\n num_repeat = self.config.model.inference_num_repeat\n test_predictions = _classification_regression_predict(\n self._net,\n dataloader=dataloader,\n problem_type=self._problem_type,\n label_scaler=self.preprocessor.label_scaler,\n has_label=False,\n num_repeat=num_repeat)\n\n # Once the inference is completed, we will cache all parameters back\n # to CPU to avoid memory overflow.\n self.net.collect_params().reset_ctx(mx.cpu())\n if self._problem_type == MULTICLASS or self._problem_type == BINARY:\n if get_probabilities:\n return test_predictions\n else:\n test_predictions = test_predictions.argmax(axis=-1)\n if get_original_labels:\n test_predictions = np.array(\n self.preprocessor.label_generator.inverse_transform(test_predictions))\n return test_predictions\n\n @property\n def class_labels(self):\n \"\"\"The original name of the class labels.\n\n For example, the tabular data may contain classes equal to\n \"entailment\", \"contradiction\", \"neutral\". Internally, these will be converted to\n 0, 1, 2, ...\n\n This function returns the original names of these raw labels.\n\n Returns\n -------\n ret\n List that contain the class names. It will be None if it's not a classification problem.\n \"\"\"\n if self.problem_type == MULTICLASS or self.problem_type == BINARY:\n return self._preprocessor.label_generator.classes_\n else:\n warnings.warn('Accessing class names for a non-classification problem. Return None.')\n return None\n\n def predict_proba(self, test_data, stochastic_chunk=None, num_repeat=None):\n \"\"\"Predict class probabilities instead of class labels (for classification tasks).\n\n Parameters\n ----------\n test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str\n The test data to get predictions for. 
Can be DataFrame/Dataset or a file that can\n be loaded into DataFrame/Dataset.\n stochastic_chunk : bool\n Whether to enable stochastic chunk\n num_repeat : int or None\n The number of repeats for running the inference model.\n\n Returns\n -------\n probabilities : array\n The predicted class probabilities for each sample.\n Shape of this array is (#Samples, num_class).\n Here, the i-th number means the probability of belonging to the i-th class.\n You can access the class names by calling `self.class_names`.\n \"\"\"\n assert self.problem_type == MULTICLASS or self.problem_type == BINARY\n return self._internal_predict(test_data,\n get_original_labels=False,\n get_probabilities=True,\n stochastic_chunk=stochastic_chunk,\n num_repeat=num_repeat)\n\n def predict(self, test_data, get_original_labels=True, stochastic_chunk=None, num_repeat=None):\n \"\"\"Make predictions on new data.\n\n Parameters\n ----------\n test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str\n The test data to get predictions for. Can be DataFrame/Dataset or a file that can be loaded into DataFrame/Dataset.\n get_original_labels : bool, default = True\n Whether or not predictions should be formatted in terms of the original labels.\n For example, the labels might be \"entailment\" or \"not_entailment\" and predictions could either be of this form (if `True`) or integer-indices corresponding to these classes (if `False`).\n stochastic_chunk : bool or None, default = None\n Whether to turn on stochastic chunk\n num_repeat : int or None\n The number of repeats\n\n Returns\n -------\n predictions : array\n The predictions for each sample. Shape of this array is (#Samples,).\n \"\"\"\n return self._internal_predict(test_data,\n get_original_labels=get_original_labels,\n get_probabilities=False,\n stochastic_chunk=stochastic_chunk,\n num_repeat=num_repeat)\n\n def save(self, dir_path):\n \"\"\"Save this model to disk.\n\n Parameters\n ----------\n dir_path : str\n Directory where the model should be saved.\n \"\"\"\n os.makedirs(dir_path, exist_ok=True)\n self.net.save_parameters(os.path.join(dir_path, 'net.params'))\n with open(os.path.join(dir_path, 'cfg.yml'), 'w') as of:\n of.write(self.config.dump())\n # Save preprocessor\n with open(os.path.join(dir_path, 'preprocessor.pkl'), 'wb') as of:\n pickle.dump(self.preprocessor, of)\n if not isinstance(self._eval_metric, str):\n eval_metric = self._eval_metric.name\n else:\n eval_metric = self._eval_metric\n log_metrics = []\n for metric in self._log_metrics:\n if not isinstance(metric, str):\n log_metrics.append(metric.name)\n else:\n log_metrics.append(metric)\n # Save additional assets about the parsed dataset information\n with open(os.path.join(dir_path, 'assets.json'), 'w') as of:\n json.dump(\n {\n 'problem_type': self._problem_type,\n 'label_columns': self._label_columns,\n 'eval_metric': eval_metric,\n 'log_metrics': log_metrics,\n 'feature_columns': self._feature_columns,\n 'column_types': self._column_types,\n 'version': version.__version__,\n }, of, ensure_ascii=True)\n\n @classmethod\n def load(cls, dir_path: str):\n \"\"\"Load a model object previously produced by `fit()` from disk and return this object.\n It is highly recommended the predictor be loaded with the exact AutoGluon version\n it was fit with.\n\n Parameters\n ----------\n dir_path\n Path to directory where this model was previously saved.\n\n Returns\n -------\n model\n A `BertForTextPredictionBasic` object that can be used for making predictions on new data.\n \"\"\"\n cfg = 
base_cfg().clone_merge(os.path.join(dir_path, 'cfg.yml'))\n with open(os.path.join(dir_path, 'preprocessor.pkl'), 'rb') as in_f:\n preprocessor = pickle.load(in_f)\n with open(os.path.join(dir_path, 'assets.json'), 'r') as f:\n assets = json.load(f)\n label_columns = assets['label_columns']\n feature_columns = assets['feature_columns']\n eval_metric = assets['eval_metric']\n log_metrics = assets['log_metrics']\n problem_type = assets['problem_type']\n column_types = assets['column_types']\n # TODO(sxjscience) Post 0.1. In general, we will need to support compatible version check\n version = assets['version']\n backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \\\n = get_backbone(cfg.model.backbone.name)\n if 'roberta' in cfg.model.backbone.name:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)\n else:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg)\n if problem_type == REGRESSION:\n out_shape = 1\n elif problem_type == MULTICLASS:\n out_shape = len(preprocessor.label_generator.classes_)\n elif problem_type == BINARY:\n assert len(preprocessor.label_generator.classes_) == 2\n out_shape = 2\n else:\n raise NotImplementedError\n net = MultiModalWithPretrainedTextNN(\n text_backbone=text_backbone,\n num_text_features=1,\n num_categorical_features=len(preprocessor.categorical_feature_names),\n num_numerical_features=len(preprocessor.numerical_feature_names) > 0,\n numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0\n else len(preprocessor.numerical_feature_names),\n num_categories=preprocessor.categorical_num_categories,\n get_embedding=False,\n cfg=cfg.model.network,\n out_shape=out_shape)\n net.hybridize()\n ctx_l = mx.cpu()\n net.load_parameters(os.path.join(dir_path, 'net.params'), ctx=ctx_l)\n model = cls(column_types=column_types,\n label_columns=label_columns,\n feature_columns=feature_columns,\n problem_type=problem_type,\n eval_metric=eval_metric,\n log_metrics=log_metrics)\n model._net = net\n model._config = cfg\n model._preprocessor = preprocessor\n return model\n\n def extract_embedding(self, data, stochastic_chunk=None, num_repeat=None):\n \"\"\"Extract the embedding from the pretrained model.\n\n Parameters\n ----------\n data\n Data that can be parsed to pandas dataframe\n stochastic_chunk\n Whether to use stochastic chunk\n num_repeat\n The number of repeats\n\n Returns\n -------\n embeddings\n The output embeddings will have shape\n (#samples, embedding_dim)\n \"\"\"\n if not isinstance(data, pd.DataFrame):\n if isinstance(data, (list, dict)):\n data = pd.DataFrame(data)\n elif isinstance(data, str):\n data = load_pd.load(data)\n else:\n raise NotImplementedError(f'The format of data is not understood. 
'\n f'We have type(data)=\"{type(data)}\"')\n dataset = self.preprocessor.transform(data[self.feature_columns])\n inference_batch_size = self.config.optimization.per_device_batch_size \\\n * self.config.optimization.val_batch_size_mult\n cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)\n if stochastic_chunk is None:\n stochastic_chunk = self.config.model.test_stochastic_chunk\n batchify_fn = MultiModalTextBatchify(\n num_text_inputs=len(self.preprocessor.text_feature_names),\n num_categorical_inputs=len(self.preprocessor.categorical_feature_names),\n num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,\n cls_token_id=cls_id, sep_token_id=sep_id,\n max_length=self.config.preprocessing.text.max_length,\n mode='test',\n stochastic_chunk=stochastic_chunk,\n insert_sep=self.config.model.insert_sep)\n dataloader = DataLoader(dataset,\n batch_size=inference_batch_size,\n shuffle=False,\n batchify_fn=batchify_fn)\n if self._embed_net is None:\n embed_net = MultiModalWithPretrainedTextNN(\n text_backbone=self.net.text_backbone,\n num_text_features=1,\n num_categorical_features=len(self.preprocessor.categorical_feature_names),\n num_numerical_features=len(self.preprocessor.numerical_feature_names) > 0,\n numerical_input_units=None if len(self.preprocessor.numerical_feature_names) == 0\n else len(self.preprocessor.numerical_feature_names),\n num_categories=self.preprocessor.categorical_num_categories,\n get_embedding=True,\n cfg=self.config.model.network,\n out_shape=self.net.out_shape,\n params=self.net.collect_params(),\n prefix='embed_net_')\n embed_net.hybridize()\n self._embed_net = embed_net\n\n if num_repeat is None:\n num_repeat = self.config.model.inference_num_repeat\n ctx_l = get_mxnet_available_ctx()\n self._embed_net.collect_params().reset_ctx(ctx_l)\n embeddings = _classification_regression_predict(self._embed_net,\n dataloader=dataloader,\n problem_type=self._problem_type,\n label_scaler=self.preprocessor.label_scaler,\n has_label=False,\n extract_embedding=True,\n num_repeat=num_repeat)\n self._embed_net.collect_params().reset_ctx(mx.cpu())\n return embeddings\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.expand_dims",
"pandas.DataFrame",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.argmin",
"sklearn.preprocessing.LabelEncoder",
"numpy.stack",
"numpy.ceil",
"numpy.argmax",
"pandas.to_numeric",
"pandas.concat",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"pandas.read_pickle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
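The predictor code embedded in the row above defines a small persistence-and-inference surface: `save(dir_path)`, the classmethod `load(dir_path)`, `predict`, `predict_proba`, and `evaluate`. The following is a hypothetical usage sketch only — `model` is assumed to be an already-fitted instance of that class, and the directory name, DataFrame columns, and metric name are placeholders not taken from the source.

```python
# Hypothetical sketch: `model` is assumed to be an already-fitted instance of the
# predictor class shown above; "saved_predictor", the column names and "acc" are placeholders.
import pandas as pd

test_df = pd.DataFrame({"sentence": ["great movie", "terrible plot"],
                        "label": [1, 0]})  # must keep the label column used at fit time

model.save("saved_predictor")                   # writes net.params, cfg.yml, preprocessor.pkl, assets.json
reloaded = type(model).load("saved_predictor")  # classmethod: rebuilds the backbone, loads weights on CPU

print(reloaded.evaluate(test_df, metrics=["acc"]))  # single score, or a dict for several metrics
print(reloaded.predict(test_df))                    # original class labels (get_original_labels=True)
print(reloaded.predict_proba(test_df).shape)        # (n_samples, n_classes); classification only
```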
HDembinski/aghast | [
"ed97e9abc870e729d300622253aa7e9c870f77ec",
"ed97e9abc870e729d300622253aa7e9c870f77ec"
] | [
"python/tests/test_getitem.py",
"python/tests/test_validity.py"
] | [
"#!/usr/bin/env python\n\n# BSD 3-Clause License; see https://github.com/scikit-hep/aghast/blob/master/LICENSE\n\nimport sys\nimport unittest\n\nimport numpy\n\nfrom aghast import *\n\n\nclass Test(unittest.TestCase):\n def runTest(self):\n pass\n\n def test_getitem_twodim(self):\n a = Histogram(\n [Axis(IntegerBinning(0, 3)), Axis(IntegerBinning(0, 2))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array(\n [\n [10, 100, 1000],\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n )\n )\n ),\n )\n a.checkvalid()\n assert a.axis[0].binning.toCategoryBinning().categories == [\"0\", \"1\", \"2\", \"3\"]\n assert a.axis[1].binning.toCategoryBinning().categories == [\"0\", \"1\", \"2\"]\n assert a.counts.counts.array.tolist() == [\n [10, 100, 1000],\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n\n assert a.counts[None, None] == sum(\n [10, 100, 1000, 20, 200, 2000, 30, 300, 3000, 40, 400, 4000]\n )\n assert a.counts[None, :].tolist() == [100, 1000, 10000]\n assert a.counts[None].tolist() == [100, 1000, 10000]\n assert a.counts[:, None].tolist() == [1110, 2220, 3330, 4440]\n assert a.counts[None, 1] == 1000\n assert a.counts[1, None] == 2220\n assert a.counts[None, 1:].tolist() == [1000, 10000]\n assert a.counts[1:, None].tolist() == [2220, 3330, 4440]\n assert a.counts[None, [2, 1, 1, 0]].tolist() == [10000, 1000, 1000, 100]\n assert a.counts[[3, 2, 2, 0], None].tolist() == [4440, 3330, 3330, 1110]\n assert a.counts[None, [True, False, True]].tolist() == [100, 10000]\n assert a.counts[[False, True, True, False], None].tolist() == [2220, 3330]\n\n assert a.counts[:, :].tolist() == [\n [10, 100, 1000],\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n assert a.counts[:].tolist() == [\n [10, 100, 1000],\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n assert a.counts[1:, :].tolist() == [\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n assert a.counts[1:].tolist() == [\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n assert a.counts[:, 1:].tolist() == [\n [100, 1000],\n [200, 2000],\n [300, 3000],\n [400, 4000],\n ]\n assert a.counts[2:, 1:].tolist() == [[300, 3000], [400, 4000]]\n assert a.counts[:, 1].tolist() == [100, 200, 300, 400]\n assert a.counts[1, :].tolist() == [20, 200, 2000]\n assert a.counts[1].tolist() == [20, 200, 2000]\n assert a.counts[2:, 1].tolist() == [300, 400]\n assert a.counts[1, 2:].tolist() == [2000]\n assert a.counts[:, [2, 0]].tolist() == [\n [1000, 10],\n [2000, 20],\n [3000, 30],\n [4000, 40],\n ]\n assert a.counts[[2, 0], :].tolist() == [[30, 300, 3000], [10, 100, 1000]]\n assert a.counts[1:, [2, 0]].tolist() == [[2000, 20], [3000, 30], [4000, 40]]\n assert a.counts[[2, 0], 1:].tolist() == [[300, 3000], [100, 1000]]\n assert a.counts[:, [True, False, True]].tolist() == [\n [10, 1000],\n [20, 2000],\n [30, 3000],\n [40, 4000],\n ]\n assert a.counts[[False, True, True, False], :].tolist() == [\n [20, 200, 2000],\n [30, 300, 3000],\n ]\n assert a.counts[1:, [True, False, True]].tolist() == [\n [20, 2000],\n [30, 3000],\n [40, 4000],\n ]\n assert a.counts[[False, True, True, False], 1:].tolist() == [\n [200, 2000],\n [300, 3000],\n ]\n\n assert a.counts[1, 2] == 2000\n assert a.counts[1, [2, 2, 0]].tolist() == [2000, 2000, 20]\n assert a.counts[[2, 2, 0], 1].tolist() == [300, 300, 100]\n assert a.counts[1, [True, False, True]].tolist() == [20, 2000]\n assert a.counts[[False, True, True, False], 1].tolist() == [200, 300]\n\n assert a.counts[[1, 2], [2, 0]].tolist() == 
[[2000, 20], [3000, 30]]\n assert a.counts[[False, True, True, False], [2, 0]].tolist() == [\n [2000, 20],\n [3000, 30],\n ]\n assert a.counts[[False, True, True, False], [True, False, True]].tolist() == [\n [20, 2000],\n [30, 3000],\n ]\n\n assert a.counts[[2, 0], [2, 2, 0]].tolist() == [\n [3000, 3000, 30],\n [1000, 1000, 10],\n ]\n assert a.counts[[2, 0], [True, False, True]].tolist() == [\n [30, 3000],\n [10, 1000],\n ]\n assert a.counts[[True, False, True, False], [True, False, True]].tolist() == [\n [10, 1000],\n [30, 3000],\n ]\n\n def test_getitem_IntegerBinning(self):\n a = Histogram(\n [Axis(IntegerBinning(-5, 5))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(11, dtype=int))\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n ]\n assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n assert a.counts[None] == 55\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5] == 5\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[numpy.array([7, 4, 7, 5, -1])].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False, True]\n )\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n a = Histogram(\n [Axis(IntegerBinning(-5, 5, loc_overflow=IntegerBinning.above1))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"[6, +inf)\",\n ]\n assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]\n\n assert a.counts[None] == 55 + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n a = Histogram(\n [Axis(IntegerBinning(-5, 5, loc_overflow=IntegerBinning.below1))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[6, +inf)\",\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n ]\n assert a.counts.counts.array.tolist() == [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n assert a.counts[None] == 55 + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]\n assert a.counts[\n [True, 
False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n a = Histogram(\n [\n Axis(\n IntegerBinning(\n -5,\n 5,\n loc_underflow=IntegerBinning.below2,\n loc_overflow=IntegerBinning.below1,\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n [123, 999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"(-inf, -6]\",\n \"[6, +inf)\",\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n ]\n assert a.counts.counts.array.tolist() == [\n 123,\n 999,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n ]\n\n assert a.counts[None] == 55 + 123 + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]\n assert a.counts[-numpy.inf : 5].tolist() == [123, 0, 1, 2, 3, 4]\n assert a.counts[-numpy.inf : numpy.inf].tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 999,\n ]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]\n assert a.counts[-numpy.inf :].tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n ]\n assert a.counts[5] == 5\n assert a.counts[-numpy.inf] == 123\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -numpy.inf, -1]].tolist() == [\n 7,\n 4,\n 7,\n 999,\n 5,\n 123,\n 10,\n ]\n assert a.counts[\n [7, -numpy.inf, 4, 7, numpy.inf, 5, -numpy.inf, -1]\n ].tolist() == [7, 123, 4, 7, 999, 5, 123, 10]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n a = Histogram(\n [\n Axis(\n IntegerBinning(\n -5,\n 5,\n loc_underflow=IntegerBinning.above1,\n loc_overflow=IntegerBinning.below1,\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 123]\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[6, +inf)\",\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"(-inf, -6]\",\n ]\n assert a.counts.counts.array.tolist() == [\n 999,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 123,\n ]\n\n assert a.counts[None] == 55 + 123 + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]\n assert a.counts[-numpy.inf : 5].tolist() == [123, 0, 1, 2, 3, 4]\n assert a.counts[-numpy.inf : numpy.inf].tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 999,\n ]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]\n assert a.counts[-numpy.inf :].tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n ]\n assert a.counts[5] == 5\n assert a.counts[-numpy.inf] == 123\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -numpy.inf, -1]].tolist() == [\n 7,\n 4,\n 7,\n 999,\n 5,\n 123,\n 10,\n ]\n assert a.counts[\n [7, -numpy.inf, 
4, 7, numpy.inf, 5, -numpy.inf, -1]\n ].tolist() == [7, 123, 4, 7, 999, 5, 123, 10]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n def test_getitem_RegularBinning(self):\n a = Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(10, dtype=int))\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n ]\n assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5] == 5\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 9]\n assert a.counts[numpy.array([7, 4, 7, 5, -1])].tolist() == [7, 4, 7, 5, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n\n a = Histogram(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n overflow=RealOverflow(loc_overflow=RealOverflow.above1),\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999], dtype=int)\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n \"[5, +inf]\",\n ]\n assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]\n assert a.counts[5] == 5\n assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.inf, -1]].tolist() == [\n 7,\n 999,\n 4,\n 7,\n 5,\n 999,\n 9,\n ]\n assert a.counts[\n numpy.array([7, numpy.inf, 4, 7, 5, numpy.inf, -1])\n ].tolist() == [7, 999, 4, 7, 5, 999, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n\n a = Histogram(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n overflow=RealOverflow(loc_overflow=RealOverflow.below1),\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array([999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int)\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[5, +inf]\",\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n ]\n assert a.counts.counts.array.tolist() == [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
999]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.inf, -1]].tolist() == [\n 7,\n 999,\n 4,\n 7,\n 5,\n 999,\n 9,\n ]\n assert a.counts[\n numpy.array([7, numpy.inf, 4, 7, 5, numpy.inf, -1])\n ].tolist() == [7, 999, 4, 7, 5, 999, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n\n a = Histogram(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n overflow=RealOverflow(\n loc_overflow=RealOverflow.below2,\n loc_nanflow=RealOverflow.below1,\n ),\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array([999, 123, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int)\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[5, +inf]\",\n \"{nan}\",\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n ]\n assert a.counts.counts.array.tolist() == [\n 999,\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n ]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999 + 123\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[numpy.nan] == 123\n assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.nan, -1]].tolist() == [\n 7,\n 999,\n 4,\n 7,\n 5,\n 123,\n 9,\n ]\n if sys.version_info[0] >= 3:\n exec(\n \"assert a.counts[[numpy.inf, ..., numpy.nan]].tolist() == [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 123]\"\n )\n assert a.counts[\n numpy.array([7, numpy.inf, 4, 7, 5, numpy.nan, -1])\n ].tolist() == [7, 999, 4, 7, 5, 123, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n\n a = Histogram(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n overflow=RealOverflow(\n loc_overflow=RealOverflow.above1,\n loc_nanflow=RealOverflow.below1,\n ),\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array([123, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999], dtype=int)\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"{nan}\",\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n \"[5, +inf]\",\n ]\n assert a.counts.counts.array.tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 999,\n ]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999 + 123\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[numpy.nan] == 123\n 
assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.nan, -1]].tolist() == [\n 7,\n 999,\n 4,\n 7,\n 5,\n 123,\n 9,\n ]\n assert a.counts[\n numpy.array([7, numpy.inf, 4, 7, 5, numpy.nan, -1])\n ].tolist() == [7, 999, 4, 7, 5, 123, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n",
"#!/usr/bin/env python\n\n# BSD 3-Clause License; see https://github.com/scikit-hep/aghast/blob/master/LICENSE\n\nimport unittest\n\nimport numpy\n\nfrom aghast import *\n\n\nclass Test(unittest.TestCase):\n def runTest(self):\n pass\n\n def test_validity_Metadata(self):\n h = Collection(\n {}, metadata=Metadata(\"\"\"{\"one\": 1, \"two\": 2}\"\"\", language=Metadata.json)\n )\n h.checkvalid()\n assert h.metadata.data == \"\"\"{\"one\": 1, \"two\": 2}\"\"\"\n assert h.metadata.language == Metadata.json\n\n def test_validity_Decoration(self):\n h = Collection(\n {},\n decoration=Decoration(\"\"\"points { color: red }\"\"\", language=Decoration.css),\n )\n h.checkvalid()\n assert h.decoration.data == \"\"\"points { color: red }\"\"\"\n assert h.decoration.css == Decoration.css\n\n def test_validity_RawInlineBuffer(self):\n h = Ntuple(\n [Column(\"one\", Column.int32)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [\n Page(\n RawInlineBuffer(\n numpy.zeros(1, dtype=numpy.int32)\n )\n )\n ],\n [0, 1],\n )\n ]\n )\n ]\n )\n ],\n )\n h.checkvalid()\n assert len(h.instances[0].chunks[0].column_chunks[0].pages[0].array) == 1\n\n h = Ntuple(\n [Column(\"one\", Column.int32)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n )\n ]\n )\n ]\n )\n ],\n )\n h.checkvalid()\n assert h.instances[0].chunks[0].column_chunks[0].pages[0].array.tolist() == [5]\n\n def test_validity_RawExternalBuffer(self):\n buf = numpy.zeros(1, dtype=numpy.int32)\n h = Ntuple(\n [Column(\"one\", Column.int32)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [\n Page(\n RawExternalBuffer(\n buf.ctypes.data, buf.nbytes\n )\n )\n ],\n [0, 1],\n )\n ]\n )\n ]\n )\n ],\n )\n h.checkvalid()\n assert len(h.instances[0].chunks[0].column_chunks[0].pages[0].array) == 1\n\n buf = numpy.array([3.14], dtype=numpy.float64)\n h = Ntuple(\n [Column(\"one\", Column.float64)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [\n Page(\n RawExternalBuffer(\n buf.ctypes.data, buf.nbytes\n )\n )\n ],\n [0, 1],\n )\n ]\n )\n ]\n )\n ],\n )\n h.checkvalid()\n assert h.instances[0].chunks[0].column_chunks[0].pages[0].array.tolist() == [\n 3.14\n ]\n\n def test_validity_InterpretedInlineBuffer(self):\n h = BinnedEvaluatedFunction(\n [Axis()],\n InterpretedInlineBuffer(\n numpy.zeros(1, dtype=numpy.int32), dtype=InterpretedInlineBuffer.int32\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0]\n\n h = BinnedEvaluatedFunction(\n [Axis()],\n InterpretedInlineBuffer(\n b\"\\x07\\x00\\x00\\x00\", dtype=InterpretedInlineBuffer.int32\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [7]\n\n def test_validity_InterpretedExternalBuffer(self):\n buf = numpy.zeros(1, dtype=numpy.float64)\n h = BinnedEvaluatedFunction(\n [Axis()],\n InterpretedExternalBuffer(\n buf.ctypes.data, buf.nbytes, dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0]\n\n buf = numpy.array([3.14], dtype=numpy.float64)\n h = BinnedEvaluatedFunction(\n [Axis()],\n InterpretedExternalBuffer(\n buf.ctypes.data, buf.nbytes, dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [3.14]\n\n def test_validity_IntegerBinning(self):\n h = BinnedEvaluatedFunction(\n [Axis(IntegerBinning(10, 20))],\n InterpretedInlineBuffer(\n numpy.zeros(11), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n h = BinnedEvaluatedFunction(\n [Axis(IntegerBinning(20, 10))],\n 
InterpretedInlineBuffer(\n numpy.zeros(11), dtype=InterpretedInlineBuffer.float64\n ),\n )\n assert not h.isvalid\n h = BinnedEvaluatedFunction(\n [\n Axis(\n IntegerBinning(\n 10,\n 20,\n loc_underflow=IntegerBinning.nonexistent,\n loc_overflow=IntegerBinning.nonexistent,\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(11), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 11\n h = BinnedEvaluatedFunction(\n [\n Axis(\n IntegerBinning(\n 10,\n 20,\n loc_underflow=IntegerBinning.below1,\n loc_overflow=IntegerBinning.nonexistent,\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(12), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 12\n h = BinnedEvaluatedFunction(\n [\n Axis(\n IntegerBinning(\n 10,\n 20,\n loc_underflow=IntegerBinning.nonexistent,\n loc_overflow=IntegerBinning.above1,\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(12), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 12\n h = BinnedEvaluatedFunction(\n [\n Axis(\n IntegerBinning(\n 10,\n 20,\n loc_underflow=IntegerBinning.below1,\n loc_overflow=IntegerBinning.above1,\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(13), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 13\n\n def test_validity_RealInterval(self):\n h = BinnedEvaluatedFunction(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n InterpretedInlineBuffer(\n numpy.zeros(10), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n h = BinnedEvaluatedFunction(\n [Axis(RegularBinning(10, RealInterval(5, -5)))],\n InterpretedInlineBuffer(\n numpy.zeros(10), dtype=InterpretedInlineBuffer.float64\n ),\n )\n assert not h.isvalid\n\n def test_validity_RealOverflow(self):\n h = BinnedEvaluatedFunction(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n RealOverflow(\n loc_underflow=RealOverflow.nonexistent,\n loc_overflow=RealOverflow.nonexistent,\n loc_nanflow=RealOverflow.nonexistent,\n ),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(10), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 10\n h = BinnedEvaluatedFunction(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n RealOverflow(\n loc_underflow=RealOverflow.above1,\n loc_overflow=RealOverflow.nonexistent,\n loc_nanflow=RealOverflow.nonexistent,\n ),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(11), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 11\n h = BinnedEvaluatedFunction(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n RealOverflow(\n loc_underflow=RealOverflow.nonexistent,\n loc_overflow=RealOverflow.above1,\n loc_nanflow=RealOverflow.nonexistent,\n ),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(11), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 11\n h = BinnedEvaluatedFunction(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n RealOverflow(\n loc_underflow=RealOverflow.nonexistent,\n loc_overflow=RealOverflow.nonexistent,\n loc_nanflow=RealOverflow.above1,\n ),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(11), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 11\n h = BinnedEvaluatedFunction(\n [\n Axis(\n RegularBinning(\n 
10,\n RealInterval(-5, 5),\n RealOverflow(\n loc_underflow=RealOverflow.above1,\n loc_overflow=RealOverflow.nonexistent,\n loc_nanflow=RealOverflow.above2,\n ),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(12), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 12\n h = BinnedEvaluatedFunction(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n RealOverflow(\n loc_underflow=RealOverflow.above1,\n loc_overflow=RealOverflow.above2,\n loc_nanflow=RealOverflow.above3,\n ),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.zeros(13), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0] * 13\n\n def test_validity_RegularBinning(self):\n h = BinnedEvaluatedFunction(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n InterpretedInlineBuffer(\n numpy.zeros(10), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n\n def test_validity_HexagonalBinning(self):\n h = BinnedEvaluatedFunction(\n [Axis(HexagonalBinning(3, 5, -5, -4))],\n InterpretedInlineBuffer(\n numpy.array([[0.0] * 2] * 3), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [[0.0] * 2] * 3\n h = BinnedEvaluatedFunction(\n [\n Axis(\n HexagonalBinning(\n 3,\n 5,\n -5,\n -4,\n qoverflow=RealOverflow(loc_nanflow=RealOverflow.above1),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.array([[0.0] * 2] * 4), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [[0.0] * 2] * 4\n h = BinnedEvaluatedFunction(\n [\n Axis(\n HexagonalBinning(\n 3,\n 5,\n -5,\n -4,\n roverflow=RealOverflow(loc_nanflow=RealOverflow.above1),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.array([[0.0] * 3] * 3), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [[0.0] * 3] * 3\n h = BinnedEvaluatedFunction(\n [\n Axis(\n HexagonalBinning(\n 3,\n 5,\n -5,\n -4,\n qoverflow=RealOverflow(loc_nanflow=RealOverflow.above1),\n roverflow=RealOverflow(loc_nanflow=RealOverflow.above1),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.array([[0.0] * 3] * 4), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [[0.0] * 3] * 4\n\n def test_validity_EdgesBinning(self):\n h = BinnedEvaluatedFunction(\n [\n Axis(\n EdgesBinning(\n [3.3],\n overflow=RealOverflow(\n loc_underflow=RealOverflow.above1,\n loc_overflow=RealOverflow.above2,\n ),\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.array([0.0, 0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0, 0.0]\n h = BinnedEvaluatedFunction(\n [Axis(EdgesBinning([1.1, 2.2, 3.3]))],\n InterpretedInlineBuffer(\n numpy.array([0.0, 0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0, 0.0]\n\n def test_validity_IrregularBinning(self):\n h = BinnedEvaluatedFunction(\n [Axis(IrregularBinning([RealInterval(0.5, 1.5)]))],\n InterpretedInlineBuffer(\n numpy.array([0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0]\n h = BinnedEvaluatedFunction(\n [\n Axis(\n IrregularBinning(\n [\n RealInterval(0.5, 1.5),\n RealInterval(1.5, 1.5),\n RealInterval(0.0, 10.0),\n ]\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.array([0.0, 0.0, 0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0, 0.0, 
0.0]\n\n def test_validity_CategoryBinning(self):\n h = BinnedEvaluatedFunction(\n [Axis(CategoryBinning([\"one\", \"two\", \"three\"]))],\n InterpretedInlineBuffer(\n numpy.array([0.0, 0.0, 0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0, 0.0, 0.0]\n h = BinnedEvaluatedFunction(\n [\n Axis(\n CategoryBinning(\n [\"one\", \"two\", \"three\"], loc_overflow=CategoryBinning.above1\n )\n )\n ],\n InterpretedInlineBuffer(\n numpy.array([0.0, 0.0, 0.0, 0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0, 0.0, 0.0, 0.0]\n\n def test_validity_SparseRegularBinning(self):\n h = BinnedEvaluatedFunction(\n [Axis(SparseRegularBinning([-5, -3, 10, 1000], 0.1))],\n InterpretedInlineBuffer(\n numpy.array([0.0, 0.0, 0.0, 0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0, 0.0, 0.0, 0.0]\n\n def test_validity_FractionBinning(self):\n h = BinnedEvaluatedFunction(\n [Axis(FractionBinning())],\n InterpretedInlineBuffer(\n numpy.array([0.0, 0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0, 0.0]\n assert h.axis[0].binning.error_method == FractionBinning.unspecified\n h = BinnedEvaluatedFunction(\n [Axis(FractionBinning()), Axis(RegularBinning(10, RealInterval(-5, 5)))],\n InterpretedInlineBuffer(\n numpy.array(\n [\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n ),\n dtype=InterpretedInlineBuffer.float64,\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n h = BinnedEvaluatedFunction(\n [Axis(RegularBinning(10, RealInterval(-5, 5))), Axis(FractionBinning())],\n InterpretedInlineBuffer(\n numpy.array(\n [\n [\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n ]\n ]\n ),\n dtype=InterpretedInlineBuffer.float64,\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n ]\n\n def test_validity_PredicateBinning(self):\n h = Histogram(\n [Axis(PredicateBinning([\"p\", \"q\"]))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.array([0.0, 0.0]))\n ),\n )\n h.checkvalid()\n\n def test_validity_Assignments(self):\n h = Histogram(\n [\n Axis(\n VariationBinning(\n [\n Variation(\n [\n Assignment(\"x\", \"1\"),\n Assignment(\"y\", \"2\"),\n Assignment(\"z\", \"3\"),\n ]\n )\n ]\n )\n )\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.array([0.0]))),\n )\n h.checkvalid()\n assert h.axis[0].binning.variations[0].assignments[1].expression == \"2\"\n\n def test_validity_Variation(self):\n h = Histogram(\n [\n Axis(\n VariationBinning(\n [\n Variation([Assignment(\"x\", \"1\")]),\n Variation([Assignment(\"x\", \"2\")]),\n ]\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.array([0.0, 0.0]))\n ),\n )\n h.checkvalid()\n\n def test_validity_VariationBinning(self):\n h = Histogram(\n [\n Axis(\n VariationBinning(\n [\n Variation([Assignment(\"x\", \"1\")]),\n Variation([Assignment(\"x\", \"2\")]),\n Variation([Assignment(\"x\", \"3\")]),\n ]\n )\n )\n ],\n UnweightedCounts(\n 
InterpretedInlineBuffer.fromarray(numpy.array([0.0, 0.0, 0.0]))\n ),\n )\n h.checkvalid()\n\n def test_validity_Axis(self):\n h = BinnedEvaluatedFunction(\n [Axis(expression=\"x\", title=\"wow\")],\n InterpretedInlineBuffer(\n numpy.array([0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.axis[0].expression == \"x\"\n assert h.values.array.tolist() == [0.0]\n\n def test_validity_UnweightedCounts(self):\n h = Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n\n def test_validity_WeightedCounts(self):\n h = Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n WeightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n h = Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n WeightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(10)),\n sumw2=InterpretedInlineBuffer.fromarray(numpy.arange(10) ** 2),\n ),\n )\n h.checkvalid()\n h = Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n WeightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(10)),\n sumw2=InterpretedInlineBuffer.fromarray(numpy.arange(10) ** 2),\n unweighted=UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(10))\n ),\n ),\n )\n h.checkvalid()\n\n def test_validity_StatisticFilter(self):\n h = Histogram(\n [\n Axis(\n RegularBinning(10, RealInterval(-5, 5)),\n statistics=[\n Statistics(\n moments=[\n Moments(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n ),\n 1,\n filter=StatisticFilter(excludes_nan=False),\n )\n ]\n )\n ],\n )\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n\n def test_validity_Moments(self):\n h = Histogram(\n [\n Axis(\n RegularBinning(10, RealInterval(-5, 5)),\n statistics=[\n Statistics(\n moments=[\n Moments(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n ),\n 1,\n ),\n Moments(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n ),\n 2,\n ),\n ]\n )\n ],\n )\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n h = Histogram(\n [\n Axis(\n RegularBinning(10, RealInterval(-5, 5)),\n statistics=[\n Statistics(\n moments=[\n Moments(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n ),\n 0,\n weightpower=0,\n ),\n Moments(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n ),\n 0,\n weightpower=1,\n ),\n ]\n )\n ],\n )\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n\n def test_validity_Extremes(self):\n h = Histogram(\n [\n Axis(\n RegularBinning(10, RealInterval(-5, 5)),\n statistics=[\n Statistics(\n min=Extremes(\n InterpretedInlineBuffer.fromarray(numpy.array([0.0]))\n )\n )\n ],\n )\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n h = Histogram(\n [\n Axis(\n RegularBinning(10, RealInterval(-5, 5)),\n statistics=[\n Statistics(\n min=Extremes(\n InterpretedInlineBuffer.fromarray(numpy.array([0.0]))\n ),\n max=Extremes(\n InterpretedInlineBuffer.fromarray(numpy.array([0.0]))\n ),\n )\n ],\n )\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n\n def test_validity_Quantiles(self):\n h = Histogram(\n [\n Axis(\n RegularBinning(10, RealInterval(-5, 5)),\n statistics=[\n Statistics(\n quantiles=[\n Quantiles(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n ),\n 
0.25,\n ),\n Quantiles(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n )\n ),\n Quantiles(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n ),\n 0.75,\n ),\n ]\n )\n ],\n )\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n h = Histogram(\n [\n Axis(\n RegularBinning(10, RealInterval(-5, 5)),\n statistics=[\n Statistics(\n quantiles=[\n Quantiles(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n ),\n weightpower=0,\n ),\n Quantiles(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0.0])\n ),\n weightpower=1,\n ),\n ]\n )\n ],\n )\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n\n def test_validity_Modes(self):\n h = Histogram(\n [\n Axis(\n RegularBinning(10, RealInterval(-5, 5)),\n statistics=[\n Statistics(\n mode=Modes(\n InterpretedInlineBuffer.fromarray(numpy.array([0.0]))\n )\n )\n ],\n )\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n )\n h.checkvalid()\n\n def test_validity_Statistics(self):\n h = Histogram(\n [\n Axis(\n RegularBinning(10, RealInterval(-5, 5)), statistics=[Statistics()]\n ),\n Axis(\n RegularBinning(10, RealInterval(-5, 5)), statistics=[Statistics()]\n ),\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),\n )\n h.checkvalid()\n h = Ntuple(\n [Column(\"one\", Column.int32), Column(\"two\", Column.int16)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n ),\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x03\\x00\"))], [0, 1]\n ),\n ]\n )\n ]\n )\n ],\n column_statistics=[\n Statistics(\n moments=[\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.array([0.0])), 1\n ),\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.array([0.0])), 2\n ),\n ]\n )\n ],\n )\n h.checkvalid()\n\n def test_validity_Covariance(self):\n h = Histogram(\n [\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),\n axis_covariances=[\n Covariance(0, 1, InterpretedInlineBuffer.fromarray(numpy.arange(1)))\n ],\n )\n h.checkvalid()\n h = Histogram(\n [\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(1000))),\n axis_covariances=[\n Covariance(0, 1, InterpretedInlineBuffer.fromarray(numpy.arange(1))),\n Covariance(0, 2, InterpretedInlineBuffer.fromarray(numpy.arange(1))),\n Covariance(1, 2, InterpretedInlineBuffer.fromarray(numpy.arange(1))),\n ],\n )\n h.checkvalid()\n h = Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n [\n Profile(\n \"\",\n Statistics(\n [\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 1\n ),\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 2\n ),\n ]\n ),\n ),\n Profile(\n \"\",\n Statistics(\n [\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 1\n ),\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 2\n ),\n ]\n ),\n ),\n ],\n profile_covariances=[\n Covariance(0, 1, InterpretedInlineBuffer.fromarray(numpy.arange(1)))\n ],\n )\n h.checkvalid()\n h = Ntuple(\n [Column(\"one\", Column.int32), Column(\"two\", Column.int16)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n 
ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n ),\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x03\\x00\"))], [0, 1]\n ),\n ]\n )\n ]\n )\n ],\n column_covariances=[\n Covariance(0, 1, InterpretedInlineBuffer.fromarray(numpy.arange(1)))\n ],\n )\n h.checkvalid()\n h = Ntuple(\n [Column(\"one\", Column.int32), Column(\"two\", Column.int16)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n ),\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x03\\x00\"))], [0, 1]\n ),\n ]\n )\n ]\n )\n ],\n column_covariances=[\n Covariance(\n 0,\n 1,\n InterpretedInlineBuffer.fromarray(numpy.arange(1)),\n weightpower=1,\n ),\n Covariance(\n 0,\n 1,\n InterpretedInlineBuffer.fromarray(numpy.arange(1)),\n weightpower=0,\n ),\n ],\n )\n h.checkvalid()\n\n def test_validity_Profile(self):\n h = Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(10))),\n [\n Profile(\n \"\",\n Statistics(\n [\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 1\n ),\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.zeros(10)), 2\n ),\n ]\n ),\n )\n ],\n )\n h.checkvalid()\n h = Histogram(\n [\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),\n [\n Profile(\n \"\",\n Statistics(\n [\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.zeros(100)), 1\n ),\n Moments(\n InterpretedInlineBuffer.fromarray(numpy.zeros(100)), 2\n ),\n ]\n ),\n )\n ],\n )\n h.checkvalid()\n\n def test_validity_Histogram(self):\n h = Histogram(\n [\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),\n )\n h.checkvalid()\n\n def test_validity_Parameter(self):\n h = ParameterizedFunction(\n \"x**2\",\n [\n Parameter(\"x\", InterpretedInlineBuffer.fromarray(numpy.array([5]))),\n Parameter(\"y\", InterpretedInlineBuffer.fromarray(numpy.array([6]))),\n ],\n )\n h.checkvalid()\n assert h.parameters[1].values.array.tolist() == [6]\n\n def test_validity_ParameterizedFunction(self):\n h = ParameterizedFunction(\"x**2\")\n h.checkvalid()\n h = Histogram(\n [\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),\n functions={\n \"f\": ParameterizedFunction(\n \"x**2\",\n [\n Parameter(\n \"x\", InterpretedInlineBuffer.fromarray(numpy.arange(100))\n )\n ],\n )\n },\n )\n h.checkvalid()\n\n def test_validity_EvaluatedFunction(self):\n h = Histogram(\n [\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),\n functions={\n \"f\": EvaluatedFunction(\n InterpretedInlineBuffer.fromarray(numpy.arange(100))\n )\n },\n )\n h.checkvalid()\n assert (\n h.functions[\"f\"].values.array.tolist()\n == numpy.arange(100).reshape((10, 10)).tolist()\n )\n h = Histogram(\n [\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),\n functions={\n \"f\": EvaluatedFunction(\n InterpretedInlineBuffer.fromarray(numpy.arange(100)),\n InterpretedInlineBuffer.fromarray(numpy.arange(100)),\n )\n },\n )\n 
h.checkvalid()\n h = Histogram(\n [\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n Axis(RegularBinning(10, RealInterval(-5, 5))),\n ],\n UnweightedCounts(InterpretedInlineBuffer.fromarray(numpy.arange(100))),\n functions={\n \"f\": EvaluatedFunction(\n InterpretedInlineBuffer.fromarray(numpy.arange(100)),\n InterpretedInlineBuffer.fromarray(numpy.arange(100)),\n [\n Quantiles(\n InterpretedInlineBuffer.fromarray(numpy.zeros(100)), 0.25\n ),\n Quantiles(InterpretedInlineBuffer.fromarray(numpy.zeros(100))),\n Quantiles(\n InterpretedInlineBuffer.fromarray(numpy.zeros(100)), 0.75\n ),\n ],\n )\n },\n )\n h.checkvalid()\n\n def test_validity_BinnedEvaluatedFunction(self):\n h = BinnedEvaluatedFunction(\n [Axis()],\n InterpretedInlineBuffer(\n numpy.array([0.0]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [0.0]\n h = BinnedEvaluatedFunction(\n [Axis(), Axis()],\n InterpretedInlineBuffer(\n numpy.array([[0.0]]), dtype=InterpretedInlineBuffer.float64\n ),\n )\n h.checkvalid()\n assert h.values.array.tolist() == [[0.0]]\n\n def test_validity_Page(self):\n h = Ntuple(\n [Column(\"one\", Column.int32)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n )\n ]\n )\n ]\n )\n ],\n )\n h.checkvalid()\n assert h.instances[0].chunks[0].column_chunks[0].pages[0].array.tolist() == [5]\n\n h = Ntuple(\n [Column(\"one\", Column.int32)],\n [NtupleInstance([Chunk([ColumnChunk([], [0])])])],\n )\n h.checkvalid()\n assert h.instances[0].chunks[0].column_chunks[0].array.tolist() == []\n assert {n: x.tolist() for n, x in h.instances[0].chunks[0].arrays.items()} == {\n \"one\": []\n }\n for arrays in h.instances[0].arrays:\n pass\n assert {n: x.tolist() for n, x in arrays.items()} == {\"one\": []}\n\n h = Ntuple(\n [Column(\"one\", Column.int32)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n )\n ]\n )\n ]\n )\n ],\n )\n h.checkvalid()\n assert h.instances[0].chunks[0].column_chunks[0].array.tolist() == [5]\n assert {n: x.tolist() for n, x in h.instances[0].chunks[0].arrays.items()} == {\n \"one\": [5]\n }\n for arrays in h.instances[0].arrays:\n pass\n assert {n: x.tolist() for n, x in arrays.items()} == {\"one\": [5]}\n\n h = Ntuple(\n [Column(\"one\", Column.int32)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [\n Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\")),\n Page(\n RawInlineBuffer(\n b\"\\x04\\x00\\x00\\x00\\x03\\x00\\x00\\x00\"\n )\n ),\n ],\n [0, 1, 3],\n )\n ]\n )\n ]\n )\n ],\n )\n h.checkvalid()\n assert h.instances[0].chunks[0].column_chunks[0].array.tolist() == [5, 4, 3]\n assert {n: x.tolist() for n, x in h.instances[0].chunks[0].arrays.items()} == {\n \"one\": [5, 4, 3]\n }\n for arrays in h.instances[0].arrays:\n pass\n assert {n: x.tolist() for n, x in arrays.items()} == {\"one\": [5, 4, 3]}\n\n def test_validity_Chunk(self):\n h = Ntuple(\n [Column(\"one\", Column.float64)],\n [NtupleInstance([Chunk([ColumnChunk([], [0])])])],\n )\n h.checkvalid()\n\n h = Ntuple([Column(\"one\", Column.int32)], [NtupleInstance([])])\n h.checkvalid()\n for arrays in h.instances[0].arrays:\n assert False\n\n h = Ntuple(\n [Column(\"one\", Column.int32)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n )\n ]\n )\n ]\n )\n ],\n )\n h.checkvalid()\n for arrays in h.instances[0].arrays:\n pass\n assert {n: x.tolist() for n, x in 
arrays.items()} == {\"one\": [5]}\n\n h = Ntuple(\n [Column(\"one\", Column.int32)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n )\n ]\n ),\n Chunk(\n [\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n )\n ]\n ),\n ]\n )\n ],\n )\n h.checkvalid()\n for arrays in h.instances[0].arrays:\n assert {n: x.tolist() for n, x in arrays.items()} == {\"one\": [5]}\n\n def test_validity_Column(self):\n h = Ntuple(\n [Column(\"one\", Column.float64), Column(\"two\", Column.int32)],\n [NtupleInstance([])],\n )\n h.checkvalid()\n\n h = Ntuple(\n [Column(\"one\", Column.int32), Column(\"two\", Column.int16)],\n [\n NtupleInstance(\n [\n Chunk(\n [\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x05\\x00\\x00\\x00\"))], [0, 1]\n ),\n ColumnChunk(\n [Page(RawInlineBuffer(b\"\\x03\\x00\"))], [0, 1]\n ),\n ]\n )\n ]\n )\n ],\n )\n h.checkvalid()\n for arrays in h.instances[0].arrays:\n pass\n assert {n: x.tolist() for n, x in arrays.items()} == {\"one\": [5], \"two\": [3]}\n\n def test_validity_Ntuple(self):\n h = Ntuple([Column(\"one\", Column.float64)], [NtupleInstance([])])\n h.checkvalid()\n\n def test_validity_collection(self):\n h = Collection()\n h.checkvalid()\n h = Collection(\n {\n \"id\": Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(30))\n ),\n ),\n \"id2\": Histogram(\n [Axis(RegularBinning(100, RealInterval(-5, 5)))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(300))\n ),\n ),\n },\n axis=[Axis(RegularBinning(3, RealInterval(-1, 1)))],\n )\n h.checkvalid()\n h = Collection(\n {\n \"b\": Collection(\n {\n \"c\": Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(60))\n ),\n ),\n \"d\": Histogram(\n [Axis(RegularBinning(100, RealInterval(-5, 5)))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(600))\n ),\n ),\n },\n axis=[Axis(FractionBinning())],\n )\n },\n axis=[Axis(RegularBinning(3, RealInterval(-1, 1)))],\n )\n h.checkvalid()\n assert (\n h.objects[\"b\"].objects[\"c\"].counts.counts.array.tolist()\n == numpy.arange(60).reshape((3, 2, 10)).tolist()\n )\n"
] | [
[
"numpy.arange",
"numpy.array"
],
[
"numpy.arange",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Andy-math/optimizer | [
"a65f5ee54a0ae4e02aefb008d47c2d551d071ef0"
] | [
"optimizer/_internals/common/linneq.py"
] | [
"# -*- coding: utf-8 -*-\n\n\nfrom typing import Optional, Tuple\n\nimport numpy\n\nfrom overloads import bind_checker, dyn_typing\nfrom overloads.shortcuts import assertNoInfNaN, assertNoNaN\nfrom overloads.typedefs import ndarray\n\n\ndef noCheck(_: bool) -> None:\n pass\n\n\ndef constraint_check(\n constraints: Tuple[ndarray, ndarray, ndarray, ndarray],\n *,\n theta: Optional[ndarray] = None\n) -> None:\n A, b, lb, ub = constraints\n if theta is not None:\n assertNoInfNaN(theta)\n assertNoInfNaN(A)\n assertNoInfNaN(b)\n assertNoNaN(lb)\n assertNoNaN(ub)\n\n\ndef _input_checker(\n parameters: Tuple[ndarray, Tuple[ndarray, ndarray, ndarray, ndarray]]\n) -> None:\n theta, constraints = parameters\n constraint_check(constraints, theta=theta)\n\n\nn = dyn_typing.SizeVar()\nnConstraint = dyn_typing.SizeVar()\n\n\n@dyn_typing.dyn_check_2(\n input=(\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.Tuple(\n (\n dyn_typing.NDArray(numpy.float64, (nConstraint, n)),\n dyn_typing.NDArray(numpy.float64, (nConstraint,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n )\n ),\n ),\n output=dyn_typing.Bool(),\n)\n@bind_checker.bind_checker_2(input=_input_checker, output=noCheck)\ndef check(\n theta: ndarray, constraints: Tuple[ndarray, ndarray, ndarray, ndarray]\n) -> bool:\n A, b, lb, ub = constraints\n \"\"\"检查参数theta是否满足约束[A @ theta <= b],空约束返回True\"\"\"\n result = bool(\n numpy.all(lb <= theta) and numpy.all(theta <= ub) and numpy.all(A @ theta <= b)\n )\n return result\n\n\nn = dyn_typing.SizeVar()\nnConstraint = dyn_typing.SizeVar()\n\n\n@dyn_typing.dyn_check_2(\n input=(\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.Tuple(\n (\n dyn_typing.NDArray(numpy.float64, (nConstraint, n)),\n dyn_typing.NDArray(numpy.float64, (nConstraint,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n )\n ),\n ),\n output=dyn_typing.Tuple(\n (\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n )\n ),\n)\n@bind_checker.bind_checker_2(\n input=_input_checker, output=bind_checker.make_checker_2(assertNoNaN, assertNoNaN)\n)\ndef margin(\n theta: ndarray, constraints: Tuple[ndarray, ndarray, ndarray, ndarray]\n) -> Tuple[ndarray, ndarray]:\n \"\"\"\n 返回theta距离线性约束边界的间距下界和上界(h_lb, h_ub)\n h: 步长, lb: 下界, ub: 上界\n theta超出边界时 AssertionError\n \"\"\"\n assert check(theta, constraints)\n A, b, lb, ub = constraints\n if b.shape[0] == 0:\n h_lb = numpy.full(theta.shape, -numpy.inf)\n h_ub = numpy.full(theta.shape, numpy.inf)\n else:\n \"\"\"\n A @ (theta+h*(arange(n) == i)) == b\n => A @ h*(arange(n) == i) == b - A @ theta\n => h*A[:, i] == b - A @ theta (*must positive as valid point)\n => h == (b - A @ theta)/A[:, i]\n \"\"\"\n residual: ndarray = b - A @ theta # (nConst, )\n residual.shape = (A.shape[0], 1) # (nConst, 1)\n h: ndarray = residual / A # (nConst, n)\n \"\"\"\n lb: 所有负数里面取最大\n ub: 所有正数里面取最小\n 系数A为0,则约束与theta(i)无关\n \"\"\"\n h_lb = h.copy()\n h_ub = h.copy()\n h_lb[A >= 0] = -numpy.inf\n h_ub[A <= 0] = numpy.inf\n h_lb = h_lb.max(axis=0) # type: ignore\n h_ub = h_ub.min(axis=0) # type: ignore\n \"\"\"\n [lb/ub]补丁\n theta+h == [lb/ub]\n => h = [lb/ub]-theta\n \"\"\"\n h_lb = numpy.maximum(h_lb, lb - theta)\n h_ub = numpy.minimum(h_ub, ub - theta)\n return h_lb, h_ub\n"
] | [
[
"numpy.all",
"numpy.maximum",
"numpy.minimum",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cowirihy/pymc3 | [
"f0b95773047af12f3c0ded04d707f02ddc4d4f6b",
"f0b95773047af12f3c0ded04d707f02ddc4d4f6b",
"f0b95773047af12f3c0ded04d707f02ddc4d4f6b",
"f0b95773047af12f3c0ded04d707f02ddc4d4f6b",
"f0b95773047af12f3c0ded04d707f02ddc4d4f6b"
] | [
"pymc3/tests/test_distributions_random.py",
"pymc3/model.py",
"pymc3/backends/ndarray.py",
"pymc3/blocking.py",
"pymc3/tests/test_data_container.py"
] | [
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport numpy.testing as npt\nimport scipy.stats as st\nfrom scipy.special import expit\nfrom scipy import linalg\nimport numpy.random as nr\nimport theano\n\nimport pymc3 as pm\nfrom pymc3.distributions.dist_math import clipped_beta_rvs\nfrom pymc3.distributions.distribution import (draw_values,\n _DrawValuesContext,\n _DrawValuesContextBlocker)\nfrom .helpers import SeededTest\nfrom .test_distributions import (\n build_model, Domain, product, R, Rplus, Rplusbig, Runif, Rplusdunif,\n Unit, Nat, NatSmall, I, Simplex, Vector, PdMatrix,\n PdMatrixChol, PdMatrixCholUpper, RealMatrix, RandomPdMatrix\n)\n\n\ndef pymc3_random(dist, paramdomains, ref_rand, valuedomain=Domain([0]),\n size=10000, alpha=0.05, fails=10, extra_args=None,\n model_args=None):\n if model_args is None:\n model_args = {}\n model = build_model(dist, valuedomain, paramdomains, extra_args)\n domains = paramdomains.copy()\n for pt in product(domains, n_samples=100):\n pt = pm.Point(pt, model=model)\n pt.update(model_args)\n p = alpha\n # Allow KS test to fail (i.e., the samples be different)\n # a certain number of times. Crude, but necessary.\n f = fails\n while p <= alpha and f > 0:\n s0 = model.named_vars['value'].random(size=size, point=pt)\n s1 = ref_rand(size=size, **pt)\n _, p = st.ks_2samp(np.atleast_1d(s0).flatten(),\n np.atleast_1d(s1).flatten())\n f -= 1\n assert p > alpha, str(pt)\n\n\ndef pymc3_random_discrete(dist, paramdomains,\n valuedomain=Domain([0]), ref_rand=None,\n size=100000, alpha=0.05, fails=20):\n model = build_model(dist, valuedomain, paramdomains)\n domains = paramdomains.copy()\n for pt in product(domains, n_samples=100):\n pt = pm.Point(pt, model=model)\n p = alpha\n # Allow Chisq test to fail (i.e., the samples be different)\n # a certain number of times.\n f = fails\n while p <= alpha and f > 0:\n o = model.named_vars['value'].random(size=size, point=pt)\n e = ref_rand(size=size, **pt)\n o = np.atleast_1d(o).flatten()\n e = np.atleast_1d(e).flatten()\n observed = dict(zip(*np.unique(o, return_counts=True)))\n expected = dict(zip(*np.unique(e, return_counts=True)))\n for e in expected.keys():\n expected[e] = (observed.get(e, 0), expected[e])\n k = np.array([v for v in expected.values()])\n if np.all(k[:, 0] == k[:, 1]):\n p = 1.\n else:\n _, p = st.chisquare(k[:, 0], k[:, 1])\n f -= 1\n assert p > alpha, str(pt)\n\n\nclass TestDrawValues(SeededTest):\n def test_draw_scalar_parameters(self):\n with pm.Model():\n y = pm.Normal('y1', mu=0., sigma=1.)\n mu, tau = draw_values([y.distribution.mu, y.distribution.tau])\n npt.assert_almost_equal(mu, 0)\n npt.assert_almost_equal(tau, 1)\n\n def test_draw_dependencies(self):\n with pm.Model():\n x = pm.Normal('x', mu=0., sigma=1.)\n exp_x = pm.Deterministic('exp_x', pm.math.exp(x))\n\n x, exp_x = draw_values([x, exp_x])\n npt.assert_almost_equal(np.exp(x), exp_x)\n\n def test_draw_order(self):\n with pm.Model():\n x = pm.Normal('x', 
mu=0., sigma=1.)\n exp_x = pm.Deterministic('exp_x', pm.math.exp(x))\n\n # Need to draw x before drawing log_x\n exp_x, x = draw_values([exp_x, x])\n npt.assert_almost_equal(np.exp(x), exp_x)\n\n def test_draw_point_replacement(self):\n with pm.Model():\n mu = pm.Normal('mu', mu=0., tau=1e-3)\n sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)\n y = pm.Normal('y', mu=mu, sigma=sigma)\n mu2, tau2 = draw_values([y.distribution.mu, y.distribution.tau],\n point={'mu': 5., 'sigma': 2.})\n npt.assert_almost_equal(mu2, 5)\n npt.assert_almost_equal(tau2, 1 / 2.**2)\n\n def test_random_sample_returns_nd_array(self):\n with pm.Model():\n mu = pm.Normal('mu', mu=0., tau=1e-3)\n sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)\n y = pm.Normal('y', mu=mu, sigma=sigma)\n mu, tau = draw_values([y.distribution.mu, y.distribution.tau])\n assert isinstance(mu, np.ndarray)\n assert isinstance(tau, np.ndarray)\n\n\nclass TestDrawValuesContext:\n def test_normal_context(self):\n with _DrawValuesContext() as context0:\n assert context0.parent is None\n context0.drawn_vars['root_test'] = 1\n with _DrawValuesContext() as context1:\n assert id(context1.drawn_vars) == id(context0.drawn_vars)\n assert context1.parent == context0\n with _DrawValuesContext() as context2:\n assert id(context2.drawn_vars) == id(context0.drawn_vars)\n assert context2.parent == context1\n context2.drawn_vars['leaf_test'] = 2\n assert context1.drawn_vars['leaf_test'] == 2\n context1.drawn_vars['root_test'] = 3\n assert context0.drawn_vars['root_test'] == 3\n assert context0.drawn_vars['leaf_test'] == 2\n\n def test_blocking_context(self):\n with _DrawValuesContext() as context0:\n assert context0.parent is None\n context0.drawn_vars['root_test'] = 1\n with _DrawValuesContext() as context1:\n assert id(context1.drawn_vars) == id(context0.drawn_vars)\n assert context1.parent == context0\n with _DrawValuesContextBlocker() as blocker:\n assert id(blocker.drawn_vars) != id(context0.drawn_vars)\n assert blocker.parent is None\n blocker.drawn_vars['root_test'] = 2\n with _DrawValuesContext() as context2:\n assert id(context2.drawn_vars) == id(blocker.drawn_vars)\n assert context2.parent == blocker\n context2.drawn_vars['root_test'] = 3\n context2.drawn_vars['leaf_test'] = 4\n assert blocker.drawn_vars['root_test'] == 3\n assert 'leaf_test' not in context1.drawn_vars\n assert context0.drawn_vars['root_test'] == 1\n\n\nclass BaseTestCases:\n class BaseTestCase(SeededTest):\n shape = 5\n\n def setup_method(self, *args, **kwargs):\n super().setup_method(*args, **kwargs)\n self.model = pm.Model()\n\n def get_random_variable(self, shape, with_vector_params=False, name=None):\n if with_vector_params:\n params = {key: value * np.ones(self.shape, dtype=np.dtype(type(value))) for\n key, value in self.params.items()}\n else:\n params = self.params\n if name is None:\n name = self.distribution.__name__\n with self.model:\n if shape is None:\n return self.distribution(name, transform=None, **params)\n else:\n try:\n return self.distribution(name, shape=shape, transform=None, **params)\n except TypeError:\n if np.sum(np.atleast_1d(shape)) == 0:\n pytest.skip(\"Timeseries must have positive shape\")\n raise\n\n @staticmethod\n def sample_random_variable(random_variable, size):\n try:\n return random_variable.random(size=size)\n except AttributeError:\n return random_variable.distribution.random(size=size)\n\n @pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)\n def test_scalar_parameter_shape(self, size):\n rv = 
self.get_random_variable(None)\n if size is None:\n expected = 1,\n else:\n expected = np.atleast_1d(size).tolist()\n actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape\n assert tuple(expected) == actual\n\n @pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)\n def test_scalar_shape(self, size):\n shape = 10\n rv = self.get_random_variable(shape)\n\n if size is None:\n expected = []\n else:\n expected = np.atleast_1d(size).tolist()\n expected.append(shape)\n actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape\n assert tuple(expected) == actual\n\n @pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)\n def test_parameters_1d_shape(self, size):\n rv = self.get_random_variable(self.shape, with_vector_params=True)\n if size is None:\n expected = []\n else:\n expected = np.atleast_1d(size).tolist()\n expected.append(self.shape)\n actual = self.sample_random_variable(rv, size).shape\n assert tuple(expected) == actual\n\n @pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)\n def test_broadcast_shape(self, size):\n broadcast_shape = (2 * self.shape, self.shape)\n rv = self.get_random_variable(broadcast_shape, with_vector_params=True)\n if size is None:\n expected = []\n else:\n expected = np.atleast_1d(size).tolist()\n expected.extend(broadcast_shape)\n actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape\n assert tuple(expected) == actual\n\n @pytest.mark.parametrize('shape', [(), (1,), (1, 1), (1, 2), (10, 10, 1), (10, 10, 2)], ids=str)\n def test_different_shapes_and_sample_sizes(self, shape):\n prefix = self.distribution.__name__\n\n rv = self.get_random_variable(shape, name=f'{prefix}_{shape}')\n for size in (None, 1, 5, (4, 5)):\n if size is None:\n s = []\n else:\n try:\n s = list(size)\n except TypeError:\n s = [size]\n if s == [1]:\n s = []\n if shape not in ((), (1,)):\n s.extend(shape)\n e = tuple(s)\n a = self.sample_random_variable(rv, size).shape\n assert e == a\n\n\nclass TestGaussianRandomWalk(BaseTestCases.BaseTestCase):\n distribution = pm.GaussianRandomWalk\n params = {'mu': 1., 'sigma': 1.}\n\n @pytest.mark.xfail(reason=\"Supporting this makes a nasty API\")\n def test_broadcast_shape(self):\n super().test_broadcast_shape()\n\nclass TestNormal(BaseTestCases.BaseTestCase):\n distribution = pm.Normal\n params = {'mu': 0., 'tau': 1.}\n\nclass TestTruncatedNormal(BaseTestCases.BaseTestCase):\n distribution = pm.TruncatedNormal\n params = {'mu': 0., 'tau': 1., 'lower': -0.5, 'upper': 0.5}\n\nclass TestTruncatedNormalLower(BaseTestCases.BaseTestCase):\n distribution = pm.TruncatedNormal\n params = {'mu': 0., 'tau': 1., 'lower': -0.5}\n\nclass TestTruncatedNormalUpper(BaseTestCases.BaseTestCase):\n distribution = pm.TruncatedNormal\n params = {'mu': 0., 'tau': 1., 'upper': 0.5}\n\nclass TestSkewNormal(BaseTestCases.BaseTestCase):\n distribution = pm.SkewNormal\n params = {'mu': 0., 'sigma': 1., 'alpha': 5.}\n\n\nclass TestHalfNormal(BaseTestCases.BaseTestCase):\n distribution = pm.HalfNormal\n params = {'tau': 1.}\n\n\nclass TestUniform(BaseTestCases.BaseTestCase):\n distribution = pm.Uniform\n params = {'lower': 0., 'upper': 1.}\n\n\nclass TestTriangular(BaseTestCases.BaseTestCase):\n distribution = pm.Triangular\n params = {'c': 0.5, 'lower': 0., 'upper': 1.}\n\n\nclass TestWald(BaseTestCases.BaseTestCase):\n distribution = pm.Wald\n params = {'mu': 1., 'lam': 1., 'alpha': 0.}\n\n\nclass TestBeta(BaseTestCases.BaseTestCase):\n distribution = pm.Beta\n params = {'alpha': 1., 'beta': 1.}\n\n\nclass 
TestKumaraswamy(BaseTestCases.BaseTestCase):\n distribution = pm.Kumaraswamy\n params = {'a': 1., 'b': 1.}\n\n\nclass TestExponential(BaseTestCases.BaseTestCase):\n distribution = pm.Exponential\n params = {'lam': 1.}\n\n\nclass TestLaplace(BaseTestCases.BaseTestCase):\n distribution = pm.Laplace\n params = {'mu': 1., 'b': 1.}\n\n\nclass TestLognormal(BaseTestCases.BaseTestCase):\n distribution = pm.Lognormal\n params = {'mu': 1., 'tau': 1.}\n\n\nclass TestStudentT(BaseTestCases.BaseTestCase):\n distribution = pm.StudentT\n params = {'nu': 5., 'mu': 0., 'lam': 1.}\n\n\nclass TestPareto(BaseTestCases.BaseTestCase):\n distribution = pm.Pareto\n params = {'alpha': 0.5, 'm': 1.}\n\n\nclass TestCauchy(BaseTestCases.BaseTestCase):\n distribution = pm.Cauchy\n params = {'alpha': 1., 'beta': 1.}\n\n\nclass TestHalfCauchy(BaseTestCases.BaseTestCase):\n distribution = pm.HalfCauchy\n params = {'beta': 1.}\n\n\nclass TestGamma(BaseTestCases.BaseTestCase):\n distribution = pm.Gamma\n params = {'alpha': 1., 'beta': 1.}\n\n\nclass TestInverseGamma(BaseTestCases.BaseTestCase):\n distribution = pm.InverseGamma\n params = {'alpha': 0.5, 'beta': 0.5}\n\n\nclass TestChiSquared(BaseTestCases.BaseTestCase):\n distribution = pm.ChiSquared\n params = {'nu': 2.}\n\n\nclass TestWeibull(BaseTestCases.BaseTestCase):\n distribution = pm.Weibull\n params = {'alpha': 1., 'beta': 1.}\n\n\nclass TestExGaussian(BaseTestCases.BaseTestCase):\n distribution = pm.ExGaussian\n params = {'mu': 0., 'sigma': 1., 'nu': 1.}\n\n\nclass TestVonMises(BaseTestCases.BaseTestCase):\n distribution = pm.VonMises\n params = {'mu': 0., 'kappa': 1.}\n\n\nclass TestGumbel(BaseTestCases.BaseTestCase):\n distribution = pm.Gumbel\n params = {'mu': 0., 'beta': 1.}\n\n\nclass TestLogistic(BaseTestCases.BaseTestCase):\n distribution = pm.Logistic\n params = {'mu': 0., 's': 1.}\n\n\nclass TestLogitNormal(BaseTestCases.BaseTestCase):\n distribution = pm.LogitNormal\n params = {'mu': 0., 'sigma': 1.}\n\n\nclass TestBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.Binomial\n params = {'n': 5, 'p': 0.5}\n\n\nclass TestBetaBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.BetaBinomial\n params = {'n': 5, 'alpha': 1., 'beta': 1.}\n\n\nclass TestBernoulli(BaseTestCases.BaseTestCase):\n distribution = pm.Bernoulli\n params = {'p': 0.5}\n\n\nclass TestDiscreteWeibull(BaseTestCases.BaseTestCase):\n distribution = pm.DiscreteWeibull\n params = {'q': 0.25, 'beta': 2.}\n\n\nclass TestPoisson(BaseTestCases.BaseTestCase):\n distribution = pm.Poisson\n params = {'mu': 1.}\n\n\nclass TestNegativeBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.NegativeBinomial\n params = {'mu': 1., 'alpha': 1.}\n\n\nclass TestConstant(BaseTestCases.BaseTestCase):\n distribution = pm.Constant\n params = {'c': 3}\n\n\nclass TestZeroInflatedPoisson(BaseTestCases.BaseTestCase):\n distribution = pm.ZeroInflatedPoisson\n params = {'theta': 1., 'psi': 0.3}\n\n\nclass TestZeroInflatedNegativeBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.ZeroInflatedNegativeBinomial\n params = {'mu': 1., 'alpha': 1., 'psi': 0.3}\n\nclass TestZeroInflatedBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.ZeroInflatedBinomial\n params = {'n': 10, 'p': 0.6, 'psi': 0.3}\n\nclass TestDiscreteUniform(BaseTestCases.BaseTestCase):\n distribution = pm.DiscreteUniform\n params = {'lower': 0., 'upper': 10.}\n\n\nclass TestGeometric(BaseTestCases.BaseTestCase):\n distribution = pm.Geometric\n params = {'p': 0.5}\n\n \nclass TestMoyal(BaseTestCases.BaseTestCase):\n distribution 
= pm.Moyal\n params = {'mu': 0., 'sigma': 1.}\n\n \nclass TestCategorical(BaseTestCases.BaseTestCase):\n distribution = pm.Categorical\n params = {'p': np.ones(BaseTestCases.BaseTestCase.shape)}\n\n def get_random_variable(self, shape, with_vector_params=False, **kwargs): # don't transform categories\n return super().get_random_variable(shape, with_vector_params=False, **kwargs)\n\n def test_probability_vector_shape(self):\n \"\"\"Check that if a 2d array of probabilities are passed to categorical correct shape is returned\"\"\"\n p = np.ones((10, 5))\n assert pm.Categorical.dist(p=p).random().shape == (10,)\n assert pm.Categorical.dist(p=p).random(size=4).shape == (4, 10)\n p = np.ones((3, 7, 5))\n assert pm.Categorical.dist(p=p).random().shape == (3, 7)\n assert pm.Categorical.dist(p=p).random(size=4).shape == (4, 3, 7)\n\n\nclass TestScalarParameterSamples(SeededTest):\n def test_bounded(self):\n # A bit crude...\n BoundedNormal = pm.Bound(pm.Normal, upper=0)\n\n def ref_rand(size, tau):\n return -st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)\n pymc3_random(BoundedNormal, {'tau': Rplus}, ref_rand=ref_rand)\n\n def test_uniform(self):\n def ref_rand(size, lower, upper):\n return st.uniform.rvs(size=size, loc=lower, scale=upper - lower)\n pymc3_random(pm.Uniform, {'lower': -Rplus, 'upper': Rplus}, ref_rand=ref_rand)\n\n def test_normal(self):\n def ref_rand(size, mu, sigma):\n return st.norm.rvs(size=size, loc=mu, scale=sigma)\n pymc3_random(pm.Normal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)\n\n def test_truncated_normal(self):\n def ref_rand(size, mu, sigma, lower, upper):\n return st.truncnorm.rvs((lower - mu) / sigma, (upper - mu) / sigma, size=size, loc=mu, scale=sigma)\n pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'lower': -Rplusbig, 'upper': Rplusbig},\n ref_rand=ref_rand)\n\n def test_truncated_normal_lower(self):\n def ref_rand(size, mu, sigma, lower):\n return st.truncnorm.rvs((lower - mu) / sigma, np.inf, size=size, loc=mu, scale=sigma)\n pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'lower': -Rplusbig},\n ref_rand=ref_rand)\n\n def test_truncated_normal_upper(self):\n def ref_rand(size, mu, sigma, upper):\n return st.truncnorm.rvs(-np.inf, (upper - mu) / sigma, size=size, loc=mu, scale=sigma)\n pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'upper': Rplusbig},\n ref_rand=ref_rand)\n\n def test_skew_normal(self):\n def ref_rand(size, alpha, mu, sigma):\n return st.skewnorm.rvs(size=size, a=alpha, loc=mu, scale=sigma)\n pymc3_random(pm.SkewNormal, {'mu': R, 'sigma': Rplus, 'alpha': R}, ref_rand=ref_rand)\n\n def test_half_normal(self):\n def ref_rand(size, tau):\n return st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)\n pymc3_random(pm.HalfNormal, {'tau': Rplus}, ref_rand=ref_rand)\n\n def test_wald(self):\n # Cannot do anything too exciting as scipy wald is a\n # location-scale model of the *standard* wald with mu=1 and lam=1\n def ref_rand(size, mu, lam, alpha):\n return st.wald.rvs(size=size, loc=alpha)\n pymc3_random(pm.Wald,\n {'mu': Domain([1., 1., 1.]), 'lam': Domain(\n [1., 1., 1.]), 'alpha': Rplus},\n ref_rand=ref_rand)\n\n def test_beta(self):\n def ref_rand(size, alpha, beta):\n return clipped_beta_rvs(a=alpha, b=beta, size=size)\n pymc3_random(pm.Beta, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)\n\n def test_exponential(self):\n def ref_rand(size, lam):\n return nr.exponential(scale=1. 
/ lam, size=size)\n pymc3_random(pm.Exponential, {'lam': Rplus}, ref_rand=ref_rand)\n\n def test_laplace(self):\n def ref_rand(size, mu, b):\n return st.laplace.rvs(mu, b, size=size)\n pymc3_random(pm.Laplace, {'mu': R, 'b': Rplus}, ref_rand=ref_rand)\n\n def test_lognormal(self):\n def ref_rand(size, mu, tau):\n return np.exp(mu + (tau ** -0.5) * st.norm.rvs(loc=0., scale=1., size=size))\n pymc3_random(pm.Lognormal, {'mu': R, 'tau': Rplusbig}, ref_rand=ref_rand)\n\n def test_student_t(self):\n def ref_rand(size, nu, mu, lam):\n return st.t.rvs(nu, mu, lam**-.5, size=size)\n pymc3_random(pm.StudentT, {'nu': Rplus, 'mu': R, 'lam': Rplus}, ref_rand=ref_rand)\n\n def test_cauchy(self):\n def ref_rand(size, alpha, beta):\n return st.cauchy.rvs(alpha, beta, size=size)\n pymc3_random(pm.Cauchy, {'alpha': R, 'beta': Rplusbig}, ref_rand=ref_rand)\n\n def test_half_cauchy(self):\n def ref_rand(size, beta):\n return st.halfcauchy.rvs(scale=beta, size=size)\n pymc3_random(pm.HalfCauchy, {'beta': Rplusbig}, ref_rand=ref_rand)\n\n def test_gamma_alpha_beta(self):\n def ref_rand(size, alpha, beta):\n return st.gamma.rvs(alpha, scale=1. / beta, size=size)\n pymc3_random(pm.Gamma, {'alpha': Rplusbig, 'beta': Rplusbig}, ref_rand=ref_rand)\n\n def test_gamma_mu_sigma(self):\n def ref_rand(size, mu, sigma):\n return st.gamma.rvs(mu**2 / sigma**2, scale=sigma ** 2 / mu, size=size)\n pymc3_random(pm.Gamma, {'mu': Rplusbig, 'sigma': Rplusbig}, ref_rand=ref_rand)\n\n def test_inverse_gamma(self):\n def ref_rand(size, alpha, beta):\n return st.invgamma.rvs(a=alpha, scale=beta, size=size)\n pymc3_random(pm.InverseGamma, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)\n\n def test_pareto(self):\n def ref_rand(size, alpha, m):\n return st.pareto.rvs(alpha, scale=m, size=size)\n pymc3_random(pm.Pareto, {'alpha': Rplusbig, 'm': Rplusbig}, ref_rand=ref_rand)\n\n def test_ex_gaussian(self):\n def ref_rand(size, mu, sigma, nu):\n return nr.normal(mu, sigma, size=size) + nr.exponential(scale=nu, size=size)\n pymc3_random(pm.ExGaussian, {'mu': R, 'sigma': Rplus, 'nu': Rplus}, ref_rand=ref_rand)\n\n def test_vonmises(self):\n def ref_rand(size, mu, kappa):\n return st.vonmises.rvs(size=size, loc=mu, kappa=kappa)\n pymc3_random(pm.VonMises, {'mu': R, 'kappa': Rplus}, ref_rand=ref_rand)\n\n def test_triangular(self):\n def ref_rand(size, lower, upper, c):\n scale = upper - lower\n c_ = (c - lower) / scale\n return st.triang.rvs(size=size, loc=lower, scale=scale, c=c_)\n pymc3_random(pm.Triangular, {'lower': Runif, 'upper': Runif + 3, 'c': Runif + 1}, ref_rand=ref_rand)\n\n def test_flat(self):\n with pm.Model():\n f = pm.Flat('f')\n with pytest.raises(ValueError):\n f.random(1)\n\n def test_half_flat(self):\n with pm.Model():\n f = pm.HalfFlat('f')\n with pytest.raises(ValueError):\n f.random(1)\n\n def test_binomial(self):\n pymc3_random_discrete(pm.Binomial, {'n': Nat, 'p': Unit}, ref_rand=st.binom.rvs)\n\n def test_beta_binomial(self):\n pymc3_random_discrete(pm.BetaBinomial, {'n': Nat, 'alpha': Rplus, 'beta': Rplus},\n ref_rand=self._beta_bin)\n\n def _beta_bin(self, n, alpha, beta, size=None):\n return st.binom.rvs(n, st.beta.rvs(a=alpha, b=beta, size=size))\n\n def test_bernoulli(self):\n pymc3_random_discrete(pm.Bernoulli, {'p': Unit},\n ref_rand=lambda size, p=None: st.bernoulli.rvs(p, size=size))\n\n def test_poisson(self):\n pymc3_random_discrete(pm.Poisson, {'mu': Rplusbig}, size=500, ref_rand=st.poisson.rvs)\n\n def test_negative_binomial(self):\n def ref_rand(size, alpha, mu):\n return st.nbinom.rvs(alpha, 
alpha / (mu + alpha), size=size)\n pymc3_random_discrete(pm.NegativeBinomial, {'mu': Rplusbig, 'alpha': Rplusbig},\n size=100, fails=50, ref_rand=ref_rand)\n\n def test_geometric(self):\n pymc3_random_discrete(pm.Geometric, {'p': Unit}, size=500, fails=50, ref_rand=nr.geometric)\n\n def test_discrete_uniform(self):\n def ref_rand(size, lower, upper):\n return st.randint.rvs(lower, upper + 1, size=size)\n pymc3_random_discrete(pm.DiscreteUniform, {'lower': -NatSmall, 'upper': NatSmall},\n ref_rand=ref_rand)\n\n def test_discrete_weibull(self):\n def ref_rand(size, q, beta):\n u = np.random.uniform(size=size)\n\n return np.ceil(np.power(np.log(1 - u) / np.log(q), 1. / beta)) - 1\n\n pymc3_random_discrete(pm.DiscreteWeibull, {'q': Unit, 'beta': Rplusdunif},\n ref_rand=ref_rand)\n\n @pytest.mark.parametrize('s', [2, 3, 4])\n def test_categorical_random(self, s):\n def ref_rand(size, p):\n return nr.choice(np.arange(p.shape[0]), p=p, size=size)\n pymc3_random_discrete(pm.Categorical, {'p': Simplex(s)}, ref_rand=ref_rand)\n\n def test_constant_dist(self):\n def ref_rand(size, c):\n return c * np.ones(size, dtype=int)\n pymc3_random_discrete(pm.Constant, {'c': I}, ref_rand=ref_rand)\n\n def test_mv_normal(self):\n def ref_rand(size, mu, cov):\n return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)\n\n def ref_rand_tau(size, mu, tau):\n return ref_rand(size, mu, linalg.inv(tau))\n\n def ref_rand_chol(size, mu, chol):\n return ref_rand(size, mu, np.dot(chol, chol.T))\n\n def ref_rand_uchol(size, mu, chol):\n return ref_rand(size, mu, np.dot(chol.T, chol))\n\n for n in [2, 3]:\n pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'cov': PdMatrix(n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)\n pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'tau': PdMatrix(n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_tau)\n pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'chol': PdMatrixChol(n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_chol)\n pymc3_random(\n pm.MvNormal,\n {'mu': Vector(R, n), 'chol': PdMatrixCholUpper(n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_uchol,\n extra_args={'lower': False}\n )\n\n def test_matrix_normal(self):\n def ref_rand(size, mu, rowcov, colcov):\n return st.matrix_normal.rvs(mean=mu, rowcov=rowcov, colcov=colcov, size=size)\n\n # def ref_rand_tau(size, mu, tau):\n # return ref_rand(size, mu, linalg.inv(tau))\n\n def ref_rand_chol(size, mu, rowchol, colchol):\n return ref_rand(size, mu, rowcov=np.dot(rowchol, rowchol.T),\n colcov=np.dot(colchol, colchol.T))\n\n def ref_rand_uchol(size, mu, rowchol, colchol):\n return ref_rand(size, mu, rowcov=np.dot(rowchol.T, rowchol),\n colcov=np.dot(colchol.T, colchol))\n\n for n in [2, 3]:\n pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowcov': PdMatrix(n), 'colcov': PdMatrix(n)},\n size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand)\n # pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'tau': PdMatrix(n)},\n # size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_tau)\n pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowchol': PdMatrixChol(n), 'colchol': PdMatrixChol(n)},\n size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_chol)\n # pymc3_random(\n # pm.MvNormal,\n # {'mu': RealMatrix(n, n), 'rowchol': PdMatrixCholUpper(n), 'colchol': PdMatrixCholUpper(n)},\n # size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_uchol,\n # extra_args={'lower': False}\n # )\n\n def test_kronecker_normal(self):\n def ref_rand(size, mu, covs, sigma):\n cov = 
pm.math.kronecker(covs[0], covs[1]).eval()\n cov += sigma**2 * np.identity(cov.shape[0])\n return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)\n\n def ref_rand_chol(size, mu, chols, sigma):\n covs = [np.dot(chol, chol.T) for chol in chols]\n return ref_rand(size, mu, covs, sigma)\n\n def ref_rand_evd(size, mu, evds, sigma):\n covs = []\n for eigs, Q in evds:\n covs.append(np.dot(Q, np.dot(np.diag(eigs), Q.T)))\n return ref_rand(size, mu, covs, sigma)\n\n sizes = [2, 3]\n sigmas = [0, 1]\n for n, sigma in zip(sizes, sigmas):\n N = n**2\n covs = [RandomPdMatrix(n), RandomPdMatrix(n)]\n chols = list(map(np.linalg.cholesky, covs))\n evds = list(map(np.linalg.eigh, covs))\n dom = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)\n mu = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)\n\n std_args = {'mu': mu}\n cov_args = {'covs': covs}\n chol_args = {'chols': chols}\n evd_args = {'evds': evds}\n if sigma is not None and sigma != 0:\n std_args['sigma'] = Domain([sigma], edges=(None, None))\n else:\n for args in [cov_args, chol_args, evd_args]:\n args['sigma'] = sigma\n\n pymc3_random(\n pm.KroneckerNormal, std_args, valuedomain=dom,\n ref_rand=ref_rand, extra_args=cov_args, model_args=cov_args)\n pymc3_random(\n pm.KroneckerNormal, std_args, valuedomain=dom,\n ref_rand=ref_rand_chol, extra_args=chol_args,\n model_args=chol_args)\n pymc3_random(\n pm.KroneckerNormal, std_args, valuedomain=dom,\n ref_rand=ref_rand_evd, extra_args=evd_args,\n model_args=evd_args)\n\n def test_mv_t(self):\n def ref_rand(size, nu, Sigma, mu):\n normal = st.multivariate_normal.rvs(cov=Sigma, size=size).T\n chi2 = st.chi2.rvs(df=nu, size=size)\n return mu + np.sqrt(nu) * (normal / chi2).T\n for n in [2, 3]:\n pymc3_random(pm.MvStudentT,\n {'nu': Domain([5, 10, 25, 50]), 'Sigma': PdMatrix(\n n), 'mu': Vector(R, n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)\n\n def test_dirichlet(self):\n def ref_rand(size, a):\n return st.dirichlet.rvs(a, size=size)\n for n in [2, 3]:\n pymc3_random(pm.Dirichlet, {'a': Vector(Rplus, n)},\n valuedomain=Simplex(n), size=100, ref_rand=ref_rand)\n\n def test_multinomial(self):\n def ref_rand(size, p, n):\n return nr.multinomial(pvals=p, n=n, size=size)\n for n in [2, 3]:\n pymc3_random_discrete(pm.Multinomial, {'p': Simplex(n), 'n': Nat},\n valuedomain=Vector(Nat, n), size=100, ref_rand=ref_rand)\n\n def test_gumbel(self):\n def ref_rand(size, mu, beta):\n return st.gumbel_r.rvs(loc=mu, scale=beta, size=size)\n pymc3_random(pm.Gumbel, {'mu': R, 'beta': Rplus}, ref_rand=ref_rand)\n\n def test_logistic(self):\n def ref_rand(size, mu, s):\n return st.logistic.rvs(loc=mu, scale=s, size=size)\n pymc3_random(pm.Logistic, {'mu': R, 's': Rplus}, ref_rand=ref_rand)\n\n def test_logitnormal(self):\n def ref_rand(size, mu, sigma):\n return expit(st.norm.rvs(loc=mu, scale=sigma, size=size))\n pymc3_random(pm.LogitNormal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)\n\n def test_moyal(self):\n def ref_rand(size, mu, sigma):\n return st.moyal.rvs(loc=mu, scale=sigma, size=size)\n pymc3_random(pm.Moyal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)\n\n \n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_interpolated(self):\n for mu in R.vals:\n for sigma in Rplus.vals:\n #pylint: disable=cell-var-from-loop\n def ref_rand(size):\n return st.norm.rvs(loc=mu, scale=sigma, size=size)\n\n class TestedInterpolated (pm.Interpolated):\n\n def __init__(self, **kwargs):\n x_points = np.linspace(mu - 5 * 
sigma, mu + 5 * sigma, 100)\n pdf_points = st.norm.pdf(x_points, loc=mu, scale=sigma)\n super().__init__(\n x_points=x_points,\n pdf_points=pdf_points,\n **kwargs\n )\n\n pymc3_random(TestedInterpolated, {}, ref_rand=ref_rand)\n\n @pytest.mark.skip('Wishart random sampling not implemented.\\n'\n 'See https://github.com/pymc-devs/pymc3/issues/538')\n def test_wishart(self):\n # Wishart non current recommended for use:\n # https://github.com/pymc-devs/pymc3/issues/538\n # for n in [2, 3]:\n # pymc3_random_discrete(Wisvaluedomainhart,\n # {'n': Domain([2, 3, 4, 2000]) , 'V': PdMatrix(n) },\n # valuedomain=PdMatrix(n),\n # ref_rand=lambda n=None, V=None, size=None: \\\n # st.wishart(V, df=n, size=size))\n pass\n\n def test_lkj(self):\n for n in [2, 10, 50]:\n #pylint: disable=cell-var-from-loop\n shape = n*(n-1)//2\n\n def ref_rand(size, eta):\n beta = eta - 1 + n/2\n return (st.beta.rvs(size=(size, shape), a=beta, b=beta)-.5)*2\n\n class TestedLKJCorr (pm.LKJCorr):\n\n def __init__(self, **kwargs):\n kwargs.pop('shape', None)\n super().__init__(n=n, **kwargs)\n\n pymc3_random(TestedLKJCorr,\n {'eta': Domain([1., 10., 100.])},\n size=10000//n,\n ref_rand=ref_rand)\n\n def test_normalmixture(self):\n def ref_rand(size, w, mu, sigma):\n component = np.random.choice(w.size, size=size, p=w)\n return np.random.normal(mu[component], sigma[component], size=size)\n\n pymc3_random(pm.NormalMixture, {'w': Simplex(2),\n 'mu': Domain([[.05, 2.5], [-5., 1.]], edges=(None, None)),\n 'sigma': Domain([[1, 1], [1.5, 2.]], edges=(None, None))},\n extra_args={'comp_shape': 2},\n size=1000,\n ref_rand=ref_rand)\n pymc3_random(pm.NormalMixture, {'w': Simplex(3),\n 'mu': Domain([[-5., 1., 2.5]], edges=(None, None)),\n 'sigma': Domain([[1.5, 2., 3.]], edges=(None, None))},\n extra_args={'comp_shape': 3},\n size=1000,\n ref_rand=ref_rand)\n\n\ndef test_mixture_random_shape():\n # test the shape broadcasting in mixture random\n y = np.concatenate([nr.poisson(5, size=10),\n nr.poisson(9, size=10)])\n with pm.Model() as m:\n comp0 = pm.Poisson.dist(mu=np.ones(2))\n w0 = pm.Dirichlet('w0', a=np.ones(2), shape=(2,))\n like0 = pm.Mixture('like0',\n w=w0,\n comp_dists=comp0,\n observed=y)\n\n comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),\n shape=(20, 2))\n w1 = pm.Dirichlet('w1', a=np.ones(2), shape=(2,))\n like1 = pm.Mixture('like1',\n w=w1,\n comp_dists=comp1,\n observed=y)\n\n comp2 = pm.Poisson.dist(mu=np.ones(2))\n w2 = pm.Dirichlet('w2',\n a=np.ones(2),\n shape=(20, 2))\n like2 = pm.Mixture('like2',\n w=w2,\n comp_dists=comp2,\n observed=y)\n\n comp3 = pm.Poisson.dist(mu=np.ones(2),\n shape=(20, 2))\n w3 = pm.Dirichlet('w3',\n a=np.ones(2),\n shape=(20, 2))\n like3 = pm.Mixture('like3',\n w=w3,\n comp_dists=comp3,\n observed=y)\n\n rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],\n point=m.test_point,\n size=100)\n assert rand0.shape == (100, 20)\n assert rand1.shape == (100, 20)\n assert rand2.shape == (100, 20)\n assert rand3.shape == (100, 20)\n\n with m:\n ppc = pm.sample_posterior_predictive([m.test_point], samples=200)\n assert ppc['like0'].shape == (200, 20)\n assert ppc['like1'].shape == (200, 20)\n assert ppc['like2'].shape == (200, 20)\n assert ppc['like3'].shape == (200, 20)\n\[email protected]\ndef test_mixture_random_shape_fast():\n # test the shape broadcasting in mixture random\n y = np.concatenate([nr.poisson(5, size=10),\n nr.poisson(9, size=10)])\n with pm.Model() as m:\n comp0 = pm.Poisson.dist(mu=np.ones(2))\n w0 = pm.Dirichlet('w0', a=np.ones(2), shape=(2,))\n like0 = 
pm.Mixture('like0',\n w=w0,\n comp_dists=comp0,\n observed=y)\n\n comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),\n shape=(20, 2))\n w1 = pm.Dirichlet('w1', a=np.ones(2), shape=(2,))\n like1 = pm.Mixture('like1',\n w=w1,\n comp_dists=comp1,\n observed=y)\n\n comp2 = pm.Poisson.dist(mu=np.ones(2))\n w2 = pm.Dirichlet('w2',\n a=np.ones(2),\n shape=(20, 2))\n like2 = pm.Mixture('like2',\n w=w2,\n comp_dists=comp2,\n observed=y)\n\n comp3 = pm.Poisson.dist(mu=np.ones(2),\n shape=(20, 2))\n w3 = pm.Dirichlet('w3',\n a=np.ones(2),\n shape=(20, 2))\n like3 = pm.Mixture('like3',\n w=w3,\n comp_dists=comp3,\n observed=y)\n\n rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],\n point=m.test_point,\n size=100)\n assert rand0.shape == (100, 20)\n assert rand1.shape == (100, 20)\n assert rand2.shape == (100, 20)\n assert rand3.shape == (100, 20)\n\n # I *think* that the mixture means that this is not going to work,\n # but I could be wrong. [2019/08/22:rpg]\n with m:\n ppc = pm.fast_sample_posterior_predictive([m.test_point], samples=200)\n assert ppc['like0'].shape == (200, 20)\n assert ppc['like1'].shape == (200, 20)\n assert ppc['like2'].shape == (200, 20)\n assert ppc['like3'].shape == (200, 20)\n\n\n\nclass TestDensityDist():\n @pytest.mark.parametrize(\"shape\", [(), (3,), (3, 2)], ids=str)\n def test_density_dist_with_random_sampleable(self, shape):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1, shape=shape)\n obs = pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100, *shape),\n shape=shape,\n random=normal_dist.random)\n trace = pm.sample(100)\n\n samples = 500\n size = 100\n ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model, size=size)\n assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape\n\n # ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=size)\n # assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape\n\n\n @pytest.mark.parametrize(\"shape\", [(), (3,), (3, 2)], ids=str)\n def test_density_dist_with_random_sampleable_failure(self, shape):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1, shape=shape)\n pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100, *shape),\n shape=shape,\n random=normal_dist.random,\n wrap_random_with_dist_shape=False\n )\n trace = pm.sample(100)\n\n samples = 500\n with pytest.raises(RuntimeError):\n pm.sample_posterior_predictive(trace, samples=samples, model=model, size=100)\n\n with pytest.raises((TypeError, RuntimeError)):\n pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=100)\n\n\n @pytest.mark.parametrize(\"shape\", [(), (3,), (3, 2)], ids=str)\n def test_density_dist_with_random_sampleable_hidden_error(self, shape):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1, shape=shape)\n obs = pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100, *shape),\n shape=shape,\n random=normal_dist.random,\n wrap_random_with_dist_shape=False,\n check_shape_in_random=False\n )\n trace = pm.sample(100)\n\n samples = 500\n ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model)\n assert len(ppc['density_dist']) == samples\n assert ((samples,) + obs.distribution.shape) != ppc['density_dist'].shape\n\n ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, 
model=model)\n assert len(ppc['density_dist']) == samples\n assert ((samples,) + obs.distribution.shape) != ppc['density_dist'].shape\n\n\n def test_density_dist_with_random_sampleable_handcrafted_success(self):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1)\n rvs = pm.Normal.dist(mu, 1, shape=100).random\n obs = pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100),\n random=rvs,\n wrap_random_with_dist_shape=False\n )\n trace = pm.sample(100)\n\n samples = 500\n size = 100\n ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model, size=size)\n assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape\n\n @pytest.mark.xfail\n def test_density_dist_with_random_sampleable_handcrafted_success_fast(self):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1)\n rvs = pm.Normal.dist(mu, 1, shape=100).random\n obs = pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100),\n random=rvs,\n wrap_random_with_dist_shape=False\n )\n trace = pm.sample(100)\n\n samples = 500\n size = 100\n\n ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=size)\n assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape\n\n\n def test_density_dist_without_random_not_sampleable(self):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1)\n pm.DensityDist('density_dist', normal_dist.logp, observed=np.random.randn(100))\n trace = pm.sample(100)\n\n samples = 500\n with pytest.raises(ValueError):\n pm.sample_posterior_predictive(trace, samples=samples, model=model, size=100)\n\n with pytest.raises((TypeError, ValueError)):\n pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=100)\n\n\nclass TestNestedRandom(SeededTest):\n def build_model(self, distribution, shape, nested_rvs_info):\n with pm.Model() as model:\n nested_rvs = {}\n for rv_name, info in nested_rvs_info.items():\n try:\n value, nested_shape = info\n loc = 0.\n except ValueError:\n value, nested_shape, loc = info\n if value is None:\n nested_rvs[rv_name] = pm.Uniform(\n rv_name,\n 0 + loc,\n 1 + loc,\n shape=nested_shape,\n )\n else:\n nested_rvs[rv_name] = value * np.ones(nested_shape)\n rv = distribution(\n \"target\",\n shape=shape,\n **nested_rvs,\n )\n return model, rv, nested_rvs\n\n def sample_prior(\n self,\n distribution,\n shape,\n nested_rvs_info,\n prior_samples\n ):\n model, rv, nested_rvs = self.build_model(\n distribution,\n shape,\n nested_rvs_info,\n )\n with model:\n return pm.sample_prior_predictive(prior_samples)\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"mu\", \"alpha\"],\n [\n [10, (3,), (None, tuple()), (None, (3,))],\n [10, (3,), (None, (3,)), (None, tuple())],\n [10, (4, 3,), (None, (3,)), (None, (3,))],\n [10, (4, 3,), (None, (3,)), (None, (4, 3))],\n ],\n ids=str,\n )\n def test_NegativeBinomial(\n self,\n prior_samples,\n shape,\n mu,\n alpha,\n ):\n prior = self.sample_prior(\n distribution=pm.NegativeBinomial,\n shape=shape,\n nested_rvs_info=dict(mu=mu, alpha=alpha),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"psi\", \"mu\", \"alpha\"],\n [\n [10, (3,), (0.5, tuple()), (None, tuple()), (None, (3,))],\n [10, (3,), (0.5, (3,)), (None, tuple()), (None, (3,))],\n [10, (3,), (0.5, tuple()), (None, 
(3,)), (None, tuple())],\n [10, (3,), (0.5, (3,)), (None, (3,)), (None, tuple())],\n [10, (4, 3,), (0.5, (3,)), (None, (3,)), (None, (3,))],\n [10, (4, 3,), (0.5, (3,)), (None, (3,)), (None, (4, 3))],\n ],\n ids=str,\n )\n def test_ZeroInflatedNegativeBinomial(\n self,\n prior_samples,\n shape,\n psi,\n mu,\n alpha,\n ):\n prior = self.sample_prior(\n distribution=pm.ZeroInflatedNegativeBinomial,\n shape=shape,\n nested_rvs_info=dict(psi=psi, mu=mu, alpha=alpha),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"nu\", \"sigma\"],\n [\n [10, (3,), (None, tuple()), (None, (3,))],\n [10, (3,), (None, tuple()), (None, (3,))],\n [10, (3,), (None, (3,)), (None, tuple())],\n [10, (3,), (None, (3,)), (None, tuple())],\n [10, (4, 3,), (None, (3,)), (None, (3,))],\n [10, (4, 3,), (None, (3,)), (None, (4, 3))],\n ],\n ids=str,\n )\n def test_Rice(\n self,\n prior_samples,\n shape,\n nu,\n sigma,\n ):\n prior = self.sample_prior(\n distribution=pm.Rice,\n shape=shape,\n nested_rvs_info=dict(nu=nu, sigma=sigma),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"mu\", \"sigma\", \"lower\", \"upper\"],\n [\n [10, (3,), (None, tuple()), (1., tuple()), (None, tuple(), -1), (None, (3,))],\n [10, (3,), (None, tuple()), (1., tuple()), (None, tuple(), -1), (None, (3,))],\n [10, (3,), (None, tuple()), (1., tuple()), (None, (3,), -1), (None, tuple())],\n [10, (3,), (None, tuple()), (1., tuple()), (None, (3,), -1), (None, tuple())],\n [10, (4, 3,), (None, (3,)), (1., tuple()), (None, (3,), -1), (None, (3,))],\n [10, (4, 3,), (None, (3,)), (1., tuple()), (None, (3,), -1), (None, (4, 3))],\n [10, (3,), (0., tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))],\n [10, (3,), (0., tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))],\n [10, (3,), (0., tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())],\n [10, (3,), (0., tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())],\n [10, (4, 3,), (0., tuple()), (None, (3,)), (None, (3,), -1), (None, (3,))],\n [10, (4, 3,), (0., tuple()), (None, (3,)), (None, (3,), -1), (None, (4, 3))],\n ],\n ids=str,\n )\n def test_TruncatedNormal(\n self,\n prior_samples,\n shape,\n mu,\n sigma,\n lower,\n upper,\n ):\n prior = self.sample_prior(\n distribution=pm.TruncatedNormal,\n shape=shape,\n nested_rvs_info=dict(mu=mu, sigma=sigma, lower=lower, upper=upper),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"c\", \"lower\", \"upper\"],\n [\n [10, (3,), (None, tuple()), (-1., (3,)), (2, tuple())],\n [10, (3,), (None, tuple()), (-1., tuple()), (None, tuple(), 1)],\n [10, (3,), (None, (3,)), (-1., tuple()), (None, tuple(), 1)],\n [10, (4, 3,), (None, (3,)), (-1., tuple()), (None, (3,), 1)],\n [10, (4, 3,), (None, (3,)), (None, tuple(), -1), (None, (3,), 1)],\n ],\n ids=str,\n )\n def test_Triangular(\n self,\n prior_samples,\n shape,\n c,\n lower,\n upper,\n ):\n prior = self.sample_prior(\n distribution=pm.Triangular,\n shape=shape,\n nested_rvs_info=dict(c=c, lower=lower, upper=upper),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n",
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport functools\nimport itertools\nimport threading\nimport warnings\nfrom typing import Optional, TypeVar, Type, List, Union, TYPE_CHECKING, Any, cast\nfrom sys import modules\n\nimport numpy as np\nfrom pandas import Series\nimport scipy.sparse as sps\nimport theano.sparse as sparse\nimport theano\nimport theano.tensor as tt\nfrom theano.tensor.var import TensorVariable\nfrom theano.compile import SharedVariable\n\nfrom pymc3.theanof import set_theano_conf, floatX\nimport pymc3 as pm\nfrom pymc3.math import flatten_list\nfrom .memoize import memoize, WithMemoization\nfrom .theanof import gradient, hessian, inputvars, generator\nfrom .vartypes import typefilter, discrete_types, continuous_types, isgenerator\nfrom .blocking import DictToArrayBijection, ArrayOrdering\nfrom .util import get_transformed_name, get_var_name\nfrom .exceptions import ImputationWarning\n\n__all__ = [\n \"Model\",\n \"Factor\",\n \"compilef\",\n \"fn\",\n \"fastfn\",\n \"modelcontext\",\n \"Point\",\n \"Deterministic\",\n \"Potential\",\n \"set_data\",\n]\n\nFlatView = collections.namedtuple(\"FlatView\", \"input, replacements, view\")\n\n\nclass PyMC3Variable(TensorVariable):\n \"\"\"Class to wrap Theano TensorVariable for custom behavior.\"\"\"\n\n # Implement matrix multiplication infix operator: X @ w\n __matmul__ = tt.dot\n\n def __rmatmul__(self, other):\n return tt.dot(other, self)\n\n def _str_repr(self, name=None, dist=None, formatting=\"plain\"):\n if getattr(self, \"distribution\", None) is None:\n if formatting == \"latex\":\n return None\n else:\n return super().__str__()\n\n if name is None and hasattr(self, 'name'):\n name = self.name\n if dist is None and hasattr(self, 'distribution'):\n dist = self.distribution\n return self.distribution._str_repr(name=name, dist=dist, formatting=formatting)\n\n def _repr_latex_(self, **kwargs):\n return self._str_repr(formatting=\"latex\", **kwargs)\n\n def __str__(self, **kwargs):\n return self._str_repr(formatting=\"plain\", **kwargs)\n\n __latex__ = _repr_latex_\n\n\nclass InstanceMethod:\n \"\"\"Class for hiding references to instance methods so they can be pickled.\n\n >>> self.method = InstanceMethod(some_object, 'method_name')\n \"\"\"\n\n def __init__(self, obj, method_name):\n self.obj = obj\n self.method_name = method_name\n\n def __call__(self, *args, **kwargs):\n return getattr(self.obj, self.method_name)(*args, **kwargs)\n\n\ndef incorporate_methods(source, destination, methods, wrapper=None, override=False):\n \"\"\"\n Add attributes to a destination object which point to\n methods from from a source object.\n\n Parameters\n ----------\n source: object\n The source object containing the methods.\n destination: object\n The destination object for the methods.\n methods: list of str\n Names of methods to incorporate.\n wrapper: function\n An optional function to allow the source method to be\n wrapped. 
Should take the form my_wrapper(source, method_name)\n and return a single value.\n override: bool\n If the destination object already has a method/attribute\n an AttributeError will be raised if override is False (the default).\n \"\"\"\n for method in methods:\n if hasattr(destination, method) and not override:\n raise AttributeError(\n f\"Cannot add method {method!r}\"\n + \"to destination object as it already exists. \"\n \"To prevent this error set 'override=True'.\"\n )\n if hasattr(source, method):\n if wrapper is None:\n setattr(destination, method, getattr(source, method))\n else:\n setattr(destination, method, wrapper(source, method))\n else:\n setattr(destination, method, None)\n\n\ndef get_named_nodes_and_relations(graph):\n \"\"\"Get the named nodes in a theano graph (i.e., nodes whose name\n attribute is not None) along with their relationships (i.e., the\n node's named parents, and named children, while skipping unnamed\n intermediate nodes)\n\n Parameters\n ----------\n graph: a theano node\n\n Returns:\n --------\n leaf_dict: Dict[str, node]\n A dictionary of name:node pairs, of the named nodes that\n have no named ancestors in the provided theano graph.\n descendents: Dict[node, Set[node]]\n Each key is a theano named node, and the corresponding value\n is the set of theano named nodes that are descendents with no\n intervening named nodes in the supplied ``graph``.\n ancestors: Dict[node, Set[node]]\n A dictionary of node:set([ancestors]) pairs. Each key\n is a theano named node, and the corresponding value is the set\n of theano named nodes that are ancestors with no intervening named\n nodes in the supplied ``graph``.\n\n \"\"\"\n # We don't enforce distribution parameters to have a name but we may\n # attempt to get_named_nodes_and_relations from them anyway in\n # distributions.draw_values. 
This means that must take care only to add\n # graph to the ancestors and descendents dictionaries if it has a name.\n if graph.name is not None:\n ancestors = {graph: set()}\n descendents = {graph: set()}\n else:\n ancestors = {}\n descendents = {}\n descendents, ancestors = _get_named_nodes_and_relations(\n graph, None, ancestors, descendents\n )\n leaf_dict = {\n node.name: node for node, ancestor in ancestors.items() if len(ancestor) == 0\n }\n return leaf_dict, descendents, ancestors\n\n\ndef _get_named_nodes_and_relations(graph, descendent, descendents, ancestors):\n if getattr(graph, \"owner\", None) is None: # Leaf node\n if graph.name is not None: # Named leaf node\n if descendent is not None: # Is None for the first node\n try:\n descendents[graph].add(descendent)\n except KeyError:\n descendents[graph] = {descendent}\n ancestors[descendent].add(graph)\n else:\n descendents[graph] = set()\n # Flag that the leaf node has no children\n ancestors[graph] = set()\n else: # Intermediate node\n if graph.name is not None: # Intermediate named node\n if descendent is not None: # Is only None for the root node\n try:\n descendents[graph].add(descendent)\n except KeyError:\n descendents[graph] = {descendent}\n ancestors[descendent].add(graph)\n else:\n descendents[graph] = set()\n # The current node will be set as the descendent of the next\n # nodes only if it is a named node\n descendent = graph\n # Init the nodes children to an empty set\n ancestors[graph] = set()\n for i in graph.owner.inputs:\n temp_desc, temp_ances = _get_named_nodes_and_relations(\n i, descendent, descendents, ancestors\n )\n descendents.update(temp_desc)\n ancestors.update(temp_ances)\n return descendents, ancestors\n\n\ndef build_named_node_tree(graphs):\n \"\"\"Build the combined descence/ancestry tree of named nodes (i.e., nodes\n whose name attribute is not None) in a list (or iterable) of theano graphs.\n The relationship tree does not include unnamed intermediate nodes present\n in the supplied graphs.\n\n Parameters\n ----------\n graphs - iterable of theano graphs\n\n Returns:\n --------\n leaf_dict: Dict[str, node]\n A dictionary of name:node pairs, of the named nodes that\n have no named ancestors in the provided theano graphs.\n descendents: Dict[node, Set[node]]\n A dictionary of node:set([parents]) pairs. Each key is\n a theano named node, and the corresponding value is the set of\n theano named nodes that are descendents with no intervening named\n nodes in the supplied ``graphs``.\n ancestors: Dict[node, Set[node]]\n A dictionary of node:set([ancestors]) pairs. 
Each key\n is a theano named node, and the corresponding value is the set\n of theano named nodes that are ancestors with no intervening named\n nodes in the supplied ``graphs``.\n\n \"\"\"\n leaf_dict = {}\n named_nodes_descendents = {}\n named_nodes_ancestors = {}\n for graph in graphs:\n # Get the named nodes under the `param` node\n nn, nnd, nna = get_named_nodes_and_relations(graph)\n leaf_dict.update(nn)\n # Update the discovered parental relationships\n for k in nnd.keys():\n if k not in named_nodes_descendents.keys():\n named_nodes_descendents[k] = nnd[k]\n else:\n named_nodes_descendents[k].update(nnd[k])\n # Update the discovered child relationships\n for k in nna.keys():\n if k not in named_nodes_ancestors.keys():\n named_nodes_ancestors[k] = nna[k]\n else:\n named_nodes_ancestors[k].update(nna[k])\n return leaf_dict, named_nodes_descendents, named_nodes_ancestors\n\n\nT = TypeVar(\"T\", bound=\"ContextMeta\")\n\n\nclass ContextMeta(type):\n \"\"\"Functionality for objects that put themselves in a context using\n the `with` statement.\n \"\"\"\n\n def __new__(cls, name, bases, dct, **kargs): # pylint: disable=unused-argument\n \"Add __enter__ and __exit__ methods to the class.\"\n\n def __enter__(self):\n self.__class__.context_class.get_contexts().append(self)\n # self._theano_config is set in Model.__new__\n if hasattr(self, \"_theano_config\"):\n self._old_theano_config = set_theano_conf(self._theano_config)\n return self\n\n def __exit__(self, typ, value, traceback): # pylint: disable=unused-argument\n self.__class__.context_class.get_contexts().pop()\n # self._theano_config is set in Model.__new__\n if hasattr(self, \"_old_theano_config\"):\n set_theano_conf(self._old_theano_config)\n\n dct[__enter__.__name__] = __enter__\n dct[__exit__.__name__] = __exit__\n\n # We strip off keyword args, per the warning from\n # StackExchange:\n # DO NOT send \"**kargs\" to \"type.__new__\". It won't catch them and\n # you'll get a \"TypeError: type() takes 1 or 3 arguments\" exception.\n return super().__new__(cls, name, bases, dct)\n\n # FIXME: is there a more elegant way to automatically add methods to the class that\n # are instance methods instead of class methods?\n def __init__(\n cls, name, bases, nmspc, context_class: Optional[Type] = None, **kwargs\n ): # pylint: disable=unused-argument\n \"\"\"Add ``__enter__`` and ``__exit__`` methods to the new class automatically.\"\"\"\n if context_class is not None:\n cls._context_class = context_class\n super().__init__(name, bases, nmspc)\n\n def get_context(cls, error_if_none=True) -> Optional[T]:\n \"\"\"Return the most recently pushed context object of type ``cls``\n on the stack, or ``None``. If ``error_if_none`` is True (default),\n raise a ``TypeError`` instead of returning ``None``.\"\"\"\n try:\n candidate = cls.get_contexts()[-1] # type: Optional[T]\n except IndexError as e:\n # Calling code expects to get a TypeError if the entity\n # is unfound, and there's too much to fix.\n if error_if_none:\n raise TypeError(\"No %s on context stack\" % str(cls))\n return None\n return candidate\n\n def get_contexts(cls) -> List[T]:\n \"\"\"Return a stack of context instances for the ``context_class``\n of ``cls``.\"\"\"\n # This lazily creates the context class's contexts\n # thread-local object, as needed. This seems inelegant to me,\n # but since the context class is not guaranteed to exist when\n # the metaclass is being instantiated, I couldn't figure out a\n # better way. 
[2019/10/11:rpg]\n\n # no race-condition here, contexts is a thread-local object\n # be sure not to override contexts in a subclass however!\n context_class = cls.context_class\n assert isinstance(context_class, type), (\n \"Name of context class, %s was not resolvable to a class\" % context_class\n )\n if not hasattr(context_class, \"contexts\"):\n context_class.contexts = threading.local()\n\n contexts = context_class.contexts\n\n if not hasattr(contexts, \"stack\"):\n contexts.stack = []\n return contexts.stack\n\n # the following complex property accessor is necessary because the\n # context_class may not have been created at the point it is\n # specified, so the context_class may be a class *name* rather\n # than a class.\n @property\n def context_class(cls) -> Type:\n def resolve_type(c: Union[Type, str]) -> Type:\n if isinstance(c, str):\n c = getattr(modules[cls.__module__], c)\n if isinstance(c, type):\n return c\n raise ValueError(\"Cannot resolve context class %s\" % c)\n\n assert cls is not None\n if isinstance(cls._context_class, str):\n cls._context_class = resolve_type(cls._context_class)\n if not isinstance(cls._context_class, (str, type)):\n raise ValueError(\n \"Context class for %s, %s, is not of the right type\"\n % (cls.__name__, cls._context_class)\n )\n return cls._context_class\n\n # Inherit context class from parent\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls.context_class = super().context_class\n\n # Initialize object in its own context...\n # Merged from InitContextMeta in the original.\n def __call__(cls, *args, **kwargs):\n instance = cls.__new__(cls, *args, **kwargs)\n with instance: # appends context\n instance.__init__(*args, **kwargs)\n return instance\n\n\ndef modelcontext(model: Optional[\"Model\"]) -> \"Model\":\n \"\"\"\n Return the given model or, if none was supplied, try to find one in\n the context stack.\n \"\"\"\n if model is None:\n model = Model.get_context(error_if_none=False)\n\n if model is None:\n # TODO: This should be a ValueError, but that breaks\n # ArviZ (and others?), so might need a deprecation.\n raise TypeError(\"No model on context stack.\")\n return model\n\n\nclass Factor:\n \"\"\"Common functionality for objects with a log probability density\n associated with them.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @property\n def logp(self):\n \"\"\"Compiled log probability density function\"\"\"\n return self.model.fn(self.logpt)\n\n @property\n def logp_elemwise(self):\n return self.model.fn(self.logp_elemwiset)\n\n def dlogp(self, vars=None):\n \"\"\"Compiled log probability density gradient function\"\"\"\n return self.model.fn(gradient(self.logpt, vars))\n\n def d2logp(self, vars=None):\n \"\"\"Compiled log probability density hessian function\"\"\"\n return self.model.fn(hessian(self.logpt, vars))\n\n @property\n def logp_nojac(self):\n return self.model.fn(self.logp_nojact)\n\n def dlogp_nojac(self, vars=None):\n \"\"\"Compiled log density gradient function, without jacobian terms.\"\"\"\n return self.model.fn(gradient(self.logp_nojact, vars))\n\n def d2logp_nojac(self, vars=None):\n \"\"\"Compiled log density hessian function, without jacobian terms.\"\"\"\n return self.model.fn(hessian(self.logp_nojact, vars))\n\n @property\n def fastlogp(self):\n \"\"\"Compiled log probability density function\"\"\"\n return self.model.fastfn(self.logpt)\n\n def fastdlogp(self, vars=None):\n \"\"\"Compiled log probability density gradient function\"\"\"\n 
return self.model.fastfn(gradient(self.logpt, vars))\n\n def fastd2logp(self, vars=None):\n \"\"\"Compiled log probability density hessian function\"\"\"\n return self.model.fastfn(hessian(self.logpt, vars))\n\n @property\n def fastlogp_nojac(self):\n return self.model.fastfn(self.logp_nojact)\n\n def fastdlogp_nojac(self, vars=None):\n \"\"\"Compiled log density gradient function, without jacobian terms.\"\"\"\n return self.model.fastfn(gradient(self.logp_nojact, vars))\n\n def fastd2logp_nojac(self, vars=None):\n \"\"\"Compiled log density hessian function, without jacobian terms.\"\"\"\n return self.model.fastfn(hessian(self.logp_nojact, vars))\n\n @property\n def logpt(self):\n \"\"\"Theano scalar of log-probability of the model\"\"\"\n if getattr(self, \"total_size\", None) is not None:\n logp = self.logp_sum_unscaledt * self.scaling\n else:\n logp = self.logp_sum_unscaledt\n if self.name is not None:\n logp.name = \"__logp_%s\" % self.name\n return logp\n\n @property\n def logp_nojact(self):\n \"\"\"Theano scalar of log-probability, excluding jacobian terms.\"\"\"\n if getattr(self, \"total_size\", None) is not None:\n logp = tt.sum(self.logp_nojac_unscaledt) * self.scaling\n else:\n logp = tt.sum(self.logp_nojac_unscaledt)\n if self.name is not None:\n logp.name = \"__logp_%s\" % self.name\n return logp\n\n\ndef withparent(meth):\n \"\"\"Helper wrapper that passes calls to parent's instance\"\"\"\n\n def wrapped(self, *args, **kwargs):\n res = meth(self, *args, **kwargs)\n if getattr(self, \"parent\", None) is not None:\n getattr(self.parent, meth.__name__)(*args, **kwargs)\n return res\n\n # Unfortunately functools wrapper fails\n # when decorating built-in methods so we\n # need to fix that improper behaviour\n wrapped.__name__ = meth.__name__\n return wrapped\n\n\nclass treelist(list):\n \"\"\"A list that passes mutable extending operations used in Model\n to parent list instance.\n Extending treelist you will also extend its parent\n \"\"\"\n\n def __init__(self, iterable=(), parent=None):\n super().__init__(iterable)\n assert isinstance(parent, list) or parent is None\n self.parent = parent\n if self.parent is not None:\n self.parent.extend(self)\n\n # typechecking here works bad\n append = withparent(list.append)\n __iadd__ = withparent(list.__iadd__)\n extend = withparent(list.extend)\n\n def tree_contains(self, item):\n if isinstance(self.parent, treedict):\n return list.__contains__(self, item) or self.parent.tree_contains(item)\n elif isinstance(self.parent, list):\n return list.__contains__(self, item) or self.parent.__contains__(item)\n else:\n return list.__contains__(self, item)\n\n def __setitem__(self, key, value):\n raise NotImplementedError(\n \"Method is removed as we are not\"\n \" able to determine \"\n \"appropriate logic for it\"\n )\n\n # Added this because mypy didn't like having __imul__ without __mul__\n # This is my best guess about what this should do. 
I might be happier\n # to kill both of these if they are not used.\n def __mul__(self, other) -> \"treelist\":\n return cast(\"treelist\", list.__mul__(self, other))\n\n def __imul__(self, other) -> \"treelist\":\n t0 = len(self)\n list.__imul__(self, other)\n if self.parent is not None:\n self.parent.extend(self[t0:])\n return self # python spec says should return the result.\n\n\nclass treedict(dict):\n \"\"\"A dict that passes mutable extending operations used in Model\n to parent dict instance.\n Extending treedict you will also extend its parent\n \"\"\"\n\n def __init__(self, iterable=(), parent=None, **kwargs):\n super().__init__(iterable, **kwargs)\n assert isinstance(parent, dict) or parent is None\n self.parent = parent\n if self.parent is not None:\n self.parent.update(self)\n\n # typechecking here works bad\n __setitem__ = withparent(dict.__setitem__)\n update = withparent(dict.update)\n\n def tree_contains(self, item):\n # needed for `add_random_variable` method\n if isinstance(self.parent, treedict):\n return dict.__contains__(self, item) or self.parent.tree_contains(item)\n elif isinstance(self.parent, dict):\n return dict.__contains__(self, item) or self.parent.__contains__(item)\n else:\n return dict.__contains__(self, item)\n\n\nclass ValueGradFunction:\n \"\"\"Create a theano function that computes a value and its gradient.\n\n Parameters\n ----------\n costs: list of theano variables\n We compute the weighted sum of the specified theano values, and the gradient\n of that sum. The weights can be specified with `ValueGradFunction.set_weights`.\n grad_vars: list of named theano variables or None\n The arguments with respect to which the gradient is computed.\n extra_vars: list of named theano variables or None\n Other arguments of the function that are assumed constant. They\n are stored in shared variables and can be set using\n `set_extra_values`.\n dtype: str, default=theano.config.floatX\n The dtype of the arrays.\n casting: {'no', 'equiv', 'save', 'same_kind', 'unsafe'}, default='no'\n Casting rule for casting `grad_args` to the array dtype.\n See `numpy.can_cast` for a description of the options.\n Keep in mind that we cast the variables to the array *and*\n back from the array dtype to the variable dtype.\n compute_grads: bool, default=True\n If False, return only the logp, not the gradient.\n kwargs\n Extra arguments are passed on to `theano.function`.\n\n Attributes\n ----------\n size: int\n The number of elements in the parameter array.\n profile: theano profiling object or None\n The profiling object of the theano function that computes value and\n gradient. 
This is None unless `profile=True` was set in the\n kwargs.\n \"\"\"\n\n def __init__(\n self,\n costs,\n grad_vars,\n extra_vars=None,\n *,\n dtype=None,\n casting=\"no\",\n compute_grads=True,\n **kwargs\n ):\n from .distributions import TensorType\n\n if extra_vars is None:\n extra_vars = []\n\n names = [arg.name for arg in grad_vars + extra_vars]\n if any(name is None for name in names):\n raise ValueError(\"Arguments must be named.\")\n if len(set(names)) != len(names):\n raise ValueError(\"Names of the arguments are not unique.\")\n\n self._grad_vars = grad_vars\n self._extra_vars = extra_vars\n self._extra_var_names = {var.name for var in extra_vars}\n\n if dtype is None:\n dtype = theano.config.floatX\n self.dtype = dtype\n\n self._n_costs = len(costs)\n if self._n_costs == 0:\n raise ValueError(\"At least one cost is required.\")\n weights = np.ones(self._n_costs - 1, dtype=self.dtype)\n self._weights = theano.shared(weights, \"__weights\")\n\n cost = costs[0]\n for i, val in enumerate(costs[1:]):\n if cost.ndim > 0 or val.ndim > 0:\n raise ValueError(\"All costs must be scalar.\")\n cost = cost + self._weights[i] * val\n\n self._cost = cost\n self._ordering = ArrayOrdering(grad_vars)\n self.size = self._ordering.size\n self._extra_are_set = False\n for var in self._grad_vars:\n if not np.can_cast(var.dtype, self.dtype, casting):\n raise TypeError(\n f\"Invalid dtype for variable {var.name}. Can not \"\n f\"cast to {self.dtype} with casting rule {casting}.\"\n )\n if not np.issubdtype(var.dtype, np.floating):\n raise TypeError(\n f\"Invalid dtype for variable {var.name}. Must be \"\n f\"floating point but is {var.dtype}.\"\n )\n\n givens = []\n self._extra_vars_shared = {}\n for var in extra_vars:\n shared = theano.shared(var.tag.test_value, var.name + \"_shared__\")\n # test TensorType compatibility\n if hasattr(var.tag.test_value, \"shape\"):\n testtype = TensorType(var.dtype, var.tag.test_value.shape)\n\n if testtype != shared.type:\n shared.type = testtype\n self._extra_vars_shared[var.name] = shared\n givens.append((var, shared))\n\n self._vars_joined, self._cost_joined = self._build_joined(\n self._cost, grad_vars, self._ordering.vmap\n )\n\n if compute_grads:\n grad = tt.grad(self._cost_joined, self._vars_joined)\n grad.name = \"__grad\"\n outputs = [self._cost_joined, grad]\n else:\n outputs = self._cost_joined\n\n inputs = [self._vars_joined]\n\n self._theano_function = theano.function(\n inputs, outputs, givens=givens, **kwargs\n )\n\n def set_weights(self, values):\n if values.shape != (self._n_costs - 1,):\n raise ValueError(\"Invalid shape. Must be (n_costs - 1,).\")\n self._weights.set_value(values)\n\n def set_extra_values(self, extra_vars):\n self._extra_are_set = True\n for var in self._extra_vars:\n self._extra_vars_shared[var.name].set_value(extra_vars[var.name])\n\n def get_extra_values(self):\n if not self._extra_are_set:\n raise ValueError(\"Extra values are not set.\")\n\n return {\n var.name: self._extra_vars_shared[var.name].get_value()\n for var in self._extra_vars\n }\n\n def __call__(self, array, grad_out=None, extra_vars=None):\n if extra_vars is not None:\n self.set_extra_values(extra_vars)\n\n if not self._extra_are_set:\n raise ValueError(\"Extra values are not set.\")\n\n if array.shape != (self.size,):\n raise ValueError(\n \"Invalid shape for array. 
Must be %s but is %s.\"\n % ((self.size,), array.shape)\n )\n\n if grad_out is None:\n out = np.empty_like(array)\n else:\n out = grad_out\n\n output = self._theano_function(array)\n if grad_out is None:\n return output\n else:\n np.copyto(out, output[1])\n return output[0]\n\n @property\n def profile(self):\n \"\"\"Profiling information of the underlying theano function.\"\"\"\n return self._theano_function.profile\n\n def dict_to_array(self, point):\n \"\"\"Convert a dictionary with values for grad_vars to an array.\"\"\"\n array = np.empty(self.size, dtype=self.dtype)\n for varmap in self._ordering.vmap:\n array[varmap.slc] = point[varmap.var].ravel().astype(self.dtype)\n return array\n\n def array_to_dict(self, array):\n \"\"\"Convert an array to a dictionary containing the grad_vars.\"\"\"\n if array.shape != (self.size,):\n raise ValueError(\n f\"Array should have shape ({self.size},) but has {array.shape}\"\n )\n if array.dtype != self.dtype:\n raise ValueError(\n \"Array has invalid dtype. Should be %s but is %s\"\n % (self._dtype, self.dtype)\n )\n point = {}\n for varmap in self._ordering.vmap:\n data = array[varmap.slc].reshape(varmap.shp)\n point[varmap.var] = data.astype(varmap.dtyp)\n\n return point\n\n def array_to_full_dict(self, array):\n \"\"\"Convert an array to a dictionary with grad_vars and extra_vars.\"\"\"\n point = self.array_to_dict(array)\n for name, var in self._extra_vars_shared.items():\n point[name] = var.get_value()\n return point\n\n def _build_joined(self, cost, args, vmap):\n args_joined = tt.vector(\"__args_joined\")\n args_joined.tag.test_value = np.zeros(self.size, dtype=self.dtype)\n\n joined_slices = {}\n for vmap in vmap:\n sliced = args_joined[vmap.slc].reshape(vmap.shp)\n sliced.name = vmap.var\n joined_slices[vmap.var] = sliced\n\n replace = {var: joined_slices[var.name] for var in args}\n return args_joined, theano.clone(cost, replace=replace)\n\n\nclass Model(Factor, WithMemoization, metaclass=ContextMeta):\n \"\"\"Encapsulates the variables and likelihood factors of a model.\n\n Model class can be used for creating class based models. To create\n a class based model you should inherit from :class:`~.Model` and\n override :meth:`~.__init__` with arbitrary definitions (do not\n forget to call base class :meth:`__init__` first).\n\n Parameters\n ----------\n name: str\n name that will be used as prefix for names of all random\n variables defined within model\n model: Model\n instance of Model that is supposed to be a parent for the new\n instance. If ``None``, context will be used. All variables\n defined within instance will be passed to the parent instance.\n So that 'nested' model contributes to the variables and\n likelihood factors of parent model.\n theano_config: dict\n A dictionary of theano config values that should be set\n temporarily in the model context. See the documentation\n of theano for a complete list. Set config key\n ``compute_test_value`` to `raise` if it is None.\n\n Examples\n --------\n\n How to define a custom model\n\n .. 
code-block:: python\n\n class CustomModel(Model):\n # 1) override init\n def __init__(self, mean=0, sigma=1, name='', model=None):\n # 2) call super's init first, passing model and name\n # to it name will be prefix for all variables here if\n # no name specified for model there will be no prefix\n super().__init__(name, model)\n # now you are in the context of instance,\n # `modelcontext` will return self you can define\n # variables in several ways note, that all variables\n # will get model's name prefix\n\n # 3) you can create variables with Var method\n self.Var('v1', Normal.dist(mu=mean, sigma=sd))\n # this will create variable named like '{prefix_}v1'\n # and assign attribute 'v1' to instance created\n # variable can be accessed with self.v1 or self['v1']\n\n # 4) this syntax will also work as we are in the\n # context of instance itself, names are given as usual\n Normal('v2', mu=mean, sigma=sd)\n\n # something more complex is allowed, too\n half_cauchy = HalfCauchy('sd', beta=10, testval=1.)\n Normal('v3', mu=mean, sigma=half_cauchy)\n\n # Deterministic variables can be used in usual way\n Deterministic('v3_sq', self.v3 ** 2)\n\n # Potentials too\n Potential('p1', tt.constant(1))\n\n # After defining a class CustomModel you can use it in several\n # ways\n\n # I:\n # state the model within a context\n with Model() as model:\n CustomModel()\n # arbitrary actions\n\n # II:\n # use new class as entering point in context\n with CustomModel() as model:\n Normal('new_normal_var', mu=1, sigma=0)\n\n # III:\n # just get model instance with all that was defined in it\n model = CustomModel()\n\n # IV:\n # use many custom models within one context\n with Model() as model:\n CustomModel(mean=1, name='first')\n CustomModel(mean=2, name='second')\n \"\"\"\n\n if TYPE_CHECKING:\n\n def __enter__(self: \"Model\") -> \"Model\":\n ...\n\n def __exit__(self: \"Model\", *exc: Any) -> bool:\n ...\n\n def __new__(cls, *args, **kwargs):\n # resolves the parent instance\n instance = super().__new__(cls)\n if kwargs.get(\"model\") is not None:\n instance._parent = kwargs.get(\"model\")\n else:\n instance._parent = cls.get_context(error_if_none=False)\n theano_config = kwargs.get(\"theano_config\", None)\n if theano_config is None or \"compute_test_value\" not in theano_config:\n theano_config = {\"compute_test_value\": \"raise\"}\n instance._theano_config = theano_config\n return instance\n\n def __init__(self, name=\"\", model=None, theano_config=None, coords=None):\n self.name = name\n self.coords = {}\n self.RV_dims = {}\n self.add_coords(coords)\n\n if self.parent is not None:\n self.named_vars = treedict(parent=self.parent.named_vars)\n self.free_RVs = treelist(parent=self.parent.free_RVs)\n self.observed_RVs = treelist(parent=self.parent.observed_RVs)\n self.deterministics = treelist(parent=self.parent.deterministics)\n self.potentials = treelist(parent=self.parent.potentials)\n self.missing_values = treelist(parent=self.parent.missing_values)\n else:\n self.named_vars = treedict()\n self.free_RVs = treelist()\n self.observed_RVs = treelist()\n self.deterministics = treelist()\n self.potentials = treelist()\n self.missing_values = treelist()\n\n @property\n def model(self):\n return self\n\n @property\n def parent(self):\n return self._parent\n\n @property\n def root(self):\n model = self\n while not model.isroot:\n model = model.parent\n return model\n\n @property\n def isroot(self):\n return self.parent is None\n\n @property # type: ignore\n @memoize(bound=True)\n def bijection(self):\n vars = 
inputvars(self.vars)\n\n bij = DictToArrayBijection(ArrayOrdering(vars), self.test_point)\n\n return bij\n\n @property\n def dict_to_array(self):\n return self.bijection.map\n\n @property\n def ndim(self):\n return sum(var.dsize for var in self.free_RVs)\n\n @property\n def logp_array(self):\n return self.bijection.mapf(self.fastlogp)\n\n @property\n def dlogp_array(self):\n vars = inputvars(self.cont_vars)\n return self.bijection.mapf(self.fastdlogp(vars))\n\n def logp_dlogp_function(self, grad_vars=None, tempered=False, **kwargs):\n \"\"\"Compile a theano function that computes logp and gradient.\n\n Parameters\n ----------\n grad_vars: list of random variables, optional\n Compute the gradient with respect to those variables. If None,\n use all free random variables of this model.\n tempered: bool\n Compute the tempered logp `free_logp + alpha * observed_logp`.\n `alpha` can be changed using `ValueGradFunction.set_weights([alpha])`.\n \"\"\"\n if grad_vars is None:\n grad_vars = list(typefilter(self.free_RVs, continuous_types))\n else:\n for var in grad_vars:\n if var.dtype not in continuous_types:\n raise ValueError(\n \"Can only compute the gradient of \" \"continuous types: %s\" % var\n )\n\n if tempered:\n with self:\n free_RVs_logp = tt.sum([\n tt.sum(var.logpt) for var in self.free_RVs + self.potentials\n ])\n observed_RVs_logp = tt.sum([\n tt.sum(var.logpt) for var in self.observed_RVs\n ])\n\n costs = [free_RVs_logp, observed_RVs_logp]\n else:\n costs = [self.logpt]\n varnames = [var.name for var in grad_vars]\n extra_vars = [var for var in self.free_RVs if var.name not in varnames]\n return ValueGradFunction(costs, grad_vars, extra_vars, **kwargs)\n\n @property\n def logpt(self):\n \"\"\"Theano scalar of log-probability of the model\"\"\"\n with self:\n factors = [var.logpt for var in self.basic_RVs] + self.potentials\n logp = tt.sum([tt.sum(factor) for factor in factors])\n if self.name:\n logp.name = \"__logp_%s\" % self.name\n else:\n logp.name = \"__logp\"\n return logp\n\n @property\n def logp_nojact(self):\n \"\"\"Theano scalar of log-probability of the model but without the jacobian\n if transformed Random Variable is presented.\n Note that If there is no transformed variable in the model, logp_nojact\n will be the same as logpt as there is no need for Jacobian correction.\n \"\"\"\n with self:\n factors = [var.logp_nojact for var in self.basic_RVs] + self.potentials\n logp = tt.sum([tt.sum(factor) for factor in factors])\n if self.name:\n logp.name = \"__logp_nojac_%s\" % self.name\n else:\n logp.name = \"__logp_nojac\"\n return logp\n\n @property\n def varlogpt(self):\n \"\"\"Theano scalar of log-probability of the unobserved random variables\n (excluding deterministic).\"\"\"\n with self:\n factors = [var.logpt for var in self.free_RVs]\n return tt.sum(factors)\n\n @property\n def datalogpt(self):\n with self:\n factors = [var.logpt for var in self.observed_RVs]\n factors += [tt.sum(factor) for factor in self.potentials]\n return tt.sum(factors)\n\n @property\n def vars(self):\n \"\"\"List of unobserved random variables used as inputs to the model\n (which excludes deterministics).\n \"\"\"\n return self.free_RVs\n\n @property\n def basic_RVs(self):\n \"\"\"List of random variables the model is defined in terms of\n (which excludes deterministics).\n \"\"\"\n return self.free_RVs + self.observed_RVs\n\n @property\n def unobserved_RVs(self):\n \"\"\"List of all random variable, including deterministic ones.\"\"\"\n return self.vars + self.deterministics\n\n @property\n 
def test_point(self):\n \"\"\"Test point used to check that the model doesn't generate errors\"\"\"\n return Point(((var, var.tag.test_value) for var in self.vars), model=self)\n\n @property\n def disc_vars(self):\n \"\"\"All the discrete variables in the model\"\"\"\n return list(typefilter(self.vars, discrete_types))\n\n @property\n def cont_vars(self):\n \"\"\"All the continuous variables in the model\"\"\"\n return list(typefilter(self.vars, continuous_types))\n\n def shape_from_dims(self, dims):\n shape = []\n if len(set(dims)) != len(dims):\n raise ValueError(\"Can not contain the same dimension name twice.\")\n for dim in dims:\n if dim not in self.coords:\n raise ValueError(\n \"Unknown dimension name '%s'. All dimension \"\n \"names must be specified in the `coords` \"\n \"argument of the model or through a pm.Data \"\n \"variable.\" % dim\n )\n shape.extend(np.shape(self.coords[dim]))\n return tuple(shape)\n\n def add_coords(self, coords):\n if coords is None:\n return\n\n for name in coords:\n if name in {\"draw\", \"chain\"}:\n raise ValueError(\n \"Dimensions can not be named `draw` or `chain`, as they are reserved for the sampler's outputs.\"\n )\n if name in self.coords:\n if not coords[name].equals(self.coords[name]):\n raise ValueError(\n \"Duplicate and incompatiple coordinate: %s.\" % name\n )\n else:\n self.coords[name] = coords[name]\n\n def Var(self, name, dist, data=None, total_size=None, dims=None):\n \"\"\"Create and add (un)observed random variable to the model with an\n appropriate prior distribution.\n\n Parameters\n ----------\n name: str\n dist: distribution for the random variable\n data: array_like (optional)\n If data is provided, the variable is observed. If None,\n the variable is unobserved.\n total_size: scalar\n upscales logp of variable with ``coef = total_size/var.shape[0]``\n dims : tuple\n Dimension names for the variable.\n\n Returns\n -------\n FreeRV or ObservedRV\n \"\"\"\n name = self.name_for(name)\n\n if data is None:\n if getattr(dist, \"transform\", None) is None:\n with self:\n var = FreeRV(\n name=name, distribution=dist, total_size=total_size, model=self\n )\n self.free_RVs.append(var)\n else:\n with self:\n var = TransformedRV(\n name=name,\n distribution=dist,\n transform=dist.transform,\n total_size=total_size,\n model=self,\n )\n pm._log.debug(\n \"Applied {transform}-transform to {name}\"\n \" and added transformed {orig_name} to model.\".format(\n transform=dist.transform.name,\n name=name,\n orig_name=get_transformed_name(name, dist.transform),\n )\n )\n self.deterministics.append(var)\n self.add_random_variable(var, dims)\n return var\n elif isinstance(data, dict):\n with self:\n var = MultiObservedRV(\n name=name,\n data=data,\n distribution=dist,\n total_size=total_size,\n model=self,\n )\n self.observed_RVs.append(var)\n if var.missing_values:\n self.free_RVs += var.missing_values\n self.missing_values += var.missing_values\n for v in var.missing_values:\n self.named_vars[v.name] = v\n else:\n with self:\n var = ObservedRV(\n name=name,\n data=data,\n distribution=dist,\n total_size=total_size,\n model=self,\n )\n self.observed_RVs.append(var)\n if var.missing_values:\n self.free_RVs.append(var.missing_values)\n self.missing_values.append(var.missing_values)\n self.named_vars[var.missing_values.name] = var.missing_values\n\n self.add_random_variable(var, dims)\n return var\n\n def add_random_variable(self, var, dims=None):\n \"\"\"Add a random variable to the named variables of the model.\"\"\"\n if 
self.named_vars.tree_contains(var.name):\n raise ValueError(f\"Variable name {var.name} already exists.\")\n\n if dims is not None:\n if isinstance(dims, str):\n dims = (dims,)\n assert all(dim in self.coords for dim in dims)\n self.RV_dims[var.name] = dims\n\n self.named_vars[var.name] = var\n if not hasattr(self, self.name_of(var.name)):\n setattr(self, self.name_of(var.name), var)\n\n @property\n def prefix(self):\n return \"%s_\" % self.name if self.name else \"\"\n\n def name_for(self, name):\n \"\"\"Checks if name has prefix and adds if needed\n \"\"\"\n if self.prefix:\n if not name.startswith(self.prefix):\n return f\"{self.prefix}{name}\"\n else:\n return name\n else:\n return name\n\n def name_of(self, name):\n \"\"\"Checks if name has prefix and deletes if needed\n \"\"\"\n if not self.prefix or not name:\n return name\n elif name.startswith(self.prefix):\n return name[len(self.prefix) :]\n else:\n return name\n\n def __getitem__(self, key):\n try:\n return self.named_vars[key]\n except KeyError as e:\n try:\n return self.named_vars[self.name_for(key)]\n except KeyError:\n raise e\n\n def makefn(self, outs, mode=None, *args, **kwargs):\n \"\"\"Compiles a Theano function which returns ``outs`` and takes the variable\n ancestors of ``outs`` as inputs.\n\n Parameters\n ----------\n outs: Theano variable or iterable of Theano variables\n mode: Theano compilation mode\n\n Returns\n -------\n Compiled Theano function\n \"\"\"\n with self:\n return theano.function(\n self.vars,\n outs,\n allow_input_downcast=True,\n on_unused_input=\"ignore\",\n accept_inplace=True,\n mode=mode,\n *args,\n **kwargs\n )\n\n def fn(self, outs, mode=None, *args, **kwargs):\n \"\"\"Compiles a Theano function which returns the values of ``outs``\n and takes values of model vars as arguments.\n\n Parameters\n ----------\n outs: Theano variable or iterable of Theano variables\n mode: Theano compilation mode\n\n Returns\n -------\n Compiled Theano function\n \"\"\"\n return LoosePointFunc(self.makefn(outs, mode, *args, **kwargs), self)\n\n def fastfn(self, outs, mode=None, *args, **kwargs):\n \"\"\"Compiles a Theano function which returns ``outs`` and takes values\n of model vars as a dict as an argument.\n\n Parameters\n ----------\n outs: Theano variable or iterable of Theano variables\n mode: Theano compilation mode\n\n Returns\n -------\n Compiled Theano function as point function.\n \"\"\"\n f = self.makefn(outs, mode, *args, **kwargs)\n return FastPointFunc(f)\n\n def profile(self, outs, n=1000, point=None, profile=True, *args, **kwargs):\n \"\"\"Compiles and profiles a Theano function which returns ``outs`` and\n takes values of model vars as a dict as an argument.\n\n Parameters\n ----------\n outs: Theano variable or iterable of Theano variables\n n: int, default 1000\n Number of iterations to run\n point: point\n Point to pass to the function\n profile: True or ProfileStats\n args, kwargs\n Compilation args\n\n Returns\n -------\n ProfileStats\n Use .summary() to print stats.\n \"\"\"\n f = self.makefn(outs, profile=profile, *args, **kwargs)\n if point is None:\n point = self.test_point\n\n for _ in range(n):\n f(**point)\n\n return f.profile\n\n def flatten(self, vars=None, order=None, inputvar=None):\n \"\"\"Flattens model's input and returns:\n\n FlatView with\n * input vector variable\n * replacements ``input_var -> vars``\n * view `{variable: VarMap}`\n\n Parameters\n ----------\n vars: list of variables or None\n if None, then all model.free_RVs are used for flattening input\n order: 
ArrayOrdering\n Optional, use predefined ordering\n inputvar: tt.vector\n Optional, use predefined inputvar\n\n Returns\n -------\n flat_view\n \"\"\"\n if vars is None:\n vars = self.free_RVs\n if order is None:\n order = ArrayOrdering(vars)\n if inputvar is None:\n inputvar = tt.vector(\"flat_view\", dtype=theano.config.floatX)\n if theano.config.compute_test_value != \"off\":\n if vars:\n inputvar.tag.test_value = flatten_list(vars).tag.test_value\n else:\n inputvar.tag.test_value = np.asarray([], inputvar.dtype)\n replacements = {\n self.named_vars[name]: inputvar[slc].reshape(shape).astype(dtype)\n for name, slc, shape, dtype in order.vmap\n }\n view = {vm.var: vm for vm in order.vmap}\n flat_view = FlatView(inputvar, replacements, view)\n return flat_view\n\n def check_test_point(self, test_point=None, round_vals=2):\n \"\"\"Checks log probability of test_point for all random variables in the model.\n\n Parameters\n ----------\n test_point: Point\n Point to be evaluated.\n if None, then all model.test_point is used\n round_vals: int\n Number of decimals to round log-probabilities\n\n Returns\n -------\n Pandas Series\n \"\"\"\n if test_point is None:\n test_point = self.test_point\n\n return Series(\n {\n RV.name: np.round(RV.logp(self.test_point), round_vals)\n for RV in self.basic_RVs\n },\n name=\"Log-probability of test_point\",\n )\n\n def _str_repr(self, formatting=\"plain\", **kwargs):\n all_rv = itertools.chain(self.unobserved_RVs, self.observed_RVs)\n\n if formatting == \"latex\":\n rv_reprs = [rv.__latex__() for rv in all_rv]\n rv_reprs = [rv_repr.replace(r\"\\sim\", r\"&\\sim &\").strip(\"$\")\n for rv_repr in rv_reprs if rv_repr is not None]\n return r\"\"\"$$\n \\begin{{array}}{{rcl}}\n {}\n \\end{{array}}\n $$\"\"\".format(\n \"\\\\\\\\\".join(rv_reprs))\n else:\n rv_reprs = [rv.__str__() for rv in all_rv]\n rv_reprs = [rv_repr for rv_repr in rv_reprs if not 'TransformedDistribution()' in rv_repr]\n # align vars on their ~\n names = [s[:s.index('~')-1] for s in rv_reprs]\n distrs = [s[s.index('~')+2:] for s in rv_reprs]\n maxlen = str(max(len(x) for x in names))\n rv_reprs = [('{name:>' + maxlen + '} ~ {distr}').format(name=n, distr=d)\n for n, d in zip(names, distrs)]\n return \"\\n\".join(rv_reprs)\n\n def __str__(self, **kwargs):\n return self._str_repr(formatting=\"plain\", **kwargs)\n\n def _repr_latex_(self, **kwargs):\n return self._str_repr(formatting=\"latex\", **kwargs)\n\n __latex__ = _repr_latex_\n\n\n# this is really disgusting, but it breaks a self-loop: I can't pass Model\n# itself as context class init arg.\nModel._context_class = Model\n\n\ndef set_data(new_data, model=None):\n \"\"\"Sets the value of one or more data container variables.\n\n Parameters\n ----------\n new_data: dict\n New values for the data containers. The keys of the dictionary are\n the variables' names in the model and the values are the objects\n with which to update.\n model: Model (optional if in `with` context)\n\n Examples\n --------\n\n .. code:: ipython\n\n >>> import pymc3 as pm\n >>> with pm.Model() as model:\n ... x = pm.Data('x', [1., 2., 3.])\n ... y = pm.Data('y', [1., 2., 3.])\n ... beta = pm.Normal('beta', 0, 1)\n ... obs = pm.Normal('obs', x * beta, 1, observed=y)\n ... trace = pm.sample(1000, tune=1000)\n\n Set the value of `x` to predict on new data.\n\n .. code:: ipython\n\n >>> with model:\n ... pm.set_data({'x': [5., 6., 9.]})\n ... 
y_test = pm.sample_posterior_predictive(trace)\n >>> y_test['obs'].mean(axis=0)\n array([4.6088569 , 5.54128318, 8.32953844])\n \"\"\"\n model = modelcontext(model)\n\n for variable_name, new_value in new_data.items():\n if isinstance(model[variable_name], SharedVariable):\n if isinstance(new_value, list):\n new_value = np.array(new_value)\n model[variable_name].set_value(pandas_to_array(new_value))\n else:\n message = (\n \"The variable `{}` must be defined as `pymc3.\"\n \"Data` inside the model to allow updating. The \"\n \"current type is: \"\n \"{}.\".format(variable_name, type(model[variable_name]))\n )\n raise TypeError(message)\n\n\ndef fn(outs, mode=None, model=None, *args, **kwargs):\n \"\"\"Compiles a Theano function which returns the values of ``outs`` and\n takes values of model vars as arguments.\n\n Parameters\n ----------\n outs: Theano variable or iterable of Theano variables\n mode: Theano compilation mode\n\n Returns\n -------\n Compiled Theano function\n \"\"\"\n model = modelcontext(model)\n return model.fn(outs, mode, *args, **kwargs)\n\n\ndef fastfn(outs, mode=None, model=None):\n \"\"\"Compiles a Theano function which returns ``outs`` and takes values of model\n vars as a dict as an argument.\n\n Parameters\n ----------\n outs: Theano variable or iterable of Theano variables\n mode: Theano compilation mode\n\n Returns\n -------\n Compiled Theano function as point function.\n \"\"\"\n model = modelcontext(model)\n return model.fastfn(outs, mode)\n\n\ndef Point(*args, **kwargs):\n \"\"\"Build a point. Uses same args as dict() does.\n Filters out variables not in the model. All keys are strings.\n\n Parameters\n ----------\n args, kwargs\n arguments to build a dict\n \"\"\"\n model = modelcontext(kwargs.pop(\"model\", None))\n args = list(args)\n try:\n d = dict(*args, **kwargs)\n except Exception as e:\n raise TypeError(f\"can't turn {args} and {kwargs} into a dict. 
{e}\")\n return {\n get_var_name(k): np.array(v) for k, v in d.items()\n if get_var_name(k) in map(get_var_name, model.vars)\n }\n\n\nclass FastPointFunc:\n \"\"\"Wraps so a function so it takes a dict of arguments instead of arguments.\"\"\"\n\n def __init__(self, f):\n self.f = f\n\n def __call__(self, state):\n return self.f(**state)\n\n\nclass LoosePointFunc:\n \"\"\"Wraps so a function so it takes a dict of arguments instead of arguments\n but can still take arguments.\"\"\"\n\n def __init__(self, f, model):\n self.f = f\n self.model = model\n\n def __call__(self, *args, **kwargs):\n point = Point(model=self.model, *args, **kwargs)\n return self.f(**point)\n\n\ncompilef = fastfn\n\n\ndef _get_scaling(total_size, shape, ndim):\n \"\"\"\n Gets scaling constant for logp\n\n Parameters\n ----------\n total_size: int or list[int]\n shape: shape\n shape to scale\n ndim: int\n ndim hint\n\n Returns\n -------\n scalar\n \"\"\"\n if total_size is None:\n coef = floatX(1)\n elif isinstance(total_size, int):\n if ndim >= 1:\n denom = shape[0]\n else:\n denom = 1\n coef = floatX(total_size) / floatX(denom)\n elif isinstance(total_size, (list, tuple)):\n if not all(\n isinstance(i, int)\n for i in total_size\n if (i is not Ellipsis and i is not None)\n ):\n raise TypeError(\n \"Unrecognized `total_size` type, expected \"\n \"int or list of ints, got %r\" % total_size\n )\n if Ellipsis in total_size:\n sep = total_size.index(Ellipsis)\n begin = total_size[:sep]\n end = total_size[sep + 1 :]\n if Ellipsis in end:\n raise ValueError(\n \"Double Ellipsis in `total_size` is restricted, got %r\" % total_size\n )\n else:\n begin = total_size\n end = []\n if (len(begin) + len(end)) > ndim:\n raise ValueError(\n \"Length of `total_size` is too big, \"\n \"number of scalings is bigger that ndim, got %r\" % total_size\n )\n elif (len(begin) + len(end)) == 0:\n return floatX(1)\n if len(end) > 0:\n shp_end = shape[-len(end) :]\n else:\n shp_end = np.asarray([])\n shp_begin = shape[: len(begin)]\n begin_coef = [\n floatX(t) / shp_begin[i] for i, t in enumerate(begin) if t is not None\n ]\n end_coef = [floatX(t) / shp_end[i] for i, t in enumerate(end) if t is not None]\n coefs = begin_coef + end_coef\n coef = tt.prod(coefs)\n else:\n raise TypeError(\n \"Unrecognized `total_size` type, expected \"\n \"int or list of ints, got %r\" % total_size\n )\n return tt.as_tensor(floatX(coef))\n\n\nclass FreeRV(Factor, PyMC3Variable):\n \"\"\"Unobserved random variable that a model is specified in terms of.\"\"\"\n\n dshape = None # type: Tuple[int, ...]\n size = None # type: int\n distribution = None # type: Optional[Distribution]\n model = None # type: Optional[Model]\n\n def __init__(\n self,\n type=None,\n owner=None,\n index=None,\n name=None,\n distribution=None,\n total_size=None,\n model=None,\n ):\n \"\"\"\n Parameters\n ----------\n type: theano type (optional)\n owner: theano owner (optional)\n name: str\n distribution: Distribution\n model: Model\n total_size: scalar Tensor (optional)\n needed for upscaling logp\n \"\"\"\n if type is None:\n type = distribution.type\n super().__init__(type, owner, index, name)\n\n if distribution is not None:\n self.dshape = tuple(distribution.shape)\n self.dsize = int(np.prod(distribution.shape))\n self.distribution = distribution\n self.tag.test_value = (\n np.ones(distribution.shape, distribution.dtype) * distribution.default()\n )\n self.logp_elemwiset = distribution.logp(self)\n # The logp might need scaling in minibatches.\n # This is done in `Factor`.\n 
self.logp_sum_unscaledt = distribution.logp_sum(self)\n self.logp_nojac_unscaledt = distribution.logp_nojac(self)\n self.total_size = total_size\n self.model = model\n self.scaling = _get_scaling(total_size, self.shape, self.ndim)\n\n incorporate_methods(\n source=distribution,\n destination=self,\n methods=[\"random\"],\n wrapper=InstanceMethod,\n )\n\n @property\n def init_value(self):\n \"\"\"Convenience attribute to return tag.test_value\"\"\"\n return self.tag.test_value\n\n\ndef pandas_to_array(data):\n if hasattr(data, \"values\"): # pandas\n if data.isnull().any().any(): # missing values\n ret = np.ma.MaskedArray(data.values, data.isnull().values)\n else:\n ret = data.values\n elif hasattr(data, \"mask\"):\n if data.mask.any():\n ret = data\n else: # empty mask\n ret = data.filled()\n elif isinstance(data, theano.gof.graph.Variable):\n ret = data\n elif sps.issparse(data):\n ret = data\n elif isgenerator(data):\n ret = generator(data)\n else:\n ret = np.asarray(data)\n\n # type handling to enable index variables when data is int:\n if hasattr(data, \"dtype\"):\n if \"int\" in str(data.dtype):\n return pm.intX(ret)\n # otherwise, assume float:\n else:\n return pm.floatX(ret)\n # needed for uses of this function other than with pm.Data:\n else:\n return pm.floatX(ret)\n\n\ndef as_tensor(data, name, model, distribution):\n dtype = distribution.dtype\n data = pandas_to_array(data).astype(dtype)\n\n if hasattr(data, \"mask\"):\n impute_message = (\n \"Data in {name} contains missing values and\"\n \" will be automatically imputed from the\"\n \" sampling distribution.\".format(name=name)\n )\n warnings.warn(impute_message, ImputationWarning)\n from .distributions import NoDistribution\n\n testval = np.broadcast_to(distribution.default(), data.shape)[data.mask]\n fakedist = NoDistribution.dist(\n shape=data.mask.sum(),\n dtype=dtype,\n testval=testval,\n parent_dist=distribution,\n )\n missing_values = FreeRV(\n name=name + \"_missing\", distribution=fakedist, model=model\n )\n constant = tt.as_tensor_variable(data.filled())\n\n dataTensor = tt.set_subtensor(constant[data.mask.nonzero()], missing_values)\n dataTensor.missing_values = missing_values\n return dataTensor\n elif sps.issparse(data):\n data = sparse.basic.as_sparse(data, name=name)\n data.missing_values = None\n return data\n else:\n data = tt.as_tensor_variable(data, name=name)\n data.missing_values = None\n return data\n\n\nclass ObservedRV(Factor, PyMC3Variable):\n \"\"\"Observed random variable that a model is specified in terms of.\n Potentially partially observed.\n \"\"\"\n\n def __init__(\n self,\n type=None,\n owner=None,\n index=None,\n name=None,\n data=None,\n distribution=None,\n total_size=None,\n model=None,\n ):\n \"\"\"\n Parameters\n ----------\n type: theano type (optional)\n owner: theano owner (optional)\n name: str\n distribution: Distribution\n model: Model\n total_size: scalar Tensor (optional)\n needed for upscaling logp\n \"\"\"\n from .distributions import TensorType\n\n if hasattr(data, \"type\") and isinstance(data.type, tt.TensorType):\n type = data.type\n\n if type is None:\n data = pandas_to_array(data)\n type = TensorType(distribution.dtype, data.shape)\n\n self.observations = data\n\n super().__init__(type, owner, index, name)\n\n if distribution is not None:\n data = as_tensor(data, name, model, distribution)\n\n self.missing_values = data.missing_values\n self.logp_elemwiset = distribution.logp(data)\n # The logp might need scaling in minibatches.\n # This is done in `Factor`.\n 
self.logp_sum_unscaledt = distribution.logp_sum(data)\n self.logp_nojac_unscaledt = distribution.logp_nojac(data)\n self.total_size = total_size\n self.model = model\n self.distribution = distribution\n\n # make this RV a view on the combined missing/nonmissing array\n theano.gof.Apply(theano.compile.view_op, inputs=[data], outputs=[self])\n self.tag.test_value = theano.compile.view_op(data).tag.test_value\n self.scaling = _get_scaling(total_size, data.shape, data.ndim)\n\n @property\n def init_value(self):\n \"\"\"Convenience attribute to return tag.test_value\"\"\"\n return self.tag.test_value\n\n\nclass MultiObservedRV(Factor):\n \"\"\"Observed random variable that a model is specified in terms of.\n Potentially partially observed.\n \"\"\"\n\n def __init__(self, name, data, distribution, total_size=None, model=None):\n \"\"\"\n Parameters\n ----------\n type: theano type (optional)\n owner: theano owner (optional)\n name: str\n distribution: Distribution\n model: Model\n total_size: scalar Tensor (optional)\n needed for upscaling logp\n \"\"\"\n self.name = name\n self.data = {\n name: as_tensor(data, name, model, distribution)\n for name, data in data.items()\n }\n\n self.missing_values = [\n datum.missing_values\n for datum in self.data.values()\n if datum.missing_values is not None\n ]\n self.logp_elemwiset = distribution.logp(**self.data)\n # The logp might need scaling in minibatches.\n # This is done in `Factor`.\n self.logp_sum_unscaledt = distribution.logp_sum(**self.data)\n self.logp_nojac_unscaledt = distribution.logp_nojac(**self.data)\n self.total_size = total_size\n self.model = model\n self.distribution = distribution\n self.scaling = _get_scaling(\n total_size, self.logp_elemwiset.shape, self.logp_elemwiset.ndim\n )\n\n # Make hashable by id for draw_values\n def __hash__(self):\n return id(self)\n\n def __eq__(self, other):\n \"Use object identity for MultiObservedRV equality.\"\n # This is likely a Bad Thing, but changing it would break a lot of code.\n return self is other\n\n def __ne__(self, other):\n return not self == other\n\n\ndef _walk_up_rv(rv, formatting='plain'):\n \"\"\"Walk up theano graph to get inputs for deterministic RV.\"\"\"\n all_rvs = []\n parents = list(itertools.chain(*[j.inputs for j in rv.get_parents()]))\n if parents:\n for parent in parents:\n all_rvs.extend(_walk_up_rv(parent, formatting=formatting))\n else:\n name = rv.name if rv.name else \"Constant\"\n fmt = r\"\\text{{{name}}}\" if formatting == \"latex\" else \"{name}\"\n all_rvs.append(fmt.format(name=name))\n return all_rvs\n\n\ndef _repr_deterministic_rv(rv, formatting='plain'):\n \"\"\"Make latex string for a Deterministic variable\"\"\"\n if formatting == 'latex':\n return r\"$\\text{{{name}}} \\sim \\text{{Deterministic}}({args})$\".format(\n name=rv.name, args=r\",~\".join(_walk_up_rv(rv, formatting=formatting)))\n else:\n return \"{name} ~ Deterministic({args})\".format(\n name=rv.name, args=\", \".join(_walk_up_rv(rv, formatting=formatting)))\n\n\ndef Deterministic(name, var, model=None, dims=None):\n \"\"\"Create a named deterministic variable\n\n Parameters\n ----------\n name: str\n var: theano variables\n\n Returns\n -------\n var: var, with name attribute\n \"\"\"\n model = modelcontext(model)\n var = var.copy(model.name_for(name))\n model.deterministics.append(var)\n model.add_random_variable(var, dims)\n var._repr_latex_ = functools.partial(_repr_deterministic_rv, var, formatting='latex')\n var.__latex__ = var._repr_latex_\n\n # simply assigning var.__str__ is not 
enough, since str() will default to the class-\n # defined __str__ anyway; see https://stackoverflow.com/a/5918210/1692028\n old_type = type(var)\n new_type = type(old_type.__name__ + '_pymc3_Deterministic', (old_type,),\n {'__str__': functools.partial(_repr_deterministic_rv, var, formatting='plain')})\n var.__class__ = new_type\n\n return var\n\n\ndef Potential(name, var, model=None):\n \"\"\"Add an arbitrary factor potential to the model likelihood\n\n Parameters\n ----------\n name: str\n var: theano variables\n\n Returns\n -------\n var: var, with name attribute\n \"\"\"\n model = modelcontext(model)\n var.name = model.name_for(name)\n model.potentials.append(var)\n model.add_random_variable(var)\n return var\n\n\nclass TransformedRV(PyMC3Variable):\n \"\"\"\n Parameters\n ----------\n\n type: theano type (optional)\n owner: theano owner (optional)\n name: str\n distribution: Distribution\n model: Model\n total_size: scalar Tensor (optional)\n needed for upscaling logp\n \"\"\"\n\n def __init__(\n self,\n type=None,\n owner=None,\n index=None,\n name=None,\n distribution=None,\n model=None,\n transform=None,\n total_size=None,\n ):\n if type is None:\n type = distribution.type\n super().__init__(type, owner, index, name)\n\n self.transformation = transform\n\n if distribution is not None:\n self.model = model\n self.distribution = distribution\n self.dshape = tuple(distribution.shape)\n self.dsize = int(np.prod(distribution.shape))\n\n transformed_name = get_transformed_name(name, transform)\n\n self.transformed = model.Var(\n transformed_name, transform.apply(distribution), total_size=total_size\n )\n\n normalRV = transform.backward(self.transformed)\n\n theano.Apply(theano.compile.view_op, inputs=[normalRV], outputs=[self])\n self.tag.test_value = normalRV.tag.test_value\n self.scaling = _get_scaling(total_size, self.shape, self.ndim)\n incorporate_methods(\n source=distribution,\n destination=self,\n methods=[\"random\"],\n wrapper=InstanceMethod,\n )\n\n @property\n def init_value(self):\n \"\"\"Convenience attribute to return tag.test_value\"\"\"\n return self.tag.test_value\n\n\ndef as_iterargs(data):\n if isinstance(data, tuple):\n return data\n else:\n return [data]\n\n\ndef all_continuous(vars):\n \"\"\"Check that vars not include discrete variables, excepting\n ObservedRVs. \"\"\"\n vars_ = [var for var in vars if not isinstance(var, pm.model.ObservedRV)]\n if any([var.dtype in pm.discrete_types for var in vars_]):\n return False\n else:\n return True\n",
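The model.py source above defines the Model context manager (via ContextMeta), the Factor log-probability properties, and ValueGradFunction. As a reading aid, the following minimal sketch is not part of that file; it assumes a PyMC3 3.x / Theano environment and uses illustrative variable names to show how those pieces are typically exercised together:

import numpy as np
import pymc3 as pm

with pm.Model() as model:                      # ContextMeta pushes `model` onto the context stack
    sigma = pm.HalfCauchy("sigma", beta=10, testval=1.0)
    mu = pm.Normal("mu", mu=0, sigma=1)
    pm.Normal("obs", mu=mu, sigma=sigma, observed=np.random.randn(100))
    pm.Deterministic("mu_sq", mu ** 2)         # registered through Model.add_random_variable

print(model.basic_RVs)                         # free + observed RVs
print(model.check_test_point())                # per-RV log-probability at model.test_point

# ValueGradFunction compiled through Model.logp_dlogp_function
f = model.logp_dlogp_function()
f.set_extra_values({})                         # this model has no non-gradient extra variables
x0 = f.dict_to_array(model.test_point)         # flatten the point dict into one array
logp, grad = f(x0)                             # value and gradient of the joint log-probability

The same flattening is exposed on the model itself through the memoized `bijection` property, which wraps ArrayOrdering and DictToArrayBijection around the free random variables.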
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"NumPy array trace backend\n\nStore sampling values in memory as a NumPy array.\n\"\"\"\nimport glob\nimport json\nimport os\nimport shutil\nfrom typing import Optional, Dict, Any, List\nimport warnings\n\nimport numpy as np\nfrom pymc3.backends import base\nfrom pymc3.backends.base import MultiTrace\nfrom pymc3.model import Model, modelcontext\nfrom pymc3.exceptions import TraceDirectoryError\n\n\ndef save_trace(trace: MultiTrace, directory: Optional[str]=None, overwrite=False) -> str:\n \"\"\"Save multitrace to file.\n\n TODO: Also save warnings.\n\n This is a custom data format for PyMC3 traces. Each chain goes inside\n a directory, and each directory contains a metadata json file, and a\n numpy compressed file. See https://docs.scipy.org/doc/numpy/neps/npy-format.html\n for more information about this format.\n\n Parameters\n ----------\n trace: pm.MultiTrace\n trace to save to disk\n directory: str (optional)\n path to a directory to save the trace\n overwrite: bool (default False)\n whether to overwrite an existing directory.\n\n Returns\n -------\n str, path to the directory where the trace was saved\n \"\"\"\n warnings.warn(\n 'The `save_trace` function will soon be removed.'\n 'Instead, use `arviz.to_netcdf` to save traces.',\n DeprecationWarning,\n )\n\n if directory is None:\n directory = '.pymc_{}.trace'\n idx = 1\n while os.path.exists(directory.format(idx)):\n idx += 1\n directory = directory.format(idx)\n\n if os.path.isdir(directory):\n if overwrite:\n shutil.rmtree(directory)\n else:\n raise OSError('Cautiously refusing to overwrite the already existing {}! Please supply '\n 'a different directory, or set `overwrite=True`'.format(directory))\n os.makedirs(directory)\n\n for chain, ndarray in trace._straces.items():\n SerializeNDArray(os.path.join(directory, str(chain))).save(ndarray)\n return directory\n\n\ndef load_trace(directory: str, model=None) -> MultiTrace:\n \"\"\"Loads a multitrace that has been written to file.\n\n A the model used for the trace must be passed in, or the command\n must be run in a model context.\n\n Parameters\n ----------\n directory: str\n Path to a pymc3 serialized trace\n model: pm.Model (optional)\n Model used to create the trace. 
Can also be inferred from context\n\n Returns\n -------\n pm.Multitrace that was saved in the directory\n \"\"\"\n warnings.warn(\n 'The `load_trace` function will soon be removed.'\n 'Instead, use `arviz.from_netcdf` to load traces.',\n DeprecationWarning,\n )\n straces = []\n for subdir in glob.glob(os.path.join(directory, '*')):\n if os.path.isdir(subdir):\n straces.append(SerializeNDArray(subdir).load(model))\n if not straces:\n raise TraceDirectoryError(\"%s is not a PyMC3 saved chain directory.\" % directory)\n return base.MultiTrace(straces)\n\n\nclass SerializeNDArray:\n metadata_file = 'metadata.json'\n samples_file = 'samples.npz'\n metadata_path = None # type: str\n samples_path = None # type: str\n\n def __init__(self, directory: str):\n \"\"\"Helper to save and load NDArray objects\"\"\"\n warnings.warn(\n 'The `SerializeNDArray` class will soon be removed. '\n 'Instead, use ArviZ to save/load traces.',\n DeprecationWarning,\n )\n self.directory = directory\n self.metadata_path = os.path.join(self.directory, self.metadata_file)\n self.samples_path = os.path.join(self.directory, self.samples_file)\n\n @staticmethod\n def to_metadata(ndarray):\n \"\"\"Extract ndarray metadata into json-serializable content\"\"\"\n if ndarray._stats is None:\n stats = ndarray._stats\n sampler_vars = None\n else:\n stats = []\n sampler_vars = []\n for stat in ndarray._stats:\n stats.append({key: value.tolist() for key, value in stat.items()})\n sampler_vars.append({key: str(value.dtype) for key, value in stat.items()})\n\n\n metadata = {\n 'draw_idx': ndarray.draw_idx,\n 'draws': ndarray.draws,\n '_stats': stats,\n 'chain': ndarray.chain,\n 'sampler_vars': sampler_vars\n }\n return metadata\n\n def save(self, ndarray):\n \"\"\"Serialize a ndarray to file\n\n The goal here is to be modestly safer and more portable than a\n pickle file. The expense is that the model code must be available\n to reload the multitrace.\n \"\"\"\n if not isinstance(ndarray, NDArray):\n raise TypeError('Can only save NDArray')\n\n if os.path.isdir(self.directory):\n shutil.rmtree(self.directory)\n\n os.mkdir(self.directory)\n\n with open(self.metadata_path, 'w') as buff:\n json.dump(SerializeNDArray.to_metadata(ndarray), buff)\n\n np.savez_compressed(self.samples_path, **ndarray.samples)\n\n def load(self, model: Model) -> 'NDArray':\n \"\"\"Load the saved ndarray from file\"\"\"\n if not os.path.exists(self.samples_path) or not os.path.exists(self.metadata_path):\n raise TraceDirectoryError(\"%s is not a trace directory\" % self.directory)\n\n new_trace = NDArray(model=model)\n with open(self.metadata_path) as buff:\n metadata = json.load(buff)\n\n metadata['_stats'] = [{k: np.array(v) for k, v in stat.items()} for stat in metadata['_stats']]\n\n # it seems like at least some old traces don't have 'sampler_vars'\n try:\n sampler_vars = metadata.pop('sampler_vars')\n new_trace._set_sampler_vars(sampler_vars)\n except KeyError:\n pass\n\n for key, value in metadata.items():\n setattr(new_trace, key, value)\n new_trace.samples = dict(np.load(self.samples_path))\n return new_trace\n\n\nclass NDArray(base.BaseTrace):\n \"\"\"NDArray trace object\n\n Parameters\n ----------\n name: str\n Name of backend. This has no meaning for the NDArray backend.\n model: Model\n If None, the model is taken from the `with` context.\n vars: list of variables\n Sampling values will be stored for these variables. 
If None,\n `model.unobserved_RVs` is used.\n \"\"\"\n\n supports_sampler_stats = True\n\n def __init__(self, name=None, model=None, vars=None, test_point=None):\n super().__init__(name, model, vars, test_point)\n self.draw_idx = 0\n self.draws = None\n self.samples = {}\n self._stats = None\n\n # Sampling methods\n\n def setup(self, draws, chain, sampler_vars=None) -> None:\n \"\"\"Perform chain-specific setup.\n\n Parameters\n ----------\n draws: int\n Expected number of draws\n chain: int\n Chain number\n sampler_vars: list of dicts\n Names and dtypes of the variables that are\n exported by the samplers.\n \"\"\"\n super().setup(draws, chain, sampler_vars)\n\n self.chain = chain\n if self.samples: # Concatenate new array if chain is already present.\n old_draws = len(self)\n self.draws = old_draws + draws\n self.draw_idx = old_draws\n for varname, shape in self.var_shapes.items():\n old_var_samples = self.samples[varname]\n new_var_samples = np.zeros((draws, ) + shape,\n self.var_dtypes[varname])\n self.samples[varname] = np.concatenate((old_var_samples,\n new_var_samples),\n axis=0)\n else: # Otherwise, make array of zeros for each variable.\n self.draws = draws\n for varname, shape in self.var_shapes.items():\n self.samples[varname] = np.zeros((draws, ) + shape,\n dtype=self.var_dtypes[varname])\n\n if sampler_vars is None:\n return\n\n if self._stats is None:\n self._stats = []\n for sampler in sampler_vars:\n data = dict() # type: Dict[str, np.ndarray]\n self._stats.append(data)\n for varname, dtype in sampler.items():\n data[varname] = np.zeros(draws, dtype=dtype)\n else:\n for data, vars in zip(self._stats, sampler_vars):\n if vars.keys() != data.keys():\n raise ValueError(\"Sampler vars can't change\")\n old_draws = len(self)\n for varname, dtype in vars.items():\n old = data[varname]\n new = np.zeros(draws, dtype=dtype)\n data[varname] = np.concatenate([old, new])\n\n def record(self, point, sampler_stats=None) -> None:\n \"\"\"Record results of a sampling iteration.\n\n Parameters\n ----------\n point: dict\n Values mapped to variable names\n \"\"\"\n for varname, value in zip(self.varnames, self.fn(point)):\n self.samples[varname][self.draw_idx] = value\n\n if self._stats is not None and sampler_stats is None:\n raise ValueError(\"Expected sampler_stats\")\n if self._stats is None and sampler_stats is not None:\n raise ValueError(\"Unknown sampler_stats\")\n if sampler_stats is not None:\n for data, vars in zip(self._stats, sampler_stats):\n for key, val in vars.items():\n data[key][self.draw_idx] = val\n self.draw_idx += 1\n\n def _get_sampler_stats(self, varname, sampler_idx, burn, thin):\n return self._stats[sampler_idx][varname][burn::thin]\n\n def close(self):\n if self.draw_idx == self.draws:\n return\n # Remove trailing zeros if interrupted before completed all\n # draws.\n self.samples = {var: vtrace[:self.draw_idx]\n for var, vtrace in self.samples.items()}\n if self._stats is not None:\n self._stats = [\n {var: trace[:self.draw_idx] for var, trace in stats.items()}\n for stats in self._stats]\n\n # Selection methods\n\n def __len__(self):\n if not self.samples: # `setup` has not been called.\n return 0\n return self.draw_idx\n\n def get_values(self, varname: str, burn=0, thin=1) -> np.ndarray:\n \"\"\"Get values from trace.\n\n Parameters\n ----------\n varname: str\n burn: int\n thin: int\n\n Returns\n -------\n A NumPy array\n \"\"\"\n return self.samples[varname][burn::thin]\n\n def _slice(self, idx):\n # Slicing directly instead of using _slice_as_ndarray to\n # 
support stop value in slice (which is needed by\n # iter_sample).\n\n # Only the first `draw_idx` value are valid because of preallocation\n idx = slice(*idx.indices(len(self)))\n\n sliced = NDArray(model=self.model, vars=self.vars)\n sliced.chain = self.chain\n sliced.samples = {varname: values[idx]\n for varname, values in self.samples.items()}\n sliced.sampler_vars = self.sampler_vars\n sliced.draw_idx = (idx.stop - idx.start) // idx.step\n\n if self._stats is None:\n return sliced\n sliced._stats = []\n for vars in self._stats:\n var_sliced = {}\n sliced._stats.append(var_sliced)\n for key, vals in vars.items():\n var_sliced[key] = vals[idx]\n\n return sliced\n\n def point(self, idx) -> Dict[str, Any]:\n \"\"\"Return dictionary of point values at `idx` for current chain\n with variable names as keys.\n \"\"\"\n idx = int(idx)\n return {varname: values[idx]\n for varname, values in self.samples.items()}\n\n\ndef _slice_as_ndarray(strace, idx):\n sliced = NDArray(model=strace.model, vars=strace.vars)\n sliced.chain = strace.chain\n\n # Happy path where we do not need to load everything from the trace\n if ((idx.step is None or idx.step >= 1) and\n (idx.stop is None or idx.stop == len(strace))):\n start, stop, step = idx.indices(len(strace))\n sliced.samples = {v: strace.get_values(v, burn=idx.start, thin=idx.step)\n for v in strace.varnames}\n sliced.draw_idx = (stop - start) // step\n else:\n start, stop, step = idx.indices(len(strace))\n sliced.samples = {v: strace.get_values(v)[start:stop:step]\n for v in strace.varnames}\n sliced.draw_idx = (stop - start) // step\n\n return sliced\n\n\ndef point_list_to_multitrace(point_list: List[Dict[str, np.ndarray]], model: Optional[Model]=None) -> MultiTrace:\n '''transform point list into MultiTrace'''\n _model = modelcontext(model)\n varnames = list(point_list[0].keys())\n with _model:\n chain = NDArray(model=_model, vars=[_model[vn] for vn in varnames])\n chain.setup(draws=len(point_list), chain=0)\n # since we are simply loading a trace by hand, we need only a vacuous function for\n # chain.record() to use. This crushes the default.\n def point_fun(point):\n return [point[vn] for vn in varnames]\n chain.fn = point_fun\n for point in point_list:\n chain.record(point)\n return MultiTrace([chain])\n",
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\npymc3.blocking\n\nClasses for working with subsets of parameters.\n\"\"\"\nimport copy\nimport numpy as np\nimport collections\n\nfrom .util import get_var_name\n\n__all__ = ['ArrayOrdering', 'DictToArrayBijection', 'DictToVarBijection']\n\nVarMap = collections.namedtuple('VarMap', 'var, slc, shp, dtyp')\nDataMap = collections.namedtuple('DataMap', 'list_ind, slc, shp, dtype, name')\n\n\n# TODO Classes and methods need to be fully documented.\n\n\nclass ArrayOrdering:\n \"\"\"\n An ordering for an array space\n \"\"\"\n\n def __init__(self, vars):\n self.vmap = []\n self.by_name = {}\n self.size = 0\n\n for var in vars:\n name = var.name\n if name is None:\n raise ValueError('Unnamed variable in ArrayOrdering.')\n if name in self.by_name:\n raise ValueError('Name of variable not unique: %s.' % name)\n if not hasattr(var, 'dshape') or not hasattr(var, 'dsize'):\n raise ValueError('Shape of variable not known %s' % name)\n\n slc = slice(self.size, self.size + var.dsize)\n varmap = VarMap(name, slc, var.dshape, var.dtype)\n self.vmap.append(varmap)\n self.by_name[name] = varmap\n self.size += var.dsize\n\n def __getitem__(self, key):\n return self.by_name[key]\n\n\nclass DictToArrayBijection:\n \"\"\"\n A mapping between a dict space and an array space\n \"\"\"\n\n def __init__(self, ordering, dpoint):\n self.ordering = ordering\n self.dpt = dpoint\n\n # determine smallest float dtype that will fit all data\n if all([x.dtyp == 'float16' for x in ordering.vmap]):\n self.array_dtype = 'float16'\n elif all([x.dtyp == 'float32' for x in ordering.vmap]):\n self.array_dtype = 'float32'\n else:\n self.array_dtype = 'float64'\n\n def map(self, dpt):\n \"\"\"\n Maps value from dict space to array space\n\n Parameters\n ----------\n dpt: dict\n \"\"\"\n apt = np.empty(self.ordering.size, dtype=self.array_dtype)\n for var, slc, _, _ in self.ordering.vmap:\n apt[slc] = dpt[var].ravel()\n return apt\n\n def rmap(self, apt):\n \"\"\"\n Maps value from array space to dict space\n\n Parameters\n ----------\n apt: array\n \"\"\"\n dpt = self.dpt.copy()\n\n for var, slc, shp, dtyp in self.ordering.vmap:\n dpt[var] = np.atleast_1d(apt)[slc].reshape(shp).astype(dtyp)\n\n return dpt\n\n def mapf(self, f):\n \"\"\"\n function f: DictSpace -> T to ArraySpace -> T\n\n Parameters\n ----------\n\n f: dict -> T\n\n Returns\n -------\n f: array -> T\n \"\"\"\n return Compose(f, self.rmap)\n\n\nclass ListArrayOrdering:\n \"\"\"\n An ordering for a list to an array space. 
Takes also non theano.tensors.\n Modified from pymc3 blocking.\n\n Parameters\n ----------\n list_arrays: list\n :class:`numpy.ndarray` or :class:`theano.tensor.Tensor`\n intype: str\n defining the input type 'tensor' or 'numpy'\n \"\"\"\n\n def __init__(self, list_arrays, intype='numpy'):\n if intype not in {'tensor', 'numpy'}:\n raise ValueError(\"intype not in {'tensor', 'numpy'}\")\n self.vmap = []\n self.intype = intype\n self.size = 0\n for array in list_arrays:\n if self.intype == 'tensor':\n name = array.name\n array = array.tag.test_value\n else:\n name = 'numpy'\n\n slc = slice(self.size, self.size + array.size)\n self.vmap.append(DataMap(\n len(self.vmap), slc, array.shape, array.dtype, name))\n self.size += array.size\n\n\nclass ListToArrayBijection:\n \"\"\"\n A mapping between a List of arrays and an array space\n\n Parameters\n ----------\n ordering: :class:`ListArrayOrdering`\n list_arrays: list\n of :class:`numpy.ndarray`\n \"\"\"\n\n def __init__(self, ordering, list_arrays):\n self.ordering = ordering\n self.list_arrays = list_arrays\n\n def fmap(self, list_arrays):\n \"\"\"\n Maps values from List space to array space\n\n Parameters\n ----------\n list_arrays: list\n of :class:`numpy.ndarray`\n\n Returns\n -------\n array: :class:`numpy.ndarray`\n single array comprising all the input arrays\n \"\"\"\n\n array = np.empty(self.ordering.size)\n for list_ind, slc, _, _, _ in self.ordering.vmap:\n array[slc] = list_arrays[list_ind].ravel()\n return array\n\n def dmap(self, dpt):\n \"\"\"\n Maps values from dict space to List space\n\n Parameters\n ----------\n list_arrays: list\n of :class:`numpy.ndarray`\n\n Returns\n -------\n point\n \"\"\"\n a_list = copy.copy(self.list_arrays)\n\n for list_ind, _, _, _, var in self.ordering.vmap:\n a_list[list_ind] = dpt[var].ravel()\n\n return a_list\n\n def rmap(self, array):\n \"\"\"\n Maps value from array space to List space\n Inverse operation of fmap.\n\n Parameters\n ----------\n array: :class:`numpy.ndarray`\n\n Returns\n -------\n a_list: list\n of :class:`numpy.ndarray`\n \"\"\"\n\n a_list = copy.copy(self.list_arrays)\n\n for list_ind, slc, shp, dtype, _ in self.ordering.vmap:\n a_list[list_ind] = np.atleast_1d(\n array)[slc].reshape(shp).astype(dtype)\n\n return a_list\n\n\nclass DictToVarBijection:\n \"\"\"\n A mapping between a dict space and the array space for one element within the dict space\n \"\"\"\n\n def __init__(self, var, idx, dpoint):\n self.var = get_var_name(var)\n self.idx = idx\n self.dpt = dpoint\n\n def map(self, dpt):\n return dpt[self.var][self.idx]\n\n def rmap(self, apt):\n dpt = self.dpt.copy()\n\n dvar = dpt[self.var].copy()\n dvar[self.idx] = apt\n\n dpt[self.var] = dvar\n\n return dpt\n\n def mapf(self, f):\n return Compose(f, self.rmap)\n\n\nclass Compose:\n \"\"\"\n Compose two functions in a pickleable way\n \"\"\"\n\n def __init__(self, fa, fb):\n self.fa = fa\n self.fb = fb\n\n def __call__(self, x):\n return self.fa(self.fb(x))\n",
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pymc3 as pm\nfrom .helpers import SeededTest\nimport numpy as np\nimport pandas as pd\nimport pytest\n\n\nclass TestData(SeededTest):\n def test_deterministic(self):\n data_values = np.array([0.5, 0.4, 5, 2])\n with pm.Model() as model:\n X = pm.Data(\"X\", data_values)\n pm.Normal(\"y\", 0, 1, observed=X)\n model.logp(model.test_point)\n\n def test_sample(self):\n x = np.random.normal(size=100)\n y = x + np.random.normal(scale=1e-2, size=100)\n\n x_pred = np.linspace(-3, 3, 200, dtype=\"float32\")\n\n with pm.Model():\n x_shared = pm.Data(\"x_shared\", x)\n b = pm.Normal(\"b\", 0.0, 10.0)\n pm.Normal(\"obs\", b * x_shared, np.sqrt(1e-2), observed=y)\n\n prior_trace0 = pm.sample_prior_predictive(1000)\n trace = pm.sample(1000, init=None, tune=1000, chains=1)\n pp_trace0 = pm.sample_posterior_predictive(trace, 1000)\n pp_trace01 = pm.fast_sample_posterior_predictive(trace, 1000)\n\n x_shared.set_value(x_pred)\n prior_trace1 = pm.sample_prior_predictive(1000)\n pp_trace1 = pm.sample_posterior_predictive(trace, samples=1000)\n pp_trace11 = pm.fast_sample_posterior_predictive(trace, samples=1000)\n\n assert prior_trace0[\"b\"].shape == (1000,)\n assert prior_trace0[\"obs\"].shape == (1000, 100)\n assert prior_trace1[\"obs\"].shape == (1000, 200)\n\n assert pp_trace0[\"obs\"].shape == (1000, 100)\n assert pp_trace01[\"obs\"].shape == (1000, 100)\n\n np.testing.assert_allclose(x, pp_trace0[\"obs\"].mean(axis=0), atol=1e-1)\n np.testing.assert_allclose(x, pp_trace01[\"obs\"].mean(axis=0), atol=1e-1)\n\n assert pp_trace1[\"obs\"].shape == (1000, 200)\n assert pp_trace11[\"obs\"].shape == (1000, 200)\n\n np.testing.assert_allclose(x_pred, pp_trace1[\"obs\"].mean(axis=0), atol=1e-1)\n np.testing.assert_allclose(x_pred, pp_trace11[\"obs\"].mean(axis=0), atol=1e-1)\n\n def test_sample_posterior_predictive_after_set_data(self):\n with pm.Model() as model:\n x = pm.Data(\"x\", [1.0, 2.0, 3.0])\n y = pm.Data(\"y\", [1.0, 2.0, 3.0])\n beta = pm.Normal(\"beta\", 0, 10.0)\n pm.Normal(\"obs\", beta * x, np.sqrt(1e-2), observed=y)\n trace = pm.sample(1000, tune=1000, chains=1)\n # Predict on new data.\n with model:\n x_test = [5, 6, 9]\n pm.set_data(new_data={\"x\": x_test})\n y_test = pm.sample_posterior_predictive(trace)\n y_test1 = pm.fast_sample_posterior_predictive(trace)\n\n assert y_test[\"obs\"].shape == (1000, 3)\n assert y_test1[\"obs\"].shape == (1000, 3)\n np.testing.assert_allclose(x_test, y_test[\"obs\"].mean(axis=0), atol=1e-1)\n np.testing.assert_allclose(x_test, y_test1[\"obs\"].mean(axis=0), atol=1e-1)\n\n def test_sample_after_set_data(self):\n with pm.Model() as model:\n x = pm.Data(\"x\", [1.0, 2.0, 3.0])\n y = pm.Data(\"y\", [1.0, 2.0, 3.0])\n beta = pm.Normal(\"beta\", 0, 10.0)\n pm.Normal(\"obs\", beta * x, np.sqrt(1e-2), observed=y)\n pm.sample(1000, init=None, tune=1000, chains=1)\n # Predict on new data.\n new_x = [5.0, 6.0, 9.0]\n new_y = [5.0, 6.0, 9.0]\n with model:\n 
pm.set_data(new_data={\"x\": new_x, \"y\": new_y})\n new_trace = pm.sample(1000, init=None, tune=1000, chains=1)\n pp_trace = pm.sample_posterior_predictive(new_trace, 1000)\n pp_tracef = pm.fast_sample_posterior_predictive(new_trace, 1000)\n\n assert pp_trace[\"obs\"].shape == (1000, 3)\n assert pp_tracef[\"obs\"].shape == (1000, 3)\n np.testing.assert_allclose(new_y, pp_trace[\"obs\"].mean(axis=0), atol=1e-1)\n np.testing.assert_allclose(new_y, pp_tracef[\"obs\"].mean(axis=0), atol=1e-1)\n\n def test_shared_data_as_index(self):\n \"\"\"\n Allow pm.Data to be used for index variables, i.e with integers as well as floats.\n See https://github.com/pymc-devs/pymc3/issues/3813\n \"\"\"\n with pm.Model() as model:\n index = pm.Data(\"index\", [2, 0, 1, 0, 2])\n y = pm.Data(\"y\", [1.0, 2.0, 3.0, 2.0, 1.0])\n alpha = pm.Normal(\"alpha\", 0, 1.5, shape=3)\n pm.Normal(\"obs\", alpha[index], np.sqrt(1e-2), observed=y)\n\n prior_trace = pm.sample_prior_predictive(1000, var_names=[\"alpha\"])\n trace = pm.sample(1000, init=None, tune=1000, chains=1)\n\n # Predict on new data\n new_index = np.array([0, 1, 2])\n new_y = [5.0, 6.0, 9.0]\n with model:\n pm.set_data(new_data={\"index\": new_index, \"y\": new_y})\n pp_trace = pm.sample_posterior_predictive(\n trace, 1000, var_names=[\"alpha\", \"obs\"]\n )\n pp_tracef = pm.fast_sample_posterior_predictive(\n trace, 1000, var_names=[\"alpha\", \"obs\"]\n )\n\n assert prior_trace[\"alpha\"].shape == (1000, 3)\n assert trace[\"alpha\"].shape == (1000, 3)\n assert pp_trace[\"alpha\"].shape == (1000, 3)\n assert pp_trace[\"obs\"].shape == (1000, 3)\n assert pp_tracef[\"alpha\"].shape == (1000, 3)\n assert pp_tracef[\"obs\"].shape == (1000, 3)\n\n def test_shared_data_as_rv_input(self):\n \"\"\"\n Allow pm.Data to be used as input for other RVs.\n See https://github.com/pymc-devs/pymc3/issues/3842\n \"\"\"\n with pm.Model() as m:\n x = pm.Data(\"x\", [1.0, 2.0, 3.0])\n _ = pm.Normal(\"y\", mu=x, shape=3)\n trace = pm.sample(chains=1)\n\n np.testing.assert_allclose(np.array([1.0, 2.0, 3.0]), x.get_value(), atol=1e-1)\n np.testing.assert_allclose(\n np.array([1.0, 2.0, 3.0]), trace[\"y\"].mean(0), atol=1e-1\n )\n\n with m:\n pm.set_data({\"x\": np.array([2.0, 4.0, 6.0])})\n trace = pm.sample(chains=1)\n\n np.testing.assert_allclose(np.array([2.0, 4.0, 6.0]), x.get_value(), atol=1e-1)\n np.testing.assert_allclose(\n np.array([2.0, 4.0, 6.0]), trace[\"y\"].mean(0), atol=1e-1\n )\n\n def test_creation_of_data_outside_model_context(self):\n with pytest.raises((IndexError, TypeError)) as error:\n pm.Data(\"data\", [1.1, 2.2, 3.3])\n error.match(\"No model on context stack\")\n\n def test_set_data_to_non_data_container_variables(self):\n with pm.Model() as model:\n x = np.array([1.0, 2.0, 3.0])\n y = np.array([1.0, 2.0, 3.0])\n beta = pm.Normal(\"beta\", 0, 10.0)\n pm.Normal(\"obs\", beta * x, np.sqrt(1e-2), observed=y)\n pm.sample(1000, init=None, tune=1000, chains=1)\n with pytest.raises(TypeError) as error:\n pm.set_data({\"beta\": [1.1, 2.2, 3.3]}, model=model)\n error.match(\"defined as `pymc3.Data` inside the model\")\n\n def test_model_to_graphviz_for_model_with_data_container(self):\n with pm.Model() as model:\n x = pm.Data(\"x\", [1.0, 2.0, 3.0])\n y = pm.Data(\"y\", [1.0, 2.0, 3.0])\n beta = pm.Normal(\"beta\", 0, 10.0)\n pm.Normal(\"obs\", beta * x, np.sqrt(1e-2), observed=y)\n pm.sample(1000, init=None, tune=1000, chains=1)\n\n g = pm.model_to_graphviz(model)\n\n # Data node rendered correctly?\n text = 'x [label=\"x\\n~\\nData\" shape=box style=\"rounded, 
filled\"]'\n assert text in g.source\n # Didn't break ordinary variables?\n text = 'beta [label=\"beta\\n~\\nNormal\"]'\n assert text in g.source\n text = 'obs [label=\"obs\\n~\\nNormal\" style=filled]'\n assert text in g.source\n\n def test_explicit_coords(self):\n N_rows = 5\n N_cols = 7\n data = np.random.uniform(size=(N_rows, N_cols))\n coords = {\n \"rows\": [f\"R{r+1}\" for r in range(N_rows)],\n \"columns\": [f\"C{c+1}\" for c in range(N_cols)]\n }\n # pass coordinates explicitly, use numpy array in Data container\n with pm.Model(coords=coords) as pmodel:\n pm.Data('observations', data, dims=(\"rows\", \"columns\"))\n\n assert \"rows\" in pmodel.coords\n assert pmodel.coords[\"rows\"] == ['R1', 'R2', 'R3', 'R4', 'R5']\n assert \"columns\" in pmodel.coords\n assert pmodel.coords[\"columns\"] == ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7']\n assert pmodel.RV_dims == {'observations': ('rows', 'columns')}\n\n def test_implicit_coords_series(self):\n ser_sales = pd.Series(\n data=np.random.randint(low=0, high=30, size=22),\n index=pd.date_range(start=\"2020-05-01\", periods=22, freq=\"24H\", name=\"date\"),\n name=\"sales\"\n )\n with pm.Model() as pmodel:\n pm.Data(\"sales\", ser_sales, dims=\"date\", export_index_as_coords=True)\n\n assert \"date\" in pmodel.coords\n assert len(pmodel.coords[\"date\"]) == 22\n assert pmodel.RV_dims == {'sales': ('date',)}\n\n def test_implicit_coords_dataframe(self):\n N_rows = 5\n N_cols = 7\n df_data = pd.DataFrame()\n for c in range(N_cols):\n df_data[f'Column {c+1}'] = np.random.normal(size=(N_rows,))\n df_data.index.name = 'rows'\n df_data.columns.name = 'columns'\n\n # infer coordinates from index and columns of the DataFrame\n with pm.Model() as pmodel:\n pm.Data('observations', df_data, dims=(\"rows\", \"columns\"), export_index_as_coords=True)\n\n assert \"rows\" in pmodel.coords\n assert \"columns\" in pmodel.coords\n assert pmodel.RV_dims == {'observations': ('rows', 'columns')}\n\n\ndef test_data_naming():\n \"\"\"\n This is a test for issue #3793 -- `Data` objects in named models are\n not given model-relative names.\n \"\"\"\n with pm.Model(\"named_model\") as model:\n x = pm.Data(\"x\", [1.0, 2.0, 3.0])\n y = pm.Normal(\"y\")\n assert y.name == \"named_model_y\"\n assert x.name == \"named_model_x\"\n"
] | [
[
"numpy.diag",
"numpy.dot",
"scipy.stats.pareto.rvs",
"numpy.sqrt",
"numpy.linspace",
"numpy.all",
"scipy.stats.wald.rvs",
"scipy.stats.uniform.rvs",
"scipy.stats.bernoulli.rvs",
"numpy.random.randn",
"scipy.stats.nbinom.rvs",
"scipy.stats.gumbel_r.rvs",
"scipy.stats.vonmises.rvs",
"numpy.exp",
"scipy.stats.chi2.rvs",
"scipy.stats.triang.rvs",
"numpy.unique",
"numpy.arange",
"scipy.stats.halfcauchy.rvs",
"scipy.stats.matrix_normal.rvs",
"scipy.stats.cauchy.rvs",
"numpy.atleast_1d",
"numpy.testing.assert_almost_equal",
"numpy.random.multinomial",
"numpy.random.poisson",
"scipy.stats.randint.rvs",
"scipy.linalg.inv",
"scipy.stats.t.rvs",
"scipy.stats.dirichlet.rvs",
"scipy.stats.halfnorm.rvs",
"numpy.log",
"scipy.stats.logistic.rvs",
"numpy.random.choice",
"scipy.stats.multivariate_normal.rvs",
"scipy.stats.invgamma.rvs",
"scipy.stats.norm.rvs",
"numpy.identity",
"scipy.stats.moyal.rvs",
"scipy.stats.laplace.rvs",
"scipy.stats.beta.rvs",
"scipy.stats.norm.pdf",
"numpy.random.exponential",
"numpy.ones",
"scipy.stats.skewnorm.rvs",
"scipy.stats.gamma.rvs",
"numpy.random.normal",
"scipy.stats.chisquare",
"numpy.random.uniform",
"scipy.stats.truncnorm.rvs"
],
[
"numpy.can_cast",
"scipy.sparse.issparse",
"numpy.asarray",
"numpy.empty_like",
"numpy.issubdtype",
"numpy.ones",
"numpy.shape",
"numpy.prod",
"numpy.copyto",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.concatenate",
"numpy.savez_compressed",
"numpy.load",
"numpy.array",
"numpy.zeros"
],
[
"numpy.atleast_1d",
"numpy.empty"
],
[
"numpy.sqrt",
"numpy.linspace",
"pandas.DataFrame",
"numpy.random.normal",
"pandas.date_range",
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
natsutan/cocytus | [
"53840021eb5a84ab197d96fa37e8b43b0b255566"
] | [
"tools/cqt_diff/cqt_diff_vgg16.py"
] | [
"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn\nimport sys\n\nkeras_dir = '../../example/vgg16/keras/output/'\ncqt_dir = '../../example/vgg16/c_fix/output/'\nqp_file = '../../examplevgg16/c_fix/weight/'\n\nfix16mode = True\n\ndef layer_dump(i, q, fnum = 3):\n \"\"\"\n 引数で指定されたレイヤーの、Keras出力と、コキュートス出力を\n 比較して、画像に落とす。比較するフィルターは先頭から、fnum\n まで。\n 出力はoutputディレクトリーに行われる。\n :param i:int レイヤー番号\n :param q:int 出力データのQ位置\n :param fnum:int 画像化するフィルター数\n :return:\n \"\"\"\n\n for f in range(fnum):\n plt.figure()\n graph_name = 'l%02d_%d' % (i, f)\n kname = os.path.join(keras_dir+'l%02d_%d.npy' % (i, f))\n cname = os.path.join(cqt_dir+'l%02d_%d.npy' % (i, f))\n k_data_1 = np.load(kname)\n c_data_1 = np.load(cname)\n k_data = k_data_1.flatten()\n c_data = c_data_1.flatten()\n\n\n if fix16mode:\n c_data = c_data.astype(np.float32) / (2 ** q)\n\n x = np.arange(len(k_data))\n plt.plot(x, k_data, color='b', label='Keras')\n plt.plot(x, c_data, color='r', label='Cocytus')\n plt.title(graph_name)\n plt.legend()\n\n img_fname = os.path.join('output', graph_name+'.png')\n print('save %s' % img_fname)\n plt.savefig(img_fname)\n\n plt.figure()\n plt.plot(x, k_data - c_data, color='g', label='diff')\n plt.title(graph_name+'diff')\n plt.legend()\n img_fname = os.path.join('output', graph_name + '_diff.png')\n plt.savefig(img_fname)\n\n\ndef read_qpfile(odir):\n \"\"\"qpファイルを読み込み、入力、出力、重みのQ位置をリストにして返す\"\"\"\n iqs = []\n wqs = []\n oqs = []\n fname = os.path.join(odir, 'qp.txt')\n\n for i, l in enumerate(open(fname).readlines()):\n if i < 1:\n continue\n words = l.split(',')\n iqs.append(int(words[0]))\n oqs.append(int(words[1]))\n wqs.append(int(words[2]))\n\n return iqs, oqs, wqs\n\n\n#iqs, oqs, wqs = read_qpfile(qp_file)\n\n#for i in range(31):\n# layer_dump(i, oqs[i])\nlayer_dump(15, 3)\n\nprint('finish')"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"numpy.load",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
utahnlp/therapist-observer | [
"31eaf9a5c82c6d0f9a62427ac5df030d81547472",
"31eaf9a5c82c6d0f9a62427ac5df030d81547472"
] | [
"tensorflow/classes/bilm/model.py",
"tensorflow/classes/bilm/data.py"
] | [
"\nimport numpy as np\nimport tensorflow as tf\nimport h5py\nimport json\nimport re\n\nfrom .data import UnicodeCharsVocabulary, Batcher\n\nDTYPE = 'float32'\nDTYPE_INT = 'int64'\n\n\nclass BidirectionalLanguageModel(object):\n def __init__(\n self,\n options_file,\n weight_file,\n use_character_inputs=True,\n embedding_weight_file=None,\n max_batch_size=128,\n ):\n '''\n Creates the language model computational graph and loads weights\n\n Two options for input type:\n (1) To use character inputs (paired with Batcher)\n pass use_character_inputs=True, and ids_placeholder\n of shape (None, None, max_characters_per_token)\n to __call__\n (2) To use token ids as input (paired with TokenBatcher),\n pass use_character_inputs=False and ids_placeholder\n of shape (None, None) to __call__.\n In this case, embedding_weight_file is also required input\n\n options_file: location of the json formatted file with\n LM hyperparameters\n weight_file: location of the hdf5 file with LM weights\n use_character_inputs: if True, then use character ids as input,\n otherwise use token ids\n max_batch_size: the maximum allowable batch size \n '''\n with open(options_file, 'r') as fin:\n options = json.load(fin)\n\n if not use_character_inputs:\n if embedding_weight_file is None:\n raise ValueError(\n \"embedding_weight_file is required input with \"\n \"not use_character_inputs\"\n )\n\n self._options = options\n self._weight_file = weight_file\n self._embedding_weight_file = embedding_weight_file\n self._use_character_inputs = use_character_inputs\n self._max_batch_size = max_batch_size\n\n self._ops = {}\n self._graphs = {}\n\n def __call__(self, ids_placeholder):\n '''\n Given the input character ids (or token ids), returns a dictionary\n with tensorflow ops:\n\n {'lm_embeddings': embedding_op,\n 'lengths': sequence_lengths_op,\n 'mask': op to compute mask}\n\n embedding_op computes the LM embeddings and is shape\n (None, 3, None, 1024)\n lengths_op computes the sequence lengths and is shape (None, )\n mask computes the sequence mask and is shape (None, None)\n\n ids_placeholder: a tf.placeholder of type int32.\n If use_character_inputs=True, it is shape\n (None, None, max_characters_per_token) and holds the input\n character ids for a batch\n If use_character_input=False, it is shape (None, None) and\n holds the input token ids for a batch\n '''\n if ids_placeholder in self._ops:\n # have already created ops for this placeholder, just return them\n ret = self._ops[ids_placeholder]\n\n else:\n # need to create the graph\n if len(self._ops) == 0:\n # first time creating the graph, don't reuse variables\n lm_graph = BidirectionalLanguageModelGraph(\n self._options,\n self._weight_file,\n ids_placeholder,\n embedding_weight_file=self._embedding_weight_file,\n use_character_inputs=self._use_character_inputs,\n max_batch_size=self._max_batch_size)\n else:\n with tf.variable_scope('', reuse=True):\n lm_graph = BidirectionalLanguageModelGraph(\n self._options,\n self._weight_file,\n ids_placeholder,\n embedding_weight_file=self._embedding_weight_file,\n use_character_inputs=self._use_character_inputs,\n max_batch_size=self._max_batch_size)\n\n ops = self._build_ops(lm_graph)\n self._ops[ids_placeholder] = ops\n self._graphs[ids_placeholder] = lm_graph\n ret = ops\n\n return ret\n\n def _build_ops(self, lm_graph):\n with tf.control_dependencies([lm_graph.update_state_op]):\n # get the LM embeddings\n token_embeddings = lm_graph.embedding\n layers = [\n tf.concat([token_embeddings, token_embeddings], axis=2)\n ]\n\n 
n_lm_layers = len(lm_graph.lstm_outputs['forward'])\n for i in range(n_lm_layers):\n layers.append(\n tf.concat(\n [lm_graph.lstm_outputs['forward'][i],\n lm_graph.lstm_outputs['backward'][i]],\n axis=-1\n )\n )\n\n # The layers include the BOS/EOS tokens. Remove them\n sequence_length_wo_bos_eos = lm_graph.sequence_lengths - 2\n layers_without_bos_eos = []\n for layer in layers:\n layer_wo_bos_eos = layer[:, 1:, :]\n layer_wo_bos_eos = tf.reverse_sequence(\n layer_wo_bos_eos, \n lm_graph.sequence_lengths - 1,\n seq_axis=1,\n batch_axis=0,\n )\n layer_wo_bos_eos = layer_wo_bos_eos[:, 1:, :]\n layer_wo_bos_eos = tf.reverse_sequence(\n layer_wo_bos_eos,\n sequence_length_wo_bos_eos,\n seq_axis=1,\n batch_axis=0,\n )\n layers_without_bos_eos.append(layer_wo_bos_eos)\n\n # concatenate the layers\n lm_embeddings = tf.concat(\n [tf.expand_dims(t, axis=1) for t in layers_without_bos_eos],\n axis=1\n )\n\n # get the mask op without bos/eos.\n # tf doesn't support reversing boolean tensors, so cast\n # to int then back\n mask_wo_bos_eos = tf.cast(lm_graph.mask[:, 1:], 'int32')\n mask_wo_bos_eos = tf.reverse_sequence(\n mask_wo_bos_eos,\n lm_graph.sequence_lengths - 1,\n seq_axis=1,\n batch_axis=0,\n )\n mask_wo_bos_eos = mask_wo_bos_eos[:, 1:]\n mask_wo_bos_eos = tf.reverse_sequence(\n mask_wo_bos_eos,\n sequence_length_wo_bos_eos,\n seq_axis=1,\n batch_axis=0,\n )\n mask_wo_bos_eos = tf.cast(mask_wo_bos_eos, 'bool')\n\n return {\n 'lm_embeddings': lm_embeddings, \n 'lengths': sequence_length_wo_bos_eos,\n 'token_embeddings': lm_graph.embedding,\n 'mask': mask_wo_bos_eos,\n }\n\n\ndef _pretrained_initializer(varname, weight_file, embedding_weight_file=None):\n '''\n We'll stub out all the initializers in the pretrained LM with\n a function that loads the weights from the file\n '''\n weight_name_map = {}\n for i in range(2):\n for j in range(8): # if we decide to add more layers\n root = 'RNN_{}/RNN/MultiRNNCell/Cell{}'.format(i, j)\n weight_name_map[root + '/rnn/lstm_cell/kernel'] = \\\n root + '/LSTMCell/W_0'\n weight_name_map[root + '/rnn/lstm_cell/bias'] = \\\n root + '/LSTMCell/B'\n weight_name_map[root + '/rnn/lstm_cell/projection/kernel'] = \\\n root + '/LSTMCell/W_P_0'\n\n # convert the graph name to that in the checkpoint\n varname_in_file = varname[5:]\n if varname_in_file.startswith('RNN'):\n varname_in_file = weight_name_map[varname_in_file]\n\n if varname_in_file == 'embedding':\n with h5py.File(embedding_weight_file, 'r') as fin:\n # Have added a special 0 index for padding not present\n # in the original model.\n embed_weights = fin[varname_in_file][...]\n weights = np.zeros(\n (embed_weights.shape[0] + 1, embed_weights.shape[1]),\n dtype=DTYPE\n )\n weights[1:, :] = embed_weights\n else:\n with h5py.File(weight_file, 'r') as fin:\n if varname_in_file == 'char_embed':\n # Have added a special 0 index for padding not present\n # in the original model.\n char_embed_weights = fin[varname_in_file][...]\n weights = np.zeros(\n (char_embed_weights.shape[0] + 1,\n char_embed_weights.shape[1]),\n dtype=DTYPE\n )\n weights[1:, :] = char_embed_weights\n else:\n weights = fin[varname_in_file][...]\n\n # Tensorflow initializers are callables that accept a shape parameter\n # and some optional kwargs\n def ret(shape, **kwargs):\n if list(shape) != list(weights.shape):\n raise ValueError(\n \"Invalid shape initializing {0}, got {1}, expected {2}\".format(\n varname_in_file, shape, weights.shape)\n )\n return weights\n\n return ret\n\n\nclass BidirectionalLanguageModelGraph(object):\n '''\n Creates 
the computational graph and holds the ops necessary for runnint\n a bidirectional language model\n '''\n def __init__(self, options, weight_file, ids_placeholder,\n use_character_inputs=True, embedding_weight_file=None,\n max_batch_size=128):\n\n self.options = options\n self._max_batch_size = max_batch_size\n self.ids_placeholder = ids_placeholder\n self.use_character_inputs = use_character_inputs\n\n # this custom_getter will make all variables not trainable and\n # override the default initializer\n def custom_getter(getter, name, *args, **kwargs):\n kwargs['trainable'] = False\n kwargs['initializer'] = _pretrained_initializer(\n name, weight_file, embedding_weight_file\n )\n return getter(name, *args, **kwargs)\n\n if embedding_weight_file is not None:\n # get the vocab size\n with h5py.File(embedding_weight_file, 'r') as fin:\n # +1 for padding\n self._n_tokens_vocab = fin['embedding'].shape[0] + 1\n else:\n self._n_tokens_vocab = None\n\n with tf.variable_scope('bilm', custom_getter=custom_getter):\n self._build()\n\n def _build(self):\n if self.use_character_inputs:\n self._build_word_char_embeddings()\n else:\n self._build_word_embeddings()\n self._build_lstms()\n\n def _build_word_char_embeddings(self):\n '''\n options contains key 'char_cnn': {\n\n 'n_characters': 262,\n\n # includes the start / end characters\n 'max_characters_per_token': 50,\n\n 'filters': [\n [1, 32],\n [2, 32],\n [3, 64],\n [4, 128],\n [5, 256],\n [6, 512],\n [7, 512]\n ],\n 'activation': 'tanh',\n\n # for the character embedding\n 'embedding': {'dim': 16}\n\n # for highway layers\n # if omitted, then no highway layers\n 'n_highway': 2,\n }\n '''\n projection_dim = self.options['lstm']['projection_dim']\n\n cnn_options = self.options['char_cnn']\n filters = cnn_options['filters']\n n_filters = sum(f[1] for f in filters)\n max_chars = cnn_options['max_characters_per_token']\n char_embed_dim = cnn_options['embedding']['dim']\n n_chars = cnn_options['n_characters']\n if n_chars != 262:\n raise InvalidNumberOfCharacters(\n \"Set n_characters=262 after training see the README.md\"\n )\n if cnn_options['activation'] == 'tanh':\n activation = tf.nn.tanh\n elif cnn_options['activation'] == 'relu':\n activation = tf.nn.relu\n\n # the character embeddings\n with tf.device(\"/cpu:0\"):\n self.embedding_weights = tf.get_variable(\n \"char_embed\", [n_chars, char_embed_dim],\n dtype=DTYPE,\n initializer=tf.random_uniform_initializer(-1.0, 1.0)\n )\n # shape (batch_size, unroll_steps, max_chars, embed_dim)\n self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights,\n self.ids_placeholder)\n\n # the convolutions\n def make_convolutions(inp):\n with tf.variable_scope('CNN') as scope:\n convolutions = []\n for i, (width, num) in enumerate(filters):\n if cnn_options['activation'] == 'relu':\n # He initialization for ReLU activation\n # with char embeddings init between -1 and 1\n #w_init = tf.random_normal_initializer(\n # mean=0.0,\n # stddev=np.sqrt(2.0 / (width * char_embed_dim))\n #)\n\n # Kim et al 2015, +/- 0.05\n w_init = tf.random_uniform_initializer(\n minval=-0.05, maxval=0.05)\n elif cnn_options['activation'] == 'tanh':\n # glorot init\n w_init = tf.random_normal_initializer(\n mean=0.0,\n stddev=np.sqrt(1.0 / (width * char_embed_dim))\n )\n w = tf.get_variable(\n \"W_cnn_%s\" % i,\n [1, width, char_embed_dim, num],\n initializer=w_init,\n dtype=DTYPE)\n b = tf.get_variable(\n \"b_cnn_%s\" % i, [num], dtype=DTYPE,\n initializer=tf.constant_initializer(0.0))\n\n conv = tf.nn.conv2d(\n inp, w,\n strides=[1, 
1, 1, 1],\n padding=\"VALID\") + b\n # now max pool\n conv = tf.nn.max_pool(\n conv, [1, 1, max_chars-width+1, 1],\n [1, 1, 1, 1], 'VALID')\n\n # activation\n conv = activation(conv)\n conv = tf.squeeze(conv, squeeze_dims=[2])\n\n convolutions.append(conv)\n\n return tf.concat(convolutions, 2)\n\n embedding = make_convolutions(self.char_embedding)\n\n # for highway and projection layers\n n_highway = cnn_options.get('n_highway')\n use_highway = n_highway is not None and n_highway > 0\n use_proj = n_filters != projection_dim\n\n if use_highway or use_proj:\n # reshape from (batch_size, n_tokens, dim) to (-1, dim)\n batch_size_n_tokens = tf.shape(embedding)[0:2]\n embedding = tf.reshape(embedding, [-1, n_filters])\n\n # set up weights for projection\n if use_proj:\n assert n_filters > projection_dim\n with tf.variable_scope('CNN_proj') as scope:\n W_proj_cnn = tf.get_variable(\n \"W_proj\", [n_filters, projection_dim],\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=np.sqrt(1.0 / n_filters)),\n dtype=DTYPE)\n b_proj_cnn = tf.get_variable(\n \"b_proj\", [projection_dim],\n initializer=tf.constant_initializer(0.0),\n dtype=DTYPE)\n\n # apply highways layers\n def high(x, ww_carry, bb_carry, ww_tr, bb_tr):\n carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry)\n transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr)\n return carry_gate * transform_gate + (1.0 - carry_gate) * x\n\n if use_highway:\n highway_dim = n_filters\n\n for i in range(n_highway):\n with tf.variable_scope('CNN_high_%s' % i) as scope:\n W_carry = tf.get_variable(\n 'W_carry', [highway_dim, highway_dim],\n # glorit init\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),\n dtype=DTYPE)\n b_carry = tf.get_variable(\n 'b_carry', [highway_dim],\n initializer=tf.constant_initializer(-2.0),\n dtype=DTYPE)\n W_transform = tf.get_variable(\n 'W_transform', [highway_dim, highway_dim],\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),\n dtype=DTYPE)\n b_transform = tf.get_variable(\n 'b_transform', [highway_dim],\n initializer=tf.constant_initializer(0.0),\n dtype=DTYPE)\n\n embedding = high(embedding, W_carry, b_carry,\n W_transform, b_transform)\n\n # finally project down if needed\n if use_proj:\n embedding = tf.matmul(embedding, W_proj_cnn) + b_proj_cnn\n\n # reshape back to (batch_size, tokens, dim)\n if use_highway or use_proj:\n shp = tf.concat([batch_size_n_tokens, [projection_dim]], axis=0)\n embedding = tf.reshape(embedding, shp)\n\n # at last assign attributes for remainder of the model\n self.embedding = embedding\n\n\n def _build_word_embeddings(self):\n projection_dim = self.options['lstm']['projection_dim']\n\n # the word embeddings\n with tf.device(\"/cpu:0\"):\n self.embedding_weights = tf.get_variable(\n \"embedding\", [self._n_tokens_vocab, projection_dim],\n dtype=DTYPE,\n )\n self.embedding = tf.nn.embedding_lookup(self.embedding_weights,\n self.ids_placeholder)\n\n\n def _build_lstms(self):\n # now the LSTMs\n # these will collect the initial states for the forward\n # (and reverse LSTMs if we are doing bidirectional)\n\n # parse the options\n lstm_dim = self.options['lstm']['dim']\n projection_dim = self.options['lstm']['projection_dim']\n n_lstm_layers = self.options['lstm'].get('n_layers', 1)\n cell_clip = self.options['lstm'].get('cell_clip')\n proj_clip = self.options['lstm'].get('proj_clip')\n use_skip_connections = self.options['lstm']['use_skip_connections']\n if use_skip_connections:\n 
print(\"USING SKIP CONNECTIONS\")\n else:\n print(\"NOT USING SKIP CONNECTIONS\")\n\n # the sequence lengths from input mask\n if self.use_character_inputs:\n mask = tf.reduce_any(self.ids_placeholder > 0, axis=2)\n else:\n mask = self.ids_placeholder > 0\n sequence_lengths = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)\n batch_size = tf.shape(sequence_lengths)[0]\n\n # for each direction, we'll store tensors for each layer\n self.lstm_outputs = {'forward': [], 'backward': []}\n self.lstm_state_sizes = {'forward': [], 'backward': []}\n self.lstm_init_states = {'forward': [], 'backward': []}\n self.lstm_final_states = {'forward': [], 'backward': []}\n\n update_ops = []\n for direction in ['forward', 'backward']:\n if direction == 'forward':\n layer_input = self.embedding\n else:\n layer_input = tf.reverse_sequence(\n self.embedding,\n sequence_lengths,\n seq_axis=1,\n batch_axis=0\n )\n\n for i in range(n_lstm_layers):\n if projection_dim < lstm_dim:\n # are projecting down output\n lstm_cell = tf.nn.rnn_cell.LSTMCell(\n lstm_dim, num_proj=projection_dim,\n cell_clip=cell_clip, proj_clip=proj_clip)\n else:\n lstm_cell = tf.nn.rnn_cell.LSTMCell(\n lstm_dim,\n cell_clip=cell_clip, proj_clip=proj_clip)\n\n if use_skip_connections:\n # ResidualWrapper adds inputs to outputs\n if i == 0:\n # don't add skip connection from token embedding to\n # 1st layer output\n pass\n else:\n # add a skip connection\n lstm_cell = tf.nn.rnn_cell.ResidualWrapper(lstm_cell)\n\n # collect the input state, run the dynamic rnn, collect\n # the output\n state_size = lstm_cell.state_size\n # the LSTMs are stateful. To support multiple batch sizes,\n # we'll allocate size for states up to max_batch_size,\n # then use the first batch_size entries for each batch\n init_states = [\n tf.Variable(\n tf.zeros([self._max_batch_size, dim]),\n trainable=False\n )\n for dim in lstm_cell.state_size\n ]\n batch_init_states = [\n state[:batch_size, :] for state in init_states\n ]\n\n if direction == 'forward':\n i_direction = 0\n else:\n i_direction = 1\n variable_scope_name = 'RNN_{0}/RNN/MultiRNNCell/Cell{1}'.format(\n i_direction, i)\n with tf.variable_scope(variable_scope_name):\n layer_output, final_state = tf.nn.dynamic_rnn(\n lstm_cell,\n layer_input,\n sequence_length=sequence_lengths,\n initial_state=tf.nn.rnn_cell.LSTMStateTuple(\n *batch_init_states),\n )\n\n self.lstm_state_sizes[direction].append(lstm_cell.state_size)\n self.lstm_init_states[direction].append(init_states)\n self.lstm_final_states[direction].append(final_state)\n if direction == 'forward':\n self.lstm_outputs[direction].append(layer_output)\n else:\n self.lstm_outputs[direction].append(\n tf.reverse_sequence(\n layer_output,\n sequence_lengths,\n seq_axis=1,\n batch_axis=0\n )\n )\n\n with tf.control_dependencies([layer_output]):\n # update the initial states\n for i in range(2):\n new_state = tf.concat(\n [final_state[i][:batch_size, :],\n init_states[i][batch_size:, :]], axis=0)\n state_update_op = tf.assign(init_states[i], new_state)\n update_ops.append(state_update_op)\n \n layer_input = layer_output\n\n self.mask = mask\n self.sequence_lengths = sequence_lengths\n self.update_state_op = tf.group(*update_ops)\n\n\ndef dump_token_embeddings(vocab_file, options_file, weight_file, outfile):\n '''\n Given an input vocabulary file, dump all the token embeddings to the\n outfile. 
The result can be used as the embedding_weight_file when\n constructing a BidirectionalLanguageModel.\n '''\n with open(options_file, 'r') as fin:\n options = json.load(fin)\n max_word_length = options['char_cnn']['max_characters_per_token']\n\n vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)\n batcher = Batcher(vocab_file, max_word_length)\n\n ids_placeholder = tf.placeholder('int32',\n shape=(None, None, max_word_length)\n )\n model = BidirectionalLanguageModel(options_file, weight_file)\n embedding_op = model(ids_placeholder)['token_embeddings']\n\n n_tokens = vocab.size\n embed_dim = int(embedding_op.shape[2])\n\n embeddings = np.zeros((n_tokens, embed_dim), dtype=DTYPE)\n\n config = tf.ConfigProto(allow_soft_placement=True)\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n for k in range(n_tokens):\n token = vocab.id_to_word(k)\n char_ids = batcher.batch_sentences([[token]])[0, 1, :].reshape(\n 1, 1, -1)\n embeddings[k, :] = sess.run(\n embedding_op, feed_dict={ids_placeholder: char_ids}\n )\n\n with h5py.File(outfile, 'w') as fout:\n ds = fout.create_dataset(\n 'embedding', embeddings.shape, dtype='float32', data=embeddings\n )\n\ndef dump_bilm_embeddings(vocab_file, dataset_file, options_file,\n weight_file, outfile):\n with open(options_file, 'r') as fin:\n options = json.load(fin)\n max_word_length = options['char_cnn']['max_characters_per_token']\n\n vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)\n batcher = Batcher(vocab_file, max_word_length)\n\n ids_placeholder = tf.placeholder('int32',\n shape=(None, None, max_word_length)\n )\n model = BidirectionalLanguageModel(options_file, weight_file)\n ops = model(ids_placeholder)\n\n config = tf.ConfigProto(allow_soft_placement=True)\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n sentence_id = 0\n with open(dataset_file, 'r') as fin, h5py.File(outfile, 'w') as fout:\n for line in fin:\n sentence = line.strip().split()\n char_ids = batcher.batch_sentences([sentence])\n embeddings = sess.run(\n ops['lm_embeddings'], feed_dict={ids_placeholder: char_ids}\n )\n ds = fout.create_dataset(\n '{}'.format(sentence_id),\n embeddings.shape[1:], dtype='float32',\n data=embeddings[0, :, :, :]\n )\n\n sentence_id += 1\n\n",
"# originally based on https://github.com/tensorflow/models/tree/master/lm_1b\nimport glob\nimport random\n\nimport numpy as np\n\nclass Vocabulary(object):\n '''\n A token vocabulary. Holds a map from token to ids and provides\n a method for encoding text to a sequence of ids.\n '''\n def __init__(self, filename, validate_file=False):\n '''\n filename = the vocabulary file. It is a flat text file with one\n (normalized) token per line. In addition, the file should also\n contain the special tokens <S>, </S>, <UNK> (case sensitive).\n '''\n self._id_to_word = []\n self._word_to_id = {}\n self._unk = -1\n self._bos = -1\n self._eos = -1\n\n with open(filename) as f:\n idx = 0\n for line in f:\n word_name = line.strip()\n if word_name == '<S>':\n self._bos = idx\n elif word_name == '</S>':\n self._eos = idx\n elif word_name == '<UNK>':\n self._unk = idx\n if word_name == '!!!MAXTERMID':\n continue\n\n self._id_to_word.append(word_name)\n self._word_to_id[word_name] = idx\n idx += 1\n\n # check to ensure file has special tokens\n if validate_file:\n if self._bos == -1 or self._eos == -1 or self._unk == -1:\n raise ValueError(\"Ensure the vocabulary file has \"\n \"<S>, </S>, <UNK> tokens\")\n\n @property\n def bos(self):\n return self._bos\n\n @property\n def eos(self):\n return self._eos\n\n @property\n def unk(self):\n return self._unk\n\n @property\n def size(self):\n return len(self._id_to_word)\n\n def word_to_id(self, word):\n if word in self._word_to_id:\n return self._word_to_id[word]\n return self.unk\n\n def id_to_word(self, cur_id):\n return self._id_to_word[cur_id]\n\n def decode(self, cur_ids):\n \"\"\"Convert a list of ids to a sentence, with space inserted.\"\"\"\n return ' '.join([self.id_to_word(cur_id) for cur_id in cur_ids])\n\n def encode(self, sentence, reverse=False, split=True):\n \"\"\"Convert a sentence to a list of ids, with special tokens added.\n Sentence is a single string with tokens separated by whitespace.\n\n If reverse, then the sentence is assumed to be reversed, and\n this method will swap the BOS/EOS tokens appropriately.\"\"\"\n\n if split:\n word_ids = [\n self.word_to_id(cur_word) for cur_word in sentence.split()\n ]\n else:\n word_ids = [self.word_to_id(cur_word) for cur_word in sentence]\n\n if reverse:\n return np.array([self.eos] + word_ids + [self.bos], dtype=np.int32)\n else:\n return np.array([self.bos] + word_ids + [self.eos], dtype=np.int32)\n\n\nclass UnicodeCharsVocabulary(Vocabulary):\n \"\"\"Vocabulary containing character-level and word level information.\n\n Has a word vocabulary that is used to lookup word ids and\n a character id that is used to map words to arrays of character ids.\n\n The character ids are defined by ord(c) for c in word.encode('utf-8')\n This limits the total number of possible char ids to 256.\n To this we add 5 additional special ids: begin sentence, end sentence,\n begin word, end word and padding.\n\n WARNING: for prediction, we add +1 to the output ids from this\n class to create a special padding id (=0). As a result, we suggest\n you use the `Batcher`, `TokenBatcher`, and `LMDataset` classes instead\n of this lower level class. 
If you are using this lower level class,\n then be sure to add the +1 appropriately, otherwise embeddings computed\n from the pre-trained model will be useless.\n \"\"\"\n def __init__(self, filename, max_word_length, **kwargs):\n super(UnicodeCharsVocabulary, self).__init__(filename, **kwargs)\n self._max_word_length = max_word_length\n\n # char ids 0-255 come from utf-8 encoding bytes\n # assign 256-300 to special chars\n self.bos_char = 256 # <begin sentence>\n self.eos_char = 257 # <end sentence>\n self.bow_char = 258 # <begin word>\n self.eow_char = 259 # <end word>\n self.pad_char = 260 # <padding>\n\n num_words = len(self._id_to_word)\n\n self._word_char_ids = np.zeros([num_words, max_word_length],\n dtype=np.int32)\n\n # the charcter representation of the begin/end of sentence characters\n def _make_bos_eos(c):\n r = np.zeros([self.max_word_length], dtype=np.int32)\n r[:] = self.pad_char\n r[0] = self.bow_char\n r[1] = c\n r[2] = self.eow_char\n return r\n self.bos_chars = _make_bos_eos(self.bos_char)\n self.eos_chars = _make_bos_eos(self.eos_char)\n\n for i, word in enumerate(self._id_to_word):\n self._word_char_ids[i] = self._convert_word_to_char_ids(word)\n\n self._word_char_ids[self.bos] = self.bos_chars\n self._word_char_ids[self.eos] = self.eos_chars\n # TODO: properly handle <UNK>\n\n @property\n def word_char_ids(self):\n return self._word_char_ids\n\n @property\n def max_word_length(self):\n return self._max_word_length\n\n def _convert_word_to_char_ids(self, word):\n code = np.zeros([self.max_word_length], dtype=np.int32)\n code[:] = self.pad_char\n\n word_encoded = word.encode('utf-8', 'ignore')[:(self.max_word_length-2)]\n code[0] = self.bow_char\n k = 0\n for k, chr_id in enumerate(word_encoded, start=1):\n code[k] = ord(chr_id)\n code[k + 1] = self.eow_char\n\n return code\n\n def word_to_char_ids(self, word):\n if word in self._word_to_id:\n return self._word_char_ids[self._word_to_id[word]]\n else:\n return self._convert_word_to_char_ids(word)\n\n def encode_chars(self, sentence, reverse=False, split=True):\n '''\n Encode the sentence as a white space delimited string of tokens.\n '''\n if split:\n chars_ids = [self.word_to_char_ids(cur_word)\n for cur_word in sentence.split()]\n else:\n chars_ids = [self.word_to_char_ids(cur_word)\n for cur_word in sentence]\n if reverse:\n return np.vstack([self.eos_chars] + chars_ids + [self.bos_chars])\n else:\n return np.vstack([self.bos_chars] + chars_ids + [self.eos_chars])\n\n\nclass Batcher(object):\n ''' \n Batch sentences of tokenized text into character id matrices.\n '''\n def __init__(self, lm_vocab_file, max_token_length):\n '''\n lm_vocab_file = the language model vocabulary file (one line per\n token)\n max_token_length = the maximum number of characters in each token\n '''\n self._lm_vocab = UnicodeCharsVocabulary(\n lm_vocab_file, max_token_length\n )\n self._max_token_length = max_token_length\n\n def batch_sentences(self, sentences):\n '''\n Batch the sentences as character ids\n Each sentence is a list of tokens without <s> or </s>, e.g.\n [['The', 'first', 'sentence', '.'], ['Second', '.']]\n '''\n n_sentences = len(sentences)\n max_length = max(len(sentence) for sentence in sentences) + 2\n\n X_char_ids = np.zeros(\n (n_sentences, max_length, self._max_token_length),\n dtype=np.int64\n )\n\n for k, sent in enumerate(sentences):\n length = len(sent) + 2\n char_ids_without_mask = self._lm_vocab.encode_chars(\n sent, split=False)\n # add one so that 0 is the mask value\n X_char_ids[k, :length, :] = 
char_ids_without_mask + 1\n\n return X_char_ids\n\n\nclass TokenBatcher(object):\n ''' \n Batch sentences of tokenized text into token id matrices.\n '''\n def __init__(self, lm_vocab_file):\n '''\n lm_vocab_file = the language model vocabulary file (one line per\n token)\n '''\n self._lm_vocab = Vocabulary(lm_vocab_file)\n\n def batch_sentences(self, sentences):\n '''\n Batch the sentences as character ids\n Each sentence is a list of tokens without <s> or </s>, e.g.\n [['The', 'first', 'sentence', '.'], ['Second', '.']]\n '''\n n_sentences = len(sentences)\n max_length = max(len(sentence) for sentence in sentences) + 2\n\n X_ids = np.zeros((n_sentences, max_length), dtype=np.int64)\n\n for k, sent in enumerate(sentences):\n length = len(sent) + 2\n ids_without_mask = self._lm_vocab.encode(sent, split=False)\n # add one so that 0 is the mask value\n X_ids[k, :length] = ids_without_mask + 1\n\n return X_ids\n\n\n##### for training\ndef _get_batch(generator, batch_size, num_steps, max_word_length):\n \"\"\"Read batches of input.\"\"\"\n cur_stream = [None] * batch_size\n\n no_more_data = False\n while True:\n inputs = np.zeros([batch_size, num_steps], np.int32)\n if max_word_length is not None:\n char_inputs = np.zeros([batch_size, num_steps, max_word_length],\n np.int32)\n else:\n char_inputs = None\n targets = np.zeros([batch_size, num_steps], np.int32)\n\n for i in range(batch_size):\n cur_pos = 0\n\n while cur_pos < num_steps:\n if cur_stream[i] is None or len(cur_stream[i][0]) <= 1:\n try:\n cur_stream[i] = list(next(generator))\n except StopIteration:\n # No more data, exhaust current streams and quit\n no_more_data = True\n break\n\n how_many = min(len(cur_stream[i][0]) - 1, num_steps - cur_pos)\n next_pos = cur_pos + how_many\n\n inputs[i, cur_pos:next_pos] = cur_stream[i][0][:how_many]\n if max_word_length is not None:\n char_inputs[i, cur_pos:next_pos] = cur_stream[i][1][\n :how_many]\n targets[i, cur_pos:next_pos] = cur_stream[i][0][1:how_many+1]\n\n cur_pos = next_pos\n\n cur_stream[i][0] = cur_stream[i][0][how_many:]\n if max_word_length is not None:\n cur_stream[i][1] = cur_stream[i][1][how_many:]\n\n if no_more_data:\n # There is no more data. Note: this will not return data\n # for the incomplete batch\n break\n\n X = {'token_ids': inputs, 'tokens_characters': char_inputs,\n 'next_token_id': targets}\n\n yield X\n\nclass LMDataset(object):\n \"\"\"\n Hold a language model dataset.\n\n A dataset is a list of tokenized files. Each file contains one sentence\n per line. 
Each sentence is pre-tokenized and white space joined.\n \"\"\"\n def __init__(self, filepattern, vocab, reverse=False, test=False,\n shuffle_on_load=False):\n '''\n filepattern = a glob string that specifies the list of files.\n vocab = an instance of Vocabulary or UnicodeCharsVocabulary\n reverse = if True, then iterate over tokens in each sentence in reverse\n test = if True, then iterate through all data once then stop.\n Otherwise, iterate forever.\n shuffle_on_load = if True, then shuffle the sentences after loading.\n '''\n self._vocab = vocab\n self._all_shards = glob.glob(filepattern)\n print('Found %d shards at %s' % (len(self._all_shards), filepattern))\n self._shards_to_choose = []\n\n self._reverse = reverse\n self._test = test\n self._shuffle_on_load = shuffle_on_load\n self._use_char_inputs = hasattr(vocab, 'encode_chars')\n\n self._ids = self._load_random_shard()\n\n def _choose_random_shard(self):\n if len(self._shards_to_choose) == 0:\n self._shards_to_choose = list(self._all_shards)\n random.shuffle(self._shards_to_choose)\n shard_name = self._shards_to_choose.pop()\n return shard_name\n\n def _load_random_shard(self):\n \"\"\"Randomly select a file and read it.\"\"\"\n if self._test:\n if len(self._all_shards) == 0:\n # we've loaded all the data\n # this will propogate up to the generator in get_batch\n # and stop iterating\n raise StopIteration\n else:\n shard_name = self._all_shards.pop()\n else:\n # just pick a random shard\n shard_name = self._choose_random_shard()\n\n ids = self._load_shard(shard_name)\n self._i = 0\n self._nids = len(ids)\n return ids\n\n def _load_shard(self, shard_name):\n \"\"\"Read one file and convert to ids.\n\n Args:\n shard_name: file path.\n\n Returns:\n list of (id, char_id) tuples.\n \"\"\"\n print('Loading data from: %s' % shard_name)\n with open(shard_name) as f:\n sentences_raw = f.readlines()\n\n if self._reverse:\n sentences = []\n for sentence in sentences_raw:\n splitted = sentence.split()\n splitted.reverse()\n sentences.append(' '.join(splitted))\n else:\n sentences = sentences_raw\n\n if self._shuffle_on_load:\n random.shuffle(sentences)\n\n ids = [self.vocab.encode(sentence, self._reverse)\n for sentence in sentences]\n if self._use_char_inputs:\n chars_ids = [self.vocab.encode_chars(sentence, self._reverse)\n for sentence in sentences]\n else:\n chars_ids = [None] * len(ids)\n\n print('Loaded %d sentences.' 
% len(ids))\n print('Finished loading')\n return list(zip(ids, chars_ids))\n\n def get_sentence(self):\n while True:\n if self._i == self._nids:\n self._ids = self._load_random_shard()\n ret = self._ids[self._i]\n self._i += 1\n yield ret\n\n @property\n def max_word_length(self):\n if self._use_char_inputs:\n return self._vocab.max_word_length\n else:\n return None\n\n def iter_batches(self, batch_size, num_steps):\n for X in _get_batch(self.get_sentence(), batch_size, num_steps,\n self.max_word_length):\n\n # token_ids = (batch_size, num_steps)\n # char_inputs = (batch_size, num_steps, 50) of character ids\n # targets = word ID of next word (batch_size, num_steps)\n yield X\n\n @property\n def vocab(self):\n return self._vocab\n\nclass BidirectionalLMDataset(object):\n def __init__(self, filepattern, vocab, test=False, shuffle_on_load=False):\n '''\n bidirectional version of LMDataset\n '''\n self._data_forward = LMDataset(\n filepattern, vocab, reverse=False, test=test,\n shuffle_on_load=shuffle_on_load)\n self._data_reverse = LMDataset(\n filepattern, vocab, reverse=True, test=test,\n shuffle_on_load=shuffle_on_load)\n\n def iter_batches(self, batch_size, num_steps):\n max_word_length = self._data_forward.max_word_length\n\n for X, Xr in zip(\n _get_batch(self._data_forward.get_sentence(), batch_size,\n num_steps, max_word_length),\n _get_batch(self._data_reverse.get_sentence(), batch_size,\n num_steps, max_word_length)\n ):\n\n for k, v in Xr.items():\n X[k + '_reverse'] = v\n\n yield X\n\n\nclass InvalidNumberOfCharacters(Exception):\n pass\n\n"
] | [
[
"tensorflow.device",
"tensorflow.get_variable",
"tensorflow.concat",
"numpy.sqrt",
"tensorflow.control_dependencies",
"tensorflow.nn.rnn_cell.ResidualWrapper",
"tensorflow.nn.max_pool",
"tensorflow.zeros",
"tensorflow.nn.rnn_cell.LSTMStateTuple",
"tensorflow.cast",
"tensorflow.group",
"tensorflow.nn.conv2d",
"tensorflow.random_uniform_initializer",
"tensorflow.squeeze",
"tensorflow.ConfigProto",
"tensorflow.Session",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.reduce_any",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.reverse_sequence",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.reshape",
"tensorflow.assign",
"tensorflow.expand_dims",
"tensorflow.constant_initializer",
"tensorflow.variable_scope"
],
[
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
winstonolson/isofit_imgspec | [
"b6a56ba1abade7e08f14aa9264e6984a77e40a79"
] | [
"isofit/radiative_transfer/look_up_tables.py"
] | [
"#! /usr/bin/env python3\n#\n# Copyright 2018 California Institute of Technology\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ISOFIT: Imaging Spectrometer Optimal FITting\n# Author: David R Thompson, [email protected]\n#\n\nimport os\nimport numpy as np\nimport logging\nimport ray\nfrom collections import OrderedDict\nimport subprocess\nimport time\nimport atexit\n\nfrom isofit.core import common\nfrom isofit.configs import Config\nfrom isofit.configs.sections.radiative_transfer_config import RadiativeTransferEngineConfig\nfrom isofit.configs.sections.statevector_config import StateVectorElementConfig\nfrom isofit.configs.sections.implementation_config import ImplementationConfig\n\n\n### Functions ###\n\[email protected]\ndef spawn_rt(cmd, local_dir=None):\n \"\"\"Run a CLI command.\"\"\"\n\n print(cmd)\n\n # Add a very slight timing offset to prevent all subprocesses\n # starting simultaneously\n time.sleep(float(np.random.random(1))*2)\n\n subprocess.call(cmd, shell=True, cwd=local_dir)\n\n### Classes ###\n\nclass FileExistsError(Exception):\n \"\"\"FileExistsError with a message.\"\"\"\n\n def __init__(self, message):\n super(FileExistsError, self).__init__(message)\n\n\nclass TabularRT:\n \"\"\"A model of photon transport including the atmosphere.\"\"\"\n\n def __init__(self, engine_config: RadiativeTransferEngineConfig, full_config: Config):\n\n self.implementation_config: ImplementationConfig = full_config.implementation\n self.wl, self.fwhm = common.load_wavelen(full_config.forward_model.instrument.wavelength_file)\n if engine_config.wavelength_range is not None:\n valid_wl = np.logical_and(self.wl >= engine_config.wavelength_range[0],\n self.wl <= engine_config.wavelength_range[1])\n self.wl = self.wl[valid_wl]\n self.fwhm = self.fwhm[valid_wl]\n\n self.n_chan = len(self.wl)\n\n self.auto_rebuild = full_config.implementation.rte_auto_rebuild\n self.configure_and_exit = full_config.implementation.rte_configure_and_exit\n\n # We use a sorted dictionary here so that filenames for lookup\n # table (LUT) grid points are always constructed the same way, with\n # consistent dimesion ordering). 
Every state vector element has\n # a lookup table dimension, but some lookup table dimensions\n # (like geometry parameters) may not be in the state vector.\n # TODO: enforce a requirement that makes all SV elements be inside the LUT\n full_lut_grid = full_config.forward_model.radiative_transfer.lut_grid\n # selectively get lut components that are in this particular RTE\n self.lut_grid_config = OrderedDict()\n if engine_config.lut_names is not None:\n lut_names = engine_config.lut_names\n else:\n lut_names = full_config.forward_model.radiative_transfer.lut_grid.keys()\n\n for key, value in full_lut_grid.items():\n if key in lut_names:\n self.lut_grid_config[key] = value\n\n # selectively get statevector components that are in this particular RTE\n full_sv_names = full_config.forward_model.radiative_transfer.statevector.get_element_names()\n self.statevector_names = full_sv_names\n\n self.lut_dir = engine_config.lut_path\n self.n_point = len(self.lut_grid_config)\n self.n_state = len(self.statevector_names)\n\n self.luts = {}\n\n # Retrieved variables. We establish scaling, bounds, and\n # initial guesses for each state vector element. The state\n # vector elements are all free parameters in the RT lookup table,\n # and they all have associated dimensions in the LUT grid.\n self.bounds, self.scale, self.init = [], [], []\n self.prior_mean, self.prior_sigma = [], []\n for key in self.statevector_names:\n element: StateVectorElementConfig = full_config.forward_model.radiative_transfer.statevector.get_single_element_by_name(\n key)\n self.bounds.append(element.bounds)\n self.scale.append(element.scale)\n self.init.append(element.init)\n self.prior_sigma.append(element.prior_sigma)\n self.prior_mean.append(element.prior_mean)\n self.bounds = np.array(self.bounds)\n self.scale = np.array(self.scale)\n self.init = np.array(self.init)\n self.prior_mean = np.array(self.prior_mean)\n self.prior_sigma = np.array(self.prior_sigma)\n\n self.lut_dims = []\n self.lut_grids = []\n self.lut_names = []\n self.lut_interp_types = []\n for key, grid_values in self.lut_grid_config.items():\n\n # do some quick checks on the values\n if len(grid_values) == 1:\n err = 'Only 1 value in LUT grid {}. ' +\\\n '1-d LUT grids cannot be interpreted.'.format(key)\n raise ValueError(err)\n if grid_values != sorted(grid_values):\n logging.error('Lookup table grid needs ascending order')\n raise ValueError('Lookup table grid needs ascending order')\n\n # Store the values\n self.lut_grids.append(grid_values)\n self.lut_dims.append(len(grid_values))\n self.lut_names.append(key)\n\n # Store in an indication of the type of value each key is\n # (normal - n, degree - d, radian - r)\n if key in self.angular_lut_keys_radians:\n self.lut_interp_types.append('r')\n elif key in self.angular_lut_keys_degrees:\n self.lut_interp_types.append('d')\n else:\n self.lut_interp_types.append('n')\n\n # Cast as array for faster reference later\n self.lut_interp_types = np.array(self.lut_interp_types)\n\n # \"points\" contains all combinations of grid points\n # We will have one filename prefix per point\n self.points = common.combos(self.lut_grids)\n self.files = self.get_lut_filenames()\n\n def build_lut(self, rebuild=False):\n \"\"\"Each LUT is associated with a source directory. 
We build a lookup \n table by: \n (1) defining the LUT dimensions, state vector names, and the \n grid of values; \n (2) running the radiative transfer solver if needed, with each \n run defining a different point in the LUT; and \n (3) loading the LUTs, one per key atmospheric coefficient vector,\n into memory as VectorInterpolator objects.\"\"\"\n\n # Build the list of radiative transfer run commands. This\n # rebuild_cmd() function will be overriden by the child class to\n # perform setup activities unique to each RTM.\n rebuild_cmds = []\n for point, fn in zip(self.points, self.files):\n try:\n cmd = self.rebuild_cmd(point, fn)\n rebuild_cmds.append(cmd)\n except FileExistsError:\n pass\n\n if self.configure_and_exit:\n raise SystemExit\n # sys.exit(0)\n\n elif len(rebuild_cmds) > 0 and self.auto_rebuild:\n logging.info(\"Rebuilding radiative transfer look up table\")\n\n # check to make sure lut directory is there, create if not\n if os.path.isdir(self.lut_dir) is False:\n os.mkdir(self.lut_dir)\n\n # Make the LUT calls (in parallel if specified)\n results = ray.get([spawn_rt.remote(rebuild_cmd, self.lut_dir) for rebuild_cmd in rebuild_cmds])\n\n\n def get_lut_filenames(self):\n files = []\n for point in self.points:\n outf = '_'.join(['%s-%6.4f' % (n, x)\n for n, x in zip(self.lut_names, point)])\n files.append(outf)\n return files\n\n def summarize(self, x_RT, geom):\n \"\"\"Summary of state vector.\"\"\"\n\n if len(x_RT) < 1:\n return ''\n return 'Atmosphere: '+' '.join(['%s: %5.3f' % (si, xi) for si, xi in\n zip(self.statevector_names, x_RT)])\n"
] | [
[
"numpy.array",
"numpy.random.random",
"numpy.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
B-tronics/KinemAutomation | [
"853e9ad2c9e702e1830571152393172960c0d055"
] | [
"poseestimation/poseestimation.py"
] | [
"import cv2\nimport numpy as np\nimport argparse\nimport csv\nimport os\nimport glob\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--csv\", help=\"Path to the CSV file holding the 2D data for the video.\")\nap.add_argument(\"-v\", \"--video\", help=\"Path to the video file.\")\nargs = vars(ap.parse_args())\n\ndir_name = args[\"csv\"]\ncsv_list = [os.path.basename(x) for x in glob.glob(dir_name+\"*.csv\")]\ncsv_list.sort()\n\noutput_path = os.getcwd() + \"/data/Knot_Tying/\"\ntry:\n os.makedirs(output_path)\nexcept FileExistsError as e:\n pass\n\ndir_name = args[\"video\"]\nvideo_list = [os.path.basename(x) for x in glob.glob(dir_name+\"*.avi\")]\nvideo_list.sort()\n\nfor i, csvs_file in enumerate(csv_list):\n\n video_path = args[\"video\"] + video_list[i]\n cap = cv2.VideoCapture(video_path)\n frame = cap.read()[1]\n frameSize = frame.shape\n cap.release()\n\n rows = []\n \n result_file = output_path + csvs_file\n csv_file_path = args[\"csv\"] + csvs_file\n\n with open(csv_file_path, \"r\") as f:\n csvReader = csv.reader(f)\n \n for i, row in enumerate(csvReader):\n rows.append(list(row))\n\n modelPoints = np.array([\n (0.0, 0.0, 0.0), # Origo\n (2.0, 0.0, 2.8), # Left from Origo \n (10.83, 0.5, 0.5), # RightAbove from Origo\n (10.83, -0.5, 0.5), # RightBelow from Origo\n (0.0, -3.16, 0.5), # Below Origo\n (0.0, 3.16, 0.5) # Above Orgio\n ])\n\n focalLength = frameSize[1]\n center = (frameSize[1]/2, frameSize[0]/2)\n cameraMatrix = np.array([\n [focalLength, 0, center[0]],\n [0, focalLength, center[1]],\n [0,0,1]\n ], dtype=\"double\")\n\n distCoeffs = np.zeros((4,1))\n\n with open(result_file, 'w') as r:\n rwriter = csv.writer(r)\n for row in rows:\n imagePoints = np.array([\n (float(row[0]), float(row[1])), # Origo\n (float(row[2]), float(row[3])), # Left from Origo\n (float(row[4]), float(row[5])), # RightAbove from Origo\n (float(row[6]), float(row[7])), # RightBelow from Origo\n (float(row[8]), float(row[9])), # Below Origo\n (float(row[10]), float(row[11])) # Above Origo\n ])\n\n (success, rotationVector, translationVector) = cv2.solvePnP(\n modelPoints, \n imagePoints,\n cameraMatrix,\n distCoeffs,\n flags=cv2.SOLVEPNP_ITERATIVE)\n\n data = [translationVector[0][0], translationVector[1][0], translationVector[2][0]]\n rwriter.writerow(data)"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ThomasLecat/ray | [
"eb025ea8cb27583e8ef6287f5654f23d1ab270ef",
"eb025ea8cb27583e8ef6287f5654f23d1ab270ef",
"eb025ea8cb27583e8ef6287f5654f23d1ab270ef"
] | [
"python/ray/util/sgd/tests/test_torch.py",
"python/ray/tune/integration/torch.py",
"python/ray/tune/tests/test_var.py"
] | [
"from unittest.mock import patch\nimport numpy as np\nimport os\nimport pytest\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.utils.data import DataLoader\n\nimport ray\nfrom ray import tune\nfrom ray.util.sgd.torch import TorchTrainer\nfrom ray.util.sgd.torch.training_operator import (\n get_test_operator, get_test_metrics_operator, TrainingOperator)\nfrom ray.util.sgd.torch.constants import SCHEDULER_STEP\nfrom ray.util.sgd.utils import (check_for_failure, NUM_SAMPLES, BATCH_COUNT,\n BATCH_SIZE)\n\nfrom ray.util.sgd.data.examples import mlp_identity\nfrom ray.util.sgd.torch.examples.train_example import (\n model_creator, optimizer_creator, data_creator, LinearDataset)\n\n\[email protected]\ndef ray_start_2_cpus():\n address_info = ray.init(num_cpus=2)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n # Ensure that tests don't ALL fail\n if dist.is_initialized():\n dist.destroy_process_group()\n\n\[email protected]\ndef ray_start_4_cpus():\n address_info = ray.init(num_cpus=4)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n # Ensure that tests don't ALL fail\n if dist.is_initialized():\n dist.destroy_process_group()\n\n\nOperator = TrainingOperator.from_creators(\n model_creator, optimizer_creator, data_creator, loss_creator=nn.MSELoss)\n\n\ndef test_single_step(ray_start_2_cpus): # noqa: F811\n trainer = TorchTrainer(training_operator_cls=Operator, num_workers=1)\n metrics = trainer.train(num_steps=1)\n assert metrics[BATCH_COUNT] == 1\n\n val_metrics = trainer.validate(num_steps=1)\n assert val_metrics[BATCH_COUNT] == 1\n trainer.shutdown()\n\n\ndef test_dead_trainer(ray_start_2_cpus): # noqa: F811\n TestOperator = get_test_operator(Operator)\n trainer = TorchTrainer(training_operator_cls=TestOperator, num_workers=2)\n trainer.train(num_steps=1)\n trainer.shutdown()\n with pytest.raises(RuntimeError):\n trainer.train()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_train(ray_start_2_cpus, num_workers): # noqa: F811\n trainer = TorchTrainer(\n training_operator_cls=Operator, num_workers=num_workers)\n for i in range(3):\n train_loss1 = trainer.train()[\"train_loss\"]\n validation_loss1 = trainer.validate()[\"val_loss\"]\n\n for i in range(3):\n train_loss2 = trainer.train()[\"train_loss\"]\n validation_loss2 = trainer.validate()[\"val_loss\"]\n\n assert train_loss2 <= train_loss1, (train_loss2, train_loss1)\n assert validation_loss2 <= validation_loss1, (validation_loss2,\n validation_loss1)\n trainer.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_multi_model(ray_start_2_cpus, num_workers):\n def train(*, model=None, criterion=None, optimizer=None, iterator=None):\n model.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(iterator):\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n return {\n \"accuracy\": correct / total,\n \"train_loss\": train_loss / (batch_idx + 1)\n }\n\n def train_epoch(self, iterator, info):\n result = {}\n data = list(iterator)\n for i, (model, optimizer) in enumerate(\n zip(self.models, self.optimizers)):\n result[f\"model_{i}\"] = train(\n model=model,\n 
criterion=self.criterion,\n optimizer=optimizer,\n iterator=iter(data))\n return result\n\n class MultiModelOperator(TrainingOperator):\n def setup(self, config):\n models = nn.Linear(1, 1), nn.Linear(1, 1)\n opts = [\n torch.optim.SGD(model.parameters(), lr=0.0001)\n for model in models\n ]\n loss = nn.MSELoss()\n train_dataloader, val_dataloader = data_creator(config)\n self.models, self.optimizers, self.criterion = self.register(\n models=models, optimizers=opts, criterion=loss)\n self.register_data(\n train_loader=train_dataloader,\n validation_loader=val_dataloader)\n\n TestOperator = get_test_operator(MultiModelOperator)\n\n trainer1 = TorchTrainer(\n config={\"custom_func\": train_epoch},\n training_operator_cls=TestOperator,\n num_workers=num_workers)\n trainer1.train()\n state = trainer1.state_dict()\n\n models1 = trainer1.get_model()\n\n trainer1.shutdown()\n\n trainer2 = TorchTrainer(\n config={\"custom_func\": train_epoch},\n training_operator_cls=TestOperator,\n num_workers=num_workers)\n trainer2.load_state_dict(state)\n\n models2 = trainer2.get_model()\n\n for model_1, model_2 in zip(models1, models2):\n\n model1_state_dict = model_1.state_dict()\n model2_state_dict = model_2.state_dict()\n\n assert set(model1_state_dict.keys()) == set(model2_state_dict.keys())\n\n for k in model1_state_dict:\n assert torch.equal(model1_state_dict[k], model2_state_dict[k])\n\n trainer2.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_multi_model_matrix(ray_start_2_cpus, num_workers): # noqa: F811\n def train_epoch(self, iterator, info):\n if self.config.get(\"models\", 1) > 1:\n assert len(self.models) == self.config[\"models\"], self.config\n\n if self.config.get(\"optimizers\", 1) > 1:\n assert len(\n self.optimizers) == self.config[\"optimizers\"], self.config\n\n if self.config.get(\"schedulers\", 1) > 1:\n assert len(\n self.schedulers) == self.config[\"schedulers\"], self.config\n return {\"done\": 1}\n\n def multi_model_creator(config):\n models = []\n for i in range(config.get(\"models\", 1)):\n models += [nn.Linear(1, 1)]\n return models[0] if len(models) == 1 else models\n\n def multi_optimizer_creator(models, config):\n optimizers = []\n main_model = models[0] if type(models) is list else models\n for i in range(config.get(\"optimizers\", 1)):\n optimizers += [torch.optim.SGD(main_model.parameters(), lr=0.0001)]\n return optimizers[0] if len(optimizers) == 1 else optimizers\n\n def multi_scheduler_creator(optimizer, config):\n schedulers = []\n main_opt = optimizer[0] if type(optimizer) is list else optimizer\n for i in range(config.get(\"schedulers\", 1)):\n schedulers += [\n torch.optim.lr_scheduler.StepLR(\n main_opt, step_size=30, gamma=0.1)\n ]\n return schedulers[0] if len(schedulers) == 1 else schedulers\n\n class MultiModelOperator(TrainingOperator):\n def setup(self, config):\n models = multi_model_creator(config)\n optimizers = multi_optimizer_creator(models, config)\n schedulers = multi_scheduler_creator(optimizers, config)\n train_loader, val_loader = data_creator(config)\n loss = nn.MSELoss()\n\n self.models, self.optimizers, self.criterion, self.schedulers = \\\n self.register(models=models, optimizers=optimizers,\n schedulers=schedulers,\n criterion=loss)\n self.register_data(\n train_loader=train_loader, validation_loader=val_loader)\n\n TestOperator = get_test_operator(MultiModelOperator)\n\n for model_count in range(1, 3):\n for optimizer_count in range(1, 3):\n for scheduler_count in range(1, 3):\n trainer = 
TorchTrainer(\n scheduler_step_freq=\"epoch\",\n training_operator_cls=TestOperator,\n num_workers=num_workers,\n config={\n \"models\": model_count,\n \"optimizers\": optimizer_count,\n \"schedulers\": scheduler_count,\n \"custom_func\": train_epoch\n })\n trainer.train()\n trainer.shutdown()\n\n\[email protected](\"scheduler_freq\", [\"epoch\", \"batch\", \"manual\", None])\ndef test_scheduler_freq(ray_start_2_cpus, scheduler_freq): # noqa: F811\n def train_epoch(self, iterator, info):\n assert info[SCHEDULER_STEP] == scheduler_freq\n return {\"done\": 1}\n\n def scheduler_creator(optimizer, config):\n return torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=30, gamma=0.1)\n\n class TestTrainingOperator(TrainingOperator):\n def setup(self, config):\n model = model_creator(config)\n optimizer = optimizer_creator(model, config)\n train_loader, val_loader = data_creator(config)\n scheduler = scheduler_creator(optimizer, config)\n loss = nn.MSELoss()\n\n self.model, self.optimizer, self.criterion, self.scheduler = \\\n self.register(\n models=model, optimizers=optimizer,\n criterion=loss, schedulers=scheduler)\n self.register_data(\n train_loader=train_loader, validation_loader=val_loader)\n\n if scheduler_freq is None:\n with pytest.raises(ValueError):\n trainer = TorchTrainer(\n config={\"custom_func\": train_epoch},\n training_operator_cls=TestTrainingOperator,\n scheduler_step_freq=scheduler_freq)\n else:\n trainer = TorchTrainer(\n config={\"custom_func\": train_epoch},\n training_operator_cls=TestTrainingOperator,\n scheduler_step_freq=scheduler_freq)\n\n for i in range(3):\n trainer.train()\n trainer.shutdown()\n\n\ndef test_profiling(ray_start_2_cpus): # noqa: F811\n trainer = TorchTrainer(training_operator_cls=Operator)\n\n stats = trainer.train(profile=True)\n assert \"profile\" in stats\n stats = trainer.validate(profile=True)\n assert \"profile\" in stats\n trainer.shutdown()\n\n\ndef test_dataset(ray_start_4_cpus):\n \"\"\"\n This test tries training the mlp_identity example. 
We check the accuracy of\n the model as an all inclusive way of ensuring that we are properly sharding\n and iterating over the entire dataset (instead of repeating the first set\n of points for example).\n \"\"\"\n\n model_creator = mlp_identity.model_creator\n optimizer_creator = mlp_identity.optimizer_creator\n dataset_creator = mlp_identity.dataset_creator\n\n DatasetOperator = TrainingOperator.from_creators(\n model_creator=model_creator,\n optimizer_creator=optimizer_creator,\n loss_creator=nn.MSELoss)\n\n trainer = TorchTrainer(\n training_operator_cls=DatasetOperator,\n num_workers=2,\n )\n\n dataset = dataset_creator()\n for i in range(5):\n trainer.train(dataset=dataset, num_steps=100)\n\n input = mlp_identity.to_mat(0.5)\n prediction = float(trainer.get_model()(input)[0][0])\n assert 0.4 <= prediction <= 0.6\n trainer.shutdown()\n\n\ndef test_split_batch(ray_start_2_cpus):\n if not dist.is_available():\n return\n\n def data_creator(config):\n \"\"\"Returns training dataloader, validation dataloader.\"\"\"\n train_dataset = LinearDataset(2, 5, size=config[\"data_size\"])\n return DataLoader(\n train_dataset,\n batch_size=config[BATCH_SIZE],\n )\n\n data_size = 600\n batch_size = 21\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n data_creator,\n loss_creator=lambda config: nn.MSELoss())\n trainer = TorchTrainer(\n training_operator_cls=TestOperator,\n num_workers=2,\n config={\n BATCH_SIZE: batch_size,\n \"data_size\": data_size,\n })\n stats = trainer.train()\n assert trainer.config[BATCH_SIZE] == (batch_size - 1)\n assert stats[NUM_SAMPLES] == 600\n assert stats[BATCH_COUNT] == (data_size // 20)\n trainer.shutdown()\n\n\ndef test_reduce_result(ray_start_2_cpus):\n if not dist.is_available():\n return\n\n def data_creator(config):\n \"\"\"Returns training dataloader, validation dataloader.\"\"\"\n train_dataset = LinearDataset(2, 5, size=config[\"data_size\"])\n test_dataset = LinearDataset(2, 5, size=config[\"data_size\"])\n return DataLoader(\n train_dataset, batch_size=1), DataLoader(\n test_dataset, batch_size=1)\n\n data_size = 600\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n data_creator,\n loss_creator=lambda config: nn.MSELoss())\n trainer = TorchTrainer(\n training_operator_cls=TestOperator,\n num_workers=2,\n config={\"data_size\": data_size})\n list_stats = trainer.train(reduce_results=False, profile=True)\n assert len(list_stats) == 2\n assert [stats[NUM_SAMPLES] == data_size for stats in list_stats]\n assert [stats[BATCH_COUNT] == (data_size // 2) for stats in list_stats]\n list_stats = trainer.validate(reduce_results=False, profile=True)\n assert len(list_stats) == 2\n assert [stats[NUM_SAMPLES] == data_size for stats in list_stats]\n assert [stats[BATCH_COUNT] == (data_size // 2) for stats in list_stats]\n trainer.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_metrics(ray_start_2_cpus, num_workers):\n data_size, val_size = 600, 500\n batch_size = 4\n\n num_train_steps = int(data_size / batch_size)\n num_val_steps = int(val_size / batch_size)\n\n train_scores = [1] + ([0] * num_train_steps)\n val_scores = [1] + ([0] * num_val_steps)\n\n TestOperator = get_test_metrics_operator(Operator)\n trainer = TorchTrainer(\n training_operator_cls=TestOperator,\n num_workers=num_workers,\n config={\n \"scores\": train_scores,\n \"val_scores\": val_scores,\n \"key\": \"score\",\n \"batch_size\": batch_size,\n \"data_size\": data_size,\n 
\"val_size\": val_size\n })\n\n stats = trainer.train(num_steps=num_train_steps)\n # Test that we output mean and last of custom metrics in an epoch\n assert \"score\" in stats\n assert stats[\"last_score\"] == 0\n\n assert stats[NUM_SAMPLES] == num_train_steps * batch_size\n expected_score = num_workers * (sum(train_scores) /\n (num_train_steps * batch_size))\n assert np.allclose(stats[\"score\"], expected_score)\n\n val_stats = trainer.validate()\n # Test that we output mean and last of custom metrics in validation\n assert val_stats[\"last_score\"] == 0\n expected_score = (sum(val_scores) /\n (num_val_steps * batch_size)) * num_workers\n assert np.allclose(val_stats[\"score\"], expected_score)\n assert val_stats[BATCH_COUNT] == np.ceil(num_val_steps / num_workers)\n assert val_stats[NUM_SAMPLES] == num_val_steps * batch_size\n assert val_stats[NUM_SAMPLES] == val_size\n\n trainer.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_metrics_nan(ray_start_2_cpus, num_workers):\n data_size, val_size = 100, 100\n batch_size = 10\n\n num_train_steps = int(data_size / batch_size)\n num_val_steps = int(val_size / batch_size)\n\n train_scores = [np.nan] + ([0] * num_train_steps)\n val_scores = [np.nan] + ([0] * num_val_steps)\n TestOperator = get_test_metrics_operator(Operator)\n trainer = TorchTrainer(\n training_operator_cls=TestOperator,\n num_workers=num_workers,\n config={\n \"scores\": train_scores,\n \"val_scores\": val_scores,\n \"key\": \"score\",\n \"batch_size\": batch_size,\n \"data_size\": data_size,\n \"val_size\": val_size\n })\n\n stats = trainer.train(num_steps=num_train_steps)\n assert \"score\" in stats\n assert stats[\"last_score\"] == 0\n assert np.isnan(stats[\"score\"])\n\n stats = trainer.validate()\n assert \"score\" in stats\n assert stats[\"last_score\"] == 0\n assert np.isnan(stats[\"score\"])\n trainer.shutdown()\n\n\ndef test_scheduler_validate(ray_start_2_cpus): # noqa: F811\n from torch.optim.lr_scheduler import ReduceLROnPlateau\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n data_creator,\n scheduler_creator=lambda optimizer, cfg: ReduceLROnPlateau(optimizer),\n loss_creator=lambda config: nn.MSELoss())\n TestOperator = get_test_operator(TestOperator)\n trainer = TorchTrainer(\n scheduler_step_freq=\"manual\", training_operator_cls=TestOperator)\n trainer.update_scheduler(0.5)\n trainer.update_scheduler(0.5)\n assert all(\n trainer.apply_all_operators(\n lambda op: op._schedulers[0].last_epoch == 2))\n trainer.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_tune_train(ray_start_2_cpus, num_workers): # noqa: F811\n TorchTrainable = TorchTrainer.as_trainable(\n **{\n \"training_operator_cls\": Operator,\n \"num_workers\": num_workers,\n \"use_gpu\": False,\n \"backend\": \"gloo\",\n \"config\": {\n \"batch_size\": 512,\n \"lr\": 0.001\n }\n })\n\n analysis = tune.run(\n TorchTrainable,\n num_samples=2,\n stop={\"training_iteration\": 2},\n verbose=1)\n\n # checks loss decreasing for every trials\n for path, df in analysis.trial_dataframes.items():\n mean_train_loss1 = df.loc[0, \"train_loss\"]\n mean_train_loss2 = df.loc[1, \"train_loss\"]\n mean_val_loss1 = df.loc[0, \"val_loss\"]\n mean_val_loss2 = df.loc[1, \"val_loss\"]\n\n assert mean_train_loss2 <= mean_train_loss1\n assert mean_val_loss2 <= mean_val_loss1\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef 
test_save_and_restore(ray_start_2_cpus, num_workers,\n tmp_path): # noqa: F811\n trainer1 = TorchTrainer(\n training_operator_cls=Operator, num_workers=num_workers)\n trainer1.train()\n checkpoint_path = os.path.join(tmp_path, \"checkpoint\")\n trainer1.save(checkpoint_path)\n\n model1 = trainer1.get_model()\n\n trainer1.shutdown()\n\n trainer2 = TorchTrainer(\n training_operator_cls=Operator, num_workers=num_workers)\n trainer2.load(checkpoint_path)\n\n model2 = trainer2.get_model()\n\n model1_state_dict = model1.state_dict()\n model2_state_dict = model2.state_dict()\n\n assert set(model1_state_dict.keys()) == set(model2_state_dict.keys())\n\n for k in model1_state_dict:\n assert torch.equal(model1_state_dict[k], model2_state_dict[k])\n trainer2.shutdown()\n\n\ndef test_wrap_ddp(ray_start_2_cpus, tmp_path): # noqa: F811\n if not dist.is_available():\n return\n trainer1 = TorchTrainer(\n training_operator_cls=Operator, wrap_ddp=False, num_workers=2)\n trainer1.train()\n checkpoint_path = os.path.join(tmp_path, \"checkpoint\")\n trainer1.save(checkpoint_path)\n\n model1 = trainer1.get_model()\n assert not hasattr(trainer1.local_worker.training_operator.model, \"module\")\n assert hasattr(trainer1.local_worker.training_operator, \"device_ids\")\n trainer1.shutdown()\n\n trainer2 = TorchTrainer(\n training_operator_cls=Operator, wrap_ddp=False, num_workers=2)\n trainer2.load(checkpoint_path)\n\n model2 = trainer2.get_model()\n\n model1_state_dict = model1.state_dict()\n model2_state_dict = model2.state_dict()\n\n assert set(model1_state_dict.keys()) == set(model2_state_dict.keys())\n\n for k in model1_state_dict:\n assert torch.equal(model1_state_dict[k], model2_state_dict[k])\n trainer2.shutdown()\n\n\ndef gen_step_with_fail(num_fails):\n def step_with_fail(self,\n num_steps=None,\n profile=False,\n info=None,\n dataset=None):\n params = dict(num_steps=num_steps, profile=profile, info=info)\n remote_worker_stats = [\n w.train_epoch.remote(**params) for w in self.remote_workers\n ]\n\n if self._num_failures < num_fails:\n time.sleep(1) # Make the batch will fail correctly.\n ray.kill(self.remote_workers[0])\n\n try:\n local_worker_stats = self.local_worker.train_epoch(**params)\n except RuntimeError:\n return False, None\n\n success = check_for_failure(remote_worker_stats)\n if success:\n return success, [local_worker_stats] + ray.get(remote_worker_stats)\n\n return success, None\n\n return step_with_fail\n\n\ndef test_fail_with_recover(ray_start_2_cpus): # noqa: F811\n if not dist.is_available():\n return\n\n def single_loader(config):\n dataset = LinearDataset(2, 5, size=1000000)\n return DataLoader(dataset, batch_size=config.get(\"batch_size\", 32))\n\n step_with_fail = gen_step_with_fail(3)\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n single_loader,\n loss_creator=lambda config: nn.MSELoss())\n with patch.object(TorchTrainer, \"_train_epoch\", step_with_fail):\n trainer1 = TorchTrainer(\n training_operator_cls=TestOperator,\n config={\"batch_size\": 100000},\n num_workers=2)\n\n with pytest.raises(RuntimeError):\n trainer1.train(max_retries=1)\n\n trainer1.shutdown(force=True)\n\n\ndef test_resize(ray_start_2_cpus): # noqa: F811\n if not dist.is_available():\n return\n\n def single_loader(config):\n dataset = LinearDataset(2, 5, size=1000000)\n return DataLoader(dataset, batch_size=config.get(\"batch_size\", 32))\n\n step_with_fail = gen_step_with_fail(1)\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n 
optimizer_creator,\n single_loader,\n loss_creator=lambda config: nn.MSELoss())\n with patch.object(TorchTrainer, \"_train_epoch\", step_with_fail):\n trainer1 = TorchTrainer(\n training_operator_cls=TestOperator,\n config={\"batch_size\": 100000},\n num_workers=2)\n\n @ray.remote\n def try_test():\n import time\n time.sleep(100)\n\n try_test.remote()\n trainer1.train(max_retries=1)\n assert len(trainer1.remote_workers) == 1\n\n trainer1.shutdown()\n\n\ndef test_fail_twice(ray_start_2_cpus): # noqa: F811\n if not dist.is_available():\n return\n\n def single_loader(config):\n dataset = LinearDataset(2, 5, size=1000000)\n return DataLoader(dataset, batch_size=config.get(\"batch_size\", 32))\n\n step_with_fail = gen_step_with_fail(2)\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n single_loader,\n loss_creator=lambda config: nn.MSELoss())\n\n with patch.object(TorchTrainer, \"_train_epoch\", step_with_fail):\n trainer1 = TorchTrainer(\n training_operator_cls=TestOperator,\n config={\"batch_size\": 100000},\n num_workers=2)\n\n # MAX RETRIES SHOULD BE ON BY DEFAULT\n trainer1.train()\n trainer1.shutdown()\n\n\ndef test_multi_input_model(ray_start_2_cpus):\n def model_creator(config):\n class MultiInputModel(nn.Module):\n def __init__(self):\n super(MultiInputModel, self).__init__()\n self._fc1 = torch.nn.Linear(1, 1)\n self._fc2 = torch.nn.Linear(1, 1)\n\n def forward(self, x, y):\n return self._fc1(x) + self._fc2(y)\n\n return MultiInputModel()\n\n def data_creator(config):\n class LinearDataset(torch.utils.data.Dataset):\n def __init__(self, a, b, size=1000):\n x = np.random.randn(size)\n y = np.random.randn(size)\n self.x = torch.tensor(x, dtype=torch.float32)\n self.y = torch.tensor(y, dtype=torch.float32)\n self.z = torch.tensor(a * (x + y) + 2 * b, dtype=torch.float32)\n\n def __getitem__(self, index):\n return (self.x[index, None], self.y[index, None],\n self.z[index, None])\n\n def __len__(self):\n return len(self.x)\n\n train_dataset = LinearDataset(3, 4)\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=config.get(\"batch_size\", 32),\n )\n return train_loader, None\n\n Operator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n data_creator,\n loss_creator=lambda config: nn.MSELoss())\n\n trainer = TorchTrainer(training_operator_cls=Operator, num_workers=1)\n\n metrics = trainer.train(num_steps=1)\n assert metrics[BATCH_COUNT] == 1\n\n trainer.shutdown()\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n\n sys.exit(pytest.main([\"-v\", \"-x\", __file__]))\n",
"# Original Code here:\n# https://github.com/pytorch/examples/blob/master/mnist/main.py\nfrom contextlib import contextmanager\nimport os\nimport logging\nimport shutil\nimport tempfile\nimport torch\nfrom datetime import timedelta\n\nimport ray\nfrom ray import tune\nfrom ray.tune.result import RESULT_DUPLICATE\nfrom ray.tune.logger import NoopLogger\nfrom ray.tune.function_runner import wrap_function\nfrom ray.tune.resources import Resources\nfrom ray.tune.trainable import TrainableUtil\nfrom ray.tune.utils import detect_checkpoint_function\nfrom ray.util.sgd.torch.utils import setup_process_group, setup_address\nfrom ray.util.sgd.torch.constants import NCCL_TIMEOUT_S\n\nlogger = logging.getLogger(__name__)\n\n_distributed_enabled = False\n\n\ndef is_distributed_trainable():\n \"\"\"Returns True if executing within a DistributedTrainable.\"\"\"\n return _distributed_enabled\n\n\ndef enable_distributed_trainable():\n global _distributed_enabled\n _distributed_enabled = True\n\n\ndef logger_creator(log_config, logdir, rank):\n worker_dir = os.path.join(logdir, \"worker_{}\".format(rank))\n os.makedirs(worker_dir, exist_ok=True)\n return NoopLogger(log_config, worker_dir)\n\n\nclass _TorchTrainable(tune.Trainable):\n \"\"\"Base class for distributed training on Tune.\n\n A wrapper class is needed to actually create a working\n version of this trainable.\n \"\"\"\n _function = None\n _num_workers = None\n _use_gpu = None\n _num_cpus_per_worker = None\n\n __slots__ = [\"workers\", \"_finished\"]\n\n @classmethod\n def default_process_group_parameters(self):\n return dict(timeout=timedelta(NCCL_TIMEOUT_S), backend=\"gloo\")\n\n @classmethod\n def get_remote_worker_options(self):\n num_gpus = 1 if self._use_gpu else 0\n num_cpus = int(self._num_cpus_per_worker or 1)\n return dict(num_cpus=num_cpus, num_gpus=num_gpus)\n\n def setup(self, config):\n self._finished = False\n num_workers = self._num_workers\n logdir = self.logdir\n assert self._function\n\n func_trainable = wrap_function(self.__class__._function)\n\n remote_trainable = ray.remote(func_trainable)\n remote_trainable = remote_trainable.options(\n **self.get_remote_worker_options())\n\n address = setup_address()\n self.workers = [\n remote_trainable.remote(\n config=config,\n logger_creator=lambda cfg: logger_creator(cfg, logdir, rank))\n for rank in range(num_workers)\n ]\n\n pgroup_params = self.default_process_group_parameters()\n from functools import partial\n setup_on_worker = partial(\n setup_process_group,\n url=address,\n world_size=num_workers,\n **pgroup_params)\n ray.get([\n w.execute.remote(lambda _: setup_on_worker(world_rank=rank))\n for rank, w in enumerate(self.workers)\n ])\n\n ray.get([\n w.execute.remote(lambda _: enable_distributed_trainable())\n for rank, w in enumerate(self.workers)\n ])\n\n def step(self):\n if self._finished:\n raise RuntimeError(\"Training has already finished.\")\n result = ray.get([w.step.remote() for w in self.workers])[0]\n if RESULT_DUPLICATE in result:\n self._finished = True\n return result\n\n def save_checkpoint(self, checkpoint_dir):\n # TODO: optimize if colocated\n save_obj = ray.get(self.workers[0].save_to_object.remote())\n checkpoint_path = TrainableUtil.create_from_pickle(\n save_obj, checkpoint_dir)\n return checkpoint_path\n\n def load_checkpoint(self, checkpoint_dir):\n checkpoint_obj = TrainableUtil.checkpoint_to_object(checkpoint_dir)\n return ray.get(\n w.restore_from_object.remote(checkpoint_obj) for w in self.workers)\n\n def stop(self):\n ray.get([worker.stop.remote() 
for worker in self.workers])\n\n\ndef DistributedTrainableCreator(func,\n use_gpu=False,\n num_workers=1,\n num_cpus_per_worker=1,\n backend=\"gloo\",\n timeout_s=NCCL_TIMEOUT_S):\n \"\"\"Creates a class that executes distributed training.\n\n Similar to running `torch.distributed.launch`.\n\n Note that you typically should not instantiate the object\n created.\n\n Args:\n func (callable): This function is a Tune trainable function.\n This function must have 2 args in the signature, and the\n latter arg must contain `checkpoint_dir`. For example:\n `func(config, checkpoint_dir=None)`.\n use_gpu (bool): Sets resource allocation for workers to 1 GPU\n if true. Also automatically sets CUDA_VISIBLE_DEVICES\n for each training worker.\n num_workers (int): Number of training workers to include in\n world.\n num_cpus_per_worker (int): Number of CPU resources to reserve\n per training worker.\n backend (str): One of \"gloo\", \"nccl\".\n timeout_s (float): Seconds before the torch process group\n times out. Useful when machines are unreliable. Defaults\n to 60 seconds.\n\n Returns:\n A trainable class object that can be passed to Tune. Resources\n are automatically set within the object, so users do\n not need to set `resources_per_trainable`.\n\n Example:\n\n .. code-block:: python\n\n trainable_cls = DistributedTrainableCreator(\n train_func, num_workers=2)\n analysis = tune.run(trainable_cls)\n \"\"\"\n detect_checkpoint_function(func, abort=True)\n\n class WrappedDistributedTorchTrainable(_TorchTrainable):\n _function = func\n _num_workers = num_workers\n _use_gpu = use_gpu\n _num_cpus_per_worker = num_cpus_per_worker\n\n @classmethod\n def default_process_group_parameters(self):\n return dict(timeout=timedelta(timeout_s), backend=backend)\n\n @classmethod\n def default_resource_request(cls, config):\n num_workers_ = int(config.get(\"num_workers\", num_workers))\n num_cpus = int(\n config.get(\"num_cpus_per_worker\", num_cpus_per_worker))\n use_gpu_ = config.get(\"use_gpu\", use_gpu)\n\n return Resources(\n cpu=0,\n gpu=0,\n extra_cpu=num_cpus * num_workers_,\n extra_gpu=num_workers_ if use_gpu_ else 0)\n\n return WrappedDistributedTorchTrainable\n\n\n@contextmanager\ndef distributed_checkpoint_dir(step, disable=False):\n \"\"\"ContextManager for creating a distributed checkpoint.\n\n Only checkpoints a file on the \"main\" training actor, avoiding\n redundant work.\n\n Args:\n step (int): Used to label the checkpoint\n disable (bool): Disable for prototyping.\n\n Yields:\n path (str): A path to a directory. This path will be used\n again when invoking the training_function.\n Example:\n\n .. code-block:: python\n\n def train_func(config, checkpoint_dir):\n if checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n model_state_dict = torch.load(path)\n\n if epoch % 3 == 0:\n with distributed_checkpoint_dir(step=epoch) as checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n torch.save(model.state_dict(), path)\n \"\"\"\n\n if torch.distributed.get_rank() == 0 and not disable:\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n yield checkpoint_dir\n else:\n path = tempfile.mkdtemp()\n yield path\n shutil.rmtree(path)\n\n\ndef _train_check_global(config, checkpoint_dir=None):\n \"\"\"For testing only. 
Putting this here because Ray has problems\n serializing within the test file.\"\"\"\n assert is_distributed_trainable()\n import time\n time.sleep(0.1)\n tune.report(is_distributed=True)\n\n\ndef _train_simple(config, checkpoint_dir=None):\n \"\"\"For testing only. Putting this here because Ray has problems\n serializing within the test file.\"\"\"\n import torch.nn as nn\n from torch.nn.parallel import DistributedDataParallel\n import torch.optim as optim\n # N is batch size; D_in is input dimension;\n # H is hidden dimension; D_out is output dimension.\n N, D_in, H, D_out = 8, 5, 5, 5\n\n # Create random Tensors to hold inputs and outputs\n x = torch.randn(N, D_in)\n y = torch.randn(N, D_out)\n loss_fn = nn.MSELoss()\n\n # Use the nn package to define our model and loss function.\n model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n )\n optimizer = optim.SGD(model.parameters(), lr=0.1)\n\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"checkpoint\")) as f:\n model_state, optimizer_state = torch.load(f)\n\n model.load_state_dict(model_state)\n optimizer.load_state_dict(optimizer_state)\n\n model = DistributedDataParallel(model)\n\n for epoch in range(config.get(\"epochs\", 10)):\n optimizer.zero_grad()\n output = model(x)\n loss = loss_fn(output, y)\n loss.backward()\n optimizer.step()\n\n if epoch % 3 == 0:\n if config.get(\"enable_checkpoint\", True):\n with distributed_checkpoint_dir(step=epoch) as checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n torch.save((model.state_dict(), optimizer.state_dict()),\n path)\n tune.report(mean_loss=loss.item())\n",
"import os\nimport numpy as np\nimport random\nimport unittest\n\nimport ray\nfrom ray.rllib import _register_all\n\nfrom ray import tune\nfrom ray.tune.result import DEFAULT_RESULTS_DIR\nfrom ray.tune.suggest import grid_search, BasicVariantGenerator\nfrom ray.tune.suggest.variant_generator import (RecursiveDependencyError,\n resolve_nested_dict)\n\n\nclass VariantGeneratorTest(unittest.TestCase):\n def setUp(self):\n ray.init()\n\n def tearDown(self):\n ray.shutdown()\n _register_all() # re-register the evicted objects\n\n def generate_trials(self, spec, name):\n suggester = BasicVariantGenerator()\n suggester.add_configurations({name: spec})\n return suggester.next_trials()\n\n def testParseToTrials(self):\n trials = self.generate_trials({\n \"run\": \"PPO\",\n \"num_samples\": 2,\n \"max_failures\": 5,\n \"config\": {\n \"env\": \"Pong-v0\",\n \"foo\": \"bar\"\n },\n }, \"tune-pong\")\n trials = list(trials)\n self.assertEqual(len(trials), 2)\n self.assertTrue(\"PPO_Pong-v0\" in str(trials[0]))\n self.assertEqual(trials[0].config, {\"foo\": \"bar\", \"env\": \"Pong-v0\"})\n self.assertEqual(trials[0].trainable_name, \"PPO\")\n self.assertEqual(trials[0].experiment_tag, \"0\")\n self.assertEqual(trials[0].max_failures, 5)\n self.assertEqual(trials[0].evaluated_params, {})\n self.assertEqual(trials[0].local_dir,\n os.path.join(DEFAULT_RESULTS_DIR, \"tune-pong\"))\n self.assertEqual(trials[1].experiment_tag, \"1\")\n\n def testEval(self):\n trials = self.generate_trials({\n \"run\": \"PPO\",\n \"config\": {\n \"foo\": {\n \"eval\": \"2 + 2\"\n },\n },\n }, \"eval\")\n trials = list(trials)\n self.assertEqual(len(trials), 1)\n self.assertEqual(trials[0].config, {\"foo\": 4})\n self.assertEqual(trials[0].evaluated_params, {\"foo\": 4})\n self.assertEqual(trials[0].experiment_tag, \"0_foo=4\")\n\n def testGridSearch(self):\n trials = self.generate_trials({\n \"run\": \"PPO\",\n \"config\": {\n \"bar\": {\n \"grid_search\": [True, False]\n },\n \"foo\": {\n \"grid_search\": [1, 2, 3]\n },\n \"baz\": \"asd\",\n },\n }, \"grid_search\")\n trials = list(trials)\n self.assertEqual(len(trials), 6)\n self.assertEqual(trials[0].config, {\n \"bar\": True,\n \"foo\": 1,\n \"baz\": \"asd\",\n })\n self.assertEqual(trials[0].evaluated_params, {\n \"bar\": True,\n \"foo\": 1,\n })\n self.assertEqual(trials[0].experiment_tag, \"0_bar=True,foo=1\")\n\n self.assertEqual(trials[1].config, {\n \"bar\": False,\n \"foo\": 1,\n \"baz\": \"asd\",\n })\n self.assertEqual(trials[1].evaluated_params, {\n \"bar\": False,\n \"foo\": 1,\n })\n self.assertEqual(trials[1].experiment_tag, \"1_bar=False,foo=1\")\n\n self.assertEqual(trials[2].config, {\n \"bar\": True,\n \"foo\": 2,\n \"baz\": \"asd\",\n })\n self.assertEqual(trials[2].evaluated_params, {\n \"bar\": True,\n \"foo\": 2,\n })\n\n self.assertEqual(trials[3].config, {\n \"bar\": False,\n \"foo\": 2,\n \"baz\": \"asd\",\n })\n self.assertEqual(trials[3].evaluated_params, {\n \"bar\": False,\n \"foo\": 2,\n })\n\n self.assertEqual(trials[4].config, {\n \"bar\": True,\n \"foo\": 3,\n \"baz\": \"asd\",\n })\n self.assertEqual(trials[4].evaluated_params, {\n \"bar\": True,\n \"foo\": 3,\n })\n\n self.assertEqual(trials[5].config, {\n \"bar\": False,\n \"foo\": 3,\n \"baz\": \"asd\",\n })\n self.assertEqual(trials[5].evaluated_params, {\n \"bar\": False,\n \"foo\": 3,\n })\n\n def testGridSearchAndEval(self):\n trials = self.generate_trials({\n \"run\": \"PPO\",\n \"config\": {\n \"qux\": tune.sample_from(lambda spec: 2 + 2),\n \"bar\": grid_search([True, False]),\n 
\"foo\": grid_search([1, 2, 3]),\n \"baz\": \"asd\",\n },\n }, \"grid_eval\")\n trials = list(trials)\n self.assertEqual(len(trials), 6)\n self.assertEqual(trials[0].config, {\n \"bar\": True,\n \"foo\": 1,\n \"qux\": 4,\n \"baz\": \"asd\",\n })\n self.assertEqual(trials[0].evaluated_params, {\n \"bar\": True,\n \"foo\": 1,\n \"qux\": 4,\n })\n self.assertEqual(trials[0].experiment_tag, \"0_bar=True,foo=1,qux=4\")\n\n def testConditionResolution(self):\n trials = self.generate_trials({\n \"run\": \"PPO\",\n \"config\": {\n \"x\": 1,\n \"y\": tune.sample_from(lambda spec: spec.config.x + 1),\n \"z\": tune.sample_from(lambda spec: spec.config.y + 1),\n },\n }, \"condition_resolution\")\n trials = list(trials)\n self.assertEqual(len(trials), 1)\n self.assertEqual(trials[0].config, {\"x\": 1, \"y\": 2, \"z\": 3})\n self.assertEqual(trials[0].evaluated_params, {\"y\": 2, \"z\": 3})\n self.assertEqual(trials[0].experiment_tag, \"0_y=2,z=3\")\n\n def testDependentLambda(self):\n trials = self.generate_trials({\n \"run\": \"PPO\",\n \"config\": {\n \"x\": grid_search([1, 2]),\n \"y\": tune.sample_from(lambda spec: spec.config.x * 100),\n },\n }, \"dependent_lambda\")\n trials = list(trials)\n self.assertEqual(len(trials), 2)\n self.assertEqual(trials[0].config, {\"x\": 1, \"y\": 100})\n self.assertEqual(trials[1].config, {\"x\": 2, \"y\": 200})\n\n def testDependentGridSearch(self):\n trials = self.generate_trials({\n \"run\": \"PPO\",\n \"config\": {\n \"x\": grid_search([\n tune.sample_from(lambda spec: spec.config.y * 100),\n tune.sample_from(lambda spec: spec.config.y * 200)\n ]),\n \"y\": tune.sample_from(lambda spec: 1),\n },\n }, \"dependent_grid_search\")\n trials = list(trials)\n self.assertEqual(len(trials), 2)\n self.assertEqual(trials[0].config, {\"x\": 100, \"y\": 1})\n self.assertEqual(trials[1].config, {\"x\": 200, \"y\": 1})\n\n def testDependentGridSearchCallable(self):\n class Normal:\n def __call__(self, _config):\n return random.normalvariate(mu=0, sigma=1)\n\n class Single:\n def __call__(self, _config):\n return 20\n\n trials = self.generate_trials({\n \"run\": \"PPO\",\n \"config\": {\n \"x\": grid_search(\n [tune.sample_from(Normal()),\n tune.sample_from(Normal())]),\n \"y\": tune.sample_from(Single()),\n },\n }, \"dependent_grid_search\")\n trials = list(trials)\n self.assertEqual(len(trials), 2)\n self.assertEqual(trials[0].config[\"y\"], 20)\n self.assertEqual(trials[1].config[\"y\"], 20)\n\n def testNestedValues(self):\n trials = self.generate_trials({\n \"run\": \"PPO\",\n \"config\": {\n \"x\": {\n \"y\": {\n \"z\": tune.sample_from(lambda spec: 1)\n }\n },\n \"y\": tune.sample_from(lambda spec: 12),\n \"z\": tune.sample_from(lambda spec: spec.config.x.y.z * 100),\n },\n }, \"nested_values\")\n trials = list(trials)\n self.assertEqual(len(trials), 1)\n self.assertEqual(trials[0].config, {\n \"x\": {\n \"y\": {\n \"z\": 1\n }\n },\n \"y\": 12,\n \"z\": 100\n })\n self.assertEqual(trials[0].evaluated_params, {\n \"x/y/z\": 1,\n \"y\": 12,\n \"z\": 100\n })\n\n def testLogUniform(self):\n sampler = tune.loguniform(1e-10, 1e-1)\n results = sampler.sample(None, 1000)\n assert abs(np.log(min(results)) / np.log(10) - -10) < 0.1\n assert abs(np.log(max(results)) / np.log(10) - -1) < 0.1\n\n sampler_e = tune.loguniform(np.e**-4, np.e, base=np.e)\n results_e = sampler_e.sample(None, 1000)\n assert abs(np.log(min(results_e)) - -4) < 0.1\n assert abs(np.log(max(results_e)) - 1) < 0.1\n\n def test_resolve_dict(self):\n config = {\n \"a\": {\n \"b\": 1,\n \"c\": 2,\n },\n \"b\": 
{\n \"a\": 3\n }\n }\n resolved = resolve_nested_dict(config)\n for k, v in [((\"a\", \"b\"), 1), ((\"a\", \"c\"), 2), ((\"b\", \"a\"), 3)]:\n self.assertEqual(resolved.get(k), v)\n\n def testRecursiveDep(self):\n try:\n list(\n self.generate_trials({\n \"run\": \"PPO\",\n \"config\": {\n \"foo\": tune.sample_from(lambda spec: spec.config.foo),\n },\n }, \"recursive_dep\"))\n except RecursiveDependencyError as e:\n assert \"`foo` recursively depends on\" in str(e), e\n else:\n raise\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n"
] | [
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.allclose",
"numpy.isnan",
"torch.distributed.is_initialized",
"torch.utils.data.DataLoader",
"torch.equal",
"torch.tensor",
"numpy.ceil",
"torch.nn.Linear",
"torch.distributed.is_available",
"numpy.random.randn",
"torch.distributed.destroy_process_group",
"torch.nn.MSELoss",
"torch.optim.lr_scheduler.StepLR"
],
[
"torch.load",
"torch.randn",
"torch.nn.Linear",
"torch.distributed.get_rank",
"torch.nn.ReLU",
"torch.nn.MSELoss",
"torch.nn.parallel.DistributedDataParallel"
],
[
"numpy.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yangxu0110/yysScript | [
"079101f57fb1a64b871924c988760d9e74063a71"
] | [
"yys/YuHunModule.py"
] | [
"# -*- coding: utf-8 -*-\nimport datetime\nimport logging\nimport os\nimport random\nimport time\nfrom tkinter import END\n\nimport cv2\nimport numpy\nimport numpy as np\nimport pyautogui\nfrom PIL import ImageGrab\nfrom matplotlib import pyplot as plt\n\npyautogui.FAILSAFE = False\nlogging.basicConfig(format=\"%(asctime)s :%(levelname)s:%(message)s\", datefmt=\"%d-%M-%Y %H:%M:%S\", level=logging.DEBUG)\n# 初始化SIFT探测器\nSIFT = cv2.xfeatures2d.SIFT_create()\n\n\ndef ComputeScreenShot(screenShot):\n \"\"\"\n 由于屏幕分辨率高,计算耗时,这里优化一下\n :return:\n \"\"\"\n kp2, des2 = SIFT.detectAndCompute(screenShot, None)\n return kp2, des2\n\n\ndef GetLocation(target, kp2, des2):\n \"\"\"\n 获取目标图像在截图中的位置\n :param target:\n :param screenShot:\n :return: 返回坐标(x,y) 与opencv坐标系对应\n \"\"\"\n MIN_MATCH_COUNT = 10\n img1 = target # cv2.cvtColor(target,cv2.COLOR_BGR2GRAY)# 查询图片\n # img2 = screenShot\n # img2 = cv2.cvtColor(screenShot, cv2.COLOR_BGR2GRAY) # 训练图片\n # img2 = cv2.resize(img2, dsize=None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST)\n # 用SIFT找到关键点和描述符\n\n kp1, des1 = SIFT.detectAndCompute(img1, None)\n\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)\n search_params = dict(checks=50)\n\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1, des2, k=2)\n good = []\n for m, n in matches:\n if m.distance < 0.7 * n.distance:\n good.append(m)\n if len(good) > MIN_MATCH_COUNT:\n src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n matchesMask = mask.ravel().tolist()\n h, w = img1.shape\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n if M is not None:\n dst = cv2.perspectiveTransform(pts, M)\n arr = np.int32(dst) #\n midPosArr = arr[0] + (arr[2] - arr[0]) // 2\n midPos = (midPosArr[0][0], midPosArr[0][1])\n # show=cv2.circle(img2,midPos,30,(255,255,255),thickness=5)\n # cv2.imshow('s',show)\n # cv2.waitKey()\n # cv2.destroyAllWindows()\n return midPos\n else:\n return None\n else:\n return None\n\n\ndef CheatPos(originPos, factor=5):\n \"\"\"\n 对原始点击坐标进行随机偏移,防止封号\n :param originPos:原始坐标\n :return:\n \"\"\"\n x, y = random.randint(-factor, factor), random.randint(-factor, factor)\n newPos = (originPos[0] + x, originPos[1] + y)\n return newPos\n\n\ndef Click(targetPosition):\n \"\"\"\n 点击屏幕上的某个点\n :param targetPosition:\n :return:\n \"\"\"\n if targetPosition is None:\n print('未检测到目标')\n else:\n\n pyautogui.moveTo(targetPosition, duration=0.20)\n pyautogui.click()\n time.sleep(random.randint(500, 1000) / 1000)\n\n # time.sleep(random.randint(100, 150) / 1000)\n\n\ndef loadImgs():\n \"\"\"\n 加载所有需要检测的目标图像\n :return:\n \"\"\"\n obj = {}\n path = os.getcwd() + '/img'\n file_list = os.listdir(path)\n\n for file in file_list:\n name = file.split('.')[0]\n file_path = path + '/' + file\n a = cv2.imread(file_path, 0)\n obj[name] = a\n\n return obj\n\n\ndef GetScreenShot():\n \"\"\"\n 获取屏幕截图\n :return:\n \"\"\"\n screen = ImageGrab.grab()\n # screen.save('screen.jpg')\n # screen = cv2.imread('screen.jpg')\n screen = cv2.cvtColor(numpy.asarray(screen), cv2.COLOR_RGB2BGR)\n logging.info('截屏成功')\n return screen\n\n\nclass YuHun():\n def __init__(self):\n self._flag = False\n self.NeedCloseGame=False\n self.NeedCloseSystem=False\n\n def Run(self, LogUI, NeedCloseGame, NeedCloseSystem):\n imgs = loadImgs()\n LogUI.insert(END,\n 
time.strftime('%Y-%m-%d %H:%M:%S ',\n time.localtime(time.time())) + '开始挑战\\n')\n Count = 1\n while self._flag is not True:\n logging.debug('开始挑战')\n screen = GetScreenShot()\n WindowShape = screen.shape\n result = []\n\n # 为了优化速度,把计算屏幕截图的特征提取出来,避免重复运算\n kp2, des2 = ComputeScreenShot(screen)\n for i in ['tili60', 'tili80', 'auto', 'jieshou2', 'jieshou1', 'end1', 'end2', 'reject', 'queding',\n 'tiaozhan']:\n obj = imgs[i]\n # begin = time.clock()\n pos = GetLocation(obj, kp2, des2)\n # logging.debug('检测结算目标图像')\n # print(time.clock()-begin)\n if pos is not None:\n if i == 'tili60' or i == 'tili80':\n print('window.py', NeedCloseSystem)\n if self.NeedCloseSystem:\n print('log')\n os.system('shutdown -s -t 60')\n return\n if not self.NeedCloseGame:\n # 需要手动关闭游戏\n LogUI.insert(END,\n time.strftime('%Y-%m-%d %H:%M:%S ',\n time.localtime(time.time())) + '体力用完,需要手动关闭加成或游戏\\n')\n return\n # 结束进程\n hasProcess = True\n while hasProcess:\n if 'onmyoji' in os.popen('tasklist /FI \"IMAGENAME eq onmyoji.exe\"').read():\n os.system('TASKKILL /F /IM onmyoji.exe')\n hasProcess = True\n else:\n hasProcess = False\n # 线程结束返回\n return\n elif i == 'end1':\n time.sleep(random.randint(300, 800) / 1000)\n pos = CheatPos(pos, 50)\n elif i == 'end2':\n newPos = (pos[0] + 80, pos[1] + 80)\n pos = CheatPos(newPos, 5)\n elif i == 'tiaozhan':\n LogUI.insert(END,\n time.strftime('%Y-%m-%d %H:%M:%S ',\n time.localtime(time.time())) + '第' + str(Count) + '轮开始\\n')\n Count += 1\n elif i == 'reject':\n pos = CheatPos(pos, 3)\n else:\n pos = CheatPos(pos, 10)\n result.append(pos)\n\n LogUI.see(END)\n else:\n result.append(None)\n # 开始检查结果\n for i in result:\n if i is not None:\n print(WindowShape[1] * 0.06)\n print(WindowShape[0] * 0.96)\n if i[0] < WindowShape[1] * 0.06 or i[1] > WindowShape[0] * 0.96:\n continue\n else:\n Click(i)\n if len(LogUI.get('1.0', 'end-1c')) > 6000:\n LogUI.delete(1.0, END) # 使用 delete\n LogUI.insert(END, ' 清空日志\\n')\n LogUI.see(END)\n\n def Terminate(self):\n self._flag = True\n\n\n# def YuHunTwoWindow(LogUI, NeedCloseGame, NeedCloseSystem):\n# \"\"\"\n# 自动御魂,双开模式\n# \"\"\"\n# imgs = loadImgs()\n# LogUI.insert(END,\n# time.strftime('%Y-%m-%d %H:%M:%S ',\n# time.localtime(time.time())) + '开始挑战\\n')\n# Count = 1\n# while True:\n#\n# logging.debug('开始挑战')\n# screen = GetScreenShot()\n# WindowShape = screen.shape\n# result = []\n#\n# # 为了优化速度,把计算屏幕截图的特征提取出来,避免重复运算\n# kp2, des2 = ComputeScreenShot(screen)\n# for i in ['tili60', 'tili80', 'auto', 'jieshou2', 'jieshou1', 'end1', 'end2', 'reject', 'queding', 'tiaozhan']:\n# obj = imgs[i]\n# # begin = time.clock()\n# pos = GetLocation(obj, kp2, des2)\n# # logging.debug('检测结算目标图像')\n# # print(time.clock()-begin)\n# if pos is not None:\n# if i == 'tili60' or i == 'tili80':\n# print('window.py', NeedCloseSystem)\n# if NeedCloseSystem:\n# print('log')\n# os.system('shutdown -s -t 60')\n# return\n# if not NeedCloseGame:\n# # print('体力用完,需要手动关闭加成或游戏')\n# LogUI.insert(END,\n# time.strftime('%Y-%m-%d %H:%M:%S ',\n# time.localtime(time.time())) + '体力用完,需要手动关闭加成或游戏\\n')\n# return\n# # 结束进程\n# hasProcess = True\n# while hasProcess:\n# if 'onmyoji' in os.popen('tasklist /FI \"IMAGENAME eq onmyoji.exe\"').read():\n# os.system('TASKKILL /F /IM onmyoji.exe')\n# hasProcess = True\n# else:\n# hasProcess = False\n# # 线程结束返回\n# return\n# elif i == 'end1':\n# time.sleep(random.randint(300, 800) / 1000)\n# pos = CheatPos(pos, 50)\n# elif i == 'end2':\n# newPos = (pos[0] + 80, pos[1] + 80)\n# pos = CheatPos(newPos, 5)\n# elif i == 'tiaozhan':\n# LogUI.insert(END,\n# 
time.strftime('%Y-%m-%d %H:%M:%S ',\n# time.localtime(time.time())) + '第' + str(Count) + '轮开始\\n')\n# Count += 1\n# elif i == 'reject':\n# pos = CheatPos(pos, 3)\n# else:\n# pos = CheatPos(pos, 10)\n# result.append(pos)\n#\n# LogUI.see(END)\n# else:\n# result.append(None)\n# # 开始检查结果\n# for i in result:\n# if i is not None:\n# print(WindowShape[1] * 0.06)\n# print(WindowShape[0] * 0.96)\n# if i[0] < WindowShape[1] * 0.06 or i[1] > WindowShape[0] * 0.96:\n# continue\n# else:\n# Click(i)\n# if len(LogUI.get('1.0', 'end-1c')) > 6000:\n# LogUI.delete(1.0, END) # 使用 delete\n# LogUI.insert(END, ' 清空日志\\n')\n# LogUI.see(END)\n\n\nif __name__ == '__main__':\n pass\n"
] | [
[
"numpy.asarray",
"numpy.int32",
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KeDengMS/CNTK | [
"fce86cd9581e7ba746d1ec75bbd67dd35d35d11c"
] | [
"bindings/python/examples/test/SLUHandsOn_test.py"
] | [
"# Copyright (c) Microsoft. All rights reserved.\n\n# Licensed under the MIT license. See LICENSE.md file in the project root\n# for full license information.\n# ==============================================================================\n\n# TODO: This does not work yet, need to figure out the right pattern.\n\nimport numpy as np\nfrom cntk import DeviceDescriptor\n\n# this emulates a \"main\" function for SLUHandsOn\nfrom examples.SLUHandsOn.SLUHandsOn import *\nfrom examples.SLUHandsOn.SLUHandsOn import _Infer # TODO: remove\ndef slu_hands_on():\n reader = create_reader(data_dir + \"/atis.train.ctf\")\n model = create_model(_inf=_Infer(shape=input_dim, axis=[Axis.default_batch_axis(), Axis.default_dynamic_axis()]))\n loss, metric = train(reader, model, max_epochs=1)\n return metric, loss # note: strange order\n\nTOLERANCE_ABSOLUTE = 1E-1\n\ndef test_seq_classification_error(device_id):\n from cntk.utils import cntk_device\n DeviceDescriptor.set_default_device(cntk_device(device_id))\n\n evaluation_avg, loss_avg = slu_hands_on()\n\n expected_avg = [0.15570838301766451, 0.7846451368305728]\n assert np.allclose([evaluation_avg, loss_avg], expected_avg, atol=TOLERANCE_ABSOLUTE)\n\nif __name__=='__main__':\n test_seq_classification_error(0)\n"
] | [
[
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
backwardn/imagededup | [
"38ce34c35187ec33bd996d833293f8ee95ff8202",
"38ce34c35187ec33bd996d833293f8ee95ff8202",
"38ce34c35187ec33bd996d833293f8ee95ff8202"
] | [
"imagededup/utils/data_generator.py",
"tests/test_hashing.py",
"imagededup/methods/cnn.py"
] | [
"from pathlib import PurePath\nfrom typing import Tuple, List, Callable\n\nimport numpy as np\nfrom tensorflow.keras.utils import Sequence\n\nfrom imagededup.utils.image_utils import load_image\n\n\nclass DataGenerator(Sequence):\n \"\"\"Class inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator.\n\n Attributes:\n image_dir: Path of image directory.\n batch_size: Number of images per batch.\n basenet_preprocess: Basenet specific preprocessing function.\n target_size: Dimensions that images get resized into when loaded.\n \"\"\"\n\n def __init__(\n self,\n image_dir: PurePath,\n batch_size: int,\n basenet_preprocess: Callable,\n target_size: Tuple[int, int],\n ) -> None:\n \"\"\"Init DataGenerator object.\n \"\"\"\n self.image_dir = image_dir\n self.batch_size = batch_size\n self.basenet_preprocess = basenet_preprocess\n self.target_size = target_size\n self.counter = 0\n\n self._get_image_files()\n self.on_epoch_end()\n\n def _get_image_files(self) -> None:\n self.invalid_image_idx = []\n self.image_files = sorted(\n [\n i.absolute()\n for i in self.image_dir.glob('*')\n if not i.name.startswith('.')]\n ) # ignore hidden files\n\n def on_epoch_end(self) -> None:\n \"\"\"Method called at the end of every epoch.\n \"\"\"\n self.indexes = np.arange(len(self.image_files))\n self.valid_image_files = [\n j for i, j in enumerate(self.image_files) if i not in self.invalid_image_idx\n ]\n\n def __len__(self) -> int:\n \"\"\"Number of batches in the Sequence.\"\"\"\n return int(np.ceil(len(self.image_files) / self.batch_size))\n\n def __getitem__(self, index: int) -> Tuple[np.array, np.array]:\n \"\"\"Get batch at position `index`.\n \"\"\"\n batch_indexes = self.indexes[\n index * self.batch_size : (index + 1) * self.batch_size\n ]\n batch_samples = [self.image_files[i] for i in batch_indexes]\n X = self._data_generator(batch_samples)\n return X\n\n def _data_generator(\n self, image_files: List[PurePath]\n ) -> Tuple[np.array, np.array]:\n \"\"\"Generate data from samples in specified batch.\"\"\"\n # initialize images and labels tensors for faster processing\n X = np.empty((len(image_files), *self.target_size, 3))\n\n invalid_image_idx = []\n for i, image_file in enumerate(image_files):\n # load and randomly augment image\n img = load_image(\n image_file=image_file, target_size=self.target_size, grayscale=False\n )\n\n if img is not None:\n X[i, :] = img\n\n else:\n invalid_image_idx.append(i)\n self.invalid_image_idx.append(self.counter)\n\n self.counter += 1\n\n if invalid_image_idx:\n X = np.delete(X, invalid_image_idx, axis=0)\n\n # apply basenet specific preprocessing\n # input is 4D numpy array of RGB values within [0, 255]\n X = self.basenet_preprocess(X)\n\n return X\n",
"import os\nimport sys\nfrom pathlib import Path\nfrom PIL import Image\n\nimport pytest\nimport numpy as np\n\nfrom imagededup.methods.hashing import Hashing, PHash, DHash, AHash, WHash\n\np = Path(__file__)\n\nPATH_IMAGE_DIR = p.parent / 'data/mixed_images'\nPATH_IMAGE_DIR_STRING = os.path.join(os.getcwd(), 'tests/data/mixed_images')\nPATH_SINGLE_IMAGE = p.parent / 'data/mixed_images/ukbench00120.jpg'\nPATH_SINGLE_IMAGE_STRING = p.parent / 'data/mixed_images/ukbench00120.jpg'\nPATH_SINGLE_IMAGE_CORRUPT = p.parent / 'data/mixed_images/ukbench09268_corrupt.jpg'\nPATH_SINGLE_IMAGE_RESIZED = p.parent / 'data/mixed_images/ukbench00120_resize.jpg'\n\n\n# Test parent class (static methods/class attributes initialization)\n\n\[email protected]\ndef hasher():\n hashobj = Hashing()\n return hashobj\n\n\ndef test_correct_init_hashing(hasher):\n assert hasher.target_size == (8, 8)\n\n\ndef test_hamming_distance(hasher):\n # Put two numbers and check if hamming distance is correct\n number_1 = '1a'\n number_2 = '1f'\n hamdist = hasher.hamming_distance(number_1, number_2)\n assert hamdist == 2\n\n\ndef test__array_to_hash(hasher):\n hash_mat = np.array(\n [1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0]\n )\n assert hasher._array_to_hash(hash_mat) == '9191fa'\n\n\ndef test__check_hamming_distance_bounds_input_not_int(hasher):\n with pytest.raises(TypeError):\n hasher._check_hamming_distance_bounds(thresh=1.0)\n\n\ndef test__check_hamming_distance_bounds_out_of_bound(hasher):\n with pytest.raises(ValueError):\n hasher._check_hamming_distance_bounds(thresh=68)\n\n\ndef test__check_hamming_distance_bounds_correct(hasher):\n assert hasher._check_hamming_distance_bounds(thresh=20) is None\n\n\n# encode_image\n\n\[email protected]\ndef mocker_preprocess_image(mocker):\n ret_val = np.zeros((2, 2))\n preprocess_image_mocker = mocker.patch(\n 'imagededup.methods.hashing.preprocess_image', return_value=ret_val\n )\n return preprocess_image_mocker\n\n\[email protected]\ndef mocker_hash_func(mocker):\n ret_val = np.zeros((2, 2))\n hash_func_mocker = mocker.patch(\n 'imagededup.methods.hashing.Hashing._hash_func', return_value=ret_val\n )\n return hash_func_mocker\n\n\[email protected]\ndef mocker_load_image(mocker):\n ret_val = np.zeros((2, 2))\n load_image_mocker = mocker.patch(\n 'imagededup.methods.hashing.load_image', return_value=ret_val, autospec=True\n )\n return load_image_mocker\n\n\ndef test_encode_image_accepts_image_posixpath(\n hasher, mocker_load_image, mocker_hash_func\n):\n ret_val = np.zeros((2, 2))\n hasher.encode_image(image_file=PATH_SINGLE_IMAGE)\n mocker_load_image.assert_called_with(\n image_file=PATH_SINGLE_IMAGE, grayscale=True, target_size=(8, 8)\n )\n np.testing.assert_array_equal(ret_val, mocker_hash_func.call_args[0][0])\n\n\ndef test_encode_image_accepts_numpy_array(\n hasher, mocker_preprocess_image, mocker_hash_func\n):\n ret_val = np.zeros((2, 2))\n hasher.encode_image(image_array=ret_val)\n mocker_preprocess_image.assert_called_with(\n image=ret_val, target_size=(8, 8), grayscale=True\n )\n np.testing.assert_array_equal(ret_val, mocker_hash_func.call_args[0][0])\n\n\ndef test_encode_image_valerror_wrong_input(hasher):\n pil_im = Image.open(PATH_SINGLE_IMAGE)\n with pytest.raises(ValueError):\n hasher.encode_image(image_file=pil_im)\n\n\ndef test_encode_image_valerror_wrong_input_array(hasher):\n pil_im = Image.open(PATH_SINGLE_IMAGE)\n with pytest.raises(ValueError):\n hasher.encode_image(image_array=pil_im)\n\n\ndef 
test_encode_image_returns_none_image_pp_not_array(hasher, mocker):\n mocker.patch('imagededup.methods.hashing.load_image', return_value=None)\n assert hasher.encode_image(PATH_SINGLE_IMAGE) is None\n\n\ndef test_encode_image_returns_none_image_pp_not_array_array_input(hasher, mocker):\n mocker.patch('imagededup.methods.hashing.preprocess_image', return_value=None)\n assert hasher.encode_image(image_array=np.zeros((2, 2))) is None\n\n\ndef test_encode_image_accepts_non_posixpath(\n hasher, mocker_load_image, mocker_hash_func\n):\n ret_val = np.zeros((2, 2))\n hasher.encode_image(image_file=PATH_SINGLE_IMAGE_STRING)\n mocker_load_image.assert_called_with(\n image_file=PATH_SINGLE_IMAGE, grayscale=True, target_size=(8, 8)\n )\n np.testing.assert_array_equal(ret_val, mocker_hash_func.call_args[0][0])\n\n\n# _encoder\n\n\[email protected]\ndef mocker_encode_image(mocker):\n mocker.patch(\n 'imagededup.methods.hashing.parallelise', return_value='123456789ABCDEFA'\n )\n\n\n# encode_images\n\n\ndef test_encode_images_accepts_valid_posixpath(hasher, mocker_encode_image):\n assert len(hasher.encode_images(PATH_IMAGE_DIR)) == 6 # 6 files in the directory\n\n\ndef test_encode_images_accepts_non_posixpath(hasher, mocker_encode_image):\n assert len(hasher.encode_images(PATH_IMAGE_DIR_STRING)) == 6\n\n\ndef test_encode_images_rejects_non_directory_paths(hasher):\n with pytest.raises(ValueError):\n hasher.encode_images(PATH_SINGLE_IMAGE)\n\n\ndef test_encode_images_return_vals(hasher, mocker_encode_image):\n encoded_val = '123456789ABCDEFA'\n hashes = hasher.encode_images(PATH_IMAGE_DIR)\n assert isinstance(hashes, dict)\n assert list(hashes.values())[0] == encoded_val[0]\n assert PATH_SINGLE_IMAGE.name in hashes.keys()\n\n\ndef test_hash_func(hasher, mocker):\n inp_array = np.array((3, 3))\n ret_arr = np.array((2, 2))\n hash_algo_mocker = mocker.patch(\n 'imagededup.methods.hashing.Hashing._hash_algo', return_value=ret_arr\n )\n array_mocker = mocker.patch('imagededup.methods.hashing.Hashing._array_to_hash')\n hasher._hash_func(inp_array)\n np.testing.assert_array_equal(inp_array, hash_algo_mocker.call_args[0][0])\n array_mocker.assert_called_with(ret_arr)\n\n\n# _find_duplicates_dict\n\n\[email protected](sys.platform == 'win32', reason='Does not run on Windows.')\ndef test__find_duplicates_dict_outfile_none(mocker):\n encoding_map = {'1.jpg': '123456'}\n threshold = 10\n scores = True\n outfile = None\n verbose = False\n myhasher = PHash(verbose=verbose)\n hasheval_mocker = mocker.patch('imagededup.methods.hashing.HashEval')\n save_json_mocker = mocker.patch('imagededup.methods.hashing.save_json')\n myhasher._find_duplicates_dict(\n encoding_map=encoding_map,\n max_distance_threshold=threshold,\n scores=scores,\n outfile=outfile,\n )\n hasheval_mocker.assert_called_with(\n test=encoding_map,\n queries=encoding_map,\n distance_function=Hashing.hamming_distance,\n verbose=verbose,\n threshold=threshold,\n search_method='brute_force_cython',\n )\n hasheval_mocker.return_value.retrieve_results.assert_called_once_with(scores=scores)\n save_json_mocker.assert_not_called()\n\n\[email protected](sys.platform == 'win32', reason='Does not run on Windows.')\ndef test__find_duplicates_dict_outfile_none_verbose(hasher, mocker):\n encoding_map = {'1.jpg': '123456'}\n threshold = 10\n scores = True\n outfile = None\n hasheval_mocker = mocker.patch('imagededup.methods.hashing.HashEval')\n save_json_mocker = mocker.patch('imagededup.methods.hashing.save_json')\n hasher._find_duplicates_dict(\n 
encoding_map=encoding_map,\n max_distance_threshold=threshold,\n scores=scores,\n outfile=outfile,\n )\n hasheval_mocker.assert_called_with(\n test=encoding_map,\n queries=encoding_map,\n distance_function=Hashing.hamming_distance,\n verbose=True,\n threshold=threshold,\n search_method='brute_force_cython',\n )\n hasheval_mocker.return_value.retrieve_results.assert_called_once_with(scores=scores)\n save_json_mocker.assert_not_called()\n\n\[email protected](sys.platform == 'win32', reason='Does not run on Windows.')\ndef test__find_duplicates_dict_outfile_true(hasher, mocker):\n encoding_map = {'1.jpg': '123456'}\n threshold = 10\n scores = True\n outfile = True\n verbose = True\n hasheval_mocker = mocker.patch('imagededup.methods.hashing.HashEval')\n hasheval_mocker.return_value.retrieve_results.return_value = {\n 'filename.jpg': [('dup1.jpg', 3)],\n 'filename2.jpg': [('dup2.jpg', 10)],\n }\n save_json_mocker = mocker.patch('imagededup.methods.hashing.save_json')\n hasher._find_duplicates_dict(\n encoding_map=encoding_map,\n max_distance_threshold=threshold,\n scores=scores,\n outfile=outfile,\n )\n hasheval_mocker.assert_called_with(\n test=encoding_map,\n queries=encoding_map,\n distance_function=Hashing.hamming_distance,\n verbose=verbose,\n threshold=threshold,\n search_method='brute_force_cython',\n )\n hasheval_mocker.return_value.retrieve_results.assert_called_once_with(scores=scores)\n save_json_mocker.assert_called_once_with(\n hasheval_mocker.return_value.retrieve_results.return_value, outfile\n )\n\n\n# _find_duplicates_dir\n\n\[email protected](sys.platform == 'win32', reason='Does not run on Windows.')\ndef test__find_duplicates_dir(hasher, mocker):\n encoding_map = {'1.jpg': '123456'}\n threshold = 10\n scores = True\n outfile = True\n ret_val_find_dup_dict = {\n 'filename.jpg': [('dup1.jpg', 3)],\n 'filename2.jpg': [('dup2.jpg', 10)],\n }\n encode_images_mocker = mocker.patch(\n 'imagededup.methods.hashing.Hashing.encode_images', return_value=encoding_map\n )\n find_dup_dict_mocker = mocker.patch(\n 'imagededup.methods.hashing.Hashing._find_duplicates_dict',\n return_value=ret_val_find_dup_dict,\n )\n hasher._find_duplicates_dir(\n image_dir=PATH_IMAGE_DIR,\n max_distance_threshold=threshold,\n scores=scores,\n outfile=outfile,\n search_method='brute_force_cython',\n )\n encode_images_mocker.assert_called_once_with(PATH_IMAGE_DIR)\n find_dup_dict_mocker.assert_called_once_with(\n encoding_map=encoding_map,\n max_distance_threshold=threshold,\n scores=scores,\n outfile=outfile,\n search_method='brute_force_cython',\n )\n\n\n# find_duplicates\n\n\[email protected]\ndef mocker_hamming_distance(mocker):\n return mocker.patch(\n 'imagededup.methods.hashing.Hashing._check_hamming_distance_bounds'\n )\n\n\[email protected](sys.platform == 'win32', reason='Does not run on Windows.')\ndef test_find_duplicates_dir(hasher, mocker, mocker_hamming_distance):\n threshold = 10\n scores = True\n outfile = True\n find_dup_dir_mocker = mocker.patch(\n 'imagededup.methods.hashing.Hashing._find_duplicates_dir'\n )\n hasher.find_duplicates(\n image_dir=PATH_IMAGE_DIR,\n max_distance_threshold=threshold,\n outfile=outfile,\n scores=scores,\n search_method='brute_force_cython',\n )\n mocker_hamming_distance.assert_called_once_with(thresh=threshold)\n find_dup_dir_mocker.assert_called_once_with(\n image_dir=PATH_IMAGE_DIR,\n max_distance_threshold=threshold,\n scores=scores,\n outfile=outfile,\n search_method='brute_force_cython',\n )\n\n\[email protected](sys.platform == 'win32', reason='Does not 
run on Windows.')\ndef test_find_duplicates_dict(hasher, mocker, mocker_hamming_distance):\n encoding_map = {'1.jpg': '123456'}\n threshold = 10\n scores = True\n outfile = True\n find_dup_dict_mocker = mocker.patch(\n 'imagededup.methods.hashing.Hashing._find_duplicates_dict'\n )\n hasher.find_duplicates(\n encoding_map=encoding_map,\n max_distance_threshold=threshold,\n outfile=outfile,\n scores=scores,\n search_method='brute_force_cython',\n )\n mocker_hamming_distance.assert_called_once_with(thresh=threshold)\n find_dup_dict_mocker.assert_called_once_with(\n encoding_map=encoding_map,\n max_distance_threshold=threshold,\n scores=scores,\n outfile=outfile,\n search_method='brute_force_cython',\n )\n\n\ndef test_find_duplicates_wrong_input(hasher):\n with pytest.raises(ValueError):\n hasher.find_duplicates(max_distance_threshold=10)\n\n\n# find_duplicates_to_remove\n\n\ndef test_find_duplicates_to_remove_outfile_false(hasher, mocker):\n threshold = 10\n outfile = False\n ret_val_find_dup_dict = {\n 'filename.jpg': [('dup1.jpg', 3)],\n 'filename2.jpg': [('dup2.jpg', 10)],\n }\n find_duplicates_mocker = mocker.patch(\n 'imagededup.methods.hashing.Hashing.find_duplicates',\n return_value=ret_val_find_dup_dict,\n )\n get_files_to_remove_mocker = mocker.patch(\n 'imagededup.methods.hashing.get_files_to_remove'\n )\n save_json_mocker = mocker.patch('imagededup.methods.hashing.save_json')\n hasher.find_duplicates_to_remove(\n image_dir=PATH_IMAGE_DIR, max_distance_threshold=threshold, outfile=outfile\n )\n find_duplicates_mocker.assert_called_once_with(\n image_dir=PATH_IMAGE_DIR,\n encoding_map=None,\n max_distance_threshold=threshold,\n scores=False,\n )\n get_files_to_remove_mocker.assert_called_once_with(ret_val_find_dup_dict)\n save_json_mocker.assert_not_called()\n\n\ndef test_find_duplicates_to_remove_outfile_true(hasher, mocker):\n threshold = 10\n outfile = True\n ret_val_find_dup_dict = {\n 'filename.jpg': [('dup1.jpg', 3)],\n 'filename2.jpg': [('dup2.jpg', 10)],\n }\n ret_val_get_files_to_remove = ['1.jpg', '2.jpg']\n find_duplicates_mocker = mocker.patch(\n 'imagededup.methods.hashing.Hashing.find_duplicates',\n return_value=ret_val_find_dup_dict,\n )\n get_files_to_remove_mocker = mocker.patch(\n 'imagededup.methods.hashing.get_files_to_remove',\n return_value=ret_val_get_files_to_remove,\n )\n save_json_mocker = mocker.patch('imagededup.methods.hashing.save_json')\n hasher.find_duplicates_to_remove(\n image_dir=PATH_IMAGE_DIR, max_distance_threshold=threshold, outfile=outfile\n )\n find_duplicates_mocker.assert_called_once_with(\n image_dir=PATH_IMAGE_DIR,\n encoding_map=None,\n max_distance_threshold=threshold,\n scores=False,\n )\n get_files_to_remove_mocker.assert_called_once_with(ret_val_find_dup_dict)\n save_json_mocker.assert_called_once_with(ret_val_get_files_to_remove, outfile)\n\n\ndef test_find_duplicates_to_remove_encoding_map(hasher, mocker):\n encoding_map = {'1.jpg': '123456'}\n threshold = 10\n outfile = False\n ret_val_find_dup_dict = {\n 'filename.jpg': [('dup1.jpg', 3)],\n 'filename2.jpg': [('dup2.jpg', 10)],\n }\n find_duplicates_mocker = mocker.patch(\n 'imagededup.methods.hashing.Hashing.find_duplicates',\n return_value=ret_val_find_dup_dict,\n )\n get_files_to_remove_mocker = mocker.patch(\n 'imagededup.methods.hashing.get_files_to_remove'\n )\n save_json_mocker = mocker.patch('imagededup.methods.hashing.save_json')\n hasher.find_duplicates_to_remove(\n encoding_map=encoding_map, max_distance_threshold=threshold, outfile=outfile\n )\n 
find_duplicates_mocker.assert_called_once_with(\n encoding_map=encoding_map,\n image_dir=None,\n max_distance_threshold=threshold,\n scores=False,\n )\n get_files_to_remove_mocker.assert_called_once_with(ret_val_find_dup_dict)\n save_json_mocker.assert_not_called()\n\n\n# Integration tests\n\nphasher = PHash()\ndhasher = DHash()\nahasher = AHash()\nwhasher = WHash()\n\ncommon_test_parameters = [\n phasher.encode_image,\n dhasher.encode_image,\n ahasher.encode_image,\n whasher.encode_image,\n]\n\n\[email protected]('hash_function', common_test_parameters)\nclass TestCommon:\n def test_len_hash(self, hash_function):\n hash_im = hash_function(PATH_SINGLE_IMAGE)\n assert len(hash_im) == 16\n\n def test_hash_resize(self, hash_function):\n # Resize one image to (300, 300) and check that hamming distance between hashes is not too large\n hash_im_1 = hash_function(PATH_SINGLE_IMAGE)\n hash_im_2 = hash_function(PATH_SINGLE_IMAGE_RESIZED)\n hamdist = Hashing.hamming_distance(hash_im_1, hash_im_2)\n assert hamdist < 3\n\n def test_hash_small_rotation(self, hash_function):\n # Rotate image slightly (1 degree) and check that hamming distance between hashes is not too large\n orig_image = Image.open(PATH_SINGLE_IMAGE)\n rotated_image = np.array(orig_image.rotate(1))\n hash_im_1 = hash_function(image_array=np.array(orig_image))\n hash_im_2 = hash_function(image_array=rotated_image)\n hamdist = Hashing.hamming_distance(hash_im_1, hash_im_2)\n assert hamdist < 3\n\n def test_hash_distinct_images(self, hash_function):\n # Put in distinct images and check that hamming distance between hashes is large\n hash_im_1 = hash_function(PATH_SINGLE_IMAGE)\n hash_im_2 = hash_function(p.parent / 'data/mixed_images/ukbench09268.jpg')\n hamdist = Hashing.hamming_distance(hash_im_1, hash_im_2)\n assert hamdist > 20\n\n def test_same_hashes_with_different_inputs(self, hash_function):\n arr_inp = np.array(Image.open(PATH_SINGLE_IMAGE))\n assert hash_function(image_array=arr_inp) == hash_function(PATH_SINGLE_IMAGE)\n\n\ndef test_encode_images_returns_dict():\n hash_dict = phasher.encode_images(PATH_IMAGE_DIR)\n assert isinstance(hash_dict, dict)\n\n\ndef test_encode_images_return_non_none_hashes():\n hash_dict = dhasher.encode_images(PATH_IMAGE_DIR)\n for v in hash_dict.values():\n assert v is not None\n\n\n# For each of the hash types, check correctness of hashes for known images\n# Check encode_image(s)\n\n\[email protected](\n 'hash_object, expected_hash',\n [\n (phasher, '9fee256239984d71'),\n (dhasher, '2b69707551f1b87a'),\n (ahasher, '81b8bc3c3c3c1e0a'),\n (whasher, '89b8bc3c3c3c5e0e'),\n ],\n)\ndef test_encode_image_hash(hash_object, expected_hash):\n assert hash_object.encode_image(PATH_SINGLE_IMAGE) == expected_hash\n\n\ndef test_encode_image_corrupt_file():\n whasher = WHash()\n assert whasher.encode_image(PATH_SINGLE_IMAGE_CORRUPT) is None\n\n\ndef test_encode_images_corrupt_and_good_images():\n ahasher = AHash()\n hashes = ahasher.encode_images(PATH_IMAGE_DIR)\n assert len(hashes) == 5 # 5 non-corrupt files in the directory, 1 corrupt\n assert isinstance(hashes, dict)\n\n\ndef test_find_duplicates_correctness():\n phasher = PHash()\n duplicate_dict = phasher.find_duplicates(\n image_dir=PATH_IMAGE_DIR, max_distance_threshold=10\n )\n assert isinstance(duplicate_dict, dict)\n assert isinstance(list(duplicate_dict.values())[0], list)\n assert len(duplicate_dict['ukbench09268.jpg']) == 0\n assert duplicate_dict['ukbench00120.jpg'] == ['ukbench00120_resize.jpg']\n\n\ndef test_find_duplicates_correctness_score():\n 
phasher = PHash()\n duplicate_dict = phasher.find_duplicates(\n image_dir=PATH_IMAGE_DIR, max_distance_threshold=10, scores=True\n )\n assert isinstance(duplicate_dict, dict)\n duplicates = list(duplicate_dict.values())\n assert isinstance(duplicates[0], list)\n assert isinstance(duplicates[0][0], tuple)\n assert duplicate_dict['ukbench09268.jpg'] == []\n assert duplicate_dict['ukbench00120.jpg'] == [('ukbench00120_resize.jpg', 0)]\n\n\[email protected](sys.platform == 'win32', reason='Does not run on Windows.')\ndef test_find_duplicates_clearing():\n phasher = PHash()\n duplicate_dict = phasher.find_duplicates(\n image_dir=PATH_IMAGE_DIR,\n max_distance_threshold=10,\n scores=True,\n search_method='brute_force_cython',\n )\n\n duplicate_dict = phasher.find_duplicates(\n image_dir=PATH_IMAGE_DIR,\n max_distance_threshold=10,\n scores=True,\n search_method='brute_force_cython',\n )\n\n assert isinstance(duplicate_dict, dict)\n duplicates = list(duplicate_dict.values())\n assert isinstance(duplicates[0], list)\n assert isinstance(duplicates[0][0], tuple)\n assert duplicate_dict['ukbench09268.jpg'] == []\n assert duplicate_dict['ukbench00120.jpg'] == [('ukbench00120_resize.jpg', 0)]\n\n\ndef test_find_duplicates_outfile():\n dhasher = DHash()\n outfile_name = 'score_output.json'\n if os.path.exists(outfile_name):\n os.remove(outfile_name)\n _ = dhasher.find_duplicates(\n image_dir=PATH_IMAGE_DIR,\n max_distance_threshold=10,\n scores=True,\n outfile=outfile_name,\n )\n assert os.path.exists(outfile_name)\n # clean up\n if os.path.exists(outfile_name):\n os.remove(outfile_name)\n\n\ndef test_find_duplicates_encoding_map_input():\n encoding = {\n 'ukbench00120_resize.jpg': '9fee256239984d71',\n 'ukbench00120_rotation.jpg': '850d513c4fdcbb72',\n 'ukbench00120.jpg': '9fee256239984d71',\n 'ukbench00120_hflip.jpg': 'cabb7237e8cd3824',\n 'ukbench09268.jpg': 'c73c36c2da2f29c9',\n }\n phasher = PHash()\n duplicate_dict = phasher.find_duplicates(\n encoding_map=encoding, max_distance_threshold=10\n )\n assert isinstance(duplicate_dict, dict)\n assert isinstance(list(duplicate_dict.values())[0], list)\n assert len(duplicate_dict['ukbench09268.jpg']) == 0\n assert duplicate_dict['ukbench00120.jpg'] == ['ukbench00120_resize.jpg']\n\n\ndef test_find_duplicates_to_remove_dir():\n phasher = PHash()\n removal_list = phasher.find_duplicates_to_remove(\n image_dir=PATH_IMAGE_DIR, max_distance_threshold=10\n )\n assert isinstance(removal_list, list)\n assert removal_list == ['ukbench00120.jpg'] or removal_list == [\n 'ukbench00120_resize.jpg'\n ]\n\n\ndef test_find_duplicates_to_remove_encoding():\n encoding = {\n 'ukbench00120_resize.jpg': '9fee256239984d71',\n 'ukbench00120_rotation.jpg': '850d513c4fdcbb72',\n 'ukbench00120.jpg': '9fee256239984d71',\n 'ukbench00120_hflip.jpg': 'cabb7237e8cd3824',\n 'ukbench09268.jpg': 'c73c36c2da2f29c9',\n }\n phasher = PHash()\n removal_list = phasher.find_duplicates_to_remove(\n encoding_map=encoding, max_distance_threshold=10\n )\n assert isinstance(removal_list, list)\n assert removal_list == ['ukbench00120.jpg'] or removal_list == [\n 'ukbench00120_resize.jpg'\n ]\n\n\ndef test_find_duplicates_to_remove_outfile():\n dhasher = DHash()\n outfile_name = 'removal_list.json'\n if os.path.exists(outfile_name):\n os.remove(outfile_name)\n _ = dhasher.find_duplicates(\n image_dir=PATH_IMAGE_DIR, max_distance_threshold=10, outfile=outfile_name\n )\n assert os.path.exists(outfile_name)\n # clean up\n if os.path.exists(outfile_name):\n os.remove(outfile_name)\n\n\n# test 
verbose\ndef test_encode_images_verbose_true(capsys):\n phasher = PHash(verbose=True)\n phasher.encode_images(image_dir=PATH_IMAGE_DIR)\n out, err = capsys.readouterr()\n\n assert '%' in err\n assert '' == out\n\n\ndef test_encode_images_verbose_false(capsys):\n phasher = PHash(verbose=False)\n phasher.encode_images(image_dir=PATH_IMAGE_DIR)\n out, err = capsys.readouterr()\n\n assert '' == err\n assert '' == out\n\n\ndef test_find_duplicates_verbose_true(capsys):\n phasher = PHash(verbose=True)\n phasher.find_duplicates(\n image_dir=PATH_IMAGE_DIR, max_distance_threshold=10, scores=False, outfile=False\n )\n out, err = capsys.readouterr()\n\n assert '%' in err\n assert '' == out\n\n\ndef test_find_duplicates_verbose_false(capsys):\n phasher = PHash(verbose=False)\n phasher.find_duplicates(\n image_dir=PATH_IMAGE_DIR, max_distance_threshold=10, scores=False, outfile=False\n )\n out, err = capsys.readouterr()\n\n assert '' == out\n assert '' == err\n",
"from pathlib import Path, PurePath\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\n\nfrom imagededup.handlers.search.retrieval import get_cosine_similarity\nfrom imagededup.utils.general_utils import save_json, get_files_to_remove\nfrom imagededup.utils.image_utils import load_image, preprocess_image\nfrom imagededup.utils.logger import return_logger\n\n\nclass CNN:\n \"\"\"\n Find duplicates using CNN and/or generate CNN encodings given a single image or a directory of images.\n\n The module can be used for 2 purposes: Encoding generation and duplicate detection.\n - Encodings generation:\n To propagate an image through a Convolutional Neural Network architecture and generate encodings. The generated\n encodings can be used at a later time for deduplication. Using the method 'encode_image', the CNN encodings for a\n single image can be obtained while the 'encode_images' method can be used to get encodings for all images in a\n directory.\n\n - Duplicate detection:\n Find duplicates either using the encoding mapping generated previously using 'encode_images' or using a Path to the\n directory that contains the images that need to be deduplicated. 'find_duplciates' and 'find_duplicates_to_remove'\n methods are provided to accomplish these tasks.\n \"\"\"\n\n def __init__(self, verbose: bool = True) -> None:\n \"\"\"\n Initialize a keras MobileNet model that is sliced at the last convolutional layer.\n Set the batch size for keras generators to be 64 samples. Set the input image size to (224, 224) for providing\n as input to MobileNet model.\n\n Args:\n verbose: Display progress bar if True else disable it. Default value is True.\n \"\"\"\n from tensorflow.keras.applications.mobilenet import MobileNet, preprocess_input\n from imagededup.utils.data_generator import DataGenerator\n\n self.MobileNet = MobileNet\n self.preprocess_input = preprocess_input\n self.DataGenerator = DataGenerator\n\n self.target_size = (224, 224)\n self.batch_size = 64\n self.logger = return_logger(__name__)\n self._build_model()\n self.verbose = 1 if verbose is True else 0\n\n def _build_model(self):\n \"\"\"\n Build MobileNet model sliced at the last convolutional layer with global average pooling added.\n \"\"\"\n self.model = self.MobileNet(\n input_shape=(224, 224, 3), include_top=False, pooling='avg'\n )\n\n self.logger.info(\n 'Initialized: MobileNet pretrained on ImageNet dataset sliced at last conv layer and added '\n 'GlobalAveragePooling'\n )\n\n def _get_cnn_features_single(self, image_array: np.ndarray) -> np.ndarray:\n \"\"\"\n Generate CNN encodings for a single image.\n\n Args:\n image_array: Image typecast to numpy array.\n\n Returns:\n Encodings for the image in the form of numpy array.\n \"\"\"\n image_pp = self.preprocess_input(image_array)\n image_pp = np.array(image_pp)[np.newaxis, :]\n return self.model.predict(image_pp)\n\n def _get_cnn_features_batch(self, image_dir: PurePath) -> Dict[str, np.ndarray]:\n \"\"\"\n Generate CNN encodings for all images in a given directory of images.\n Args:\n image_dir: Path to the image directory.\n\n Returns:\n A dictionary that contains a mapping of filenames and corresponding numpy array of CNN encodings.\n \"\"\"\n self.logger.info('Start: Image encoding generation')\n self.data_generator = self.DataGenerator(\n image_dir=image_dir,\n batch_size=self.batch_size,\n target_size=self.target_size,\n basenet_preprocess=self.preprocess_input,\n )\n\n feat_vec = self.model.predict_generator(\n self.data_generator, len(self.data_generator), 
verbose=self.verbose\n )\n self.logger.info('End: Image encoding generation')\n\n filenames = [i.name for i in self.data_generator.valid_image_files]\n\n self.encoding_map = {j: feat_vec[i, :] for i, j in enumerate(filenames)}\n return self.encoding_map\n\n def encode_image(\n self,\n image_file: Optional[Union[PurePath, str]] = None,\n image_array: Optional[np.ndarray] = None,\n ) -> np.ndarray:\n \"\"\"\n Generate CNN encoding for a single image.\n\n Args:\n image_file: Path to the image file.\n image_array: Optional, used instead of image_file. Image typecast to numpy array.\n\n Returns:\n encoding: Encodings for the image in the form of numpy array.\n\n Example:\n ```\n from imagededup.methods import CNN\n myencoder = CNN()\n encoding = myencoder.encode_image(image_file='path/to/image.jpg')\n OR\n encoding = myencoder.encode_image(image_array=<numpy array of image>)\n ```\n \"\"\"\n if isinstance(image_file, str):\n image_file = Path(image_file)\n\n if isinstance(image_file, PurePath):\n if not image_file.is_file():\n raise ValueError(\n 'Please provide either image file path or image array!'\n )\n\n image_pp = load_image(\n image_file=image_file, target_size=self.target_size, grayscale=False\n )\n\n elif isinstance(image_array, np.ndarray):\n image_pp = preprocess_image(\n image=image_array, target_size=self.target_size, grayscale=False\n )\n else:\n raise ValueError('Please provide either image file path or image array!')\n\n return (\n self._get_cnn_features_single(image_pp)\n if isinstance(image_pp, np.ndarray)\n else None\n )\n\n def encode_images(self, image_dir: Union[PurePath, str]) -> Dict:\n \"\"\"Generate CNN encodings for all images in a given directory of images.\n\n Args:\n image_dir: Path to the image directory.\n Returns:\n dictionary: Contains a mapping of filenames and corresponding numpy array of CNN encodings.\n Example:\n ```\n from imagededup.methods import CNN\n myencoder = CNN()\n encoding_map = myencoder.encode_images(image_dir='path/to/image/directory')\n ```\n \"\"\"\n if isinstance(image_dir, str):\n image_dir = Path(image_dir)\n\n if not image_dir.is_dir():\n raise ValueError('Please provide a valid directory path!')\n\n return self._get_cnn_features_batch(image_dir)\n\n @staticmethod\n def _check_threshold_bounds(thresh: float) -> None:\n \"\"\"\n Check if provided threshold is valid. Raises TypeError if wrong threshold variable type is passed or a\n ValueError if an out of range value is supplied.\n\n Args:\n thresh: Threshold value (must be float between -1.0 and 1.0)\n\n Raises:\n TypeError: If wrong variable type is provided.\n ValueError: If wrong value is provided.\n \"\"\"\n if not isinstance(thresh, float):\n raise TypeError('Threshold must be a float between -1.0 and 1.0')\n if thresh < -1.0 or thresh > 1.0:\n raise ValueError('Threshold must be a float between -1.0 and 1.0')\n\n def _find_duplicates_dict(\n self,\n encoding_map: Dict[str, list],\n min_similarity_threshold: float,\n scores: bool,\n outfile: Optional[str] = None,\n ) -> Dict:\n \"\"\"\n Take in dictionary {filename: encoded image}, detects duplicates above the given cosine similarity threshold\n and returns a dictionary containing key as filename and value as a list of duplicate filenames. 
Optionally,\n the cosine distances could be returned instead of just duplicate filenames for each query file.\n\n Args:\n encoding_map: Dictionary with keys as file names and values as encoded images.\n min_similarity_threshold: Cosine similarity above which retrieved duplicates are valid.\n scores: Boolean indicating whether similarity scores are to be returned along with retrieved duplicates.\n\n Returns:\n if scores is True, then a dictionary of the form {'image1.jpg': [('image1_duplicate1.jpg',\n score), ('image1_duplicate2.jpg', score)], 'image2.jpg': [] ..}\n if scores is False, then a dictionary of the form {'image1.jpg': ['image1_duplicate1.jpg',\n 'image1_duplicate2.jpg'], 'image2.jpg':['image1_duplicate1.jpg',..], ..}\n \"\"\"\n\n # get all image ids\n # we rely on dictionaries preserving insertion order in Python >=3.6\n image_ids = np.array([*encoding_map.keys()])\n\n # put image encodings into feature matrix\n features = np.array([*encoding_map.values()])\n\n self.logger.info('Start: Calculating cosine similarities...')\n\n self.cosine_scores = get_cosine_similarity(features, self.verbose)\n\n np.fill_diagonal(\n self.cosine_scores, 2.0\n ) # allows to filter diagonal in results, 2 is a placeholder value\n\n self.logger.info('End: Calculating cosine similarities.')\n\n self.results = {}\n for i, j in enumerate(self.cosine_scores):\n duplicates_bool = (j >= min_similarity_threshold) & (j < 2)\n\n if scores:\n tmp = np.array([*zip(image_ids, j)], dtype=object)\n duplicates = list(map(tuple, tmp[duplicates_bool]))\n\n else:\n duplicates = list(image_ids[duplicates_bool])\n\n self.results[image_ids[i]] = duplicates\n\n if outfile and scores:\n save_json(results=self.results, filename=outfile, float_scores=True)\n elif outfile:\n save_json(results=self.results, filename=outfile)\n return self.results\n\n def _find_duplicates_dir(\n self,\n image_dir: Union[PurePath, str],\n min_similarity_threshold: float,\n scores: bool,\n outfile: Optional[str] = None,\n ) -> Dict:\n \"\"\"\n Take in path of the directory in which duplicates are to be detected above the given threshold.\n Returns dictionary containing key as filename and value as a list of duplicate file names. Optionally,\n the cosine distances could be returned instead of just duplicate filenames for each query file.\n\n Args:\n image_dir: Path to the directory containing all the images.\n min_similarity_threshold: Optional, hamming distance above which retrieved duplicates are valid. Default 0.9\n scores: Optional, boolean indicating whether Hamming distances are to be returned along with retrieved\n duplicates.\n outfile: Optional, name of the file the results should be written to.\n\n Returns:\n if scores is True, then a dictionary of the form {'image1.jpg': [('image1_duplicate1.jpg',\n score), ('image1_duplicate2.jpg', score)], 'image2.jpg': [] ..}\n if scores is False, then a dictionary of the form {'image1.jpg': ['image1_duplicate1.jpg',\n 'image1_duplicate2.jpg'], 'image2.jpg':['image1_duplicate1.jpg',..], ..}\n \"\"\"\n self.encode_images(image_dir=image_dir)\n\n return self._find_duplicates_dict(\n encoding_map=self.encoding_map,\n min_similarity_threshold=min_similarity_threshold,\n scores=scores,\n outfile=outfile,\n )\n\n def find_duplicates(\n self,\n image_dir: Union[PurePath, str] = None,\n encoding_map: Dict[str, list] = None,\n min_similarity_threshold: float = 0.9,\n scores: bool = False,\n outfile: Optional[str] = None,\n ) -> Dict:\n \"\"\"\n Find duplicates for each file. 
Take in path of the directory or encoding dictionary in which duplicates are to\n be detected above the given threshold. Return dictionary containing key as filename and value as a list of\n duplicate file names. Optionally, the cosine distances could be returned instead of just duplicate filenames for\n each query file.\n\n Args:\n image_dir: Path to the directory containing all the images or dictionary with keys as file names\n and values as numpy arrays which represent the CNN encoding for the key image file.\n encoding_map: Optional, used instead of image_dir, a dictionary containing mapping of filenames and\n corresponding CNN encodings.\n min_similarity_threshold: Optional, threshold value (must be float between -1.0 and 1.0). Default is 0.9\n scores: Optional, boolean indicating whether similarity scores are to be returned along with retrieved\n duplicates.\n outfile: Optional, name of the file to save the results, must be a json. Default is None.\n\n Returns:\n dictionary: if scores is True, then a dictionary of the form {'image1.jpg': [('image1_duplicate1.jpg',\n score), ('image1_duplicate2.jpg', score)], 'image2.jpg': [] ..}. if scores is False, then a\n dictionary of the form {'image1.jpg': ['image1_duplicate1.jpg', 'image1_duplicate2.jpg'],\n 'image2.jpg':['image1_duplicate1.jpg',..], ..}\n\n Example:\n ```\n from imagededup.methods import CNN\n myencoder = CNN()\n duplicates = myencoder.find_duplicates(image_dir='path/to/directory', min_similarity_threshold=0.85, scores=True,\n outfile='results.json')\n\n OR\n\n from imagededup.methods import CNN\n myencoder = CNN()\n duplicates = myencoder.find_duplicates(encoding_map=<mapping filename to cnn encodings>,\n min_similarity_threshold=0.85, scores=True, outfile='results.json')\n ```\n \"\"\"\n self._check_threshold_bounds(min_similarity_threshold)\n\n if image_dir:\n result = self._find_duplicates_dir(\n image_dir=image_dir,\n min_similarity_threshold=min_similarity_threshold,\n scores=scores,\n outfile=outfile,\n )\n elif encoding_map:\n result = self._find_duplicates_dict(\n encoding_map=encoding_map,\n min_similarity_threshold=min_similarity_threshold,\n scores=scores,\n outfile=outfile,\n )\n\n else:\n raise ValueError('Provide either an image directory or encodings!')\n\n return result\n\n def find_duplicates_to_remove(\n self,\n image_dir: PurePath = None,\n encoding_map: Dict[str, np.ndarray] = None,\n min_similarity_threshold: float = 0.9,\n outfile: Optional[str] = None,\n ) -> List:\n \"\"\"\n Give out a list of image file names to remove based on the similarity threshold. Does not remove the mentioned\n files.\n\n Args:\n image_dir: Path to the directory containing all the images or dictionary with keys as file names\n and values as numpy arrays which represent the CNN encoding for the key image file.\n encoding_map: Optional, used instead of image_dir, a dictionary containing mapping of filenames and\n corresponding CNN encodings.\n min_similarity_threshold: Optional, threshold value (must be float between -1.0 and 1.0). Default is 0.9\n outfile: Optional, name of the file to save the results, must be a json. 
Default is None.\n\n Returns:\n duplicates: List of image file names that should be removed.\n\n Example:\n ```\n from imagededup.methods import CNN\n myencoder = CNN()\n duplicates = myencoder.find_duplicates_to_remove(image_dir='path/to/images/directory'),\n min_similarity_threshold=0.85)\n\n OR\n\n from imagededup.methods import CNN\n myencoder = CNN()\n duplicates = myencoder.find_duplicates_to_remove(encoding_map=<mapping filename to cnn encodings>,\n min_similarity_threshold=0.85, outfile='results.json')\n ```\n \"\"\"\n if image_dir or encoding_map:\n duplicates = self.find_duplicates(\n image_dir=image_dir,\n encoding_map=encoding_map,\n min_similarity_threshold=min_similarity_threshold,\n scores=False,\n )\n\n files_to_remove = get_files_to_remove(duplicates)\n\n if outfile:\n save_json(files_to_remove, outfile)\n\n return files_to_remove\n"
] | [
[
"numpy.delete"
],
[
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.zeros"
],
[
"numpy.array",
"numpy.fill_diagonal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dendisuhubdy/flow_synthesizer | [
"7df51b574765c7834ebdda8a8936b2c0d363a93a",
"7df51b574765c7834ebdda8a8936b2c0d363a93a"
] | [
"code/semantic.py",
"code/latent_neighbors.py"
] | [
"#!/usr/bin/env python3\n\n#%% Plotting\nimport matplotlib\nmatplotlib.use('agg')\nimport os\nimport time\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n# Internal imports\nfrom utils.data import load_dataset, meta_pairs\nfrom models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor\nfrom evaluate import evaluate_model\n\n# Define arguments\nparser = argparse.ArgumentParser()\n# Data arguments\nparser.add_argument('--path', type=str, default='', help='')\nparser.add_argument('--test_sounds', type=str, default='', help='')\nparser.add_argument('--output', type=str, default='outputs', help='')\nparser.add_argument('--dataset', type=str, default='32par', help='')\nparser.add_argument('--data', type=str, default='mel', help='')\nparser.add_argument('--train_type', type=str, default='fixed', help='')\nparser.add_argument('--nbworkers', type=int, default=0, help='')\n# Model arguments\nparser.add_argument('--model', type=str, default='cnn', help='')\nparser.add_argument('--loss', type=str, default='mse', help='')\nparser.add_argument('--rec_loss', type=str, default='mse', help='')\nparser.add_argument('--n_classes', type=int, default=32, help='')\nparser.add_argument('--n_hidden', type=int, default=1024, help='')\nparser.add_argument('--n_layers', type=int, default=4, help='')\n# Optimization arguments\nparser.add_argument('--batch_size', type=int, default=128, help='')\nparser.add_argument('--epochs', type=int, default=200, help='')\nparser.add_argument('--eval', type=int, default=100, help='')\nparser.add_argument('--lr', type=float, default=2e-4, help='')\n# Semantic arguments\nparser.add_argument('--semantic_dim', type=int, default=-1, help='')\nparser.add_argument('--semantic_type', type=str, default='cnn', help='')\n# CUDA arguments\nparser.add_argument('--device', type=str, default='cpu', help='Device for CUDA')\nargs = parser.parse_args()\n# Track start time (for HPC)\nstart_time = time.time()\n# In case we are CPU\nargs.synthesize = False\n# Parameter checking\nif (len(args.path) == 0):\n args.path = (args.device == 'cpu') and '/Users/esling/Datasets/diva_dataset' or '/fast-2/datasets/diva_dataset/'\n args.test_sounds = (args.device == 'cpu') and '/Users/esling/Datasets/synth_testing' or '/fast-2/datasets/flow_synthesizer/synth_testing'\n args.vocal_sounds = '/fast-2/datasets/flow_synthesizer/vocal_testing'\n #args.output = (args.device == 'cpu') and 'outputs' or '/fast-1/philippe/flow_results'\nif (args.device != 'cpu'):\n args.synthesize = True\n # Enable CuDNN optimization\n torch.backends.cudnn.benchmark=True\n# Handling cuda\nargs.cuda = not args.device == 'cpu' and torch.cuda.is_available()\nargs.device = torch.device(args.device if torch.cuda.is_available() else 'cpu')\nprint('Optimization will be on ' + str(args.device) + '.')\n\n\"\"\"\n###################\nBasic definitions\n################### \n\"\"\"\nprint('[Loading dataset]')\nref_split = args.path + '/reference_split_' + args.dataset+ \"_\" +args.data + '.npz'\nif (args.train_type == 'random' or (not os.path.exists(ref_split))):\n train_loader, valid_loader, test_loader, args = load_dataset(args)\n # Take fixed batch\n fixed_data, fixed_params, fixed_meta, fixed_audio = next(iter(test_loader))\n fixed_data, fixed_params, fixed_meta, fixed_audio = fixed_data.to(args.device), fixed_params.to(args.device), fixed_meta, fixed_audio\n fixed_batch = (fixed_data, fixed_params, fixed_meta, fixed_audio)\n if (args.train_type == 
'fixed'):\n np.savez(ref_split, [train_loader, valid_loader, test_loader])\nelse:\n data = np.load(ref_split)['arr_0']\n train_loader, valid_loader, test_loader = data[0], data[1], data[2]\n fixed_data, fixed_params, fixed_meta, fixed_audio = next(iter(test_loader))\n fixed_data, fixed_params, fixed_meta, fixed_audio = fixed_data.to(args.device), fixed_params.to(args.device), fixed_meta, fixed_audio\n fixed_batch = (fixed_data, fixed_params, fixed_meta, fixed_audio)\n args.output_size = train_loader.dataset.output_size\n args.input_size = train_loader.dataset.input_size\n\n#%%\n\"\"\"\n###################\nSemantic data analysis\n################### \n\"\"\"\nfavs = [0, 1, 3, 4]\nprint('Favorite dims : ')\nprint(meta_pairs[favs[0] + 1])\nprint(meta_pairs[favs[1] + 1])\nprint(meta_pairs[favs[2] + 1])\nprint(meta_pairs[favs[3] + 1])\nprint('Analyzing basic tags properties')\nprint(meta_pairs)\n# Create basic structures for stats\nfull_params = []\nfull_meta = []\nnb_tags = torch.zeros(len(favs), 3).float()\nbatch_seen = torch.zeros(len(favs), 3).float()\n# Checking up basic features\nfor (loader, l_name) in [(train_loader, 'train'), (valid_loader, 'valid'), (test_loader, 'test')]:\n print(['Analyzing ' + l_name])\n for (_, params, meta, _) in loader:\n nb_tags += meta[:, favs].sum(dim=0).float()\n batch_seen += meta[:, favs].sum(dim=0).float().clamp(0, 1)\n full_params.append(params)\n full_meta.append(meta[:, favs])\n print('Number per batch')\n print(nb_tags / len(loader))\n print(batch_seen / len(loader))\nfull_params = torch.cat(full_params, dim=0)\nfull_meta = torch.cat(full_meta, dim=0)\n#%%\n# Now analyze global tags properties\nfull_var = full_params.std(dim=0)\nfor f in range(len(favs)):\n print('Full variance :')\n print(full_var)\n for c in range(3):\n print(meta_pairs[favs[f] + 1][c])\n # Find ids of that class\n ids = (full_meta[:, f, c] == 1)\n cur_var = (full_params[ids].std(dim=0))\n print(torch.abs(cur_var - full_var)/full_var)\n\n#%% Now try to optimize our favs (based on parameters)\n\"\"\"\nfor f in range(len(favs)):\n print('About to classify this from parameters')\n print(meta_pairs[favs[f] + 1])\n cur_tag = favs[f]\n # Create baseline classifier on parameters\n model = GatedMLP(full_params.shape[1], 2, hidden_size = 128, n_layers = 5, type_mod='normal').float().to(args.device)\n # Optimizer model\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n # Learning rate scheduler\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20, verbose=True, threshold=1e-7)\n # Create loss\n loss = nn.CrossEntropyLoss(reduction='none').float().to(args.device)\n # Vector of final losses\n losses = torch.zeros(args.epochs, 3)\n # Train the model\n for i in range(args.epochs):\n print('Epoch ' + str(i))\n # Go through the 3 sets\n for (loader, l_name, l_i) in [(train_loader, 'train', 0), (valid_loader, 'valid', 1), (test_loader, 'test', 2)]:\n if (l_name == 'train'):\n model.train()\n else:\n model.eval()\n full_loss = 0\n for (_, y, meta, _) in loader:\n meta = meta[:, favs[f]].squeeze(1)\n target = meta[:, 1].long().to(args.device)\n y, meta = y.to(args.device).float(), meta.to(args.device).float()\n # Separate examples\n loss_mask = 1 - meta[:, 2]\n observed_examples = loss_mask.eq(1)\n unknown_examples = loss_mask.eq(0)\n optimizer.zero_grad()\n out = torch.softmax(model(y), dim=1)\n b_loss = (loss_mask * loss(out, target)).sum()\n if (l_name == 'train'):\n b_loss.backward()\n optimizer.step()\n full_loss += b_loss.item()\n full_loss 
/= len(loader)\n losses[i, l_i] = full_loss\n if (l_name == 'valid'):\n scheduler.step(full_loss)\n print(losses[i, :])\n # Now save reference results\n torch.save(losses, args.output + '/models/classify_params_' + meta_pairs[0][favs[f]] + '.results')\n\"\"\"\n \n#%% Now try to optimize our favs (based on audio)\nargs.kernel = 5\nargs.dilation = 3\nfor f in range(len(favs)):\n print('About to classify this from audio')\n print(meta_pairs[favs[f] + 1])\n cur_tag = favs[f]\n # Create baseline classifier on parameters\n model = GatedCNN(args.input_size, 2, n_layers = 3, type_mod='normal', args=args).float().to(args.device)\n # Optimizer model\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n # Learning rate scheduler\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20, verbose=True, threshold=1e-7)\n # Create loss\n loss = nn.CrossEntropyLoss(reduction='none').float().to(args.device)\n # Vector of final losses\n losses = torch.zeros(args.epochs, 3)\n # Train the model\n for i in range(args.epochs):\n print('Epoch ' + str(i))\n # Go through the 3 sets\n for (loader, l_name, l_i) in [(train_loader, 'train', 0)]:\n model.train()\n full_loss = 0\n for (x, _, meta, _) in loader:\n meta = meta[:, favs[f]].squeeze(1)\n target = meta[:, 1].long().to(args.device)\n x, meta = x.float().to(args.device), meta.float().to(args.device)\n # Separate examples\n loss_mask = 1 - meta[:, 2]\n observed_examples = loss_mask.eq(1)\n unknown_examples = loss_mask.eq(0)\n optimizer.zero_grad()\n out = torch.softmax(model(x), dim=1)\n b_loss = (loss_mask * loss(out, target)).sum()\n b_loss.backward()\n optimizer.step()\n full_loss += b_loss\n full_loss /= len(loader)\n losses[i, l_i] = full_loss\n if (l_name == 'valid'):\n scheduler.step(full_loss)\n for (loader, l_name, l_i) in [(valid_loader, 'valid', 1), (test_loader, 'test', 2)]:\n model.eval()\n full_loss = 0\n with torch.no_grad():\n for (x, _, meta, _) in loader:\n meta = meta[:, favs[f]].squeeze(1)\n target = meta[:, 1].long().to(args.device)\n x, meta = x.float().to(args.device), meta.float().to(args.device)\n # Separate examples\n loss_mask = 1 - meta[:, 2]\n observed_examples = loss_mask.eq(1)\n unknown_examples = loss_mask.eq(0)\n optimizer.zero_grad()\n out = torch.softmax(model(x), dim=1)\n b_loss = (loss_mask * loss(out, target)).sum()\n if (l_name == 'train'):\n b_loss.backward()\n optimizer.step()\n full_loss += b_loss\n full_loss /= len(loader)\n losses[i, l_i] = full_loss\n if (l_name == 'valid'):\n scheduler.step(full_loss)\n print(losses[i, :])\n # Now save reference results\n torch.save(losses, args.output + '/models/classify_audios_' + meta_pairs[0][favs[f]] + '.results')\n ",
"# -*- coding: utf-8 -*-\n\nimport matplotlib\nmatplotlib.use('agg')\nimport torch\nimport torch.nn as nn\nimport os.path\nimport argparse\nimport numpy as np\nfrom utils.data import load_dataset\nfrom utils.plot import compare_batch_detailed\nfrom models.loss import spectral_losses\n# Plotting\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import decomposition\nimport librosa\n\ndef evaluate_neighborhood(model, test_loader, args, train=False, name=None):\n from synth.synthesize import synthesize_batch\n print(' - Evaluate audio synthesis losses.')\n cur_batch = 0\n for (x, y, _, x_wave) in test_loader:\n print('Loading')\n # Send to device\n x, y = x.to(args.device), y.to(args.device)\n print('Loaded')\n # Encode our fixed batch\n _, out, _ = model.ae_model(x)\n print(' - Generate audio outputs.')\n # Select two random examples\n ids = [np.random.randint(0, x.shape[0]), np.random.randint(0, x.shape[0])]\n # Generate different local neighborhoods\n for i in [0, 1]:\n for v_r in [1, 0.5, 0.2]:\n out1 = out[ids[i]] + (torch.randn(64, out.shape[1]) * v_r).to(args.device)\n out1 = model.regression_model(out1)\n audio = synthesize_batch(out1.cpu(), test_loader.dataset.final_params, args.engine, args.generator, args.param_defaults, args.rev_idx, orig_wave=x_wave, name=None)\n # Compute mel spectrograms\n full_mels = []\n for b in range(x.shape[0]):\n _, mse, sc, lm, f_mel = spectral_losses(audio[b], x[b], test_loader, args, raw=True)\n if (args.data == 'mel'):\n f_mel = torch.log(f_mel + 1e-3)\n full_mels.append(f_mel.unsqueeze(0))\n full_mels = torch.cat(full_mels, dim=0)\n # Output batches comparisons\n if len(x.shape)>3: # get rid of mfcc\n x = x[:,0]\n id_full = [ids[i], 1, 2, 3, 4, 5, 6, 7]\n compare_batch_detailed(x[id_full].cpu(), y[id_full].cpu(), full_mels[:8].cpu().numpy(), out1[:8].detach().cpu(), None, x_wave[id_full].cpu(), audio[:8], name=name + '_' + str(cur_batch) + '_' + str(i) + '_' + str(v_r))\n # Create linear interpolation\n print('Perform interpolation')\n outs = torch.zeros(8, out.shape[1])\n for e in range(8):\n outs[e] = model.regression_model(((out[ids[0]] * ((7.0-e)/7.0)) + (out[ids[1]] * (e/7.0))).unsqueeze(0))[0]\n # Compute mel spectrograms\n full_mels = []\n audio = synthesize_batch(outs.cpu(), test_loader.dataset.final_params, args.engine, args.generator, args.param_defaults, args.rev_idx, orig_wave=x_wave, name=None)\n for b in range(outs.shape[0]):\n _, mse, sc, lm, f_mel = spectral_losses(audio[b], x[b], test_loader, args, raw=True)\n if (args.data == 'mel'):\n f_mel = torch.log(f_mel + 1e-3)\n full_mels.append(f_mel.unsqueeze(0))\n full_mels = torch.cat(full_mels, dim=0)\n # Output batches comparisons\n if len(x.shape)>3: # get rid of mfcc\n x = x[:,0]\n id_full = [ids[0], ids[1], 2, 3, 4, 5, 6, 7]\n compare_batch_detailed(x[id_full].cpu(), y[id_full].cpu(), full_mels[:8].cpu().numpy(), outs[:8].detach().cpu(), None, x_wave[id_full].cpu(), audio[:8], name=name + '_' + str(cur_batch) + 'interpolate')\n \n# Define arguments\nparser = argparse.ArgumentParser()\n# Data arguments\nparser.add_argument('--path', type=str, default='/fast-2/datasets/diva_dataset/', help='')\nparser.add_argument('--ref_model', type=str, default='/fast-1/philippe/flow_results_final/32par/models/vae_flow_mel_mse_cnn_mlp_iaf_1.model', help='')\nparser.add_argument('--output', type=str, default='/fast-1/philippe/flow_results_final/32par/', help='')\nparser.add_argument('--dataset', type=str, 
default='32par', help='')\nparser.add_argument('--data', type=str, default='mel', help='')\nparser.add_argument('--batch_size', type=int, default=128, help='')\nparser.add_argument('--n_classes', type=int, default=64, help='')\nparser.add_argument('--nbworkers', type=int, default=0, help='')\nparser.add_argument('--epochs', type=int, default=100, help='')\nparser.add_argument('--device', type=str, default='cuda:2', help='')\nparser.add_argument('--eval_type', type=str, default='summary', help='')\nargs = parser.parse_args()\nif (args.device != 'cpu'):\n matplotlib.use('agg')\n args.synthesize = True\n # Import synthesis\n from synth.synthesize import create_synth\n # Create synth rendering system\n args.engine, args.generator, args.param_defaults, args.rev_idx = create_synth(args.dataset)\n# Load dataset\nref_split = args.path + '/reference_split_' + args.dataset+ \"_\" +args.data + '.npz'\ndata = np.load(ref_split)['arr_0']\ntrain_loader, valid_loader, test_loader = data[0], data[1], data[2]\n# Load model\nmodel = torch.load(args.ref_model)\nmodel = model.to(args.device)\nevaluate_neighborhood(model, test_loader, args, train=False, name=args.output + '/neighbors_32')\n"
] | [
[
"torch.abs",
"torch.nn.CrossEntropyLoss",
"numpy.savez",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.zeros",
"torch.cat",
"matplotlib.use",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.load",
"torch.save"
],
[
"torch.zeros",
"torch.cat",
"matplotlib.use",
"torch.load",
"torch.randn",
"torch.log",
"numpy.load",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JesseTG/Liar | [
"a952ebc99fe1907e0f40ec4b40a725c75e25ac01"
] | [
"liar/public/views.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Public section, including homepage and signup.\"\"\"\n\nfrom collections import Counter, defaultdict\nimport operator\nimport re\nimport itertools\nimport math\n\nfrom flask import Blueprint, flash, redirect, render_template, request, url_for\nfrom flask import current_app\n\nfrom nltk.corpus import stopwords\nimport nltk\n\nfrom liar.utils import flash_errors\nfrom liar.extensions import cache, mongo\nfrom .. import queries\n\nimport scipy\nimport pandas as pd\nfrom sklearn import manifold\nfrom scipy.interpolate import interp1d\nfrom scipy.spatial.distance import squareform, pdist\n\nfrom numpy import amax\n\nfrom colour import Color\n\n\ndef take(n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(islice(iterable, n))\n\n\ngag_list=[\"EX\",\"RP\",\"TO\",\"VB\",\"WP\",\"PRP\",\"DT\",\"VBP\",\"IN\",\"POS\",\".\",\"CD\",\"``\"]\n\ndef split_sentence(text):\n sentence=nltk.word_tokenize(text)\n tagged = nltk.pos_tag(sentence)\n tagged=[tag for tag in tagged if tag[1] not in gag_list]\n pass_list=[tag[0] for tag in tagged]\n return pass_list\n\n\n\n\ndef gen_dict(statement_text):\n words=[split_sentence(sentence) for sentence in statement_text]\n word_dict=defaultdict(int)\n for word_list in words:\n temp_dict=dict(Counter(word_list))\n word_dict={**word_dict,**temp_dict}\n return word_dict\n\nblueprint = Blueprint('public', __name__, static_folder='../static')\n\nCOLORS = tuple(map(Color, (\"#661a00\", \"#E71F28\", \"#EE9022\", \"#FFD503\", \"#C3D52D\", \"#83BF44\")))\ninterval = tuple(i/(len(COLORS) - 1) for i in range(len(COLORS)))\nred = interp1d(interval, [c.red for c in COLORS])\ngreen = interp1d(interval, [c.green for c in COLORS])\nblue = interp1d(interval, [c.blue for c in COLORS])\n\n\ndef gradient(i):\n return Color(rgb=(red(i), green(i), blue(i)))\n\[email protected](timeout=300)\ndef compute_points(combos):\n subjects = tuple(sorted(tuple(queries.subjects())))\n\n length = len(subjects)\n matrix = scipy.zeros((length, length))\n\n for c in combos:\n _id = c['_id']\n count = c['count']\n i_index = subjects.index(_id[0])\n j_index = subjects.index(_id[1])\n matrix[i_index, j_index] = count\n matrix[j_index, i_index] = count\n\n most = matrix.max()\n\n mds = manifold.MDS(n_components=2, n_init=10, max_iter=1000, eps=1e-9, dissimilarity=\"precomputed\", n_jobs=-1)\n return scipy.array(mds.fit_transform(most - matrix))\n\n\ndef viewbox(points):\n am = amax(points)\n margin = am * 0.05\n return \"{0} {1} {2} {3}\".format(-am - margin, -am - margin, am*2 + margin, am*2 + margin)\n\ndef build_data(points):\n nodes = tuple(queries.nodes())\n\n assert len(nodes) == len(points)\n # The MDS should provide one 2D point for each topic...\n\n for i in range(len(nodes)):\n node = nodes[i]\n point = points[i]\n node['x'] = point[0]\n node['y'] = point[1]\n node['radius'] = math.sqrt(node['numberOfRulings'])\n\n return { n['_id'] : n for n in nodes}\n\n\n#######################Word cloud#####################\ndef word_cloud():\n statements=mongo.db.statements\n statement_text=statements_df['statement'].tolist()\n wordcount=defaultdict(int)\n word_dict=gen_dict(statement_text)\n word_dict=dict(sorted(word_dict.items(), key=operator.itemgetter(1), reverse=True))\n return word_cloud\n#####################################################\n\ndef compute_edges(nodes, combos):\n def make_edge(combo):\n return {\n 'a': nodes[combo['_id'][0]],\n 'b': nodes[combo['_id'][1]],\n 'count': combo['count']\n }\n\n def allow_edge(edge):\n a = 
edge['a']\n        b = edge['b']\n        count = edge['count']\n\n        return (count / a['numberOfRulings'] >= 0.05) or (count / b['numberOfRulings'] >= 0.05)\n\n    return tuple(e for e in map(make_edge, combos) if allow_edge(e))\n\n\n@blueprint.route('/', methods=['GET'])\n#@cache.cached(timeout=10)\ndef home():\n    \"\"\"Home page.\"\"\"\n    combos = tuple(queries.combos())\n    points = compute_points(combos)\n    nodes = build_data(points)\n    edges = compute_edges(nodes, combos)\n    v = viewbox(points)\n\n    return render_template('public/home.html', nodes=nodes, edges=edges, viewbox=v, gradient=gradient, colors=COLORS)\n\n\n@blueprint.route('/about/')\ndef about():\n    \"\"\"About page.\"\"\"\n    return render_template('public/about.html')\n"
] | [
[
"scipy.zeros",
"numpy.amax",
"scipy.interpolate.interp1d",
"sklearn.manifold.MDS"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
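The views.py record above builds its colour gradient with `scipy.interpolate.interp1d` and lays out topics with `sklearn.manifold.MDS` on a precomputed dissimilarity matrix (it converts co-occurrence counts into `most - matrix` before fitting). The sketch below is a minimal, self-contained illustration of that interp1d/MDS pattern only; the colour stops and the toy dissimilarity matrix are made-up values, not data from the repository.

```python
# Sketch of the interp1d gradient and precomputed-MDS layout pattern
# from the views.py record above; all numbers here are illustrative.
import numpy as np
from scipy.interpolate import interp1d
from sklearn import manifold

# Piecewise-linear interpolation between colour-channel stops on [0, 1].
stops = np.array([0.0, 0.5, 1.0])
reds = np.array([0.4, 0.9, 0.5])           # red channel value at each stop
red = interp1d(stops, reds)
print(float(red(0.25)))                    # value between 0.4 and 0.9

# MDS over a symmetric dissimilarity matrix (dissimilarity="precomputed").
dissimilarity = np.array([
    [0.0, 2.0, 3.0],
    [2.0, 0.0, 1.0],
    [3.0, 1.0, 0.0],
])
mds = manifold.MDS(n_components=2, dissimilarity="precomputed",
                   n_init=4, max_iter=300, random_state=0)
points = mds.fit_transform(dissimilarity)  # one 2-D point per item
print(points.shape)                        # (3, 2)
```

With `dissimilarity="precomputed"`, MDS expects a square symmetric matrix of distances rather than raw feature vectors, which is why the record above subtracts the co-occurrence counts from their maximum before calling `fit_transform`.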
Jet132/keras-tuner | [
"be682573c6f6be1e3f3e6dcac786a34ccac19d3b",
"be682573c6f6be1e3f3e6dcac786a34ccac19d3b"
] | [
"keras_tuner/engine/base_tuner.py",
"keras_tuner/tuners/randomsearch_test.py"
] | [
"# Copyright 2019 The KerasTuner Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"Tuner base class.\"\n\n\nimport copy\nimport os\nimport warnings\n\nimport tensorflow as tf\n\nfrom keras_tuner import utils\nfrom keras_tuner.distribute import oracle_chief\nfrom keras_tuner.distribute import oracle_client\nfrom keras_tuner.distribute import utils as dist_utils\nfrom keras_tuner.engine import hypermodel as hm_module\nfrom keras_tuner.engine import oracle as oracle_module\nfrom keras_tuner.engine import stateful\nfrom keras_tuner.engine import trial as trial_module\nfrom keras_tuner.engine import tuner_utils\n\n\nclass BaseTuner(stateful.Stateful):\n \"\"\"Tuner base class.\n\n `BaseTuner` is the base class for all Tuners, which manages the search\n loop, Oracle, logging, saving, etc. Tuners for non-Keras models can be\n created by subclassing `BaseTuner`.\n\n Args:\n oracle: Instance of Oracle class.\n hypermodel: Instance of `HyperModel` class (or callable that takes\n hyperparameters and returns a `Model` instance). It is optional\n when `Tuner.run_trial()` is overriden and does not use\n `self.hypermodel`.\n directory: A string, the relative path to the working directory.\n project_name: A string, the name to use as prefix for files saved by\n this Tuner.\n logger: Optional instance of `kerastuner.Logger` class for\n streaming logs for monitoring.\n overwrite: Boolean, defaults to `False`. If `False`, reloads an\n existing project of the same name if one is found. Otherwise,\n overwrites the project.\n\n Attributes:\n remaining_trials: Number of trials remaining, `None` if `max_trials` is\n not set. This is useful when resuming a previously stopped search.\n \"\"\"\n\n def __init__(\n self,\n oracle,\n hypermodel=None,\n directory=None,\n project_name=None,\n logger=None,\n overwrite=False,\n ):\n # Ops and metadata\n self.directory = directory or \".\"\n self.project_name = project_name or \"untitled_project\"\n if overwrite and tf.io.gfile.exists(self.project_dir):\n tf.io.gfile.rmtree(self.project_dir)\n\n if not isinstance(oracle, oracle_module.Oracle):\n raise ValueError(\n \"Expected `oracle` argument to be an instance of `Oracle`. 
\"\n f\"Received: oracle={oracle} (of type ({type(oracle)}).\"\n )\n self.oracle = oracle\n self.oracle._set_project_dir(\n self.directory, self.project_name, overwrite=overwrite\n )\n\n # Run in distributed mode.\n if dist_utils.is_chief_oracle():\n # Blocks forever.\n oracle_chief.start_server(self.oracle)\n elif dist_utils.has_chief_oracle():\n # Proxies requests to the chief oracle.\n self.oracle = oracle_client.OracleClient(self.oracle)\n\n # To support tuning distribution.\n self.tuner_id = os.environ.get(\"KERASTUNER_TUNER_ID\", \"tuner0\")\n\n self.hypermodel = hm_module.get_hypermodel(hypermodel)\n\n # Logs etc\n self.logger = logger\n self._display = tuner_utils.Display(oracle=self.oracle)\n\n self._populate_initial_space()\n\n if not overwrite and tf.io.gfile.exists(self._get_tuner_fname()):\n tf.get_logger().info(\n \"Reloading Tuner from {}\".format(self._get_tuner_fname())\n )\n self.reload()\n\n def _populate_initial_space(self):\n \"\"\"Populate initial search space for oracle.\n\n Keep this function as a subroutine for AutoKeras to override. The space\n may not be ready at the initialization of the tuner, but after seeing\n the training data.\n\n Build hypermodel multiple times to find all conditional hps. It\n generates hp values based on the not activated `conditional_scopes`\n found in the builds.\n \"\"\"\n if self.hypermodel is None:\n return\n\n hp = self.oracle.get_space()\n\n # Lists of stacks of conditions used during `explore_space()`.\n scopes_never_active = []\n scopes_once_active = []\n\n while True:\n self.hypermodel.build(hp)\n\n # Update the recored scopes.\n for conditions in hp.active_scopes:\n if conditions not in scopes_once_active:\n scopes_once_active.append(copy.deepcopy(conditions))\n if conditions in scopes_never_active:\n scopes_never_active.remove(conditions)\n\n for conditions in hp.inactive_scopes:\n if conditions not in scopes_once_active:\n scopes_never_active.append(copy.deepcopy(conditions))\n\n # All conditional scopes are activated.\n if len(scopes_never_active) == 0:\n break\n\n # Generate new values to activate new conditions.\n conditions = scopes_never_active[0]\n for condition in conditions:\n hp.values[condition.name] = condition.values[0]\n\n self.oracle.update_space(hp)\n\n def search(self, *fit_args, **fit_kwargs):\n \"\"\"Performs a search for best hyperparameter configuations.\n\n Args:\n *fit_args: Positional arguments that should be passed to\n `run_trial`, for example the training and validation data.\n **fit_kwargs: Keyword arguments that should be passed to\n `run_trial`, for example the training and validation data.\n \"\"\"\n if \"verbose\" in fit_kwargs:\n self._display.verbose = fit_kwargs.get(\"verbose\")\n self.on_search_begin()\n while True:\n trial = self.oracle.create_trial(self.tuner_id)\n if trial.status == trial_module.TrialStatus.STOPPED:\n # Oracle triggered exit.\n tf.get_logger().info(\"Oracle triggered exit\")\n break\n if trial.status == trial_module.TrialStatus.IDLE:\n # Oracle is calculating, resend request.\n continue\n\n self.on_trial_begin(trial)\n results = self.run_trial(trial, *fit_args, **fit_kwargs)\n # `results` is None indicates user updated oracle in `run_trial()`.\n if results is None:\n warnings.warn(\n \"`Tuner.run_trial()` returned None. It should return one of \"\n \"float, dict, keras.callbacks.History, or a list of one \"\n \"of these types. 
The use case of calling \"\n \"`Tuner.oracle.update_trial()` in `Tuner.run_trial()` is \"\n \"deprecated, and will be removed in the future.\",\n DeprecationWarning,\n stacklevel=2,\n )\n else:\n self.oracle.update_trial(\n trial.trial_id,\n # Convert to dictionary before calling `update_trial()`\n # to pass it from gRPC.\n tuner_utils.convert_to_metrics_dict(\n results, self.oracle.objective, \"Tuner.run_trial()\"\n ),\n )\n self.on_trial_end(trial)\n self.on_search_end()\n\n def run_trial(self, trial, *fit_args, **fit_kwargs):\n \"\"\"Evaluates a set of hyperparameter values.\"\"\"\n raise NotImplementedError\n\n def save_model(self, trial_id, model, step=0):\n \"\"\"Saves a Model for a given trial.\n\n Args:\n trial_id: The ID of the `Trial` corresponding to this Model.\n model: The trained model.\n step: Integer, for models that report intermediate results to the\n `Oracle`, the step the saved file correspond to. For example, for\n Keras models this is the number of epochs trained.\n \"\"\"\n raise NotImplementedError\n\n def load_model(self, trial):\n \"\"\"Loads a Model from a given trial.\n\n For models that report intermediate results to the `Oracle`, generally\n `load_model` should load the best reported `step` by relying of\n `trial.best_step`.\n\n Args:\n trial: A `Trial` instance, the `Trial` corresponding to the model\n to load.\n \"\"\"\n raise NotImplementedError\n\n def on_trial_begin(self, trial):\n \"\"\"Called at the beginning of a trial.\n\n Args:\n trial: A `Trial` instance.\n \"\"\"\n if self.logger:\n self.logger.register_trial(trial.trial_id, trial.get_state())\n self._display.on_trial_begin(self.oracle.get_trial(trial.trial_id))\n\n def on_trial_end(self, trial):\n \"\"\"Called at the end of a trial.\n\n Args:\n trial: A `Trial` instance.\n \"\"\"\n # Send status to Logger\n if self.logger:\n self.logger.report_trial_state(trial.trial_id, trial.get_state())\n\n self.oracle.end_trial(trial.trial_id, trial_module.TrialStatus.COMPLETED)\n self.oracle.update_space(trial.hyperparameters)\n # Display needs the updated trial scored by the Oracle.\n self._display.on_trial_end(self.oracle.get_trial(trial.trial_id))\n self.save()\n\n def on_search_begin(self):\n \"\"\"Called at the beginning of the `search` method.\"\"\"\n if self.logger:\n self.logger.register_tuner(self.get_state())\n\n def on_search_end(self):\n \"\"\"Called at the end of the `search` method.\"\"\"\n if self.logger:\n self.logger.exit()\n\n def get_best_models(self, num_models=1):\n \"\"\"Returns the best model(s), as determined by the objective.\n\n This method is for querying the models trained during the search.\n For best performance, it is recommended to retrain your Model on the\n full dataset using the best hyperparameters found during `search`,\n which can be obtained using `tuner.get_best_hyperparameters()`.\n\n Args:\n num_models: Optional number of best models to return.\n Defaults to 1.\n\n Returns:\n List of trained models sorted from the best to the worst.\n \"\"\"\n best_trials = self.oracle.get_best_trials(num_models)\n models = [self.load_model(trial) for trial in best_trials]\n return models\n\n def get_best_hyperparameters(self, num_trials=1):\n \"\"\"Returns the best hyperparameters, as determined by the objective.\n\n This method can be used to reinstantiate the (untrained) best model\n found during the search process.\n\n Example:\n\n ```python\n best_hp = tuner.get_best_hyperparameters()[0]\n model = tuner.hypermodel.build(best_hp)\n ```\n\n Args:\n num_trials: Optional number of 
`HyperParameters` objects to return.\n\n Returns:\n List of `HyperParameter` objects sorted from the best to the worst.\n \"\"\"\n return [t.hyperparameters for t in self.oracle.get_best_trials(num_trials)]\n\n def search_space_summary(self, extended=False):\n \"\"\"Print search space summary.\n\n The methods prints a summary of the hyperparameters in the search\n space, which can be called before calling the `search` method.\n\n Args:\n extended: Optional boolean, whether to display an extended summary.\n Defaults to False.\n \"\"\"\n print(\"Search space summary\")\n hp = self.oracle.get_space()\n print(\"Default search space size: %d\" % len(hp.space))\n for p in hp.space:\n config = p.get_config()\n name = config.pop(\"name\")\n print(\"%s (%s)\" % (name, p.__class__.__name__))\n print(config)\n\n def results_summary(self, num_trials=10):\n \"\"\"Display tuning results summary.\n\n The method prints a summary of the search results including the\n hyperparameter values and evaluation results for each trial.\n\n Args:\n num_trials: Optional number of trials to display. Defaults to 10.\n \"\"\"\n print(\"Results summary\")\n print(\"Results in %s\" % self.project_dir)\n print(\"Showing %d best trials\" % num_trials)\n print(\"{}\".format(self.oracle.objective))\n\n best_trials = self.oracle.get_best_trials(num_trials)\n for trial in best_trials:\n trial.summary()\n\n @property\n def remaining_trials(self):\n \"\"\"Returns the number of trials remaining.\n\n Will return `None` if `max_trials` is not set. This is useful when\n resuming a previously stopped search.\n \"\"\"\n return self.oracle.remaining_trials()\n\n def get_state(self):\n return {}\n\n def set_state(self, state):\n pass\n\n def save(self):\n \"\"\"Saves this object to its project directory.\"\"\"\n if not dist_utils.has_chief_oracle():\n self.oracle.save()\n super(BaseTuner, self).save(self._get_tuner_fname())\n\n def reload(self):\n \"\"\"Reloads this object from its project directory.\"\"\"\n if not dist_utils.has_chief_oracle():\n self.oracle.reload()\n super(BaseTuner, self).reload(self._get_tuner_fname())\n\n @property\n def project_dir(self):\n dirname = os.path.join(str(self.directory), self.project_name)\n utils.create_directory(dirname)\n return dirname\n\n def get_trial_dir(self, trial_id):\n dirname = os.path.join(str(self.project_dir), \"trial_\" + str(trial_id))\n utils.create_directory(dirname)\n return dirname\n\n def _get_tuner_fname(self):\n return os.path.join(str(self.project_dir), str(self.tuner_id) + \".json\")\n",
"# Copyright 2019 The KerasTuner Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom keras_tuner.engine import tuner as tuner_module\nfrom keras_tuner.tuners import randomsearch\n\n\ndef test_update_space(tmp_path):\n # Tests that HyperParameters added after the first call to `build_model`\n # are sent to the Oracle via oracle.update_space.\n def build_model(hp):\n model = tf.keras.Sequential()\n for i in range(hp.Int(\"layers\", 0, 2)):\n model.add(\n tf.keras.layers.Dense(\n units=hp.Int(\"units_\" + str(i), 2, 4, 2), activation=\"relu\"\n )\n )\n model.add(tf.keras.layers.Dense(1, activation=\"sigmoid\"))\n model.compile(\"adam\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n return model\n\n class MyRandomSearch(randomsearch.RandomSearchOracle):\n def populate_space(self, trial_id):\n result = super(MyRandomSearch, self).populate_space(trial_id)\n if \"values\" in result:\n result[\"values\"][\"layers\"] = 2\n return result\n\n tuner = tuner_module.Tuner(\n oracle=MyRandomSearch(objective=\"accuracy\", max_trials=1),\n hypermodel=build_model,\n directory=tmp_path,\n )\n\n assert {hp.name for hp in tuner.oracle.get_space().space} == {\"layers\"}\n\n x, y = np.ones((10, 10)), np.ones((10, 1))\n tuner.search(x, y, epochs=1)\n\n assert {hp.name for hp in tuner.oracle.get_space().space} == {\n \"layers\",\n \"units_0\",\n \"units_1\",\n }\n"
] | [
[
"tensorflow.io.gfile.exists",
"tensorflow.get_logger",
"tensorflow.io.gfile.rmtree"
],
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Sequential",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
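The `base_tuner.py` record above implements the search loop that concrete tuners reuse, and `randomsearch_test.py` drives it through a `Tuner` with a random-search oracle. As a hedged illustration of the same workflow through the public API, the sketch below runs a tiny `RandomSearch`; the model, data, and hyperparameter ranges are placeholders chosen only to keep the example small.

```python
# Minimal end-to-end sketch of the search/reload workflow managed by
# BaseTuner above, using the public RandomSearch tuner. The dataset,
# layer sizes, and objective are illustrative placeholders.
import numpy as np
import tensorflow as tf
import keras_tuner

def build_model(hp):
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(hp.Int("units", 4, 16, step=4), activation="relu"),
        tf.keras.layers.Dense(1, activation="sigmoid"),
    ])
    model.compile("adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model

tuner = keras_tuner.RandomSearch(
    hypermodel=build_model,
    objective="val_accuracy",
    max_trials=2,
    directory="kt_demo",
    project_name="demo",
    overwrite=True,  # otherwise an existing project of the same name is reloaded
)

x = np.random.rand(32, 8)
y = np.random.randint(0, 2, (32, 1))
tuner.search(x, y, epochs=1, validation_split=0.25, verbose=0)

# Reinstantiate the (untrained) best model, as in the docstring example above.
best_hp = tuner.get_best_hyperparameters()[0]
model = tuner.hypermodel.build(best_hp)
```

Setting `overwrite=True` mirrors the `BaseTuner` constructor above, which otherwise reloads an existing project of the same name from `directory/project_name`.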
choderalab/gin | [
"9082431d8b664699a898c1e2fa490a18737d6e2d",
"9082431d8b664699a898c1e2fa490a18737d6e2d",
"9082431d8b664699a898c1e2fa490a18737d6e2d"
] | [
"lime/scripts/qc_datasets/ht_off_opt.py",
"lime/scripts/elf_with_wbo/ht_elf_with_wbo.py",
"lime/scripts/qc_datasets/exam_smirnoff_fit.py"
] | [
"# =============================================================================\n# imports\n# =============================================================================\nimport os\nimport sys\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\ntf.autograph.set_verbosity(3)\nfrom sklearn import metrics\nimport gin\nimport lime\nimport pandas as pd\nimport numpy as np\n# import qcportal as ptl\n# client = ptl.FractalClient()\n\n\nTRANSLATION = {\n 6: 0,\n 7: 1,\n 8: 2,\n 16: 3,\n 15: 4,\n 9: 5,\n 17: 6,\n 35: 7,\n 53: 8,\n 1: 9\n}\n\n\n# ds_qc = client.get_collection(\"OptimizationDataset\", \"OpenFF Full Optimization Benchmark 1\")\n# ds_name = tf.data.Dataset.from_tensor_slices(list(ds_qc.data.records))\n\ndef data_generator():\n for record_name in list(ds_qc.data.records):\n r = ds_qc.get_record(record_name, specification='default')\n if r is not None:\n traj = r.get_trajectory()\n if traj is not None:\n for snapshot in traj:\n energy = tf.convert_to_tensor(\n snapshot.properties.scf_total_energy,\n dtype=tf.float32)\n\n mol = snapshot.get_molecule()\n\n atoms = tf.convert_to_tensor(\n [TRANSLATION[atomic_number] for atomic_number in mol.atomic_numbers],\n dtype=tf.int64)\n\n adjacency_map = tf.tensor_scatter_nd_update(\n tf.zeros(\n (\n tf.shape(atoms, tf.int64)[0],\n tf.shape(atoms, tf.int64)[0]\n ),\n dtype=tf.float32),\n tf.convert_to_tensor(\n np.array(mol.connectivity)[:, :2],\n dtype=tf.int64),\n tf.convert_to_tensor(\n np.array(mol.connectivity)[:, 2],\n dtype=tf.float32))\n\n features = gin.probabilistic.featurization.featurize_atoms(\n atoms, adjacency_map)\n\n xyz = tf.convert_to_tensor(\n mol.geometry,\n dtype=tf.float32)\n\n jacobian = tf.convert_to_tensor(\n snapshot.return_result,\n dtype=tf.float32)\n\n atoms = tf.concat(\n [\n features,\n xyz,\n jacobian\n ],\n axis=1)\n\n yield(atoms, adjacency_map, energy)\n\n\ndef data_loader(idx):\n atoms_path = 'data/atoms/' + str(idx.numpy()) + '.npy'\n adjacency_map_path = 'data/adjacency_map/' + str(idx.numpy()) + '.npy'\n energy_path = 'data/energy/' + str(idx.numpy()) + '.npy'\n\n atoms = tf.convert_to_tensor(\n np.load(atoms_path))\n\n adjacency_map = tf.convert_to_tensor(\n np.load(adjacency_map_path))\n\n energy = tf.convert_to_tensor(\n np.load(energy_path))\n\n return atoms, adjacency_map, energy\n\n\n'''\nds = tf.data.Dataset.from_generator(\n data_generator,\n (tf.float32, tf.float32, tf.float32))\n'''\n\nds_path = tf.data.Dataset.from_tensor_slices(list(range(5000)))\n\nds = ds_path.map(\n lambda idx: tf.py_function(\n data_loader,\n [idx],\n [tf.float32, tf.float32, tf.float32]))\n\n\nds = ds.shuffle(100000, seed=2666)\n\n\nds = gin.probabilistic.gn.GraphNet.batch(\n ds, 128, feature_dimension=18, atom_dtype=tf.float32).cache(\n str(os.getcwd()) + '/temp')\n\nn_batches = int(gin.probabilistic.gn.GraphNet.get_number_batches(ds))\nn_te = n_batches // 10\n\nds_te = ds.take(n_te)\nds_vl = ds.skip(n_te).take(n_te)\nds_tr = ds.skip(2 * n_te)\n\nconfig_space = {\n 'D_V': [16, 32, 64, 128, 256],\n 'D_E': [16, 32, 64, 128, 256],\n 'D_A': [16, 32, 64, 128, 256],\n 'D_T': [16, 32, 64, 128, 256],\n 'D_U': [16, 32, 64, 128, 256],\n\n\n 'phi_e_0': [32, 64, 128],\n 'phi_e_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'phi_e_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'phi_v_0': [32, 64, 128],\n 'phi_v_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'phi_v_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'phi_a_0': [32, 64, 128],\n 'phi_a_a_0': ['elu', 'relu', 
'tanh', 'sigmoid'],\n 'phi_a_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'phi_t_0': [32, 64, 128],\n 'phi_t_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'phi_t_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'phi_u_0': [32, 64, 128],\n 'phi_u_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'phi_u_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'f_e_0': [32, 64, 128],\n 'f_e_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'f_e_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'f_r': [32, 64, 128],\n 'f_r_a': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'learning_rate': [1e-5, 1e-4, 1e-3]\n\n}\n\n# @tf.function\ndef flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map, coordinates, atom_in_mol,\n bond_in_mol, angle_in_mol, torsion_in_mol, attr_in_mol):\n\n\n per_mol_mask = tf.stop_gradient(tf.matmul(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32),\n name='per_mol_mask_0'),\n tf.transpose(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32),\n name='per_mol_mask_1'))))\n\n bond_idxs, angle_idxs, torsion_idxs = gin.probabilistic.gn_hyper\\\n .get_geometric_idxs(atoms, adjacency_map)\n\n is_bond = tf.stop_gradient(tf.greater(\n adjacency_map,\n tf.constant(0, dtype=tf.float32)))\n\n distance_matrix = gin.deterministic.md.get_distance_matrix(\n coordinates)\n\n bond_distances = tf.boolean_mask(\n distance_matrix,\n is_bond,\n name='bond_mask')\n\n angle_angles = gin.deterministic.md.get_angles_cos(\n coordinates,\n angle_idxs)\n\n torsion_dihedrals = gin.deterministic.md.get_dihedrals_cos(\n coordinates,\n torsion_idxs)\n\n y_e_0, y_e_1 = tf.split(y_e, 2, 1)\n y_e_0 = tf.squeeze(y_e_0)\n y_e_1 = tf.squeeze(y_e_1)\n u_bond = tf.math.multiply(\n y_e_1,\n tf.math.pow(\n tf.math.subtract(\n bond_distances,\n tf.pow(\n y_e_0,\n tf.constant(2, dtype=tf.float32))),\n tf.constant(2, dtype=tf.float32)))\n\n\n y_a_0, y_a_1 = tf.split(y_a, 2, 1)\n y_a_0 = tf.squeeze(y_a_0)\n y_a_1 = tf.squeeze(y_a_1)\n u_angle = tf.math.multiply(\n y_a_1,\n tf.math.pow(\n tf.math.subtract(\n angle_angles,\n tf.tanh(\n y_a_1)),\n tf.constant(2, dtype=tf.float32)))\n\n y_t_0, y_t_1 = tf.split(y_t, 2, 1)\n y_t_0 = tf.squeeze(y_t_0)\n y_t_1 = tf.squeeze(y_t_1)\n u_dihedral = tf.math.multiply(\n y_t_1,\n tf.math.pow(\n tf.math.subtract(\n torsion_dihedrals,\n tf.tanh(\n y_t_0)),\n tf.constant(2, dtype=tf.float32)))\n\n u_pair_mask = tf.linalg.band_part(\n tf.nn.relu(\n tf.subtract(\n tf.subtract(\n per_mol_mask,\n adjacency_map),\n tf.eye(\n tf.shape(per_mol_mask)[0]))),\n 0, -1)\n\n _distance_matrix = tf.where(\n tf.greater(\n u_pair_mask,\n tf.constant(0, dtype=tf.float32)),\n distance_matrix,\n tf.ones_like(distance_matrix))\n\n _distance_matrix_inverse = tf.multiply(\n u_pair_mask,\n tf.pow(\n tf.math.add(\n _distance_matrix,\n tf.constant(1e-2, dtype=tf.float32)),\n tf.constant(-1, dtype=tf.float32)))\n\n y_pair_0, y_pair_1, y_pair_2 = tf.split(y_pair, 3, 2)\n y_pair_0 = tf.squeeze(y_pair_0)\n y_pair_1 = tf.squeeze(y_pair_1)\n y_pair_2 = tf.squeeze(y_pair_2)\n\n u_pair = tf.reduce_sum(\n [\n tf.multiply(\n y_pair_0,\n tf.pow(\n _distance_matrix_inverse,\n tf.constant(2, dtype=tf.float32))),\n tf.multiply(\n y_pair_1,\n tf.pow(\n _distance_matrix_inverse,\n tf.constant(6, dtype=tf.float32))),\n tf.multiply(\n y_pair_2,\n tf.pow(\n _distance_matrix_inverse,\n tf.constant(12, dtype=tf.float32)))\n ],\n axis=0)\n\n u_bond_tot = tf.matmul(\n tf.transpose(\n tf.where(\n bond_in_mol,\n tf.ones_like(bond_in_mol, 
dtype=tf.float32),\n tf.zeros_like(bond_in_mol, dtype=tf.float32))),\n tf.expand_dims(\n u_bond,\n axis=1))\n\n u_angle_tot = tf.matmul(\n tf.transpose(\n tf.where(\n angle_in_mol,\n tf.ones_like(angle_in_mol, dtype=tf.float32),\n tf.zeros_like(angle_in_mol, dtype=tf.float32))),\n tf.expand_dims(\n u_angle,\n axis=1))\n\n u_dihedral_tot = tf.matmul(\n tf.transpose(\n tf.where(\n torsion_in_mol,\n tf.ones_like(torsion_in_mol, dtype=tf.float32),\n tf.zeros_like(torsion_in_mol, dtype=tf.float32))),\n tf.expand_dims(\n u_dihedral,\n axis=1))\n\n u_pair_tot = tf.boolean_mask(\n tf.matmul(\n tf.transpose(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32))),\n tf.reduce_sum(\n u_pair,\n axis=1,\n keepdims=True)),\n attr_in_mol)\n\n u_tot = tf.squeeze(\n u_bond_tot + u_angle_tot + u_dihedral_tot + u_pair_tot)\n\n return u_tot\n\ndef init(point):\n global gn\n global optimizer\n\n class f_v(tf.keras.Model):\n \"\"\" Featurization of nodes.\n Here we simply featurize atoms using one-hot encoding.\n\n \"\"\"\n def __init__(self, units=point['D_V']):\n super(f_v, self).__init__()\n self.d = tf.keras.layers.Dense(units)\n\n # @tf.function\n def call(self, x):\n return self.d(x)\n\n class f_r(tf.keras.Model):\n \"\"\" Readout function\n \"\"\"\n def __init__(self, units=point['f_r'], f_r_a=point['f_r_a']):\n super(f_r, self).__init__()\n self.d_k = tf.keras.layers.Dense(units, activation='relu')\n self.d_q = tf.keras.layers.Dense(units, activation='relu')\n self.d_pair_0 = tf.keras.layers.Dense(units, activation='relu')\n self.d_pair_1 = tf.keras.layers.Dense(3,\n kernel_initializer='random_uniform',\n activity_regularizer=tf.keras.regularizers.l2(1e-5))\n\n self.d_e_1 = tf.keras.layers.Dense(2,\n kernel_initializer='random_uniform')\n\n self.d_e_0 = tf.keras.layers.Dense(units, activation='relu')\n\n self.d_a_1 = tf.keras.layers.Dense(2,\n kernel_initializer='random_uniform')\n self.d_a_0 = tf.keras.layers.Dense(units, activation='relu')\n\n self.d_t_1 = tf.keras.layers.Dense(2,\n kernel_initializer='random_uniform')\n self.d_t_0 = tf.keras.layers.Dense(units, activation='relu')\n\n self.d_e0_0 = lime.nets.for_gn.ConcatenateThenFullyConnect((units,\n 'relu', units, 'relu'))\n\n self.d_e0_1 = tf.keras.layers.Dense(1)\n\n self.units = units\n self.d_v = point['D_V']\n self.d_e = point['D_E']\n self.d_a = point['D_A']\n self.d_t = point['D_T']\n self.d_u = point['D_U']\n\n # @tf.function\n def call(self, h_v, h_e, h_a, h_t, h_u,\n h_v_history, h_e_history, h_a_history,\n h_t_history, h_u_history,\n atom_in_mol, bond_in_mol, angle_in_mol, torsion_in_mol,\n adjacency_map, coordinates):\n\n\n h_e_history.set_shape([None, 6, self.d_e])\n h_u_history.set_shape([None, 6, self.d_u])\n h_v_history.set_shape([None, 6, self.d_v])\n\n h_e_bar_history = tf.reduce_sum( # (n_mols, t, d_e)\n tf.multiply(\n tf.tile(\n tf.expand_dims(\n tf.expand_dims(\n tf.where( # (n_bonds, n_mols)\n tf.boolean_mask(\n bond_in_mol,\n tf.reduce_any(\n bond_in_mol,\n axis=1),\n axis=0),\n tf.ones_like(\n tf.boolean_mask(\n bond_in_mol,\n tf.reduce_any(\n bond_in_mol,\n axis=1),\n axis=0),\n dtype=tf.float32),\n tf.zeros_like(\n tf.boolean_mask(\n bond_in_mol,\n tf.reduce_any(\n bond_in_mol,\n axis=1),\n axis=0),\n dtype=tf.float32)),\n 2),\n 3),\n [\n 1,\n 1,\n tf.shape(h_e_history)[1],\n tf.shape(h_e)[1]\n ]),\n tf.tile( # (n_bonds, n_mols, t, d_e)\n tf.expand_dims(\n h_e_history, # (n_bonds, t, d_e)\n 1),\n [1, tf.shape(bond_in_mol)[1], 1, 1])),\n axis=0)\n\n 
h_v_bar_history = tf.reduce_sum( # (n_mols, t, d_e)\n tf.multiply(\n tf.tile(\n tf.expand_dims(\n tf.expand_dims(\n tf.where( # (n_atoms, n_mols)\n atom_in_mol,\n tf.ones_like(\n atom_in_mol,\n dtype=tf.float32),\n tf.zeros_like(\n atom_in_mol,\n dtype=tf.float32)),\n 2),\n 3),\n [1, 1, tf.shape(h_v_history)[1], tf.shape(h_v)[1]]),\n tf.tile( # (n_atoms, n_mols, t, d_e)\n tf.expand_dims(\n h_v_history, # (n_atoms, t, d_e)\n 1),\n [1, tf.shape(atom_in_mol)[1], 1, 1])),\n axis=0)\n\n e0 = tf.squeeze(self.d_e0_1(self.d_e0_0(\n tf.reshape(\n h_v_bar_history,\n [-1, 6 * self.d_v]),\n tf.reshape(\n h_e_bar_history,\n [-1, 6 * self.d_e]),\n tf.reshape(\n h_u_history,\n [-1, 6 * self.d_u]))))\n\n adjacency_map_full = tf.math.add(\n tf.transpose(\n adjacency_map),\n adjacency_map)\n\n per_mol_mask = tf.matmul(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32)),\n tf.transpose(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32))))\n\n # get distance matrix\n distance = gin.deterministic.md.get_distance_matrix(coordinates)\n\n distance = tf.expand_dims(\n distance,\n 2)\n\n n_atoms = tf.shape(distance, tf.int64)[0]\n\n # (n_atoms, n_atoms, units)\n k = tf.multiply(\n tf.tile(\n tf.expand_dims(\n per_mol_mask,\n 2),\n [1, 1, self.units]),\n tf.tile(\n tf.expand_dims(\n self.d_k(h_v),\n 1),\n [1, n_atoms, 1]))\n\n # (n_atoms, n_atoms, units)\n q = tf.multiply(\n tf.tile(\n tf.expand_dims(\n per_mol_mask,\n 2),\n [1, 1, self.units]),\n tf.tile(\n tf.expand_dims(\n self.d_q(h_v),\n 0),\n [n_atoms, 1, 1]))\n\n h_pair = tf.concat(\n [\n k,\n q,\n ],\n axis=2)\n\n h_pair = tf.math.multiply(\n tf.tile(\n tf.expand_dims(\n tf.math.multiply(\n tf.math.subtract(\n per_mol_mask,\n tf.eye(\n tf.shape(per_mol_mask)[0])),\n tf.where(\n tf.equal(\n adjacency_map_full,\n tf.constant(0, dtype=tf.float32)),\n tf.ones_like(adjacency_map),\n tf.zeros_like(adjacency_map))),\n 2),\n [1, 1, 3]),\n self.d_pair_1(self.d_pair_0(h_pair)))\n\n y_pair = h_pair\n\n y_a = self.d_a_1(\n self.d_a_0(\n tf.reshape(\n h_a_history,\n [\n tf.shape(h_a_history)[0],\n 6 * self.d_a\n ])))\n\n y_e = self.d_e_1(\n self.d_e_0(\n tf.reshape(\n h_e_history,\n [\n tf.shape(h_e_history)[0],\n 6 * self.d_e\n ])))\n\n\n y_t = self.d_t_1(\n self.d_t_0(\n tf.reshape(\n h_t_history,\n [\n tf.shape(h_t_history)[0],\n 6 * self.d_t\n ])))\n\n return e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol\n\n\n gn = gin.probabilistic.gn_hyper.HyperGraphNet(\n f_e=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['f_e_0'], 'elu', point['D_E'], 'tanh')),\n f_a=tf.keras.layers.Dense(point['D_A'], activation='tanh'),\n f_t=tf.keras.layers.Dense(point['D_T'], activation='tanh'),\n f_v=f_v(),\n f_u=(lambda atoms, adjacency_map, batched_attr_in_mol: \\\n tf.tile(\n tf.zeros((1, point['D_U'])),\n [\n tf.math.count_nonzero(batched_attr_in_mol),\n 1\n ]\n )),\n phi_e=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_e_0'], point['phi_e_a_0'], point['D_E'],\n point['phi_e_a_1'])),\n phi_u=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_u_0'], point['phi_u_a_0'], point['D_U'],\n point['phi_u_a_1'])),\n phi_v=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_v_0'], point['phi_v_a_0'], point['D_V'],\n point['phi_v_a_1'])),\n phi_a=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_a_0'], point['phi_a_a_0'], point['D_A'],\n point['phi_a_a_1'])),\n 
phi_t=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_t_0'], point['phi_t_a_0'], point['D_T'],\n point['phi_t_a_1'])),\n f_r=f_r(),\n repeat=5)\n\n optimizer = tf.keras.optimizers.Adam(1e-4)\n\ndef obj_fn(point):\n point = dict(zip(config_space.keys(), point))\n init(point)\n\n for dummy_idx in range(10):\n for atoms_, adjacency_map, atom_in_mol, bond_in_mol, u, attr_in_mol in ds_tr:\n atoms = atoms_[:, :12]\n coordinates = tf.Variable(atoms_[:, 12:15])\n jacobian = atoms_[:, 15:]\n with tf.GradientTape() as tape:\n e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol = gn(\n atoms, adjacency_map, coordinates, atom_in_mol, attr_in_mol)\n\n\n with tf.GradientTape() as tape1:\n\n u_hat = flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map,\n coordinates, atom_in_mol, bond_in_mol, angle_in_mol,\n torsion_in_mol, attr_in_mol)\n\n jacobian_hat = tape1.gradient(u_hat, coordinates)\n\n jacobian_hat = tf.boolean_mask(\n jacobian_hat,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n jacobian = tf.boolean_mask(\n jacobian,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n u = tf.boolean_mask(\n u,\n attr_in_mol)\n\n loss = tf.math.add(\n tf.reduce_sum(\n tf.keras.losses.MSE(\n tf.math.log(\n tf.norm(\n jacobian,\n axis=1)),\n tf.math.log(\n tf.norm(\n jacobian_hat,\n axis=1)))),\n tf.reduce_sum(\n tf.losses.cosine_similarity(\n jacobian,\n jacobian_hat,\n axis=1)))\n\n\n variables = gn.variables\n grad = tape.gradient(loss, variables)\n\n # if not tf.reduce_any([tf.reduce_any(tf.math.is_nan(_grad)) for _grad in grad]).numpy():\n\n optimizer.apply_gradients(\n zip(grad, variables))\n\n del loss\n del coordinates\n del tape\n del tape1\n\n y_true_tr = -1. * tf.ones([1, ], dtype=tf.float32)\n y_pred_tr = -1. * tf.ones([1, ], dtype=tf.float32)\n\n y_true_vl = -1. * tf.ones([1, ], dtype=tf.float32)\n y_pred_vl = -1. * tf.ones([1, ], dtype=tf.float32)\n\n y_true_te = -1. * tf.ones([1, ], dtype=tf.float32)\n y_pred_te = -1. 
* tf.ones([1, ], dtype=tf.float32)\n\n for atoms_, adjacency_map, atom_in_mol, bond_in_mol, u, attr_in_mol in ds_tr:\n atoms = atoms_[:, :12]\n coordinates = tf.Variable(atoms_[:, 12:15])\n jacobian = atoms_[:, 15:]\n with tf.GradientTape() as tape:\n e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol = gn(\n atoms, adjacency_map, coordinates, atom_in_mol, attr_in_mol)\n\n\n with tf.GradientTape() as tape1:\n\n u_hat = flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map,\n coordinates, atom_in_mol, bond_in_mol, angle_in_mol,\n torsion_in_mol, attr_in_mol)\n\n jacobian_hat = tape1.gradient(u_hat, coordinates)\n\n jacobian_hat = tf.boolean_mask(\n jacobian_hat,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n jacobian = tf.boolean_mask(\n jacobian,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n y_true_tr = tf.concat([y_true_tr, tf.reshape(jacobian, [-1])], axis=0)\n y_pred_tr = tf.concat([y_pred_tr, tf.reshape(jacobian_hat, [-1])], axis=0)\n\n for atoms_, adjacency_map, atom_in_mol, bond_in_mol, u, attr_in_mol in ds_te:\n atoms = atoms_[:, :12]\n coordinates = tf.Variable(atoms_[:, 12:15])\n jacobian = atoms_[:, 15:]\n with tf.GradientTape() as tape:\n e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol = gn(\n atoms, adjacency_map, coordinates, atom_in_mol, attr_in_mol)\n\n\n with tf.GradientTape() as tape1:\n\n u_hat = flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map,\n coordinates, atom_in_mol, bond_in_mol, angle_in_mol,\n torsion_in_mol, attr_in_mol)\n\n jacobian_hat = tape1.gradient(u_hat, coordinates)\n\n jacobian_hat = tf.boolean_mask(\n jacobian_hat,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n jacobian = tf.boolean_mask(\n jacobian,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n y_true_te = tf.concat([y_true_te, tf.reshape(jacobian, [-1])], axis=0)\n y_pred_te = tf.concat([y_pred_te, tf.reshape(jacobian_hat, [-1])], axis=0)\n\n for atoms_, adjacency_map, atom_in_mol, bond_in_mol, u, attr_in_mol in ds_vl:\n atoms = atoms_[:, :12]\n coordinates = tf.Variable(atoms_[:, 12:15])\n jacobian = atoms_[:, 15:]\n with tf.GradientTape() as tape:\n e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol = gn(\n atoms, adjacency_map, coordinates, atom_in_mol, attr_in_mol)\n\n\n with tf.GradientTape() as tape1:\n\n u_hat = flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map,\n coordinates, atom_in_mol, bond_in_mol, angle_in_mol,\n torsion_in_mol, attr_in_mol)\n\n jacobian_hat = tape1.gradient(u_hat, coordinates)\n\n jacobian_hat = tf.boolean_mask(\n jacobian_hat,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n jacobian = tf.boolean_mask(\n jacobian,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n y_true_vl = tf.concat([y_true_vl, tf.reshape(jacobian, [-1])], axis=0)\n y_pred_vl = tf.concat([y_pred_vl, tf.reshape(jacobian_hat, [-1])], axis=0)\n\n try:\n r2_tr = metrics.r2_score(y_true_tr[1:].numpy(), y_pred_tr[1:].numpy())\n rmse_tr = metrics.mean_squared_error(y_true_tr[1:].numpy(), y_pred_tr[1:].numpy())\n\n r2_vl = metrics.r2_score(y_true_vl[1:].numpy(), y_pred_vl[1:].numpy())\n rmse_vl = metrics.mean_squared_error(y_true_vl[1:].numpy(), y_pred_vl[1:].numpy())\n\n r2_te = metrics.r2_score(y_true_te[1:].numpy(), y_pred_te[1:].numpy())\n rmse_te = metrics.mean_squared_error(y_true_te[1:].numpy(), y_pred_te[1:].numpy())\n\n\n np.save('y_true_tr', y_true_tr[1:].numpy())\n np.save('y_pred_tr', y_pred_tr[1:].numpy())\n np.save('y_true_te', y_true_te[1:].numpy())\n np.save('y_pred_te', y_pred_te[1:].numpy())\n np.save('y_true_vl', y_true_vl[1:].numpy())\n 
np.save('y_pred_vl', y_pred_vl[1:].numpy())\n\n print(tf.stack([y_true_tr, y_pred_tr], axis=1))\n\n print(point, flush=True)\n print(r2_tr, flush=True)\n print(rmse_tr, flush=True)\n print(r2_vl, flush=True)\n print(rmse_vl, flush=True)\n print(r2_te, flush=True)\n print(rmse_te, flush=True)\n\n gn.save_weights('gn.h5')\n\n return rmse_vl\n\n except:\n print('nan')\n return None\n\nlime.optimize.dummy.optimize(obj_fn, config_space.values(), 1000)\n",
"\"\"\"\nMIT License\n\nCopyright (c) 2019 Chodera lab // Memorial Sloan Kettering Cancer Center,\nWeill Cornell Medical College, Nicea Research, and Authors\n\nAuthors:\nYuanqing Wang\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\"\"\"\nfrom sklearn import metrics\nimport tensorflow as tf\nimport gin\nimport lime\nimport time\nimport pandas as pd\nimport numpy as np\nimport os\n\n\nN_EPOCH = 30\n\nTRANSLATION = {\n b'C': 0,\n b'N': 1,\n b'O': 2,\n b'S': 3,\n b'P': 4,\n b'F': 5,\n b'Cl': 6,\n b'Br': 7,\n b'I': 8,\n b'H': 9\n}\n\noe_mols = gin.i_o.utils.file_to_oemols('/home/chodera/charge-datasets/datasets/riniker/ChEMBL_AM1BCC.oeb')\noe_mol_dicts = []\nfor oe_mol in oe_mols:\n try:\n oe_mol_dict =\\\n gin.i_o.utils.oemol_to_dict(oe_mol, read_wbo=True)\n oe_mol_dicts.append(oe_mol_dict)\n except:\n continue\n\n# oe_mol_dicts = [gin.i_o.utils.oemol_to_dict(oe_mol, wbo=True) for oe_mol in oe_mols]\nn_samples = len(oe_mol_dicts)\nds_idxs = tf.data.Dataset.from_tensor_slices(\n tf.expand_dims(\n tf.convert_to_tensor(\n list(range(n_samples))),\n 1))\n\ndef read_one_mol(idx):\n atoms = oe_mol_dicts[int(idx.numpy())]['atomic_symbols']\n atoms = tf.expand_dims(tf.convert_to_tensor(\n atoms,\n tf.string),\n 1)\n atoms = tf.cast(\n tf.map_fn(\n lambda x: TRANSLATION[x.numpy()[0]],\n atoms,\n tf.int32),\n tf.int64)\n\n atoms = tf.reshape(\n atoms,\n [-1])\n\n n_atoms = tf.shape(atoms, tf.int64)[0]\n\n bonds = tf.convert_to_tensor(\n oe_mol_dicts[int(idx.numpy())]['connectivity'],\n dtype=tf.float32)\n\n adjacency_map = tf.zeros(\n (n_atoms, n_atoms),\n tf.float32)\n\n adjacency_map = tf.tensor_scatter_nd_update(\n adjacency_map,\n\n tf.cast(\n bonds[:, :2],\n tf.int64),\n\n bonds[:, 2])\n\n adjacency_map = gin.i_o.utils.conjugate_average(atoms, adjacency_map)\n\n charges = tf.convert_to_tensor(\n oe_mol_dicts[int(idx.numpy())]['partial_charges'],\n tf.float32)\n\n return atoms, adjacency_map, charges\n\nds_mols = ds_idxs.map(\n lambda idx: tf.py_function(\n read_one_mol,\n [idx],\n [tf.int64, tf.float32, tf.float32])).shuffle(\n n_samples,\n seed=2666)\n\n\nds_all = gin.probabilistic.gn.GraphNet.batch(\n ds_mols, 256, per_atom_attr=True).cache(\n str(os.getcwd()) + '/temp')\n\n\n# get the number of samples\n# NOTE: there is no way to get the number of samples in a dataset\n# except loop through one time, unfortunately\nn_batches = gin.probabilistic.gn.GraphNet.get_number_batches(\n ds_all)\n\nn_batches = int(n_batches)\nn_global_te = int(0.2 * n_batches)\nds_global_tr = ds_all.skip(n_global_te)\nds_global_te = 
ds_all.take(n_global_te)\n\n\n# =============================================================================\n# utility functions\n# =============================================================================\[email protected]\ndef get_charges(e, s, Q):\n \"\"\" Solve the function to get the absolute charges of atoms in a\n molecule from parameters.\n\n Parameters\n ----------\n e : tf.Tensor, dtype = tf.float32, shape = (34, ),\n electronegativity.\n s : tf.Tensor, dtype = tf.float32, shape = (34, ),\n hardness.\n Q : tf.Tensor, dtype = tf.float32, shape=(),\n total charge of a molecule.\n\n We use Lagrange multipliers to analytically give the solution.\n\n $$\n\n U({\\bf q})\n &= \\sum_{i=1}^N \\left[ e_i q_i + \\frac{1}{2} s_i q_i^2\\right]\n - \\lambda \\, \\left( \\sum_{j=1}^N q_j - Q \\right) \\\\\n &= \\sum_{i=1}^N \\left[\n (e_i - \\lambda) q_i + \\frac{1}{2} s_i q_i^2 \\right\n ] + Q\n\n $$\n\n This gives us:\n\n $$\n\n q_i^*\n &= - e_i s_i^{-1}\n + \\lambda s_i^{-1} \\\\\n &= - e_i s_i^{-1}\n + s_i^{-1} \\frac{\n Q +\n \\sum\\limits_{i=1}^N e_i \\, s_i^{-1}\n }{\\sum\\limits_{j=1}^N s_j^{-1}}\n\n $$\n\n \"\"\"\n\n return tf.math.add(\n tf.math.multiply(\n tf.math.negative(\n e),\n tf.math.pow(\n s,\n -1)),\n\n tf.math.multiply(\n tf.math.pow(\n s,\n -1),\n tf.math.divide(\n tf.math.add(\n Q,\n tf.reduce_sum(\n tf.math.multiply(\n e,\n tf.math.pow(\n s,\n -1)))),\n tf.reduce_sum(\n tf.math.pow(\n s,\n -1)))))\n\n\[email protected]\ndef get_q_i_hat_total_per_mol(e, s, Qs, attr_in_mol):\n \"\"\" Calculate the charges per molecule based on\n `attr_in_mol`.\n\n \"\"\"\n attr_in_mol.set_shape([None, None])\n\n attr_in_mol = tf.boolean_mask(\n attr_in_mol,\n tf.reduce_any(\n attr_in_mol,\n axis=1),\n axis=0)\n\n attr_in_mol = tf.boolean_mask(\n attr_in_mol,\n tf.reduce_any(\n attr_in_mol,\n axis=0),\n axis=1)\n\n q_i = tf.tile(\n tf.expand_dims(\n tf.constant(\n 0,\n dtype=tf.float32),\n 0),\n [tf.shape(attr_in_mol, tf.int64)[0]])\n\n def loop_body(q_i, idx,\n e=e,\n s=s,\n Qs=Qs,\n attr_in_mol=attr_in_mol):\n\n # get attr\n _attr_in_mol = attr_in_mol[:, idx]\n\n # get the attributes of each molecule\n _Qs = Qs[idx]\n\n _e = tf.boolean_mask(\n e,\n _attr_in_mol)\n\n _s = tf.boolean_mask(\n s,\n _attr_in_mol)\n\n _idxs = tf.where(_attr_in_mol)\n\n # update\n q_i = tf.tensor_scatter_nd_update(\n q_i,\n\n # idxs\n _idxs,\n\n # update\n tf.reshape(\n get_charges(\n _e,\n _s,\n _Qs),\n [-1]))\n\n return q_i, tf.add(idx, tf.constant(1, dtype=tf.int64))\n\n idx = tf.constant(0, dtype=tf.int64)\n\n # loop_body(q_i, idx)\n\n\n q_i, idx = tf.while_loop(\n lambda _, idx: tf.less(\n idx,\n tf.shape(attr_in_mol, tf.int64)[1]),\n\n loop_body,\n\n [q_i, idx])\n\n\n return q_i\n\[email protected]\ndef get_q_total_per_mol(q_i, attr_in_mol):\n # attr_in_mol.set_shape([None, None])\n\n q_i = tf.boolean_mask(\n q_i,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n attr_in_mol = tf.boolean_mask(\n attr_in_mol,\n tf.reduce_any(\n attr_in_mol,\n axis=1),\n axis=0)\n\n attr_in_mol = tf.boolean_mask(\n attr_in_mol,\n tf.reduce_any(\n attr_in_mol,\n axis=0),\n axis=1)\n\n attr_in_mol = tf.where(\n attr_in_mol,\n\n tf.ones_like(\n attr_in_mol,\n dtype=tf.float32),\n\n tf.zeros_like(\n attr_in_mol,\n dtype=tf.float32))\n\n q_per_mol = tf.reduce_sum(\n tf.multiply(\n attr_in_mol,\n tf.tile(\n tf.expand_dims(\n q_i,\n 1),\n [\n 1,\n tf.shape(attr_in_mol, tf.int64)[1]\n ])),\n axis=0)\n\n return q_per_mol\n\n\nconfig_space = {\n 'D_V': [16, 32, 64, 128],\n 'D_E': [16, 32, 64, 128],\n 'D_U': [16, 32, 64, 128],\n\n 
'phi_v_units': [16, 32, 64, 128],\n 'phi_v_activation': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],\n\n 'phi_e_units': [16, 32, 64, 128],\n 'phi_e_activation': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],\n\n 'phi_u_units': [16, 32, 64, 128],\n 'phi_u_activation': ['elu', 'relu', 'leaky_relu', 'tanh', 'sigmoid'],\n\n 'f_r_units': [16, 32, 64, 128],\n\n 'learning_rate': [1e-5, 1e-4, 1e-3, 1e-2]\n\n}\n\ndef init(point):\n global gn\n global optimizer\n\n class f_v(tf.keras.Model):\n \"\"\" Featurization of nodes.\n Here we simply featurize atoms using one-hot encoding.\n\n \"\"\"\n def __init__(self, units=point['D_V']):\n super(f_v, self).__init__()\n self.d = tf.keras.layers.Dense(units)\n\n @tf.function\n def call(self, x):\n x = tf.one_hot(x, 8)\n # set shape because Dense doesn't like variation\n x.set_shape([None, 8])\n return self.d(x)\n\n f_e = tf.keras.layers.Dense(point['D_E'])\n\n f_u=(lambda atoms, adjacency_map, batched_attr_mask: \\\n tf.tile(\n tf.zeros((1, point['D_U'])),\n [\n tf.math.count_nonzero(\n tf.reduce_any(\n batched_attr_mask,\n axis=0)),\n 1\n ]\n ))\n\n phi_v = lime.nets.for_gn.ConcatenateThenFullyConnect(\n (\n point['phi_v_units'],\n point['phi_v_activation'],\n point['phi_v_units'],\n point['D_V']\n ))\n\n\n phi_e = lime.nets.for_gn.ConcatenateThenFullyConnect(\n (\n point['phi_e_units'],\n point['phi_e_activation'],\n point['phi_e_units'],\n point['D_E']\n ))\n\n class phi_u(tf.keras.Model):\n def __init__(self, config=(\n point['phi_u_units'],\n point['phi_u_activation'],\n point['phi_u_units'],\n point['D_U']\n )):\n super(phi_u, self).__init__()\n self.d = lime.nets.for_gn.ConcatenateThenFullyConnect(config)\n\n @tf.function\n def call(self, h_u, h_u_0, h_e_bar, h_v_bar):\n return self.d(h_u, h_u_0, h_e_bar, h_v_bar)\n\n\n class f_r(tf.keras.Model):\n \"\"\" Readout function.\n \"\"\"\n\n def __init__(self, units=point['f_r_units']):\n super(f_r, self).__init__()\n self.d_e_0 = tf.keras.layers.Dense(units)\n self.d_s_0 = tf.keras.layers.Dense(units)\n self.d_e_1 = tf.keras.layers.Dense(1)\n self.d_s_1 = tf.keras.layers.Dense(1)\n\n @tf.function\n def call(self,\n h_e, h_v, h_u,\n h_e_history, h_v_history, h_u_history,\n atom_in_mol, bond_in_mol):\n\n # although this could take many many arguments,\n # we only take $h_e$ for now\n e = self.d_e_1(self.d_e_0(h_v))\n s = self.d_s_1(self.d_s_0(h_v))\n\n return e, s\n\n gn = gin.probabilistic.gn.GraphNet(\n f_e=f_e,\n f_v=f_v(),\n f_u=f_u,\n phi_e=phi_e,\n phi_v=phi_v,\n phi_u=phi_u(),\n f_r=f_r(),\n repeat=5)\n\n optimizer = tf.keras.optimizers.Adam(point['learning_rate'])\n\ndef obj_fn(point):\n N_EPOCHS = 30\n point = dict(zip(config_space.keys(), point))\n n_te = int(0.2 * 0.8 * n_batches)\n ds = ds_global_tr.shuffle(int(0.8 * n_batches))\n\n r2_train = []\n r2_test = []\n mse_train = []\n mse_test = []\n\n for idx in range(5):\n init(point)\n\n y_true_train = tf.constant([-1], dtype=tf.float32)\n y_pred_train = tf.constant([-1], dtype=tf.float32)\n y_true_test = tf.constant([-1], dtype=tf.float32)\n y_pred_test = tf.constant([-1], dtype=tf.float32)\n\n ds_tr = ds.take(idx * n_te).concatenate(\n ds.skip((idx + 1) * n_te).take((4 - idx) * n_te))\n\n ds_te = ds.skip(idx * n_te).take(n_te)\n\n for dummy_idx in range(N_EPOCHS):\n for atoms, adjacency_map, \\\n atom_in_mol, bond_in_mol, q_i, attr_in_mol \\\n in ds_tr:\n\n adjacency_map = tf.where(\n tf.greater_equal(\n adjacency_map,\n tf.constant(0, dtype=tf.float32)),\n\n tf.ones_like(adjacency_map),\n\n tf.zeros_like(adjacency_map))\n\n with 
tf.GradientTape() as tape:\n Qs = get_q_total_per_mol(q_i, attr_in_mol)\n\n e, s = gn(\n atoms, adjacency_map,\n atom_in_mol, bond_in_mol, attr_in_mol)\n\n e = tf.boolean_mask(\n e,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n s = tf.boolean_mask(\n s,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n q_i_hat = get_q_i_hat_total_per_mol(\n e, s, Qs, attr_in_mol)\n\n q_i = tf.boolean_mask(\n q_i,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n loss = tf.losses.mean_squared_error(\n q_i,\n q_i_hat)\n\n variables = gn.variables\n grad = tape.gradient(loss, variables)\n optimizer.apply_gradients(\n zip(grad, variables))\n\n\n gn.switch(True)\n\n for atoms, adjacency_map, \\\n atom_in_mol, bond_in_mol, q_i, attr_in_mol \\\n in ds_te:\n\n adjacency_map = tf.where(\n tf.greater_equal(\n adjacency_map,\n tf.constant(0, dtype=tf.float32)),\n\n tf.ones_like(adjacency_map),\n\n tf.zeros_like(adjacency_map))\n\n Qs = get_q_total_per_mol(q_i, attr_in_mol)\n\n e, s = gn(\n atoms, adjacency_map,\n atom_in_mol, bond_in_mol, attr_in_mol)\n\n e = tf.boolean_mask(\n e,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n s = tf.boolean_mask(\n s,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n q_i_hat = get_q_i_hat_total_per_mol(\n e, s, Qs, attr_in_mol)\n\n q_i = tf.boolean_mask(\n q_i,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n y_true_test = tf.concat(\n [\n y_true_test,\n tf.reshape(q_i, [-1])\n ],\n axis=0)\n\n y_pred_test = tf.concat(\n [\n y_pred_test,\n tf.reshape(q_i_hat, [-1])\n ],\n axis=0)\n\n mse_test.append(tf.losses.mean_squared_error(\n y_true_test[1:],\n y_pred_test[1:]).numpy())\n\n r2_test.append(metrics.r2_score(\n y_true_test[1:].numpy(),\n y_pred_test[1:].numpy()))\n\n print(r2_test, flush=True)\n for atoms, adjacency_map, \\\n atom_in_mol, bond_in_mol, q_i, attr_in_mol \\\n in ds_tr:\n\n adjacency_map = tf.where(\n tf.greater_equal(\n adjacency_map,\n tf.constant(0, dtype=tf.float32)),\n\n tf.ones_like(adjacency_map),\n\n tf.zeros_like(adjacency_map))\n\n Qs = get_q_total_per_mol(q_i, attr_in_mol)\n\n e, s = gn(\n atoms, adjacency_map,\n atom_in_mol, bond_in_mol, attr_in_mol)\n\n e = tf.boolean_mask(\n e,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n s = tf.boolean_mask(\n s,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n q_i_hat = get_q_i_hat_total_per_mol(\n e, s, Qs, attr_in_mol)\n\n q_i = tf.boolean_mask(\n q_i,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n y_true_train = tf.concat(\n [\n y_true_train,\n tf.reshape(q_i, [-1])\n ],\n axis=0)\n\n y_pred_train = tf.concat(\n [\n y_pred_train,\n tf.reshape(q_i_hat, [-1])\n ],\n axis=0)\n\n\n mse_train.append(tf.losses.mean_squared_error(\n y_true_train[1:],\n y_pred_train[1:]).numpy())\n\n r2_train.append(metrics.r2_score(\n y_true_train[1:].numpy(),\n y_pred_train[1:].numpy()))\n\n print(r2_train, flush=True)\n\n y_true_global_test = tf.constant([-1], dtype=tf.float32)\n y_pred_global_test = tf.constant([-1], dtype=tf.float32)\n\n init(point)\n\n time0 = time.time()\n\n for dummy_idx in range(N_EPOCHS):\n for atoms, adjacency_map, \\\n atom_in_mol, bond_in_mol, q_i, attr_in_mol \\\n in ds_global_tr:\n\n adjacency_map = tf.where(\n tf.greater_equal(\n adjacency_map,\n tf.constant(0, dtype=tf.float32)),\n\n tf.ones_like(adjacency_map),\n\n tf.zeros_like(adjacency_map))\n\n with tf.GradientTape() as tape:\n Qs = get_q_total_per_mol(q_i, attr_in_mol)\n\n e, s = gn(\n atoms, adjacency_map,\n atom_in_mol, bond_in_mol, attr_in_mol)\n\n e = tf.boolean_mask(\n e,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n s = tf.boolean_mask(\n s,\n 
tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n q_i_hat = get_q_i_hat_total_per_mol(\n e, s, Qs, attr_in_mol)\n\n q_i = tf.boolean_mask(\n q_i,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n loss = tf.losses.mean_squared_error(\n q_i,\n q_i_hat)\n\n variables = gn.variables\n grad = tape.gradient(loss, variables)\n optimizer.apply_gradients(\n zip(grad, variables))\n\n time1 = time.time()\n\n gn.switch(True)\n\n for atoms, adjacency_map, \\\n atom_in_mol, bond_in_mol, q_i, attr_in_mol \\\n in ds_global_te:\n\n Qs = get_q_total_per_mol(q_i, attr_in_mol)\n\n e, s = gn(\n atoms, adjacency_map,\n atom_in_mol, bond_in_mol, attr_in_mol)\n\n e = tf.boolean_mask(\n e,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n s = tf.boolean_mask(\n s,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n q_i_hat = get_q_i_hat_total_per_mol(\n e, s, Qs, attr_in_mol)\n\n q_i = tf.boolean_mask(\n q_i,\n tf.reduce_any(\n attr_in_mol,\n axis=1))\n\n y_true_global_test = tf.concat(\n [\n y_true_global_test,\n tf.reshape(q_i, [-1])\n ],\n axis=0)\n\n y_pred_global_test = tf.concat(\n [\n y_pred_global_test,\n tf.reshape(q_i_hat, [-1])\n ],\n axis=0)\n\n y_true_global_test = y_true_global_test[1:]\n y_pred_global_test = y_pred_global_test[1:]\n\n mse_global_test = tf.losses.mean_squared_error(y_true_global_test,\n y_pred_global_test)\n r2_global_test = metrics.r2_score(y_true_global_test.numpy(),\n y_pred_global_test.numpy())\n\n print(point, flush=True)\n print('training time %s ' % (time1 - time0), flush=True)\n print('mse_train %s +- %s' % (np.mean(mse_train), np.std(mse_train)),\n flush=True)\n print('r2_train %s +- %s' % (np.mean(r2_train), np.std(r2_train)),\n flush=True)\n print('mse_test %s +- %s' % (np.mean(mse_test), np.std(mse_test)),\n flush=True)\n print('r2_test %s +- %s' % (np.mean(r2_test), np.std(r2_test)),\n flush=True)\n print('mse_global_test %s' % mse_global_test.numpy(),\n flush=True)\n print('r2_global_test %s ' % r2_global_test,\n flush=True)\n\n return mse_test\n\nlime.optimize.dummy.optimize(obj_fn, config_space.values(), 1000)\n",
"# =============================================================================\n# imports\n# =============================================================================\nimport os\nimport sys\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\ntf.autograph.set_verbosity(3)\nfrom sklearn import metrics\nimport gin\nimport lime\nimport pandas as pd\nimport numpy as np\nimport qcportal as ptl\nclient = ptl.FractalClient()\nfrom openforcefield.topology import Molecule\nfrom openforcefield.topology import Topology\nfrom openforcefield.typing.engines.smirnoff import ForceField\nFF = ForceField('test_forcefields/smirnoff99Frosst.offxml')\nimport cmiles\nfrom simtk import openmm\nimport random\n\nHARTREE_TO_KJ_PER_MOL = 2625.5\nBOHR_TO_NM = 0.0529177\nHARTREE_PER_BOHR_TO_KJ_PER_MOL_PER_NM = 49614.77\n\nTRANSLATION = {\n 6: 0,\n 7: 1,\n 8: 2,\n 16: 3,\n 15: 4,\n 9: 5,\n 17: 6,\n 35: 7,\n 53: 8,\n 1: 9\n}\n\nTRANSLATION_TO_ELEMENT = {\n 0: 6,\n 1: 7,\n 2: 8,\n 3: 16,\n 4: 9,\n 5: 17,\n 6: 53,\n 7: 1}\n\n\n\nds_qc = client.get_collection(\"OptimizationDataset\", \"OpenFF Full Optimization Benchmark 1\")\nds_name = tf.data.Dataset.from_tensor_slices(list(ds_qc.data.records))\n\ndef data_generator():\n for record_name in random.sample(list(ds_qc.data.records), 10):\n try:\n print(record_name, flush=True)\n r = ds_qc.get_record(record_name, specification='default')\n if r is not None:\n traj = r.get_trajectory()\n if traj is not None:\n for snapshot in traj:\n energy = tf.convert_to_tensor(\n snapshot.properties.scf_total_energy * HARTREE_TO_KJ_PER_MOL,\n dtype=tf.float32)\n\n mol = snapshot.get_molecule()\n # mol = snapshot.get_molecule().dict(encoding='json')\n \n atoms = tf.convert_to_tensor(\n [TRANSLATION[atomic_number] for atomic_number in mol.atomic_numbers],\n dtype=tf.int64)\n \n \n zeros = tf.zeros(\n (\n tf.shape(atoms, tf.int64)[0],\n tf.shape(atoms, tf.int64)[0]\n ),\n dtype=tf.float32)\n \n\n adjacency_map = tf.tensor_scatter_nd_update(\n tf.zeros(\n (\n tf.shape(atoms, tf.int64)[0],\n tf.shape(atoms, tf.int64)[0]\n ),\n dtype=tf.float32),\n tf.convert_to_tensor(\n np.array(mol.connectivity)[:, :2],\n dtype=tf.int64),\n tf.convert_to_tensor(\n np.array(mol.connectivity)[:, 2],\n dtype=tf.float32))\n\n xyz = tf.convert_to_tensor(\n mol.geometry * BOHR_TO_NM,\n dtype=tf.float32)\n\n jacobian = tf.convert_to_tensor(\n snapshot.return_result\\\n * HARTREE_PER_BOHR_TO_KJ_PER_MOL_PER_NM,\n dtype=tf.float32)\n\n mol = cmiles.utils.load_molecule(mol.dict(encoding='json'))\n\n top = Topology.from_molecules(Molecule.from_openeye(mol))\n sys = FF.create_openmm_system(top)\n\n angles = tf.convert_to_tensor(\n [[x[0], x[1], x[2], \n x[3]._value, \n x[4]._value] for x in\\\n [sys.getForces(\n )[0].getAngleParameters(idx)\\\n for idx in range(sys.getForces(\n )[0].getNumAngles())]],\n dtype=tf.float32)\n \n\n bonds = tf.convert_to_tensor([[x[0], x[1], \n x[2]._value, \n x[3]._value] for x in\\\n [sys.getForces(\n )[1].getBondParameters(idx)\\\n for idx in range(sys.getForces(\n )[1].getNumBonds())]],\n dtype=tf.float32)\n\n\n torsions = tf.convert_to_tensor([\n [x[0], x[1], x[2], x[3], x[4], x[5]._value, x[6]._value] for x in\\\n [sys.getForces(\n )[3].getTorsionParameters(idx)\\\n for idx in range(sys.getForces(\n )[3].getNumTorsions())]],\n dtype=tf.float32)\n\n\n particle_params = tf.convert_to_tensor([[\n x[0]._value,\n x[1]._value,\n x[2]._value\n ] for x in\\\n [sys.getForces(\n )[2].getParticleParameters(idx)\\\n 
for idx in range(sys.getForces(\n )[2].getNumParticles())]])\n\n \n yield(\n atoms,\n adjacency_map,\n energy,\n xyz,\n jacobian,\n angles,\n bonds,\n torsions,\n particle_params,\n sys)\n \n except:\n pass\n\n# @tf.function\ndef params_to_potential(\n q, sigma, epsilon,\n e_l, e_k,\n a_l, a_k,\n t_l, t_k,\n bond_idxs, angle_idxs, torsion_idxs,\n coordinates,\n atom_in_mol=tf.constant(False),\n bond_in_mol=tf.constant(False),\n attr_in_mol=tf.constant(False)):\n \n n_atoms = tf.shape(q, tf.int64)[0]\n n_angles = tf.shape(angle_idxs, tf.int64)[0]\n n_torsions = tf.shape(torsion_idxs, tf.int64)[0]\n n_bonds = tf.shape(bond_idxs, tf.int64)[0]\n \n if tf.logical_not(tf.reduce_any(atom_in_mol)):\n atom_in_mol = tf.tile(\n [[True]],\n [n_atoms, 1])\n\n if tf.logical_not(tf.reduce_any(bond_in_mol)):\n bond_in_mol = tf.tile(\n [[True]],\n [n_bonds, 1])\n\n if tf.logical_not(tf.reduce_any(attr_in_mol)):\n attr_in_mol = tf.constant([[True]])\n \n\n per_mol_mask = tf.stop_gradient(tf.matmul(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32),\n name='per_mol_mask_0'),\n tf.transpose(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32),\n name='per_mol_mask_1'))))\n\n\n distance_matrix = gin.deterministic.md.get_distance_matrix(\n coordinates)\n\n bond_distances = tf.gather_nd(\n distance_matrix,\n bond_idxs)\n\n angle_angles = gin.deterministic.md.get_angles_cos(\n coordinates,\n angle_idxs)\n\n torsion_dihedrals = gin.deterministic.md.get_dihedrals_cos(\n coordinates,\n torsion_idxs)\n\n # (n_atoms, n_atoms)\n q_pair = tf.multiply(\n q,\n tf.transpose(\n q))\n\n # (n_atoms, n_atoms)\n sigma_pair = tf.math.multiply(\n tf.constant(0.5, dtype=tf.float32),\n tf.math.add(\n sigma,\n tf.transpose(sigma)))\n\n # (n_atoms, n_atoms)\n epsilon_pair = tf.math.sqrt(\n tf.math.multiply(\n epsilon,\n tf.transpose(epsilon)))\n\n\n u_bond = 0.5 * tf.math.multiply(\n e_k,\n tf.math.pow(\n tf.math.subtract(\n bond_distances,\n e_l),\n tf.constant(2, dtype=tf.float32)))\n\n u_angle = 0.5 * tf.math.multiply(\n a_k,\n tf.math.pow(\n tf.math.subtract(\n tf.math.acos(angle_angles),\n a_l),\n tf.constant(2, dtype=tf.float32)))\n \n u_dihedral = tf.math.multiply(\n t_k,\n tf.math.pow(\n tf.math.subtract(\n torsion_dihedrals,\n t_l),\n tf.constant(2, dtype=tf.float32)))\n\n # (n_angles, n_atoms)\n angle_is_connected_to_atoms = tf.reduce_any(\n [\n tf.equal(\n tf.tile(\n tf.expand_dims(\n tf.range(n_atoms),\n 0),\n [n_angles, 1]),\n tf.tile(\n tf.expand_dims(\n angle_idxs[:, 0],\n 1),\n [1, n_atoms])),\n tf.equal(\n tf.tile(\n tf.expand_dims(\n tf.range(n_atoms),\n 0),\n [n_angles, 1]),\n tf.tile(\n tf.expand_dims(\n angle_idxs[:, 1],\n 1),\n [1, n_atoms])),\n tf.equal(\n tf.tile(\n tf.expand_dims(\n tf.range(n_atoms),\n 0),\n [n_angles, 1]),\n tf.tile(\n tf.expand_dims(\n angle_idxs[:, 2],\n 1),\n [1, n_atoms]))\n ],\n axis=0)\n\n # (n_torsions, n_atoms)\n torsion_is_connected_to_atoms = tf.reduce_any(\n [\n tf.equal(\n tf.tile(\n tf.expand_dims(\n tf.range(n_atoms),\n 0),\n [n_torsions, 1]),\n tf.tile(\n tf.expand_dims(\n torsion_idxs[:, 0],\n 1),\n [1, n_atoms])),\n tf.equal(\n tf.tile(\n tf.expand_dims(\n tf.range(n_atoms),\n 0),\n [n_torsions, 1]),\n tf.tile(\n tf.expand_dims(\n torsion_idxs[:, 1],\n 1),\n [1, n_atoms])),\n tf.equal(\n tf.tile(\n tf.expand_dims(\n tf.range(n_atoms),\n 0),\n [n_torsions, 1]),\n tf.tile(\n tf.expand_dims(\n torsion_idxs[:, 2],\n 1),\n [1, n_atoms])),\n 
tf.equal(\n tf.tile(\n tf.expand_dims(\n tf.range(n_atoms),\n 0),\n [n_torsions, 1]),\n tf.tile(\n tf.expand_dims(\n torsion_idxs[:, 3],\n 1),\n [1, n_atoms]))\n ],\n axis=0)\n\n\n angle_in_mol = tf.greater(\n tf.matmul(\n tf.where(\n angle_is_connected_to_atoms,\n tf.ones_like(\n angle_is_connected_to_atoms,\n tf.int64),\n tf.zeros_like(\n angle_is_connected_to_atoms,\n tf.int64)),\n tf.where(\n atom_in_mol,\n tf.ones_like(\n atom_in_mol,\n tf.int64),\n tf.zeros_like(\n atom_in_mol,\n tf.int64))),\n tf.constant(0, dtype=tf.int64))\n\n torsion_in_mol = tf.greater(\n tf.matmul(\n tf.where(\n torsion_is_connected_to_atoms,\n tf.ones_like(\n torsion_is_connected_to_atoms,\n tf.int64),\n tf.zeros_like(\n torsion_is_connected_to_atoms,\n tf.int64)),\n tf.where(\n atom_in_mol,\n tf.ones_like(\n atom_in_mol,\n tf.int64),\n tf.zeros_like(\n atom_in_mol,\n tf.int64))),\n tf.constant(0, dtype=tf.int64))\n\n u_pair_mask = tf.tensor_scatter_nd_update(\n per_mol_mask,\n bond_idxs,\n tf.zeros(\n shape=(\n tf.shape(bond_idxs, tf.int32)[0]),\n dtype=tf.float32))\n\n u_pair_mask = tf.tensor_scatter_nd_update(\n u_pair_mask,\n tf.stack(\n [\n angle_idxs[:, 0],\n angle_idxs[:, 2]\n ],\n axis=1),\n tf.zeros(\n shape=(\n tf.shape(angle_idxs, tf.int32)[0]),\n dtype=tf.float32))\n\n\n u_pair_mask = tf.linalg.set_diag(\n u_pair_mask,\n tf.zeros(\n shape=tf.shape(u_pair_mask)[0],\n dtype=tf.float32))\n\n u_pair_mask = tf.linalg.band_part(\n u_pair_mask,\n 0, -1)\n\n _distance_matrix = tf.where(\n tf.greater(\n u_pair_mask,\n tf.constant(0, dtype=tf.float32)),\n distance_matrix,\n tf.ones_like(distance_matrix))\n \n _distance_matrix_inverse = tf.multiply(\n u_pair_mask,\n tf.pow(\n tf.math.add(\n _distance_matrix,\n tf.constant(1e-5, dtype=tf.float32)),\n tf.constant(-1, dtype=tf.float32)))\n\n sigma_over_r = tf.multiply(\n sigma_pair,\n _distance_matrix_inverse)\n \n u_coulomb = tf.multiply(\n _distance_matrix_inverse,\n tf.multiply(\n 138.93 * q_pair,\n tf.tensor_scatter_nd_update(\n tf.ones_like(q_pair),\n tf.stack(\n [\n torsion_idxs[:, 0],\n torsion_idxs[:, 3]\n ],\n axis=1),\n tf.constant(\n 0.833,\n shape=(\n tf.shape(torsion_idxs)[0],\n ),\n dtype=tf.float32))))\n\n u_lj = tf.multiply(\n tf.where(\n tf.less(\n _distance_matrix,\n 0.1),\n tf.zeros_like(epsilon_pair),\n tf.multiply(\n epsilon_pair,\n tf.tensor_scatter_nd_update(\n tf.ones_like(epsilon_pair),\n tf.stack(\n [\n torsion_idxs[:, 0],\n torsion_idxs[:, 3]\n ],\n axis=1),\n tf.constant(\n 0.5,\n shape=(\n tf.shape(torsion_idxs)[0],\n ),\n dtype=tf.float32)))),\n tf.math.subtract(\n tf.pow(\n sigma_over_r,\n tf.constant(12, dtype=tf.float32)),\n tf.pow(\n sigma_over_r,\n tf.constant(6, dtype=tf.float32))))\n \n \n # print(tf.reduce_sum(u_coulomb))\n u_pair = u_coulomb + u_lj\n \n u_bond_tot = tf.matmul(\n tf.transpose(\n tf.where(\n bond_in_mol,\n tf.ones_like(bond_in_mol, dtype=tf.float32),\n tf.zeros_like(bond_in_mol, dtype=tf.float32))),\n tf.expand_dims(\n u_bond,\n axis=1))\n\n u_angle_tot = tf.matmul(\n tf.transpose(\n tf.where(\n angle_in_mol,\n tf.ones_like(angle_in_mol, dtype=tf.float32),\n tf.zeros_like(angle_in_mol, dtype=tf.float32))),\n tf.expand_dims(\n u_angle,\n axis=1))\n\n u_dihedral_tot = tf.matmul(\n tf.transpose(\n tf.where(\n torsion_in_mol,\n tf.ones_like(torsion_in_mol, dtype=tf.float32),\n tf.zeros_like(torsion_in_mol, dtype=tf.float32))),\n tf.expand_dims(\n u_dihedral,\n axis=1))\n\n u_pair_tot = tf.matmul(\n tf.transpose(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, 
dtype=tf.float32))),\n tf.reduce_sum(\n u_pair,\n axis=1,\n keepdims=True))\n \n u_tot = tf.squeeze(\n u_pair_tot + u_bond_tot + u_angle_tot + u_dihedral_tot)\n \n return u_tot\n\ndef data_loader(idx):\n atoms_path = 'data/atoms/' + str(idx.numpy()) + '.npy'\n adjacency_map_path = 'data/adjacency_map/' + str(idx.numpy()) + '.npy'\n energy_path = 'data/energy/' + str(idx.numpy()) + '.npy'\n\n atoms = tf.convert_to_tensor(\n np.load(atoms_path))\n\n adjacency_map = tf.convert_to_tensor(\n np.load(adjacency_map_path))\n\n energy = tf.convert_to_tensor(\n np.load(energy_path))\n\n return atoms, adjacency_map, energy\n \n\n\ntraj = tf.ones(\n shape=(1, 6),\n dtype=tf.float32)\n\nfor atoms, adjacency_map, energy, xyz, jacobian, angles, bonds, torsions,\\\n particle_params, sys\\\n in data_generator():\n\n '''\n q, sigma, epsilon = tf.split(particle_params, 3, 1)\n e_l = bonds[:, 2]\n e_k = bonds[:, 3]\n bond_idxs = tf.cast(bonds[:, :2], tf.int64)\n \n a_l = angles[:, 3]\n a_k = angles[:, 4]\n angle_idxs = tf.cast(angles[:, :3], tf.int64)\n \n # xyz = tf.Variable(xyz * BOHR_TO_ANGSTROM)\n # jacobian = jacobian * HARTREE_PER_BOHR_TO_KCAL_PER_MOL_PER_ANGSTROM\n \n xyz = tf.Variable(xyz)\n\n with tf.GradientTape() as tape:\n u = -params_to_potential(\n q,\n sigma,\n epsilon,\n e_l, e_k,\n a_l, a_k,\n tf.constant([0.0], dtype=tf.float32),\n tf.constant([0.0], dtype=tf.float32),\n bond_idxs, angle_idxs, tf.constant([[0, 0, 0, 0]], dtype=tf.int64),\n xyz)\n\n \n jacobian_hat = tape.gradient(u, xyz)\n '''\n\n for idx in range(sys.getNumForces()):\n force = sys.getForce(idx)\n force.setForceGroup(idx)\n \n context = openmm.Context(sys, openmm.VerletIntegrator(0.001))\n \n context.setPositions(xyz * 1.0)\n\n force = sys.getForce(2)\n force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)\n force.updateParametersInContext(context)\n # print(context.getState(getEnergy=True, groups=1<<2).getPotentialEnergy())\n \n traj = tf.concat(\n [\n traj,\n tf.concat(\n [\n context.getState(\n getVelocities=True,\n getForces=True).getForces(asNumpy=True)._value,\n jacobian\n ],\n axis=1)\n ],\n axis=0)\n\n\n\nnp.save('traj', traj[1:].numpy())\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.tanh",
"tensorflow.boolean_mask",
"tensorflow.Variable",
"tensorflow.keras.regularizers.l2",
"tensorflow.squeeze",
"tensorflow.subtract",
"tensorflow.autograph.set_verbosity",
"numpy.load",
"tensorflow.norm",
"tensorflow.shape",
"tensorflow.reduce_any",
"tensorflow.keras.layers.Dense",
"tensorflow.zeros_like",
"tensorflow.py_function",
"tensorflow.split",
"numpy.array",
"tensorflow.GradientTape",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.math.count_nonzero",
"tensorflow.losses.cosine_similarity",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.zeros",
"tensorflow.cast",
"numpy.mean",
"tensorflow.where",
"tensorflow.boolean_mask",
"numpy.std",
"tensorflow.math.pow",
"tensorflow.shape",
"tensorflow.reduce_any",
"tensorflow.keras.layers.Dense",
"tensorflow.zeros_like",
"tensorflow.one_hot",
"tensorflow.py_function",
"tensorflow.GradientTape",
"tensorflow.losses.mean_squared_error",
"tensorflow.constant",
"tensorflow.math.negative",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.keras.optimizers.Adam"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.squeeze",
"tensorflow.autograph.set_verbosity",
"tensorflow.math.acos",
"numpy.load",
"tensorflow.tile",
"tensorflow.gather_nd",
"tensorflow.shape",
"tensorflow.reduce_any",
"tensorflow.less",
"tensorflow.zeros_like",
"numpy.array",
"tensorflow.math.subtract",
"tensorflow.multiply",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.linalg.band_part",
"tensorflow.ones_like",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.compat.v1.logging.set_verbosity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jacksonwalters/wordchef | [
"3edc2f8d5cbbaa064245ebaae923da68cae6556f"
] | [
"gen_vocab_vecs.py"
] | [
"import spacy, numpy, random, pickle, pandas, sys\nimport sklearn.neighbors as nbs\nfrom spacy.lookups import load_lookups\n\n\nMIN_PROB = -18\n\n#load NLP tool spaCy\nprint(\"Loading spaCy...\")\nnlp=spacy.load(\"en_core_web_lg\")\nprint(\"spaCy loaded.\")\n\n#load lexeme probability table\nlookups = load_lookups(\"en\", [\"lexeme_prob\"])\nnlp.vocab.lookups.add_table(\"lexeme_prob\", lookups.get_table(\"lexeme_prob\"))\n\n#get plaintext words as list from spacy vocab. ensure they have wordvector, are lowercase, and aren't too rare\nprint(\"Total number of words in spaCy vocab=\",len(nlp.vocab.strings))\nprint(\"Getting words...\")\nwords = [word for word in nlp.vocab.strings if nlp.vocab.has_vector(word) and word.islower() and nlp.vocab[word].prob >= MIN_PROB]\nprint(\"Retrieved \",len(words),\"lowercase words with vectors and prob >=.\",MIN_PROB)\n\n#get wordvectors for all words as numpy array\nprint(\"Total number of wordvectors=\",len(nlp.vocab.vectors))\nprint(\"Getting wordvectors...\")\nwordvecs = numpy.array([nlp.vocab.get_vector(word) for word in words])\nprint(\"Retrieved=\",len(wordvecs),\"wordvectors.\")\n\n#ensure the list of words corresponds to the list of wordvectors\nassert len(words) == len(wordvecs)\nspot_check = random.choice(range(0,len(words)))\nassert numpy.array_equal(nlp(words[spot_check]).vector,wordvecs[spot_check])\nprint(\"Spot check passed.\")\n\n#pickle the entire vocab\n#pickle.HIGHEST_PROTOCOL depends on Python version\nwith open('vocab.pkl', 'wb') as f:\n\t\tpickle.dump(words,f,protocol=4)\nprint(\"Dumped vocab words to pickle file vocab.pkl\")\n\n#place all wordvectors in balltree, and pickle entire tree\ntree = nbs.BallTree(wordvecs)\nwith open('balltree.pkl', 'wb') as f:\n\t\tpickle.dump(tree,f,protocol=4)\nprint(\"Dumped wordvector BallTree to pickle file balltree.pkl\")\n\n#create word:vector dict and pickle it\ndict = dict(zip(words,wordvecs))\nwith open('dict.pkl', 'wb') as f:\n\t\tpickle.dump(dict,f,protocol=4)\nprint(\"Dumped word2vec dictionary in dict.pkl\")\n"
] | [
[
"sklearn.neighbors.BallTree"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
naik-aakash/pymatgen | [
"394e0d71bf1d1025fcf75498cbb16aa3f41ce78c",
"394e0d71bf1d1025fcf75498cbb16aa3f41ce78c",
"394e0d71bf1d1025fcf75498cbb16aa3f41ce78c",
"394e0d71bf1d1025fcf75498cbb16aa3f41ce78c"
] | [
"pymatgen/analysis/interfaces/coherent_interfaces.py",
"pymatgen/io/lobster/outputs.py",
"pymatgen/analysis/defects/tests/test_defect_compatibility.py",
"pymatgen/io/abinit/tests/test_abiobjects.py"
] | [
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\"\"\"\nThis module provides classes to store, generate, and manipulate material interfaces.\n\"\"\"\n\nfrom itertools import product\nfrom typing import Iterator, Optional, Tuple, Union\n\nimport numpy as np\nfrom scipy.linalg import polar\n\nfrom pymatgen.analysis.elasticity.strain import Deformation\nfrom pymatgen.analysis.interfaces.zsl import ZSLGenerator, fast_norm\nfrom pymatgen.core import Structure\nfrom pymatgen.core.interface import Interface, label_termination\nfrom pymatgen.core.surface import SlabGenerator\n\nVector3D = Tuple[float, float, float]\nMatrix3D = Tuple[Vector3D, Vector3D, Vector3D]\nMatrix2D = Tuple[Vector3D, Vector3D]\n\n\nclass CoherentInterfaceBuilder:\n \"\"\"\n This class constructs the coherent interfaces between two crystalline slabs\n Coherency is defined by matching lattices not sub-planes.\n \"\"\"\n\n def __init__(\n self,\n substrate_structure: Structure,\n film_structure: Structure,\n film_miller: Tuple[int, int, int],\n substrate_miller: Tuple[int, int, int],\n zslgen: Optional[ZSLGenerator] = None,\n ):\n \"\"\"\n Args:\n substrate_structure: structure of substrate\n film_structure: structure of film\n film_miller: miller index of the film layer\n substrate_miller: miller index for the substrate layer\n zslgen: BiDirectionalZSL if you want custom lattice matching tolerances for coherency\n \"\"\"\n\n # Bulk structures\n self.substrate_structure = substrate_structure\n self.film_structure = film_structure\n self.film_miller = film_miller\n self.substrate_miller = substrate_miller\n self.zslgen = zslgen or ZSLGenerator(bidirectional=True)\n\n self._find_matches()\n self._find_terminations()\n\n def _find_matches(self) -> None:\n \"\"\"\n Finds and stores the ZSL matches\n \"\"\"\n self.zsl_matches = []\n\n film_sg = SlabGenerator(\n self.film_structure,\n self.film_miller,\n min_slab_size=1,\n min_vacuum_size=3,\n in_unit_planes=True,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n sub_sg = SlabGenerator(\n self.substrate_structure,\n self.substrate_miller,\n min_slab_size=1,\n min_vacuum_size=3,\n in_unit_planes=True,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n film_slab = film_sg.get_slab(shift=0)\n sub_slab = sub_sg.get_slab(shift=0)\n\n film_vectors = film_slab.lattice.matrix\n substrate_vectors = sub_slab.lattice.matrix\n\n # Generate all possible interface matches\n self.zsl_matches = list(self.zslgen(film_vectors[:2], substrate_vectors[:2], lowest=False))\n\n for match in self.zsl_matches:\n xform = get_2d_transform(film_vectors, match.film_vectors)\n strain, rot = polar(xform)\n assert np.allclose(\n strain, np.round(strain)\n ), \"Film lattice vectors changed during ZSL match, check your ZSL Generator parameters\"\n\n xform = get_2d_transform(substrate_vectors, match.substrate_vectors)\n strain, rot = polar(xform)\n assert np.allclose(\n strain, strain.astype(int)\n ), \"Substrate lattice vectors changed during ZSL match, check your ZSL Generator parameters\"\n\n def _find_terminations(self):\n \"\"\"\n Finds all terminations\n \"\"\"\n\n film_sg = SlabGenerator(\n self.film_structure,\n self.film_miller,\n min_slab_size=1,\n min_vacuum_size=3,\n in_unit_planes=True,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n sub_sg = 
SlabGenerator(\n self.substrate_structure,\n self.substrate_miller,\n min_slab_size=1,\n min_vacuum_size=3,\n in_unit_planes=True,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n film_slabs = film_sg.get_slabs()\n sub_slabs = sub_sg.get_slabs()\n\n film_shits = [s.shift for s in film_slabs]\n film_terminations = [label_termination(s) for s in film_slabs]\n\n sub_shifts = [s.shift for s in sub_slabs]\n sub_terminations = [label_termination(s) for s in sub_slabs]\n\n self._terminations = {\n (film_label, sub_label): (film_shift, sub_shift)\n for (film_label, film_shift), (sub_label, sub_shift) in product(\n zip(film_terminations, film_shits), zip(sub_terminations, sub_shifts)\n )\n }\n self.terminations = list(self._terminations.keys())\n\n def get_interfaces(\n self,\n termination: Tuple[str, str],\n gap: float = 2.0,\n vacuum_over_film: float = 20.0,\n film_thickness: Union[float, int] = 1,\n substrate_thickness: Union[float, int] = 1,\n in_layers: bool = True,\n ) -> Iterator[Interface]:\n \"\"\"\n Generates interface structures given the film and substrate structure\n as well as the desired terminations\n\n\n Args:\n terminations: termination from self.termination list\n gap: gap between film and substrate\n vacuum_over_film: vacuum over the top of the film\n film_thickness: the film thickness\n substrate_thickness: substrate thickness\n in_layers: set the thickness in layer units\n \"\"\"\n film_sg = SlabGenerator(\n self.film_structure,\n self.film_miller,\n min_slab_size=film_thickness,\n min_vacuum_size=3,\n in_unit_planes=in_layers,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n sub_sg = SlabGenerator(\n self.substrate_structure,\n self.substrate_miller,\n min_slab_size=substrate_thickness,\n min_vacuum_size=3,\n in_unit_planes=in_layers,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n film_shift, sub_shift = self._terminations[termination]\n\n film_slab = film_sg.get_slab(shift=film_shift)\n sub_slab = sub_sg.get_slab(shift=sub_shift)\n\n for match in self.zsl_matches:\n # Build film superlattice\n super_film_transform = np.round(\n from_2d_to_3d(get_2d_transform(film_slab.lattice.matrix[:2], match.film_sl_vectors))\n ).astype(int)\n film_sl_slab = film_slab.copy()\n film_sl_slab.make_supercell(super_film_transform)\n assert np.allclose(\n film_sl_slab.lattice.matrix[2], film_slab.lattice.matrix[2]\n ), \"2D transformation affected C-axis for Film transformation\"\n assert np.allclose(\n film_sl_slab.lattice.matrix[:2], match.film_sl_vectors\n ), \"Transformation didn't make proper supercell for film\"\n\n # Build substrate superlattice\n super_sub_transform = np.round(\n from_2d_to_3d(get_2d_transform(sub_slab.lattice.matrix[:2], match.substrate_sl_vectors))\n ).astype(int)\n sub_sl_slab = sub_slab.copy()\n sub_sl_slab.make_supercell(super_sub_transform)\n assert np.allclose(\n sub_sl_slab.lattice.matrix[2], sub_slab.lattice.matrix[2]\n ), \"2D transformation affected C-axis for Film transformation\"\n assert np.allclose(\n sub_sl_slab.lattice.matrix[:2], match.substrate_sl_vectors\n ), \"Transformation didn't make proper supercell for substrate\"\n\n # Add extra info\n match_dict = match.as_dict()\n interface_properties = {k: match_dict[k] for k in match_dict.keys() if not k.startswith(\"@\")}\n\n dfm = Deformation(match.match_transformation)\n\n strain = 
dfm.green_lagrange_strain\n interface_properties[\"strain\"] = strain\n interface_properties[\"von_mises_strain\"] = strain.von_mises_strain\n interface_properties[\"termination\"] = termination\n interface_properties[\"film_thickness\"] = film_thickness\n interface_properties[\"substrate_thickness\"] = substrate_thickness\n\n yield (\n Interface.from_slabs(\n substrate_slab=sub_sl_slab,\n film_slab=film_sl_slab,\n gap=gap,\n vacuum_over_film=vacuum_over_film,\n interface_properties=interface_properties,\n )\n )\n\n\ndef get_rot_3d_for_2d(film_matrix, sub_matrix) -> np.ndarray:\n \"\"\"\n Finds a trasnformation matrix that will rotate and strain the film to the subtrate while preserving the c-axis\n \"\"\"\n film_matrix = np.array(film_matrix)\n film_matrix = film_matrix.tolist()[:2]\n film_matrix.append(np.cross(film_matrix[0], film_matrix[1]))\n\n # Generate 3D lattice vectors for substrate super lattice\n # Out of plane substrate super lattice has to be same length as\n # Film out of plane vector to ensure no extra deformation in that\n # direction\n sub_matrix = np.array(sub_matrix)\n sub_matrix = sub_matrix.tolist()[:2]\n temp_sub = np.cross(sub_matrix[0], sub_matrix[1])\n temp_sub = temp_sub / fast_norm(temp_sub)\n temp_sub = temp_sub * fast_norm(film_matrix[2])\n sub_matrix.append(temp_sub)\n\n transform_matrix = np.transpose(np.linalg.solve(film_matrix, sub_matrix))\n\n rot, _ = polar(transform_matrix)\n\n return rot\n\n\ndef get_2d_transform(start: np.ndarray, end: np.ndarray) -> np.ndarray:\n \"\"\"\n Gets a 2d transformation matrix\n that converts start to end\n \"\"\"\n return np.dot(end, np.linalg.pinv(start))\n\n\ndef from_2d_to_3d(mat: np.ndarray) -> np.ndarray:\n \"\"\"Converts a 2D matrix to a 3D matrix\"\"\"\n new_mat = np.diag([1.0, 1.0, 1.0])\n new_mat[:2, :2] = mat\n return new_mat\n",
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License\n\n\"\"\"\nModule for reading Lobster output files. For more information\non LOBSTER see www.cohp.de.\n\"\"\"\n\nimport collections\nimport fnmatch\nimport os\nimport re\nimport warnings\nfrom collections import defaultdict\nfrom typing import Any, Dict, List, Optional\n\nimport numpy as np\nfrom monty.io import zopen\n\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.electronic_structure.bandstructure import LobsterBandStructureSymmLine\nfrom pymatgen.electronic_structure.core import Orbital, Spin\nfrom pymatgen.electronic_structure.dos import Dos, LobsterCompleteDos\nfrom pymatgen.io.vasp.inputs import Kpoints\nfrom pymatgen.io.vasp.outputs import Vasprun, VolumetricData\n\n__author__ = \"Janine George, Marco Esters\"\n__copyright__ = \"Copyright 2017, The Materials Project\"\n__version__ = \"0.2\"\n__maintainer__ = \"Janine George, Marco Esters \"\n__email__ = \"[email protected], [email protected]\"\n__date__ = \"Dec 13, 2017\"\n\nMODULE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass Cohpcar:\n \"\"\"\n Class to read COHPCAR/COOPCAR files generated by LOBSTER.\n\n .. attribute: cohp_data\n\n Dict that contains the COHP data of the form:\n {bond: {\"COHP\": {Spin.up: cohps, Spin.down:cohps},\n \"ICOHP\": {Spin.up: icohps, Spin.down: icohps},\n \"length\": bond length,\n \"sites\": sites corresponding to the bond}\n Also contains an entry for the average, which does not have\n a \"length\" key.\n\n .. attribute: efermi\n\n The Fermi energy in eV.\n\n .. attribute: energies\n\n Sequence of energies in eV. Note that LOBSTER shifts the energies\n so that the Fermi energy is at zero.\n\n .. attribute: is_spin_polarized\n\n Boolean to indicate if the calculation is spin polarized.\n\n .. attribute: orb_res_cohp\n\n orb_cohp[label] = {bond_data[\"orb_label\"]: {\"COHP\": {Spin.up: cohps, Spin.down:cohps},\n \"ICOHP\": {Spin.up: icohps, Spin.down: icohps},\n \"orbitals\": orbitals,\n \"length\": bond lengths,\n \"sites\": sites corresponding to the bond}}\n\n \"\"\"\n\n def __init__(self, are_coops: bool = False, are_cobis: bool = False, filename: str = None):\n \"\"\"\n Args:\n are_coops: Determines if the file is a list of COHPs or COOPs.\n Default is False for COHPs.\n are_cobis: Determines if the file is a list of COHPs or COOPs.\n Default is False for COHPs.\n\n filename: Name of the COHPCAR file. If it is None, the default\n file name will be chosen, depending on the value of are_coops.\n \"\"\"\n if are_coops and are_cobis:\n raise ValueError(\"You cannot have info about COOPs and COBIs in the same file.\")\n self.are_coops = are_coops\n self.are_cobis = are_cobis\n if filename is None:\n if are_coops:\n filename = \"COOPCAR.lobster\"\n elif are_cobis:\n filename = \"COBICAR.lobster\"\n else:\n filename = \"COHPCAR.lobster\"\n\n with zopen(filename, \"rt\") as f:\n contents = f.read().split(\"\\n\")\n\n # The parameters line is the second line in a COHPCAR file. 
It\n # contains all parameters that are needed to map the file.\n parameters = contents[1].split()\n # Subtract 1 to skip the average\n num_bonds = int(parameters[0]) - 1\n self.efermi = float(parameters[-1])\n if int(parameters[1]) == 2:\n spins = [Spin.up, Spin.down]\n self.is_spin_polarized = True\n else:\n spins = [Spin.up]\n self.is_spin_polarized = False\n\n # The COHP data start in row num_bonds + 3\n data = np.array([np.array(row.split(), dtype=float) for row in contents[num_bonds + 3 :]]).transpose()\n data = np.array([np.array(row.split(), dtype=float) for row in contents[num_bonds + 3 :]]).transpose()\n self.energies = data[0]\n cohp_data = {\n \"average\": {\n \"COHP\": {spin: data[1 + 2 * s * (num_bonds + 1)] for s, spin in enumerate(spins)},\n \"ICOHP\": {spin: data[2 + 2 * s * (num_bonds + 1)] for s, spin in enumerate(spins)},\n }\n } # type: Dict[Any, Any]\n\n orb_cohp = {} # type: Dict[str, Any]\n # present for Lobster versions older than Lobster 2.2.0\n veryold = False\n # the labeling had to be changed: there are more than one COHP for each atom combination\n # this is done to make the labeling consistent with ICOHPLIST.lobster\n bondnumber = 0\n for bond in range(num_bonds):\n bond_data = self._get_bond_data(contents[3 + bond])\n\n label = str(bondnumber)\n\n orbs = bond_data[\"orbitals\"]\n cohp = {spin: data[2 * (bond + s * (num_bonds + 1)) + 3] for s, spin in enumerate(spins)}\n\n icohp = {spin: data[2 * (bond + s * (num_bonds + 1)) + 4] for s, spin in enumerate(spins)}\n if orbs is None:\n bondnumber = bondnumber + 1\n label = str(bondnumber)\n cohp_data[label] = {\n \"COHP\": cohp,\n \"ICOHP\": icohp,\n \"length\": bond_data[\"length\"],\n \"sites\": bond_data[\"sites\"],\n }\n\n elif label in orb_cohp:\n orb_cohp[label].update(\n {\n bond_data[\"orb_label\"]: {\n \"COHP\": cohp,\n \"ICOHP\": icohp,\n \"orbitals\": orbs,\n \"length\": bond_data[\"length\"],\n \"sites\": bond_data[\"sites\"],\n }\n }\n )\n else:\n # present for Lobster versions older than Lobster 2.2.0\n if bondnumber == 0:\n veryold = True\n if veryold:\n bondnumber += 1\n label = str(bondnumber)\n\n orb_cohp[label] = {\n bond_data[\"orb_label\"]: {\n \"COHP\": cohp,\n \"ICOHP\": icohp,\n \"orbitals\": orbs,\n \"length\": bond_data[\"length\"],\n \"sites\": bond_data[\"sites\"],\n }\n }\n\n # present for lobster older than 2.2.0\n if veryold:\n for bond_str in orb_cohp:\n cohp_data[bond_str] = {\n \"COHP\": None,\n \"ICOHP\": None,\n \"length\": bond_data[\"length\"],\n \"sites\": bond_data[\"sites\"],\n }\n\n self.orb_res_cohp = orb_cohp if orb_cohp else None\n self.cohp_data = cohp_data\n\n @staticmethod\n def _get_bond_data(line: str) -> dict:\n \"\"\"\n Subroutine to extract bond label, site indices, and length from\n a LOBSTER header line. 
The site indices are zero-based, so they\n can be easily used with a Structure object.\n\n Example header line: No.4:Fe1->Fe9(2.4524893531900283)\n Example header line for orbtial-resolved COHP:\n No.1:Fe1[3p_x]->Fe2[3d_x^2-y^2](2.456180552772262)\n\n Args:\n line: line in the COHPCAR header describing the bond.\n\n Returns:\n Dict with the bond label, the bond length, a tuple of the site\n indices, a tuple containing the orbitals (if orbital-resolved),\n and a label for the orbitals (if orbital-resolved).\n \"\"\"\n\n orb_labs = [\n \"s\",\n \"p_y\",\n \"p_z\",\n \"p_x\",\n \"d_xy\",\n \"d_yz\",\n \"d_z^2\",\n \"d_xz\",\n \"d_x^2-y^2\",\n \"f_y(3x^2-y^2)\",\n \"f_xyz\",\n \"f_yz^2\",\n \"f_z^3\",\n \"f_xz^2\",\n \"f_z(x^2-y^2)\",\n \"f_x(x^2-3y^2)\",\n ]\n\n line_new = line.rsplit(\"(\", 1)\n # bondnumber = line[0].replace(\"->\", \":\").replace(\".\", \":\").split(':')[1]\n length = float(line_new[-1][:-1])\n\n sites = line_new[0].replace(\"->\", \":\").split(\":\")[1:3]\n site_indices = tuple(int(re.split(r\"\\D+\", site)[1]) - 1 for site in sites)\n\n # species = tuple(re.split(r\"\\d+\", site)[0] for site in sites)\n if \"[\" in sites[0]:\n orbs = [re.findall(r\"\\[(.*)\\]\", site)[0] for site in sites]\n orbitals = [tuple((int(orb[0]), Orbital(orb_labs.index(orb[1:])))) for orb in orbs] # type: Any\n orb_label = \"%d%s-%d%s\" % (\n orbitals[0][0],\n orbitals[0][1].name,\n orbitals[1][0],\n orbitals[1][1].name,\n ) # type: Any\n\n else:\n orbitals = None\n orb_label = None\n\n # a label based on the species alone is not feasible, there can be more than one bond for each atom combination\n # label = \"%s\" % (bondnumber)\n\n bond_data = {\n \"length\": length,\n \"sites\": site_indices,\n \"orbitals\": orbitals,\n \"orb_label\": orb_label,\n }\n return bond_data\n\n\nclass Icohplist:\n \"\"\"\n Class to read ICOHPLIST/ICOOPLIST files generated by LOBSTER.\n\n .. attribute: are_coops\n Boolean to indicate if the populations are COOPs or COHPs.\n\n .. attribute: is_spin_polarized\n Boolean to indicate if the calculation is spin polarized.\n\n .. attribute: Icohplist\n Dict containing the listfile data of the form:\n {bond: \"length\": bond length,\n \"number_of_bonds\": number of bonds\n \"icohp\": {Spin.up: ICOHP(Ef) spin up, Spin.down: ...}}\n\n .. attribute: IcohpCollection\n IcohpCollection Object\n\n \"\"\"\n\n def __init__(self, are_coops: bool = False, are_cobis: bool = False, filename: str = None):\n \"\"\"\n Args:\n are_coops: Determines if the file is a list of ICOOPs.\n Defaults to False for ICOHPs.\n are_cobis: Determines if the file is a list of ICOBIs.\n Defaults to False for ICOHPs.\n filename: Name of the ICOHPLIST file. 
If it is None, the default\n file name will be chosen, depending on the value of are_coops.\n \"\"\"\n if are_coops and are_cobis:\n raise ValueError(\"You cannot have info about COOPs and COBIs in the same file.\")\n self.are_coops = are_coops\n self.are_cobis = are_cobis\n if filename is None:\n if are_coops:\n filename = \"ICOOPLIST.lobster\"\n elif are_cobis:\n filename = \"ICOBILIST.lobster\"\n else:\n filename = \"ICOHPLIST.lobster\"\n\n # LOBSTER list files have an extra trailing blank line\n # and we don't need the header.\n with zopen(filename, \"rt\") as f:\n data = f.read().split(\"\\n\")[1:-1]\n if len(data) == 0:\n raise OSError(\"ICOHPLIST file contains no data.\")\n\n # Which Lobster version?\n if len(data[0].split()) == 8:\n version = \"3.1.1\"\n elif len(data[0].split()) == 6:\n version = \"2.2.1\"\n warnings.warn(\"Please consider using the new Lobster version. See www.cohp.de.\")\n else:\n raise ValueError\n\n # If the calculation is spin polarized, the line in the middle\n # of the file will be another header line.\n if \"distance\" in data[len(data) // 2]:\n # TODO: adapt this for orbitalwise stuff\n self.is_spin_polarized = True\n else:\n self.is_spin_polarized = False\n\n # check if orbitalwise ICOHPLIST\n # include case when there is only one ICOHP!!!\n if len(data) > 2 and \"_\" in data[2].split()[1]:\n self.orbitalwise = True\n warnings.warn(\"This is an orbitalwise IC**LIST.lobter. Currently, the orbitalwise information is not read!\")\n else:\n self.orbitalwise = False\n\n if self.orbitalwise:\n data_without_orbitals = []\n for line in data:\n if \"_\" not in line.split()[1]:\n data_without_orbitals.append(line)\n else:\n data_without_orbitals = data\n\n if \"distance\" in data_without_orbitals[len(data_without_orbitals) // 2]:\n # TODO: adapt this for orbitalwise stuff\n num_bonds = len(data_without_orbitals) // 2\n if num_bonds == 0:\n raise OSError(\"ICOHPLIST file contains no data.\")\n else:\n num_bonds = len(data_without_orbitals)\n\n list_labels = []\n list_atom1 = []\n list_atom2 = []\n list_length = []\n list_translation = []\n list_num = []\n list_icohp = []\n for bond in range(num_bonds):\n line = data_without_orbitals[bond].split()\n icohp = {}\n if version == \"2.2.1\":\n label = f\"{line[0]}\"\n atom1 = str(line[1])\n atom2 = str(line[2])\n length = float(line[3])\n icohp[Spin.up] = float(line[4])\n num = int(line[5])\n translation = [0, 0, 0]\n if self.is_spin_polarized:\n icohp[Spin.down] = float(data_without_orbitals[bond + num_bonds + 1].split()[4])\n\n elif version == \"3.1.1\":\n label = f\"{line[0]}\"\n atom1 = str(line[1])\n atom2 = str(line[2])\n length = float(line[3])\n translation = [int(line[4]), int(line[5]), int(line[6])]\n icohp[Spin.up] = float(line[7])\n num = int(1)\n\n if self.is_spin_polarized:\n icohp[Spin.down] = float(data_without_orbitals[bond + num_bonds + 1].split()[7])\n\n list_labels.append(label)\n list_atom1.append(atom1)\n list_atom2.append(atom2)\n list_length.append(length)\n list_translation.append(translation)\n list_num.append(num)\n list_icohp.append(icohp)\n\n # TODO: add functions to get orbital resolved iCOHPs\n\n # to avoid circular dependencies\n from pymatgen.electronic_structure.cohp import IcohpCollection\n\n self._icohpcollection = IcohpCollection(\n are_coops=are_coops,\n list_labels=list_labels,\n list_atom1=list_atom1,\n list_atom2=list_atom2,\n list_length=list_length,\n list_translation=list_translation,\n list_num=list_num,\n list_icohp=list_icohp,\n is_spin_polarized=self.is_spin_polarized,\n 
)\n\n @property\n def icohplist(self) -> Dict[Any, Dict[str, Any]]:\n \"\"\"\n Returns: icohplist compatible with older version of this class\n \"\"\"\n icohplist_new = {}\n for key, value in self._icohpcollection._icohplist.items():\n icohplist_new[key] = {\n \"length\": value._length,\n \"number_of_bonds\": value._num,\n \"icohp\": value._icohp,\n \"translation\": value._translation,\n }\n return icohplist_new\n\n @property\n def icohpcollection(self):\n \"\"\"\n Returns: IcohpCollection object\n \"\"\"\n return self._icohpcollection\n\n\nclass Doscar:\n \"\"\"\n Class to deal with Lobster's projected DOS and local projected DOS.\n The beforehand quantum-chemical calculation was performed with VASP\n\n .. attribute:: completedos\n\n LobsterCompleteDos Object\n\n .. attribute:: pdos\n List of Dict including numpy arrays with pdos. Access as pdos[atomindex]['orbitalstring']['Spin.up/Spin.down']\n\n .. attribute:: tdos\n Dos Object of the total density of states\n\n .. attribute:: energies\n numpy array of the energies at which the DOS was calculated (in eV, relative to Efermi)\n\n .. attribute:: tdensities\n tdensities[Spin.up]: numpy array of the total density of states for the Spin.up contribution at each of the\n energies\n tdensities[Spin.down]: numpy array of the total density of states for the Spin.down contribution at each of the\n energies\n\n if is_spin_polarized=False:\n tdensities[Spin.up]: numpy array of the total density of states\n\n\n .. attribute:: itdensities:\n itdensities[Spin.up]: numpy array of the total density of states for the Spin.up contribution at each of the\n energies\n itdensities[Spin.down]: numpy array of the total density of states for the Spin.down contribution at each of the\n energies\n\n if is_spin_polarized=False:\n itdensities[Spin.up]: numpy array of the total density of states\n\n\n .. attribute:: is_spin_polarized\n Boolean. Tells if the system is spin polarized\n \"\"\"\n\n def __init__(\n self,\n doscar: str = \"DOSCAR.lobster\",\n structure_file: str = \"POSCAR\",\n dftprogram: str = \"Vasp\",\n ):\n \"\"\"\n Args:\n doscar: DOSCAR filename, typically \"DOSCAR.lobster\"\n structure_file: for vasp, this is typically \"POSCAR\"\n dftprogram: so far only \"vasp\" is implemented\n \"\"\"\n self._doscar = doscar\n if dftprogram == \"Vasp\":\n self._final_structure = Structure.from_file(structure_file)\n\n self._parse_doscar()\n\n def _parse_doscar(self):\n doscar = self._doscar\n\n tdensities = {}\n itdensities = {}\n with zopen(doscar, \"rt\") as f:\n natoms = int(f.readline().split()[0])\n efermi = float([f.readline() for nn in range(4)][3].split()[17])\n dos = []\n orbitals = []\n for atom in range(natoms + 1):\n line = f.readline()\n ndos = int(line.split()[2])\n orbitals.append(line.split(\";\")[-1].split())\n line = f.readline().split()\n cdos = np.zeros((ndos, len(line)))\n cdos[0] = np.array(line)\n for nd in range(1, ndos):\n line = f.readline().split()\n cdos[nd] = np.array(line)\n dos.append(cdos)\n doshere = np.array(dos[0])\n if len(doshere[0, :]) == 5:\n self._is_spin_polarized = True\n elif len(doshere[0, :]) == 3:\n self._is_spin_polarized = False\n else:\n raise ValueError(\"There is something wrong with the DOSCAR. 
Can't extract spin polarization.\")\n energies = doshere[:, 0]\n if not self._is_spin_polarized:\n tdensities[Spin.up] = doshere[:, 1]\n itdensities[Spin.up] = doshere[:, 2]\n pdoss = []\n spin = Spin.up\n for atom in range(natoms):\n pdos = defaultdict(dict)\n data = dos[atom + 1]\n _, ncol = data.shape\n orbnumber = 0\n for j in range(1, ncol):\n orb = orbitals[atom + 1][orbnumber]\n pdos[orb][spin] = data[:, j]\n orbnumber = orbnumber + 1\n pdoss.append(pdos)\n else:\n tdensities[Spin.up] = doshere[:, 1]\n tdensities[Spin.down] = doshere[:, 2]\n itdensities[Spin.up] = doshere[:, 3]\n itdensities[Spin.down] = doshere[:, 4]\n pdoss = []\n for atom in range(natoms):\n pdos = defaultdict(dict)\n data = dos[atom + 1]\n _, ncol = data.shape\n orbnumber = 0\n for j in range(1, ncol):\n if j % 2 == 0:\n spin = Spin.down\n else:\n spin = Spin.up\n orb = orbitals[atom + 1][orbnumber]\n pdos[orb][spin] = data[:, j]\n if j % 2 == 0:\n orbnumber = orbnumber + 1\n pdoss.append(pdos)\n\n self._efermi = efermi\n self._pdos = pdoss\n self._tdos = Dos(efermi, energies, tdensities)\n self._energies = energies\n self._tdensities = tdensities\n self._itdensities = itdensities\n final_struct = self._final_structure\n\n pdossneu = {final_struct[i]: pdos for i, pdos in enumerate(self._pdos)}\n\n self._completedos = LobsterCompleteDos(final_struct, self._tdos, pdossneu)\n\n @property\n def completedos(self) -> LobsterCompleteDos:\n \"\"\"\n :return: CompleteDos\n \"\"\"\n return self._completedos\n\n @property\n def pdos(self) -> list:\n \"\"\"\n :return: Projected DOS\n \"\"\"\n return self._pdos\n\n @property\n def tdos(self) -> Dos:\n \"\"\"\n :return: Total DOS\n \"\"\"\n return self._tdos\n\n @property\n def energies(self) -> np.ndarray:\n \"\"\"\n :return: Energies\n \"\"\"\n return self._energies\n\n @property\n def tdensities(self) -> np.ndarray:\n \"\"\"\n :return: total densities as a np.ndarray\n \"\"\"\n return self._tdensities\n\n @property\n def itdensities(self) -> np.ndarray:\n \"\"\"\n :return: integrated total densities as a np.ndarray\n \"\"\"\n return self._itdensities\n\n @property\n def is_spin_polarized(self) -> bool:\n \"\"\"\n :return: Whether run is spin polarized.\n \"\"\"\n return self._is_spin_polarized\n\n\nclass Charge:\n \"\"\"\n Class to read CHARGE files generated by LOBSTER\n\n .. attribute: atomlist\n List of atoms in CHARGE.lobster\n .. attribute: types\n List of types of atoms in CHARGE.lobster\n .. attribute: Mulliken\n List of Mulliken charges of atoms in CHARGE.lobster\n .. attribute: Loewdin\n List of Loewdin charges of atoms in CHARGE.Loewdin\n .. 
attribute: num_atoms\n Number of atoms in CHARGE.lobster\n\n \"\"\"\n\n def __init__(self, filename: str = \"CHARGE.lobster\"):\n \"\"\"\n Args:\n filename: filename for the CHARGE file, typically \"CHARGE.lobster\"\n \"\"\"\n with zopen(filename, \"rt\") as f:\n data = f.read().split(\"\\n\")[3:-3]\n if len(data) == 0:\n raise OSError(\"CHARGES file contains no data.\")\n\n self.num_atoms = len(data)\n self.atomlist = [] # type: List[str]\n self.types = [] # type: List[str]\n self.Mulliken = [] # type: List[float]\n self.Loewdin = [] # type: List[float]\n for atom in range(0, self.num_atoms):\n line = data[atom].split()\n self.atomlist.append(line[1] + line[0])\n self.types.append(line[1])\n self.Mulliken.append(float(line[2]))\n self.Loewdin.append(float(line[3]))\n\n def get_structure_with_charges(self, structure_filename):\n \"\"\"\n get a Structure with Mulliken and Loewdin charges as site properties\n Args:\n structure_filename: filename of POSCAR\n Returns:\n Structure Object with Mulliken and Loewdin charges as site properties\n \"\"\"\n\n struct = Structure.from_file(structure_filename)\n Mulliken = self.Mulliken\n Loewdin = self.Loewdin\n site_properties = {\"Mulliken Charges\": Mulliken, \"Loewdin Charges\": Loewdin}\n new_struct = struct.copy(site_properties=site_properties)\n return new_struct\n\n\nclass Lobsterout:\n \"\"\"\n Class to read in the lobsterout and evaluate the spilling, save the basis, save warnings, save infos\n\n .. attribute: basis_functions\n list of basis functions that were used in lobster run as strings\n\n .. attribute: basis_type\n list of basis type that were used in lobster run as strings\n\n .. attribute: chargespilling\n list of charge spilling (first entry: result for spin 1, second entry: result for spin 2 or not present)\n\n .. attribute: dftprogram\n string representing the dft program used for the calculation of the wave function\n\n .. attribute: elements\n list of strings of elements that were present in lobster calculation\n\n .. attribute: has_CHARGE\n Boolean, indicates that CHARGE.lobster is present\n\n .. attribute: has_COHPCAR\n Boolean, indicates that COHPCAR.lobster and ICOHPLIST.lobster are present\n\n .. attribute: has_madelung\n Boolean, indicates that SitePotentials.lobster and MadelungEnergies.lobster are present\n\n .. attribute: has_COOPCAR\n Boolean, indicates that COOPCAR.lobster and ICOOPLIST.lobster are present\n\n .. attribute: has_COBICAR\n Boolean, indicates that COBICAR.lobster and ICOBILIST.lobster are present\n\n .. attribute: has_DOSCAR\n Boolean, indicates that DOSCAR.lobster is present\n\n .. attribute: has_Projection\n Boolean, indicates that projectionData.lobster is present\n\n .. attribute: has_bandoverlaps\n Boolean, indicates that bandOverlaps.lobster is present\n\n .. attribute: has_density_of_energies\n Boolean, indicates that DensityOfEnergy.lobster is present\n\n .. attribute: has_fatbands\n Boolean, indicates that fatband calculation was performed\n\n .. attribute: has_grosspopulation\n Boolean, indicates that GROSSPOP.lobster is present\n\n .. attribute: info_lines\n string with additional infos on the run\n\n .. attribute: info_orthonormalization\n string with infos on orthonormalization\n\n .. attribute: is_restart_from_projection\n Boolean that indicates that calculation was restartet from existing projection file\n\n .. attribute: lobster_version\n string that indicates Lobster version\n\n .. attribute: number_of_spins\n Integer indicating the number of spins\n\n .. 
attribute: number_of_threads\n integer that indicates how many threads were used\n\n .. attribute: timing\n dict with infos on timing\n\n .. attribute: totalspilling\n list of values indicating the total spilling for spin channel 1 (and spin channel 2)\n\n .. attribute: warninglines\n string with all warnings\n\n \"\"\"\n\n # TODO: add tests for skipping COBI and madelung\n # TODO: add tests for including COBI and madelung\n def __init__(self, filename=\"lobsterout\"):\n \"\"\"\n Args:\n filename: filename of lobsterout\n \"\"\"\n # read in file\n with zopen(filename, \"rt\") as f:\n data = f.read().split(\"\\n\") # [3:-3]\n if len(data) == 0:\n raise OSError(\"lobsterout does not contain any data\")\n\n # check if Lobster starts from a projection\n self.is_restart_from_projection = \"loading projection from projectionData.lobster...\" in data\n\n self.lobster_version = self._get_lobster_version(data=data)\n\n self.number_of_threads = int(self._get_threads(data=data))\n self.dftprogram = self._get_dft_program(data=data)\n\n self.number_of_spins = self._get_number_of_spins(data=data)\n chargespilling, totalspilling = self._get_spillings(data=data, number_of_spins=self.number_of_spins)\n self.chargespilling = chargespilling\n self.totalspilling = totalspilling\n\n (\n elements,\n basistype,\n basisfunctions,\n ) = self._get_elements_basistype_basisfunctions(data=data)\n self.elements = elements\n self.basis_type = basistype\n self.basis_functions = basisfunctions\n\n wall_time, user_time, sys_time = self._get_timing(data=data)\n timing = {}\n timing[\"walltime\"] = wall_time\n timing[\"usertime\"] = user_time\n timing[\"sys_time\"] = sys_time\n self.timing = timing\n\n warninglines = self._get_all_warning_lines(data=data)\n self.warninglines = warninglines\n\n orthowarning = self._get_warning_orthonormalization(data=data)\n self.info_orthonormalization = orthowarning\n\n infos = self._get_all_info_lines(data=data)\n self.info_lines = infos\n\n self.has_DOSCAR = \"writing DOSCAR.lobster...\" in data and \"SKIPPING writing DOSCAR.lobster...\" not in data\n self.has_COHPCAR = (\n \"writing COOPCAR.lobster and ICOOPLIST.lobster...\" in data\n and \"SKIPPING writing COOPCAR.lobster and ICOOPLIST.lobster...\" not in data\n )\n self.has_COOPCAR = (\n \"writing COHPCAR.lobster and ICOHPLIST.lobster...\" in data\n and \"SKIPPING writing COHPCAR.lobster and ICOHPLIST.lobster...\" not in data\n )\n self.has_COBICAR = (\n \"writing COBICAR.lobster and ICOBILIST.lobster...\" in data\n and \"SKIPPING writing COBICAR.lobster and ICOBILIST.lobster...\" not in data\n )\n\n self.has_CHARGE = \"SKIPPING writing CHARGE.lobster...\" not in data\n self.has_Projection = \"saving projection to projectionData.lobster...\" in data\n self.has_bandoverlaps = \"WARNING: I dumped the band overlap matrices to the file bandOverlaps.lobster.\" in data\n self.has_fatbands = self._has_fatband(data=data)\n self.has_grosspopulation = \"writing CHARGE.lobster and GROSSPOP.lobster...\" in data\n self.has_density_of_energies = \"writing DensityOfEnergy.lobster...\" in data\n self.has_madelung = (\n \"writing SitePotentials.lobster and MadelungEnergies.lobster...\" in data\n and \"skipping writing SitePotentials.lobster and MadelungEnergies.lobster...\" not in data\n )\n\n def get_doc(self):\n \"\"\"\n\n Returns: LobsterDict with all the information stored in lobsterout\n \"\"\"\n LobsterDict = {}\n # check if Lobster starts from a projection\n LobsterDict[\"restart_from_projection\"] = self.is_restart_from_projection\n 
LobsterDict[\"lobster_version\"] = self.lobster_version\n LobsterDict[\"threads\"] = self.number_of_threads\n LobsterDict[\"Dftprogram\"] = self.dftprogram\n\n LobsterDict[\"chargespilling\"] = self.chargespilling\n LobsterDict[\"totalspilling\"] = self.totalspilling\n\n LobsterDict[\"elements\"] = self.elements\n LobsterDict[\"basistype\"] = self.basis_type\n LobsterDict[\"basisfunctions\"] = self.basis_functions\n\n LobsterDict[\"timing\"] = self.timing\n\n LobsterDict[\"warnings\"] = self.warninglines\n\n LobsterDict[\"orthonormalization\"] = self.info_orthonormalization\n\n LobsterDict[\"infos\"] = self.info_lines\n\n LobsterDict[\"hasDOSCAR\"] = self.has_DOSCAR\n LobsterDict[\"hasCOHPCAR\"] = self.has_COHPCAR\n LobsterDict[\"hasCOOPCAR\"] = self.has_COOPCAR\n LobsterDict[\"hasCOBICAR\"] = self.has_COBICAR\n LobsterDict[\"hasCHARGE\"] = self.has_CHARGE\n LobsterDict[\"hasmadelung\"] = self.has_madelung\n LobsterDict[\"hasProjection\"] = self.has_Projection\n LobsterDict[\"hasbandoverlaps\"] = self.has_bandoverlaps\n LobsterDict[\"hasfatband\"] = self.has_fatbands\n LobsterDict[\"hasGrossPopuliation\"] = self.has_grosspopulation\n LobsterDict[\"hasDensityOfEnergies\"] = self.has_density_of_energies\n\n return LobsterDict\n\n @staticmethod\n def _get_lobster_version(data):\n for row in data:\n splitrow = row.split()\n if len(splitrow) > 1:\n if splitrow[0] == \"LOBSTER\":\n return splitrow[1]\n raise RuntimeError(\"Version not found.\")\n\n @staticmethod\n def _has_fatband(data):\n for row in data:\n splitrow = row.split()\n if len(splitrow) > 1:\n if splitrow[1] == \"FatBand\":\n return True\n return False\n\n @staticmethod\n def _get_dft_program(data):\n for row in data:\n splitrow = row.split()\n if len(splitrow) > 4:\n if splitrow[3] == \"program...\":\n return splitrow[4]\n return None\n\n @staticmethod\n def _get_number_of_spins(data):\n if \"spillings for spin channel 2\" in data:\n return 2\n return 1\n\n @staticmethod\n def _get_threads(data):\n for row in data:\n splitrow = row.split()\n if len(splitrow) > 11:\n if (splitrow[11]) == \"threads\" or (splitrow[11] == \"thread\"):\n return splitrow[10]\n raise ValueError(\"Threads not found.\")\n\n @staticmethod\n def _get_spillings(data, number_of_spins):\n charge_spilling = []\n total_spilling = []\n for row in data:\n splitrow = row.split()\n if len(splitrow) > 2:\n if splitrow[2] == \"spilling:\":\n if splitrow[1] == \"charge\":\n charge_spilling.append(np.float_(splitrow[3].replace(\"%\", \"\")) / 100.0)\n if splitrow[1] == \"total\":\n total_spilling.append(np.float_(splitrow[3].replace(\"%\", \"\")) / 100.0)\n\n if len(charge_spilling) == number_of_spins and len(total_spilling) == number_of_spins:\n break\n\n return charge_spilling, total_spilling\n\n @staticmethod\n def _get_elements_basistype_basisfunctions(data):\n begin = False\n end = False\n elements = []\n basistype = []\n basisfunctions = []\n for row in data:\n\n if begin and not end:\n splitrow = row.split()\n if splitrow[0] not in [\n \"INFO:\",\n \"WARNING:\",\n \"setting\",\n \"calculating\",\n \"post-processing\",\n \"saving\",\n \"spillings\",\n \"writing\",\n ]:\n\n elements.append(splitrow[0])\n basistype.append(splitrow[1].replace(\"(\", \"\").replace(\")\", \"\"))\n # last sign is a ''\n basisfunctions.append(splitrow[2:])\n else:\n end = True\n if \"setting up local basis functions...\" in row:\n begin = True\n return elements, basistype, basisfunctions\n\n @staticmethod\n def _get_timing(data):\n # will give back wall, user and sys time\n begin = False\n 
# end=False\n # time=[]\n\n for row in data:\n splitrow = row.split()\n if \"finished\" in splitrow:\n begin = True\n if begin:\n if \"wall\" in splitrow:\n wall_time = splitrow[2:10]\n if \"user\" in splitrow:\n user_time = splitrow[0:8]\n if \"sys\" in splitrow:\n sys_time = splitrow[0:8]\n\n wall_time_dict = {\n \"h\": wall_time[0],\n \"min\": wall_time[2],\n \"s\": wall_time[4],\n \"ms\": wall_time[6],\n }\n user_time_dict = {\n \"h\": user_time[0],\n \"min\": user_time[2],\n \"s\": user_time[4],\n \"ms\": user_time[6],\n }\n sys_time_dict = {\n \"h\": sys_time[0],\n \"min\": sys_time[2],\n \"s\": sys_time[4],\n \"ms\": sys_time[6],\n }\n\n return wall_time_dict, user_time_dict, sys_time_dict\n\n @staticmethod\n def _get_warning_orthonormalization(data):\n orthowarning = []\n for row in data:\n splitrow = row.split()\n if \"orthonormalized\" in splitrow:\n orthowarning.append(\" \".join(splitrow[1:]))\n return orthowarning\n\n @staticmethod\n def _get_all_warning_lines(data):\n ws = []\n for row in data:\n splitrow = row.split()\n if len(splitrow) > 0:\n if splitrow[0] == \"WARNING:\":\n ws.append(\" \".join(splitrow[1:]))\n return ws\n\n @staticmethod\n def _get_all_info_lines(data):\n infos = []\n for row in data:\n splitrow = row.split()\n if len(splitrow) > 0:\n if splitrow[0] == \"INFO:\":\n infos.append(\" \".join(splitrow[1:]))\n return infos\n\n\nclass Fatband:\n \"\"\"\n Reads in FATBAND_x_y.lobster files\n\n .. attribute: efermi\n\n efermi that was read in from vasprun.xml\n\n .. attribute: eigenvals\n {Spin.up:[][],Spin.down:[][]}, the first index of the array\n [][] refers to the band and the second to the index of the\n kpoint. The kpoints are ordered according to the order of the\n kpoints array. If the band structure is not spin polarized, we\n only store one data set under Spin.up.\n\n .. attribute: is_spinpolarized\n\n Boolean that tells you whether this was a spin-polarized calculation\n\n .. attribute: kpoints_array\n\n list of kpoint as numpy arrays, in frac_coords of the given lattice by default\n\n .. attribute: label_dict\n\n (dict) of {} this link a kpoint (in frac coords or cartesian coordinates depending on the coords).\n\n .. attribute: lattice\n\n lattice object of reciprocal lattice as read in from vasprun.xml\n\n .. attribute: nbands\n\n number of bands used in the calculation\n\n .. attribute: p_eigenvals\n\n dict of orbital projections as {spin: array of dict}.\n The indices of the array are [band_index, kpoint_index].\n The dict is then built the following way:\n {\"string of element\": \"string of orbital as read in from FATBAND file\"}\n If the band structure is not spin polarized, we only store one data set under Spin.up.\n\n .. 
attribute: structure\n\n structure read in from vasprun.xml\n \"\"\"\n\n def __init__(self, filenames=\".\", vasprun=\"vasprun.xml\", Kpointsfile=\"KPOINTS\"):\n \"\"\"\n Args:\n filenames (list or string): can be a list of file names or a path to a folder folder from which all\n \"FATBAND_*\" files will be read\n vasprun: corresponding vasprun file\n Kpointsfile: KPOINTS file for bandstructure calculation, typically \"KPOINTS\"\n \"\"\"\n warnings.warn(\"Make sure all relevant FATBAND files were generated and read in!\")\n warnings.warn(\"Use Lobster 3.2.0 or newer for fatband calculations!\")\n\n VASPRUN = Vasprun(\n filename=vasprun,\n ionic_step_skip=None,\n ionic_step_offset=0,\n parse_dos=True,\n parse_eigen=False,\n parse_projected_eigen=False,\n parse_potcar_file=False,\n occu_tol=1e-8,\n exception_on_bad_xml=True,\n )\n self.structure = VASPRUN.final_structure\n self.lattice = self.structure.lattice.reciprocal_lattice\n self.efermi = VASPRUN.efermi\n kpoints_object = Kpoints.from_file(Kpointsfile)\n\n atomtype = []\n atomnames = []\n orbital_names = []\n\n if not isinstance(filenames, list) or filenames is None:\n filenames_new = []\n if filenames is None:\n filenames = \".\"\n for file in os.listdir(filenames):\n if fnmatch.fnmatch(file, \"FATBAND_*.lobster\"):\n filenames_new.append(os.path.join(filenames, file))\n filenames = filenames_new\n if len(filenames) == 0:\n raise ValueError(\"No FATBAND files in folder or given\")\n for ifilename, filename in enumerate(filenames):\n with zopen(filename, \"rt\") as f:\n contents = f.read().split(\"\\n\")\n\n atomnames.append(os.path.split(filename)[1].split(\"_\")[1].capitalize())\n parameters = contents[0].split()\n atomtype.append(re.split(r\"[0-9]+\", parameters[3])[0].capitalize())\n orbital_names.append(parameters[4])\n\n # get atomtype orbital dict\n atom_orbital_dict = {}\n for iatom, atom in enumerate(atomnames):\n if atom not in atom_orbital_dict:\n atom_orbital_dict[atom] = []\n atom_orbital_dict[atom].append(orbital_names[iatom])\n # test if there are the same orbitals twice or if two different formats were used or if all necessary orbitals\n # are there\n for key, items in atom_orbital_dict.items():\n if len(set(items)) != len(items):\n raise ValueError(\"The are two FATBAND files for the same atom and orbital. 
The program will stop.\")\n split = []\n for item in items:\n split.append(item.split(\"_\")[0])\n for orb, number in collections.Counter(split).items():\n if number not in (1, 3, 5, 7):\n raise ValueError(\n \"Make sure all relevant orbitals were generated and that no duplicates (2p and 2p_x) are \"\n \"present\"\n )\n\n kpoints_array = []\n for ifilename, filename in enumerate(filenames):\n with zopen(filename, \"rt\") as f:\n contents = f.read().split(\"\\n\")\n\n if ifilename == 0:\n self.nbands = int(parameters[6])\n self.number_kpts = kpoints_object.num_kpts - int(contents[1].split()[2]) + 1\n\n if len(contents[1:]) == self.nbands + 2:\n self.is_spinpolarized = False\n elif len(contents[1:]) == self.nbands * 2 + 2:\n self.is_spinpolarized = True\n else:\n linenumbers = []\n for iline, line in enumerate(contents[1 : self.nbands * 2 + 4]):\n if line.split()[0] == \"#\":\n linenumbers.append(iline)\n\n if ifilename == 0:\n if len(linenumbers) == 2:\n self.is_spinpolarized = True\n else:\n self.is_spinpolarized = False\n\n if ifilename == 0:\n eigenvals = {}\n eigenvals[Spin.up] = [\n [collections.defaultdict(float) for i in range(self.number_kpts)] for j in range(self.nbands)\n ]\n if self.is_spinpolarized:\n eigenvals[Spin.down] = [\n [collections.defaultdict(float) for i in range(self.number_kpts)] for j in range(self.nbands)\n ]\n\n p_eigenvals = {}\n p_eigenvals[Spin.up] = [\n [\n {\n str(e): {str(orb): collections.defaultdict(float) for orb in atom_orbital_dict[e]}\n for e in atomnames\n }\n for i in range(self.number_kpts)\n ]\n for j in range(self.nbands)\n ]\n\n if self.is_spinpolarized:\n p_eigenvals[Spin.down] = [\n [\n {\n str(e): {str(orb): collections.defaultdict(float) for orb in atom_orbital_dict[e]}\n for e in atomnames\n }\n for i in range(self.number_kpts)\n ]\n for j in range(self.nbands)\n ]\n\n ikpoint = -1\n for iline, line in enumerate(contents[1:-1]):\n if line.split()[0] == \"#\":\n KPOINT = np.array(\n [\n float(line.split()[4]),\n float(line.split()[5]),\n float(line.split()[6]),\n ]\n )\n if ifilename == 0:\n kpoints_array.append(KPOINT)\n\n linenumber = 0\n iband = 0\n ikpoint += 1\n if linenumber == self.nbands:\n iband = 0\n if line.split()[0] != \"#\":\n\n if linenumber < self.nbands:\n if ifilename == 0:\n eigenvals[Spin.up][iband][ikpoint] = float(line.split()[1]) + self.efermi\n\n p_eigenvals[Spin.up][iband][ikpoint][atomnames[ifilename]][orbital_names[ifilename]] = float(\n line.split()[2]\n )\n if linenumber >= self.nbands and self.is_spinpolarized:\n if ifilename == 0:\n eigenvals[Spin.down][iband][ikpoint] = float(line.split()[1]) + self.efermi\n p_eigenvals[Spin.down][iband][ikpoint][atomnames[ifilename]][orbital_names[ifilename]] = float(\n line.split()[2]\n )\n\n linenumber += 1\n iband += 1\n\n self.kpoints_array = kpoints_array\n self.eigenvals = eigenvals\n self.p_eigenvals = p_eigenvals\n\n label_dict = {}\n for ilabel, label in enumerate(kpoints_object.labels[-self.number_kpts :], start=0):\n\n if label is not None:\n label_dict[label] = kpoints_array[ilabel]\n\n self.label_dict = label_dict\n\n def get_bandstructure(self):\n \"\"\"\n returns a LobsterBandStructureSymmLine object which can be plotted with a normal BSPlotter\n \"\"\"\n\n return LobsterBandStructureSymmLine(\n kpoints=self.kpoints_array,\n eigenvals=self.eigenvals,\n lattice=self.lattice,\n efermi=self.efermi,\n labels_dict=self.label_dict,\n structure=self.structure,\n projections=self.p_eigenvals,\n )\n\n\nclass Bandoverlaps:\n \"\"\"\n Class to read in bandOverlaps.lobster 
files. These files are not created during every Lobster run.\n .. attribute: bandoverlapsdict is a dict of the following form:\n {spin:{\"kpoint as string\": {\"maxDeviation\": float that describes the max deviation, \"matrix\": 2D\n array of the size number of bands times number of bands including the overlap matrices with } }}\n\n .. attribute: maxDeviation is a list of floats describing the maximal Deviation for each problematic kpoint\n\n \"\"\"\n\n def __init__(self, filename: str = \"bandOverlaps.lobster\"):\n \"\"\"\n Args:\n filename: filename of the \"bandOverlaps.lobster\" file\n \"\"\"\n with zopen(filename, \"rt\") as f:\n contents = f.read().split(\"\\n\")\n\n spin_numbers = [0, 1] if contents[0].split()[-1] == \"0\" else [1, 2]\n\n self._read(contents, spin_numbers)\n\n def _read(self, contents: list, spin_numbers: list):\n \"\"\"\n will read in all contents of the file\n Args:\n contents: list of strings\n spin_numbers: list of spin numbers depending on `Lobster` version.\n \"\"\"\n self.bandoverlapsdict = {} # type: Dict\n self.max_deviation = [] # type: List\n # This has to be done like this because there can be different numbers of problematic k-points per spin\n for line in contents:\n if f\"Overlap Matrix (abs) of the orthonormalized projected bands for spin {spin_numbers[0]}\" in line:\n spin = Spin.up\n elif f\"Overlap Matrix (abs) of the orthonormalized projected bands for spin {spin_numbers[1]}\" in line:\n spin = Spin.down\n elif \"k-point\" in line:\n kpoint = line.split(\" \")\n kpoint_array = []\n for kpointel in kpoint:\n if kpointel not in [\"at\", \"k-point\", \"\"]:\n kpoint_array.append(str(kpointel))\n\n elif \"maxDeviation\" in line:\n if spin not in self.bandoverlapsdict:\n self.bandoverlapsdict[spin] = {}\n if not \" \".join(kpoint_array) in self.bandoverlapsdict[spin]:\n self.bandoverlapsdict[spin][\" \".join(kpoint_array)] = {}\n maxdev = line.split(\" \")[2]\n self.bandoverlapsdict[spin][\" \".join(kpoint_array)][\"maxDeviation\"] = float(maxdev)\n self.max_deviation.append(float(maxdev))\n self.bandoverlapsdict[spin][\" \".join(kpoint_array)][\"matrix\"] = []\n\n else:\n overlaps = []\n for el in line.split(\" \"):\n if el not in [\"\"]:\n overlaps.append(float(el))\n self.bandoverlapsdict[spin][\" \".join(kpoint_array)][\"matrix\"].append(overlaps)\n\n def has_good_quality_maxDeviation(self, limit_maxDeviation: float = 0.1) -> bool:\n \"\"\"\n will check if the maxDeviation from the ideal bandoverlap is smaller or equal to limit_maxDeviation\n Args:\n limit_maxDeviation: limit of the maxDeviation\n Returns:\n Boolean that will give you information about the quality of the projection\n \"\"\"\n\n for deviation in self.max_deviation:\n if deviation > limit_maxDeviation:\n return False\n return True\n\n def has_good_quality_check_occupied_bands(\n self,\n number_occ_bands_spin_up: int,\n number_occ_bands_spin_down: Optional[int] = None,\n spin_polarized: bool = False,\n limit_deviation: float = 0.1,\n ) -> bool:\n \"\"\"\n will check if the deviation from the ideal bandoverlap of all occupied bands is smaller or equal to\n limit_deviation\n\n Args:\n number_occ_bands_spin_up (int): number of occupied bands of spin up\n number_occ_bands_spin_down (int): number of occupied bands of spin down\n spin_polarized (bool): If True, then it was a spin polarized calculation\n limit_deviation (float): limit of the maxDeviation\n Returns:\n Boolean that will give you information about the quality of the projection\n \"\"\"\n\n for matrix in 
self.bandoverlapsdict[Spin.up].values():\n for iband1, band1 in enumerate(matrix[\"matrix\"]):\n for iband2, band2 in enumerate(band1):\n if iband1 < number_occ_bands_spin_up and iband2 < number_occ_bands_spin_up:\n if iband1 == iband2:\n if abs(band2 - 1.0) > limit_deviation:\n return False\n else:\n if band2 > limit_deviation:\n return False\n\n if spin_polarized:\n for matrix in self.bandoverlapsdict[Spin.down].values():\n for iband1, band1 in enumerate(matrix[\"matrix\"]):\n for iband2, band2 in enumerate(band1):\n if number_occ_bands_spin_down is not None:\n if iband1 < number_occ_bands_spin_down and iband2 < number_occ_bands_spin_down:\n if iband1 == iband2:\n if abs(band2 - 1.0) > limit_deviation:\n return False\n else:\n if band2 > limit_deviation:\n return False\n else:\n ValueError(\"number_occ_bands_spin_down has to be specified\")\n return True\n\n\nclass Grosspop:\n \"\"\"\n Class to read in GROSSPOP.lobster files.\n\n .. attribute: list_dict_grosspop\n which is a list of dicts including all information about the grosspopulations, one sample dict looks like this:\n {'element': 'O', 'Mulliken GP': {'2s': '1.80', '2p_y': '1.83', '2p_z': '1.79', '2p_x': '1.75', 'total': '7.18'},\n 'Loewdin GP': {'2s': '1.60', '2p_y': '1.82', '2p_z': '1.77', '2p_x': '1.73', 'total': '6.92'}}\n The 0. entry of the list refers to the first atom in GROSSPOP.lobster and so on.\n \"\"\"\n\n def __init__(self, filename: str = \"GROSSPOP.lobster\"):\n \"\"\"\n Args:\n filename: filename of the \"GROSSPOP.lobster\" file\n \"\"\"\n # opens file\n with zopen(filename, \"rt\") as f:\n contents = f.read().split(\"\\n\")\n\n self.list_dict_grosspop = [] # type: List[Any]\n # transfers content of file to list of dict\n for line in contents[3:]:\n cleanline = [i for i in line.split(\" \") if not i == \"\"]\n if len(cleanline) == 5:\n smalldict = {}\n smalldict[\"element\"] = cleanline[1]\n smalldict[\"Mulliken GP\"] = {}\n smalldict[\"Loewdin GP\"] = {}\n smalldict[\"Mulliken GP\"][cleanline[2]] = float(cleanline[3])\n smalldict[\"Loewdin GP\"][cleanline[2]] = float(cleanline[4])\n elif len(cleanline) > 0:\n smalldict[\"Mulliken GP\"][cleanline[0]] = float(cleanline[1])\n smalldict[\"Loewdin GP\"][cleanline[0]] = float(cleanline[2])\n if \"total\" in cleanline[0]:\n self.list_dict_grosspop.append(smalldict)\n\n def get_structure_with_total_grosspop(self, structure_filename: str) -> Structure:\n \"\"\"\n get a Structure with Mulliken and Loewdin total grosspopulations as site properties\n Args:\n structure_filename (str): filename of POSCAR\n Returns:\n Structure Object with Mulliken and Loewdin total grosspopulations as site properties\n \"\"\"\n\n struct = Structure.from_file(structure_filename)\n site_properties = {} # type: Dict[str, Any]\n mullikengp = []\n loewdingp = []\n for grosspop in self.list_dict_grosspop:\n mullikengp.append(grosspop[\"Mulliken GP\"][\"total\"])\n loewdingp.append(grosspop[\"Loewdin GP\"][\"total\"])\n\n site_properties = {\n \"Total Mulliken GP\": mullikengp,\n \"Total Loewdin GP\": loewdingp,\n }\n new_struct = struct.copy(site_properties=site_properties)\n return new_struct\n\n\nclass Wavefunction:\n \"\"\"\n Class to read in wave function files from Lobster and transfer them into an object of the type VolumetricData\n\n .. attribute: grid\n\n grid for the wave function [Nx+1,Ny+1,Nz+1]\n\n .. attribute: points\n\n list of points\n\n .. attribute: real\n\n list of real part of wave function\n\n .. attribute: imaginary\n\n list of imaginary part of wave function\n\n .. 
attribute: distance\n\n list of distance to first point in wave function file\n \"\"\"\n\n def __init__(self, filename, structure):\n \"\"\"\n\n Args:\n filename: filename of wavecar file from Lobster\n structure: Structure object (e.g., created by Structure.from_file(\"\"))\n \"\"\"\n\n self.filename = filename\n self.structure = structure\n\n (\n self.grid,\n self.points,\n self.real,\n self.imaginary,\n self.distance,\n ) = Wavefunction._parse_file(filename)\n\n @staticmethod\n def _parse_file(filename):\n with zopen(filename, \"rt\") as f:\n contents = f.read().split(\"\\n\")\n points = []\n distance = []\n real = []\n imaginary = []\n splitline = contents[0].split()\n grid = [int(splitline[7]), int(splitline[8]), int(splitline[9])]\n for line in contents[1:]:\n splitline = line.split()\n if len(splitline) >= 6:\n points.append([float(splitline[0]), float(splitline[1]), float(splitline[2])])\n distance.append(float(splitline[3]))\n real.append(float(splitline[4]))\n imaginary.append(float(splitline[5]))\n\n if not len(real) == grid[0] * grid[1] * grid[2]:\n raise ValueError(\"Something went wrong while reading the file\")\n if not len(imaginary) == grid[0] * grid[1] * grid[2]:\n raise ValueError(\"Something went wrong while reading the file\")\n return grid, points, real, imaginary, distance\n\n def set_volumetric_data(self, grid, structure):\n \"\"\"\n Will create the VolumetricData Objects\n\n Args:\n grid: grid on which wavefunction was calculated, e.g. [1,2,2]\n structure: Structure object\n\n Returns:\n\n \"\"\"\n Nx = grid[0] - 1\n Ny = grid[1] - 1\n Nz = grid[2] - 1\n a = structure.lattice.matrix[0]\n b = structure.lattice.matrix[1]\n c = structure.lattice.matrix[2]\n new_x = []\n new_y = []\n new_z = []\n new_real = []\n new_imaginary = []\n new_density = []\n\n runner = 0\n for x in range(0, Nx + 1):\n for y in range(0, Ny + 1):\n for z in range(0, Nz + 1):\n x_here = x / float(Nx) * a[0] + y / float(Ny) * b[0] + z / float(Nz) * c[0]\n y_here = x / float(Nx) * a[1] + y / float(Ny) * b[1] + z / float(Nz) * c[1]\n z_here = x / float(Nx) * a[2] + y / float(Ny) * b[2] + z / float(Nz) * c[2]\n\n if x != Nx:\n if y != Ny:\n if z != Nz:\n if not np.isclose(self.points[runner][0], x_here, 1e-3):\n if not np.isclose(self.points[runner][1], y_here, 1e-3):\n if not np.isclose(self.points[runner][2], z_here, 1e-3):\n raise ValueError(\n \"The provided wavefunction from Lobster does not contain all relevant\"\n \" points. 
\"\n \"Please use a line similar to: printLCAORealSpaceWavefunction kpoint 1 \"\n \"coordinates 0.0 0.0 0.0 coordinates 1.0 1.0 1.0 box bandlist 1 \"\n )\n\n new_x.append(x_here)\n new_y.append(y_here)\n new_z.append(z_here)\n\n new_real.append(self.real[runner])\n new_imaginary.append(self.imaginary[runner])\n new_density.append(self.real[runner] ** 2 + self.imaginary[runner] ** 2)\n\n runner += 1\n\n self.final_real = np.reshape(new_real, [Nx, Ny, Nz])\n self.final_imaginary = np.reshape(new_imaginary, [Nx, Ny, Nz])\n self.final_density = np.reshape(new_density, [Nx, Ny, Nz])\n\n self.volumetricdata_real = VolumetricData(structure, {\"total\": self.final_real})\n self.volumetricdata_imaginary = VolumetricData(structure, {\"total\": self.final_imaginary})\n self.volumetricdata_density = VolumetricData(structure, {\"total\": self.final_density})\n\n def get_volumetricdata_real(self):\n \"\"\"\n will return a VolumetricData object including the real part of the wave function\n\n Returns: VolumetricData object\n \"\"\"\n\n if not hasattr(self, \"volumetricdata_real\"):\n self.set_volumetric_data(self.grid, self.structure)\n return self.volumetricdata_real\n\n def get_volumetricdata_imaginary(self):\n \"\"\"\n will return a VolumetricData object including the imaginary part of the wave function\n\n Returns: VolumetricData object\n \"\"\"\n if not hasattr(self, \"volumetricdata_imaginary\"):\n self.set_volumetric_data(self.grid, self.structure)\n return self.volumetricdata_imaginary\n\n def get_volumetricdata_density(self):\n \"\"\"\n will return a VolumetricData object including the imaginary part of the wave function\n\n Returns: VolumetricData object\n\n \"\"\"\n if not hasattr(self, \"volumetricdata_density\"):\n self.set_volumetric_data(self.grid, self.structure)\n return self.volumetricdata_density\n\n def write_file(self, filename=\"WAVECAR.vasp\", part=\"real\"):\n \"\"\"\n will save the wavefunction in a file format that can be read by VESTA\n This will only work if the wavefunction from lobster was constructed with:\n \"printLCAORealSpaceWavefunction kpoint 1 coordinates 0.0 0.0 0.0 coordinates 1.0 1.0 1.0 box bandlist 1 2 3 4\n 5 6 \"\n or similar (the whole unit cell has to be covered!)\n\n Args:\n filename: Filename for the output, e.g., WAVECAR.vasp\n part: which part of the wavefunction will be saved (\"real\" or \"imaginary\")\n\n Returns:\n\n \"\"\"\n if not (\n hasattr(self, \"volumetricdata_real\")\n and hasattr(self, \"volumetricdata_imaginary\")\n and hasattr(self, \"volumetricdata_density\")\n ):\n self.set_volumetric_data(self.grid, self.structure)\n if part == \"real\":\n self.volumetricdata_real.write_file(filename)\n elif part == \"imaginary\":\n self.volumetricdata_imaginary.write_file(filename)\n elif part == \"density\":\n self.volumetricdata_density.write_file(filename)\n else:\n raise ValueError('part can be only \"real\" or \"imaginary\" or \"density\"')\n\n\n# madleung and sitepotential classes\nclass MadelungEnergies:\n \"\"\"\n Class to read MadelungEnergies.lobster files generated by LOBSTER\n\n .. attribute: madelungenergies_Mulliken\n float that gives the madelung energy based on the Mulliken approach\n .. attribute: madelungenergies_Loewdin\n float that gives the madelung energy based on the Loewdin approach\n .. 
attribute: ewald_splitting\n Ewald Splitting parameter to compute SitePotentials\n\n \"\"\"\n\n def __init__(self, filename: str = \"MadelungEnergies.lobster\"):\n \"\"\"\n\n Args:\n filename: filename of the \"MadelungEnergies.lobster\" file\n \"\"\"\n\n with zopen(filename, \"rt\") as f:\n data = f.read().split(\"\\n\")[5]\n if len(data) == 0:\n raise OSError(\"MadelungEnergies file contains no data.\")\n line = data.split()\n self.ewald_splitting = float(line[0])\n self.madelungenergies_Mulliken = float(line[1])\n self.madelungenergies_Loewdin = float(line[2])\n\n\nclass SitePotential:\n \"\"\"\n Class to read SitePotentials.lobster files generated by LOBSTER\n\n .. attribute: atomlist\n List of atoms in SitePotentials.lobster\n .. attribute: types\n List of types of atoms in SitePotentials.lobster\n .. attribute: num_atoms\n Number of atoms in SitePotentials.lobster\n .. attribute: sitepotentials_Mulliken\n List of Mulliken potentials of sites in SitePotentials.lobster\n .. attribute: sitepotentials_Loewdin\n List of Loewdin potentials of sites in SitePotentials.lobster\n .. attribute: madelung_Mulliken\n float that gives the madelung energy based on the Mulliken approach\n .. attribute: madelung_Loewdin\n float that gives the madelung energy based on the Loewdin approach\n .. attribute: ewald_splitting\n Ewald Splitting parameter to compute SitePotentials\n \"\"\"\n\n def __init__(self, filename: str = \"SitePotentials.lobster\"):\n \"\"\"\n Args:\n filename: filename for the SitePotentials file, typically \"SitePotentials.lobster\"\n \"\"\"\n\n # site_potentials\n with zopen(filename, \"rt\") as f:\n data = f.read().split(\"\\n\")\n if len(data) == 0:\n raise OSError(\"SitePotentials file contains no data.\")\n\n self.ewald_splitting = float(data[0].split()[9])\n\n data = data[5:-1]\n self.num_atoms = len(data) - 2\n self.atomlist = [] # type: List[str]\n self.types = [] # type: List[str]\n self.sitepotentials_Mulliken = [] # type: List[float]\n self.sitepotentials_Loewdin = [] # type: List[float]\n for atom in range(0, self.num_atoms):\n line = data[atom].split()\n self.atomlist.append(line[1] + str(line[0]))\n self.types.append(line[1])\n self.sitepotentials_Mulliken.append(float(line[2]))\n self.sitepotentials_Loewdin.append(float(line[3]))\n\n self.madelungenergies_Mulliken = float(data[self.num_atoms + 1].split()[3])\n self.madelungenergies_Loewdin = float(data[self.num_atoms + 1].split()[4])\n\n def get_structure_with_site_potentials(self, structure_filename):\n \"\"\"\n get a Structure with Mulliken and Loewdin charges as site properties\n Args:\n structure_filename: filename of POSCAR\n Returns:\n Structure Object with Mulliken and Loewdin charges as site properties\n \"\"\"\n\n struct = Structure.from_file(structure_filename)\n Mulliken = self.sitepotentials_Mulliken\n Loewdin = self.sitepotentials_Loewdin\n site_properties = {\"Mulliken Site Potentials (eV)\": Mulliken, \"Loewdin Site Potentials (eV)\": Loewdin}\n new_struct = struct.copy(site_properties=site_properties)\n return new_struct\n",
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom pymatgen.analysis.defects.core import DefectEntry, Interstitial, Vacancy\nfrom pymatgen.analysis.defects.defect_compatibility import DefectCompatibility\nfrom pymatgen.core import PeriodicSite\nfrom pymatgen.io.vasp import Outcar, Poscar, Vasprun\nfrom pymatgen.util.testing import PymatgenTest\n\n\nclass DefectCompatibilityTest(PymatgenTest):\n def setUp(self):\n struc = PymatgenTest.get_structure(\"VO2\")\n struc.make_supercell(3)\n struc = struc\n self.vac = Vacancy(struc, struc.sites[0], charge=-3)\n\n abc = self.vac.bulk_structure.lattice.abc\n axisdata = [np.arange(0.0, lattval, 0.2) for lattval in abc]\n bldata = [np.array([1.0 for u in np.arange(0.0, lattval, 0.2)]) for lattval in abc]\n dldata = [\n np.array([(-1 - np.cos(2 * np.pi * u / lattval)) for u in np.arange(0.0, lattval, 0.2)]) for lattval in abc\n ]\n self.frey_params = {\n \"axis_grid\": axisdata,\n \"bulk_planar_averages\": bldata,\n \"defect_planar_averages\": dldata,\n \"dielectric\": 15,\n \"initial_defect_structure\": struc.copy(),\n \"defect_frac_sc_coords\": struc.sites[0].frac_coords[:],\n }\n\n kumagai_bulk_struc = Poscar.from_file(\n os.path.join(PymatgenTest.TEST_FILES_DIR, \"defect\", \"CONTCAR_bulk\")\n ).structure\n bulk_out = Outcar(os.path.join(PymatgenTest.TEST_FILES_DIR, \"defect\", \"OUTCAR_bulk.gz\"))\n defect_out = Outcar(os.path.join(PymatgenTest.TEST_FILES_DIR, \"defect\", \"OUTCAR_vac_Ga_-3.gz\"))\n self.kumagai_vac = Vacancy(kumagai_bulk_struc, kumagai_bulk_struc.sites[0], charge=-3)\n kumagai_defect_structure = self.kumagai_vac.generate_defect_structure()\n self.kumagai_params = {\n \"bulk_atomic_site_averages\": bulk_out.electrostatic_potential,\n \"defect_atomic_site_averages\": defect_out.electrostatic_potential,\n \"site_matching_indices\": [[ind, ind - 1] for ind in range(len(kumagai_bulk_struc))],\n \"defect_frac_sc_coords\": [0.0, 0.0, 0.0],\n \"initial_defect_structure\": kumagai_defect_structure,\n \"dielectric\": 18.118 * np.identity(3),\n \"gamma\": 0.153156, # not necessary to load gamma, but speeds up unit test\n }\n\n v = Vasprun(os.path.join(PymatgenTest.TEST_FILES_DIR, \"vasprun.xml\"))\n eigenvalues = v.eigenvalues.copy()\n kptweights = v.actual_kpoints_weights\n potalign = -0.1\n vbm = v.eigenvalue_band_properties[2]\n cbm = v.eigenvalue_band_properties[1]\n defect_incar = v.incar\n self.bandfill_params = {\n \"eigenvalues\": eigenvalues,\n \"kpoint_weights\": kptweights,\n \"potalign\": potalign,\n \"vbm\": vbm,\n \"cbm\": cbm,\n \"run_metadata\": {\"defect_incar\": defect_incar},\n }\n\n self.band_edge_params = {\n \"hybrid_cbm\": 1.0,\n \"hybrid_vbm\": -1.0,\n \"vbm\": -0.5,\n \"cbm\": 0.6,\n \"num_hole_vbm\": 1.0,\n \"num_elec_cbm\": 1.0,\n }\n\n def test_process_entry(self):\n\n # basic process with no corrections\n dentry = DefectEntry(\n self.vac,\n 0.0,\n corrections={},\n parameters={\"vbm\": 0.0, \"cbm\": 0.0},\n entry_id=None,\n )\n dc = DefectCompatibility()\n dentry = dc.process_entry(dentry)\n self.assertIsNotNone(dentry)\n\n # process with corrections from parameters used in other unit tests\n params = self.frey_params.copy()\n params.update(self.bandfill_params)\n params.update(\n {\n \"hybrid_cbm\": params[\"cbm\"] + 0.2,\n \"hybrid_vbm\": params[\"vbm\"] - 0.4,\n }\n )\n dentry = DefectEntry(self.vac, 0.0, corrections={}, parameters=params, entry_id=None)\n dc = DefectCompatibility()\n dentry = 
dc.process_entry(dentry)\n self.assertAlmostEqual(dentry.corrections[\"bandedgeshifting_correction\"], 1.2)\n self.assertAlmostEqual(dentry.corrections[\"bandfilling_correction\"], 0.0)\n self.assertAlmostEqual(dentry.corrections[\"charge_correction\"], 5.44595036)\n\n # test over delocalized free carriers which forces skipping charge correction\n params = self.bandfill_params.copy() # No Freysoldt metadata\n params.update(\n {\n \"hybrid_cbm\": params[\"cbm\"] + 0.2,\n \"hybrid_vbm\": params[\"vbm\"] - 0.4,\n }\n )\n # modify the eigenvalue list to have free holes\n hole_eigenvalues = {}\n for spinkey, spinset in params[\"eigenvalues\"].items():\n hole_eigenvalues[spinkey] = []\n for kptset in spinset:\n hole_eigenvalues[spinkey].append([])\n for eig in kptset:\n if (eig[0] < params[\"vbm\"]) and (eig[0] > params[\"vbm\"] - 0.8):\n hole_eigenvalues[spinkey][-1].append([eig[0], 0.5])\n else:\n hole_eigenvalues[spinkey][-1].append(eig)\n\n params.update({\"eigenvalues\": hole_eigenvalues})\n dentry = DefectEntry(self.vac, 0.0, corrections={}, parameters=params, entry_id=None)\n dc = DefectCompatibility(free_chg_cutoff=0.8)\n dentry = dc.process_entry(dentry)\n self.assertAlmostEqual(dentry.corrections[\"bandedgeshifting_correction\"], 1.19999999)\n self.assertAlmostEqual(dentry.corrections[\"bandfilling_correction\"], -0.492633372744)\n self.assertAlmostEqual(dentry.corrections[\"charge_correction\"], 0.0)\n\n # turn off band filling and band edge shifting\n dc = DefectCompatibility(free_chg_cutoff=0.8, use_bandfilling=False, use_bandedgeshift=False)\n dentry = dc.process_entry(dentry)\n self.assertAlmostEqual(dentry.corrections[\"bandedgeshifting_correction\"], 0.0)\n self.assertAlmostEqual(dentry.corrections[\"bandfilling_correction\"], 0.0)\n self.assertAlmostEqual(dentry.corrections[\"charge_correction\"], 0.0)\n\n def test_perform_all_corrections(self):\n\n # return entry even if insufficient values are provided\n # for freysoldt, kumagai, bandfilling, or band edge shifting\n de = DefectEntry(self.vac, 0.0, corrections={}, parameters={}, entry_id=None)\n dc = DefectCompatibility()\n dentry = dc.perform_all_corrections(de)\n self.assertIsNotNone(dentry)\n # all other correction applications are tested in unit tests below\n\n def test_perform_freysoldt(self):\n de = DefectEntry(self.vac, 0.0, corrections={}, parameters=self.frey_params, entry_id=None)\n dc = DefectCompatibility()\n dentry = dc.perform_freysoldt(de)\n\n val = dentry.parameters[\"freysoldt_meta\"]\n self.assertAlmostEqual(val[\"freysoldt_electrostatic\"], 0.975893)\n self.assertAlmostEqual(val[\"freysoldt_potential_alignment_correction\"], 4.4700574)\n self.assertAlmostEqual(val[\"freysoldt_potalign\"], 1.4900191)\n self.assertTrue(\"pot_corr_uncertainty_md\" in val.keys())\n self.assertTrue(\"pot_plot_data\" in val.keys())\n\n def test_perform_kumagai(self):\n de = DefectEntry(self.kumagai_vac, 0.0, parameters=self.kumagai_params)\n dc = DefectCompatibility()\n dentry = dc.perform_kumagai(de)\n\n val = dentry.parameters[\"kumagai_meta\"]\n self.assertAlmostEqual(val[\"kumagai_electrostatic\"], 0.88236299)\n self.assertAlmostEqual(val[\"kumagai_potential_alignment_correction\"], 2.09704862)\n self.assertAlmostEqual(val[\"kumagai_potalign\"], 0.69901620)\n self.assertTrue(\"pot_corr_uncertainty_md\" in val.keys())\n self.assertTrue(\"pot_plot_data\" in val.keys())\n\n def test_run_bandfilling(self):\n de = DefectEntry(\n self.vac,\n 0.0,\n corrections={},\n parameters=self.bandfill_params,\n entry_id=None,\n )\n dc = 
DefectCompatibility()\n dentry = dc.perform_bandfilling(de)\n\n val = dentry.parameters[\"bandfilling_meta\"]\n self.assertAlmostEqual(val[\"num_hole_vbm\"], 0.0)\n self.assertAlmostEqual(val[\"num_elec_cbm\"], 0.0)\n self.assertAlmostEqual(val[\"bandfilling_correction\"], 0.0)\n\n def test_run_band_edge_shifting(self):\n de = DefectEntry(\n self.vac,\n 0.0,\n corrections={},\n parameters=self.band_edge_params,\n entry_id=None,\n )\n\n dc = DefectCompatibility()\n dentry = dc.perform_band_edge_shifting(de)\n val = dentry.parameters[\"bandshift_meta\"]\n self.assertEqual(val[\"vbmshift\"], -0.5)\n self.assertEqual(val[\"cbmshift\"], 0.4)\n self.assertEqual(val[\"bandedgeshifting_correction\"], 1.5)\n\n def test_delocalization_analysis(self):\n # return entry even if insufficient values are provided\n # for delocalization analysis with freysoldt, kumagai,\n # bandfilling, or band edge shifting\n de = DefectEntry(self.vac, 0.0, corrections={}, parameters={}, entry_id=None)\n dc = DefectCompatibility()\n dentry = dc.delocalization_analysis(de)\n self.assertIsNotNone(dentry)\n # all other correction applications are tested in unit tests below\n\n def test_check_freysoldt_delocalized(self):\n de = DefectEntry(self.vac, 0.0, corrections={}, parameters=self.frey_params, entry_id=None)\n de.parameters.update({\"is_compatible\": True}) # needs to be initialized with this here for unittest\n dc = DefectCompatibility(plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.5)\n dentry = dc.perform_freysoldt(de)\n\n # check case which fits under compatibility constraints\n dentry = dc.check_freysoldt_delocalized(dentry)\n frey_delocal = dentry.parameters[\"delocalization_meta\"][\"plnr_avg\"]\n self.assertTrue(frey_delocal[\"is_compatible\"])\n ans_var = [0.00038993, 0.02119532, 0.02119532]\n ans_window = [0.048331509, 0.36797169, 0.36797169]\n for ax in range(3):\n ax_metadata = frey_delocal[\"metadata\"][ax]\n self.assertTrue(ax_metadata[\"frey_variance_compatible\"])\n self.assertAlmostEqual(ax_metadata[\"frey_variance\"], ans_var[ax])\n self.assertTrue(ax_metadata[\"frey_minmax_compatible\"])\n self.assertAlmostEqual(ax_metadata[\"frey_minmax_window\"], ans_window[ax])\n\n self.assertTrue(dentry.parameters[\"is_compatible\"])\n\n # check planar delocalization on 2nd and 3rd axes\n dc = DefectCompatibility(plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.2)\n dentry.parameters.update({\"is_compatible\": True})\n dentry = dc.check_freysoldt_delocalized(dentry)\n frey_delocal = dentry.parameters[\"delocalization_meta\"][\"plnr_avg\"]\n self.assertFalse(frey_delocal[\"is_compatible\"])\n ax_metadata = frey_delocal[\"metadata\"][0]\n self.assertTrue(ax_metadata[\"frey_variance_compatible\"])\n self.assertTrue(ax_metadata[\"frey_minmax_compatible\"])\n for ax in [1, 2]:\n ax_metadata = frey_delocal[\"metadata\"][ax]\n self.assertTrue(ax_metadata[\"frey_variance_compatible\"])\n self.assertFalse(ax_metadata[\"frey_minmax_compatible\"])\n\n self.assertFalse(dentry.parameters[\"is_compatible\"])\n\n # check variance based delocalization on 2nd and 3rd axes\n dc = DefectCompatibility(plnr_avg_var_tol=0.01, plnr_avg_minmax_tol=0.5)\n dentry.parameters.update({\"is_compatible\": True})\n dentry = dc.check_freysoldt_delocalized(dentry)\n frey_delocal = dentry.parameters[\"delocalization_meta\"][\"plnr_avg\"]\n self.assertFalse(frey_delocal[\"is_compatible\"])\n ax_metadata = frey_delocal[\"metadata\"][0]\n self.assertTrue(ax_metadata[\"frey_variance_compatible\"])\n self.assertTrue(ax_metadata[\"frey_minmax_compatible\"])\n for 
ax in [1, 2]:\n ax_metadata = frey_delocal[\"metadata\"][ax]\n self.assertFalse(ax_metadata[\"frey_variance_compatible\"])\n self.assertTrue(ax_metadata[\"frey_minmax_compatible\"])\n\n self.assertFalse(dentry.parameters[\"is_compatible\"])\n\n def test_check_kumagai_delocalized(self):\n de = DefectEntry(self.kumagai_vac, 0.0, parameters=self.kumagai_params)\n de.parameters.update({\"is_compatible\": True}) # needs to be initialized with this here for unittest\n dc = DefectCompatibility(atomic_site_var_tol=13.3, atomic_site_minmax_tol=20.95)\n dentry = dc.perform_kumagai(de)\n\n # check case which fits under compatibility constraints\n dentry = dc.check_kumagai_delocalized(dentry)\n kumagai_delocal = dentry.parameters[\"delocalization_meta\"][\"atomic_site\"]\n self.assertTrue(kumagai_delocal[\"is_compatible\"])\n kumagai_md = kumagai_delocal[\"metadata\"]\n true_variance = 13.262304401193997\n true_minmax = 20.9435\n self.assertTrue(kumagai_md[\"kumagai_variance_compatible\"])\n self.assertAlmostEqual(kumagai_md[\"kumagai_variance\"], true_variance)\n self.assertTrue(kumagai_md[\"kumagai_minmax_compatible\"])\n self.assertAlmostEqual(kumagai_md[\"kumagai_minmax_window\"], true_minmax)\n\n self.assertTrue(dentry.parameters[\"is_compatible\"])\n\n # break variable compatibility\n dc = DefectCompatibility(atomic_site_var_tol=0.1, atomic_site_minmax_tol=20.95)\n de.parameters.update({\"is_compatible\": True})\n dentry = dc.perform_kumagai(de)\n dentry = dc.check_kumagai_delocalized(dentry)\n kumagai_delocal = dentry.parameters[\"delocalization_meta\"][\"atomic_site\"]\n self.assertFalse(kumagai_delocal[\"is_compatible\"])\n kumagai_md = kumagai_delocal[\"metadata\"]\n self.assertFalse(kumagai_md[\"kumagai_variance_compatible\"])\n self.assertAlmostEqual(kumagai_md[\"kumagai_variance\"], true_variance)\n self.assertTrue(kumagai_md[\"kumagai_minmax_compatible\"])\n self.assertAlmostEqual(kumagai_md[\"kumagai_minmax_window\"], true_minmax)\n\n self.assertFalse(dentry.parameters[\"is_compatible\"])\n\n # break maxmin compatibility\n dc = DefectCompatibility(atomic_site_var_tol=13.3, atomic_site_minmax_tol=0.5)\n de.parameters.update({\"is_compatible\": True})\n dentry = dc.perform_kumagai(de)\n dentry = dc.check_kumagai_delocalized(dentry)\n kumagai_delocal = dentry.parameters[\"delocalization_meta\"][\"atomic_site\"]\n self.assertFalse(kumagai_delocal[\"is_compatible\"])\n kumagai_md = kumagai_delocal[\"metadata\"]\n self.assertTrue(kumagai_md[\"kumagai_variance_compatible\"])\n self.assertAlmostEqual(kumagai_md[\"kumagai_variance\"], true_variance)\n self.assertFalse(kumagai_md[\"kumagai_minmax_compatible\"])\n self.assertAlmostEqual(kumagai_md[\"kumagai_minmax_window\"], true_minmax)\n\n self.assertFalse(dentry.parameters[\"is_compatible\"])\n\n def test_check_final_relaxed_structure_delocalized(self):\n # test structure delocalization analysis\n # first test no movement in atoms\n initial_defect_structure = self.vac.generate_defect_structure()\n final_defect_structure = initial_defect_structure.copy()\n sampling_radius = 4.55\n defect_frac_sc_coords = self.vac.site.frac_coords[:]\n\n params = {\n \"initial_defect_structure\": initial_defect_structure,\n \"final_defect_structure\": final_defect_structure,\n \"sampling_radius\": sampling_radius,\n \"defect_frac_sc_coords\": defect_frac_sc_coords,\n \"is_compatible\": True,\n }\n dentry = DefectEntry(self.vac, 0.0, corrections={}, parameters=params, entry_id=None)\n\n dc = DefectCompatibility(tot_relax_tol=0.1, perc_relax_tol=0.1, 
defect_tot_relax_tol=0.1)\n dentry = dc.check_final_relaxed_structure_delocalized(dentry)\n\n struc_delocal = dentry.parameters[\"delocalization_meta\"][\"structure_relax\"]\n self.assertTrue(dentry.parameters[\"is_compatible\"])\n self.assertTrue(struc_delocal[\"is_compatible\"])\n self.assertTrue(struc_delocal[\"metadata\"][\"structure_tot_relax_compatible\"])\n self.assertEqual(struc_delocal[\"metadata\"][\"tot_relax_outside_rad\"], 0.0)\n self.assertTrue(struc_delocal[\"metadata\"][\"structure_perc_relax_compatible\"])\n self.assertEqual(struc_delocal[\"metadata\"][\"perc_relax_outside_rad\"], 0.0)\n self.assertEqual(\n len(struc_delocal[\"metadata\"][\"full_structure_relax_data\"]),\n len(initial_defect_structure),\n )\n self.assertIsNone(struc_delocal[\"metadata\"][\"defect_index\"])\n\n defect_delocal = dentry.parameters[\"delocalization_meta\"][\"defectsite_relax\"]\n self.assertTrue(defect_delocal[\"is_compatible\"])\n self.assertIsNone(defect_delocal[\"metadata\"][\"relax_amount\"])\n\n # next test for when structure has delocalized outside of radius from defect\n pert_struct_fin_struct = initial_defect_structure.copy()\n pert_struct_fin_struct.perturb(0.1)\n dentry.parameters.update({\"final_defect_structure\": pert_struct_fin_struct})\n dentry = dc.check_final_relaxed_structure_delocalized(dentry)\n\n struc_delocal = dentry.parameters[\"delocalization_meta\"][\"structure_relax\"]\n self.assertFalse(dentry.parameters[\"is_compatible\"])\n self.assertFalse(struc_delocal[\"is_compatible\"])\n self.assertFalse(struc_delocal[\"metadata\"][\"structure_tot_relax_compatible\"])\n self.assertAlmostEqual(struc_delocal[\"metadata\"][\"tot_relax_outside_rad\"], 12.5)\n self.assertFalse(struc_delocal[\"metadata\"][\"structure_perc_relax_compatible\"])\n self.assertAlmostEqual(struc_delocal[\"metadata\"][\"perc_relax_outside_rad\"], 77.63975155)\n\n # now test for when an interstitial defect has migrated too much\n inter_def_site = PeriodicSite(\n \"H\",\n [7.58857304, 11.70848069, 12.97817518],\n self.vac.bulk_structure.lattice,\n to_unit_cell=True,\n coords_are_cartesian=True,\n )\n inter = Interstitial(self.vac.bulk_structure, inter_def_site, charge=0)\n\n initial_defect_structure = inter.generate_defect_structure()\n final_defect_structure = initial_defect_structure.copy()\n poss_deflist = sorted(\n final_defect_structure.get_sites_in_sphere(inter.site.coords, 2, include_index=True),\n key=lambda x: x[1],\n )\n def_index = poss_deflist[0][2]\n final_defect_structure.translate_sites(\n indices=[def_index], vector=[0.0, 0.0, 0.008]\n ) # fractional coords translation\n defect_frac_sc_coords = inter_def_site.frac_coords[:]\n\n params = {\n \"initial_defect_structure\": initial_defect_structure,\n \"final_defect_structure\": final_defect_structure,\n \"sampling_radius\": sampling_radius,\n \"defect_frac_sc_coords\": defect_frac_sc_coords,\n \"is_compatible\": True,\n }\n dentry = DefectEntry(inter, 0.0, corrections={}, parameters=params, entry_id=None)\n\n dentry = dc.check_final_relaxed_structure_delocalized(dentry)\n\n defect_delocal = dentry.parameters[\"delocalization_meta\"][\"defectsite_relax\"]\n self.assertFalse(defect_delocal[\"is_compatible\"])\n self.assertAlmostEqual(defect_delocal[\"metadata\"][\"relax_amount\"], 0.10836054)\n\n def test_bandfilling_SOC_calc(self):\n v = Vasprun(os.path.join(PymatgenTest.TEST_FILES_DIR, \"vasprun.xml.int_Te_SOC.gz\"))\n struc = v.structures[0]\n interstitial = Interstitial(struc, struc.sites[-1], charge=-2)\n eigenvalues = v.eigenvalues.copy()\n 
kptweights = v.actual_kpoints_weights\n potalign = -0.1\n defect_incar = v.incar\n\n bandfill_params = {\n \"eigenvalues\": eigenvalues,\n \"kpoint_weights\": kptweights,\n \"potalign\": potalign,\n \"vbm\": 1.6465, # bulk VBM\n \"cbm\": 3.1451, # bulk CBM\n \"run_metadata\": {\"defect_incar\": defect_incar},\n }\n\n soc_dentry = DefectEntry(\n interstitial,\n 0.0,\n corrections={},\n parameters=bandfill_params,\n entry_id=None,\n )\n dc = DefectCompatibility()\n soc_dentry = dc.process_entry(soc_dentry)\n\n self.assertAlmostEqual(soc_dentry.corrections[\"bandfilling_correction\"], -1.9628402187500003)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nimport os\n\nimport numpy as np\n\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.core.units import Ha_to_eV, bohr_to_ang\nfrom pymatgen.io.abinit.abiobjects import (\n Electrons,\n ElectronsAlgorithm,\n KSampling,\n PPModel,\n RelaxationMethod,\n Smearing,\n SpinMode,\n lattice_from_abivars,\n species_by_znucl,\n structure_to_abivars,\n)\nfrom pymatgen.util.testing import PymatgenTest\n\n\nclass LatticeFromAbivarsTest(PymatgenTest):\n def test_rprim_acell(self):\n l1 = lattice_from_abivars(acell=3 * [10], rprim=np.eye(3))\n self.assertAlmostEqual(l1.volume, bohr_to_ang**3 * 1000)\n assert l1.angles == (90, 90, 90)\n l2 = lattice_from_abivars(acell=3 * [10], angdeg=(90, 90, 90))\n assert l1 == l2\n\n l2 = lattice_from_abivars(acell=3 * [8], angdeg=(60, 60, 60))\n abi_rprimd = (\n np.reshape(\n [\n 4.6188022,\n 0.0000000,\n 6.5319726,\n -2.3094011,\n 4.0000000,\n 6.5319726,\n -2.3094011,\n -4.0000000,\n 6.5319726,\n ],\n (3, 3),\n )\n * bohr_to_ang\n )\n self.assertArrayAlmostEqual(l2.matrix, abi_rprimd)\n\n l3 = lattice_from_abivars(acell=[3, 6, 9], angdeg=(30, 40, 50))\n abi_rprimd = (\n np.reshape(\n [\n 3.0000000,\n 0.0000000,\n 0.0000000,\n 3.8567257,\n 4.5962667,\n 0.0000000,\n 6.8944000,\n 4.3895544,\n 3.7681642,\n ],\n (3, 3),\n )\n * bohr_to_ang\n )\n self.assertArrayAlmostEqual(l3.matrix, abi_rprimd)\n\n with self.assertRaises(ValueError):\n lattice_from_abivars(acell=[1, 1, 1], angdeg=(90, 90, 90), rprim=np.eye(3))\n with self.assertRaises(ValueError):\n lattice_from_abivars(acell=[1, 1, 1], angdeg=(-90, 90, 90))\n\n def test_znucl_typat(self):\n \"\"\"Test the order of typat and znucl in the Abinit input and enforce_typat, enforce_znucl.\"\"\"\n\n # Ga Ga1 1 0.33333333333333 0.666666666666667 0.500880 1.0\n # Ga Ga2 1 0.66666666666667 0.333333333333333 0.000880 1.0\n # N N3 1 0.333333333333333 0.666666666666667 0.124120 1.0\n # N N4 1 0.666666666666667 0.333333333333333 0.624120 1.0\n gan = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, \"abinit\", \"gan.cif\"))\n\n # By default, znucl is filled using the first new type found in sites.\n def_vars = structure_to_abivars(gan)\n def_znucl = def_vars[\"znucl\"]\n self.assertArrayEqual(def_znucl, [31, 7])\n def_typat = def_vars[\"typat\"]\n self.assertArrayEqual(def_typat, [1, 1, 2, 2])\n\n # But it's possible to enforce a particular value of typat and znucl.\n enforce_znucl = [7, 31]\n enforce_typat = [2, 2, 1, 1]\n enf_vars = structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=enforce_typat)\n self.assertArrayEqual(enf_vars[\"znucl\"], enforce_znucl)\n self.assertArrayEqual(enf_vars[\"typat\"], enforce_typat)\n self.assertArrayEqual(def_vars[\"xred\"], enf_vars[\"xred\"])\n\n assert [s.symbol for s in species_by_znucl(gan)] == [\"Ga\", \"N\"]\n\n for itype1, itype2 in zip(def_typat, enforce_typat):\n assert def_znucl[itype1 - 1] == enforce_znucl[itype2 - 1]\n\n with self.assertRaises(Exception):\n structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=None)\n\n\nclass SpinModeTest(PymatgenTest):\n def test_base(self):\n polarized = SpinMode.as_spinmode(\"polarized\")\n other_polarized = SpinMode.as_spinmode(\"polarized\")\n unpolarized = SpinMode.as_spinmode(\"unpolarized\")\n\n polarized.to_abivars()\n\n self.assertTrue(polarized is other_polarized)\n self.assertTrue(polarized == other_polarized)\n self.assertTrue(polarized != unpolarized)\n\n # Test pickle\n 
self.serialize_with_pickle(polarized)\n\n # Test dict methods\n self.assertMSONable(polarized)\n self.assertMSONable(unpolarized)\n\n\nclass SmearingTest(PymatgenTest):\n def test_base(self):\n fd1ev = Smearing.as_smearing(\"fermi_dirac:1 eV\")\n fd1ev.to_abivars()\n\n self.assertTrue(fd1ev)\n\n same_fd = Smearing.as_smearing(\"fermi_dirac:\" + str(1.0 / Ha_to_eV))\n\n self.assertTrue(same_fd == fd1ev)\n\n nosmear = Smearing.nosmearing()\n assert nosmear == Smearing.as_smearing(\"nosmearing\")\n\n self.assertFalse(nosmear)\n self.assertTrue(nosmear != fd1ev)\n self.assertMSONable(nosmear)\n\n new_fd1ev = Smearing.from_dict(fd1ev.as_dict())\n self.assertTrue(new_fd1ev == fd1ev)\n\n # Test pickle\n self.serialize_with_pickle(fd1ev)\n\n # Test dict methods\n self.assertMSONable(fd1ev)\n\n\nclass ElectronsAlgorithmTest(PymatgenTest):\n def test_base(self):\n algo = ElectronsAlgorithm(nstep=70)\n _ = algo.to_abivars()\n\n # Test pickle\n self.serialize_with_pickle(algo)\n\n # Test dict methods\n self.assertMSONable(algo)\n\n\nclass ElectronsTest(PymatgenTest):\n def test_base(self):\n default_electrons = Electrons()\n self.assertTrue(default_electrons.nsppol == 2)\n self.assertTrue(default_electrons.nspinor == 1)\n self.assertTrue(default_electrons.nspden == 2)\n\n _ = default_electrons.to_abivars()\n\n # new = Electron.from_dict(default_electrons.as_dict())\n\n # Test pickle\n self.serialize_with_pickle(default_electrons, test_eq=False)\n\n custom_electrons = Electrons(\n spin_mode=\"unpolarized\",\n smearing=\"marzari4:0.2 eV\",\n algorithm=ElectronsAlgorithm(nstep=70),\n nband=10,\n charge=1.0,\n comment=\"Test comment\",\n )\n\n # Test dict methods\n self.assertMSONable(custom_electrons)\n\n\nclass KSamplingTest(PymatgenTest):\n def test_base(self):\n monkhorst = KSampling.monkhorst((3, 3, 3), (0.5, 0.5, 0.5), 0, False, False)\n gamma_centered = KSampling.gamma_centered((3, 3, 3), False, False)\n\n monkhorst.to_abivars()\n\n # Test dict methods\n self.assertMSONable(monkhorst)\n self.assertMSONable(gamma_centered)\n\n\nclass RelaxationTest(PymatgenTest):\n def test_base(self):\n atoms_and_cell = RelaxationMethod.atoms_and_cell()\n atoms_only = RelaxationMethod.atoms_only()\n\n atoms_and_cell.to_abivars()\n\n # Test dict methods\n self.assertMSONable(atoms_and_cell)\n self.assertMSONable(atoms_only)\n\n\nclass PPModelTest(PymatgenTest):\n def test_base(self):\n godby = PPModel.as_ppmodel(\"godby:12 eV\")\n # print(godby)\n # print(repr(godby))\n godby.to_abivars()\n self.assertTrue(godby)\n\n same_godby = PPModel.as_ppmodel(\"godby:\" + str(12.0 / Ha_to_eV))\n self.assertTrue(same_godby == godby)\n\n noppm = PPModel.get_noppmodel()\n\n self.assertFalse(noppm)\n self.assertTrue(noppm != godby)\n new_godby = PPModel.from_dict(godby.as_dict())\n self.assertTrue(new_godby == godby)\n\n # Test pickle\n self.serialize_with_pickle(godby)\n\n # Test dict methods\n self.assertMSONable(godby)\n"
] | [
[
"numpy.diag",
"numpy.linalg.solve",
"numpy.allclose",
"numpy.round",
"numpy.linalg.pinv",
"numpy.cross",
"numpy.array",
"scipy.linalg.polar"
],
[
"numpy.reshape",
"numpy.array",
"numpy.isclose"
],
[
"numpy.arange",
"numpy.identity",
"numpy.cos"
],
[
"numpy.reshape",
"numpy.eye"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
venom12138/active_tracking_rl | [
"813342c322f8f710fc0f9ccf2a5d0746f955144f"
] | [
"envs/gym-track2d/gym_track2d/envs/track_1v1.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nfrom gym_track2d.envs.generators import RandomMazeGenerator, RandomBlockMazeGenerator\nfrom gym_track2d.envs.navigator import Navigator, RamAgent\n\n\nclass Track1v1Env(gym.Env):\n metadata = {'render.modes': ['human', 'rgb_array']}\n\n def __init__(self,\n map_type='Block',\n pob_size=6,\n action_type='VonNeumann',\n obs_type='Partial',\n target_mode='PZR',\n live_display=True,\n render_trace=True,\n level=0):\n \"\"\"Initialize the maze. DType: list\"\"\"\n # Random seed with internal gym seeding\n self.seed()\n self.num_agents_max = self.num_agents = 2\n self.map_type = map_type\n self.level = level\n # Size of the partial observable window\n self.pob_size = pob_size\n self.render_trace = render_trace\n self.traces = []\n self.traces_relative = []\n self.action_type = action_type\n self.obs_type = obs_type\n self.target_mode = target_mode\n\n # If True, show the updated display each time render is called rather\n # than storing the frames and creating an animation at the end\n self.live_display = live_display\n\n self.state = None\n\n # Maze: 0: free space, 1: wall\n self.init_maze(self.map_type)\n\n # Action space\n tracker_action_space = self.define_action(self.action_type)\n target_action_space = self.define_action(self.action_type)\n self.action_space = [tracker_action_space, target_action_space]\n\n # Observation space\n tracker_obs_space = self.define_observation(self.obs_type)\n target_obs_space = self.define_observation(self.obs_type)\n self.observation_space = [tracker_obs_space, target_obs_space]\n\n # nav\n self.Target = []\n for i in range(self.num_agents-1):\n if 'Nav' in self.target_mode or 'RPF' in self.target_mode:\n self.Target.append(Navigator(self.action_space[i+1], self.maze_generator))\n if 'Ram' in self.target_mode:\n self.Target.append(RamAgent(self.action_space[i+1]))\n\n # Colormap: order of color is, free space, wall, agent, food, poison\n self.cmap = colors.ListedColormap(['white', 'black', 'blue', 'green', 'red', 'gray', 'yellow'])\n self.bounds = [0, 1, 2, 3, 4, 5, 6] # values for each color\n self.norm = colors.BoundaryNorm(self.bounds, self.cmap.N)\n self.C_step = 0\n\n def step(self, action):\n # Player 0: try to catch player 1\n # Player 1: try to reach the goal and avoid player 0\n old_state = self.state.copy()\n # Update current state\n rewards = np.zeros(self.num_agents)\n done = False\n action = list(action)\n # move agents\n for i in range(self.num_agents - 1):\n if 'Ram' in self.target_mode:\n action[i+1] = self.Target[i].step()\n if 'Nav' in self.target_mode or 'RPF' in self.target_mode:\n action[i+1], goal = self.Target[i].step(old_state[i + 1], self.maze_generator, None)\n\n for i in range(self.num_agents):\n self.state[i], self.C_collision[i] = self._next_state(self.state[i], int(action[i]),\n self.action_type)\n\n self.traces_relative = []\n for j in range(self.num_agents):\n self.traces_relative.append([np.array(self.init_states[i]) - np.array(self.init_states[j]) for i in\n range(self.num_agents)])\n d_all = np.array([np.linalg.norm(np.array(self.state[i]) - np.array(self.state[0])) for i in range(self.num_agents)])\n\n max_distance = float(self.pob_size)\n distance = d_all[1]\n\n r_track = 1 - 2*distance/max_distance\n r_track = max(r_track, -1) # [-1, 1]\n r_target = -r_track - self.w_p * max(distance - max_distance, 0)/max_distance\n r_target = max(r_target, -1)\n rewards[0] = r_track\n 
rewards[1] = r_target\n\n if distance <= max_distance:\n self.C_far = 0\n else:\n self.C_far += 1\n if self.C_far > 10:\n done = True\n\n self.C_reward += rewards\n self.C_step += 1\n\n # Additional info\n info = {}\n self.distance = info['distance'] = d_all[1]\n # Footprint: Record agent trajectory\n self.traces.append(self.state[1])\n obs = self._get_obs()\n info['traces'] = self.traces\n info['traces_relative'] = self.traces_relative\n if 'Nav' in self.target_mode or 'Ram' in self.target_mode:\n # 相当于只取前两个,也就是两个的obs\n obs = obs[:2]\n rewards = rewards[:2]\n # print(\"num_agents:{}\".format(self.num_agents))\n # print(\"obs:{}\".format(np.array(obs).shape))\n # print('rewards:{}'.format(rewards))\n # print(obs.shape)\n return obs, rewards, done, info\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n\n return [seed]\n\n def reset(self):\n # Reset maze\n self.init_maze(self.map_type)\n self.state = self.init_states\n # set target\n if 'Nav' in self.target_mode or 'RPF' in self.target_mode:\n for i in range(self.num_agents-1):\n self.Target[i].reset(self.init_states[i+1], self.goal_states[i+1], self.maze_generator)\n if 'Ram' in self.target_mode:\n for i in range(self.num_agents-1):\n self.Target[i].reset()\n\n # set target reward\n if self.target_mode == 'PZR':\n self.w_p = 1\n elif self.target_mode == 'Far':\n self.w_p = -0.5\n else:\n self.w_p = 0\n\n self.distance = np.sum(np.abs(np.array(self.state[0]) - np.array(self.state[1])))\n self.C_reward = np.zeros(self.num_agents)\n self.C_step = 0\n self.C_collision = np.zeros(self.num_agents)\n self.C_far = 0\n\n # Clean the list of ax_imgs, the buffer for generating videos\n self.ax_imgs = []\n # Clean the traces of the trajectory\n self.traces = [self.init_states[0]]\n self.traces_relative = [np.array(self.init_states[i]) - np.array(self.init_states[0]) for i in range(self.num_agents)]\n obs = self._get_obs()\n if 'Nav' in self.target_mode or 'Ram' in self.target_mode or 'RPF' in self.target_mode:\n obs = obs[:2]\n return obs\n\n def render(self, mode='human', close=False):\n import time\n time.sleep(0.03)\n if close:\n plt.close()\n return\n\n obs = self._get_full_obs()\n partial_obs = self._get_partial_obs(0, self.pob_size)\n\n # For rendering traces: Only for visualization, does not affect the observation data\n if self.render_trace:\n obs[list(zip(*self.traces[:-1]))] = 6\n\n # Create Figure for rendering\n if not hasattr(self, 'fig'): # initialize figure and plotting axes\n self.fig, (self.ax_full, self.ax_partial) = plt.subplots(nrows=1, ncols=2)\n self.ax_full.axis('off')\n self.ax_partial.axis('off')\n\n self.fig.show()\n if self.live_display:\n # Only create the image the first time\n if not hasattr(self, 'ax_full_img'):\n self.ax_full_img = self.ax_full.imshow(obs, cmap=self.cmap, norm=self.norm, animated=True)\n if not hasattr(self, 'ax_partial_img'):\n self.ax_partial_img = self.ax_partial.imshow(partial_obs, cmap=self.cmap, norm=self.norm, animated=True)\n # Update the image data for efficient live video\n self.ax_full_img.set_data(obs)\n self.ax_partial_img.set_data(partial_obs)\n else:\n # Create a new image each time to allow an animation to be created\n self.ax_full_img = self.ax_full.imshow(obs, cmap=self.cmap, norm=self.norm, animated=True)\n self.ax_partial_img = self.ax_partial.imshow(partial_obs, cmap=self.cmap, norm=self.norm, animated=True)\n\n plt.draw()\n\n if self.live_display:\n # Update the figure display immediately\n self.fig.canvas.draw()\n else:\n # Put in AxesImage buffer 
for video generation\n self.ax_imgs.append([self.ax_full_img, self.ax_partial_img]) # List of axes to update figure frame\n\n self.fig.set_dpi(200)\n\n return self.fig\n\n def init_maze(self, map_type):\n if map_type == 'Maze':\n if self.level > 0:\n r = self.level * 0.02\n else:\n r = .03*np.random.random()\n self.maze_generator = RandomMazeGenerator(width=80, height=80, complexity=r, density=r)\n elif map_type == 'Block':\n if self.level > 0:\n r = self.level * 0.05\n else:\n r = 0.15*np.random.random()\n self.maze_generator = RandomBlockMazeGenerator(maze_size=80, obstacle_ratio=r)\n elif map_type == 'Empty':\n self.maze_generator = RandomBlockMazeGenerator(maze_size=80, obstacle_ratio=0)\n self.maze = np.array(self.maze_generator.get_maze())\n self.maze_size = self.maze.shape\n if 'RPF' in self.target_mode:\n self.maze_generator.static_goals()\n self.goal_states = self.maze_generator.sample_goal(self.num_agents)\n self.init_states = self.maze_generator.sample_close_states(self.num_agents, 1)\n while self.goal_test(self.init_states[0]): # Goal check\n self.goal_states = self.maze_generator.sample_goal(self.num_agents)\n\n def define_action(self, action_type):\n if action_type == 'VonNeumann': # Von Neumann neighborhood\n num_actions = 4\n elif action_type == 'Moore': # Moore neighborhood\n num_actions = 8\n else:\n raise TypeError('Action type must be either \\'VonNeumann\\' or \\'Moore\\'')\n return spaces.Discrete(num_actions)\n\n def define_observation(self, obs_type):\n low_obs = 0 # Lowest integer in observation\n high_obs = 6 # Highest integer in observation\n if obs_type == 'Full':\n obs_space = spaces.Box(low=low_obs, high=high_obs,\n shape=(1, self.maze_size[0], self.maze_size[1]), dtype=np.float32)\n elif self.obs_type == 'Partial':\n obs_space = spaces.Box(low=low_obs, high=high_obs,\n shape=(1, self.pob_size*2+1, self.pob_size*2+1), dtype=np.float32)\n else:\n raise TypeError('Observation type must be either \\'full\\' or \\'partial\\'')\n return obs_space\n\n def goal_test(self, state):\n \"\"\"Return True if current state is a goal state.\"\"\"\n if type(self.goal_states[0]) == list:\n return list(state) in self.goal_states\n elif type(self.goal_states[0]) == tuple:\n return tuple(state) in self.goal_states\n\n def _next_state(self, state, action, action_type='VonNeumann'):\n \"\"\"Return the next state from a given state by taking a given action.\"\"\"\n\n # Transition table to define movement for each action\n if action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state, True\n else: # Valid move for 0, 2, 3, 4\n return new_state, False\n\n def _get_obs(self):\n if self.obs_type == 'Full':\n obs = [np.expand_dims(self._get_full_obs(), 0) for i in range(self.num_agents)]\n return np.array(obs)\n elif self.obs_type == 'Partial':\n obs = [np.expand_dims(self._get_partial_obs(i, self.pob_size), 0) for i in range(self.num_agents)]\n return np.array(obs)\n\n def _get_full_obs(self):\n \"\"\"Return a 2D array representation of maze.\"\"\"\n obs = np.array(self.maze)\n\n # Set current position\n for i in range(self.num_agents):\n if i < 2:\n color = 2+2*i\n else:\n color = 2 + np.random.randint(0, 3)\n 
obs[self.state[i][0]][self.state[i][1]] = color\n\n return obs\n\n def _get_partial_obs(self, id=0, size=1, vec=False):\n \"\"\"Get partial observable window according to Moore neighborhood\"\"\"\n # Get maze with indicated location of current position and goal positions\n maze = self._get_full_obs()\n maze[self.state[id][0]][self.state[id][1]] = 2+2*id\n pos = np.array(self.state[id])\n\n under_offset = np.min(pos - size)\n over_offset = np.min(len(maze) - (pos + size + 1))\n offset = np.min([under_offset, over_offset])\n\n if offset < 0: # Need padding\n maze = np.pad(maze, np.abs(offset), 'constant', constant_values=1)\n pos += np.abs(offset)\n maze_p = maze[pos[0] - size: pos[0] + size + 1, pos[1] - size: pos[1] + size + 1]\n if vec:\n maze_p = maze_p.reshape(self.v_len)\n return maze_p"
] | [
[
"matplotlib.colors.BoundaryNorm",
"numpy.random.random",
"numpy.abs",
"numpy.min",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.draw",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
djroxx2000/transformers | [
"77770ec79883343d32051cfb6a04f64523cd8df1",
"76cadb7943c8492ec481f4f3925e9e8793a32c9d"
] | [
"src/transformers/models/roberta/modeling_roberta.py",
"src/transformers/models/deberta/modeling_deberta.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch RoBERTa model. \"\"\"\n\nimport math\n\nimport torch\nimport torch.utils.checkpoint\nfrom packaging import version\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN, gelu\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_roberta import RobertaConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"roberta-base\"\n_CONFIG_FOR_DOC = \"RobertaConfig\"\n_TOKENIZER_FOR_DOC = \"RobertaTokenizer\"\n\nROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"roberta-base\",\n \"roberta-large\",\n \"roberta-large-mnli\",\n \"distilroberta-base\",\n \"roberta-base-openai-detector\",\n \"roberta-large-openai-detector\",\n # See all RoBERTa models at https://huggingface.co/models?filter=roberta\n]\n\n\nclass RobertaEmbeddings(nn.Module):\n \"\"\"\n Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\n \"\"\"\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n if version.parse(torch.__version__) > version.parse(\"1.6.0\"):\n self.register_buffer(\n \"token_type_ids\",\n torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),\n persistent=False,\n )\n\n # End copy\n self.padding_idx = config.pad_token_id\n self.position_embeddings = 
nn.Embedding(\n config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\n )\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if position_ids is None:\n if input_ids is not None:\n # Create the position ids from the input token ids. Any padded tokens remain padded.\n position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)\n else:\n position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)\n\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs\n # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves\n # issue #5664\n if token_type_ids is None:\n if hasattr(self, \"token_type_ids\"):\n buffered_token_type_ids = self.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n \"\"\"\n We are provided embeddings directly. 
We cannot infer which are padded so just generate sequential position ids.\n\n Args:\n inputs_embeds: torch.Tensor\n\n Returns: torch.Tensor\n \"\"\"\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n\n position_ids = torch.arange(\n self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device\n )\n return position_ids.unsqueeze(0).expand(input_shape)\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta\nclass RobertaSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can 
then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput\nclass RobertaSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# 
Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta\nclass RobertaAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = RobertaSelfAttention(config)\n self.output = RobertaSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass RobertaIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput\nclass RobertaOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta\nclass RobertaLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = RobertaAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = RobertaAttention(config)\n self.intermediate = RobertaIntermediate(config)\n self.output = RobertaOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n 
output_attentions=False,\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n past_key_value=self_attn_past_key_value,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n cross_attn_past_key_value,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta\nclass RobertaEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if self.gradient_checkpointing and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with gradient 
checkpointing. Setting `use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler\nclass RobertaPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass RobertaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = RobertaConfig\n base_model_prefix = \"roberta\"\n supports_gradient_checkpointing = True\n\n # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, RobertaEncoder):\n module.gradient_checkpointing = value\n\n def update_keys_to_ignore(self, config, del_keys_to_ignore):\n \"\"\"Remove some keys from ignore list\"\"\"\n if not config.tie_word_embeddings:\n # must make a new list, or the class variable gets modified!\n self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]\n 
self._keys_to_ignore_on_load_missing = [\n k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore\n ]\n\n\nROBERTA_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nROBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaModel(RobertaPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\n Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n\n .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762\n\n \"\"\"\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = RobertaEmbeddings(config)\n self.encoder = RobertaEncoder(config)\n\n self.pooler = RobertaPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n # Copied from transformers.models.bert.modeling_bert.BertModel.forward\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. 
Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n batch_size, seq_length = input_shape\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has 
shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. \"\"\", ROBERTA_START_DOCSTRING\n)\nclass RobertaForCausalLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_save = [r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`\")\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n # The LM head weights require special treatment only when they are tied with the word embeddings\n self.update_keys_to_ignore(config, [\"lm_head.decoder.weight\"])\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n Returns:\n\n Example::\n\n >>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig\n >>> import torch\n\n >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n >>> config = RobertaConfig.from_pretrained(\"roberta-base\")\n >>> config.is_decoder = True\n >>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n 
hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n\n\n@add_start_docstrings(\"\"\"RoBERTa Model with a `language modeling` head on top. \"\"\", ROBERTA_START_DOCSTRING)\nclass RobertaForMaskedLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_save = [r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n # The LM head weights require special treatment only when they are tied with the word embeddings\n self.update_keys_to_ignore(config, [\"lm_head.decoder.weight\"])\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n mask=\"<mask>\",\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaLMHead(nn.Module):\n \"\"\"Roberta Head for masked language modeling.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n self.decoder.bias = self.bias\n\n def forward(self, features, **kwargs):\n x = self.dense(features)\n x = gelu(x)\n x = self.layer_norm(x)\n\n # project back to size of vocabulary with bias\n x = self.decoder(x)\n\n return x\n\n def _tie_weights(self):\n # To tie those two weights if they get disconnected (on TPU or when the bias is resized)\n self.bias = self.decoder.bias\n\n\n@add_start_docstrings(\n \"\"\"\n RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForSequenceClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.classifier = RobertaClassificationHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. 
Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForMultipleChoice(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.roberta = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n token_type_ids=None,\n attention_mask=None,\n labels=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n flat_inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.roberta(\n flat_input_ids,\n position_ids=flat_position_ids,\n token_type_ids=flat_token_type_ids,\n attention_mask=flat_attention_mask,\n head_mask=head_mask,\n inputs_embeds=flat_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForTokenClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. 
to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForQuestionAnswering(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\ndef create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n \"\"\"\n Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\n are ignored. This is modified from fairseq's `utils.make_positions`.\n\n Args:\n x: torch.Tensor x:\n\n Returns: torch.Tensor\n \"\"\"\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n mask = input_ids.ne(padding_idx).int()\n incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n return incremental_indices.long() + padding_idx\n",
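A minimal usage sketch for the `create_position_ids_from_input_ids` helper defined at the end of the file above (not part of the original file: the three-line function body is copied from it, while `padding_idx=1` and the example token ids are illustrative assumptions):

import torch

def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    # Non-padding tokens get positions padding_idx + 1, padding_idx + 2, ...
    # while padding tokens keep padding_idx as their position id.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # e.g. "<s> Hello world </s> <pad> <pad>" (illustrative ids)
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 5, 1, 1]])  -> positions start at padding_idx + 1; pads stay at padding_idx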
"# coding=utf-8\n# Copyright 2020 Microsoft and the Hugging Face Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch DeBERTa model. \"\"\"\n\nimport math\nfrom collections.abc import Sequence\n\nimport torch\nfrom torch import _softmax_backward_data, nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ...modeling_outputs import (\n BaseModelOutput,\n MaskedLMOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_deberta import DebertaConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"DebertaConfig\"\n_TOKENIZER_FOR_DOC = \"DebertaTokenizer\"\n_CHECKPOINT_FOR_DOC = \"microsoft/deberta-base\"\n\nDEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/deberta-base\",\n \"microsoft/deberta-large\",\n \"microsoft/deberta-xlarge\",\n \"microsoft/deberta-base-mnli\",\n \"microsoft/deberta-large-mnli\",\n \"microsoft/deberta-xlarge-mnli\",\n]\n\n\nclass ContextPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)\n self.dropout = StableDropout(config.pooler_dropout)\n self.config = config\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n\n context_token = hidden_states[:, 0]\n context_token = self.dropout(context_token)\n pooled_output = self.dense(context_token)\n pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)\n return pooled_output\n\n @property\n def output_dim(self):\n return self.config.hidden_size\n\n\nclass XSoftmax(torch.autograd.Function):\n \"\"\"\n Masked Softmax which is optimized for saving memory\n\n Args:\n input (:obj:`torch.tensor`): The input tensor that will apply softmax.\n mask (:obj:`torch.IntTensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\n dim (int): The dimension that will apply softmax\n\n Example::\n\n >>> import torch\n >>> from transformers.models.deberta.modeling_deberta import XSoftmax\n\n >>> # Make a tensor\n >>> x = torch.randn([4,20,100])\n\n >>> # Create a mask\n >>> mask = (x>0).int()\n\n >>> y = XSoftmax.apply(x, mask, dim=-1)\n \"\"\"\n\n @staticmethod\n def forward(self, input, mask, dim):\n self.dim = dim\n rmask = ~(mask.bool())\n\n output = input.masked_fill(rmask, float(\"-inf\"))\n output = torch.softmax(output, self.dim)\n output.masked_fill_(rmask, 0)\n self.save_for_backward(output)\n return output\n\n @staticmethod\n def backward(self, grad_output):\n (output,) = self.saved_tensors\n inputGrad = _softmax_backward_data(grad_output, output, self.dim, output)\n return inputGrad, None, None\n\n\nclass DropoutContext(object):\n def __init__(self):\n self.dropout = 0\n 
self.mask = None\n self.scale = 1\n self.reuse_mask = True\n\n\ndef get_mask(input, local_context):\n if not isinstance(local_context, DropoutContext):\n dropout = local_context\n mask = None\n else:\n dropout = local_context.dropout\n dropout *= local_context.scale\n mask = local_context.mask if local_context.reuse_mask else None\n\n if dropout > 0 and mask is None:\n mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool()\n\n if isinstance(local_context, DropoutContext):\n if local_context.mask is None:\n local_context.mask = mask\n\n return mask, dropout\n\n\nclass XDropout(torch.autograd.Function):\n \"\"\"Optimized dropout function to save computation and memory by using mask operation instead of multiplication.\"\"\"\n\n @staticmethod\n def forward(ctx, input, local_ctx):\n mask, dropout = get_mask(input, local_ctx)\n ctx.scale = 1.0 / (1 - dropout)\n if dropout > 0:\n ctx.save_for_backward(mask)\n return input.masked_fill(mask, 0) * ctx.scale\n else:\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.scale > 1:\n (mask,) = ctx.saved_tensors\n return grad_output.masked_fill(mask, 0) * ctx.scale, None\n else:\n return grad_output, None\n\n\nclass StableDropout(nn.Module):\n \"\"\"\n Optimized dropout module for stabilizing the training\n\n Args:\n drop_prob (float): the dropout probabilities\n \"\"\"\n\n def __init__(self, drop_prob):\n super().__init__()\n self.drop_prob = drop_prob\n self.count = 0\n self.context_stack = None\n\n def forward(self, x):\n \"\"\"\n Call the module\n\n Args:\n x (:obj:`torch.tensor`): The input tensor to apply dropout\n \"\"\"\n if self.training and self.drop_prob > 0:\n return XDropout.apply(x, self.get_context())\n return x\n\n def clear_context(self):\n self.count = 0\n self.context_stack = None\n\n def init_context(self, reuse_mask=True, scale=1):\n if self.context_stack is None:\n self.context_stack = []\n self.count = 0\n for c in self.context_stack:\n c.reuse_mask = reuse_mask\n c.scale = scale\n\n def get_context(self):\n if self.context_stack is not None:\n if self.count >= len(self.context_stack):\n self.context_stack.append(DropoutContext())\n ctx = self.context_stack[self.count]\n ctx.dropout = self.drop_prob\n self.count += 1\n return ctx\n else:\n return self.drop_prob\n\n\nclass DebertaLayerNorm(nn.Module):\n \"\"\"LayerNorm module in the TF style (epsilon inside the square root).\"\"\"\n\n def __init__(self, size, eps=1e-12):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(size))\n self.bias = nn.Parameter(torch.zeros(size))\n self.variance_epsilon = eps\n\n def forward(self, hidden_states):\n input_type = hidden_states.dtype\n hidden_states = hidden_states.float()\n mean = hidden_states.mean(-1, keepdim=True)\n variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)\n hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)\n hidden_states = hidden_states.to(input_type)\n y = self.weight * hidden_states + self.bias\n return y\n\n\nclass DebertaSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass 
DebertaAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = DisentangledSelfAttention(config)\n self.output = DebertaSelfOutput(config)\n self.config = config\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n self_output = self.self(\n hidden_states,\n attention_mask,\n return_att,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if return_att:\n self_output, att_matrix = self_output\n if query_states is None:\n query_states = hidden_states\n attention_output = self.output(self_output, query_states)\n\n if return_att:\n return (attention_output, att_matrix)\n else:\n return attention_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta\nclass DebertaIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass DebertaOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass DebertaLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = DebertaAttention(config)\n self.intermediate = DebertaIntermediate(config)\n self.output = DebertaOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n attention_output = self.attention(\n hidden_states,\n attention_mask,\n return_att=return_att,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if return_att:\n attention_output, att_matrix = attention_output\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n if return_att:\n return (layer_output, att_matrix)\n else:\n return layer_output\n\n\nclass DebertaEncoder(nn.Module):\n \"\"\"Modified BertEncoder with relative position bias support\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])\n self.relative_attention = getattr(config, \"relative_attention\", False)\n if self.relative_attention:\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)\n\n def get_rel_embedding(self):\n rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None\n return rel_embeddings\n\n def get_attention_mask(self, 
attention_mask):\n if attention_mask.dim() <= 2:\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)\n attention_mask = attention_mask.byte()\n elif attention_mask.dim() == 3:\n attention_mask = attention_mask.unsqueeze(1)\n\n return attention_mask\n\n def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):\n if self.relative_attention and relative_pos is None:\n q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)\n relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)\n return relative_pos\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_hidden_states=True,\n output_attentions=False,\n query_states=None,\n relative_pos=None,\n return_dict=True,\n ):\n attention_mask = self.get_attention_mask(attention_mask)\n relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)\n\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[0]\n else:\n next_kv = hidden_states\n rel_embeddings = self.get_rel_embedding()\n for i, layer_module in enumerate(self.layer):\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n hidden_states = layer_module(\n next_kv,\n attention_mask,\n output_attentions,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if output_attentions:\n hidden_states, att_m = hidden_states\n\n if query_states is not None:\n query_states = hidden_states\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None\n else:\n next_kv = hidden_states\n\n if output_attentions:\n all_attentions = all_attentions + (att_m,)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\ndef build_relative_position(query_size, key_size, device):\n \"\"\"\n Build relative position according to the query and key\n\n We assume the absolute position of query :math:`P_q` is range from (0, query_size) and the absolute position of key\n :math:`P_k` is range from (0, key_size), The relative positions from query to key is :math:`R_{q \\\\rightarrow k} =\n P_q - P_k`\n\n Args:\n query_size (int): the length of query\n key_size (int): the length of key\n\n Return:\n :obj:`torch.LongTensor`: A tensor with shape [1, query_size, key_size]\n\n \"\"\"\n\n q_ids = torch.arange(query_size, dtype=torch.long, device=device)\n k_ids = torch.arange(key_size, dtype=torch.long, device=device)\n rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)\n rel_pos_ids = rel_pos_ids[:query_size, :]\n rel_pos_ids = rel_pos_ids.unsqueeze(0)\n return rel_pos_ids\n\n\[email protected]\ndef c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):\n return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])\n\n\[email protected]\ndef p2c_dynamic_expand(c2p_pos, query_layer, key_layer):\n return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])\n\n\[email protected]\ndef 
pos_dynamic_expand(pos_index, p2c_att, key_layer):\n return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))\n\n\nclass DisentangledSelfAttention(nn.Module):\n \"\"\"\n Disentangled self-attention module\n\n Parameters:\n config (:obj:`str`):\n A model config class instance with the configuration to build a new model. The schema is similar to\n `BertConfig`, for more details, please refer :class:`~transformers.DebertaConfig`\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)\n self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))\n self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))\n self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []\n\n self.relative_attention = getattr(config, \"relative_attention\", False)\n self.talking_head = getattr(config, \"talking_head\", False)\n\n if self.talking_head:\n self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)\n self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)\n\n if self.relative_attention:\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n self.pos_dropout = StableDropout(config.hidden_dropout_prob)\n\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = StableDropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n \"\"\"\n Call the module\n\n Args:\n hidden_states (:obj:`torch.FloatTensor`):\n Input states to the module usually the output from previous layer, it will be the Q,K and V in\n `Attention(Q,K,V)`\n\n attention_mask (:obj:`torch.ByteTensor`):\n An attention mask matrix of shape [`B`, `N`, `N`] where `B` is the batch size, `N` is the maximum\n sequence length in which element [i,j] = `1` means the `i` th token in the input can attend to the `j`\n th token.\n\n return_att (:obj:`bool`, optional):\n Whether return the attention matrix.\n\n query_states (:obj:`torch.FloatTensor`, optional):\n The `Q` state in `Attention(Q,K,V)`.\n\n relative_pos (:obj:`torch.LongTensor`):\n The relative position encoding between the tokens in the sequence. It's of shape [`B`, `N`, `N`] with\n values ranging in [`-max_relative_positions`, `max_relative_positions`].\n\n rel_embeddings (:obj:`torch.FloatTensor`):\n The embedding of relative distances. 
It's a tensor of shape [:math:`2 \\\\times\n \\\\text{max_relative_positions}`, `hidden_size`].\n\n\n \"\"\"\n if query_states is None:\n qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)\n query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)\n else:\n\n def linear(w, b, x):\n if b is not None:\n return torch.matmul(x, w.t()) + b.t()\n else:\n return torch.matmul(x, w.t()) # + b.t()\n\n ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)\n qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]\n qkvb = [None] * 3\n\n q = linear(qkvw[0], qkvb[0], query_states)\n k, v = [linear(qkvw[i], qkvb[i], hidden_states) for i in range(1, 3)]\n query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]\n\n query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])\n value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])\n\n rel_att = None\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n scale_factor = 1 + len(self.pos_att_type)\n scale = math.sqrt(query_layer.size(-1) * scale_factor)\n query_layer = query_layer / scale\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n if self.relative_attention:\n rel_embeddings = self.pos_dropout(rel_embeddings)\n rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)\n\n if rel_att is not None:\n attention_scores = attention_scores + rel_att\n\n # bxhxlxd\n if self.talking_head:\n attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n\n attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)\n attention_probs = self.dropout(attention_probs)\n if self.talking_head:\n attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (-1,)\n context_layer = context_layer.view(*new_context_layer_shape)\n if return_att:\n return (context_layer, attention_probs)\n else:\n return context_layer\n\n def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):\n if relative_pos is None:\n q = query_layer.size(-2)\n relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)\n if relative_pos.dim() == 2:\n relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)\n elif relative_pos.dim() == 3:\n relative_pos = relative_pos.unsqueeze(1)\n # bxhxqxk\n elif relative_pos.dim() != 4:\n raise ValueError(f\"Relative position ids must be of dim 2 or 3 or 4. 
{relative_pos.dim()}\")\n\n att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)\n relative_pos = relative_pos.long().to(query_layer.device)\n rel_embeddings = rel_embeddings[\n self.max_relative_positions - att_span : self.max_relative_positions + att_span, :\n ].unsqueeze(0)\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_key_layer = self.pos_proj(rel_embeddings)\n pos_key_layer = self.transpose_for_scores(pos_key_layer)\n\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_query_layer = self.pos_q_proj(rel_embeddings)\n pos_query_layer = self.transpose_for_scores(pos_query_layer)\n\n score = 0\n # content->position\n if \"c2p\" in self.pos_att_type:\n c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))\n c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)\n c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))\n score += c2p_att\n\n # position->content\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_query_layer /= math.sqrt(pos_query_layer.size(-1) * scale_factor)\n if query_layer.size(-2) != key_layer.size(-2):\n r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)\n else:\n r_pos = relative_pos\n p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)\n if query_layer.size(-2) != key_layer.size(-2):\n pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)\n\n if \"p2c\" in self.pos_att_type:\n p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2))\n p2c_att = torch.gather(\n p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)\n ).transpose(-1, -2)\n if query_layer.size(-2) != key_layer.size(-2):\n p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))\n score += p2c_att\n\n return score\n\n\nclass DebertaEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n pad_token_id = getattr(config, \"pad_token_id\", 0)\n self.embedding_size = getattr(config, \"embedding_size\", config.hidden_size)\n self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)\n\n self.position_biased_input = getattr(config, \"position_biased_input\", True)\n if not self.position_biased_input:\n self.position_embeddings = None\n else:\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)\n\n if config.type_vocab_size > 0:\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)\n\n if self.embedding_size != config.hidden_size:\n self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)\n self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n 
if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.position_embeddings is not None:\n position_embeddings = self.position_embeddings(position_ids.long())\n else:\n position_embeddings = torch.zeros_like(inputs_embeds)\n\n embeddings = inputs_embeds\n if self.position_biased_input:\n embeddings += position_embeddings\n if self.config.type_vocab_size > 0:\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings += token_type_embeddings\n\n if self.embedding_size != self.config.hidden_size:\n embeddings = self.embed_proj(embeddings)\n\n embeddings = self.LayerNorm(embeddings)\n\n if mask is not None:\n if mask.dim() != embeddings.dim():\n if mask.dim() == 4:\n mask = mask.squeeze(1).squeeze(1)\n mask = mask.unsqueeze(2)\n mask = mask.to(embeddings.dtype)\n\n embeddings = embeddings * mask\n\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass DebertaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = DebertaConfig\n base_model_prefix = \"deberta\"\n _keys_to_ignore_on_load_missing = [\"position_ids\"]\n _keys_to_ignore_on_load_unexpected = [\"position_embeddings\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n\nDEBERTA_START_DOCSTRING = r\"\"\"\n The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention\n <https://arxiv.org/abs/2006.03654>`_ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build on top of\n BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two\n improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.```\n\n\n Parameters:\n config (:class:`~transformers.DebertaConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nDEBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.DebertaTokenizer`. See\n :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? 
<../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaModel(DebertaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = DebertaEmbeddings(config)\n self.encoder = DebertaEncoder(config)\n self.z_steps = 0\n self.config = config\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.word_embeddings = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n raise NotImplementedError(\"The prune function is not implemented in DeBERTa model.\")\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n mask=attention_mask,\n inputs_embeds=inputs_embeds,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask,\n output_hidden_states=True,\n output_attentions=output_attentions,\n return_dict=return_dict,\n )\n encoded_layers = encoder_outputs[1]\n\n if self.z_steps > 1:\n hidden_states = encoded_layers[-2]\n layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]\n query_states = encoded_layers[-1]\n rel_embeddings = self.encoder.get_rel_embedding()\n attention_mask = self.encoder.get_attention_mask(attention_mask)\n rel_pos = self.encoder.get_rel_pos(embedding_output)\n for layer in layers[1:]:\n query_states = layer(\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=query_states,\n relative_pos=rel_pos,\n rel_embeddings=rel_embeddings,\n )\n encoded_layers.append(query_states)\n\n sequence_output = encoded_layers[-1]\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]\n\n return BaseModelOutput(\n last_hidden_state=sequence_output,\n hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"DeBERTa Model with a `language modeling` head on top. 
\"\"\", DEBERTA_START_DOCSTRING)\nclass DebertaForMaskedLM(DebertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.deberta = DebertaModel(config)\n self.cls = DebertaOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta\nclass DebertaPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta\nclass DebertaLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = DebertaPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, 
bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta\nclass DebertaOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = DebertaLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaForSequenceClassification(DebertaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n num_labels = getattr(config, \"num_labels\", 2)\n self.num_labels = num_labels\n\n self.deberta = DebertaModel(config)\n self.pooler = ContextPooler(config)\n output_dim = self.pooler.output_dim\n\n self.classifier = nn.Linear(output_dim, num_labels)\n drop_out = getattr(config, \"cls_dropout\", None)\n drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out\n self.dropout = StableDropout(drop_out)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.deberta.get_input_embeddings()\n\n def set_input_embeddings(self, new_embeddings):\n self.deberta.set_input_embeddings(new_embeddings)\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n encoder_layer = outputs[0]\n pooled_output = self.pooler(encoder_layer)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # regression task\n loss_fn = nn.MSELoss()\n logits = logits.view(-1).to(labels.dtype)\n loss = loss_fn(logits, labels.view(-1))\n elif labels.dim() == 1 or labels.size(-1) == 1:\n label_index = (labels >= 0).nonzero()\n labels = labels.long()\n if label_index.size(0) > 0:\n labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))\n labels = torch.gather(labels, 0, label_index.view(-1))\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))\n else:\n loss = torch.tensor(0).to(logits)\n else:\n log_softmax = nn.LogSoftmax(-1)\n loss = -((log_softmax(logits) * labels).sum(-1)).mean()\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n else:\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaForTokenClassification(DebertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaForQuestionAnswering(DebertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.zeros",
"torch.cat",
"torch.einsum",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.matmul",
"torch.tanh",
"torch.nn.BCEWithLogitsLoss",
"torch.tensor",
"torch.arange",
"torch.cumsum",
"torch.nn.MSELoss"
],
[
"torch.softmax",
"torch.nn.Dropout",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.nn.LogSoftmax",
"torch.zeros",
"torch.sqrt",
"torch.empty_like",
"torch.zeros_like",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch._softmax_backward_data",
"torch.nn.Linear",
"torch.matmul",
"torch.tensor",
"torch.arange",
"torch.clamp",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ranigb/Set-Tree | [
"fa3971f9a8ef98dbfd0f6de654efcde3006a197b",
"fa3971f9a8ef98dbfd0f6de654efcde3006a197b"
] | [
"settree/set_rf.py",
"nodegraphtree/graphtree.py"
] | [
"import numbers\nfrom warnings import catch_warnings, simplefilter, warn\nimport threading\n\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nfrom scipy.sparse import issparse\nfrom scipy.sparse import hstack as sparse_hstack\nfrom joblib import Parallel, delayed\n\nfrom sklearn.base import ClassifierMixin, RegressorMixin, MultiOutputMixin\nfrom sklearn.metrics import r2_score\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.tree import (DecisionTreeClassifier, DecisionTreeRegressor,\n ExtraTreeClassifier, ExtraTreeRegressor)\nfrom sklearn.tree._tree import DTYPE, DOUBLE\nfrom sklearn.utils import check_random_state, check_array, compute_sample_weight\nfrom sklearn.exceptions import DataConversionWarning\nfrom sklearn.ensemble._base import BaseEnsemble, _partition_estimators\nfrom sklearn.utils.fixes import _joblib_parallel_args\nfrom sklearn.utils.multiclass import check_classification_targets\nfrom sklearn.utils.validation import check_is_fitted, _check_sample_weight\nfrom sklearn.utils.validation import _deprecate_positional_args\n\nfrom settree.set_tree import SetTree\nfrom settree.set_data import OPERATIONS\n\n__all__ = [\"SetRandomForestClassifier\",\n \"SetRandomForestRegressor\"]\n\nMAX_INT = np.iinfo(np.int32).max\n\n\ndef _get_n_samples_bootstrap(n_samples, max_samples):\n \"\"\"\n Get the number of samples in a bootstrap sample.\n Parameters\n ----------\n n_samples : int\n Number of samples in the dataset.\n max_samples : int or float\n The maximum number of samples to draw from the total available:\n - if float, this indicates a fraction of the total and should be\n the interval `(0, 1)`;\n - if int, this indicates the exact number of samples;\n - if None, this indicates the total number of samples.\n Returns\n -------\n n_samples_bootstrap : int\n The total number of samples to draw for the bootstrap sample.\n \"\"\"\n if max_samples is None:\n return n_samples\n\n if isinstance(max_samples, numbers.Integral):\n if not (1 <= max_samples <= n_samples):\n msg = \"`max_samples` must be in range 1 to {} but got value {}\"\n raise ValueError(msg.format(n_samples, max_samples))\n return max_samples\n\n if isinstance(max_samples, numbers.Real):\n if not (0 < max_samples < 1):\n msg = \"`max_samples` must be in range (0, 1) but got value {}\"\n raise ValueError(msg.format(max_samples))\n return int(round(n_samples * max_samples))\n\n msg = \"`max_samples` should be int or float, but got type '{}'\"\n raise TypeError(msg.format(type(max_samples)))\n\n\ndef _generate_sample_indices(random_state, n_samples, n_samples_bootstrap):\n \"\"\"\n Private function used to _parallel_build_trees function.\"\"\"\n\n random_instance = check_random_state(random_state)\n sample_indices = random_instance.randint(0, n_samples, n_samples_bootstrap)\n\n return sample_indices\n\n\ndef _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):\n \"\"\"\n Private function used to forest._set_oob_score function.\"\"\"\n sample_indices = _generate_sample_indices(random_state, n_samples,\n n_samples_bootstrap)\n sample_counts = np.bincount(sample_indices, minlength=n_samples)\n unsampled_mask = sample_counts == 0\n indices_range = np.arange(n_samples)\n unsampled_indices = indices_range[unsampled_mask]\n\n return unsampled_indices\n\n\ndef _parallel_build_trees(tree, forest, X_set, y, sample_weight, tree_idx, n_trees,\n verbose=0, class_weight=None,\n n_samples_bootstrap=None):\n \"\"\"\n Private function used to fit a single tree in parallel.\"\"\"\n if verbose > 
1:\n print(\"building tree %d of %d\" % (tree_idx + 1, n_trees))\n\n if forest.bootstrap:\n n_samples = X_set.shape[0]\n if sample_weight is None:\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n else:\n curr_sample_weight = sample_weight.copy()\n\n indices = _generate_sample_indices(tree.random_state, n_samples,\n n_samples_bootstrap)\n\n X_subset = X_set.get_subset(indices)\n y_subset = y.take(indices)\n sample_weights_subset = None if sample_weight is None else curr_sample_weight.take(indices)\n\n # todo: currently not supporting those options\n # sample_counts = np.bincount(indices, minlength=n_samples)\n # curr_sample_weight *= sample_counts\n #\n # if class_weight == 'subsample':\n # with catch_warnings():\n # simplefilter('ignore', DeprecationWarning)\n # curr_sample_weight *= compute_sample_weight('auto', y,\n # indices=indices)\n # elif class_weight == 'balanced_subsample':\n # curr_sample_weight *= compute_sample_weight('balanced', y,\n # indices=indices)\n\n tree.fit(X_subset, y_subset, sample_weights_subset)\n else:\n tree.fit(X_set, y, sample_weight)\n\n return tree\n\n\nclass BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta):\n \"\"\"\n Base class for forests of trees.\n Warning: This class should not be used directly. Use derived classes\n instead.\n \"\"\"\n\n @abstractmethod\n def __init__(self,\n base_estimator,\n n_estimators=100, *,\n estimator_params=tuple(),\n bootstrap=False,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n class_weight=None,\n max_samples=None):\n super().__init__(\n base_estimator=base_estimator,\n n_estimators=n_estimators,\n estimator_params=estimator_params)\n\n self.bootstrap = bootstrap\n self.oob_score = oob_score\n self.n_jobs = n_jobs\n self.random_state = random_state\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.max_samples = max_samples\n\n def apply(self, X_set):\n \"\"\"\n Apply trees in the forest to X, return leaf indices.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n X_leaves : ndarray of shape (n_samples, n_estimators)\n For each datapoint x in X and for each tree in the forest,\n return the index of the leaf x ends up in.\n \"\"\"\n #X = self._validate_X_predict(X)\n results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(prefer=\"threads\"))(\n delayed(tree.apply)(X_set)\n for tree in self.estimators_)\n\n return np.array(results).T\n\n def decision_path(self, X_set):\n # todo currently not working\n\n \"\"\"\n Return the decision path in the forest.\n .. versionadded:: 0.18\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n indicator : sparse matrix of shape (n_samples, n_nodes)\n Return a node indicator matrix where non zero elements indicates\n that the samples goes through the nodes. 
The matrix is of CSR\n format.\n n_nodes_ptr : ndarray of shape (n_estimators + 1,)\n The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]\n gives the indicator value for the i-th estimator.\n \"\"\"\n #X = self._validate_X_predict(X)\n indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(prefer='threads'))(\n delayed(tree.decision_path)(X_set)\n for tree in self.estimators_)\n\n n_nodes = [0]\n n_nodes.extend([i.shape[1] for i in indicators])\n n_nodes_ptr = np.array(n_nodes).cumsum()\n\n return sparse_hstack(indicators).tocsr(), n_nodes_ptr\n\n def fit(self, X_set, y, sample_weight=None):\n # Validate or convert input data\n\n if issparse(y):\n raise ValueError(\n \"sparse multilabel-indicator for y is not supported.\"\n )\n # X, y = self._validate_data(X, y, multi_output=True,\n # accept_sparse=\"csc\", dtype=DTYPE)\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X_set)\n\n # Remap output\n self.n_features_ = X_set.shape[1]\n\n y = np.atleast_1d(y)\n if y.ndim == 2 and y.shape[1] == 1:\n warn(\"A column-vector y was passed when a 1d array was\"\n \" expected. Please change the shape of y to \"\n \"(n_samples,), for example using ravel().\",\n DataConversionWarning, stacklevel=2)\n\n if y.ndim == 1:\n # reshape is necessary to preserve the data contiguity against vs\n # [:, np.newaxis] that does not.\n y = np.reshape(y, (-1, 1))\n\n self.n_outputs_ = y.shape[1]\n\n y, expanded_class_weight = self._validate_y_class_weight(y)\n\n # todo: the default was to cast y into float - keep it with it's current dtype\n #if getattr(y, \"dtype\", None) != DOUBLE or not y.flags.contiguous:\n # y = np.ascontiguousarray(y, dtype=DOUBLE)\n\n if expanded_class_weight is not None:\n if sample_weight is not None:\n sample_weight = sample_weight * expanded_class_weight\n else:\n sample_weight = expanded_class_weight\n\n # Get bootstrap sample size\n n_samples_bootstrap = _get_n_samples_bootstrap(\n n_samples=X_set.shape[0],\n max_samples=self.max_samples\n )\n\n # Check parameters\n self._validate_estimator()\n\n if not self.bootstrap and self.oob_score:\n raise ValueError(\"Out of bag estimation only available\"\n \" if bootstrap=True\")\n\n random_state = check_random_state(self.random_state)\n\n if not self.warm_start or not hasattr(self, \"estimators_\"):\n # Free allocated memory, if any\n self.estimators_ = []\n\n n_more_estimators = self.n_estimators - len(self.estimators_)\n\n if n_more_estimators < 0:\n raise ValueError('n_estimators=%d must be larger or equal to '\n 'len(estimators_)=%d when warm_start==True'\n % (self.n_estimators, len(self.estimators_)))\n\n elif n_more_estimators == 0:\n warn(\"Warm-start fitting without increasing n_estimators does not \"\n \"fit new trees.\")\n else:\n if self.warm_start and len(self.estimators_) > 0:\n # We draw from the random state to get the random state we\n # would have got if we hadn't used a warm_start.\n random_state.randint(MAX_INT, size=len(self.estimators_))\n\n trees = [self._make_estimator(append=False,\n random_state=random_state)\n for i in range(n_more_estimators)]\n\n # Parallel loop: we prefer the threading backend as the Cython code\n # for fitting the trees is internally releasing the Python GIL\n # making threading more efficient than multiprocessing in\n # that case. 
However, for joblib 0.12+ we respect any\n # parallel_backend contexts set at a higher level,\n # since correctness does not rely on using threads.\n trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(prefer='threads'))(\n delayed(_parallel_build_trees)(\n t, self, X_set, y, sample_weight, i, len(trees),\n verbose=self.verbose, class_weight=self.class_weight,\n n_samples_bootstrap=n_samples_bootstrap)\n for i, t in enumerate(trees))\n\n # Collect newly grown trees\n self.estimators_.extend(trees)\n\n if self.oob_score:\n self._set_oob_score(X_set, y)\n\n # Decapsulate classes_ attributes\n if hasattr(self, \"classes_\") and self.n_outputs_ == 1:\n self.n_classes_ = self.n_classes_[0]\n self.classes_ = self.classes_[0]\n\n return self\n\n @abstractmethod\n def _set_oob_score(self, X_set, y):\n \"\"\"\n Calculate out of bag predictions and score.\"\"\"\n\n def _validate_y_class_weight(self, y):\n # Default implementation\n return y, None\n\n def _validate_X_predict(self, X):\n \"\"\"\n Validate X whenever one tries to predict, apply, predict_proba.\"\"\"\n check_is_fitted(self)\n\n return self.estimators_[0]._validate_X_predict(X, check_input=True)\n\n @property\n def feature_importances_(self):\n \"\"\"\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance.\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n Returns\n -------\n feature_importances_ : ndarray of shape (n_features,)\n The values of this array sum to 1, unless all trees are single node\n trees consisting of only the root node, in which case it will be an\n array of zeros.\n \"\"\"\n check_is_fitted(self)\n\n all_importances = Parallel(n_jobs=self.n_jobs,\n **_joblib_parallel_args(prefer='threads'))(\n delayed(getattr)(tree, 'feature_importances_')\n for tree in self.estimators_ if tree.tree_.node_count > 1)\n\n if not all_importances:\n return np.zeros(self.n_features_, dtype=np.float64)\n\n all_importances = np.mean(all_importances,\n axis=0, dtype=np.float64)\n return all_importances / np.sum(all_importances)\n\n\ndef _accumulate_prediction(predict, X_set, out, lock):\n \"\"\"\n This is a utility function for joblib's Parallel.\n It can't go locally in ForestClassifier or ForestRegressor, because joblib\n complains that it cannot pickle it when placed there.\n \"\"\"\n prediction = predict(X_set)\n with lock:\n if len(out) == 1:\n out[0] += prediction\n else:\n for i in range(len(out)):\n out[i] += prediction[i]\n\n\nclass SetForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta):\n \"\"\"\n Base class for forest of trees-based classifiers.\n Warning: This class should not be used directly. 
Use derived classes\n instead.\n \"\"\"\n\n @abstractmethod\n def __init__(self,\n base_estimator,\n n_estimators=100, *,\n estimator_params=tuple(),\n bootstrap=False,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n class_weight=None,\n max_samples=None):\n super().__init__(\n base_estimator,\n n_estimators=n_estimators,\n estimator_params=estimator_params,\n bootstrap=bootstrap,\n oob_score=oob_score,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n warm_start=warm_start,\n class_weight=class_weight,\n max_samples=max_samples)\n\n def _set_oob_score(self, X_set, y):\n \"\"\"\n Compute out-of-bag score.\"\"\"\n #X = check_array(X, dtype=DTYPE, accept_sparse='csr')\n\n n_classes_ = self.n_classes_\n n_samples = y.shape[0]\n\n oob_decision_function = []\n oob_score = 0.0\n predictions = [np.zeros((n_samples, n_classes_[k]))\n for k in range(self.n_outputs_)]\n\n n_samples_bootstrap = _get_n_samples_bootstrap(\n n_samples, self.max_samples\n )\n\n for estimator in self.estimators_:\n unsampled_indices = _generate_unsampled_indices(\n estimator.random_state, n_samples, n_samples_bootstrap)\n X_subsample = X_set.get_subset(unsampled_indices)\n p_estimator = estimator.predict_proba(X_subsample)\n\n if self.n_outputs_ == 1:\n p_estimator = [p_estimator]\n\n for k in range(self.n_outputs_):\n predictions[k][unsampled_indices, :] += p_estimator[k]\n\n for k in range(self.n_outputs_):\n if (predictions[k].sum(axis=1) == 0).any():\n warn(\"Some inputs do not have OOB scores. \"\n \"This probably means too few trees were used \"\n \"to compute any reliable oob estimates.\")\n\n decision = (predictions[k] /\n predictions[k].sum(axis=1)[:, np.newaxis])\n oob_decision_function.append(decision)\n oob_score += np.mean(y[:, k] ==\n np.argmax(predictions[k], axis=1), axis=0)\n\n if self.n_outputs_ == 1:\n self.oob_decision_function_ = oob_decision_function[0]\n else:\n self.oob_decision_function_ = oob_decision_function\n\n self.oob_score_ = oob_score / self.n_outputs_\n\n def _validate_y_class_weight(self, y):\n check_classification_targets(y)\n\n y = np.copy(y)\n expanded_class_weight = None\n\n if self.class_weight is not None:\n y_original = np.copy(y)\n\n self.classes_ = []\n self.n_classes_ = []\n\n y_store_unique_indices = np.zeros(y.shape, dtype=np.int)\n for k in range(self.n_outputs_):\n classes_k, y_store_unique_indices[:, k] = \\\n np.unique(y[:, k], return_inverse=True)\n self.classes_.append(classes_k)\n self.n_classes_.append(classes_k.shape[0])\n y = y_store_unique_indices\n\n if self.class_weight is not None:\n valid_presets = ('balanced', 'balanced_subsample')\n if isinstance(self.class_weight, str):\n if self.class_weight not in valid_presets:\n raise ValueError('Valid presets for class_weight include '\n '\"balanced\" and \"balanced_subsample\".'\n 'Given \"%s\".'\n % self.class_weight)\n if self.warm_start:\n warn('class_weight presets \"balanced\" or '\n '\"balanced_subsample\" are '\n 'not recommended for warm_start if the fitted data '\n 'differs from the full dataset. In order to use '\n '\"balanced\" weights, use compute_class_weight '\n '(\"balanced\", classes, y). In place of y you can use '\n 'a large enough sample of the full training set '\n 'target to properly estimate the class frequency '\n 'distributions. 
Pass the resulting weights as the '\n 'class_weight parameter.')\n\n if (self.class_weight != 'balanced_subsample' or\n not self.bootstrap):\n if self.class_weight == \"balanced_subsample\":\n class_weight = \"balanced\"\n else:\n class_weight = self.class_weight\n expanded_class_weight = compute_sample_weight(class_weight,\n y_original)\n\n return y, expanded_class_weight\n\n def predict(self, X_set):\n \"\"\"\n Predict class for X.\n The predicted class of an input sample is a vote by the trees in\n the forest, weighted by their probability estimates. That is,\n the predicted class is the one with highest mean probability\n estimate across the trees.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n y : ndarray of shape (n_samples,) or (n_samples, n_outputs)\n The predicted classes.\n \"\"\"\n proba = self.predict_proba(X_set)\n\n if self.n_outputs_ == 1:\n return self.classes_.take(np.argmax(proba, axis=1), axis=0)\n\n else:\n n_samples = proba[0].shape[0]\n # all dtypes should be the same, so just take the first\n class_type = self.classes_[0].dtype\n predictions = np.empty((n_samples, self.n_outputs_),\n dtype=class_type)\n\n for k in range(self.n_outputs_):\n predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],\n axis=1),\n axis=0)\n\n return predictions\n\n def predict_proba(self, X_set):\n \"\"\"\n Predict class probabilities for X.\n The predicted class probabilities of an input sample are computed as\n the mean predicted class probabilities of the trees in the forest.\n The class probability of a single tree is the fraction of samples of\n the same class in a leaf.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes), or a list of n_outputs\n such arrays if n_outputs > 1.\n The class probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n \"\"\"\n check_is_fitted(self)\n # Check data\n # X = self._validate_X_predict(X)\n\n # Assign chunk of trees to jobs\n n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)\n\n # avoid storing the output of every estimator by summing them here\n all_proba = [np.zeros((X_set.shape[0], j), dtype=np.float64)\n for j in np.atleast_1d(self.n_classes_)]\n lock = threading.Lock()\n Parallel(n_jobs=n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(require=\"sharedmem\"))(\n delayed(_accumulate_prediction)(e.predict_proba, X_set, all_proba,\n lock)\n for e in self.estimators_)\n\n for proba in all_proba:\n proba /= len(self.estimators_)\n\n if len(all_proba) == 1:\n return all_proba[0]\n else:\n return all_proba\n\n def predict_log_proba(self, X_set):\n \"\"\"\n Predict class log-probabilities for X.\n The predicted class log-probabilities of an input sample is computed as\n the log of the mean predicted class probabilities of the trees in the\n forest.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. 
If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes), or a list of n_outputs\n such arrays if n_outputs > 1.\n The class probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n \"\"\"\n proba = self.predict_proba(X_set)\n\n if self.n_outputs_ == 1:\n return np.log(proba)\n\n else:\n for k in range(self.n_outputs_):\n proba[k] = np.log(proba[k])\n\n return proba\n\n\nclass SetForestRegressor(RegressorMixin, BaseForest, metaclass=ABCMeta):\n \"\"\"\n Base class for forest of trees-based regressors.\n Warning: This class should not be used directly. Use derived classes\n instead.\n \"\"\"\n\n @abstractmethod\n def __init__(self,\n base_estimator,\n n_estimators=100, *,\n estimator_params=tuple(),\n bootstrap=False,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n max_samples=None):\n super().__init__(\n base_estimator,\n n_estimators=n_estimators,\n estimator_params=estimator_params,\n bootstrap=bootstrap,\n oob_score=oob_score,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n warm_start=warm_start,\n max_samples=max_samples)\n\n def predict(self, X_set):\n \"\"\"\n Predict regression target for X.\n The predicted regression target of an input sample is computed as the\n mean predicted regression targets of the trees in the forest.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n y : ndarray of shape (n_samples,) or (n_samples, n_outputs)\n The predicted values.\n \"\"\"\n check_is_fitted(self)\n # Check data\n # X = self._validate_X_predict(X)\n\n # Assign chunk of trees to jobs\n n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)\n\n # avoid storing the output of every estimator by summing them here\n if self.n_outputs_ > 1:\n y_hat = np.zeros((X_set.shape[0], self.n_outputs_), dtype=np.float64)\n else:\n y_hat = np.zeros((X_set.shape[0]), dtype=np.float64)\n\n # Parallel loop\n lock = threading.Lock()\n Parallel(n_jobs=n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(require=\"sharedmem\"))(\n delayed(_accumulate_prediction)(e.predict, X_set, [y_hat], lock)\n for e in self.estimators_)\n\n y_hat /= len(self.estimators_)\n\n return y_hat\n\n def _set_oob_score(self, X_set, y):\n \"\"\"\n Compute out-of-bag scores.\"\"\"\n # X = check_array(X, dtype=DTYPE, accept_sparse='csr')\n\n n_samples = y.shape[0]\n\n predictions = np.zeros((n_samples, self.n_outputs_))\n n_predictions = np.zeros((n_samples, self.n_outputs_))\n\n n_samples_bootstrap = _get_n_samples_bootstrap(\n n_samples, self.max_samples\n )\n\n for estimator in self.estimators_:\n unsampled_indices = _generate_unsampled_indices(\n estimator.random_state, n_samples, n_samples_bootstrap)\n X_subset = X_set.get_subset(unsampled_indices)\n p_estimator = estimator.predict(X_subset)\n\n if self.n_outputs_ == 1:\n p_estimator = p_estimator[:, np.newaxis]\n\n predictions[unsampled_indices, :] += p_estimator\n n_predictions[unsampled_indices, :] += 1\n\n if (n_predictions == 0).any():\n warn(\"Some inputs do not have OOB scores. 
\"\n \"This probably means too few trees were used \"\n \"to compute any reliable oob estimates.\")\n n_predictions[n_predictions == 0] = 1\n\n predictions /= n_predictions\n self.oob_prediction_ = predictions\n\n if self.n_outputs_ == 1:\n self.oob_prediction_ = \\\n self.oob_prediction_.reshape((n_samples, ))\n\n self.oob_score_ = 0.0\n\n for k in range(self.n_outputs_):\n self.oob_score_ += r2_score(y[:, k],\n predictions[:, k])\n\n self.oob_score_ /= self.n_outputs_\n\n def _compute_partial_dependence_recursion(self, grid, target_features):\n \"\"\"Fast partial dependence computation.\n Parameters\n ----------\n grid : ndarray of shape (n_samples, n_target_features)\n The grid points on which the partial dependence should be\n evaluated.\n target_features : ndarray of shape (n_target_features)\n The set of target features for which the partial dependence\n should be evaluated.\n Returns\n -------\n averaged_predictions : ndarray of shape (n_samples,)\n The value of the partial dependence function on each grid point.\n \"\"\"\n grid = np.asarray(grid, dtype=DTYPE, order='C')\n averaged_predictions = np.zeros(shape=grid.shape[0],\n dtype=np.float64, order='C')\n\n for tree in self.estimators_:\n # Note: we don't sum in parallel because the GIL isn't released in\n # the fast method.\n tree.tree_.compute_partial_dependence(\n grid, target_features, averaged_predictions)\n # Average over the forest\n averaged_predictions /= len(self.estimators_)\n\n return averaged_predictions\n\n\nclass SetRandomForestClassifier(SetForestClassifier):\n @_deprecate_positional_args\n def __init__(self,\n n_estimators=100, *,\n criterion=\"gini\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n max_leaf_nodes=None,\n min_impurity_decrease=0.,\n min_impurity_split=None,\n splitter='xgboost',\n operations=OPERATIONS,\n use_attention_set=True,\n attention_set_limit=1,\n bootstrap=True,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n class_weight=None,\n ccp_alpha=0.0,\n max_samples=None):\n super().__init__(\n base_estimator=SetTree(),\n n_estimators=n_estimators,\n estimator_params=tuple(SetTree().get_params()),\n bootstrap=bootstrap,\n oob_score=oob_score,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n warm_start=warm_start,\n class_weight=class_weight,\n max_samples=max_samples)\n\n self.criterion = criterion\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.max_features = max_features\n self.max_leaf_nodes = max_leaf_nodes\n self.min_impurity_decrease = min_impurity_decrease\n self.min_impurity_split = min_impurity_split\n self.operations = operations\n self.splitter = splitter\n self.use_attention_set = use_attention_set\n self.attention_set_limit = attention_set_limit\n self.classifier = True\n self.ccp_alpha = ccp_alpha\n\n\nclass SetRandomForestRegressor(SetForestRegressor):\n @_deprecate_positional_args\n def __init__(self,\n n_estimators=100, *,\n criterion=\"mse\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n max_leaf_nodes=None,\n min_impurity_decrease=0.,\n min_impurity_split=None,\n splitter='xgboost',\n operations=OPERATIONS,\n use_attention_set=True,\n attention_set_limit=1,\n bootstrap=True,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n 
verbose=0,\n warm_start=False,\n ccp_alpha=0.0,\n max_samples=None):\n super().__init__(\n base_estimator=SetTree(),\n n_estimators=n_estimators,\n estimator_params=tuple(SetTree().get_params()),\n bootstrap=bootstrap,\n oob_score=oob_score,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n warm_start=warm_start,\n max_samples=max_samples)\n\n self.criterion = criterion\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.max_features = max_features\n self.max_leaf_nodes = max_leaf_nodes\n self.min_impurity_decrease = min_impurity_decrease\n self.min_impurity_split = min_impurity_split\n self.operations = operations\n self.splitter = splitter\n self.use_attention_set = use_attention_set\n self.attention_set_limit = attention_set_limit\n self.classifier = False\n self.ccp_alpha = ccp_alpha\n",
"from tree_node_learner import tree_node_learner, tree_node_learner_parameters\nfrom graph_data import graph_data\nfrom typing import List\nimport numpy as np\nfrom sklearn.base import BaseEstimator, RegressorMixin\nfrom collections import defaultdict\n################################################################\n### ###\n### Note that line 124 of boosting.py in starboost should ###\n### be changed to: ###\n### y_pred[:, i] += self.learning_rate * direction[:, i] ###\n### ###\n### The same fix should be applied in line 179 ###\n### /usr/local/lib/python3.8/dist-packages/starboost/ ###\n### ###\n################################################################\n\nclass graphtree(BaseEstimator, RegressorMixin):\n def __init__(self,\n graph:graph_data,\n graph_depths: List[int] = [0, 1, 2],\n max_attention_depth: int = 2,\n max_number_of_leafs:int = 10, \n min_gain:float = 0.0,\n min_leaf_size:int = 10,\n ):\n self.graph_depths = graph_depths\n self.max_attention_depth = max_attention_depth\n self.max_number_of_leafs = max_number_of_leafs\n self.min_gain = min_gain\n self.min_leaf_size = min_leaf_size\n self.graph = graph\n\n def get_params(self, deep=True):\n \"\"\"\n Get parameters for this estimator.\n Parameters\n ----------\n deep : bool, default=True\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n Returns\n -------\n params : dict\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_param_names():\n value = getattr(self, key)\n if deep and hasattr(value, \"get_params\"):\n deep_items = value.get_params().items()\n out.update((key + \"__\" + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n def set_params(self, **params):\n \"\"\"\n Set the parameters of this estimator.\n The method works on simple estimators as well as on nested objects\n (such as :class:`~sklearn.pipeline.Pipeline`). The latter have\n parameters of the form ``<component>__<parameter>`` so that it's\n possible to update each component of a nested object.\n Parameters\n ----------\n **params : dict\n Estimator parameters.\n Returns\n -------\n self : estimator instance\n Estimator instance.\n \"\"\"\n if not params:\n # Simple optimization to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n\n nested_params = defaultdict(dict) # grouped by prefix\n for key, value in params.items():\n key, delim, sub_key = key.partition(\"__\")\n if key not in valid_params:\n raise ValueError(\n \"Invalid parameter %s for estimator %s. 
\"\n \"Check the list of available parameters \"\n \"with `estimator.get_params().keys()`.\" % (key, self)\n )\n\n if delim:\n nested_params[key][sub_key] = value\n else:\n setattr(self, key, value)\n valid_params[key] = value\n\n for key, sub_params in nested_params.items():\n valid_params[key].set_params(**sub_params)\n\n return self\n\n def fit(self, X:List[int], y:np.array):#, eval_set = None):\n X = X.flatten()\n y = y.flatten()\n if (len(X) != len(y)):\n raise ValueError(\"Size of X and y mismatch\") \n parms = tree_node_learner_parameters(\\\n graph_depths = self.graph_depths,\n max_attention_depth=self.max_attention_depth,\n max_number_of_leafs=self.max_number_of_leafs,\n min_gain=self.min_gain,\n min_leaf_size=self.min_leaf_size,\n graph=self.graph\n )\n self.tree_learner_ = tree_node_learner(parms, list(range(0, self.graph.get_number_of_nodes())), None)\n self.train_L2, self.train_total_gain = self.tree_learner_.fit(X, y)\n return(self)\n\n\n def predict(self, X:List[int]):\n all_predictions = self.tree_learner_.predict_all()\n if (isinstance(X,np.ndarray)):\n X = X[0].tolist()\n array = np.array(all_predictions[X])\n return(array.reshape(-1, 1))\n\n def print(self):\n self.tree_learner.print()\n\n \n\n "
] | [
[
"sklearn.utils.validation.check_is_fitted",
"sklearn.metrics.r2_score",
"numpy.asarray",
"sklearn.utils.fixes._joblib_parallel_args",
"numpy.mean",
"numpy.iinfo",
"sklearn.ensemble._base._partition_estimators",
"sklearn.utils.compute_sample_weight",
"scipy.sparse.issparse",
"numpy.unique",
"numpy.reshape",
"numpy.arange",
"numpy.atleast_1d",
"numpy.copy",
"numpy.argmax",
"numpy.zeros",
"sklearn.utils.validation._check_sample_weight",
"numpy.log",
"scipy.sparse.hstack",
"numpy.array",
"numpy.sum",
"sklearn.utils.multiclass.check_classification_targets",
"numpy.ones",
"numpy.bincount",
"sklearn.utils.check_random_state",
"numpy.empty"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
loevlie/ce_expansion | [
"17417b9467914dd91ee8e0325cfdc3bd19ad7f1e",
"17417b9467914dd91ee8e0325cfdc3bd19ad7f1e",
"17417b9467914dd91ee8e0325cfdc3bd19ad7f1e"
] | [
"example/ex_4_phase_diagrams/individual_size_plots.py",
"example/ex_1_agau_309_Icosahedron/main.py",
"ce_expansion/atomgraph/bcm.py"
] | [
"import collections\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tick\nimport numpy as np\n\ndata = os.path.join(os.path.realpath(__file__), '..', '..', '..', 'data', 'larson_et_al')\nsys.path.append(data)\nimport ce_expansion.npdb.db_inter\n\nDEFAULT_DPI = 600 # Dots per inch\nDEFAULT_POINTSIZE = 15\nDEFAULT_MARKER = \"o\" # square\n\n\nclass Result(object):\n def __init__(self, shape, size, composition, excess_energy, temp):\n self.shape = shape\n self.size = size\n self.composition = composition\n self.excess_energy = excess_energy\n self.free_energy = self.get_free_energy_mix(temp)\n\n def get_free_energy_mix(self, T):\n \"\"\"\n Calculates Excess energies plus an entropic contribution.\n\n :param excess_energy: Excess energies from DB query\n :param comp: Compositions from DB query\n :param T: Temperature\n\n :return: Free energy of mixing = excess energy (related to enthalpy of mixing) - entropy of mixing\n \"\"\"\n\n if self.composition == 1 or self.composition == 0:\n return 0\n\n # k_b T [eV] = (25.7 mEV at 298 K)\n kt = 25.7E-3 / 298 * T\n del_s = self.composition * np.log(self.composition) + (1 - self.composition) * np.log(1 - self.composition)\n del_s *= -kt\n\n free_energy = self.excess_energy - del_s\n return free_energy\n\n\nclass OrderedSet(collections.UserList):\n \"\"\"\n Wrapper around a list that allows it to operate somewhat like a set.\n \"\"\"\n\n def add(self, value):\n \"\"\"\n If the value passed in is not in the set, then adds it to the set. Otherwise, does nothing.\n\n :param value: The value to be added.\n \"\"\"\n if value in self.data:\n pass\n else:\n self.data.append(value)\n\n\ndef get_data(alloy,\n size,\n temperature):\n \"\"\"\n Gets data for phase diagram\n\n :param alloy: Alloy of interest\n :param size: Size to consider\n :param temperature: Temperature to use\n :return: results object.\n \"\"\"\n\n # Book-keeping and initialization\n shapes = [\"icosahedron\", \"cuboctahedron\", \"elongated-pentagonal-bipyramid\"]\n\n # DB Query\n results = []\n for shape in shapes:\n query = ce_expansion.npdb.db_inter.get_bimet_result(metals=alloy, shape=shape, num_atoms=size)\n for result in query:\n # Calculate composition\n composition = result.n_metal1 / result.num_atoms\n # Calculate EE\n excess_energy = result.EE\n # Add to the list of results objects\n results.append(Result(shape, size, composition, excess_energy, temperature))\n return results\n\n\ndef make_plot(results, axis, size):\n \"\"\"\n Plots some results, y'know?\n\n :param results: A list of Results objects containing the shape, composition, and free energy of mixing\n :param axis: Pyplot axis to plot to\n :param size: size\n :return: None. 
Drops the plot in the working directory.\n \"\"\"\n # Split into 3 lists, for icosahedrons, cubs, and epbs\n # Each list is of the format (composition, free energy of mixing)\n icos = []\n cubs = []\n epbs = []\n types = {\"icosahedron\": icos,\n \"cuboctahedron\": cubs,\n \"elongated-pentagonal-bipyramid\": epbs}\n\n colors = {\"icosahedron\": \"red\",\n \"cuboctahedron\": \"blue\",\n \"elongated-pentagonal-bipyramid\": \"green\"}\n for result in results:\n types[result.shape].append((result.composition, result.free_energy, colors[result.shape]))\n\n for shape in [icos, cubs, epbs]:\n x = [i[0] * 100 for i in shape]\n y = [i[1] for i in shape]\n color = shape[0][2]\n axis.plot(x, y, color)\n\n # Label size\n axis.text(0.9, 0.5, f\"N={size}\", transform=axis.transAxes, size=20)\n\n\nalloys = [\"AgCu\"]#[\"AgAu\", \"AuCu\", \"AgCu\"]\nfor alloy in alloys:\n tens_sizes = [3871, 2869, 2057, 1415, 561] # sizes where we skipped 10% increments\n all_sizes = [309, 147, 55, 13] # sizes where we looked at all possible compositions\n\n for sizes in [tens_sizes, all_sizes]:\n fig, axes = plt.subplots(nrows=5, ncols=1, sharex=True, sharey=True)\n ymin = 0\n ymax = 0\n for plot_index, size in enumerate(sizes):\n # Query\n results = get_data(alloy, size, 250)\n results.sort(key=lambda i: i.composition)\n\n # Plot\n make_plot(results, axes[abs(plot_index)], size)\n\n # plot labels\n fig.text(0.5, 0.04, \"Composition (%)\", ha=\"center\", size=20)\n fig.text(0, 0.5, \"Free Energy of Mixing (eV/atom)\", va=\"center\", rotation=\"vertical\", size=20)\n fig.text(0.5, 0.95, f\"{alloy} @ 250K\", size=25, ha=\"center\")\n\n # Tickmarks\n plt.xlim(0, 100)\n ylimits = {\"AgAu\": [-0.1, 0],\n \"AgCu\": [-0.1+0.025, 0.025],\n \"AuCu\": [-0.3, 0]}\n\n ymin = ylimits[alloy][0]\n ymax = ylimits[alloy][1]\n\n plt.ylim(ymin, ymax)\n for axis in axes:\n # Set up X tickmarks\n axis.tick_params(axis=\"x\", labelsize=15)\n axis.xaxis.set_major_locator(tick.MultipleLocator(20))\n axis.xaxis.set_major_formatter(tick.FormatStrFormatter(\"%d\"))\n axis.xaxis.set_minor_locator(tick.MultipleLocator(10))\n axis.xaxis.grid(True, which='major')\n\n # Set up Y tickmarks\n axis.tick_params(axis=\"y\", labelsize=15)\n axis.yaxis.set_major_locator(tick.MultipleLocator((ymax - ymin) / 2))\n axis.yaxis.set_major_formatter(tick.FormatStrFormatter(\"%2.2f\"))\n axis.yaxis.set_minor_locator(tick.MultipleLocator((ymax - ymin) / 4))\n\n # Save and quit\n plt.savefig(f\"{alloy},{sizes[-1]}-{sizes[0]}.png\")\n plt.close()\n",
"#!/usr/bin/env python\n\nimport os\nimport sys\n\nimport ase.io\nimport numpy as np\n\nfrom ce_expansion.atomgraph import atomgraph, adjacency\n\n# Tell the program where /ce_expansion/ and /data/ can be found\ndata = os.path.join(os.path.realpath(__file__), '..', '..', '..', 'data', 'larson_et_al')\nsys.path.append(data)\n\n# Calculating the bonding is fairly slow in ASE's libraries.\n# Fortunately, every XYZ in the dataset has the same set of coordinates,\n# so we can use the same atomgraph for every system.\nprint(\"Building atom graph...\")\nfilename = \"Ag0Au309.xyz\" # chosen completely arbitrarily\npath = os.path.join(data, filename)\natoms = ase.io.read(path)\nbondlist = adjacency.buildBondsList(atoms, radius_dictionary={(\"Ag\", \"Ag\"): 3,\n (\"Ag\", \"Au\"): 3,\n (\"Au\", \"Au\"): 3})\n# Make the atomgraph; set \"0\" to Ag and set \"1\" to Au\ngraph = atomgraph.AtomGraph(bondlist, kind0=\"Ag\", kind1=\"Au\")\n\n# Array to hold mixing parameters and energies\ncsv_data = [None] * 310\n\n# Now we'll iterate over every xyz file in the directory\nprint(\"Calculating mixing parameters and cohesive energies...\")\nfor i in range(0, 310):\n # Specify the path to the file and open it as an ASE_Atoms Object\n filename = \"Ag\" + str(i) + \"Au\" + str(309 - i) + \".xyz\"\n print(filename[:-4])\n path = os.path.join(data, filename)\n atoms = ase.io.read(path)\n\n # Make a holder array for the chemical ordering\n ordering = np.zeros(309)\n\n # Iterate over every atom to get its chemical ordering\n for index, atom in enumerate(atoms):\n assert atom.symbol in [\"Ag\", \"Au\"]\n # Recall when we made the atomgraph, we said a \"0\" is Ag, and a \"1\" is Au\n if atom.symbol == \"Ag\":\n ordering[index] = 0\n elif atom.symbol == \"Au\":\n ordering[index] = 1\n\n # Calculate the mixing parameter\n mixing_parameter = graph.calcMixing(ordering)\n\n # Calculate the cohesive energy\n cohesive_energy = graph.getTotalCE(ordering)\n\n # CSV will have the following columns:\n # 1 - Chemical formula\n # 2 - Number of Ag atoms\n # 3 - Number of Au atoms\n # 4 - Mixing Parameter\n # 5 - BCM_BCM_CE_eV_eV\n # 6 - SE_Energy_eV\n # 7 - BCM_EE_eV\n\n csv_data[i] = [atoms.get_chemical_formula(),\n i,\n 309 - i,\n mixing_parameter,\n cohesive_energy,\n atoms.get_total_energy() * -1]\n\n# We need the monometallic cohesive energies to calculate excess energy\nfor i in csv_data:\n if i[1] == 0:\n mono_au = i[4]\n elif i[1] == 309:\n mono_ag = i[4]\n\n# Calculate excess energy\nfor entry in csv_data:\n ag_count = entry[1]\n au_count = entry[2]\n cohesive_energy = entry[4]\n\n excess_energy = cohesive_energy - mono_ag * (ag_count / 309) - mono_au * (au_count / 309)\n\n if abs(excess_energy) < 1E-10:\n excess_energy = 0\n entry.append(excess_energy)\n\n# Write to file and call it a day\nprint(\"Writing to File mixing_parameter_data.csv\")\nwith open(\"mixing_parameter_data.csv\", \"w\") as outp:\n outp.write(\"Chemical_Formula,Ag,Au,Mixing_Parameter,BCM_CE_eV,SE_Energy_eV,BCM_EE_eV\\n\")\n for entry in csv_data:\n outp.write(\",\".join(map(str, entry)) + \"\\n\")\n",
"import itertools\nimport collections.abc\nimport functools\nfrom typing import Iterable, Optional, Dict\n\nimport numpy as np\nimport ase\nimport ase.units\n\nfrom ce_expansion.atomgraph import adjacency\nfrom ce_expansion.data.gamma import GammaValues\n\n\ndef recursive_update(d: dict, u: dict) -> dict:\n \"\"\"\n recursively updates 'dict of dicts'\n Ex)\n d = {0: {1: 2}}\n u = {0: {3: 4}, 8: 9}\n\n recursive_update(d, u) == {0: {1: 2, 3: 4}, 8: 9}\n\n Args:\n d (dict): the nested dict object to update\n u (dict): the nested dict that contains new key-value pairs\n\n Returns:\n d (dict): the final updated dict\n \"\"\"\n for k, v in u.items():\n if isinstance(v, collections.abc.Mapping):\n d[k] = recursive_update(d.get(k, {}), v)\n else:\n d[k] = v\n return d\n\n\nclass BCModel:\n def __init__(self, atoms: ase.Atoms, metal_types: Optional[Iterable] = None,\n bond_list: Optional[Iterable] = None, info: dict = {}):\n \"\"\"\n Based on metal_types, create ce_bulk and gamma dicts from data given\n\n Args:\n atoms: ASE atoms object which contains the data of the NP being tested\n bond_list: list of atom indices involved in each bond\n\n KArgs:\n metal_types: List of metals found within the nano-particle\n If not passed, use elements provided by the atoms object\n \"\"\"\n self.atoms = atoms.copy()\n self.atoms.pbc = False\n self.info = info\n if metal_types is None:\n # get metal_types from atoms object\n self.metal_types = sorted(set(atoms.symbols))\n else:\n # ensure metal_types to unique, sorted list of metals\n self.metal_types = sorted(set(m.title() for m in metal_types))\n\n self.bond_list = bond_list\n if self.bond_list is None:\n self.bond_list = adjacency.build_bonds_arr(self.atoms)\n\n self.cn = np.bincount(self.bond_list[:, 0])\n\n # creating gamma list for every possible atom pairing\n self.gammas = None\n self.ce_bulk = None\n self._get_bcm_params()\n\n # get bonded atom columns\n self.a1 = self.bond_list[:, 0]\n self.a2 = self.bond_list[:, 1]\n\n # Calculate and set the precomps matrix\n self.precomps = None\n self.cn_precomps = None\n self._get_precomps()\n\n def __len__(self) -> int:\n return len(self.atoms)\n\n def calc_ce(self, orderings: np.ndarray) -> float:\n \"\"\"\n Calculates the Cohesive energy (in eV / atom) of the ordering given or of the default ordering of the NP\n\n [Cohesive Energy] = ( [precomp values of element A and B] / sqrt(12 * CN) ) / [num atoms]\n\n Args:\n orderings: The ordering of atoms within the NP; ordering key is based on Metals in alphabetical order\n\n Returns:\n Cohesive Energy (eV / atom)\n \"\"\"\n return (self.precomps[orderings[self.a1], orderings[self.a2]] / self.cn_precomps).sum() / len(self.atoms)\n\n def calc_ee(self, orderings: np.ndarray) -> float:\n \"\"\"\n Calculates the Excess energy (in eV / atom) of the ordering given or of the default ordering of the NP\n\n [Excess Energy] = [CE of NP] - sum([Pure Element NP] * [Comp of Element in NP])\n\n Args:\n orderings: The ordering of atoms within the NP; ordering key is based on Metals in alphabetical order\n\n Returns:\n Excess Energy (eV / atom)\n \"\"\"\n\n metals = np.bincount(orderings)\n\n # obtain atom fractions of each tested element\n x_i = np.zeros(len(self.metal_types)).astype(float)\n x_i[:len(metals)] = metals / metals.sum()\n\n # calculate energy of tested NP first;\n ee = self.calc_ce(orderings)\n\n # Then, subtract calculated pure NP energies multiplied by respective\n # fractions to get Excess Energy\n for ele in range(len(self.metal_types)):\n x_ele = x_i[ele]\n 
o_mono_x = np.ones(len(self), int) * ele\n\n ee -= self.calc_ce(o_mono_x) * x_ele\n return ee\n\n def calc_smix(self, orderings: np.ndarray) -> float:\n \"\"\"\n Uses boltzman constant, orderings, and element compositions to determine the smix of the nanoparticle\n\n Args:\n orderings: The ordering of atoms within the NP; ordering key is based on Metals in alphabetical order\n\n Returns:\n entropy of mixing (smix)\n\n \"\"\"\n\n x_i = np.bincount(orderings) / len(orderings)\n\n # drop 0s to avoid errors\n x_i = x_i[x_i != 0]\n\n kb = ase.units.kB\n\n smix = -kb * sum(x_i * np.log(x_i))\n\n return smix\n\n def calc_gmix(self, orderings: np.ndarray, T: float = 298.15) -> float:\n \"\"\"\n gmix (eV / atom) = self.ee - T * self.calc_smix(ordering)\n\n Args:\n T: Temperature of the system in Kelvin; Defaults at room temp of 25 C\n orderings: The ordering of atoms within the NP; ordering key is based on Metals in alphabetical order\n\n Returns:\n free energy of mixing (gmix)\n \"\"\"\n return self.calc_ee(orderings) - T * self.calc_smix(orderings)\n\n def metropolis(self, ordering: np.ndarray, num_steps: int = 1000) -> None:\n \"\"\"\n Metropolis-Hastings-based exploration of similar NPs\n\n Args:\n ordering: 1D chemical ordering array\n num_steps: How many steps to simulate for\n \"\"\"\n # Initialization\n # create new instance of ordering array\n ordering = ordering.copy()\n best_ordering = ordering.copy()\n best_energy = self.calc_ce(ordering)\n prev_energy = best_energy\n energy_history = np.zeros(num_steps)\n energy_history[0] = best_energy\n\n ordering_indices = np.arange(len(ordering))\n for step in range(1, num_steps):\n prev_ordering = ordering.copy()\n i, j = np.random.choice(ordering_indices, 2, replace=False)\n ordering[i], ordering[j] = ordering[j], ordering[i]\n\n # Evaluate the energy change\n energy = self.calc_ce(ordering)\n\n # Metropolis-related stuff\n ratio = energy / prev_energy\n if ratio > np.random.uniform():\n # Commit to the step\n energy_history[step] = energy\n if energy < best_energy:\n best_energy = energy\n best_ordering = ordering.copy()\n else:\n # Reject the step\n ordering = prev_ordering.copy()\n energy_history[step] = prev_energy\n\n return best_ordering, best_energy, energy_history\n\n @functools.cached_property\n def num_shells(self) -> int:\n \"\"\"\n Return number of shells in NP\n Use calc_shell_map if user did not define num_shells\n \"\"\"\n return max(self.shell_map)\n\n @functools.cached_property\n def shell_map(self) -> Dict[int, Iterable[int]]:\n \"\"\"\n Map of shell number and atom indices in shell\n\n 0: core atom(s)\n 1: shell (layer) 1 over core atom(s)\n etc.\n\n Returns:\n shell_map: dict of shell number and array of atom indices in shell\n \"\"\"\n remaining_atoms = set(range(len(self.atoms)))\n\n shell_map = {}\n cur_shell = 0\n srf = np.where(self.cn < 12)[0]\n shell_map[cur_shell] = srf\n\n remaining_atoms -= set(srf)\n\n coord_dict = {i: set(self.bond_list[self.bond_list[:, 0] == i].ravel())\n for i in remaining_atoms}\n\n while remaining_atoms:\n cur_shell -= 1\n shell = [i for i in remaining_atoms\n if coord_dict[i] - remaining_atoms]\n shell_map[cur_shell] = np.array(shell)\n remaining_atoms -= set(shell)\n\n shell_map = {k - cur_shell: v for k, v in shell_map.items()}\n return shell_map\n \n def get_info(self):\n \"\"\"\n Prints out and returns the information stored in the bcm object on how the model\n was parameterized. This can be any info that may be relevant but some good info to store\n are:\n 1. 
What method was used to calculate the Gamma values (e.g. NP or Dimer method)\n 2. Other info on how the gamma values were calculated (were energies from DFT (if so then what functional was used), experimental or approximated)\n 3. Information on the CE_Bulk value being used\n \n Returns:\n Info [dict]: Original info dictionary used to initialize the bcm instance\n \"\"\"\n for key in self.info:\n print(f'{key}: {self.info[key]}\\n')\n return self.info\n \n def _get_bcm_params(self) -> None:\n \"\"\"\n Creates gamma and ce_bulk dictionaries which are then used\n to created precomputed values for the BCM calculation\n\n Sets:\n gamma: Weighting factors of the computed elements within the BCM\n ce_bulk: Bulk Cohesive energy values\n \"\"\"\n gammas = {}\n ce_bulk = {}\n for item in itertools.combinations_with_replacement(self.metal_types, 2):\n # Casting metals and setting keys for dictionary\n metal_1, metal_2 = item\n\n gamma_obj = GammaValues(metal_1, metal_2)\n\n # using Update function to create clean Gamma an bulk dictionaries\n gammas = recursive_update(gammas, gamma_obj.gamma)\n # add ce_bulk vals\n ce_bulk[gamma_obj.element_a] = gamma_obj.ce_a\n ce_bulk[gamma_obj.element_b] = gamma_obj.ce_b\n\n self.ce_bulk = ce_bulk\n self.gammas = gammas\n\n def _get_precomps(self) -> None:\n \"\"\"\n Uses the Gamma and ce_bulk dictionaries to create a precomputed\n BCM matrix of gammas and ce_bulk values\n\n [precomps] = [gamma of element 1] * [ce_bulk of element 1 to element 2]\n\n Sets:\n precomps: Precomp Matrix\n \"\"\"\n # precompute values for BCM calc\n n_met = len(self.metal_types)\n\n precomps = np.ones((n_met, n_met))\n\n for i in range(n_met):\n for j in range(n_met):\n\n M1 = self.metal_types[i]\n M2 = self.metal_types[j]\n precomp_bulk = self.ce_bulk[M1]\n precomp_gamma = self.gammas[M1][M2]\n\n precomps[i, j] = precomp_gamma * precomp_bulk\n self.precomps = precomps\n self.cn_precomps = np.sqrt(self.cn * 12)[self.a1]\n"
] | [
[
"numpy.log",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.close",
"matplotlib.ticker.FormatStrFormatter"
],
[
"numpy.zeros"
],
[
"numpy.log",
"numpy.sqrt",
"numpy.random.choice",
"numpy.ones",
"numpy.bincount",
"numpy.random.uniform",
"numpy.array",
"numpy.where",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aerdem4/cuml | [
"088763cda9fd5e363af092b1d05c155f256cf0d7"
] | [
"python/cuml/benchmark/datagen.py"
] | [
"# Copyright (c) 2019, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Data generators for cuML benchmarks\n\nThe main entry point for consumers is gen_data, which\nwraps the underlying data generators.\n\nNotes when writing new generators:\n\nEach generator is a function that accepts:\n * n_samples (set to 0 for 'default')\n * n_features (set to 0 for 'default')\n * random_state\n * (and optional generator-specific parameters)\n\nThe function should return a 2-tuple (X, y), where X is a Pandas\ndataframe and y is a Pandas series. If the generator does not produce\nlabels, it can return (X, None)\n\nA set of helper functions (convert_*) can convert these to alternative\nformats. Future revisions may support generating cudf dataframes or\nGPU arrays directly instead.\n\n\"\"\"\n\nimport cudf\nimport gzip\nimport functools\nimport numpy as np\nimport os\nimport pandas as pd\n\nimport cuml.datasets\nimport sklearn.model_selection\n\nfrom urllib.request import urlretrieve\nfrom cuml.common import input_utils\nfrom numba import cuda\n\n\ndef _gen_data_regression(n_samples, n_features, random_state=42):\n \"\"\"Wrapper for sklearn make_regression\"\"\"\n if n_samples == 0:\n n_samples = int(1e6)\n if n_features == 0:\n n_features = 100\n X_arr, y_arr = cuml.datasets.make_regression(\n n_samples=n_samples, n_features=n_features, random_state=random_state)\n return cudf.DataFrame(X_arr), cudf.Series(y_arr)\n\n\ndef _gen_data_blobs(n_samples, n_features, random_state=42, centers=None):\n \"\"\"Wrapper for sklearn make_blobs\"\"\"\n if n_samples == 0:\n n_samples = int(1e6)\n if n_features == 0:\n n_samples = 100\n X_arr, y_arr = cuml.datasets.make_blobs(\n n_samples=n_samples, n_features=n_features, centers=centers,\n random_state=random_state)\n print(type(X_arr), type(y_arr))\n return (\n cudf.DataFrame(X_arr.astype(np.float32)),\n cudf.Series(y_arr.astype(np.float32)),\n )\n\n\ndef _gen_data_zeros(n_samples, n_features, random_state=42):\n \"\"\"Dummy generator for use in testing - returns all 0s\"\"\"\n return (\n cudf.DataFrame(np.zeros((n_samples, n_features), dtype=np.float32)),\n cudf.Series(np.zeros(n_samples, dtype=np.float32)),\n )\n\n\ndef _gen_data_classification(\n n_samples, n_features, random_state=42, n_classes=2\n):\n \"\"\"Wrapper for sklearn make_blobs\"\"\"\n if n_samples == 0:\n n_samples = int(1e6)\n if n_features == 0:\n n_samples = 100\n\n X_arr, y_arr = cuml.datasets.make_classification(\n n_samples=n_samples, n_features=n_features, n_classes=n_classes,\n random_state=random_state)\n\n return (\n cudf.DataFrame(X_arr.astype(np.float32)),\n cudf.Series(y_arr.astype(np.float32)),\n )\n\n\ndef _gen_data_higgs(n_samples=None, n_features=None, random_state=42):\n \"\"\"Wrapper returning Higgs in Pandas format\"\"\"\n X_df, y_df = load_higgs()\n if n_samples == 0:\n n_samples = X_df.shape[0]\n if n_features == 0:\n n_features = X_df.shape[1]\n if n_features > X_df.shape[1]:\n raise ValueError(\n \"Higgs dataset has only %d features, cannot support 
%d\"\n % (X_df.shape[1], n_features)\n )\n if n_samples > X_df.shape[0]:\n raise ValueError(\n \"Higgs dataset has only %d rows, cannot support %d\"\n % (X_df.shape[0], n_samples)\n )\n return X_df.iloc[:n_samples, :n_features], y_df.iloc[:n_samples]\n\n\ndef _download_and_cache(url, compressed_filepath, decompressed_filepath):\n if not os.path.isfile(compressed_filepath):\n urlretrieve(url, compressed_filepath)\n if not os.path.isfile(decompressed_filepath):\n cf = gzip.GzipFile(compressed_filepath)\n with open(decompressed_filepath, 'wb') as df:\n df.write(cf.read())\n return decompressed_filepath\n\n\n# Default location to cache datasets\nDATASETS_DIRECTORY = '.'\n\n\ndef load_higgs():\n \"\"\"Returns the Higgs Boson dataset as an X, y tuple of dataframes.\"\"\"\n higgs_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz' # noqa\n decompressed_filepath = _download_and_cache(\n higgs_url,\n os.path.join(DATASETS_DIRECTORY, \"HIGGS.csv.gz\"),\n os.path.join(DATASETS_DIRECTORY, \"HIGGS.csv\"),\n )\n col_names = ['label'] + [\n \"col-{}\".format(i) for i in range(2, 30)\n ] # Assign column names\n dtypes_ls = [np.int32] + [\n np.float32 for _ in range(2, 30)\n ] # Assign dtypes to each column\n data_df = pd.read_csv(\n decompressed_filepath, names=col_names,\n dtype={k: v for k, v in zip(col_names, dtypes_ls)}\n )\n X_df = data_df[data_df.columns.difference(['label'])]\n y_df = data_df['label']\n return cudf.DataFrame.from_pandas(X_df), cudf.Series.from_pandas(y_df)\n\n\ndef _convert_to_numpy(data):\n \"\"\"Returns tuple data with all elements converted to numpy ndarrays\"\"\"\n if data is None:\n return None\n elif isinstance(data, tuple):\n return tuple([_convert_to_numpy(d) for d in data])\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, cudf.DataFrame):\n return data.as_matrix()\n elif isinstance(data, cudf.Series):\n return data.to_array()\n elif isinstance(data, (pd.DataFrame, pd.Series)):\n return data.to_numpy()\n else:\n raise Exception(\"Unsupported type %s\" % str(type(data)))\n\n\ndef _convert_to_cudf(data):\n if data is None:\n return None\n elif isinstance(data, tuple):\n return tuple([_convert_to_cudf(d) for d in data])\n elif isinstance(data, (cudf.DataFrame, cudf.Series)):\n return data\n elif isinstance(data, pd.DataFrame):\n return cudf.DataFrame.from_pandas(data)\n elif isinstance(data, pd.Series):\n return cudf.Series.from_pandas(data)\n else:\n raise Exception(\"Unsupported type %s\" % str(type(data)))\n\n\ndef _convert_to_pandas(data):\n if data is None:\n return None\n elif isinstance(data, tuple):\n return tuple([_convert_to_pandas(d) for d in data])\n elif isinstance(data, (pd.DataFrame, pd.Series)):\n return data\n elif isinstance(data, (cudf.DataFrame, cudf.Series)):\n return data.to_pandas()\n else:\n raise Exception(\"Unsupported type %s\" % str(type(data)))\n\n\ndef _convert_to_gpuarray(data, order='F'):\n if data is None:\n return None\n elif isinstance(data, tuple):\n return tuple([_convert_to_gpuarray(d, order=order) for d in data])\n elif isinstance(data, pd.DataFrame):\n return _convert_to_gpuarray(cudf.DataFrame.from_pandas(data),\n order=order)\n elif isinstance(data, pd.Series):\n gs = cudf.Series.from_pandas(data)\n return cuda.as_cuda_array(gs)\n else:\n return input_utils.input_to_dev_array(data, order=order)[0]\n\n\ndef _convert_to_gpuarray_c(data):\n return _convert_to_gpuarray(data, order='C')\n\n\n_data_generators = {\n 'blobs': _gen_data_blobs,\n 'zeros': _gen_data_zeros,\n 
'classification': _gen_data_classification,\n 'regression': _gen_data_regression,\n 'higgs': _gen_data_higgs,\n}\n_data_converters = {\n 'numpy': _convert_to_numpy,\n 'cudf': _convert_to_cudf,\n 'pandas': _convert_to_pandas,\n 'gpuarray': _convert_to_gpuarray,\n 'gpuarray-c': _convert_to_gpuarray_c,\n}\n\n\ndef all_datasets():\n return _data_generators\n\n\[email protected]_cache(maxsize=8)\ndef gen_data(\n dataset_name,\n dataset_format,\n n_samples=0,\n n_features=0,\n random_state=42,\n test_fraction=0.0,\n **kwargs\n):\n \"\"\"Returns a tuple of data from the specified generator.\n\n Output\n -------\n (train_features, train_labels, test_features, test_labels) tuple\n containing matrices or dataframes of the requested format.\n test_features and test_labels may be None if no splitting was done.\n\n Parameters\n ----------\n dataset_name : str\n Dataset to use. Can be a synthetic generator (blobs or regression)\n or a specified dataset (higgs currently, others coming soon)\n\n dataset_format : str\n Type of data to return. (One of cudf, numpy, pandas, gpuarray)\n\n n_samples : int\n Number of samples to include in training set (regardless of test split)\n test_fraction : float\n Fraction of the dataset to partition randomly into the test set.\n If this is 0.0, no test set will be created.\n \"\"\"\n data = _data_generators[dataset_name](\n int(n_samples / (1 - test_fraction)),\n n_features,\n random_state,\n **kwargs\n )\n if test_fraction != 0.0:\n if n_samples == 0:\n n_samples = int(data[0].shape[0] * (1 - test_fraction))\n X_train, X_test, y_train, y_test = tuple(\n sklearn.model_selection.train_test_split(\n *data, train_size=n_samples, random_state=random_state\n )\n )\n data = (X_train, y_train, X_test, y_test)\n else:\n data = (*data, None, None) # No test set\n\n data = _data_converters[dataset_format](data)\n return data\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rist-ro/argo | [
"a10c33346803239db8a64c104db7f22ec4e05bef",
"a10c33346803239db8a64c104db7f22ec4e05bef",
"a10c33346803239db8a64c104db7f22ec4e05bef",
"a10c33346803239db8a64c104db7f22ec4e05bef"
] | [
"word_embeddings/test/core/readers.py",
"datasets/BTSC.py",
"argo/core/hooks/ImportanceSamplingHook.py",
"argo/core/TFDeepLearningModel.py"
] | [
"import numpy as np\nimport operator, os, itertools\nfrom abc import ABC, abstractmethod\nimport numexpr as ne\nne.set_num_threads(20)\n\ndef rmtxt(s):\n if s.endswith(\".txt\"):\n s=os.path.splitext(s)[0]\n return s\n\ndef get_reader(inputfilename):\n basename=os.path.basename(inputfilename)\n reader=None\n if basename.startswith('glove'):\n reader=GloVeEmbeddingsFileReader()\n elif basename.startswith('word2vec'):\n reader=Word2vecEmbeddingsFileReader()\n else:\n raise RuntimeError('the inputfilename \\'%s\\'does not start with either glove or word2vec so I do not know how to read the word embeddings'%basename)\n \n return reader\n\ndef read_selected_words(inputname):\n with open(inputname, 'r') as fin:\n words = [line.rstrip().split()[0] for line in fin.readlines()]\n return words\n\n\nclass EmbeddingsFileReader(ABC):\n \n @abstractmethod\n def preprocess(self, fin):\n \"\"\" what to do to the newly opened text file as preprocessing \"\"\"\n pass\n \n @abstractmethod\n def tuple_from_params(self, parameters):\n pass\n\n def read_dictionary(self, inputname):\n \"\"\" read the dictionary from inputfile \"\"\"\n with open(inputname, 'r') as fin:\n self.preprocess(fin)\n words = [line.rstrip().split(' ')[0] for line in fin.readlines()]\n return self.dicts_from_wordslist(words)\n\n def read_word_counts(self, inputname):\n \"\"\" read the word counts from inputfile \"\"\"\n with open(inputname, 'r') as fin:\n self.preprocess(fin)\n counts = [int(line.rstrip().split(' ')[1]) for line in fin.readlines()]\n return counts\n\n def dicts_from_wordslist(self, words):\n dictionary_size = len(words)\n dictionary = {w: idx for idx, w in enumerate(words)}\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return (dictionary_size, dictionary, reversed_dictionary)\n \n def unpack_split_line(self, line, vecsize, onlyu):\n arr=line.rstrip().split(' ')\n word=arr[0]\n parameters = np.array(arr[1:], dtype=np.float)\n \n return list(itertools.chain([word], self.tuple_from_params(parameters, vecsize, onlyu)))\n \n def read_embeddings(self, inputname, vecsize, consideronlyfirstvec, words_set=None):\n \"\"\" read the embeddings from inputfile \"\"\"\n with open(inputname, 'r') as fin:\n self.preprocess(fin)\n words, u_embeddings, v_embeddings = self.get_embeddings_fromfile(fin, vecsize, consideronlyfirstvec, words_set)\n \n dictionary_size, dictionary, reversed_dictionary = self.dicts_from_wordslist(words)\n \n # u_biases and v_biases are not returned at the moment since we do not know what to do with them\n return (dictionary_size, dictionary, reversed_dictionary, u_embeddings, v_embeddings)\n\n\nclass GloVeEmbeddingsFileReader(EmbeddingsFileReader):\n \n def preprocess(self, fin):\n pass\n \n def tuple_from_params(self, parameters, vecsize, onlyu):\n \n l=len(parameters)\n if l!=vecsize and l!=2*vecsize+2:\n raise ValueError(\"the vecsize passed is not compatible with the observation of line lenghts in the inputfile: line length = %s\"%l)\n \n u_w = parameters[:vecsize]\n if onlyu:\n bias_u = None\n v_w = None\n bias_v = None\n else:\n bias_u = parameters[vecsize]\n v_w = parameters[vecsize+1:-1]\n bias_v = parameters[-1]\n \n return (u_w,bias_u,v_w,bias_v)\n \n def get_embeddings_fromfile(self, filestream, vecsize, consideronlyfirstvec, words_set=None):\n words, u_embeddings, u_biases, v_embeddings, v_biases = \\\n zip(*[self.unpack_split_line(line, vecsize, consideronlyfirstvec) \\\n for line in filestream.readlines()])\n if words_set:\n words, u_embeddings, u_biases, v_embeddings, 
v_biases = zip(*[(w,uw,bu,vw,bv) for w,uw,bu,vw,bv in zip(words,u_embeddings,u_biases,v_embeddings,v_biases) if w in words_set])\n u_embeddings = np.array(u_embeddings)\n u_biases = np.array(u_biases)\n v_embeddings = np.array(v_embeddings)\n v_biases = np.array(v_biases)\n return (words, u_embeddings, v_embeddings)\n \n\nclass Word2vecEmbeddingsFileReader(EmbeddingsFileReader):\n \n def preprocess(self, fin):\n \"\"\" here I need to skip the header and the first word if it is <\\s> - (what is this tag that word2vec introduces?) \"\"\"\n fin.readline()\n word = fin.readline().split(' ')[0]\n if not word=='</s>':\n fin.seek(0)\n fin.readline()\n \n def tuple_from_params(self, parameters, vecsize, onlyu):\n l=len(parameters)\n if l!=vecsize and l!=2*vecsize:\n raise ValueError(\"the vecsize passed is not compatible with the observation of line lenghts in the inputfile: line length = %s\"%l)\n \n u_w = parameters[:vecsize]\n if onlyu:\n v_w = None\n else:\n v_w = parameters[vecsize:]\n \n return (u_w,v_w)\n \n def get_embeddings_fromfile(self, filestream, vecsize, consideronlyfirstvec, words_set=None):\n words, u_embeddings, v_embeddings = zip(*[self.unpack_split_line(line, vecsize, consideronlyfirstvec) for line in filestream.readlines()])\n if words_set:\n words, u_embeddings, v_embeddings = zip(*[(w,uw,vw) for w,uw,vw in zip(words,u_embeddings,v_embeddings) if w in words_set])\n u_embeddings = np.array(u_embeddings)\n v_embeddings = np.array(v_embeddings)\n return (words, u_embeddings, v_embeddings)\n \n\ndef extract_vocabulary_from(vocabfile):\n with open(vocabfile, 'r') as fin:\n vocab_words = [line.rstrip().split(' ')[0] for line in fin.readlines()]\n vocab_words=set(vocab_words)\n\n #\n #\n # def __init__(self, dictionary, howmany=10, amonghowmany=None):\n # self.dictionary=dictionary\n # self.dictionary_size=len(dictionary)\n # self.howmany=howmany\n # self.amonghowmany=amonghowmany\n # self.x_0 = np.sqrt(np.ones(self.dictionary_size)/self.dictionary_size)\n #\n # def word_analogy_measures(self, u_a, u_b, u_d, u_embeddings, v_embeddings, space=\"euclidean\"):\n # \"\"\" which vector uc_star in u_embeddings is the one with the highest analogy_measure? \"\"\"\n # if space==\"euclidean\":\n # analogy_measure=self.analogy_measure_euclidean\n # elif space==\"sphere_in_0\":\n # analogy_measure=self.analogy_measure_on_the_sphere_in0\n # elif space==\"sphere_in_a\":\n # analogy_measure=self.analogy_measure_on_the_sphere_ina\n # elif space==\"sphere_logmap\":\n # #follows the logmaps and query the nearest one\n # analogy_measure=self.analogy_measure_on_the_sphere_logmap\n # else:\n # raise ValueError(\"Unrecognized space argument in find_closest function. space was %s\"%space)\n #\n # uc_star = sorted([(i,analogy_measure(u_a, u_b, uc, u_d, v_embeddings)) for (i,uc) in enumerate(u_embeddings[:self.amonghowmany])], key=operator.itemgetter(1))[:self.howmany]\n # return uc_star\n\n # def analogy_nearby(self, word_a, word_b, word_d, u_embeddings, v_embeddings, space=\"euclidean\"):\n # \"\"\"given three words a,b,d I want to find c such that a:b=c:d.\"\"\"\n # try:\n # a=self.dictionary[word_a]\n # b=self.dictionary[word_b]\n # d=self.dictionary[word_d]\n # except KeyError as kerr:\n # print(\"\\nKey Error: {0}\".format(kerr))\n # print(\"The word requested is not present in the dictionary.\\n\")\n # sys.exit(-1)\n #\n # u_a, u_b, u_d = u_embeddings[a], u_embeddings[b], u_embeddings[d]\n #\n # #iam is indexes and analogy_measures ordered by analogy measures. 
list of (i, measure)\n # iam = self.word_analogy_measures(u_a, u_b, u_d, u_embeddings, v_embeddings, space)\n #\n # return iam\n #\n # #DEPRECATED here just for backward compatibility test\n #\n # def analogy_nearby_sphere_closest(self, word_a, word_b, word_d, u_embeddings, v_embeddings):\n # \"\"\"given three words a,b,d I want to find c such that a:b=c:d.\"\"\"\n # try:\n # a=self.dictionary[word_a]\n # b=self.dictionary[word_b]\n # d=self.dictionary[word_d]\n # except KeyError as kerr:\n # print(\"\\nKey Error: {0}\".format(kerr))\n # print(\"The word requested is not present in the dictionary.\\n\")\n # sys.exit(-1)\n #\n # x_target = self.follow_logmap_on_the_sphere(u_embeddings[a], u_embeddings[b], u_embeddings[d], v_embeddings)\n # x_embeddings = [send_u_to_x_on_the_sphere(u, v_embeddings) for u in u_embeddings]\n # ans = self.find_closest_euclidean(x_target, x_embeddings)\n # return ans\n #\n #\n\ndef print_array(arr):\n mw = max(len(w) for w,d in arr)\n for (w,d) in arr:\n print(\" \"+\"\\t\".join((w.ljust(mw),str(d))))\n\n\ndef write_hdf(x, table_name='embeddings', outputname=\"table_test.hdf\"):\n with tables.open_file(outputname, 'w') as f:\n atom = tables.Atom.from_dtype(x.dtype)\n\n vec_size = 300\n array_c = f.create_earray(f.root, table_name, atom, (0, vec_size))\n\n chunk_size = 500\n for i in range(0, 70000, chunk_size):\n f.root.embeddings.append(x[i: i + chunk_size])\n\n\ndef read_hdf(filename, table_name='embeddings'):\n with tables.open_file(filename) as f:\n # print(f.root.embeddings)\n x = f.root[table_name][:, :]\n\n return x\n",
"\"\"\"\nModule for managing Belgium Traffic Signs Classification dataset\n\"\"\"\n\nfrom .ImageDataset import ImageDataset\n\nimport h5py\nimport sklearn\nimport sklearn.model_selection\nimport os\nimport pandas as pd\nimport skimage.data\nimport skimage.transform\nfrom skimage import io, color, exposure, transform\nimport glob\nimport urllib\nfrom .utils import normalize, min_max_data_np\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport pdb\n\nNUM_CLASSES = 62\nIMG_SIZE = 32\n\n\nclass BTSC(ImageDataset):\n \"\"\"\n This class manage the dataset BTSC, properties of the datasets are uniquely determined\n by the params dictionary\n\n \"\"\"\n\n default_params = {\n # \"something\" : param\n }\n\n def __init__(self, params):\n super().__init__(params)\n\n self._id = self.dataset_id(params)\n\n default_data_dir = '/ssd_data/datasets/BTSC'\n\n self.data_dir = self._params['data_dir'] if 'data_dir' in params else default_data_dir\n\n self._train_set_x, self._train_set_y, \\\n self._validation_set_x, self._validation_set_y, \\\n self._test_set_x, self._test_set_y = self.load_data(self.data_dir)\n\n\n @staticmethod\n def dataset_id(params):\n \"\"\"\n This method interprets the parameters and generate an id\n \"\"\"\n\n BTSC.check_params_impl(params)\n\n id = 'BTSC'\n\n return id\n\n @staticmethod\n def load_data(btsc_dir):\n\n try:\n os.stat(btsc_dir)\n except:\n os.mkdir(btsc_dir)\n\n X_train, Y_train, X_val, Y_val = load_train(os.path.join(btsc_dir, 'Training'))\n \n random_indices_train = np.random.RandomState(seed=8).permutation(X_train.shape[0])\n X_train = X_train[random_indices_train]; Y_train = Y_train[random_indices_train]\n \n random_indices_val = np.random.RandomState(seed=9).permutation(X_val.shape[0])\n X_val = X_val[random_indices_val]; Y_val = Y_val[random_indices_val]\n \n X_test, Y_test = load_test(os.path.join(btsc_dir, 'Testing'))\n random_indices_test = np.random.RandomState(seed=10).permutation(X_test.shape[0])\n X_test = X_test[random_indices_test]; Y_test = Y_test[random_indices_test]\n \n # normalize data consistently (in case they would not already be)\n all_min, all_max = min_max_data_np([X_train, X_val, X_test])\n X_train = normalize(X_train, all_min, all_max)\n X_val = normalize(X_val, all_min, all_max)\n X_test = normalize(X_test, all_min, all_max)\n \n# make_hist_of_classes(Y_train, \"Train\")\n# make_hist_of_classes(Y_val, \"Validation\")\n# make_hist_of_classes(Y_test, \"Test\")\n\n return X_train, Y_train, X_val, Y_val, X_test, Y_test\n\n\ndef preprocess_img(img):\n # Histogram normalization in y\n hsv = color.rgb2hsv(img)\n hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2])\n img = color.hsv2rgb(hsv)\n return img\n\ndef track_no(path): # returns the track number (as a string of 5 chars) of a given .ppm path \n return path[-15:-10]\n\ndef five_char(n): # returns a string of 5 char corresponding to n; e.g. 
5->'00005', 23->'00023' \n if(n < 0 or n >= 10**5):\n raise ValueError(\"The number should be between 0 and 99999\")\n elif(n == 0):\n return \"00000\"\n else:\n no_digits = int(np.log10(n)) + 1\n no_zeros = 5 - no_digits\n return '0' * no_zeros + str(n)\n\n\ndef load_train(data_dir):\n h5filename = os.path.join(data_dir, 'BTSC_Train_and_Validation_correct-split' + str(IMG_SIZE) + '.h5')\n \n try:\n with h5py.File(h5filename, 'r') as hf:\n X_train, Y_train, X_val, Y_val = hf['train_imgs'][:], hf['train_labels'][:], hf['val_imgs'][:], hf['val_labels'][:]\n \n print(\"Loaded images from {:}\".format(h5filename))\n\n except (IOError, OSError, KeyError):\n print(\"Error in reading {:}. Processing all images...\".format(h5filename))\n img_root_dir = data_dir\n train_imgs = []\n train_labels = []\n val_imgs = [] \n val_labels = []\n \n# track_val = 0\n# track_total = 0\n# for i in range(NUM_CLASSES): \n# class_img_paths = glob.glob(os.path.join(img_root_dir, five_char(i) + '/*.ppm'))\n \n# tracknr = []\n# for img in class_img_paths:\n# tracknr.append(track_no(img))\n# print(\"Class \" + str(i) + \" has \" + str(len(class_img_paths)) + \" images and \" + str(len(set(tracknr))) + \" tracks\")\n \n# track_total += len(set(tracknr))\n# track_val += max(1, int(len(set(tracknr))*0.2))\n# print(\"No validation tracks: \", track_val, \" Total no: \", track_total)\n# import pdb;pdb.set_trace()\n\n np.random.seed(42)\n for cl in range(NUM_CLASSES): \n current_dir = img_root_dir + \"/\" + five_char(cl) + \"/\"\n class_img_paths = sorted(glob.glob(os.path.join(current_dir, '*.ppm'))) \n \n class_tracks = []\n for img_path in class_img_paths:\n class_tracks.append(int(track_no(img_path)))\n \n unique_tracks = np.unique(class_tracks)\n no_tracks_val = max(1, int(len(unique_tracks)*0.2))\n tracks_val = np.random.choice(unique_tracks, no_tracks_val, replace = False)\n \n print(\"Class \", str(cl), ', No tracks: ', len(unique_tracks), ', Val tracks: ', tracks_val)\n \n np.random.shuffle(class_img_paths)\n \n class_val_paths = [p for p in class_img_paths if int(track_no(p)) in tracks_val]\n class_train_paths = [p for p in class_img_paths if int(track_no(p)) not in tracks_val]\n \n for img_path in class_train_paths:\n try:\n img = preprocess_img(io.imread(img_path))\n img = skimage.transform.resize(img, (IMG_SIZE, IMG_SIZE), mode='constant')\n train_imgs.append(img)\n train_labels.append(cl) \n except (IOError, OSError):\n print('missed', img_path)\n pass\n \n for img_path in class_val_paths:\n try:\n img = preprocess_img(io.imread(img_path))\n img = skimage.transform.resize(img, (IMG_SIZE, IMG_SIZE), mode='constant')\n val_imgs.append(img)\n val_labels.append(cl) \n except (IOError, OSError):\n print('missed', img_path)\n pass \n \n X_train = np.array(train_imgs, dtype='float32') \n Y_train = np.array(train_labels, dtype='int32')\n X_val = np.array(val_imgs, dtype='float32')\n Y_val = np.array(val_labels, dtype='int32')\n \n with h5py.File(h5filename, 'w') as hf:\n hf.create_dataset('train_imgs', data=X_train)\n hf.create_dataset('train_labels', data=Y_train)\n hf.create_dataset('val_imgs', data=X_val)\n hf.create_dataset('val_labels', data=Y_val)\n\n return X_train, Y_train, X_val, Y_val\n\n\n\ndef load_test(data_dir): #normally data_dir='/ssd_data/datasets/BTSC + Training/Testing'\n \"\"\"Loads a data set and returns two lists:\n\n images: a list of Numpy arrays, each representing an image.\n labels: a list of numbers that represent the images labels.\n \"\"\"\n\n # Get all subdirectories of data_dir. 
Each represents a label.\n\n directories = [d for d in os.listdir(data_dir)\n if os.path.isdir(os.path.join(data_dir, d))]\n # Loop through the label directories and collect the data in\n # two lists, labels and images.\n labels = []\n images = []\n for d in directories:\n label_dir = os.path.join(data_dir, d)\n file_names = [os.path.join(label_dir, f)\n for f in os.listdir(label_dir) if f.endswith(\".ppm\")]\n # For each label, load it's images and add them to the images list.\n # And add the label number (i.e. directory name) to the labels list.\n for f in file_names:\n img_np = io.imread(f)\n images.append(preprocess_img(io.imread(f)))\n labels.append(int(d))\n\n images_newsize = [skimage.transform.resize(image, (IMG_SIZE, IMG_SIZE), mode='constant')\n for image in images]\n images_newsize = np.asarray(images_newsize, dtype = np.float32)\n labels = np.asarray(labels, dtype = np.int32)\n\n return images_newsize, labels\n\n\n# #TODO check me (Riccardo: do not delete, first check!)\n# import ipdb;ipdb.set_trace()\n# import ipdb;ipdb.set_trace()\n# shuffled_images32, shuffled_labels = zip(*np.random.shuffle(list(zip(images32, labels))))\n\n# return shuffled_images32, shuffled_labels\n\n\n\n\ndef make_hist_of_classes(y, text):\n plt.hist(y, bins = np.arange(NUM_CLASSES) + 1, density = True, color = 'm')\n plt.title(\"Distribution of Classes on \" + text)\n plt.xlabel('Class')\n plt.ylabel('Percentage')\n plt.savefig(\"/data1/temp/\" + text + str(len(y)) + str(np.random.randint(0, 100)) + \".png\")\n plt.close()\n\n# def load_test(btsc_dir='/ssd_data/datasets/BTSC'):\n# h5filename = os.path.join(btsc_dir, 'BTSC_Test.h5')\n\n# try:\n# with h5py.File(h5filename, 'r') as hf:\n# X, Y = hf['imgs'][:], hf['labels'][:]\n# print(\"Loaded images from {:}\".format(h5filename))\n\n# except (IOError, OSError, KeyError):\n# print(\"Error in reading {:}. Processing all images...\".format(h5filename))\n\n# img_root_dir = os.path.join(btsc_dir, 'Final_Test/Images/')\n# csvfilename = os.path.join(img_root_dir, 'GT-final_test.csv')\n\n# test = pd.read_csv(csvfilename, sep=';')\n\n# # Load test dataset\n# X = []\n# Y = []\n# for file_name, class_id in zip(list(test['Filename']), list(test['ClassId'])):\n# img_path = os.path.join(img_root_dir, file_name)\n# X.append(preprocess_img(io.imread(img_path)))\n# Y.append(class_id)\n\n# X = np.array(X, dtype='float32')\n# Y = np.array(Y, dtype='int32')\n\n# with h5py.File(h5filename, 'w') as hf:\n# hf.create_dataset('imgs', data=X)\n# hf.create_dataset('labels', data=Y)\n\n# return X, Y\n",
"import tensorflow as tf\n\n# from tensorflow import logging as tf_logging\nfrom ..argoLogging import get_logger\n\ntf_logging = get_logger()\n\nimport numpy as np\nfrom scipy import special as spl\n\nfrom ..utils.argo_utils import create_reset_metric\n# from datasets.Dataset import check_dataset_keys_not_loop\n\nfrom .EveryNEpochsTFModelHook import EveryNEpochsTFModelHook\n\nfrom datasets.Dataset import TRAIN, VALIDATION\n\n\nclass ImportanceSamplingHook(EveryNEpochsTFModelHook):\n def __init__(self,\n model,\n datasets_keys,\n period,\n time_reference,\n tensors_to_average,\n n_samples,\n batch_size,\n repetitions,\n dirName,\n dataset_keys=[TRAIN, VALIDATION],\n plot_offset=0,\n extra_feed_dict={}\n ):\n\n dirName = dirName + '/importance_sampling'\n\n super().__init__(model,\n period,\n time_reference,\n dataset_keys,\n dirName=dirName,\n plot_offset=plot_offset,\n extra_feed_dict=extra_feed_dict)\n\n self._n_samples = n_samples\n self._batch_size = batch_size\n self._repetitions = repetitions\n self._label_n_samples = \"_\".join([str(i) for i in self._n_samples])\n\n self._tensors_to_average = tensors_to_average\n\n tf_logging.info(\"Create ImportanceSampling for %s samples %d repetitions \" % (\n \" \".join([str(i) for i in self._n_samples]), self._repetitions))\n\n # nodes to be computed and saved\n names = [\"is-n\" + str(n_samples) + \"-r\" + str(rep) for n_samples in self._n_samples for rep in\n range(self._repetitions)]\n\n fileName = \"importance_sampling-n\" + \"_\".join([str(i) for i in n_samples])\n self._tensors_names = [[names]]\n self._tensors_plots = [[{\n 'fileName': fileName,\n 'logscale-y': 0}]]\n self._tensors_values = {}\n self._fileName = \"importance_sampling\" + \"_\" + self._label_n_samples\n\n \"\"\"\n Hook for importance sampling estimation\n \"\"\"\n\n def _begin_once(self):\n\n self._mean_values = {}\n self._mean_update_ops = {}\n self._mean_reset_ops = {}\n\n for dataset_str in self._datasets_keys:\n self._mean_values[dataset_str], self._mean_update_ops[dataset_str], self._mean_reset_ops[dataset_str] = zip(\n *[create_reset_metric(tf.metrics.mean, scope=dataset_str + \"_mean_reset_metric/\" + tnsr.name + \"/\" + self._label_n_samples,\n values=tnsr) for tnsr in self._tensors_to_average])\n\n def do_when_triggered(self, run_context, run_values):\n tf_logging.info(\"trigger for ImportanceSampling\")\n\n for dataset_str in self._datasets_keys:\n np_values = []\n for n in self._n_samples:\n\n for i in range(self._repetitions):\n mean_value = 0\n\n imp_sampling = None\n to_be_done = n\n for j in range(int(np.ceil(n / self._batch_size))):\n\n k = min(to_be_done, self._batch_size)\n to_be_done = to_be_done - k\n\n init_ops = (self._ds_initializers[dataset_str])\n run_context.session.run(init_ops)\n imp_sampling_ds = None\n while True:\n try:\n imp_sampling_batch = run_context.session.run(self._tensors_to_average,\n feed_dict={\n self._model.n_z_samples: k,\n self._ds_handle:\n self._ds_handles[\n dataset_str],\n\n **self._extra_feed_dict}\n )\n if imp_sampling_ds is None:\n imp_sampling_ds = imp_sampling_batch\n else:\n imp_sampling_ds = np.concatenate([imp_sampling_ds, imp_sampling_batch], axis=1)\n except tf.errors.OutOfRangeError:\n break\n # now I have a complete pass over the dataset, for the jth chunk\n if imp_sampling is None:\n imp_sampling = imp_sampling_ds\n else:\n imp_sampling = np.concatenate([imp_sampling, imp_sampling_ds], axis=0)\n\n imp_sampling_group_chunks = spl.logsumexp(imp_sampling, axis=0)\n imp_sampling_group_chunks -= np.log(n)\n 
mean_value = np.mean(imp_sampling_group_chunks)\n\n np_values.append(mean_value)\n\n self._tensors_values[dataset_str] = [[np_values]]\n\n self.log_to_file_and_screen()\n",
"import os\nfrom abc import abstractmethod\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom datasets.Dataset import Dataset, TRAIN_LOOP\nfrom .ArgoLauncher import ArgoLauncher\nfrom .DeepLearningModel import DeepLearningModel\nfrom .hooks.ArgoHook import STEPS, EPOCHS\nfrom .hooks.LoggingMeanTensorsHook import LoggingMeanTensorsHook\nfrom .hooks.CheckpointSaverHook import CheckpointSaverHook\nfrom .hooks.ImagesInputHook import ImagesInputHook\nfrom .hooks.FisherMatrixHook import FisherMatrixHook\n\nfrom .Regularizers import Regularizers\n\nfrom .optimizers.NaturalGradientOptimizer import NaturalGradientOptimizer\n\nfrom .argoLogging import get_logger\nfrom .utils.argo_utils import AC_REGULARIZATION, load_class, load_module, get_clipping_id, eval_method_from_tuple, \\\n NUMTOL, CUSTOM_REGULARIZATION\n\nfrom itertools import chain\n\nimport importlib\n\nimport pdb\n\ntf_logging = get_logger()\n\nfrom .optimizers.TFOptimizers import TFOptimizers\n\ndef load_model(conf_file, global_step=None, dataset=None, gpu=0, seed=0, model_class_base_path='', monitorSession=True):\n \"\"\"Load a TFDeepLearningModel and optionally save its network\n\n Args:\n conf_file (str): the conf file of the model where to find the experiment.\n dataset (datasets.Dataset): (optional) the argo Dataset of the model for the training. If not passed it will be reloaded.\n global_step (int): the global step to load the checkpoint (if None the last checkpoint present will be loaded).\n gpu (int) : the gpu on which the model will create the session\n seed (int) : the seed that the model will set\n model_class_base_path (str): the base path where to look for the model class\n\n Returns:\n TFDeepLearningModel: The loaded Argo TFDeepLearningModel.\n datasets.Dataset: the argo Dataset of the model for the training.\n\n \"\"\"\n\n dataset_conf, model_parameters, config = ArgoLauncher.process_conf_file(conf_file)\n\n if not dataset:\n dataset = Dataset.load_dataset(dataset_conf)\n\n ArgoTFDeepLearningModelClass = load_class(model_parameters[\"model\"], base_path=model_class_base_path)\n\n update_model_params(model_parameters, dataset)\n\n # baseDir = config[\"dirName\"]+\"/\"+dataset.id\n model_dir = os.path.split(os.path.dirname(conf_file))[0]\n model = ArgoTFDeepLearningModelClass(model_parameters, model_dir, gpu=gpu, seed=seed)\n model.init(dataset)\n # network = model._network\n # network.init_saver()\n \n model.create_session(model_parameters, config, monitorSession=monitorSession)\n\n #if global_step is None it will restore the last checkpoint in the folder model._checkpoint_dir, you can pass global_step to restore a particular chackpoint\n model.restore(global_step = global_step)\n\n # if save_net:\n # sess = model.get_raw_session()\n #\n # if network_dir:\n # path = network_dir+\"/\"+get_full_id(dataset, model)+'/'+network.name\n # else:\n # path = model.dirName+'/networks/'+network.name\n #\n # network.save(sess, path, global_step=global_step)\n\n return model, dataset\n\n\ndef load_model_without_session(conf_file, global_step=None, dataset=None, gpu=0, seed=0, model_class_base_path=''):\n \"\"\"Load a TFDeepLearningModel without session\n\n Args:\n conf_file (str): the conf file of the model where to find the experiment.\n dataset (datasets.Dataset): (optional) the argo Dataset of the model for the training. 
If not passed it will be reloaded.\n global_step (int): the global step to load the checkpoint (if None the last checkpoint present will be loaded).\n gpu (int) : the gpu on which the model will create the session\n seed (int) : the seed that the model will set\n model_class_base_path (str): the base path where to look for the model class\n\n Returns:\n TFDeepLearningModel: The loaded Argo TFDeepLearningModel.\n datasets.Dataset: the argo Dataset of the model for the training.\n\n \"\"\"\n\n dataset_conf, model_parameters, config = ArgoLauncher.process_conf_file(conf_file)\n\n if not dataset:\n dataset = Dataset.load_dataset(dataset_conf)\n\n ArgoTFDeepLearningModelClass = load_class(model_parameters[\"model\"], base_path=model_class_base_path)\n\n update_model_params(model_parameters, dataset)\n\n # baseDir = config[\"dirName\"]+\"/\"+dataset.id\n model_dir = os.path.split(os.path.dirname(conf_file))[0]\n model = ArgoTFDeepLearningModelClass(model_parameters, model_dir, gpu=gpu, seed=seed)\n model.init(dataset)\n\n # network = model._network\n # network.init_saver()\n \n checkpoint_name = model.checkpoint_name(global_step)\n\n return model, dataset, checkpoint_name\n\n\ndef load_network(ArgoTFDeepLearningModelClass, conf_file, dataset, global_step=None):\n \"\"\"Load the network of a specific model and the corresponding checkpoint.\n The Network needs to be applied (to generate the variables, that are instantiated in the _build of Sonnet)\n and then restored from the checkpoint.\n\n e.g.\n ```\n network, checkpoint_name = load_network(ClassificationModel, model_dir,\n dataset, model_params, config)\n logits = network(x)\n network.restore(sess, checkpoint_name)\n ```\n\n Args:\n ArgoTFDeepLearningModelClass (Class): the TFDeepLearningModel class to load.\n conf_file (str): the conf file of the model where to find the experiment.\n dataset (datasets.Dataset): (optional) the argo Dataset of the model for the training. 
If not passed it will be reloaded.\n global_step (int): (optional) the global step to load the checkpoint (if None the last checkpoint present will be loaded).\n\n Returns:\n ArgoAbstractNetwork: the Argo Network to load\n str: checkpoint_name\n \"\"\"\n\n dataset_conf, model_parameters, config = ArgoLauncher.process_conf_file(conf_file)\n # parallelism = 0 # 0 is equivalent to single\n\n # if dataset is not None:\n # print(\"load_network: `dataset` IS DEPRECATED, will be removed soon\")\n # if model_class_base_path is not '':\n # print(\"load_network: `model_class_base_path` IS DEPRECATED, will be removed soon\")\n #\n\n update_model_params(model_parameters, dataset)\n\n model_dir = os.path.split(os.path.dirname(conf_file))[0]\n model = ArgoTFDeepLearningModelClass(model_parameters, model_dir)\n\n network = model._network\n checkpoint_name = model.checkpoint_name(global_step)\n return network, checkpoint_name\n\n\ndef update_model_params(model_parameters, dataset):\n try:\n output_shape = dataset.y_shape\n except ValueError:\n output_shape = None\n\n dataset_info = {\"output_shape\": output_shape,\n \"input_shape\": dataset.x_shape_train}\n\n model_parameters.update(dataset_info)\n\n\nclass TFDeepLearningModel(DeepLearningModel):\n\n default_params= {\n **DeepLearningModel.default_params,\n\n \"stochastic\" : 1, # add Gaussian noise to the dataset\n \"stochastic_noise_param\" : 0, # variance of the noise added to the dataset\n\n #TODO never rescale\n # \"rescale\" : 0., # rescale inputs in [rescale, 1-rescale] (for numerical issues)\n\n \"optimizer\": (\"AdamOptimizer\", {\"learning_rate\" : 0.001,\n \"beta1\": 0.9,\n \"beta2\":0.999}),\n\n \"regularizers\" : {},\n \n \"grad_clipping\" : (None, {}),\n #\"natural_gradient\" : [0],\n\n \"batch_size_train\" : 128,\n \"batch_size_eval\" : 512,\n }\n\n def create_id(self):\n\n _id = '-st' + str(self._opts[\"stochastic\"]) +\\\n '-stp' + str(self._opts[\"stochastic_noise_param\"]) + \\\n '-bs' + str(self._opts[\"batch_size_train\"]) + \\\n '-tr' + TFOptimizers.create_id(self._opts[\"optimizer\"]) + \\\n '-c' + get_clipping_id(self._opts[\"grad_clipping\"]) \n\n if \"note\" in self._opts:\n _id += '-N' + self._opts[\"note\"]\n if \"pretrained_checkpoint\" in self._opts:\n longname = os.path.abspath(self._opts[\"pretrained_checkpoint\"])\n shortname = \"\".join([i[0] for i in longname.split(\"/\")[1:]])\n _id += '-PC' + shortname\n super_id = super().create_id()\n _id += super_id\n return _id\n\n def __init__(self, opts, dirName, check_ops = False, gpu=-1, seed=0):\n\n # this needs to be called before the parent constructor\n #self._regularizers = opts.get(\"regularizers\", {})\n #pdb.set_trace()\n \n super().__init__(opts, dirName, seed)\n \n # moved up the hierarchy\n # self.dirName = dirName + \"/\" + self._id\n\n self._check_ops = check_ops\n self._numerics_ops = None\n\n self._gpu = gpu\n\n self.sess = None\n self._saver = None\n self.global_step = None\n\n tf.set_random_seed(seed)\n\n #restore checkpoint\n self._restore_chkptfile = (None if \"pretrained_checkpoint\" not in self._opts else self._opts[\"pretrained_checkpoint\"])\n #checkpoints\n self._checkpoint_dir = self.dirName + \"/saved_models/\"\n #tensorboard\n self._tensorboard_dir = self.dirName + \"/tensorboard/\"\n\n self.summary_keys = [tf.GraphKeys.SUMMARIES]\n self.summary_nodes = {ck:[] for ck in self.summary_keys}\n self.summary_writers = {ck:[] for ck in self.summary_keys}\n\n self.stochastic = self._opts[\"stochastic\"]\n if self.stochastic==0:\n #no noise is added\n pass\n 
elif self.stochastic == 1:\n self._clip_after_noise = True\n elif self.stochastic== 2 :\n self._clip_after_noise = False\n\n else:\n raise ValueError(\"stochastic can be: 0 no noise, 1 noise and clip after,\"\n \"2 noise and do not clip after. Found stochastic {}\".format(self.stochastic))\n\n self.stochastic_noise_param = self._opts[\"stochastic_noise_param\"]\n\n # # TODO never rescale\n if \"rescale\" in self._opts:\n raise KeyError(\"the key `rescale` is not supported anymore. Rescaling is not allowed, remove it from the conf.\")\n # self.rescale = opts[\"rescale\"] # rescale the inputs if they are continuous\n\n self.batch_size = {}\n self.batch_size[\"train\"] = self._opts[\"batch_size_train\"]\n self.batch_size[\"eval\"] = self._opts[\"batch_size_eval\"]\n\n # important nodes\n self.x = None\n self.y = None\n self.x_shape = {}\n\n self.optimizer_tuple = self._opts[\"optimizer\"]\n #self.compute_natural_gradient = opts[\"natural_gradient\"]\n\n #self.learning_rate = opts[\"training_algorithm\"][\"learning_rate\"]\n\n #self.training_algorithm = opts[\"training_algorithm\"][\"algorithm\"]\n #self.learning_rate = opts[\"training_algorithm\"][\"learning_rate\"]\n\n self._grad_clipping_tuple = self._opts[\"grad_clipping\"]\n\n # important nodes\n self.loss = None\n self.regularizers = []\n \n # create regularizers\n if (\"regularizers\" not in self._opts) or (\"weights\" in self._opts[\"regularizers\"].keys() or \"bias\" in self._opts[\"regularizers\"].keys() or \"custom\" in self._opts[\"regularizers\"].keys()) or len(self._opts[\"regularizers\"].keys())==0:\n self.custom_regularizers = []\n else:\n self.custom_regularizers = {}\n for key in self._opts[\"regularizers\"].keys():\n self.custom_regularizers[key] = []\n \n self.update_ops = []\n # list of kl_losses on the weights in case of bayesian learning\n self.kl_losses = []\n\n self.datasets_initializers = {}\n self.datasets_handles_nodes = {}\n self.datasets_handles = {}\n\n # passed to ChechpoitSaverHook\n self._pb_output_nodes = None\n\n # NB dataset can be none in certain contexts,\n # NB maybe there is a better way to do this (Luigi)\n # TODO YES, this function should take the input node\n # IF using a CNN, shape of input node Must be of len 4, both sonnet functions and tf.layers.conv2d require this convention and this makes the code uniform: input_shape = [None, size1, size2, channels]\n # TODO-ARGO2 this function should restore variables from the last checkpoint if it is present, not always initialize the variables\n def init(self, dataset):\n\n self.binary = dataset.binary_input\n\n #TODO these two are probably useless... if you need the input shape just do tf.shape(self.raw_x) for some networks the input could change from train to eval\n #TODO if there is a way to avoid using explicitly the input dimension it is probably better...\n self.x_shape[\"train\"] = dataset.x_shape_train\n self.x_shape[\"eval\"] = dataset.x_shape_eval\n\n self.dataset = dataset\n\n self.create_feedable_placeholders()\n\n # create global steps\n self.create_global_steps(dataset.n_samples_train)\n\n self.create_input_nodes(dataset)\n\n # set optimizer\n self.set_optimizer()\n\n #self.create_is_training_node()\n\n self.create_network()\n\n #TODO-ARGO2 create loss, regularizer, optimizer and global step are typical of a training algorithm\n #TODO-ARGO2 how do we handle other cases that do not have a train? 
I think we should define\n #TODO-ARGO2 TrainableTFDeepLearningModel and redefine init\n\n # define self.loss and check it is finite\n self.create_loss()\n\n self.create_custom_regularizers()\n \n # define self.regularizers and self.update_ops\n self.create_regularizers_and_updates()\n\n # set the training operation for self.loss + self.regularizers + self.custom_regularizers\n self.set_training_op()\n \n # not used at the moment, could be useful at a certain point\n # self.create_random_update_op()\n\n # there are case in which multiple losses exit\n if isinstance(self.loss, dict):\n for k, v in self.loss.items():\n self.loss[k] = tf.check_numerics(v, \"self.loss\" + str(k) + \" is not finite\")\n else:\n self.loss = tf.check_numerics(self.loss, \"self.loss is not finite\")\n\n #session will be created after init\n \n \n def create_datasets_with_handles(self, dataset):\n datasets_nodes, handle, ds_initializers, ds_handles = dataset.get_dataset_with_handle(self.batch_size[\"train\"], self.batch_size[\"eval\"])\n self.datasets_initializers = ds_initializers\n self.datasets_handles_nodes = ds_handles\n self.ds_handle = handle\n self.datasets_nodes = datasets_nodes # this is needed, since ds_raw_x may be modified in create_input_nodes to remove the mask\n \n self.ds_raw_x = datasets_nodes[0][0]\n self.ds_aug_x = datasets_nodes[0][1]\n self.ds_perturb_x = datasets_nodes[0][2]\n \n return datasets_nodes, handle, ds_initializers, ds_handles\n\n def create_feedable_placeholders(self):\n \"\"\"\n DO NOT USE FOR MODEL SPECIFIC PLACEHOLDERS (e.g. losses or samples..)\n Create feedables. This function is setting additional placeholder\n (it probably should never be used since placeholders should be set 3in the right places)\n\n Sets:\n feedable placeholders with general purpose\n\n \"\"\"\n\n self.is_training = tf.placeholder_with_default(False, shape=(), name=\"is_training\")\n\n #def create_is_training_node(self):\n # self._is_training = tf.placeholder_with_default(False, shape=(), name=\"is_training\")\n\n @abstractmethod\n def create_network(self):\n \"\"\"\n It gets the input nodes from the dataset and creates the network\n starting from the input nodes created by `create_input_nodes`\n\n Sets:\n network nodes depending on the specific child class\n \"\"\"\n pass\n\n @abstractmethod\n def create_input_nodes(self, dataset):\n \"\"\"\n create input nodes for the network\n starting from the dataset\n\n Sets:\n input nodes depending on the specific child class\n \"\"\"\n pass\n\n @abstractmethod\n def create_loss(self):\n \"\"\"create loss nodes for the network\n based on the nodes that create_networks has created,\n this method will create the loss nodes\n\n Sets:\n self.loss\n other additional loss nodes to be monitored during train can be set\n\n \"\"\"\n pass\n\n # create custom regularizers \n def create_custom_regularizers(self):\n \n if isinstance(self.custom_regularizers, list):\n self._create_custom_regularizers()\n elif isinstance(self.custom_regularizers, dict):\n for key in self.custom_regularizers.keys():\n # add regularizers for discriminator\n self._create_custom_regularizers(key)\n else:\n raise Exception(\"self.custom_regularizers should be a list or a dict\")\n\n def _create_custom_regularizers(self, network=None):\n if network is None:\n regularizers = self._opts[\"regularizers\"]\n custom_regularizers = self.custom_regularizers\n else:\n regularizers = self._opts[\"regularizers\"][network]\n custom_regularizers = self.custom_regularizers[network]\n\n '''\n if \"custom\" in 
regularizers.keys():\n \n for regularizer_tuple in regularizers[\"custom\"]:\n\n regularizer_name = regularizer_tuple[0]\n regularizer_tuple[1][\"model\"] = self\n custom_regularizer = Regularizers.instantiate_regularizer(regularizer_tuple, module_path = \"\")\n ''\n regularizer_name = regularizer_tuple[0]\n regularizer_kwargs = regularizer_tuple[1]\n regularizer_kwargs[\"model\"] = self\n \n try:\n # load customized regularizers from core/Regularizers.py\n reg_module = importlib.import_module(\"core.Regularizers\", '.'.join(__name__.split('.')[:-1]))\n custom_regularizer, _, _ = eval_method_from_tuple(reg_module, (regularizer_name, regularizer_kwargs))\n except AttributeError as e:\n # try to load from argo\n try:\n # load customized regularizers from core/argo/core/Regularizers.py\n reg_module = importlib.import_module(\".Regularizers\", '.'.join(__name__.split('.')[:-1]))\n custom_regularizer, _, _ = eval_method_from_tuple(reg_module, (regularizer_name, regularizer_kwargs))\n\n except AttributeError as e:\n raise AttributeError(\"regularizer %s not found\" % regularizer_name) from e\n ''\n\n custom_regularizers.append(custom_regularizer)\n self.check_regularizers(regularizer_name, network)\n '''\n\n if \"custom\" in regularizers.keys():\n \n for regularizer_tuple in regularizers[\"custom\"]:\n\n regularizer_name = regularizer_tuple[0]\n regularizer_tuple[1][\"model\"] = self\n custom_regularizer = Regularizers.instantiate_regularizer(regularizer_tuple, module_path = \"\")\n\n custom_regularizers.append(custom_regularizer)\n self.check_regularizers(regularizer_name, network)\n\n \n\n def check_regularizers(self, regularizer_name, network=None):\n pass\n\n '''\n def create_custom_regularizers(self):\n # should not be an empty list\n return [0.]\n '''\n\n # save in self.regularizers the regularizers of the model\n def create_regularizers_and_updates(self):\n\n wb_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n # see keras_utils.py: activity_and_contractive_regularizers\n ac_regularizers = tf.get_collection(AC_REGULARIZATION)\n # if (not wb_regularizers) and (not ac_regularizers):\n # wb_regularizers = [tf.constant(0.)]\n\n #import pdb;pdb.set_trace()\n if len(wb_regularizers)>0:\n self.regularizers += wb_regularizers\n if len(ac_regularizers)>0:\n self.regularizers += ac_regularizers\n\n # self.regularizers += ([self.custom_regularizers[r] for r in self._opts[\"regularizers\"].keys() if len(self.custom_regularizers[r])>0])\n # we need to flatten the list if we have both custom regularizers and another type of regularizers\n # (weight/bias or contractive)\n self.regularizers += list(chain.from_iterable([self.custom_regularizers[r]\n for r in self._opts[\"regularizers\"].keys()\n if len(self.custom_regularizers[r]) > 0]))\n\n self.update_ops += tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n # ac_train_regularizers = tf.get_collection(get_ac_collection_name(\"train\"))\n # ac_validation_regularizers = tf.get_collection(get_ac_collection_name(\"validation\"))\n # ac_test_regularizers = tf.get_collection(get_ac_collection_name(\"test\"))\n # self.regularizer[\"train\"] = tf.add_n(wb_regularizers + ac_train_regularizers, name=\"regularization_train\")\n # self.regularizer[\"validation\"] = tf.add_n(wb_regularizers + ac_validation_regularizers, name=\"regularization_validation\")\n # self.regularizer[\"test\"] = tf.add_n(wb_regularizers + ac_test_regularizers, name=\"regularization_test\")\n\n \n def create_global_steps(self, n_points_train_set):\n self.n_batches_per_epoch 
= np.ceil(n_points_train_set/self.batch_size[\"train\"])\n\n self.global_step = tf.train.get_or_create_global_step()\n self.global_epoch = tf.cast(tf.floor(tf.cast(self.global_step, tf.float32) /\n self.n_batches_per_epoch),\n tf.int64, \"global_epoch\")\n\n tf.add_to_collection(\"global_epoch\", self.global_epoch)\n\n # this creates an operation to add to all trainable variables a white noise of param\n # std = tf.sqrt(variance)/10\n def create_random_update_op(self):\n\n vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n update_opts = []\n for var in vars:\n\n _, variance = tf.nn.moments(tf.reshape(var,[-1]),axes=[0])\n\n normal = tf.distributions.Normal(loc=0.0, scale=tf.sqrt(variance)/10)\n white_noise = normal.sample(var.get_shape())\n\n update_opts.append(var.assign(var + white_noise))\n\n self.random_update_op = tf.group(update_opts)\n\n #apply clipping\n def _clip_gradients(self, grads_and_vars, grad_clipping_tuple):\n\n clipping_method, clipping_kwargs = grad_clipping_tuple\n\n grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]\n grads = [g for (g, v) in grads_and_vars_not_none]\n variables = [v for (g, v) in grads_and_vars_not_none]\n\n #self.grad_norms = [tf.norm(g) for g in grads]\n\n #else:\n # self.new_logits = self.logits\n\n self.grads = grads\n self.grads_norm = tf.global_norm(grads)\n\n #see https://www.tensorflow.org/api_docs/python/tf/train/Optimizer#processing_gradients_before_applying_them\n #if clipping_method == \"clip_by_global_norm\":\n #\n # print_ops = [tf.print(\"global norm \" + str(tf.norm(g))) for g in grads]\n # with tf.control_dependencies(print_ops):\n # clipped_grads, global_norm = tf.clip_by_global_norm(grads, clipping_kwargs[\"value\"])\n # self.clipped_grads_and_vars = [(clipped_grads[i], variables[i]) for i in range(len(grads))]\n\n\n #see https://www.tensorflow.org/api_docs/python/tf/train/Optimizer#processing_gradients_before_applying_them\n if clipping_method == \"clip_by_global_norm\":\n\n #clip_by_global_norm requires all the grads as argument, not only grad[i]\n grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]\n grads = [g for (g, v) in grads_and_vars_not_none]\n variables = [v for (g, v) in grads_and_vars_not_none]\n\n #print_ops = [tf.print(\"loss=\", self.loss)] + [tf.print(\"norms_g\", tf.norm(g)) for g in grads] + [tf.print(\"g\", g) for g in grads] + [tf.print(\"p\", self.prob)] + [tf.print(\"invFisher\", self.invFisher)] + [tf.print(\"invA\", self.invA)] + [tf.print(\"invB\", self.invB)]\n\n #print_ops = [tf.print(\"partA =\", self.partA, summarize=-1), tf.print(\"partB =\", self.partB, summarize=-1), tf.print(\"prob_sliced =\", self.prob_sliced, summarize=-1), tf.print(\"natural_gradient_theta =\", self.natural_gradient_loss_theta, summarize=-1), tf.print(\"euclidean gradient =\", self.euclidean_gradient, summarize=-1), tf.print(\"TA =\", self.TA, summarize=-1), tf.print(\"Tup =\", self.Tup, summarize=-1) , tf.print(\"Tdown =\", self.Tdown, summarize=-1) , tf.print(\"TB =\", self.TB, summarize=-1)] + [tf.print(\"norms_g\", tf.norm(g), summarize=-1) for g in grads]\n #with tf.control_dependencies(print_ops):\n clip_value = clipping_kwargs[\"value\"]\n clipped_grads, global_norm = tf.clip_by_global_norm(grads, clip_value)\n clipped_grads_and_vars = [(clipped_grads[i], variables[i]) for i in range(len(grads))]\n\n elif clipping_method == \"clip_by_norm\":\n\n grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]\n\n grads = [g for (g, v) in 
grads_and_vars_not_none]\n variables = [v for (g, v) in grads_and_vars_not_none]\n\n # How t handle numerical issues\n # 1) set nan/inf to zero\n # grads = [tf.where(tf.is_finite(g), g, tf.zeros_like(g)) for (g, v) in grads_and_vars_not_none]\n # 2) set nan/inf to noisy gradient,\n #grads = [tf.where(tf.is_finite(g), g, tfd.Normal(loc=0.0, scale=tf.sqrt(tf.nn.moments(tf.reshape(v,[-1]),axes=[0])[1])/10 + 0.01).sample(g.get_shape())) for (g, v) in grads_and_vars_not_none]\n\n clip_value = clipping_kwargs[\"value\"]\n clipped_grads_and_vars = [(tf.clip_by_norm(g, clip_value), v) for (g, v) in zip(grads, variables)]\n\n elif clipping_method == \"clip_by_value\":\n\n clip_value = clipping_kwargs[\"value\"]\n clipped_grads_and_vars = [(tf.clip_by_value(g, -clip_value, clip_value), v) for (g, v) in grads_and_vars if g is not None]\n\n elif not clipping_method:\n\n grads_and_vars_not_none = [(g, v) for (g, v) in grads_and_vars if g is not None]\n clipped_grads_and_vars = grads_and_vars_not_none\n\n else:\n raise Exception(\"clipping method not recognized: \" + clipping_method)\n\n return clipped_grads_and_vars\n\n ''''not used, please don't delete (Luigi)\n def check_numerics_grad(grads):\n\n condition = tf.constant(True)\n for g in grads:\n condition = tf.math.logical_and(condition,tf.check_numerics(g, \"check numerics for gradient failed\"))\n return condition\n\n # the function above could be used in the following context\n self.clipped_grads_and_vars = tf.cond(check_numerics_grad(grads),\n self.clip_gradients(self.grads_and_vars, self._grad_clipping_tuple),\n grads_and_vars_not_none)\n #grads_and_vars_not_none)\n\n '''\n\n def set_optimizer(self):\n\n with tf.variable_scope('optimizer'):\n self._optimizer, self._learning_rate = TFOptimizers.instantiate_optimizer(self, self.optimizer_tuple)\n\n def set_training_op(self):\n\n #def flatten_weights(weights_list):\n # weights_tensor = []\n # for w in weights_list:\n # print(w)\n # a = tf.reshape(w, [-1, ])\n # weights_tensor.append(a)\n #\n # return tf.concat(weights_tensor, axis=0)\n\n '''\n #########################################\n # Euclidean gradient computed in two steps, through the Jacobian\n #########################################\n\n #self.probabilities = tf.nn.softmax(self.logits)\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = tf.cast(self.y, tf.int32), logits = self.logits) + self.regularizer\n #middle_man = self.logits # self.natural_loss #\n\n #n = self.probabilities.get_shape().as_list()[1]\n self.euclidean_gradient = tf.gradients(loss, self.logits)[0] # from [g] to g\n\n trainable_vars = tf.trainable_variables()\n jacobians = jacobian(self.logits, trainable_vars) # [tf.reduce_sum(i, axis=0) for i in jacobian(self.logits, trainable_vars)]\n\n #self.nat_grad = [tf.reduce_mean(i, axis=0) for i in self.jacobian]\n\n # proper way to compute the contraction\n self.euclidean_gradient = [tf.reduce_mean(tf.einsum(get_contraction(i), i, self.euclidean_gradient), axis=0) for i in jacobians]\n # OLD self.nat_grad = [tf.reduce_mean(tf.tensordot(i, self.nat_grad_theta, [[1], [1]]), axis=[0, 1, -1]) for i in self.jacobian]\n\n self.euclidean_grad_norms = [tf.norm(g) for g in self.euclidean_gradient]\n\n '''\n\n total_loss = self.loss\n # add regularizers in case there are any\n if len(self.regularizers)>0:\n total_loss += tf.add_n(self.regularizers, name=\"regularization\")\n\n # 1st part of minimize: compute_gradient\n self.grads_and_vars = self._optimizer.compute_gradients(total_loss)\n\n # clip gradients\n 
clipped_grads_and_vars = self._clip_gradients(self.grads_and_vars, self._grad_clipping_tuple)\n\n # compute norms in case they need to be logged\n self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars]\n self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars]\n # check that gradients are finite\n grads = [tf.check_numerics(g, \"grads is not finite\") for (g, v) in clipped_grads_and_vars]\n variables = [tf.check_numerics(v, \"grads is not finite\") for (g, v) in clipped_grads_and_vars]\n self.gradient_weight_global_norms = [tf.global_norm(grads), tf.global_norm(variables)]\n\n # 2nd part of minimize: apply_gradient\n optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step)\n\n update_ops = tf.group(*self.update_ops)\n self.training_op = tf.group(update_ops, optimizer_step)\n\n\n def set_check_ops(self):\n self._check_ops = 1\n\n # TODO argo2 This is not working anymore with the new session\n #with self.sess.graph.as_default():\n self._numerics_ops = tf.add_check_numerics_ops()\n\n def release(self):\n super().release()\n self.sess.close()\n tf.reset_default_graph()\n\n def set_summaries(self):\n \"\"\"This function sets summaries and summaryFileWriters, it needs to be invoked before\n training to keep track of the summaries.\n (cannot be invoked in create_and_init_network because the FileWriter will corrupt data in the logfolder\n at each initialization)\n \"\"\"\n\n # wb_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n # ac_regularizers = tf.get_collection(AC_REGULARIZATION)\n #\n # reg_nodes = wb_regularizers + ac_regularizers\n\n # for rn in reg_nodes:\n # tf.summary.scalar(rn.name, rn,\n # collections=[tf.GraphKeys.SUMMARIES, 'regularization_summaries'])\n\n # for each key I get the collection of summary nodes\n # I set up a filewriter for each summary node\n self.summary_nodes = {sk: tf.get_collection(sk) for sk in self.summary_keys}\n\n for sk in self.summary_keys:\n self.summary_writers[sk] = [tf.compat.v1.summary.FileWriter(self._tensorboard_dir+sn.name)\n for sn in self.summary_nodes[sk]]\n\n\n def create_hooks(self, config):\n\n hooks=[]\n\n # get general arguments for the models hook\n self._time_reference_str = config[\"time_reference\"]\n self._check_time_reference(self._time_reference_str)\n self._plot_offset = config.get(\"plot_offset\", 0)\n self._default_model_hooks_kwargs = {\"time_reference\" : self._time_reference_str}\n\n self._plot_model_hooks_kwargs = {\"time_reference\" : self._time_reference_str,\n \"plot_offset\": self._plot_offset}\n\n self._n_steps_stats = self._get_steps(config[\"stats_period\"], self._time_reference_str)\n\n # stop hook\n tot_steps = int(self._opts['epochs']+1)*self.n_batches_per_epoch\n hooks.append(tf.train.StopAtStepHook(last_step=tot_steps))\n\n # general info hook (no average on validation but only on train loop)\n hooks.append(self._create_general_info_hook(config))\n\n # regularizers hook (no average on validation but only on train loop)\n hooks.append(self._create_regularizers_hook(config))\n\n # checkpoint hooks\n self._save_model = config[\"save_model\"]\n if self._save_model:\n max_to_keep = config.get(\"save_max_to_keep\", 5)\n self._init_session_saver(max_to_keep)\n self._checkpoint_basename = \"model.ckpt\"\n save_steps = self._get_steps(config[\"save_model_period\"], self._time_reference_str)\n\n hooks.append(CheckpointSaverHook(self._checkpoint_dir,\n save_steps = save_steps,\n saver = self._saver,\n 
checkpoint_basename = self._checkpoint_basename,\n pb_output_nodes = self._pb_output_nodes,\n save_pb_at_end = config.get(\"save_pb\", 0)\n ))\n\n # summary hook\n if config[\"save_summaries\"]:\n save_steps_summaries = self._get_steps(config[\"save_summaries_period\"], self._time_reference_str)\n\n self.set_summaries()\n\n summary_hooks = [tf.train.SummarySaverHook(save_steps=save_steps_summaries,\n output_dir=self._tensorboard_dir+sn.name,\n summary_op=sn,\n summary_writer=fw)\n for sk in self.summary_keys for sn,fw in zip(self.summary_nodes[sk], self.summary_writers[sk])]\n\n hooks += summary_hooks\n\n # images input hook\n kwargs = config.get(\"ImagesInputHook\", None)\n if kwargs:\n kwargs = {**self._default_model_hooks_kwargs,\n **kwargs}\n\n hooks.append(ImagesInputHook(model = self,\n dirName = self.dirName,\n **kwargs)\n )\n\n #gradient_hook = self._create_gradient_hook(config)\n #if gradient_hook is not None:\n # hooks.append(gradient_hook)\n\n\n kwargs = config.get(\"FisherMatrixHook\", None)\n if kwargs and isinstance(self._optimizer, NaturalGradientOptimizer):\n kwargs = {**self._default_model_hooks_kwargs,\n #'datasets_keys' : [TRAIN_LOOP],\n **kwargs}\n hooks.append(FisherMatrixHook(model = self,\n dirName = self.dirName,\n **kwargs\n )\n )\n\n return hooks\n\n def _create_gradient_hook(self, config):\n\n # gradienthook\n tensors_to_average = [\n [[self.gradient_weight_global_norms[0]],\n self.gradient_norms\n ],\n [[self.gradient_weight_global_norms[1]],\n self.weight_norms\n ],\n ]\n\n layer_names = np.array(list(range(len(self.gradient_norms))))\n layer_names = np.floor(layer_names / 2) + 1\n layer_names = [\"L\" + str(int(l)) for l in layer_names]\n\n tensors_to_average_names = [\n [[\"gradient_global_norms\"],\n layer_names\n ],\n [[\"weight_global_norms\"],\n layer_names\n ],\n ]\n\n tensors_to_average_plots = [\n [{\"fileName\": \"gradient_global_norms\", \"logscale-y\": 1, \"compose-label\": 0},\n {\"fileName\": \"gradient_norms\", \"logscale-y\": 1, \"compose-label\": 0}\n ],\n [{\"fileName\": \"weight_global_norms\", \"logscale-y\": 1, \"compose-label\": 0},\n {\"fileName\": \"weight_norms\", \"logscale-y\": 1, \"compose-label\": 0}\n ],\n ]\n\n kwargs = config.get(\"GradientsHook\", None)\n if kwargs:\n gradient_period = config[\"GradientsHook\"][\"period\"]\n gradient_steps = self._get_steps(gradient_period, self._time_reference_str)\n hook = LoggingMeanTensorsHook(model=self,\n fileName=\"gradient\",\n dirName=self.dirName,\n tensors_to_average=tensors_to_average,\n tensors_to_average_names=tensors_to_average_names,\n tensors_to_average_plots=tensors_to_average_plots,\n average_steps=gradient_steps,\n tensorboard_dir=self._tensorboard_dir,\n trigger_summaries=config[\"save_summaries\"],\n # trigger_plot=True,\n print_to_screen=False,\n plot_offset=self._plot_offset, # config.get(\"plot_start_epoch\", 1),\n train_loop_key=TRAIN_LOOP,\n datasets_keys=[],\n time_reference=self._time_reference_str\n )\n\n return hook\n else:\n return None\n\n # create custom regularizers id\n # passing the network equal to None support the possibility to use this function in presence\n # of multiple networks, used in gan and vae, not in hm\n def create_custom_regularizers_id(self, network=None):\n\n if network is None:\n regularizers = self._opts[\"regularizers\"]\n else:\n regularizers = self._opts[\"regularizers\"][network]\n \n ids = \"\"\n if \"custom\" in regularizers.keys():\n #import pdb;pdb.set_trace()\n for regularizer_tuple in regularizers[\"custom\"]:\n\n regularizer_name 
= regularizer_tuple[0]\n\n try:\n base_path = '.'.join(__name__.split('.')[:-3])\n regularizer_module = load_module(\"Regularizers\", base_path=base_path)\n id = regularizer_module.create_id(regularizer_tuple)\n except Exception as e:\n # try to load from argo\n try:\n id = Regularizers.create_id(regularizer_tuple)\n except Exception as e:\n raise Exception(\"regularizer %s not found\" % regularizer_name) from e\n\n if ids == \"\":\n ids = id\n else:\n ids = ids + \"_\" + id\n \n return ids\n\n \n def _create_regularizers_hook(self, config):\n\n wb_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n # see keras_utils.py: activity_and_contractive_regularizers\n ac_regularizers = tf.get_collection(AC_REGULARIZATION)\n custom_regularizers = tf.get_collection(CUSTOM_REGULARIZATION)\n\n if wb_regularizers:\n wb_regularizers_names = [r.name for r in wb_regularizers]\n else:\n wb_regularizers = [tf.zeros([1])]\n wb_regularizers_names = [\"none\"]\n wb_regularizers_fileNames = {\"fileName\" : \"wb_regularizers\"}\n\n if ac_regularizers:\n ac_regularizers_names = [r.name for r in ac_regularizers]\n else:\n ac_regularizers = [tf.zeros([1])]\n ac_regularizers_names = [\"none\"]\n ac_regularizers_fileNames = {\"fileName\" : \"ac_regularizers\"}\n\n if custom_regularizers:\n custom_regularizers_names = [r.name for r in custom_regularizers]\n else:\n custom_regularizers = [tf.zeros([1])]\n custom_regularizers_names = [\"none\"]\n custom_regularizers_fileNames = {\"fileName\": \"custom_regularizers\"}\n\n # logging hooks\n tensors_to_average = [[wb_regularizers], [ac_regularizers, custom_regularizers]]\n tensors_to_average_names = [[wb_regularizers_names], [ac_regularizers_names, custom_regularizers_names]]\n tensors_to_average_plots = [[wb_regularizers_fileNames],\n [ac_regularizers_fileNames, custom_regularizers_fileNames]]\n\n\n hook = LoggingMeanTensorsHook(model=self,\n fileName=\"regularizers\",\n dirName=self.dirName,\n tensors_to_average=tensors_to_average,\n tensors_to_average_names=tensors_to_average_names,\n tensors_to_average_plots=tensors_to_average_plots,\n average_steps=self._n_steps_stats,\n tensorboard_dir=self._tensorboard_dir,\n trigger_summaries=config[\"save_summaries\"],\n print_to_screen=False,\n # trigger_plot = True,\n plot_offset=self._plot_offset,\n train_loop_key=TRAIN_LOOP,\n datasets_keys=[],\n time_reference=self._time_reference_str\n )\n return hook\n\n\n def _create_general_info_hook(self, config):\n # logging hooks\n tensors_to_average = [\n [[self._learning_rate]]\n ]\n tensors_to_average_names = [\n [[\"learning_rate\"]],\n ]\n tensors_to_average_plots = [\n [{\"fileName\": \"learning_rate\"}]\n ]\n\n hook = LoggingMeanTensorsHook(model=self,\n fileName=\"info\",\n dirName=self.dirName,\n tensors_to_average=tensors_to_average,\n tensors_to_average_names=tensors_to_average_names,\n tensors_to_average_plots=tensors_to_average_plots,\n average_steps=self._n_steps_stats,\n tensorboard_dir=self._tensorboard_dir,\n trigger_summaries=config[\"save_summaries\"],\n print_to_screen=False,\n # trigger_plot = True,\n plot_offset = self._plot_offset,\n train_loop_key=TRAIN_LOOP,\n datasets_keys=[]\n )\n return hook\n\n\n # why passing opt?\n def create_session(self, opts, config, monitorSession=True):\n\n # save to set the right behavior in self.get_raw_session()\n self.monitorSession = monitorSession\n \n # set some important options\n if self._gpu == -1:\n sess_config = tf.ConfigProto(device_count = {'GPU': 0},\n allow_soft_placement=True)\n else:\n #config = 
tf.ConfigProto(log_device_placement=True)\n sess_config = tf.ConfigProto(allow_soft_placement=True)\n\n sess_config.gpu_options.allow_growth = True\n\n # self.sess = tf.Session(config=config)\n # self.sess = tf.InteractiveSession()\n\n # not needed anymore, moved in hooks...\n # self.set_summaries()\n\n if self._check_ops:\n self.set_check_ops()\n\n self.hooks = self.create_hooks(config)\n\n #TODO-ARGO2 if we would use a SingularMonitoredSession, it is possible to directly pass it to a saver for custom user saving..\n #TODO-ARGO2 How to handle this with the more stable Monitored Session? Maybe a TFTrainableDeepLearningModel\n #TODO-ARGO2 by the way it is possible to define a custom Monitored session\n #TODO-ARGO2 (to handle only hooks without fancy session stuffs http://davideng.me/2017/10/11/designing-a-custom-monitored-training-session.html\n \n\n if monitorSession:\n # MonitoredSession\n # this will restore all the variables from the latest checkpoint if it exists\n self._fix_checkpoint_abs_to_rel(self._checkpoint_dir) # need to ensure checkpoint has relative path saved\n\n chiefsess_creator = tf.train.ChiefSessionCreator(config=sess_config, checkpoint_dir=self._checkpoint_dir)\n\n if self._restore_chkptfile is not None:\n self._network.init_saver()\n # this is restoring variables \n self.sess = tf.train.MonitoredSession(session_creator=chiefsess_creator, hooks=self.hooks)\n\n # Restore from some checkpoint\n if self._restore_chkptfile is not None:\n raw_sess = self.get_raw_session()\n if raw_sess.run(self.global_step) == 0:\n self._network.restore(raw_sess, self._restore_chkptfile)\n else:\n self.sess = tf.Session(config=sess_config)\n #all_variables = tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES)\n #self.sess.run(tf.variables_initializer(all_variables))\n \n if self._save_model:\n self._save_graph()\n\n # SingularMonitoredSession\n # self.sess = tf.train.SingularMonitoredSession(checkpoint_dir=self._checkpoint_dir,\n # hooks=self.hooks, config=sess_config)\n\n\n #I do not want to trigger hooks for this!!\n self.datasets_handles = self.get_raw_session().run(self.datasets_handles_nodes)\n\n\n # to get the raw session in MonitoredSession see\n # https://github.com/tensorflow/tensorflow/issues/8425\n # https://github.com/tensorflow/tensorflow/issues/11971\n def get_raw_session(self):\n if self.sess is None:\n raise Exception(\"The session is None\")\n\n if self.monitorSession:\n return self.sess._tf_sess()\n else:\n # suppose regular Session()\n return self.sess\n\n\n def train(self):\n\n # epoch = 0\n # # start timer for TF\n # start_tf = timeit.default_timer()\n\n # INITIALIZATION OPERATIONS HERE\n # Also: I always perform a global_step evaluation to eventually \"wake up\" the\n # stop condition of StopAtStepHook if needed (otherwise it would do an extra step over its limit each time)\n # (possible bug in tf?)\n # initializer = self.dataset_initializers[\"train\"]\n # args = [initializer] if initializer is not None else []\n # args += [self.global_step]\n # self.get_raw_session().run(args)\n\n for hook in self.hooks:\n before_training = getattr(hook, 'before_training', None)\n if before_training is not None:\n before_training(self.get_raw_session())\n\n #i = 0\n\n # count of the nan / inf\n k = 0\n # MAX nan / inf\n MAX_K = 100\n\n print(\"Graph size: \" + str(self.graph_size))\n\n # loops over the batches\n while not self.sess.should_stop():\n # import pdb;pdb.set_trace()\n try:\n #import pdb; pdb.set_trace()\n # sess = self.get_raw_session()\n # 
sess.run(self._network.get_all_variables()[6])\n # g, g_norm, x, y, loss, grads_and_vars, _, global_epoch = self.sess.run([self.grads, self.grads_norm, self.x, self.y, self.loss, self.grads_and_vars , self.training_op, self.global_epoch],\n\n\n # loss must be evaluated and fetched to raise InvalidArgumentError if nan, see https://github.com/tensorflow/tensorflow/issues/11098\n _, _, global_epoch = self.sess.run([self.training_op, self.loss, self.global_epoch],\n feed_dict = {self.ds_handle : self.datasets_handles[TRAIN_LOOP],\n self.is_training : True})\n\n #pdb.set_trace()\n\n # self.sess.run([self.z],feed_dict = {self.ds_handle : self.datasets_handles[TRAIN_LOOP],self.is_training : True})[0].shape\n # pdb.set_trace()\n\n #self.invFisher, self.prob,\n #print(i, loss)\n #if i==180:\n # pdb.set_trace()\n #i = i + 1\n\n except tf.errors.InvalidArgumentError:\n\n raise Exception(\"an error has occurred during training, check stack trace UP HERE\")\n\n k = k+1\n print(\"an error has occurred during training, this happened \" + str(k) + \" times over \" + str(MAX_K))\n if k == MAX_K:\n raise Exception(\"an error has occurred during training, check stack trace UP HERE\")\n\n\n\n # here the are two possibilities we could try:\n # 1) ignore the current batch and proceed to the next, in case the error is a numerical error\n # 2) add noise the the weights, see self.random_update_op (so far it didn't help)\n # TODO not sure if I need to evaluate global_epoch or not in this case\n\n #_, global_epoch = self.sess.run([self.random_update_op, self.global_epoch], feed_dict = {self.ds_handle : self.datasets_handles[TRAIN_LOOP]})\n\n\n # if global_epoch != epoch:\n # # end timer for TF\n # stop_tf = timeit.default_timer()\n # time = stop_tf - start_tf\n #\n # epoch = global_epoch\n #\n # print(\"global epoch: \", global_epoch, \" time:\", time)\n #\n # # start timer for TF\n # start_tf = timeit.default_timer()\n\n def _init_session_saver(self, max_to_keep, variables=None):\n \"\"\" A saver with all the variables for the session is instantiated and set in self._saver, with variables,\n by default variables is None, all variables in the graph will be saved.\n It is probably a good idea since the whole session must be later be restored by the ChiefSession\n \"\"\"\n os.makedirs(self._checkpoint_dir, exist_ok=True)\n #variables = tf.trainable_variables()\n self._saver = tf.train.Saver(variables, max_to_keep=max_to_keep, save_relative_paths=True)\n\n def _save_graph(self):\n writer = tf.summary.FileWriter(logdir=self._checkpoint_dir,\n # graph=self.sess.graph,\n graph=tf.get_default_graph(),\n filename_suffix=\"-graph\"\n )\n writer.flush()\n\n def _assemble_checkpoint_name(self, checkpoint_dir):\n path = os.path.join(checkpoint_dir, \"model.ckpt\")\n return path\n\n def _latest_checkpoint(self, checkpoint_dir):\n with open(checkpoint_dir + 'checkpoint') as fs:\n potentiallyabsolutepath = fs.readline().split()[1]\n\n potentiallyabsolutepath = os.path.basename(potentiallyabsolutepath.strip('\"'))\n path = checkpoint_dir + os.path.basename(potentiallyabsolutepath)\n return path\n\n def _fix_checkpoint_abs_to_rel(self, checkpoint_dir):\n checkpointfilename = checkpoint_dir + 'checkpoint'\n exists = os.path.isfile(checkpointfilename)\n if exists:\n with open(checkpointfilename) as fs:\n lines = fs.readlines()\n\n fs = open(checkpointfilename, 'w')\n for line in lines:\n which_model, potentiallyabsolutepath = line.split()\n potentiallyabsolutepath = os.path.basename(potentiallyabsolutepath.strip('\"'))\n rel_path = 
'\\\"'+os.path.basename(potentiallyabsolutepath)+'\\\"'\n fs.write(\" \".join([which_model, rel_path]) + \"\\n\")\n\n fs.close()\n\n def checkpoint_name(self, global_step):\n if global_step:\n path = self._assemble_checkpoint_name(self._checkpoint_dir)\n path += \"-\"+str(global_step)\n else:\n path = self._latest_checkpoint(self._checkpoint_dir)\n\n if not path:\n raise Exception(\"could not find saved checkpoints in %s\"%self._checkpoint_dir)\n\n return path\n\n def save(self, global_step=None):\n if self._saver is None:\n raise Exception(\"saver must be initialized before attempt to save\")\n else:\n session = self.get_raw_session()\n path = self._assemble_checkpoint_name()\n self._saver.save(session, path, global_step=global_step)\n\n def restore(self, global_step=None):\n \"\"\"Restore the model variables.\n\n Args:\n global_step (type): the step from which to restore. By default it is None\n and the latest checkpoint in self.checkpoint_dir will be restored\n \"\"\"\n\n path = \"\"\n session = self.get_raw_session()\n\n if self._saver is None:\n raise Exception(\"saver must be initialized before attempt to restore\")\n else:\n path = self.checkpoint_name(global_step)\n self._saver.restore(session, path)\n\n @property\n def graph_size(self):\n return len([n.name for n in self.sess.graph.as_graph_def().node])\n\n def _check_time_reference(self, time_ref):\n time_choices = [EPOCHS, STEPS]\n if not time_ref in time_choices:\n raise ValueError(\"time_reference in the frequency tuple can only be in %s\" % time_choices)\n\n def _get_steps(self, n, time_reference):\n # try:\n # n, time_ref = frequency\n # except:\n # raise Exception(\"cannot unpack tuple %s, expected a couple (n, time_ref),\"\n # \"where time_ref can either be `epochs` or `steps`\" % frequency)\n\n self._check_time_reference(time_reference)\n n = float(n)\n\n if time_reference == EPOCHS:\n n = n * self.n_batches_per_epoch\n\n return int(n)\n"
] | [
[
"numpy.array"
],
[
"matplotlib.pyplot.title",
"numpy.random.seed",
"numpy.asarray",
"numpy.unique",
"numpy.random.choice",
"numpy.arange",
"numpy.random.shuffle",
"numpy.log10",
"numpy.random.randint",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.random.RandomState",
"matplotlib.pyplot.ylabel"
],
[
"numpy.log",
"numpy.concatenate",
"numpy.ceil",
"numpy.mean",
"scipy.special.logsumexp"
],
[
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.train.ChiefSessionCreator",
"tensorflow.get_default_graph",
"tensorflow.group",
"tensorflow.add_n",
"tensorflow.get_collection",
"tensorflow.placeholder_with_default",
"tensorflow.check_numerics",
"tensorflow.train.get_or_create_global_step",
"tensorflow.ConfigProto",
"numpy.ceil",
"tensorflow.clip_by_norm",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.norm",
"tensorflow.train.StopAtStepHook",
"tensorflow.train.MonitoredSession",
"numpy.floor",
"tensorflow.set_random_seed",
"tensorflow.add_check_numerics_ops",
"tensorflow.train.SummarySaverHook",
"tensorflow.add_to_collection",
"tensorflow.global_norm",
"tensorflow.clip_by_value",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.reshape",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope",
"tensorflow.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
aerisweather/rioxarray | [
"1755f90ed827ea66477a235677c1c5ecd245833d"
] | [
"rioxarray/_io.py"
] | [
"\"\"\"\n\nCredits:\n\nThis file was adopted from: https://github.com/pydata/xarray # noqa\nSource file: https://github.com/pydata/xarray/blob/1d7bcbdc75b6d556c04e2c7d7a042e4379e15303/xarray/backends/rasterio_.py # noqa\n\"\"\"\n\nimport contextlib\nimport os\nimport re\nimport threading\nimport warnings\n\nimport numpy as np\nimport rasterio\nfrom packaging import version\nfrom rasterio.errors import NotGeoreferencedWarning\nfrom rasterio.vrt import WarpedVRT\nfrom xarray import Dataset, IndexVariable\nfrom xarray.backends.common import BackendArray\nfrom xarray.backends.file_manager import CachingFileManager, FileManager\nfrom xarray.backends.locks import SerializableLock\nfrom xarray.coding import times, variables\nfrom xarray.core import indexing\nfrom xarray.core.dataarray import DataArray\nfrom xarray.core.dtypes import maybe_promote\nfrom xarray.core.utils import is_scalar\nfrom xarray.core.variable import as_variable\n\nfrom rioxarray.exceptions import RioXarrayError\nfrom rioxarray.rioxarray import _generate_spatial_coords\n\n# TODO: should this be GDAL_LOCK instead?\nRASTERIO_LOCK = SerializableLock()\nNO_LOCK = contextlib.nullcontext()\n\n\nclass FileHandleLocal(threading.local):\n \"\"\"\n This contains the thread local ThreadURIManager\n \"\"\"\n\n def __init__(self): # pylint: disable=super-init-not-called\n self.thread_manager = None # Initialises in each thread\n\n\nclass ThreadURIManager:\n \"\"\"\n This handles opening & closing file handles in each thread.\n \"\"\"\n\n def __init__(\n self,\n opener,\n *args,\n mode=\"r\",\n kwargs=None,\n ):\n self._opener = opener\n self._args = args\n self._mode = mode\n self._kwargs = {} if kwargs is None else dict(kwargs)\n self._file_handle = None\n\n @property\n def file_handle(self):\n \"\"\"\n File handle returned by the opener.\n \"\"\"\n if self._file_handle is not None:\n return self._file_handle\n self._file_handle = self._opener(*self._args, mode=self._mode, **self._kwargs)\n return self._file_handle\n\n def close(self):\n \"\"\"\n Close file handle.\n \"\"\"\n if self._file_handle is not None:\n self._file_handle.close()\n self._file_handle = None\n\n def __del__(self):\n self.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, type_, value, traceback):\n self.close()\n\n\nclass URIManager(FileManager):\n \"\"\"\n The URI manager is used for lockless reading\n \"\"\"\n\n def __init__(\n self,\n opener,\n *args,\n mode=\"r\",\n kwargs=None,\n ):\n self._opener = opener\n self._args = args\n self._mode = mode\n self._kwargs = {} if kwargs is None else dict(kwargs)\n self._local = FileHandleLocal()\n\n def acquire(self, needs_lock=True):\n if self._local.thread_manager is None:\n self._local.thread_manager = ThreadURIManager(\n self._opener, *self._args, mode=self._mode, kwargs=self._kwargs\n )\n return self._local.thread_manager.file_handle\n\n @contextlib.contextmanager\n def acquire_context(self, needs_lock=True):\n try:\n yield self.acquire(needs_lock=needs_lock)\n except Exception:\n self.close(needs_lock=needs_lock)\n raise\n\n def close(self, needs_lock=True):\n if self._local.thread_manager is not None:\n self._local.thread_manager.close()\n self._local.thread_manager = None\n\n def __del__(self):\n self.close(needs_lock=False)\n\n def __getstate__(self):\n \"\"\"State for pickling.\"\"\"\n return (self._opener, self._args, self._mode, self._kwargs)\n\n def __setstate__(self, state):\n \"\"\"Restore from a pickle.\"\"\"\n opener, args, mode, kwargs = state\n self.__init__(opener, *args, mode=mode, 
kwargs=kwargs)\n\n\nclass RasterioArrayWrapper(BackendArray):\n \"\"\"A wrapper around rasterio dataset objects\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n\n def __init__(\n self,\n manager,\n lock,\n name,\n vrt_params=None,\n masked=False,\n mask_and_scale=False,\n unsigned=False,\n ):\n self.manager = manager\n self.lock = lock\n self.masked = masked or mask_and_scale\n self.mask_and_scale = mask_and_scale\n\n # cannot save riods as an attribute: this would break pickleability\n riods = manager.acquire()\n if vrt_params is not None:\n riods = WarpedVRT(riods, **vrt_params)\n self.vrt_params = vrt_params\n self._shape = (riods.count, riods.height, riods.width)\n\n self._dtype = None\n dtypes = riods.dtypes\n if not np.all(np.asarray(dtypes) == dtypes[0]):\n raise ValueError(\"All bands should have the same dtype\")\n\n dtype = _rasterio_to_numpy_dtype(dtypes)\n\n # handle unsigned case\n if mask_and_scale and unsigned and dtype.kind == \"i\":\n self._dtype = np.dtype(f\"u{dtype.itemsize}\")\n elif mask_and_scale and unsigned:\n warnings.warn(\n f\"variable {name!r} has _Unsigned attribute but is not \"\n \"of integer type. Ignoring attribute.\",\n variables.SerializationWarning,\n stacklevel=3,\n )\n self._fill_value = riods.nodata\n if self._dtype is None:\n if self.masked:\n self._dtype, self._fill_value = maybe_promote(dtype)\n else:\n self._dtype = dtype\n\n @property\n def dtype(self):\n \"\"\"\n Data type of the array\n \"\"\"\n return self._dtype\n\n @property\n def fill_value(self):\n \"\"\"\n Fill value of the array\n \"\"\"\n return self._fill_value\n\n @property\n def shape(self):\n \"\"\"\n Shape of the array\n \"\"\"\n return self._shape\n\n def _get_indexer(self, key):\n \"\"\"Get indexer for rasterio array.\n\n Parameter\n ---------\n key: tuple of int\n\n Returns\n -------\n band_key: an indexer for the 1st dimension\n window: two tuples. Each consists of (start, stop).\n squeeze_axis: axes to be squeezed\n np_ind: indexer for loaded numpy array\n\n See also\n --------\n indexing.decompose_indexer\n \"\"\"\n if len(key) != 3:\n raise RioXarrayError(\"rasterio datasets should always be 3D\")\n\n # bands cannot be windowed but they can be listed\n band_key = key[0]\n np_inds = []\n # bands (axis=0) cannot be windowed but they can be listed\n if isinstance(band_key, slice):\n start, stop, step = band_key.indices(self.shape[0])\n band_key = np.arange(start, stop, step)\n # be sure we give out a list\n band_key = (np.asarray(band_key) + 1).tolist()\n if isinstance(band_key, list): # if band_key is not a scalar\n np_inds.append(slice(None))\n\n # but other dims can only be windowed\n window = []\n squeeze_axis = []\n for iii, (ikey, size) in enumerate(zip(key[1:], self.shape[1:])):\n if isinstance(ikey, slice):\n # step is always positive. 
see indexing.decompose_indexer\n start, stop, step = ikey.indices(size)\n np_inds.append(slice(None, None, step))\n elif is_scalar(ikey):\n # windowed operations will always return an array\n # we will have to squeeze it later\n squeeze_axis.append(-(2 - iii))\n start = ikey\n stop = ikey + 1\n else:\n start, stop = np.min(ikey), np.max(ikey) + 1\n np_inds.append(ikey - start)\n window.append((start, stop))\n\n if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):\n # do outer-style indexing\n np_inds[-2:] = np.ix_(*np_inds[-2:])\n\n return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)\n\n def _getitem(self, key):\n band_key, window, squeeze_axis, np_inds = self._get_indexer(key)\n\n if not band_key or any(start == stop for (start, stop) in window):\n # no need to do IO\n shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)\n out = np.zeros(shape, dtype=self.dtype)\n else:\n with self.lock:\n riods = self.manager.acquire(needs_lock=False)\n if self.vrt_params is not None:\n riods = WarpedVRT(riods, **self.vrt_params)\n out = riods.read(band_key, window=window, masked=self.masked)\n if self.masked:\n out = np.ma.filled(out.astype(self.dtype), self.fill_value)\n if self.mask_and_scale:\n for iii, band_iii in enumerate(np.atleast_1d(band_key) - 1):\n out[iii] = (\n out[iii] * riods.scales[band_iii] + riods.offsets[band_iii]\n )\n\n if squeeze_axis:\n out = np.squeeze(out, axis=squeeze_axis)\n return out[np_inds]\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )\n\n\ndef _parse_envi(meta):\n \"\"\"Parse ENVI metadata into Python data structures.\n\n See the link for information on the ENVI header file format:\n http://www.harrisgeospatial.com/docs/enviheaderfiles.html\n\n Parameters\n ----------\n meta : dict\n Dictionary of keys and str values to parse, as returned by the rasterio\n tags(ns='ENVI') call.\n\n Returns\n -------\n parsed_meta : dict\n Dictionary containing the original keys and the parsed values\n\n \"\"\"\n\n def parsevec(value):\n return np.fromstring(value.strip(\"{}\"), dtype=\"float\", sep=\",\")\n\n def default(value):\n return value.strip(\"{}\")\n\n parse = {\"wavelength\": parsevec, \"fwhm\": parsevec}\n parsed_meta = {key: parse.get(key, default)(value) for key, value in meta.items()}\n return parsed_meta\n\n\ndef _rasterio_to_numpy_dtype(dtypes):\n \"\"\"Numpy dtype from first entry of rasterio dataset.dtypes\"\"\"\n # rasterio has some special dtype names (complex_int16 -> np.complex64)\n if dtypes[0] == \"complex_int16\":\n dtype = np.dtype(\"complex64\")\n else:\n dtype = np.dtype(dtypes[0])\n\n return dtype\n\n\ndef _to_numeric(value):\n \"\"\"\n Convert the value to a number\n \"\"\"\n try:\n value = int(value)\n except (TypeError, ValueError):\n try:\n value = float(value)\n except (TypeError, ValueError):\n pass\n return value\n\n\ndef _parse_tag(key, value):\n # NC_GLOBAL is appended to tags with netcdf driver and is not really needed\n key = key.split(\"NC_GLOBAL#\")[-1]\n if value.startswith(\"{\") and value.endswith(\"}\"):\n try:\n new_val = np.fromstring(value.strip(\"{}\"), dtype=\"float\", sep=\",\")\n # pylint: disable=len-as-condition\n value = new_val if len(new_val) else _to_numeric(value)\n except ValueError:\n value = _to_numeric(value)\n else:\n value = _to_numeric(value)\n return key, value\n\n\ndef _parse_tags(tags):\n parsed_tags = {}\n for key, value in tags.items():\n key, value = 
_parse_tag(key, value)\n parsed_tags[key] = value\n return parsed_tags\n\n\nNETCDF_DTYPE_MAP = {\n 0: object, # NC_NAT\n 1: np.byte, # NC_BYTE\n 2: np.char, # NC_CHAR\n 3: np.short, # NC_SHORT\n 4: np.int_, # NC_INT, NC_LONG\n 5: float, # NC_FLOAT\n 6: np.double, # NC_DOUBLE\n 7: np.ubyte, # NC_UBYTE\n 8: np.ushort, # NC_USHORT\n 9: np.uint, # NC_UINT\n 10: np.int64, # NC_INT64\n 11: np.uint64, # NC_UINT64\n 12: object, # NC_STRING\n}\n\n\ndef _load_netcdf_attrs(tags, data_array):\n \"\"\"\n Loads the netCDF attributes into the data array\n\n Attributes stored in this format:\n - variable_name#attr_name: attr_value\n \"\"\"\n for key, value in tags.items():\n key, value = _parse_tag(key, value)\n key_split = key.split(\"#\")\n if len(key_split) != 2:\n continue\n variable_name, attr_name = key_split\n if variable_name in data_array.coords:\n data_array.coords[variable_name].attrs.update({attr_name: value})\n\n\ndef _load_netcdf_1d_coords(tags):\n \"\"\"\n Dimension information:\n - NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)\n - NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)\n - NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)\n \"\"\"\n dim_names = tags.get(\"NETCDF_DIM_EXTRA\")\n if not dim_names:\n return {}\n dim_names = dim_names.strip(\"{}\").split(\",\")\n coords = {}\n for dim_name in dim_names:\n dim_def = tags.get(f\"NETCDF_DIM_{dim_name}_DEF\")\n if not dim_def:\n continue\n # pylint: disable=unused-variable\n dim_size, dim_dtype = dim_def.strip(\"{}\").split(\",\")\n dim_dtype = NETCDF_DTYPE_MAP.get(int(dim_dtype), object)\n dim_values = tags[f\"NETCDF_DIM_{dim_name}_VALUES\"].strip(\"{}\")\n coords[dim_name] = IndexVariable(\n dim_name, np.fromstring(dim_values, dtype=dim_dtype, sep=\",\")\n )\n return coords\n\n\ndef build_subdataset_filter(group_names, variable_names):\n \"\"\"\n Example::\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\":\n MODIS_Grid_2D:sur_refl_b01_1'\n\n Parameters\n ----------\n group_names: str or list or tuple\n Name or names of netCDF groups to filter by.\n\n variable_names: str or list or tuple\n Name or names of netCDF variables to filter by.\n\n Returns\n -------\n re.SRE_Pattern: output of re.compile()\n \"\"\"\n variable_query = r\"\\w+\"\n if variable_names is not None:\n if not isinstance(variable_names, (tuple, list)):\n variable_names = [variable_names]\n variable_names = [re.escape(variable_name) for variable_name in variable_names]\n variable_query = rf\"(?:{'|'.join(variable_names)})\"\n if group_names is not None:\n if not isinstance(group_names, (tuple, list)):\n group_names = [group_names]\n group_names = [re.escape(group_name) for group_name in group_names]\n group_query = rf\"(?:{'|'.join(group_names)})\"\n else:\n return re.compile(r\"\".join([r\".*(?:\\:/|\\:)(/+)?\", variable_query, r\"$\"]))\n return re.compile(\n r\"\".join(\n [r\".*(?:\\:/|\\:)(/+)?\", group_query, r\"[:/](/+)?\", variable_query, r\"$\"]\n )\n )\n\n\ndef _rio_transform(riods):\n \"\"\"\n Get the transform from a rasterio dataset\n reguardless of rasterio version.\n \"\"\"\n try:\n return riods.transform\n except AttributeError:\n return riods.affine # rasterio < 1.0\n\n\ndef _get_rasterio_attrs(riods):\n \"\"\"\n Get rasterio specific attributes\n \"\"\"\n # pylint: disable=too-many-branches\n # Add rasterio attributes\n attrs = _parse_tags(riods.tags(1))\n if hasattr(riods, \"nodata\") and riods.nodata is not None:\n # The nodata values for the raster bands\n attrs[\"_FillValue\"] = 
riods.nodata\n if hasattr(riods, \"scales\"):\n # The scale values for the raster bands\n if len(set(riods.scales)) > 1:\n attrs[\"scales\"] = riods.scales\n warnings.warn(\n \"Offsets differ across bands. The 'scale_factor' attribute will \"\n \"not be added. See the 'scales' attribute.\"\n )\n else:\n attrs[\"scale_factor\"] = riods.scales[0]\n if hasattr(riods, \"offsets\"):\n # The offset values for the raster bands\n if len(set(riods.offsets)) > 1:\n attrs[\"offsets\"] = riods.offsets\n warnings.warn(\n \"Offsets differ across bands. The 'add_offset' attribute will \"\n \"not be added. See the 'offsets' attribute.\"\n )\n else:\n attrs[\"add_offset\"] = riods.offsets[0]\n if hasattr(riods, \"descriptions\") and any(riods.descriptions):\n if len(set(riods.descriptions)) == 1:\n attrs[\"long_name\"] = riods.descriptions[0]\n else:\n # Descriptions for each dataset band\n attrs[\"long_name\"] = riods.descriptions\n if hasattr(riods, \"units\") and any(riods.units):\n # A list of units string for each dataset band\n if len(riods.units) == 1:\n attrs[\"units\"] = riods.units[0]\n else:\n attrs[\"units\"] = riods.units\n\n return attrs\n\n\ndef _decode_datetime_cf(data_array, decode_times, decode_timedelta):\n \"\"\"\n Decide the datetime based on CF conventions\n \"\"\"\n if decode_timedelta is None:\n decode_timedelta = decode_times\n\n for coord in data_array.coords:\n time_var = None\n if decode_times and \"since\" in data_array[coord].attrs.get(\"units\", \"\"):\n time_var = times.CFDatetimeCoder(use_cftime=True).decode(\n as_variable(data_array[coord]), name=coord\n )\n elif (\n decode_timedelta\n and data_array[coord].attrs.get(\"units\") in times.TIME_UNITS\n ):\n time_var = times.CFTimedeltaCoder().decode(\n as_variable(data_array[coord]), name=coord\n )\n if time_var is not None:\n dimensions, data, attributes, encoding = variables.unpack_for_decoding(\n time_var\n )\n data_array = data_array.assign_coords(\n {\n coord: IndexVariable(\n dims=dimensions,\n data=data,\n attrs=attributes,\n encoding=encoding,\n )\n }\n )\n return data_array\n\n\ndef _parse_driver_tags(riods, attrs, coords):\n # Parse extra metadata from tags, if supported\n parsers = {\"ENVI\": _parse_envi}\n\n driver = riods.driver\n if driver in parsers:\n meta = parsers[driver](riods.tags(ns=driver))\n\n for key, value in meta.items():\n # Add values as coordinates if they match the band count,\n # as attributes otherwise\n if isinstance(value, (list, np.ndarray)) and len(value) == riods.count:\n coords[key] = (\"band\", np.asarray(value))\n else:\n attrs[key] = value\n\n\ndef _load_subdatasets(\n riods,\n group,\n variable,\n parse_coordinates,\n chunks,\n cache,\n lock,\n masked,\n mask_and_scale,\n decode_times,\n decode_timedelta,\n **open_kwargs,\n):\n \"\"\"\n Load in rasterio subdatasets\n \"\"\"\n base_tags = _parse_tags(riods.tags())\n dim_groups = {}\n subdataset_filter = None\n if any((group, variable)):\n subdataset_filter = build_subdataset_filter(group, variable)\n for subdataset in riods.subdatasets:\n if subdataset_filter is not None and not subdataset_filter.match(subdataset):\n continue\n with rasterio.open(subdataset) as rds:\n shape = rds.shape\n rioda = open_rasterio(\n subdataset,\n parse_coordinates=shape not in dim_groups and parse_coordinates,\n chunks=chunks,\n cache=cache,\n lock=lock,\n masked=masked,\n mask_and_scale=mask_and_scale,\n default_name=subdataset.split(\":\")[-1].lstrip(\"/\").replace(\"/\", \"_\"),\n decode_times=decode_times,\n decode_timedelta=decode_timedelta,\n 
**open_kwargs,\n )\n if shape not in dim_groups:\n dim_groups[shape] = {rioda.name: rioda}\n else:\n dim_groups[shape][rioda.name] = rioda\n\n if len(dim_groups) > 1:\n dataset = [\n Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()\n ]\n elif not dim_groups:\n dataset = Dataset(attrs=base_tags)\n else:\n dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)\n return dataset\n\n\ndef _prepare_dask(result, riods, filename, chunks):\n \"\"\"\n Prepare the data for dask computations\n \"\"\"\n # pylint: disable=import-outside-toplevel\n from dask.base import tokenize\n\n # augment the token with the file modification time\n try:\n mtime = os.path.getmtime(filename)\n except OSError:\n # the filename is probably an s3 bucket rather than a regular file\n mtime = None\n\n if chunks in (True, \"auto\"):\n import dask\n from dask.array.core import normalize_chunks\n\n if version.parse(dask.__version__) < version.parse(\"0.18.0\"):\n msg = (\n \"Automatic chunking requires dask.__version__ >= 0.18.0 . \"\n f\"You currently have version {dask.__version__}\"\n )\n raise NotImplementedError(msg)\n block_shape = (1,) + riods.block_shapes[0]\n chunks = normalize_chunks(\n chunks=(1, \"auto\", \"auto\"),\n shape=(riods.count, riods.height, riods.width),\n dtype=riods.dtypes[0],\n previous_chunks=tuple((c,) for c in block_shape),\n )\n token = tokenize(filename, mtime, chunks)\n name_prefix = f\"open_rasterio-{token}\"\n return result.chunk(chunks, name_prefix=name_prefix, token=token)\n\n\ndef _handle_encoding(result, mask_and_scale, masked, da_name):\n \"\"\"\n Make sure encoding handled properly\n \"\"\"\n if \"grid_mapping\" in result.attrs:\n variables.pop_to(result.attrs, result.encoding, \"grid_mapping\", name=da_name)\n if mask_and_scale:\n if \"scale_factor\" in result.attrs:\n variables.pop_to(\n result.attrs, result.encoding, \"scale_factor\", name=da_name\n )\n if \"add_offset\" in result.attrs:\n variables.pop_to(result.attrs, result.encoding, \"add_offset\", name=da_name)\n if masked:\n if \"_FillValue\" in result.attrs:\n variables.pop_to(result.attrs, result.encoding, \"_FillValue\", name=da_name)\n if \"missing_value\" in result.attrs:\n variables.pop_to(\n result.attrs, result.encoding, \"missing_value\", name=da_name\n )\n\n\ndef open_rasterio(\n filename,\n parse_coordinates=None,\n chunks=None,\n cache=None,\n lock=None,\n masked=False,\n mask_and_scale=False,\n variable=None,\n group=None,\n default_name=None,\n decode_times=True,\n decode_timedelta=None,\n **open_kwargs,\n):\n # pylint: disable=too-many-statements,too-many-locals,too-many-branches\n \"\"\"Open a file with rasterio (experimental).\n\n This should work with any file that rasterio can open (most often:\n geoTIFF). The x and y coordinates are generated automatically from the\n file's geoinformation, shifted to the center of each pixel (see\n `\"PixelIsArea\" Raster Space\n <http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_\n for more information).\n\n Parameters\n ----------\n filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT\n Path to the file to open. Or already open rasterio dataset.\n parse_coordinates: bool, optional\n Whether to parse the x and y coordinates out of the file's\n ``transform`` attribute or not. 
The default is to automatically\n parse the coordinates only if they are rectilinear (1D).\n It can be useful to set ``parse_coordinates=False``\n if your files are very large or if you don't need the coordinates.\n chunks: int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new\n DataArray into a dask array. Chunks can also be set to\n ``True`` or ``\"auto\"`` to choose sensible chunk sizes according to\n ``dask.config.get(\"array.chunk-size\")``.\n cache: bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False.\n lock: bool or dask.utils.SerializableLock, optional\n\n If chunks is provided, this argument is used to ensure that only one\n thread per process is reading from a rasterio file object at a time.\n\n By default and when a lock instance is provided,\n a :class:`xarray.backends.CachingFileManager` is used to cache File objects.\n Since rasterio also caches some data, this will make repeated reads from the\n same object fast.\n\n When ``lock=False``, no lock is used, allowing for completely parallel reads\n from multiple threads or processes. However, a new file handle is opened on\n each request.\n\n masked: bool, optional\n If True, read the mask and set values to NaN. Defaults to False.\n mask_and_scale: bool, optional\n Lazily scale (using the `scales` and `offsets` from rasterio) and mask.\n If the _Unsigned attribute is present treat integer arrays as unsigned.\n variable: str or list or tuple, optional\n Variable name or names to use to filter loading.\n group: str or list or tuple, optional\n Group name or names to use to filter loading.\n default_name: str, optional\n The name of the data array if none exists. Default is None.\n decode_times: bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n decode_timedelta: bool, optional\n If True, decode variables and coordinates with time units in\n {“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}\n into timedelta objects. 
If False, leave them encoded as numbers.\n If None (default), assume the same value of decode_time.\n **open_kwargs: kwargs, optional\n Optional keyword arguments to pass into rasterio.open().\n\n Returns\n -------\n :obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:\n The newly created dataset(s).\n \"\"\"\n parse_coordinates = True if parse_coordinates is None else parse_coordinates\n masked = masked or mask_and_scale\n vrt_params = None\n if isinstance(filename, rasterio.io.DatasetReader):\n filename = filename.name\n elif isinstance(filename, rasterio.vrt.WarpedVRT):\n vrt = filename\n filename = vrt.src_dataset.name\n vrt_params = dict(\n src_crs=vrt.src_crs.to_string() if vrt.src_crs else None,\n crs=vrt.crs.to_string() if vrt.crs else None,\n resampling=vrt.resampling,\n tolerance=vrt.tolerance,\n src_nodata=vrt.src_nodata,\n nodata=vrt.nodata,\n width=vrt.width,\n height=vrt.height,\n src_transform=vrt.src_transform,\n transform=vrt.transform,\n dtype=vrt.working_dtype,\n warp_extras=vrt.warp_extras,\n )\n\n if lock in (True, None):\n lock = RASTERIO_LOCK\n elif lock is False:\n lock = NO_LOCK\n\n # ensure default for sharing is False\n # ref https://github.com/mapbox/rasterio/issues/1504\n open_kwargs[\"sharing\"] = open_kwargs.get(\"sharing\", False)\n\n with warnings.catch_warnings(record=True) as rio_warnings:\n if lock is not NO_LOCK:\n manager = CachingFileManager(\n rasterio.open, filename, lock=lock, mode=\"r\", kwargs=open_kwargs\n )\n else:\n manager = URIManager(rasterio.open, filename, mode=\"r\", kwargs=open_kwargs)\n riods = manager.acquire()\n captured_warnings = rio_warnings.copy()\n\n # raise the NotGeoreferencedWarning if applicable\n for rio_warning in captured_warnings:\n if not riods.subdatasets or not isinstance(\n rio_warning.message, NotGeoreferencedWarning\n ):\n warnings.warn(str(rio_warning.message), type(rio_warning.message))\n\n # open the subdatasets if they exist\n if riods.subdatasets:\n return _load_subdatasets(\n riods=riods,\n group=group,\n variable=variable,\n parse_coordinates=parse_coordinates,\n chunks=chunks,\n cache=cache,\n lock=lock,\n masked=masked,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n decode_timedelta=decode_timedelta,\n **open_kwargs,\n )\n\n if vrt_params is not None:\n riods = WarpedVRT(riods, **vrt_params)\n\n if cache is None:\n cache = chunks is None\n\n # Get bands\n if riods.count < 1:\n raise ValueError(\"Unknown dims\")\n\n # parse tags & load alternate coords\n attrs = _get_rasterio_attrs(riods=riods)\n coords = _load_netcdf_1d_coords(riods.tags())\n _parse_driver_tags(riods=riods, attrs=attrs, coords=coords)\n for coord in coords:\n if f\"NETCDF_DIM_{coord}\" in attrs:\n coord_name = coord\n attrs.pop(f\"NETCDF_DIM_{coord}\")\n break\n else:\n coord_name = \"band\"\n coords[coord_name] = np.asarray(riods.indexes)\n\n # Get geospatial coordinates\n if parse_coordinates:\n coords.update(\n _generate_spatial_coords(_rio_transform(riods), riods.width, riods.height)\n )\n\n unsigned = False\n encoding = {}\n if mask_and_scale and \"_Unsigned\" in attrs:\n unsigned = variables.pop_to(attrs, encoding, \"_Unsigned\") == \"true\"\n\n if masked:\n encoding[\"dtype\"] = str(_rasterio_to_numpy_dtype(riods.dtypes))\n\n da_name = attrs.pop(\"NETCDF_VARNAME\", default_name)\n data = indexing.LazilyOuterIndexedArray(\n RasterioArrayWrapper(\n manager,\n lock,\n name=da_name,\n vrt_params=vrt_params,\n masked=masked,\n mask_and_scale=mask_and_scale,\n unsigned=unsigned,\n )\n )\n\n # 
this lets you write arrays loaded with rasterio\n data = indexing.CopyOnWriteArray(data)\n if cache and chunks is None:\n data = indexing.MemoryCachedArray(data)\n\n result = DataArray(\n data=data, dims=(coord_name, \"y\", \"x\"), coords=coords, attrs=attrs, name=da_name\n )\n result.encoding = encoding\n\n # update attributes from NetCDF attributess\n _load_netcdf_attrs(riods.tags(), result)\n result = _decode_datetime_cf(\n result, decode_times=decode_times, decode_timedelta=decode_timedelta\n )\n\n # make sure the _FillValue is correct dtype\n if \"_FillValue\" in attrs:\n attrs[\"_FillValue\"] = result.dtype.type(attrs[\"_FillValue\"])\n\n # handle encoding\n _handle_encoding(result, mask_and_scale, masked, da_name)\n # Affine transformation matrix (always available)\n # This describes coefficients mapping pixel coordinates to CRS\n # For serialization store as tuple of 6 floats, the last row being\n # always (0, 0, 1) per definition (see\n # https://github.com/sgillies/affine)\n result.rio.write_transform(_rio_transform(riods), inplace=True)\n if hasattr(riods, \"crs\") and riods.crs:\n result.rio.write_crs(riods.crs, inplace=True)\n\n if chunks is not None:\n result = _prepare_dask(result, riods, filename, chunks)\n\n # Make the file closeable\n result.set_close(manager.close)\n result.rio._manager = manager\n # add file path to encoding\n result.encoding[\"source\"] = riods.name\n result.encoding[\"rasterio_dtype\"] = str(riods.dtypes[0])\n return result\n"
] | [
[
"numpy.ix_",
"numpy.min",
"numpy.asarray",
"numpy.arange",
"numpy.squeeze",
"numpy.dtype",
"numpy.atleast_1d",
"numpy.max",
"numpy.fromstring",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
suvarnak/GenerativeFSLCovid | [
"0bdeb4ed444c5c9d59697c71d0733fc3a100944c"
] | [
"graphs/models/concept_discriminator.py"
] | [
"\"\"\"\r\n discriminator model\r\n\"\"\"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torchvision.models as models\r\nimport json\r\nfrom easydict import EasyDict as edict\r\nfrom graphs.weights_initializer import weights_init\r\n\r\n\r\nclass EncoderModel(nn.Module):\r\n def __init__(self,config):\r\n super(EncoderModel, self).__init__()\r\n self.config = config\r\n\r\n self.num_classes = self.config.num_classes\r\n\r\n self.progress = 0.0\r\n\r\n self.encoder = nn.Sequential(\r\n nn.Conv2d(in_channels=3,out_channels=32, kernel_size=3, stride=1, padding=1), # b, 32, 224, 224\r\n nn.ReLU(True),\r\n nn.MaxPool2d(2, stride=None), # b, 32, 112, 112\r\n nn.Conv2d(in_channels=32,out_channels=64, kernel_size=3, stride=1, padding=1), # b, 64, 112, 112\r\n nn.ReLU(True),\r\n nn.MaxPool2d(2, stride=None), # b, 64, 56, 56\r\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1), # b, 128, 56, 56\r\n nn.ReLU(True),\r\n nn.MaxPool2d(2, stride=None), # b, 128, 28, 28\r\n\t\t\t\t)\r\n self.linear_layers = nn.Sequential(\t\t\r\n\t\t\t\t\t\tnn.Linear(2*self.config.image_size*self.config.image_size, out_features=128),\r\n nn.Linear(128, out_features=self.config.num_ways),\r\n\r\n )\r\n\r\n\r\n def forward(self, x): \r\n #x = self.encoder(x)\r\n #print(x.size())\r\n #self.discriminator = nn.Sequential(self.encoder, self.fc())\r\n x = self.encoder(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.linear_layers(x)\r\n #print(x.size())\r\n\r\n #x = x.view(1, -1) \r\n #x = self.fc(x)\r\n return x\r\n\r\nclass ConceptDiscriminatorModel(torch.nn.Module): #new model\r\n def __init__(self, pretrained_model):\r\n super(ConceptDiscriminatorModel, self).__init__()\r\n self.new_model = nn.Sequential(\r\n nn.Linear(in_features=512, out_features=30))\r\n self.pretrained_model = pretrained_model\r\n\r\n def forward(self, x):\r\n x = self.pretrained_model(x)\r\n return x\r\n"
] | [
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nhsx-mirror/SynthVAE | [
"64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651",
"64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651",
"64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651",
"64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651"
] | [
"opacus/privacy_analysis.py",
"opacus/utils/tensor_utils.py",
"opacus/layers/param_rename.py",
"Hyperparameter_Tuning/Hyperparameter_Tuning_MIMIC.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nr\"\"\"\n*Based on Google's TF Privacy:* https://github.com/tensorflow/privacy/blob/master/tensorflow_privacy/privacy/analysis/rdp_accountant.py.\n*Here, we update this code to Python 3, and optimize dependencies.*\n\nFunctionality for computing Renyi Differential Privacy (RDP) of an additive\nSampled Gaussian Mechanism (SGM).\n\nExample:\n Suppose that we have run an SGM applied to a function with L2-sensitivity of 1.\n\n Its parameters are given as a list of tuples\n ``[(q_1, sigma_1, steps_1), ..., (q_k, sigma_k, steps_k)],``\n and we wish to compute epsilon for a given target delta.\n\n The example code would be:\n\n >>> max_order = 32\n >>> orders = range(2, max_order + 1)\n >>> rdp = np.zeros_like(orders, dtype=float)\n >>> for q, sigma, steps in parameters:\n >>> rdp += privacy_analysis.compute_rdp(q, sigma, steps, orders)\n >>> epsilon, opt_order = privacy_analysis.get_privacy_spent(orders, rdp, delta)\n\n\"\"\"\n\nimport math\nfrom typing import List, Tuple, Union\n\nimport numpy as np\nfrom scipy import special\n\n\n########################\n# LOG-SPACE ARITHMETIC #\n########################\n\n\ndef _log_add(logx: float, logy: float) -> float:\n r\"\"\"Adds two numbers in the log space.\n\n Args:\n logx: First term in log space.\n logy: Second term in log space.\n\n Returns:\n Sum of numbers in log space.\n \"\"\"\n a, b = min(logx, logy), max(logx, logy)\n if a == -np.inf: # adding 0\n return b\n # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)\n return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)\n\n\ndef _log_sub(logx: float, logy: float) -> float:\n r\"\"\"Subtracts two numbers in the log space.\n\n Args:\n logx: First term in log space. Expected to be greater than the second term.\n logy: First term in log space. 
Expected to be less than the first term.\n\n Returns:\n Difference of numbers in log space.\n\n Raises:\n ValueError\n If the result is negative.\n \"\"\"\n if logx < logy:\n raise ValueError(\"The result of subtraction must be non-negative.\")\n if logy == -np.inf: # subtracting 0\n return logx\n if logx == logy:\n return -np.inf # 0 is represented as -np.inf in the log space.\n\n try:\n # Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).\n return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1\n except OverflowError:\n return logx\n\n\ndef _compute_log_a_for_int_alpha(q: float, sigma: float, alpha: int) -> float:\n r\"\"\"Computes :math:`log(A_\\alpha)` for integer ``alpha``.\n\n Notes:\n Note that\n :math:`A_\\alpha` is real valued function of ``alpha`` and ``q``,\n and that 0 < ``q`` < 1.\n\n Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.\n\n Args:\n q: Sampling rate of SGM.\n sigma: The standard deviation of the additive Gaussian noise.\n alpha: The order at which RDP is computed.\n\n Returns:\n :math:`log(A_\\alpha)` as defined in Section 3.3 of\n https://arxiv.org/pdf/1908.10530.pdf.\n \"\"\"\n\n # Initialize with 0 in the log space.\n log_a = -np.inf\n\n for i in range(alpha + 1):\n log_coef_i = (\n math.log(special.binom(alpha, i))\n + i * math.log(q)\n + (alpha - i) * math.log(1 - q)\n )\n\n s = log_coef_i + (i * i - i) / (2 * (sigma ** 2))\n log_a = _log_add(log_a, s)\n\n return float(log_a)\n\n\ndef _compute_log_a_for_frac_alpha(q: float, sigma: float, alpha: float) -> float:\n r\"\"\"Computes :math:`log(A_\\alpha)` for fractional ``alpha``.\n\n Notes:\n Note that\n :math:`A_\\alpha` is real valued function of ``alpha`` and ``q``,\n and that 0 < ``q`` < 1.\n\n Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.\n\n Args:\n q: Sampling rate of SGM.\n sigma: The standard deviation of the additive Gaussian noise.\n alpha: The order at which RDP is computed.\n\n Returns:\n :math:`log(A_\\alpha)` as defined in Section 3.3 of\n https://arxiv.org/pdf/1908.10530.pdf.\n \"\"\"\n # The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are\n # initialized to 0 in the log space:\n log_a0, log_a1 = -np.inf, -np.inf\n i = 0\n\n z0 = sigma ** 2 * math.log(1 / q - 1) + 0.5\n\n while True: # do ... 
until loop\n coef = special.binom(alpha, i)\n log_coef = math.log(abs(coef))\n j = alpha - i\n\n log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)\n log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)\n\n log_e0 = math.log(0.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))\n log_e1 = math.log(0.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))\n\n log_s0 = log_t0 + (i * i - i) / (2 * (sigma ** 2)) + log_e0\n log_s1 = log_t1 + (j * j - j) / (2 * (sigma ** 2)) + log_e1\n\n if coef > 0:\n log_a0 = _log_add(log_a0, log_s0)\n log_a1 = _log_add(log_a1, log_s1)\n else:\n log_a0 = _log_sub(log_a0, log_s0)\n log_a1 = _log_sub(log_a1, log_s1)\n\n i += 1\n if max(log_s0, log_s1) < -30:\n break\n\n return _log_add(log_a0, log_a1)\n\n\ndef _compute_log_a(q: float, sigma: float, alpha: float) -> float:\n r\"\"\"Computes :math:`log(A_\\alpha)` for any positive finite ``alpha``.\n\n Notes:\n Note that\n :math:`A_\\alpha` is real valued function of ``alpha`` and ``q``,\n and that 0 < ``q`` < 1.\n\n Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf\n for details.\n\n Args:\n q: Sampling rate of SGM.\n sigma: The standard deviation of the additive Gaussian noise.\n alpha: The order at which RDP is computed.\n\n Returns:\n :math:`log(A_\\alpha)` as defined in the paper mentioned above.\n \"\"\"\n if float(alpha).is_integer():\n return _compute_log_a_for_int_alpha(q, sigma, int(alpha))\n else:\n return _compute_log_a_for_frac_alpha(q, sigma, alpha)\n\n\ndef _log_erfc(x: float) -> float:\n r\"\"\"Computes :math:`log(erfc(x))` with high accuracy for large ``x``.\n\n Helper function used in computation of :math:`log(A_\\alpha)`\n for a fractional alpha.\n\n Args:\n x: The input to the function\n\n Returns:\n :math:`log(erfc(x))`\n \"\"\"\n return math.log(2) + special.log_ndtr(-x * 2 ** 0.5)\n\n\ndef _compute_rdp(q: float, sigma: float, alpha: float) -> float:\n r\"\"\"Computes RDP of the Sampled Gaussian Mechanism at order ``alpha``.\n\n Args:\n q: Sampling rate of SGM.\n sigma: The standard deviation of the additive Gaussian noise.\n alpha: The order at which RDP is computed.\n\n Returns:\n RDP at order ``alpha``; can be np.inf.\n \"\"\"\n if q == 0:\n return 0\n\n # no privacy\n if sigma == 0:\n return np.inf\n\n if q == 1.0:\n return alpha / (2 * sigma ** 2)\n\n if np.isinf(alpha):\n return np.inf\n\n return _compute_log_a(q, sigma, alpha) / (alpha - 1)\n\n\ndef compute_rdp(\n q: float, noise_multiplier: float, steps: int, orders: Union[List[float], float]\n) -> Union[List[float], float]:\n r\"\"\"Computes Renyi Differential Privacy (RDP) guarantees of the\n Sampled Gaussian Mechanism (SGM) iterated ``steps`` times.\n\n Args:\n q: Sampling rate of SGM.\n noise_multiplier: The ratio of the standard deviation of the\n additive Gaussian noise to the L2-sensitivity of the function\n to which it is added. 
Note that this is same as the standard\n deviation of the additive Gaussian noise when the L2-sensitivity\n of the function is 1.\n steps: The number of iterations of the mechanism.\n orders: An array (or a scalar) of RDP orders.\n\n Returns:\n The RDP guarantees at all orders; can be ``np.inf``.\n \"\"\"\n if isinstance(orders, float):\n rdp = _compute_rdp(q, noise_multiplier, orders)\n else:\n rdp = np.array([_compute_rdp(q, noise_multiplier, order) for order in orders])\n\n return rdp * steps\n\n\ndef get_privacy_spent(\n orders: Union[List[float], float], rdp: Union[List[float], float], delta: float\n) -> Tuple[float, float]:\n r\"\"\"Computes epsilon given a list of Renyi Differential Privacy (RDP) values at\n multiple RDP orders and target ``delta``.\n The computation of epslion, i.e. conversion from RDP to (eps, delta)-DP,\n is based on the theorem presented in the following work:\n Borja Balle et al. \"Hypothesis testing interpretations and Renyi differential privacy.\"\n International Conference on Artificial Intelligence and Statistics. PMLR, 2020.\n Particullary, Theorem 21 in the arXiv version https://arxiv.org/abs/1905.09982.\n Args:\n orders: An array (or a scalar) of orders (alphas).\n rdp: A list (or a scalar) of RDP guarantees.\n delta: The target delta.\n Returns:\n Pair of epsilon and optimal order alpha.\n Raises:\n ValueError\n If the lengths of ``orders`` and ``rdp`` are not equal.\n \"\"\"\n orders_vec = np.atleast_1d(orders)\n rdp_vec = np.atleast_1d(rdp)\n\n if len(orders_vec) != len(rdp_vec):\n raise ValueError(\n f\"Input lists must have the same length.\\n\"\n f\"\\torders_vec = {orders_vec}\\n\"\n f\"\\trdp_vec = {rdp_vec}\\n\"\n )\n\n eps = (\n rdp_vec\n - (np.log(delta) + np.log(orders_vec)) / (orders_vec - 1)\n + np.log((orders_vec - 1) / orders_vec)\n )\n\n # special case when there is no privacy\n if np.isnan(eps).all():\n return np.inf, np.nan\n\n idx_opt = np.nanargmin(eps) # Ignore NaNs\n return eps[idx_opt], orders_vec[idx_opt]\n",
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nUtils for generating stats from torch tensors.\n\"\"\"\nfrom typing import Iterator, List, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch.functional import F\n\n\ndef calc_sample_norms(\n named_params: Iterator[Tuple[str, torch.Tensor]], flat: bool = True\n) -> List[torch.Tensor]:\n r\"\"\"\n Calculates the norm of the given tensors for each sample.\n\n This function calculates the overall norm of the given tensors for each sample,\n assuming the each batch's dim is zero.\n\n Args:\n named_params: An iterator of tuples <name, param> with name being a\n string and param being a tensor of shape ``[B, ...]`` where ``B``\n is the size of the batch and is the 0th dimension.\n flat: A flag, when set to `True` returns a flat norm over all\n layers norms\n\n Example:\n >>> t1 = torch.rand((2, 5))\n >>> t2 = torch.rand((2, 5))\n >>> calc_sample_norms([(\"1\", t1), (\"2\", t2)])\n [tensor([1.5117, 1.0618])]\n\n Returns:\n A list of tensor norms where length of the list is the number of layers\n \"\"\"\n norms = [param.view(len(param), -1).norm(2, dim=-1) for name, param in named_params]\n # calc norm over all layer norms if flat = True\n if flat:\n norms = [torch.stack(norms, dim=0).norm(2, dim=0)]\n return norms\n\n\ndef sum_over_all_but_batch_and_last_n(\n tensor: torch.Tensor, n_dims: int\n) -> torch.Tensor:\n r\"\"\"\n Calculates the sum over all dimensions, except the first\n (batch dimension), and excluding the last n_dims.\n\n This function will ignore the first dimension and it will\n not aggregate over the last n_dims dimensions.\n\n Args:\n tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``.\n n_dims: Number of dimensions to keep.\n\n Example:\n >>> tensor = torch.ones(1, 2, 3, 4, 5)\n >>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape\n torch.Size([1, 4, 5])\n\n Returns:\n A tensor of shape ``(B, ..., X[n_dims-1])``\n \"\"\"\n if tensor.dim() == n_dims + 1:\n return tensor\n else:\n dims = list(range(1, tensor.dim() - n_dims))\n return tensor.sum(dim=dims)\n\n\ndef unfold3d(\n tensor: torch.Tensor,\n kernel_size: Union[int, Tuple[int, int, int]],\n padding: Union[int, Tuple[int, int, int]] = 0,\n stride: Union[int, Tuple[int, int, int]] = 1,\n dilation: Union[int, Tuple[int, int, int]] = 1,\n):\n r\"\"\"\n Extracts sliding local blocks from an batched input tensor.\n\n :class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors).\n This method implements the same action for 5D inputs\n\n Args:\n tensor: An input tensor of shape ``(B, C, D, H, W)``.\n kernel_size: the size of the sliding blocks\n padding: implicit zero padding to be added on both sides of input\n stride: the stride of the sliding blocks in the input spatial dimensions\n dilation: the spacing between the kernel points.\n\n Example:\n >>> B, C, D, H, W = 3, 4, 5, 6, 7\n >>> tensor = torch.arange(1,B*C*D*H*W+1.).view(B,C,D,H,W)\n >>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape\n torch.Size([3, 32, 120])\n\n Returns:\n A tensor of shape ``(B, C * np.product(kernel_size), L)``, where L - output spatial dimensions.\n See :class:`torch.nn.Unfold` for more details\n \"\"\"\n\n if len(tensor.shape) != 5:\n raise ValueError(\n f\"Input tensor must be of the shape [B, C, D, H, W]. 
Got{tensor.shape}\"\n )\n\n if isinstance(kernel_size, int):\n kernel_size = (kernel_size, kernel_size, kernel_size)\n\n if isinstance(padding, int):\n padding = (padding, padding, padding)\n\n if isinstance(stride, int):\n stride = (stride, stride, stride)\n\n if isinstance(dilation, int):\n dilation = (dilation, dilation, dilation)\n\n if dilation != (1, 1, 1):\n raise NotImplementedError(f\"dilation={dilation} not supported. We'd love a PR!\")\n\n batch_size, channels, _, _, _ = tensor.shape\n\n # Input shape: (B, C, D, H, W)\n tensor = F.pad(\n tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0])\n )\n # Output shape: (B, C, D+2*padding[2], H+2*padding[1], W+2*padding[0])\n\n tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0])\n tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1])\n tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2])\n # Output shape: (B, C, D_out, H_out, W_out, kernel_size[0], kernel_size[1], kernel_size[2])\n # For D_out, H_out, W_out definitions see :class:`torch.nn.Unfold`\n\n tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7)\n # Output shape: (B, D_out, H_out, W_out, C, kernel_size[0], kernel_size[1], kernel_size[2])\n\n tensor = tensor.reshape(batch_size, -1, channels * np.prod(kernel_size)).transpose(\n 1, 2\n )\n # Output shape: (B, D_out * H_out * W_out, C * kernel_size[0] * kernel_size[1] * kernel_size[2]\n\n return tensor\n",
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n\nfrom typing import Dict, Union\n\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.nn.modules.module import _IncompatibleKeys\n\n\ndef filter_out_old_keys(self, state_dict, prefix, local_metadata):\n new_state_dict = {\n param_name: param_value\n for param_name, param_value in state_dict.items()\n if param_name not in self.old_to_new\n }\n return new_state_dict\n\n\nclass ParamRenamedModule(nn.Module):\n \"\"\"\n This class defines a nn.Module whose parameters are renamed. This is useful when you want to\n reimplement a layer but make sure its state_dict and list of parameters are exactly the same\n as another reference layer so that you can have a drop-in replacement that does not depend on\n how your layer is actually implemented. In Opacus, this is used for DPLSTM, where our\n implementation leverages submodules and requires alignment to the state_dict of nn.LSTM.\n \"\"\"\n\n def __init__(self, rename_map: Dict[str, str]):\n \"\"\"\n Initializes internal state. Subclass this instead of ``torch.nn.Module`` whenever you need\n to rename your model's state.\n\n Args:\n rename_map: mapping from old name -> new name for each parameter you want renamed.\n Note that this must be a 1:1 mapping!\n \"\"\"\n super().__init__()\n self.old_to_new = rename_map\n self.new_to_old = {v: k for k, v in rename_map.items()}\n\n self._register_state_dict_hook(filter_out_old_keys)\n\n def _register_renamed_parameters(self):\n \"\"\"\n Internal function. This function simply registers parameters under their new name. They will\n automatically mask their duplicates coming from submodules. This trick works because\n self.parameters() proceeds recursively from the top, going into submodules after processing\n items at the current level, and will not return duplicates.\n \"\"\"\n for old_name, param in super().named_parameters():\n if old_name in self.old_to_new:\n new_name = self.old_to_new[old_name]\n self.register_parameter(new_name, param)\n\n def __setattr__(self, name: str, value: Union[Tensor, nn.Module]) -> None:\n \"\"\"\n Whenever you set an attribute, eg `self.linear`, this is called to actually register it in\n any nn.Module. We rely on the masking trick explained in the docs for\n ``_register_renamed_parameters`` to make sure we replace things only once. If a new parameter\n in the rename list is detected, we rename and mask it so next time this is called we will\n no longer find it.\n \"\"\"\n super().__setattr__(name, value)\n try:\n self._register_renamed_parameters()\n except AttributeError:\n # At the very beginning of instantiation, this will fail because we do not yet have\n # self._parameters. Safe to ignore.\n pass\n\n def load_state_dict(\n self, state_dict: Dict[str, Tensor], strict: bool = True,\n ):\n \"\"\"\n Identical to ``torch.nn.Module.load_state_dict()`` but handles the renamed keys.\n \"\"\"\n\n # nn.Module recomputes its state_dict(), without calling the same logic as in self.state_dict()\n # This means that it will find both the old and the renamed parameters. Both point to the\n # same parameter object, so either of them will set it correctly. It will however complain\n # that some keys are missing (the \"old\" keys). 
We can safely ignore those and process them\n # accordingly\n\n missing_keys, unexpected_keys = super().load_state_dict(\n state_dict, strict=False\n )\n missing_keys = [k for k in missing_keys if k not in self.old_to_new]\n if strict:\n error_msgs = []\n if len(unexpected_keys) > 0:\n error_msgs.insert(\n 0,\n \"Unexpected key(s) in state_dict: {}. \".format(\n \", \".join('\"{}\"'.format(k) for k in unexpected_keys)\n ),\n )\n if len(missing_keys) > 0:\n error_msgs.insert(\n 0,\n \"Missing key(s) in state_dict: {}. \".format(\n \", \".join('\"{}\"'.format(k) for k in missing_keys)\n ),\n )\n\n if len(error_msgs) > 0:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(\n self.__class__.__name__, \"\\n\\t\".join(error_msgs)\n )\n )\n return _IncompatibleKeys(missing_keys, unexpected_keys)\n",
"#%% -------- Import Libraries -------- #\n\n# Standard imports\nfrom selectors import EpollSelector\nfrom tokenize import String\nimport numpy as np\nimport pandas as pd\nimport torch\n\n# VAE is in other folder\nimport sys\n\nsys.path.append(\"../\")\n\n# Opacus support for differential privacy\nfrom opacus.utils.uniform_sampler import UniformWithReplacementSampler\n\n# For VAE dataset formatting\nfrom torch.utils.data import TensorDataset, DataLoader\n\n# VAE functions\nfrom VAE import Decoder, Encoder, VAE\n\n# For datetime columns we need a transformer\nfrom rdt.transformers import datetime\n\n# Utility file contains all functions required to run notebook\nfrom utils import (\n set_seed,\n mimic_pre_proc,\n constraint_filtering,\n plot_elbo,\n plot_likelihood_breakdown,\n plot_variable_distributions,\n reverse_transformers,\n)\nfrom metrics import distribution_metrics\n\nimport optuna\nimport pickle\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\") # We suppress warnings to avoid SDMETRICS throwing unique synthetic data warnings (i.e.\n# data in synthetic set is not in the real data set) as well as SKLEARN throwing convergence warnings (pre-processing uses\n# GMM from sklearn and this throws non convergence warnings)\n\nset_seed(0)\n\nfilepath = \".../Private MIMIC Data/table_one_synthvae.csv\"\n\n# Load in the MIMIC dataset\ndata_supp = pd.read_csv(filepath)\n\n# Save the original columns\n\noriginal_categorical_columns = [\n \"ETHNICITY\",\n \"DISCHARGE_LOCATION\",\n \"GENDER\",\n \"FIRST_CAREUNIT\",\n \"VALUEUOM\",\n \"LABEL\",\n]\noriginal_continuous_columns = [\"SUBJECT_ID\", \"VALUE\", \"age\"]\noriginal_datetime_columns = [\"ADMITTIME\", \"DISCHTIME\", \"DOB\", \"CHARTTIME\"]\n\n# Drop DOD column as it contains NANS - for now\n\n# data_supp = data_supp.drop('DOD', axis = 1)\n\noriginal_columns = (\n original_categorical_columns\n + original_continuous_columns\n + original_datetime_columns\n)\n#%% -------- Data Pre-Processing -------- #\n\npre_proc_method = \"GMM\"\n\n(\n x_train,\n original_metric_set,\n reordered_dataframe_columns,\n continuous_transformers,\n categorical_transformers,\n datetime_transformers,\n num_categories,\n num_continuous,\n) = mimic_pre_proc(data_supp=data_supp, pre_proc_method=pre_proc_method)\n\n#%% -------- Create & Train VAE -------- #\n\n# User defined parameters\n\n# General training\nbatch_size = 32\nn_epochs = 5\nlogging_freq = 1 # Number of epochs we should log the results to the user\npatience = 5 # How many epochs should we allow the model train to see if\n# improvement is made\ndelta = 10 # The difference between elbo values that registers an improvement\nfilepath = None # Where to save the best model\n\n\n# Privacy params\ndifferential_privacy = False # Do we want to implement differential privacy\nsample_rate = 0.1 # Sampling rate\nnoise_scale = None # Noise multiplier - influences how much noise to add\ntarget_eps = 1 # Target epsilon for privacy accountant\ntarget_delta = 1e-5 # Target delta for privacy accountant\n\n# Define the metrics you want the model to evaluate\n\n# Define distributional metrics required - for sdv_baselines this is set by default\ndistributional_metrics = [\n \"SVCDetection\",\n \"GMLogLikelihood\",\n \"CSTest\",\n \"KSTest\",\n \"KSTestExtended\",\n \"ContinuousKLDivergence\",\n \"DiscreteKLDivergence\",\n]\n\ngower = False\n\n# Prepare data for interaction with torch VAE\nY = torch.Tensor(x_train)\ndataset = TensorDataset(Y)\n\ngenerator = None\nsample_rate = batch_size / len(dataset)\ndata_loader = 
DataLoader(\n dataset,\n batch_sampler=UniformWithReplacementSampler(\n num_samples=len(dataset), sample_rate=sample_rate, generator=generator\n ),\n pin_memory=True,\n generator=generator,\n)\n\n\n# -------- Define our Optuna trial -------- #\n\n\ndef objective(\n trial,\n gower,\n distributional_metrics,\n differential_privacy=False,\n target_delta=1e-3,\n target_eps=10.0,\n n_epochs=50,\n):\n\n latent_dim = trial.suggest_int(\"Latent Dimension\", 2, 128, step=2) # Hyperparam\n hidden_dim = trial.suggest_int(\"Hidden Dimension\", 32, 1024, step=32) # Hyperparam\n\n encoder = Encoder(x_train.shape[1], latent_dim, hidden_dim=hidden_dim)\n decoder = Decoder(latent_dim, num_continuous, num_categories=num_categories)\n\n lr = trial.suggest_float(\"Learning Rate\", 1e-3, 1e-2, step=1e-5)\n vae = VAE(encoder, decoder, lr=1e-3) # lr hyperparam\n\n C = trial.suggest_int(\"C\", 10, 1e4, step=50)\n\n if differential_privacy == True:\n (\n training_epochs,\n log_elbo,\n log_reconstruction,\n log_divergence,\n log_categorical,\n log_numerical,\n ) = vae.diff_priv_train(\n data_loader,\n n_epochs=n_epochs,\n C=C, # Hyperparam\n target_eps=target_eps,\n target_delta=target_delta,\n sample_rate=sample_rate,\n )\n print(f\"(epsilon, delta): {vae.get_privacy_spent(target_delta)}\")\n\n else:\n\n (\n training_epochs,\n log_elbo,\n log_reconstruction,\n log_divergence,\n log_categorical,\n log_numerical,\n ) = vae.train(data_loader, n_epochs=n_epochs)\n\n # -------- Generate Synthetic Data -------- #\n\n synthetic_supp = constraint_filtering(\n n_rows=data_supp.shape[0],\n vae=vae,\n reordered_cols=reordered_dataframe_columns,\n data_supp_columns=data_supp.columns,\n cont_transformers=continuous_transformers,\n cat_transformers=categorical_transformers,\n date_transformers=datetime_transformers,\n pre_proc_method=pre_proc_method,\n )\n\n # -------- Datetime Handling -------- #\n\n # If the dataset has datetimes then we need to re-convert these to a numerical\n # Value representing seconds, this is so we can evaluate the metrics on them\n\n metric_synthetic_supp = synthetic_supp.copy()\n\n for index, column in enumerate(original_datetime_columns):\n\n # Fit datetime transformer - converts to seconds\n temp_datetime = datetime.DatetimeTransformer()\n temp_datetime.fit(metric_synthetic_supp, columns=column)\n\n metric_synthetic_supp = temp_datetime.transform(metric_synthetic_supp)\n\n # -------- SDV Metrics -------- #\n # Calculate the sdv metrics for SynthVAE\n\n metrics = distribution_metrics(\n gower_bool=gower,\n distributional_metrics=distributional_metrics,\n data_supp=data_supp,\n synthetic_supp=synthetic_supp,\n categorical_columns=original_categorical_columns,\n continuous_columns=original_continuous_columns,\n saving_filepath=None,\n pre_proc_method=pre_proc_method,\n )\n\n # Optuna wants a list of values in float form\n\n list_metrics = [metrics[i] for i in metrics.columns]\n\n print(list_metrics)\n\n return list_metrics\n\n\n#%% -------- Run Hyperparam Optimisation -------- #\n\n# If there is no study object in your folder then run and save the study so\n# It can be resumed if needed\n\nfirst_run = True # First run indicates if we are creating a new hyperparam study\n\nif first_run == True:\n\n if gower == True:\n directions = [\"maximize\" for i in range(distributional_metrics.shape[0] + 1)]\n else:\n directions = [\"maximize\" for i in range(distributional_metrics.shape[0])]\n\n study = optuna.create_study(directions=directions)\n\nelse:\n\n with open(\"no_dp_MIMIC.pkl\", \"rb\") as f:\n study = 
pickle.load(f)\n\nstudy.optimize(\n lambda trial: objective(\n trial,\n gower=gower,\n distributional_metrics=distributional_metrics,\n differential_privacy=differential_privacy,\n target_delta=target_delta,\n target_eps=target_eps,\n n_epochs=n_epochs,\n ),\n n_trials=3,\n gc_after_trial=True,\n) # GC to avoid OOM\n#%%\n\nstudy.best_trials\n#%% -------- Save The Study -------- #\n\n# For a multi objective study we need to find the best trials and basically\n# average between the 3 metrics to get the best trial\n\nwith open(\"no_dp_MIMIC.pkl\", \"wb\") as f:\n pickle.dump(study, f)\n"
] | [
[
"numpy.log",
"numpy.isnan",
"scipy.special.binom",
"numpy.atleast_1d",
"numpy.nanargmin",
"scipy.special.log_ndtr",
"numpy.isinf"
],
[
"torch.stack",
"torch.functional.F.pad",
"numpy.prod"
],
[
"torch.nn.modules.module._IncompatibleKeys"
],
[
"torch.utils.data.TensorDataset",
"pandas.read_csv",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
linksdl/futuretec-project-self_driving_cars_projects | [
"38e8f14543132ec86a8bada8d708eefaef23fee8",
"38e8f14543132ec86a8bada8d708eefaef23fee8",
"38e8f14543132ec86a8bada8d708eefaef23fee8",
"38e8f14543132ec86a8bada8d708eefaef23fee8",
"38e8f14543132ec86a8bada8d708eefaef23fee8"
] | [
"udacity-program_self_driving_car_engineer_v1.0/part01-computer vision and deep learning/module03-deep learning/lesson02-miniflow/exercise07-backpropagation/miniflow.py",
"udacity-program_self_driving_car_engineer_v2.0/module03-sensor fusion/Lesson7-Multi-Target Tracking/Exercise19-Gating/4_gating.py",
"udacity-program_self_driving_car_engineer_v1.0/part01-computer vision and deep learning/module01-introduction/lesson03-comuter vision and fundamentals/exercise02-region of interest masking/region_masking.py",
"udacity-program_self_driving_car_engineer_v2.0/module02-computer vision/exercise02-data acquisiton and visualization/visualization.py",
"udacity-program_self_driving_car_engineer_v2.0/module03-sensor fusion/Lesson7-Multi-Target Tracking/Exercise11-Visibility/solution/2_fov.py"
] | [
"\"\"\"\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n@Time : 2022/3/26 16:58\n@File : miniflow.py\n\"\"\"\n\n\n\n\"\"\"\nImplement the backward method of the Sigmoid node.\n\"\"\"\nimport numpy as np\n\n\nclass Node(object):\n \"\"\"\n Base class for nodes in the network.\n\n Arguments:\n\n `inbound_nodes`: A list of nodes with edges into this node.\n \"\"\"\n def __init__(self, inbound_nodes=[]):\n \"\"\"\n Node's constructor (runs when the object is instantiated). Sets\n properties that all nodes need.\n \"\"\"\n # A list of nodes with edges into this node.\n self.inbound_nodes = inbound_nodes\n # The eventual value of this node. Set by running\n # the forward() method.\n self.value = None\n # A list of nodes that this node outputs to.\n self.outbound_nodes = []\n # New property! Keys are the inputs to this node and\n # their values are the partials of this node with\n # respect to that input.\n self.gradients = {}\n # Sets this node as an outbound node for all of\n # this node's inputs.\n for node in inbound_nodes:\n node.outbound_nodes.append(self)\n\n def forward(self):\n \"\"\"\n Every node that uses this class as a base class will\n need to define its own `forward` method.\n \"\"\"\n raise NotImplementedError\n\n def backward(self):\n \"\"\"\n Every node that uses this class as a base class will\n need to define its own `backward` method.\n \"\"\"\n raise NotImplementedError\n\n\nclass Input(Node):\n \"\"\"\n A generic input into the network.\n \"\"\"\n def __init__(self):\n # The base class constructor has to run to set all\n # the properties here.\n #\n # The most important property on an Input is value.\n # self.value is set during `topological_sort` later.\n Node.__init__(self)\n\n def forward(self):\n # Do nothing because nothing is calculated.\n pass\n\n def backward(self):\n # An Input node has no inputs so the gradient (derivative)\n # is zero.\n # The key, `self`, is reference to this object.\n self.gradients = {self: 0}\n # Weights and bias may be inputs, so you need to sum\n # the gradient from output gradients.\n for n in self.outbound_nodes:\n grad_cost = n.gradients[self]\n self.gradients[self] += grad_cost * 1\n\n\nclass Linear(Node):\n \"\"\"\n Represents a node that performs a linear transform.\n \"\"\"\n def __init__(self, X, W, b):\n # The base class (Node) constructor. Weights and bias\n # are treated like inbound nodes.\n Node.__init__(self, [X, W, b])\n\n def forward(self):\n \"\"\"\n Performs the math behind a linear transform.\n \"\"\"\n X = self.inbound_nodes[0].value\n W = self.inbound_nodes[1].value\n b = self.inbound_nodes[2].value\n self.value = np.dot(X, W) + b\n\n def backward(self):\n \"\"\"\n Calculates the gradient based on the output values.\n \"\"\"\n # Initialize a partial for each of the inbound_nodes.\n self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}\n # Cycle through the outputs. 
The gradient will change depending\n # on each output, so the gradients are summed over all outputs.\n for n in self.outbound_nodes:\n # Get the partial of the cost with respect to this node.\n grad_cost = n.gradients[self]\n # Set the partial of the loss with respect to this node's inputs.\n self.gradients[self.inbound_nodes[0]] += np.dot(grad_cost, self.inbound_nodes[1].value.T)\n # Set the partial of the loss with respect to this node's weights.\n self.gradients[self.inbound_nodes[1]] += np.dot(self.inbound_nodes[0].value.T, grad_cost)\n # Set the partial of the loss with respect to this node's bias.\n self.gradients[self.inbound_nodes[2]] += np.sum(grad_cost, axis=0, keepdims=False)\n\n\nclass Sigmoid(Node):\n \"\"\"\n Represents a node that performs the sigmoid activation function.\n \"\"\"\n def __init__(self, node):\n # The base class constructor.\n Node.__init__(self, [node])\n\n def _sigmoid(self, x):\n \"\"\"\n This method is separate from `forward` because it\n will be used with `backward` as well.\n\n `x`: A numpy array-like object.\n \"\"\"\n return 1. / (1. + np.exp(-x))\n\n def forward(self):\n \"\"\"\n Perform the sigmoid function and set the value.\n \"\"\"\n input_value = self.inbound_nodes[0].value\n self.value = self._sigmoid(input_value)\n\n def backward(self):\n \"\"\"\n Calculates the gradient using the derivative of\n the sigmoid function.\n \"\"\"\n # Initialize the gradients to 0.\n self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}\n\n # Cycle through the outputs. The gradient will change depending\n # on each output, so the gradients are summed over all outputs.\n for n in self.outbound_nodes:\n # Get the partial of the cost with respect to this node.\n grad_cost = n.gradients[self]\n \"\"\"\n TODO: Your code goes here!\n Set the gradients property to the gradients with respect to each input.\n NOTE: See the Linear node and MSE node for examples.\n \"\"\"\n sigmoid = self.value\n self.gradients[self.inbound_nodes[0]] += sigmoid * (1 - sigmoid) * grad_cost\n\n\nclass MSE(Node):\n def __init__(self, y, a):\n \"\"\"\n The mean squared error cost function.\n Should be used as the last node for a network.\n \"\"\"\n # Call the base class' constructor.\n Node.__init__(self, [y, a])\n\n def forward(self):\n \"\"\"\n Calculates the mean squared error.\n \"\"\"\n # NOTE: We reshape these to avoid possible matrix/vector broadcast\n # errors.\n #\n # For example, if we subtract an array of shape (3,) from an array of shape\n # (3,1) we get an array of shape(3,3) as the result when we want\n # an array of shape (3,1) instead.\n #\n # Making both arrays (3,1) ensures the result is (3,1) and does\n # an elementwise subtraction as expected.\n y = self.inbound_nodes[0].value.reshape(-1, 1)\n a = self.inbound_nodes[1].value.reshape(-1, 1)\n\n self.m = self.inbound_nodes[0].value.shape[0]\n # Save the computed output for backward.\n self.diff = y - a\n self.value = np.mean(self.diff**2)\n\n def backward(self):\n \"\"\"\n Calculates the gradient of the cost.\n\n This is the final node of the network so outbound nodes\n are not a concern.\n \"\"\"\n self.gradients[self.inbound_nodes[0]] = (2 / self.m) * self.diff\n self.gradients[self.inbound_nodes[1]] = (-2 / self.m) * self.diff\n\n\ndef topological_sort(feed_dict):\n \"\"\"\n Sort the nodes in topological order using Kahn's Algorithm.\n\n `feed_dict`: A dictionary where the key is a `Input` Node and the value is the respective value feed to that Node.\n\n Returns a list of sorted nodes.\n \"\"\"\n\n 
input_nodes = [n for n in feed_dict.keys()]\n\n G = {}\n nodes = [n for n in input_nodes]\n while len(nodes) > 0:\n n = nodes.pop(0)\n if n not in G:\n G[n] = {'in': set(), 'out': set()}\n for m in n.outbound_nodes:\n if m not in G:\n G[m] = {'in': set(), 'out': set()}\n G[n]['out'].add(m)\n G[m]['in'].add(n)\n nodes.append(m)\n\n L = []\n S = set(input_nodes)\n while len(S) > 0:\n n = S.pop()\n\n if isinstance(n, Input):\n n.value = feed_dict[n]\n\n L.append(n)\n for m in n.outbound_nodes:\n G[n]['out'].remove(m)\n G[m]['in'].remove(n)\n # if no other incoming edges add to S\n if len(G[m]['in']) == 0:\n S.add(m)\n return L\n\n\ndef forward_and_backward(graph):\n \"\"\"\n Performs a forward pass and a backward pass through a list of sorted Nodes.\n\n Arguments:\n\n `graph`: The result of calling `topological_sort`.\n \"\"\"\n # Forward pass\n for n in graph:\n n.forward()\n\n # Backward pass\n # see: https://docs.python.org/2.3/whatsnew/section-slices.html\n for n in graph[::-1]:\n n.backward()\n",
"# imports\nimport numpy as np\nimport matplotlib\n#matplotlib.use('wxagg') # change backend so that figure maximizing works on Mac as well\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom scipy.stats.distributions import chi2\n\nclass Association:\n '''Data association class with single nearest neighbor association and gating based on Mahalanobis distance'''\n def __init__(self):\n self.association_matrix = np.matrix([])\n self.unassigned_tracks = []\n self.unassigned_meas = []\n\n def associate(self, track_list, meas_list):\n N = len(track_list) # N tracks\n M = len(meas_list) # M measurements\n self.unassigned_tracks = list(range(N))\n self.unassigned_meas = list(range(M))\n\n # initialize association matrix\n self.association_matrix = np.inf*np.ones((N,M))\n\n # loop over all tracks and all measurements to set up association matrix\n for i in range(N):\n track = track_list[i]\n for j in range(M):\n meas = meas_list[j]\n dist = self.MHD(track, meas)\n if self.gating(dist):\n self.association_matrix[i,j] = dist\n\n def MHD(self, track, meas):\n # calc Mahalanobis distance\n H = np.matrix([[1, 0, 0, 0],\n [0, 1, 0, 0]])\n gamma = meas.z - H*track.x\n S = H*track.P*H.transpose() + meas.R\n MHD = gamma.transpose()*np.linalg.inv(S)*gamma # Mahalanobis distance formula\n return MHD\n\n def gating(self, MHD):\n # check if measurement lies inside gate\n limit = chi2.ppf(0.95, df=2)\n ############\n # TODO: return True if measurement lies inside gate, otherwise return False\n ############\n if MHD < limit:\n return True\n else:\n return False\n\n\n def get_closest_track_and_meas(self):\n # find closest track and measurement for next update\n A = self.association_matrix\n if np.min(A) == np.inf:\n return np.nan, np.nan\n ############\n # TODO:\n # - find indices of closest track and measurement for next update\n # - return NAN if no more associations can be found (i.e. 
minimum entry in association matrix is infinity)\n # - delete row and column in association matrix for closest track and measurement\n # - remove found track number from unassigned_tracks, meas number from unassigned_meas\n # - return indices of closest track and measurement for next update\n ############\n\n # get indices of minimum entry\n ij_min = np.unravel_index(np.argmin(A, axis=None), A.shape)\n ind_track = ij_min[0]\n ind_meas = ij_min[1]\n\n # delete row and column for next update\n A = np.delete(A, ind_track, 0)\n A = np.delete(A, ind_meas, 1)\n self.association_matrix = A\n\n # update this track with this measurement\n update_track = self.unassigned_tracks[ind_track]\n update_meas = self.unassigned_meas[ind_meas]\n\n # remove this track and measurement from list\n self.unassigned_tracks.remove(update_track)\n self.unassigned_meas.remove(update_meas)\n\n return update_track, update_meas\n\n##################\nclass Track:\n '''Track class with state, covariance, id'''\n def __init__(self, id):\n # save id\n self.id = id\n\n # generate random state x\n self.x = np.matrix([[np.random.uniform(2,8)],\n [np.random.uniform(-3,3)],\n [0],\n [0]])\n\n # set up estimation error covariance\n self.P = np.matrix([[2, 0, 0, 0],\n [0, 3, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\nclass Measurement:\n '''Measurement class with easurement, covariance, id'''\n def __init__(self, id, x, y):\n # save id\n self.id = id\n\n # generate random measurement z\n self.z = np.matrix([[x + np.random.normal(0,2)],\n [y + np.random.normal(0,2)]])\n\n # set up measurement covariance\n self.R = np.matrix([[2, 0],\n [0, 2]])\n\n\n#################\ndef run():\n '''generate tracks and measurements and call association'''\n # set up track_list and meas_list for association\n np.random.seed(5) # make random values predictable\n association = Association() # init data association\n track_list = []\n meas_list = []\n\n # initialize visualization\n fig, ax = plt.subplots()\n\n # generate and plot tracks and measurements\n for i in range(3):\n\n # tracks\n track = Track(i+1)\n track_list.append(track)\n ax.scatter(float(-track.x[1]), float(track.x[0]), marker='x', color='red', label='track')\n ax.text(float(-track.x[1]), float(track.x[0]), str(track.id), color='red')\n\n # measurements\n meas = Measurement(i+1, float(track.x[0]), float(track.x[1]))\n meas_list.append(meas)\n ax.scatter(float(-meas.z[1]), float(meas.z[0]), marker='o', color='green', label='measurement')\n ax.text(float(-meas.z[1]), float(meas.z[0]), str(meas.id), color='green')\n\n # calculate association matrix\n association.associate(track_list, meas_list)\n print('Association matrix:', association.association_matrix)\n print('unassigned_tracks list:', association.unassigned_tracks)\n print('unassigned_meas list:', association.unassigned_meas)\n\n # visualize distances\n for track in track_list:\n for meas in meas_list:\n dist = association.association_matrix[track.id-1, meas.id-1]\n if dist < np.inf:\n ax.plot([float(-track.x[1]), float(-meas.z[1])], [float(track.x[0]), float(meas.z[0])], color='gray')\n str_dist = \"{:.2f}\".format(dist)\n ax.text(float((-track.x[1] - meas.z[1])/2), float((track.x[0] + meas.z[0])/2), str_dist)\n\n # update associated tracks with measurements\n matrix_orig = association.association_matrix\n while association.association_matrix.shape[0]>0 and association.association_matrix.shape[1]>0:\n\n # search for next association between a track and a measurement\n ind_track, ind_meas = association.get_closest_track_and_meas()\n 
if np.isnan(ind_track):\n print('---no more associations---')\n break\n\n track = track_list[ind_track]\n meas = meas_list[ind_meas]\n dist = matrix_orig[ind_track, ind_meas]\n ax.plot([float(-track.x[1]), float(-meas.z[1])], [float(track.x[0]), float(meas.z[0])], color='blue', label='association')\n str_dist = \"{:.2f}\".format(dist)\n ax.text(float((-track.x[1] - meas.z[1])/2), float((track.x[0] + meas.z[0])/2), str_dist)\n print('found association between track', ind_track+1, 'and measurement', ind_meas+1, 'with MHD =', str_dist)\n print('New association matrix:', association.association_matrix)\n print('New unassigned_tracks list:', association.unassigned_tracks)\n print('New unassigned_meas list:', association.unassigned_meas)\n\n\n #################\n # visualization\n # maximize window\n mng = plt.get_current_fig_manager()\n #mng.frame.Maximize(True)\n\n # remove repeated labels\n handles, labels = ax.get_legend_handles_labels()\n handle_list, label_list = [], []\n for handle, label in zip(handles, labels):\n if label not in label_list:\n handle_list.append(handle)\n label_list.append(label)\n ax.legend(handle_list, label_list, loc='center left', shadow=True, fontsize='large', bbox_to_anchor=(0.9, 0.1))\n\n # axis\n ax.set_xlabel('y [m]')\n ax.set_ylabel('x [m]')\n ax.set_xlim(-5,5)\n ax.set_ylim(0,10)\n\n # correct x ticks (positive to the left)\n ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(-x) if x!=0 else '{0:g}'.format(x))\n ax.xaxis.set_major_formatter(ticks_x)\n\n plt.show()\n\n####################\n# call main loop\nrun()\n",
"\"\"\"\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n@Time : 2022/3/25 20:21\n@File : region_masking.py\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\n\n\n# Read in the image and print some stats\nimage = mpimg.imread('test.jpeg')\nprint('This image is: ', type(image), 'with dimensions:', image.shape)\n\n# Pull out the x and y sizes and make a copy of the image\nysize = image.shape[0]\nxsize = image.shape[1]\nregion_select = np.copy(image)\n\n# Define a triangle region of interest\n# Keep in mind the origin (x=0, y=0) is in the upper left in image processing\n# Note: if you run this code, you'll find these are not sensible values!!\n# But you'll get a chance to play with them soon in a quiz\nleft_bottom = [0, 539]\nright_bottom = [900, 300]\napex = [400, 0]\n\n\n# Fit lines (y=Ax+B) to identify the 3 sided region of interest\n# np.polyfit() returns the coefficients [A, B] of the fit\nfit_left = np.polyfit((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1)\nfit_right = np.polyfit((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1)\nfit_bottom = np.polyfit((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom[1]), 1)\n\n# Find the region inside the lines\nXX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize))\nregion_thresholds = (YY > (XX*fit_left[0] + fit_left[1])) & \\\n (YY > (XX*fit_right[0] + fit_right[1])) & \\\n (YY < (XX*fit_bottom[0] + fit_bottom[1]))\n\n# Color pixels red which are inside the region of interest\nregion_select[region_thresholds] = [255, 0, 0]\n\n# Display the image\nplt.imshow(region_select)\n# uncomment if plot does not display\nplt.show()\n\n# Uncomment the following code if you are running the code locally and wish to save the image\nmpimg.imsave(\"test-after.png\", region_select)\n",
"\"\"\"\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n@Time : 2022/2/23 19:35\n@Author : [email protected]\n@ProjectName : udacity-program_self_driving_car_engineer_v1.0_source.0\n@File : visualization.py\n\"\"\"\nimport glob\nimport os.path\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom PIL import Image\nfrom utils import get_data\n\n\ndef viz(ground_truth):\n \"\"\"\n create a grid visualization of images with color coded bboxes\n args:\n - ground_truth [list[dict]]: ground truth data\n \"\"\"\n # IMPLEMENT THIS FUNCTION\n paths = glob.glob('../data/images/*')\n\n gt_dic = {}\n\n # mapping to access data faster\n for gt in ground_truth:\n gt_dic[gt['filename']] = gt\n\n # color mapping of classes\n color_map = {1: [1, 0, 0], 2: [0, 1, 0], 4: [0, 0, 1]}\n\n f, ax = plt.subplots(4, 5, figsize=(20, 10))\n for i in range(20):\n x = i % 4\n y = i % 5\n\n filename = os.path.basename(paths[i])\n img = Image.open(paths[i])\n ax[x, y].imshow(img)\n\n bboxes = gt_dic[filename]['boxes']\n classes = gt_dic[filename]['classes']\n\n for cl, bb in zip(classes, bboxes):\n y1, x1, y2, x2 = bb\n rec = Rectangle((x1, y1), x2 - x1, y2 - y1, facecolor='none', edgecolor=color_map[cl])\n ax[x, y].add_patch(rec)\n ax[x, y].axis('off')\n\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n ground_truth, _ = get_data()\n viz(ground_truth)\n",
"# imports\nimport numpy as np\nimport matplotlib\n#matplotlib.use('wxagg') # change backend so that figure maximizing works on Mac as well \nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nclass Camera:\n '''Camera sensor class including field of view and coordinate transformation'''\n def __init__(self, phi, t):\n self.fov = [-np.pi/4, np.pi/4] # sensor field of view / opening angle\n \n # compute rotation around z axis\n M_rot = np.matrix([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n print(M_rot)\n \n # coordiante transformation matrix from sensor to vehicle coordinates\n self.sens_to_veh = np.matrix(np.identity(4)) \n self.sens_to_veh[0:3, 0:3] = M_rot\n self.sens_to_veh[0:3, 3] = t\n self.veh_to_sens = np.linalg.inv(self.sens_to_veh) # transformation vehicle to sensor coordinates\n \n def in_fov(self, x):\n # check if an object x can be seen by this sensor\n pos_veh = np.ones((4, 1)) # homogeneous coordinates\n pos_veh[0:3] = x[0:3] \n pos_sens = self.veh_to_sens*pos_veh # transform from vehicle to sensor coordinates\n visible = False\n # make sure to not divide by zero - we can exclude the whole negative x-range here\n if pos_sens[0] > 0: \n alpha = np.arctan(pos_sens[1]/pos_sens[0]) # calc angle between object and x-axis\n # no normalization needed because returned alpha always lies between [-pi/2, pi/2]\n if alpha > self.fov[0] and alpha < self.fov[1]:\n visible = True\n \n return visible\n \n#################\ndef run():\n '''generate random points and check visibility'''\n # camera with translation and rotation angle\n t = np.matrix([[2],\n [0],\n [0]])\n phi = np.radians(45)\n cam = Camera(phi, t)\n\n # initialize visualization\n fig, ax = plt.subplots()\n\n for i in range(50):\n # define track position and velocity\n x = np.matrix([[np.random.uniform(-5,5)],\n [np.random.uniform(-5,5)],\n [0],\n [0],\n [0],\n [0]])\n\n # check if x is visible by camera\n result = cam.in_fov(x)\n \n # plot results\n pos_veh = np.ones((4, 1)) # homogeneous coordinates\n pos_veh[0:3] = x[0:3] \n pos_sens = cam.veh_to_sens*pos_veh # transform from vehicle to sensor coordinates\n if result == True:\n col = 'green'\n ax.scatter(float(-pos_sens[1]), float(pos_sens[0]), marker='o', color=col, label='visible track')\n else:\n col = 'red'\n ax.scatter(float(-pos_sens[1]), float(pos_sens[0]), marker='o', color=col, label='invisible track')\n ax.text(float(-pos_sens[1]), float(pos_sens[0]), str(result))\n \n # plot FOV \n ax.plot([0, -5], [0, 5], color='blue', label='field of view') \n ax.plot([0, 5], [0, 5], color='blue')\n\n # maximize window \n mng = plt.get_current_fig_manager()\n #mng.frame.Maximize(True)\n\n # remove repeated labels\n handles, labels = ax.get_legend_handles_labels()\n handle_list, label_list = [], []\n for handle, label in zip(handles, labels):\n if label not in label_list:\n handle_list.append(handle)\n label_list.append(label)\n ax.legend(handle_list, label_list, loc='center left', shadow=True, fontsize='large', bbox_to_anchor=(0.9, 0.1))\n\n # axis\n ax.set_xlabel('y [m]')\n ax.set_ylabel('x [m]')\n ax.set_xlim(-5,5)\n ax.set_ylim(0,5)\n\n # correct x ticks (positive to the left)\n ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(-x) if x!=0 else '{0:g}'.format(x))\n ax.xaxis.set_major_formatter(ticks_x)\n\n plt.show() \n\n####################\n# call main loop\nrun()"
] | [
[
"numpy.dot",
"numpy.mean",
"numpy.zeros_like",
"numpy.exp",
"numpy.sum"
],
[
"numpy.matrix",
"numpy.random.seed",
"scipy.stats.distributions.chi2.ppf",
"numpy.isnan",
"numpy.min",
"numpy.linalg.inv",
"matplotlib.pyplot.subplots",
"numpy.ones",
"matplotlib.pyplot.get_current_fig_manager",
"numpy.delete",
"numpy.argmin",
"numpy.random.normal",
"numpy.random.uniform",
"matplotlib.pyplot.show"
],
[
"numpy.polyfit",
"matplotlib.pyplot.imshow",
"matplotlib.image.imsave",
"numpy.arange",
"matplotlib.image.imread",
"numpy.copy",
"matplotlib.pyplot.show"
],
[
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
],
[
"numpy.matrix",
"numpy.radians",
"numpy.arctan",
"numpy.linalg.inv",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"matplotlib.pyplot.get_current_fig_manager",
"numpy.identity",
"numpy.random.uniform",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mailhexu/pymatgen | [
"b80ca9f34c519757d337487c489fb655f7598cc2",
"b80ca9f34c519757d337487c489fb655f7598cc2",
"b80ca9f34c519757d337487c489fb655f7598cc2",
"b80ca9f34c519757d337487c489fb655f7598cc2"
] | [
"pymatgen/electronic_structure/boltztrap.py",
"pymatgen/phasediagram/analyzer.py",
"pymatgen/symmetry/analyzer.py",
"pymatgen/phonon/dos.py"
] | [
"# coding: utf-8\n\nfrom __future__ import division, unicode_literals, print_function\n\nimport math\nimport os\nimport subprocess\nimport tempfile\nimport logging\n\nimport numpy as np\nfrom monty.dev import requires\nfrom monty.json import jsanitize\nfrom monty.os import cd\nfrom monty.os.path import which\nfrom scipy.constants import e, m_e\nfrom scipy.spatial import distance\n\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.units import Energy, Length\nfrom pymatgen.electronic_structure.bandstructure import \\\n BandStructureSymmLine, Kpoint\nfrom pymatgen.electronic_structure.core import Orbital\nfrom pymatgen.electronic_structure.dos import Dos, Spin, CompleteDos\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.symmetry.bandstructure import HighSymmKpath\n\n\"\"\"\nThis module provides classes to run and analyze boltztrap on pymatgen band\nstructure objects. Boltztrap is a software interpolating band structures and\ncomputing materials properties from this band structure using Boltzmann\nsemi-classical transport theory.\n\nBoltztrap has been developed by Georg Madsen.\n\nhttp://www.icams.de/content/research/software-development/boltztrap/\n\nYou need version 1.2.3 or higher\n\nReferences are::\n\n Madsen, G. K. H., and Singh, D. J. (2006).\n BoltzTraP. A code for calculating band-structure dependent quantities.\n Computer Physics Communications, 175, 67-71\n\"\"\"\n\n__author__ = \"Geoffroy Hautier, Zachary Gibbs, Francesco Ricci, Anubhav Jain\"\n__copyright__ = \"Copyright 2013, The Materials Project\"\n__version__ = \"1.1\"\n__maintainer__ = \"Geoffroy Hautier\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n__date__ = \"August 23, 2013\"\n\n\nclass BoltztrapRunner(object):\n \"\"\"\n This class is used to run Boltztrap on a band structure object.\n\n Args:\n bs:\n A band structure object\n nelec:\n the number of electrons\n dos_type:\n two options for the band structure integration: \"HISTO\"\n (histogram) or \"TETRA\" using the tetrahedon method. TETRA\n typically gives better results (especially for DOSes)\n but takes more time\n energy_grid:\n the energy steps used for the integration (eV)\n lpfac:\n the number of interpolation points in the real space. By\n default 10 gives 10 time more points in the real space than\n the number of kpoints given in reciprocal space\n run_type:\n type of boltztrap usage. by default\n - BOLTZ: (default) compute transport coefficients\n - BANDS: interpolate all bands contained in the energy range\n specified in energy_span_around_fermi variable, along specified\n k-points\n - DOS: compute total and partial dos (custom BoltzTraP code\n needed!)\n - FERMI: compute fermi surface or more correctly to\n get certain bands interpolated\n band_nb:\n indicates a band number. Used for Fermi Surface interpolation\n (run_type=\"FERMI\")\n spin:\n specific spin component (1: up, -1: down) of the band selected\n in FERMI mode (mandatory).\n cond_band:\n if a conduction band is specified in FERMI mode,\n set this variable as True\n tauref:\n reference relaxation time. Only set to a value different than\n zero if we want to model beyond the constant relaxation time.\n tauexp:\n exponent for the energy in the non-constant relaxation time\n approach\n tauen:\n reference energy for the non-constant relaxation time approach\n soc:\n results from spin-orbit coupling (soc) computations give\n typically non-polarized (no spin up or down) results but single\n electron occupations. 
If the band structure comes from a soc\n computation, you should set soc to True (default False)\n doping:\n the fixed doping levels you want to compute. Boltztrap provides\n both transport values depending on electron chemical potential\n (fermi energy) and for a series of fixed carrier\n concentrations. By default, this is set to 1e16 to 1e22 in\n increments of factors of 10.\n energy_span_around_fermi:\n usually the interpolation is not needed on the entire energy\n range but on a specific range around the fermi level.\n This energy gives this range in eV. by default it is 1.5 eV.\n If DOS or BANDS type are selected, this range is automatically\n set to cover the entire energy range.\n scissor:\n scissor to apply to the band gap (eV). This applies a scissor\n operation moving the band edges without changing the band\n shape. This is useful to correct the often underestimated band\n gap in DFT. Default is 0.0 (no scissor)\n kpt_line:\n list of fractional coordinates of kpoints as arrays or list of\n Kpoint objects for BANDS mode calculation (standard path of\n high symmetry k-points is automatically set as default)\n tmax:\n Maximum temperature (K) for calculation (default=1300)\n tgrid:\n Temperature interval for calculation (default=50)\n symprec: 1e-3 is the default in pymatgen. If the kmesh has been\n generated using a different symprec, it has to be specified\n to avoid a \"factorization error\" in BoltzTraP calculation.\n\n \"\"\"\n\n @requires(which('x_trans'),\n \"BoltztrapRunner requires the executables 'x_trans' to be in \"\n \"the path. Please download the Boltztrap at http://\"\n \"www.icams.de/content/research/software-development/boltztrap/ \"\n \"and follow the instructions in the README to compile \"\n \"Bolztrap accordingly. Then add x_trans to your path\")\n def __init__(self, bs, nelec, dos_type=\"HISTO\", energy_grid=0.005,\n lpfac=10, run_type=\"BOLTZ\", band_nb=None, tauref=0, tauexp=0,\n tauen=0, soc=False, doping=None, energy_span_around_fermi=1.5,\n scissor=0.0, kpt_line=None, spin=None, cond_band=False,\n tmax=1300, tgrid=50, symprec=1e-3):\n self.lpfac = lpfac\n self._bs = bs\n self._nelec = nelec\n self.dos_type = dos_type\n self.energy_grid = energy_grid\n self.error = []\n self.run_type = run_type\n self.band_nb = band_nb\n self.spin = spin\n self.cond_band = cond_band\n self.tauref = tauref\n self.tauexp = tauexp\n self.tauen = tauen\n self.soc = soc\n self.kpt_line = kpt_line\n if doping:\n self.doping = doping\n else:\n self.doping = []\n for d in [1e16, 1e17, 1e18, 1e19, 1e20, 1e21]:\n self.doping.extend([1 * d, 2.5 * d, 5 * d, 7.5 * d])\n self.doping.append(1e22)\n self.energy_span_around_fermi = energy_span_around_fermi\n self.scissor = scissor\n self.tmax = tmax\n self.tgrid = tgrid\n self._symprec = symprec\n if self.run_type in (\"DOS\", \"BANDS\"):\n self._auto_set_energy_range()\n\n def _auto_set_energy_range(self):\n \"\"\"\n automatically determine the energy range as min/max eigenvalue\n minus/plus the buffer_in_ev\n \"\"\"\n emins = [min([e_k[0] for e_k in self._bs.bands[Spin.up]])]\n emaxs = [max([e_k[0] for e_k in self._bs.bands[Spin.up]])]\n\n if self._bs.is_spin_polarized:\n emins.append(min([e_k[0] for e_k in\n self._bs.bands[Spin.down]]))\n\n emaxs.append(max([e_k[0] for e_k in\n self._bs.bands[Spin.down]]))\n\n min_eigenval = Energy(min(emins) - self._bs.efermi, \"eV\"). \\\n to(\"Ry\")\n max_eigenval = Energy(max(emaxs) - self._bs.efermi, \"eV\"). 
\\\n to(\"Ry\")\n\n # set energy range to buffer around min/max EV\n # buffer does not increase CPU time but will help get equal\n # energies for spin up/down for band structure\n const = Energy(2, \"eV\").to(\"Ry\")\n self._ll = min_eigenval - const\n self._hl = max_eigenval + const\n\n en_range = Energy(max((abs(self._ll), abs(self._hl))),\n \"Ry\").to(\"eV\")\n\n self.energy_span_around_fermi = en_range * 1.01\n print(\"energy_span_around_fermi = \",\n self.energy_span_around_fermi)\n\n @property\n def bs(self):\n return self._bs\n\n @property\n def nelec(self):\n return self._nelec\n\n def write_energy(self, output_file):\n with open(output_file, 'w') as f:\n f.write(\"test\\n\")\n f.write(\"{}\\n\".format(len(self._bs.kpoints)))\n\n if self.run_type == \"FERMI\":\n sign = -1.0 if self.cond_band else 1.0\n for i in range(len(self._bs.kpoints)):\n eigs = []\n eigs.append(Energy(\n self._bs.bands[Spin(self.spin)][self.band_nb][i] -\n self._bs.efermi, \"eV\").to(\"Ry\"))\n f.write(\"%12.8f %12.8f %12.8f %d\\n\"\n % (self._bs.kpoints[i].frac_coords[0],\n self._bs.kpoints[i].frac_coords[1],\n self._bs.kpoints[i].frac_coords[2],\n len(eigs)))\n for j in range(len(eigs)):\n f.write(\"%18.8f\\n\" % (sign * float(eigs[j])))\n\n else:\n for i, kpt in enumerate(self._bs.kpoints):\n eigs = []\n if self.run_type == \"DOS\":\n spin_lst = [self.spin]\n else:\n spin_lst = self._bs.bands\n\n for spin in spin_lst:\n # use 90% of bottom bands since highest eigenvalues\n # are usually incorrect\n # ask Geoffroy Hautier for more details\n nb_bands = int(math.floor(self._bs.nb_bands * 0.9))\n for j in range(nb_bands):\n eigs.append(\n Energy(self._bs.bands[Spin(spin)][j][i] -\n self._bs.efermi, \"eV\").to(\"Ry\"))\n eigs.sort()\n\n if self.run_type == \"DOS\" and self._bs.is_spin_polarized:\n eigs.insert(0, self._ll)\n eigs.append(self._hl)\n\n f.write(\"%12.8f %12.8f %12.8f %d\\n\"\n % (kpt.frac_coords[0],\n kpt.frac_coords[1],\n kpt.frac_coords[2],\n len(eigs)))\n\n for j in range(len(eigs)):\n f.write(\"%18.8f\\n\" % (float(eigs[j])))\n\n def write_struct(self, output_file):\n sym = SpacegroupAnalyzer(self._bs.structure, symprec=self._symprec)\n\n with open(output_file, 'w') as f:\n f.write(\"{} {}\\n\".format(self._bs.structure.composition.formula,\n sym.get_space_group_symbol()))\n\n f.write(\"{}\\n\".format(\"\\n\".join(\n [\" \".join([\"%.5f\" % Length(i, \"ang\").to(\"bohr\") for i in row])\n for row in self._bs.structure.lattice.matrix])))\n\n ops = sym.get_symmetry_dataset()['rotations']\n f.write(\"{}\\n\".format(len(ops)))\n\n for c in ops:\n for row in c:\n f.write(\"{}\\n\".format(\" \".join(str(i) for i in row)))\n\n def write_def(self, output_file):\n # This function is useless in std version of BoltzTraP code\n # because x_trans script overwrite BoltzTraP.def\n with open(output_file, 'w') as f:\n so = \"\"\n if self._bs.is_spin_polarized or self.soc:\n so = \"so\"\n f.write(\"5, 'boltztrap.intrans', 'old', 'formatted',0\\n\" +\n \"6,'boltztrap.outputtrans', 'unknown', \"\n \"'formatted',0\\n\" +\n \"20,'boltztrap.struct', 'old', 'formatted',0\\n\"\n + \"10,'boltztrap.energy\" + so + \"', 'old', \"\n \"'formatted',0\\n\" +\n \"48,'boltztrap.engre', 'unknown', \"\n \"'unformatted',0\\n\" +\n \"49,'boltztrap.transdos', 'unknown', \"\n \"'formatted',0\\n\" +\n \"50,'boltztrap.sigxx', 'unknown', 'formatted',\"\n \"0\\n\" +\n \"51,'boltztrap.sigxxx', 'unknown', 'formatted',\"\n \"0\\n\" +\n \"21,'boltztrap.trace', 'unknown', \"\n \"'formatted',0\\n\" +\n \"22,'boltztrap.condtens', 'unknown', \"\n 
\"'formatted',0\\n\" +\n \"24,'boltztrap.halltens', 'unknown', \"\n \"'formatted',0\\n\" +\n \"30,'boltztrap_BZ.cube', 'unknown', \"\n \"'formatted',0\\n\")\n\n def write_proj(self, output_file_proj, output_file_def):\n # This function is useless in std version of BoltzTraP code\n # because x_trans script overwrite BoltzTraP.def\n for oi, o in enumerate(Orbital):\n for site_nb in range(0, len(self._bs.structure.sites)):\n if oi < len(self._bs.projections[Spin.up][0][0]):\n with open(output_file_proj + \"_\" + str(site_nb) + \"_\" + str(\n o),\n 'w') as f:\n f.write(self._bs.structure.composition.formula + \"\\n\")\n f.write(str(len(self._bs.kpoints)) + \"\\n\")\n for i in range(len(self._bs.kpoints)):\n tmp_proj = []\n for j in range(\n int(math.floor(self._bs.nb_bands * 0.9))):\n tmp_proj.append(\n self._bs.projections[Spin(self.spin)][j][\n i][oi][site_nb])\n # TODO deal with the sorting going on at\n # the energy level!!!\n # tmp_proj.sort()\n\n if self.run_type == \"DOS\" and \\\n self._bs.is_spin_polarized:\n tmp_proj.insert(0, self._ll)\n tmp_proj.append(self._hl)\n\n f.write(\"%12.8f %12.8f %12.8f %d\\n\"\n % (self._bs.kpoints[i].frac_coords[0],\n self._bs.kpoints[i].frac_coords[1],\n self._bs.kpoints[i].frac_coords[2],\n len(tmp_proj)))\n for j in range(len(tmp_proj)):\n f.write(\"%18.8f\\n\" % float(tmp_proj[j]))\n with open(output_file_def, 'w') as f:\n so = \"\"\n if self._bs.is_spin_polarized:\n so = \"so\"\n f.write(\"5, 'boltztrap.intrans', 'old', 'formatted',0\\n\" +\n \"6,'boltztrap.outputtrans', 'unknown', \"\n \"'formatted',0\\n\" +\n \"20,'boltztrap.struct', 'old', 'formatted',0\\n\"\n + \"10,'boltztrap.energy\" + so + \"', 'old', \"\n \"'formatted',0\\n\" +\n \"48,'boltztrap.engre', 'unknown', \"\n \"'unformatted',0\\n\" +\n \"49,'boltztrap.transdos', 'unknown', \"\n \"'formatted',0\\n\" +\n \"50,'boltztrap.sigxx', 'unknown', 'formatted',\"\n \"0\\n\" +\n \"51,'boltztrap.sigxxx', 'unknown', 'formatted',\"\n \"0\\n\" +\n \"21,'boltztrap.trace', 'unknown', \"\n \"'formatted',0\\n\" +\n \"22,'boltztrap.condtens', 'unknown', \"\n \"'formatted',0\\n\" +\n \"24,'boltztrap.halltens', 'unknown', \"\n \"'formatted',0\\n\" +\n \"30,'boltztrap_BZ.cube', 'unknown', \"\n \"'formatted',0\\n\")\n i = 1000\n for oi, o in enumerate(Orbital):\n for site_nb in range(0, len(self._bs.structure.sites)):\n if oi < len(self._bs.projections[Spin.up][0][0]):\n f.write(str(i) + \",\\'\" + \"boltztrap.proj_\" + str(\n site_nb) + \"_\" + str(o.name) +\n \"\\' \\'old\\', \\'formatted\\',0\\n\")\n i += 1\n\n def write_intrans(self, output_file):\n setgap = 1 if self.scissor > 0.0001 else 0\n\n if self.run_type == \"BOLTZ\" or self.run_type == \"DOS\":\n with open(output_file, 'w') as fout:\n fout.write(\"GENE # use generic interface\\n\")\n fout.write(\n \"1 0 %d %f # iskip (not presently used) idebug \"\n \"setgap shiftgap \\n\"\n % (setgap, Energy(self.scissor, \"eV\").to(\"Ry\")))\n fout.write(\n \"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy \"\n \"span around Fermilevel, number of electrons\\n\"\n % (Energy(self.energy_grid, \"eV\").to(\"Ry\"),\n Energy(self.energy_span_around_fermi, \"eV\").to(\"Ry\"),\n self._nelec))\n fout.write(\n \"CALC # CALC (calculate expansion \"\n \"coeff), NOCALC read from file\\n\")\n fout.write(\n \"%d # lpfac, number of latt-points \"\n \"per k-point\\n\" % self.lpfac)\n fout.write(\n \"%s # run mode (only BOLTZ is \"\n \"supported)\\n\" % self.run_type)\n fout.write(\n \".15 # (efcut) energy range of \"\n \"chemical potential\\n\")\n fout.write(\n \"{} {} # 
Tmax, temperature grid\\n\". \\\n format(self.tmax, self.tgrid))\n fout.write(\n \"-1. # energyrange of bands given DOS output sig_xxx and \"\n \"dos_xxx (xxx is band number)\\n\")\n fout.write(self.dos_type + \"\\n\") # e.g., HISTO or TETRA\n fout.write(\"{} {} {} 0 0 0\\n\".format(\n self.tauref, self.tauexp, self.tauen))\n fout.write(\"{}\\n\".format(2 * len(self.doping)))\n\n for d in self.doping:\n fout.write(str(d) + \"\\n\")\n for d in self.doping:\n fout.write(str(-d) + \"\\n\")\n\n elif self.run_type == \"FERMI\":\n with open(output_file, 'w') as fout:\n fout.write(\"GENE # use generic interface\\n\")\n fout.write(\n \"1 0 0 0.0 # iskip (not presently used) idebug \"\n \"setgap shiftgap \\n\")\n fout.write(\n \"0.0 %f 0.1 %6.1f # Fermilevel (Ry),energygrid,\"\n \"energy span around Fermilevel, \"\n \"number of electrons\\n\"\n % (Energy(self.energy_grid, \"eV\").to(\"Ry\"), self._nelec))\n fout.write(\n \"CALC # CALC (calculate expansion \"\n \"coeff), NOCALC read from file\\n\")\n fout.write(\n \"%d # lpfac, number of latt-points \"\n \"per k-point\\n\" % self.lpfac)\n fout.write(\n \"FERMI # run mode (only BOLTZ is \"\n \"supported)\\n\")\n fout.write(str(1) +\n \" # actual band selected: \" +\n str(self.band_nb + 1) + \" spin: \" + str(self.spin))\n\n elif self.run_type == \"BANDS\":\n if self.kpt_line is None:\n kpath = HighSymmKpath(self._bs.structure)\n self.kpt_line = [Kpoint(k, self._bs.structure.lattice) for k\n in\n kpath.get_kpoints(coords_are_cartesian=False)[\n 0]]\n self.kpt_line = [kp.frac_coords for kp in self.kpt_line]\n elif type(self.kpt_line[0]) == Kpoint:\n self.kpt_line = [kp.frac_coords for kp in self.kpt_line]\n\n with open(output_file, 'w') as fout:\n fout.write(\"GENE # use generic interface\\n\")\n fout.write(\n \"1 0 %d %f # iskip (not presently used) idebug \"\n \"setgap shiftgap \\n\"\n % (setgap, Energy(self.scissor, \"eV\").to(\"Ry\")))\n fout.write(\n \"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy \"\n \"span around Fermilevel, \"\n \"number of electrons\\n\"\n % (Energy(self.energy_grid, \"eV\").to(\"Ry\"),\n Energy(self.energy_span_around_fermi, \"eV\").to(\"Ry\"),\n self._nelec))\n fout.write(\n \"CALC # CALC (calculate expansion \"\n \"coeff), NOCALC read from file\\n\")\n fout.write(\n \"%d # lpfac, number of latt-points \"\n \"per k-point\\n\" % self.lpfac)\n fout.write(\n \"BANDS # run mode (only BOLTZ is \"\n \"supported)\\n\")\n fout.write(\"P \" + str(len(self.kpt_line)) + \"\\n\")\n for kp in self.kpt_line:\n fout.writelines([str(k) + \" \" for k in kp])\n fout.write('\\n')\n\n def write_input(self, output_dir):\n if self._bs.is_spin_polarized or self.soc:\n self.write_energy(os.path.join(output_dir, \"boltztrap.energyso\"))\n else:\n self.write_energy(os.path.join(output_dir, \"boltztrap.energy\"))\n\n self.write_struct(os.path.join(output_dir, \"boltztrap.struct\"))\n self.write_intrans(os.path.join(output_dir, \"boltztrap.intrans\"))\n self.write_def(os.path.join(output_dir, \"BoltzTraP.def\"))\n\n if len(self.bs.projections) != 0 and self.run_type == \"DOS\":\n self.write_proj(os.path.join(output_dir, \"boltztrap.proj\"),\n os.path.join(output_dir, \"BoltzTraP.def\"))\n\n def run(self, path_dir=None, convergence=True, write_input=True,\n clear_dir=False, max_lpfac=150, min_egrid=0.00005):\n \"\"\"\n Write inputs (optional), run BoltzTraP, and ensure\n convergence (optional)\n Args:\n path_dir (str): directory in which to run BoltzTraP\n convergence (bool): whether to check convergence and make\n corrections if needed\n 
write_input: (bool) whether to write input files before the run\n (required for convergence mode)\n clear_dir: (bool) whether to remove all files in the path_dir\n before starting\n max_lpfac: (float) maximum lpfac value to try before reducing egrid\n in convergence mode\n min_egrid: (float) minimum egrid value to try before giving up in\n convergence mode\n\n Returns:\n\n \"\"\"\n\n # TODO: consider making this a part of custodian rather than pymatgen\n # A lot of this functionality (scratch dirs, handlers, monitors)\n # is built into custodian framework\n\n if convergence and not write_input:\n raise ValueError(\"Convergence mode requires write_input to be \"\n \"true\")\n\n if self.run_type in (\"BANDS\", \"DOS\", \"FERMI\"):\n convergence = False\n if self.lpfac > max_lpfac:\n max_lpfac = self.lpfac\n\n if self.run_type == \"BANDS\" and self.bs.is_spin_polarized:\n print(\"Reminder: for run_type \" + str(\n self.run_type) + \", spin component are not separated! \"\n \"(you have a spin polarized band structure)\")\n\n if self.run_type in (\"FERMI\", \"DOS\") and self.spin is None:\n if self.bs.is_spin_polarized:\n raise BoltztrapError(\n \"Spin parameter must be specified for spin polarized \"\n \"band structures!\")\n else:\n self.spin = 1\n\n dir_bz_name = \"boltztrap\"\n if path_dir is None:\n temp_dir = tempfile.mkdtemp()\n path_dir = os.path.join(temp_dir, dir_bz_name)\n else:\n path_dir = os.path.abspath(\n os.path.join(path_dir, dir_bz_name))\n\n if not os.path.exists(path_dir):\n os.mkdir(path_dir)\n elif clear_dir:\n for c in os.listdir(path_dir):\n os.remove(os.path.join(path_dir, c))\n\n FORMAT = \"%(message)s\"\n logging.basicConfig(level=logging.INFO, format=FORMAT,\n filename=os.path.join(path_dir, \"../boltztrap.out\"))\n\n with cd(path_dir):\n lpfac_start = self.lpfac\n converged = False\n\n while self.energy_grid >= min_egrid and not converged:\n self.lpfac = lpfac_start\n\n logging.info(\"lpfac, energy_grid: {} {}\".format(self.lpfac, self.energy_grid))\n\n while self.lpfac <= max_lpfac and not converged:\n\n if write_input:\n self.write_input(path_dir)\n\n bt_exe = [\"x_trans\", \"BoltzTraP\"]\n if self._bs.is_spin_polarized or self.soc:\n bt_exe.append(\"-so\")\n\n p = subprocess.Popen(bt_exe, stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n p.wait()\n\n for c in p.communicate():\n logging.info(c.decode())\n if \"error in factorization\" in c.decode():\n raise BoltztrapError(\"error in factorization\")\n\n warning = \"\"\n\n with open(os.path.join(path_dir,\n dir_bz_name + \".outputtrans\")) as f:\n for l in f:\n if \"Option unknown\" in l:\n raise BoltztrapError(\n \"DOS mode needs a custom version of \"\n \"BoltzTraP code is needed\")\n if \"WARNING\" in l:\n warning = l\n break\n if \"Error - Fermi level was not found\" in l:\n warning = l\n break\n\n if not warning and convergence:\n # check convergence for warning\n analyzer = BoltztrapAnalyzer.from_files(path_dir)\n for doping in ['n', 'p']:\n for c in analyzer.mu_doping[doping]:\n if len(analyzer.mu_doping[doping][c]) != len(\n analyzer.doping[doping]):\n warning = \"length of mu_doping array is \" \\\n \"incorrect\"\n break\n\n if doping == 'p' and \\\n sorted(\n analyzer.mu_doping[doping][\n c], reverse=True) != \\\n analyzer.mu_doping[doping][c]:\n warning = \"sorting of mu_doping array \" \\\n \"incorrect for p-type\"\n break\n\n # ensure n-type doping sorted correctly\n if doping == 'n' and sorted(\n analyzer.mu_doping[doping][c]) != \\\n analyzer.mu_doping[doping][c]:\n 
warning = \"sorting of mu_doping array \" \\\n \"incorrect for n-type\"\n break\n\n if warning:\n self.lpfac += 10\n logging.warn(\"Warning detected: {}! Increase lpfac to \"\n \"{}\".format(warning, self.lpfac))\n\n else:\n converged = True\n\n if not converged:\n self.energy_grid /= 10\n logging.info(\"Could not converge with max lpfac; \"\n \"Decrease egrid to {}\".format(self.energy_grid))\n\n if not converged:\n raise BoltztrapError(\n \"Doping convergence not reached with lpfac=\" + str(\n self.lpfac) + \", energy_grid=\" + str(self.energy_grid))\n\n return path_dir\n\n\nclass BoltztrapError(Exception):\n \"\"\"\n Exception class for boltztrap.\n Raised when the boltztrap gives an error\n \"\"\"\n\n def __init__(self, msg):\n self.msg = msg\n logging.error(self.msg)\n\n def __str__(self):\n return \"BoltztrapError : \" + self.msg\n\n\nclass BoltztrapAnalyzer(object):\n \"\"\"\n Class used to store all the data from a boltztrap run\n \"\"\"\n\n def __init__(self, gap=None, mu_steps=None, cond=None, seebeck=None,\n kappa=None, hall=None, doping=None,\n mu_doping=None, seebeck_doping=None, cond_doping=None,\n kappa_doping=None,\n hall_doping=None, intrans=None, dos=None, dos_partial=None,\n carrier_conc=None, vol=None, warning=None,\n bz_bands=None, bz_kpoints=None, fermi_surface_data=None):\n \"\"\"\n Constructor taking directly all the data generated by Boltztrap. You\n won't probably use it directly but instead use the from_files and\n from_dict methods.\n\n Args:\n gap: The gap after interpolation in eV\n mu_steps: The steps of electron chemical potential (or Fermi\n level) in eV.\n cond: The electronic conductivity tensor divided by a constant\n relaxation time (sigma/tau) at different temperature and\n fermi levels.\n The format is {temperature: [array of 3x3 tensors at each\n fermi level in mu_steps]}. The units are 1/(Ohm*m*s).\n seebeck: The Seebeck tensor at different temperatures and fermi\n levels. The format is {temperature: [array of 3x3 tensors at\n each fermi level in mu_steps]}. The units are V/K\n kappa: The electronic thermal conductivity tensor divided by a\n constant relaxation time (kappa/tau) at different temperature\n and fermi levels. The format is {temperature: [array of 3x3\n tensors at each fermi level in mu_steps]}\n The units are W/(m*K*s)\n hall: The hall tensor at different temperature and fermi levels\n The format is {temperature: [array of 27 coefficients list at\n each fermi level in mu_steps]}\n The units are m^3/C\n doping: The different doping levels that have been given to\n Boltztrap. The format is {'p':[],'n':[]} with an array of\n doping levels. The units are cm^-3\n mu_doping: Gives the electron chemical potential (or Fermi level)\n for a given set of doping.\n Format is {'p':{temperature: [fermi levels],'n':{temperature:\n [fermi levels]}}\n the fermi level array is ordered according to the doping\n levels in doping units for doping are in cm^-3 and for Fermi\n level in eV\n seebeck_doping: The Seebeck tensor at different temperatures and\n doping levels. 
The format is {'p': {temperature: [Seebeck\n tensors]}, 'n':{temperature: [Seebeck tensors]}}\n The [Seebeck tensors] array is ordered according to the\n doping levels in doping units for doping are in cm^-3 and for\n Seebeck in V/K\n cond_doping: The electronic conductivity tensor divided by a\n constant relaxation time (sigma/tau) at different\n temperatures and doping levels\n The format is {'p':{temperature: [conductivity tensors]},\n 'n':{temperature: [conductivity tensors]}}\n The [conductivity tensors] array is ordered according to the\n doping levels in doping units for doping are in cm^-3 and for\n conductivity in 1/(Ohm*m*s)\n kappa_doping: The thermal conductivity tensor divided by a constant\n relaxation time (kappa/tau) at different temperatures and\n doping levels.\n The format is {'p':{temperature: [thermal conductivity\n tensors]},'n':{temperature: [thermal conductivity tensors]}}\n The [thermal conductivity tensors] array is ordered according\n to the doping levels in doping units for doping are in cm^-3\n and for thermal conductivity in W/(m*K*s)\n hall_doping: The Hall tensor at different temperatures and doping\n levels.\n The format is {'p':{temperature: [Hall tensors]},\n 'n':{temperature: [Hall tensors]}}\n The [Hall tensors] array is ordered according to the doping\n levels in doping and each Hall tensor is represented by a 27\n coefficients list.\n The units are m^3/C\n intrans: a dictionary of inputs e.g. {\"scissor\": 0.0}\n carrier_conc: The concentration of carriers in electron (or hole)\n per unit cell\n dos: The dos computed by Boltztrap given as a pymatgen Dos object\n dos_partial: Data for the partial DOS projected on sites and\n orbitals\n vol: Volume of the unit cell in angstrom cube (A^3)\n warning: string if BoltzTraP outputted a warning, else None\n bz_bands: Data for interpolated bands on a k-point line\n (run_type=BANDS)\n bz_kpoints: k-point in reciprocal coordinates for interpolated\n bands (run_type=BANDS)\n fermi_surface_data: energy values in a 3D grid imported from the\n output .cube file.\n \"\"\"\n self.gap = gap\n self.mu_steps = mu_steps\n self._cond = cond\n self._seebeck = seebeck\n self._kappa = kappa\n self._hall = hall\n self.warning = warning\n self.doping = doping\n self.mu_doping = mu_doping\n self._seebeck_doping = seebeck_doping\n self._cond_doping = cond_doping\n self._kappa_doping = kappa_doping\n self._hall_doping = hall_doping\n self.intrans = intrans\n self._carrier_conc = carrier_conc\n self.dos = dos\n self.vol = vol\n self._dos_partial = dos_partial\n self._bz_bands = bz_bands\n self._bz_kpoints = bz_kpoints\n self.fermi_surface_data = fermi_surface_data\n\n def get_symm_bands(self, structure, efermi, kpt_line=None,\n labels_dict=None):\n \"\"\"\n Function useful to read bands from Boltztrap output and get a\n BandStructureSymmLine object comparable with that one from a DFT\n calculation (if the same kpt_line is provided). Default kpt_line\n and labels_dict is the standard path of high symmetry k-point for\n the specified structure. They could be extracted from the\n BandStructureSymmLine object that you want to compare with. 
efermi\n variable must be specified to create the BandStructureSymmLine\n object (usually it comes from DFT or Boltztrap calc)\n \"\"\"\n try:\n if kpt_line is None:\n kpath = HighSymmKpath(structure)\n kpt_line = [Kpoint(k, structure.lattice.reciprocal_lattice) for\n k in\n kpath.get_kpoints(coords_are_cartesian=False)[0]]\n labels_dict = {l: k for k, l in zip(\n *kpath.get_kpoints(coords_are_cartesian=False)) if l}\n kpt_line = [kp.frac_coords for kp in kpt_line]\n elif type(kpt_line[0]) == Kpoint:\n kpt_line = [kp.frac_coords for kp in kpt_line]\n labels_dict = {k: labels_dict[k].frac_coords for k in\n labels_dict}\n\n idx_list = []\n # kpt_dense=np.array([kp for kp in self._bz_kpoints])\n for i, kp in enumerate(kpt_line):\n w = []\n prec = 1e-05\n while len(w) == 0:\n w = np.where(np.all(\n np.abs(kp - self._bz_kpoints) < [prec] * 3,\n axis=1))[0]\n prec *= 10\n\n # print( prec )\n idx_list.append([i, w[0]])\n\n # if len(w)>0:\n # idx_list.append([i,w[0]])\n # else:\n # w=np.where(np.all(np.abs(kp.frac_coords-self._bz_kpoints)\n # <[1e-04,1e-04,1e-04],axis=1))[0]\n # idx_list.append([i,w[0]])\n\n idx_list = np.array(idx_list)\n # print( idx_list.shape )\n\n bands_dict = {Spin.up: (self._bz_bands * Energy(1, \"Ry\").to(\n \"eV\") + efermi).T[:, idx_list[:, 1]].tolist()}\n # bz_kpoints = bz_kpoints[idx_list[:,1]].tolist()\n\n sbs = BandStructureSymmLine(kpt_line, bands_dict,\n structure.lattice.reciprocal_lattice,\n efermi,\n labels_dict=labels_dict)\n\n return sbs\n\n except:\n raise BoltztrapError(\n \"Bands are not in output of BoltzTraP.\\nBolztrapRunner must \"\n \"be run with run_type=BANDS\")\n\n @staticmethod\n def check_acc_bzt_bands(sbs_bz, sbs_ref, warn_thr=(0.03, 0.03)):\n \"\"\"\n Compare sbs_bz BandStructureSymmLine calculated with boltztrap with\n the sbs_ref BandStructureSymmLine as reference (from MP for\n instance), computing correlation and energy difference for eight bands\n around the gap (semiconductors) or fermi level (metals).\n warn_thr is a threshold to get a warning in the accuracy of Boltztap\n interpolated bands.\n Return a dictionary with these keys:\n - \"N\": the index of the band compared; inside each there are:\n - \"Corr\": correlation coefficient for the 8 compared bands\n - \"Dist\": energy distance for the 8 compared bands\n - \"branch_name\": energy distance for that branch\n - \"avg_corr\": average of correlation coefficient over the 8 bands\n - \"avg_dist\": average of energy distance over the 8 bands\n - \"nb_list\": list of indexes of the 8 compared bands\n - \"acc_thr\": list of two float corresponing to the two warning\n thresholds in input\n - \"acc_err\": list of two bools:\n True if the avg_corr > warn_thr[0], and\n True if the avg_dist > warn_thr[1]\n See also compare_sym_bands function doc\n \"\"\"\n if not sbs_ref.is_metal() and not sbs_bz.is_metal():\n vbm_idx = sbs_bz.get_vbm()['band_index'][Spin.up][-1]\n cbm_idx = sbs_bz.get_cbm()['band_index'][Spin.up][0]\n nb_list = range(vbm_idx - 3, cbm_idx + 4)\n\n else:\n bnd_around_efermi = []\n delta = 0\n spin = sbs_bz.bands.keys()[0]\n while len(bnd_around_efermi) < 8 and delta < 100:\n delta += 0.1\n bnd_around_efermi = []\n for nb in range(len(sbs_bz.bands[spin])):\n for kp in range(len(sbs_bz.bands[spin][nb])):\n if abs(sbs_bz.bands[spin][nb][\n kp] - sbs_bz.efermi) < delta:\n bnd_around_efermi.append(nb)\n break\n if len(bnd_around_efermi) < 8:\n print(\"Warning! 
check performed on \" + str(\n len(bnd_around_efermi)))\n nb_list = bnd_around_efermi\n else:\n nb_list = bnd_around_efermi[:8]\n\n # print(nb_list)\n bcheck = compare_sym_bands(sbs_bz, sbs_ref, nb_list)\n # print(bcheck)\n acc_err = [False, False]\n avg_corr = sum([item[1]['Corr'] for item in bcheck.iteritems()]) / 8\n avg_distance = sum([item[1]['Dist'] for item in bcheck.iteritems()]) / 8\n\n if avg_corr > warn_thr[0]: acc_err[0] = True\n if avg_distance > warn_thr[0]: acc_err[1] = True\n\n bcheck['avg_corr'] = avg_corr\n bcheck['avg_distance'] = avg_distance\n bcheck['acc_err'] = acc_err\n bcheck['acc_thr'] = warn_thr\n bcheck['nb_list'] = nb_list\n\n if True in acc_err:\n print(\"Warning! some bands around gap are not accurate\")\n\n return bcheck\n\n def get_seebeck(self, output='eigs', doping_levels=True):\n \"\"\"\n Gives the seebeck coefficient (microV/K) in either a\n full 3x3 tensor form, as 3 eigenvalues, or as the average value\n (trace/3.0) If doping_levels=True, the results are given at\n different p and n doping\n levels (given by self.doping), otherwise it is given as a series\n of electron chemical potential values\n\n Args:\n output (string): the type of output. 'tensor' give the full\n 3x3 tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n\n Returns:\n If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.\n The 'p' links to Seebeck at p-type doping\n and 'n' to the Seebeck at n-type doping. Otherwise, returns a\n {temp:[]} dictionary\n The result contains either the sorted three eigenvalues of\n the symmetric\n Seebeck tensor (output='eigs') or a full tensor (3x3 array) (\n output='tensor') or as an average\n (output='average').\n\n units are microV/K\n \"\"\"\n return BoltztrapAnalyzer._format_to_output(self._seebeck,\n self._seebeck_doping,\n output,\n doping_levels, 1e6)\n\n def get_conductivity(self, output='eigs', doping_levels=True,\n relaxation_time=1e-14):\n \"\"\"\n Gives the conductivity (1/Ohm*m) in either a full 3x3 tensor\n form, as 3 eigenvalues, or as the average value\n (trace/3.0) If doping_levels=True, the results are given at\n different p and n doping\n levels (given by self.doping), otherwise it is given as a series\n of electron chemical potential values\n\n Args:\n output (string): the type of output. 'tensor' give the full\n 3x3 tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n relaxation_time (float): constant relaxation time in secs\n\n Returns:\n If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.\n The 'p' links to conductivity\n at p-type doping and 'n' to the conductivity at n-type\n doping. Otherwise,\n returns a {temp:[]} dictionary. 
The result contains either\n the sorted three eigenvalues of the symmetric\n conductivity tensor (format='eigs') or a full tensor (3x3\n array) (output='tensor') or as an average\n (output='average').\n The result includes a given constant relaxation time\n\n units are 1/Ohm*m\n \"\"\"\n return BoltztrapAnalyzer._format_to_output(self._cond,\n self._cond_doping, output,\n doping_levels,\n relaxation_time)\n\n def get_power_factor(self, output='eigs', doping_levels=True,\n relaxation_time=1e-14):\n \"\"\"\n Gives the power factor (Seebeck^2 * conductivity) in units\n microW/(m*K^2) in either a full 3x3 tensor form,\n as 3 eigenvalues, or as the average value (trace/3.0) If\n doping_levels=True, the results are given at\n different p and n doping levels (given by self.doping), otherwise it\n is given as a series of\n electron chemical potential values\n\n Args:\n output (string): the type of output. 'tensor' give the full 3x3\n tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n relaxation_time (float): constant relaxation time in secs\n\n Returns:\n If doping_levels=True, a dictionnary {temp:{'p':[],'n':[]}}. The\n 'p' links to power factor\n at p-type doping and 'n' to the conductivity at n-type doping.\n Otherwise,\n returns a {temp:[]} dictionary. The result contains either the\n sorted three eigenvalues of the symmetric\n power factor tensor (format='eigs') or a full tensor (3x3 array) (\n output='tensor') or as an average\n (output='average').\n The result includes a given constant relaxation time\n\n units are microW/(m K^2)\n \"\"\"\n result = None\n result_doping = None\n if doping_levels:\n result_doping = {doping: {t: [] for t in\n self._seebeck_doping[doping]} for\n doping in self._seebeck_doping}\n\n for doping in result_doping:\n for t in result_doping[doping]:\n for i in range(len(self.doping[doping])):\n full_tensor = np.dot(self._cond_doping[doping][t][i],\n np.dot(\n self._seebeck_doping[doping][\n t][i],\n self._seebeck_doping[doping][\n t][i]))\n result_doping[doping][t].append(full_tensor)\n\n else:\n result = {t: [] for t in self._seebeck}\n for t in result:\n for i in range(len(self.mu_steps)):\n full_tensor = np.dot(self._cond[t][i],\n np.dot(self._seebeck[t][i],\n self._seebeck[t][i]))\n result[t].append(full_tensor)\n\n return BoltztrapAnalyzer._format_to_output(result, result_doping,\n output, doping_levels,\n multi=1e6 * relaxation_time)\n\n def get_thermal_conductivity(self, output='eigs', doping_levels=True,\n k_el=True, relaxation_time=1e-14):\n \"\"\"\n Gives the electronic part of the thermal conductivity in either a\n full 3x3 tensor form,\n as 3 eigenvalues, or as the average value (trace/3.0) If\n doping_levels=True, the results are given at\n different p and n doping levels (given by self.doping), otherwise it\n is given as a series of\n electron chemical potential values\n\n Args:\n output (string): the type of output. 'tensor' give the full 3x3\n tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n k_el (boolean): True for k_0-PF*T, False for k_0\n relaxation_time (float): constant relaxation time in secs\n\n Returns:\n If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. 
The\n 'p' links to thermal conductivity\n at p-type doping and 'n' to the thermal conductivity at n-type\n doping. Otherwise,\n returns a {temp:[]} dictionary. The result contains either the\n sorted three eigenvalues of the symmetric\n conductivity tensor (format='eigs') or a full tensor (3x3 array) (\n output='tensor') or as an average\n (output='average').\n The result includes a given constant relaxation time\n\n units are W/mK\n \"\"\"\n result = None\n result_doping = None\n if doping_levels:\n result_doping = {doping: {t: [] for t in\n self._seebeck_doping[doping]} for\n doping in self._seebeck_doping}\n for doping in result_doping:\n for t in result_doping[doping]:\n for i in range(len(self.doping[doping])):\n if k_el:\n pf_tensor = np.dot(self._cond_doping[doping][t][i],\n np.dot(\n self._seebeck_doping[doping][\n t][i],\n self._seebeck_doping[doping][\n t][i]))\n result_doping[doping][t].append((\n self._kappa_doping[doping][t][\n i] - pf_tensor * t))\n else:\n result_doping[doping][t].append((\n self._kappa_doping[doping][t][i]))\n else:\n result = {t: [] for t in self._seebeck}\n for t in result:\n for i in range(len(self.mu_steps)):\n if k_el:\n pf_tensor = np.dot(self._cond[t][i],\n np.dot(self._seebeck[t][i],\n self._seebeck[t][i]))\n result[t].append((self._kappa[t][i] - pf_tensor * t))\n else:\n result[t].append((self._kappa[t][i]))\n\n return BoltztrapAnalyzer._format_to_output(result, result_doping,\n output, doping_levels,\n multi=relaxation_time)\n\n def get_zt(self, output='eigs', doping_levels=True, relaxation_time=1e-14,\n kl=1.0):\n \"\"\"\n Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full\n 3x3 tensor form,\n as 3 eigenvalues, or as the average value (trace/3.0) If\n doping_levels=True, the results are given at\n different p and n doping levels (given by self.doping), otherwise it\n is given as a series of\n electron chemical potential values. We assume a constant relaxation\n time and a constant\n lattice thermal conductivity\n\n Args:\n output (string): the type of output. 'tensor' give the full 3x3\n tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n relaxation_time (float): constant relaxation time in secs\n k_l (float): lattice thermal cond in W/(m*K)\n\n Returns:\n If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The\n 'p' links to ZT\n at p-type doping and 'n' to the ZT at n-type doping. Otherwise,\n returns a {temp:[]} dictionary. 
The result contains either the\n sorted three eigenvalues of the symmetric\n ZT tensor (format='eigs') or a full tensor (3x3 array) (\n output='tensor') or as an average\n (output='average').\n The result includes a given constant relaxation time and lattice\n thermal conductivity\n \"\"\"\n result = None\n result_doping = None\n if doping_levels:\n result_doping = {doping: {t: [] for t in\n self._seebeck_doping[doping]} for\n doping in self._seebeck_doping}\n\n for doping in result_doping:\n for t in result_doping[doping]:\n for i in range(len(self.doping[doping])):\n pf_tensor = np.dot(self._cond_doping[doping][t][i],\n np.dot(\n self._seebeck_doping[doping][t][\n i],\n self._seebeck_doping[doping][t][\n i]))\n thermal_conduct = (self._kappa_doping[doping][t][i]\n - pf_tensor * t) * relaxation_time\n result_doping[doping][t].append(\n np.dot(pf_tensor * relaxation_time * t,\n np.linalg.inv(\n thermal_conduct + kl * np.eye(3, 3))))\n else:\n result = {t: [] for t in self._seebeck}\n for t in result:\n for i in range(len(self.mu_steps)):\n pf_tensor = np.dot(self._cond[t][i],\n np.dot(self._seebeck[t][i],\n self._seebeck[t][i]))\n thermal_conduct = (self._kappa[t][i]\n - pf_tensor * t) * relaxation_time\n result[t].append(np.dot(pf_tensor * relaxation_time * t,\n np.linalg.inv(\n thermal_conduct + kl *\n np.eye(3, 3))))\n\n return BoltztrapAnalyzer._format_to_output(result, result_doping,\n output, doping_levels)\n\n def get_average_eff_mass(self, output='eigs', doping_levels=True):\n \"\"\"\n Gives the average effective mass tensor. We call it average because\n it takes into account all the bands\n and regions in the Brillouin zone. This is different than the standard\n textbook effective mass which relates\n often to only one (parabolic) band.\n The average effective mass tensor is defined as the integrated\n average of the second derivative of E(k)\n This effective mass tensor takes into account:\n -non-parabolicity\n -multiple extrema\n -multiple bands\n\n For more information about it. See:\n\n Hautier, G., Miglio, A., Waroquiers, D., Rignanese, G., & Gonze,\n X. (2014).\n How Does Chemistry Influence Electron Effective Mass in Oxides?\n A High-Throughput Computational Analysis. Chemistry of Materials,\n 26(19), 5447–5458. doi:10.1021/cm404079a\n\n or\n\n Hautier, G., Miglio, A., Ceder, G., Rignanese, G.-M., & Gonze,\n X. (2013).\n Identification and design principles of low hole effective mass\n p-type transparent conducting oxides.\n Nature Communications, 4, 2292. 
doi:10.1038/ncomms3292\n\n Depending on the value of output, we have either the full 3x3\n effective mass tensor,\n its 3 eigenvalues or an average\n\n Args:\n output (string): 'eigs' for eigenvalues, 'tensor' for the full\n tensor and 'average' for an average (trace/3)\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n Returns:\n If doping_levels=True,a dictionary {'p':{temp:[]},'n':{temp:[]}}\n with an array of effective mass tensor, eigenvalues of average\n value (depending on output) for each temperature and for each\n doping level.\n The 'p' links to hole effective mass tensor and 'n' to electron\n effective mass tensor.\n \"\"\"\n result = None\n result_doping = None\n conc = self.get_carrier_concentration()\n if doping_levels:\n result_doping = {doping: {t: [] for t in self._cond_doping[doping]}\n for\n doping in self.doping}\n for doping in result_doping:\n for temp in result_doping[doping]:\n for i in range(len(self.doping[doping])):\n result_doping[doping][temp].append(np.linalg.inv(\n np.array(self._cond_doping[doping][temp][i])) * \\\n self.doping[doping][\n i] * 10 ** 6 * e ** 2 / m_e)\n else:\n result = {t: [] for t in self._seebeck}\n for temp in result:\n for i in range(len(self.mu_steps)):\n try:\n cond_inv = np.linalg.inv(np.array(self._cond[temp][i]))\n except np.linalg.LinAlgError:\n pass\n result[temp].append(cond_inv * \\\n conc[temp][i] * 10 ** 6 * e ** 2 / m_e)\n\n return BoltztrapAnalyzer._format_to_output(result, result_doping,\n output, doping_levels)\n\n def get_extreme(self, target_prop, maximize=True, min_temp=None,\n max_temp=None, min_doping=None, max_doping=None,\n isotropy_tolerance=0.05, use_average=True):\n\n \"\"\"\n This method takes in eigenvalues over a range of carriers,\n temperatures, and doping levels, and tells you what is the \"best\"\n value that can be achieved for the given target_property. Note that\n this method searches the doping dict only, not the full mu dict.\n\n Args:\n target_prop: target property, i.e. \"seebeck\", \"power factor\",\n \"conductivity\", \"kappa\", or \"zt\"\n maximize: True to maximize, False to minimize (e.g. kappa)\n min_temp: minimum temperature allowed\n max_temp: maximum temperature allowed\n min_doping: minimum doping allowed (e.g., 1E18)\n max_doping: maximum doping allowed (e.g., 1E20)\n isotropy_tolerance: tolerance for isotropic (0.05 = 5%)\n use_average: True for avg of eigenval, False for max eigenval\n\n Returns:\n A dictionary with keys {\"p\", \"n\", \"best\"} with sub-keys:\n {\"value\", \"temperature\", \"doping\", \"isotropic\"}\n\n \"\"\"\n\n def is_isotropic(x, isotropy_tolerance):\n \"\"\"\n Internal method to tell you if 3-vector \"x\" is isotropic\n\n Args:\n x: the vector to determine isotropy for\n isotropy_tolerance: tolerance, e.g. 
0.05 is 5%\n \"\"\"\n if len(x) != 3:\n raise ValueError(\"Invalid input to is_isotropic!\")\n\n st = sorted(x)\n return bool(all([st[0], st[1], st[2]]) and \\\n (abs((st[1] - st[0]) / st[1]) <= isotropy_tolerance) and \\\n (abs((st[2] - st[0])) / st[2] <= isotropy_tolerance) and \\\n (abs((st[2] - st[1]) / st[2]) <= isotropy_tolerance))\n\n if target_prop.lower() == \"seebeck\":\n d = self.get_seebeck(output=\"eigs\", doping_levels=True)\n\n elif target_prop.lower() == \"power factor\":\n d = self.get_power_factor(output=\"eigs\", doping_levels=True)\n\n elif target_prop.lower() == \"conductivity\":\n d = self.get_conductivity(output=\"eigs\", doping_levels=True)\n\n elif target_prop.lower() == \"kappa\":\n d = self.get_thermal_conductivity(output=\"eigs\",\n doping_levels=True)\n elif target_prop.lower() == \"zt\":\n d = self.get_zt(output=\"eigs\", doping_levels=True)\n\n else:\n raise ValueError(\"Target property: {} not recognized!\".\n format(target_prop))\n\n absval = True # take the absolute value of properties\n\n x_val = None\n x_temp = None\n x_doping = None\n x_isotropic = None\n output = {}\n\n min_temp = min_temp or 0\n max_temp = max_temp or float('inf')\n min_doping = min_doping or 0\n max_doping = max_doping or float('inf')\n\n for pn in ('p', 'n'):\n for t in d[pn]: # temperatures\n if min_temp <= float(t) <= max_temp:\n for didx, evs in enumerate(d[pn][t]):\n doping_lvl = self.doping[pn][didx]\n if min_doping <= doping_lvl <= max_doping:\n isotropic = is_isotropic(evs, isotropy_tolerance)\n if absval:\n evs = [abs(x) for x in evs]\n if use_average:\n val = float(sum(evs)) / len(evs)\n else:\n val = max(evs)\n if x_val is None or (val > x_val and maximize) \\\n or (val < x_val and not maximize):\n x_val = val\n x_temp = t\n x_doping = doping_lvl\n x_isotropic = isotropic\n\n output[pn] = {'value': x_val, 'temperature': x_temp,\n 'doping': x_doping, 'isotropic': x_isotropic}\n x_val = None\n\n if maximize:\n max_type = 'p' if output['p']['value'] >= \\\n output['n']['value'] else 'n'\n else:\n max_type = 'p' if output['p']['value'] <= \\\n output['n']['value'] else 'n'\n\n output['best'] = output[max_type]\n output['best']['carrier_type'] = max_type\n\n return output\n\n @staticmethod\n def _format_to_output(tensor, tensor_doping, output, doping_levels,\n multi=1.0):\n if doping_levels:\n full_tensor = tensor_doping\n result = {doping: {t: [] for t in tensor_doping[doping]} for doping\n in tensor_doping}\n for doping in full_tensor:\n for temp in full_tensor[doping]:\n for i in range(len(full_tensor[doping][temp])):\n if output in ['eig', 'eigs']:\n result[doping][temp].append(sorted(\n np.linalg.eigh(full_tensor[doping][temp][i])[\n 0] * multi))\n elif output == 'tensor':\n result[doping][temp].append(\n np.array(full_tensor[doping][temp][i]) * multi)\n elif output == 'average':\n result[doping][temp].append(\n (full_tensor[doping][temp][i][0][0] \\\n + full_tensor[doping][temp][i][1][1] \\\n + full_tensor[doping][temp][i][2][\n 2]) * multi / 3.0)\n else:\n raise ValueError(\"Unknown output format: \"\n \"{}\".format(output))\n else:\n full_tensor = tensor\n result = {t: [] for t in tensor}\n for temp in full_tensor:\n for i in range(len(tensor[temp])):\n if output in ['eig', 'eigs']:\n result[temp].append(sorted(\n np.linalg.eigh(full_tensor[temp][i])[0] * multi))\n elif output == 'tensor':\n result[temp].append(\n np.array(full_tensor[temp][i]) * multi)\n elif output == 'average':\n result[temp].append((full_tensor[temp][i][0][0]\n + full_tensor[temp][i][1][1]\n + 
full_tensor[temp][i][2][\n 2]) * multi / 3.0)\n else:\n raise ValueError(\"Unknown output format: {}\".\n format(output))\n return result\n\n def get_complete_dos(self, structure, analyzer_for_second_spin=None):\n \"\"\"\n Gives a CompleteDos object with the DOS from the interpolated\n projected band structure\n Args:\n the structure (necessary to identify sites for projection)\n analyzer_for_second_spin must be specified to have a\n CompleteDos with both Spin components\n Returns:\n a CompleteDos object\n Example of use in case of spin polarized case:\n\n BoltztrapRunner(bs=bs,nelec=10,run_type=\"DOS\",spin=1).run(path_dir='dos_up/')\n an_up=BoltztrapAnalyzer.from_files(\"dos_up/boltztrap/\",dos_spin=1)\n\n BoltztrapRunner(bs=bs,nelec=10,run_type=\"DOS\",spin=-1).run(path_dir='dos_dw/')\n an_dw=BoltztrapAnalyzer.from_files(\"dos_dw/boltztrap/\",dos_spin=-1)\n\n cdos=an_up.get_complete_dos(bs.structure,an_dw)\n\n \"\"\"\n pdoss = {}\n spin_1 = list(self.dos.densities.keys())[0]\n\n if analyzer_for_second_spin:\n if not np.all(self.dos.energies ==\n analyzer_for_second_spin.dos.energies):\n raise BoltztrapError(\n \"Dos merging error: energies of the two dos are different\")\n\n spin_2 = list(analyzer_for_second_spin.dos.densities.keys())[0]\n if spin_1 == spin_2:\n raise BoltztrapError(\n \"Dos merging error: spin component are the same\")\n\n for s in self._dos_partial:\n if structure.sites[int(s)] not in pdoss:\n pdoss[structure.sites[int(s)]] = {}\n for o in self._dos_partial[s]:\n if Orbital[o] not in pdoss[structure.sites[int(s)]]:\n pdoss[structure.sites[int(s)]][Orbital[o]] = {}\n pdoss[structure.sites[int(s)]][Orbital[o]][\n spin_1] = self._dos_partial[s][o]\n if analyzer_for_second_spin:\n pdoss[structure.sites[int(s)]][Orbital[o]][\n spin_2] = analyzer_for_second_spin._dos_partial[s][o]\n if analyzer_for_second_spin:\n tdos = Dos(self.dos.efermi, self.dos.energies,\n {spin_1: self.dos.densities[spin_1],\n spin_2: analyzer_for_second_spin.dos.densities[\n spin_2]})\n else:\n tdos = self.dos\n\n return CompleteDos(structure, total_dos=tdos, pdoss=pdoss)\n\n def get_mu_bounds(self, temp=300):\n return min(self.mu_doping['p'][temp]), max(self.mu_doping['n'][temp])\n\n def get_carrier_concentration(self):\n \"\"\"\n gives the carrier concentration (in cm^-3)\n\n Returns\n a dictionary {temp:[]} with an array of carrier concentration\n (in cm^-3) at each temperature\n The array relates to each step of electron chemical potential\n \"\"\"\n\n return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]]\n for temp in self._carrier_conc}\n\n def get_hall_carrier_concentration(self):\n \"\"\"\n gives the Hall carrier concentration (in cm^-3). 
This is the trace of\n the Hall tensor (see Boltztrap source code) Hall carrier concentration\n are not always exactly the same than carrier concentration.\n\n Returns\n a dictionary {temp:[]} with an array of Hall carrier concentration\n (in cm^-3) at each temperature The array relates to each step of\n electron chemical potential\n \"\"\"\n result = {temp: [] for temp in self._hall}\n for temp in self._hall:\n for i in self._hall[temp]:\n trace = (i[1][2][0] + i[2][0][1] + i[0][1][2]) / 3.0\n if trace != 0.0:\n result[temp].append(1e-6 / (trace * e))\n else:\n result[temp].append(0.0)\n return result\n\n @staticmethod\n def parse_outputtrans(path_dir):\n \"\"\"\n Parses .outputtrans file\n\n Args:\n path_dir: dir containing boltztrap.outputtrans\n\n Returns:\n tuple - (run_type, warning, efermi, gap, doping_levels)\n\n \"\"\"\n run_type = None\n warning = None\n efermi = None\n gap = None\n doping_levels = []\n\n with open(os.path.join(path_dir, \"boltztrap.outputtrans\"), 'r') \\\n as f:\n for line in f:\n if \"WARNING\" in line:\n warning = line\n elif \"Calc type:\" in line:\n run_type = line.split()[-1]\n elif line.startswith(\"VBM\"):\n efermi = Energy(line.split()[1], \"Ry\").to(\"eV\")\n elif line.startswith(\"Egap:\"):\n gap = Energy(float(line.split()[1]), \"Ry\").to(\"eV\")\n elif line.startswith(\"Doping level number\"):\n doping_levels.append(float(line.split()[6]))\n\n return run_type, warning, efermi, gap, doping_levels\n\n @staticmethod\n def parse_transdos(path_dir, efermi, dos_spin=1, trim_dos=False):\n\n \"\"\"\n Parses .transdos (total DOS) and .transdos_x_y (partial DOS) files\n Args:\n path_dir: (str) dir containing DOS files\n efermi: (float) Fermi energy\n dos_spin: (int) -1 for spin down, +1 for spin up\n trim_dos: (bool) whether to post-process / trim DOS\n\n Returns:\n tuple - (DOS, dict of partial DOS)\n \"\"\"\n\n data_dos = {'total': [], 'partial': {}}\n # parse the total DOS data\n ## format is energy, DOS, integrated DOS\n with open(os.path.join(path_dir, \"boltztrap.transdos\"), 'r') as f:\n count_series = 0 # TODO: why is count_series needed?\n for line in f:\n if line.lstrip().startswith(\"#\"):\n count_series += 1\n if count_series > 1:\n break\n else:\n data_dos['total'].append(\n [Energy(float(line.split()[0]), \"Ry\").to(\"eV\"),\n float(line.split()[1])])\n total_elec = float(line.split()[2])\n\n lw_l = 0\n hg_l = -len(data_dos['total'])\n if trim_dos:\n # Francesco knows what this does\n # It has something to do with a trick of adding fake energies\n # at the endpoints of the DOS, and then re-trimming it. 
This is\n # to get the same energy scale for up and down spin DOS.\n tmp_data = np.array(data_dos['total'])\n tmp_den = np.trim_zeros(tmp_data[:, 1], 'f')[1:]\n lw_l = len(tmp_data[:, 1]) - len(tmp_den)\n tmp_ene = tmp_data[lw_l:, 0]\n tmp_den = np.trim_zeros(tmp_den, 'b')[:-1]\n hg_l = len(tmp_ene) - len(tmp_den)\n tmp_ene = tmp_ene[:-hg_l]\n tmp_data = np.vstack((tmp_ene, tmp_den)).T\n data_dos['total'] = tmp_data.tolist()\n\n # parse partial DOS data\n for file_name in os.listdir(path_dir):\n if file_name.endswith(\n \"transdos\") and file_name != 'boltztrap.transdos':\n tokens = file_name.split(\".\")[1].split(\"_\")\n site = tokens[1]\n orb = '_'.join(tokens[2:])\n with open(os.path.join(path_dir, file_name), 'r') as f:\n for line in f:\n if not line.lstrip().startswith(\" #\"):\n if site not in data_dos['partial']:\n data_dos['partial'][site] = {}\n if orb not in data_dos['partial'][site]:\n data_dos['partial'][site][orb] = []\n data_dos['partial'][site][orb].append(\n float(line.split()[1]))\n data_dos['partial'][site][orb] = data_dos['partial'][site][\n orb][lw_l:-hg_l]\n\n dos_full = {'energy': [], 'density': []}\n\n for t in data_dos['total']:\n dos_full['energy'].append(t[0])\n dos_full['density'].append(t[1])\n\n dos = Dos(efermi, dos_full['energy'],\n {Spin(dos_spin): dos_full['density']})\n dos_partial = data_dos['partial'] # TODO: make this real DOS object?\n\n return dos, dos_partial\n\n @staticmethod\n def parse_intrans(path_dir):\n \"\"\"\n Parses boltztrap.intrans mainly to extract the value of scissor applied to the bands or some other inputs\n Args:\n path_dir: (str) dir containing the boltztrap.intrans file\n Returns:\n intrans (dict): a dictionary containing various inputs that had been used in the Boltztrap run.\n \"\"\"\n intrans = {}\n with open(os.path.join(path_dir, \"boltztrap.intrans\"), 'r') as f:\n for line in f:\n if \"iskip\" in line:\n intrans[\"scissor\"] = Energy(float(line.split(\" \")[3]),\n \"Ry\").to(\"eV\")\n if \"HISTO\" in line or \"TETRA\" in line:\n intrans[\"dos_type\"] = line[:-1]\n return intrans\n\n @staticmethod\n def parse_struct(path_dir):\n \"\"\"\n Parses boltztrap.struct file (only the volume)\n Args:\n path_dir: (str) dir containing the boltztrap.struct file\n\n Returns:\n (float) volume\n \"\"\"\n with open(os.path.join(path_dir, \"boltztrap.struct\"), 'r') as f:\n tokens = f.readlines()\n return Lattice([[Length(float(tokens[i].split()[j]), \"bohr\").\n to(\"ang\") for j in range(3)] for i in\n range(1, 4)]).volume\n\n @staticmethod\n def parse_cond_and_hall(path_dir, doping_levels=None):\n \"\"\"\n Parses the conductivity and Hall tensors\n Args:\n path_dir: Path containing .condtens / .halltens files\n doping_levels: ([float]) - doping lvls, parse outtrans to get this\n\n Returns:\n mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,\n mu_doping, seebeck_doping, cond_doping, kappa_doping,\n hall_doping, carrier_conc\n \"\"\"\n\n # Step 1: parse raw data but do not convert to final format\n t_steps = set()\n mu_steps = set()\n data_full = []\n data_hall = []\n data_doping_full = []\n data_doping_hall = []\n doping_levels = doping_levels or []\n\n # parse the full conductivity/Seebeck/kappa0/etc data\n ## also initialize t_steps and mu_steps\n with open(os.path.join(path_dir, \"boltztrap.condtens\"), 'r') as f:\n for line in f:\n if not line.startswith(\"#\"):\n mu_steps.add(float(line.split()[0]))\n t_steps.add(int(float(line.split()[1])))\n data_full.append([float(c) for c in line.split()])\n\n # parse the full Hall tensor\n 
with open(os.path.join(path_dir, \"boltztrap.halltens\"), 'r') as f:\n for line in f:\n if not line.startswith(\"#\"):\n data_hall.append([float(c) for c in line.split()])\n\n if len(doping_levels) != 0:\n # parse doping levels version of full cond. tensor, etc.\n with open(\n os.path.join(path_dir, \"boltztrap.condtens_fixdoping\"),\n 'r') as f:\n for line in f:\n if not line.startswith(\"#\") and len(line) > 2:\n data_doping_full.append([float(c)\n for c in line.split()])\n\n # parse doping levels version of full hall tensor\n with open(\n os.path.join(path_dir, \"boltztrap.halltens_fixdoping\"),\n 'r') as f:\n for line in f:\n if not line.startswith(\"#\") and len(line) > 2:\n data_doping_hall.append(\n [float(c) for c in line.split()])\n\n # Step 2: convert raw data to final format\n\n # sort t and mu_steps (b/c they are sets not lists)\n # and convert to correct energy\n t_steps = sorted([t for t in t_steps])\n mu_steps = sorted([Energy(m, \"Ry\").to(\"eV\") for m in mu_steps])\n\n # initialize output variables - could use defaultdict instead\n # I am leaving things like this for clarity\n cond = {t: [] for t in t_steps}\n seebeck = {t: [] for t in t_steps}\n kappa = {t: [] for t in t_steps}\n hall = {t: [] for t in t_steps}\n carrier_conc = {t: [] for t in t_steps}\n dos_full = {'energy': [], 'density': []}\n\n mu_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n seebeck_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n cond_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n kappa_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n hall_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n\n # process doping levels\n pn_doping_levels = {'p': [], 'n': []}\n for d in doping_levels:\n if d > 0:\n pn_doping_levels['p'].append(d)\n else:\n pn_doping_levels['n'].append(-d)\n\n # process raw conductivity data, etc.\n for d in data_full:\n temp, doping = d[1], d[2]\n carrier_conc[temp].append(doping)\n\n cond[temp].append(np.reshape(d[3:12], (3, 3)).tolist())\n seebeck[temp].append(np.reshape(d[12:21], (3, 3)).tolist())\n kappa[temp].append(np.reshape(d[21:30], (3, 3)).tolist())\n\n # process raw Hall data\n for d in data_hall:\n temp, doping = d[1], d[2]\n hall_tens = [np.reshape(d[3:12], (3, 3)).tolist(),\n np.reshape(d[12:21], (3, 3)).tolist(),\n np.reshape(d[21:30], (3, 3)).tolist()]\n hall[temp].append(hall_tens)\n\n # process doping conductivity data, etc.\n for d in data_doping_full:\n temp, doping, mu = d[0], d[1], d[-1]\n pn = 'p' if doping > 0 else 'n'\n mu_doping[pn][temp].append(Energy(mu, \"Ry\").to(\"eV\"))\n cond_doping[pn][temp].append(\n np.reshape(d[2:11], (3, 3)).tolist())\n seebeck_doping[pn][temp].append(\n np.reshape(d[11:20], (3, 3)).tolist())\n kappa_doping[pn][temp].append(\n np.reshape(d[20:29], (3, 3)).tolist())\n\n # process doping Hall data\n for d in data_doping_hall:\n temp, doping, mu = d[0], d[1], d[-1]\n pn = 'p' if doping > 0 else 'n'\n hall_tens = [np.reshape(d[2:11], (3, 3)).tolist(),\n np.reshape(d[11:20], (3, 3)).tolist(),\n np.reshape(d[20:29], (3, 3)).tolist()]\n hall_doping[pn][temp].append(hall_tens)\n\n return mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, \\\n mu_doping, seebeck_doping, cond_doping, kappa_doping, \\\n hall_doping, carrier_conc\n\n @staticmethod\n def from_files(path_dir, dos_spin=1):\n \"\"\"\n get a BoltztrapAnalyzer object from a set of files\n\n Args:\n path_dir: directory where the boltztrap files 
are\n dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down\n\n Returns:\n a BoltztrapAnalyzer object\n\n \"\"\"\n run_type, warning, efermi, gap, doping_levels = \\\n BoltztrapAnalyzer.parse_outputtrans(path_dir)\n\n vol = BoltztrapAnalyzer.parse_struct(path_dir)\n\n intrans = BoltztrapAnalyzer.parse_intrans(path_dir)\n\n if run_type == \"BOLTZ\":\n dos, pdos = BoltztrapAnalyzer.parse_transdos(\n path_dir, efermi, dos_spin=dos_spin, trim_dos=False)\n\n mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, \\\n seebeck_doping, cond_doping, kappa_doping, hall_doping, \\\n carrier_conc = BoltztrapAnalyzer. \\\n parse_cond_and_hall(path_dir, doping_levels)\n\n return BoltztrapAnalyzer(\n gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,\n mu_doping, seebeck_doping, cond_doping, kappa_doping,\n hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)\n\n elif run_type == \"DOS\":\n trim = True if intrans[\"dos_type\"] == \"HISTO\" else False\n dos, pdos = BoltztrapAnalyzer.parse_transdos(\n path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)\n\n return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos,\n warning=warning, vol=vol)\n\n elif run_type == \"BANDS\":\n bz_kpoints = np.loadtxt(\n os.path.join(path_dir, \"boltztrap_band.dat\"))[:, -3:]\n bz_bands = np.loadtxt(\n os.path.join(path_dir, \"boltztrap_band.dat\"))[:, 1:-6]\n return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints,\n warning=warning, vol=vol)\n\n elif run_type == \"FERMI\":\n \"\"\"\n \"\"\"\n\n if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):\n fs_data = read_cube_file(\n os.path.join(path_dir, 'boltztrap_BZ.cube'))\n elif os.path.exists(os.path.join(path_dir, 'fort.30')):\n fs_data = read_cube_file(os.path.join(path_dir, 'fort.30'))\n else:\n raise BoltztrapError(\"No data file found for fermi surface\")\n return BoltztrapAnalyzer(fermi_surface_data=fs_data)\n\n else:\n raise ValueError(\"Run type: {} not recognized!\".format(run_type))\n\n def as_dict(self):\n\n results = {'gap': self.gap,\n 'mu_steps': self.mu_steps,\n 'scissor': self.intrans[\"scissor\"],\n 'cond': self._cond,\n 'seebeck': self._seebeck,\n 'kappa': self._kappa,\n 'hall': self._hall,\n 'doping': self.doping,\n 'mu_doping': self.mu_doping,\n 'seebeck_doping': self._seebeck_doping,\n 'cond_doping': self._cond_doping,\n 'kappa_doping': self._kappa_doping,\n 'hall_doping': self._hall_doping,\n 'dos': self.dos.as_dict(),\n 'dos_partial': self._dos_partial,\n 'carrier_conc': self._carrier_conc,\n 'vol': self.vol,\n 'warning': self.warning}\n return jsanitize(results)\n\n @staticmethod\n def from_dict(data):\n def _make_float_array(a):\n res = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]\n for i in range(3):\n for j in range(3):\n res[i][j] = float(a[i][j])\n return res\n\n def _make_float_hall(a):\n return [i for i in a[:27]]\n\n gap = data.get('gap')\n mu_steps = [float(d) for d in data['mu_steps']] if \\\n 'mu_steps' in data else None\n cond = {int(d): [_make_float_array(v) for v in data['cond'][d]]\n for d in data['cond']} if 'cond' in data else None\n seebeck = {int(d): [_make_float_array(v) for v in data['seebeck'][d]]\n for d in data['seebeck']} if 'seebeck' in data else None\n kappa = {int(d): [_make_float_array(v) for v in data['kappa'][d]]\n for d in data['kappa']} if 'kappa' in data else None\n hall = {int(d): [_make_float_hall(v) for v in data['hall'][d]]\n for d in data['hall']} if 'hall' in data else None\n doping = {'p': [float(d) for d in data['doping']['p']],\n 'n': 
[float(d) for d in data['doping']['n']]} if \\\n 'doping' in data else None\n\n mu_doping = {'p': {int(d): [\n float(v) for v in data['mu_doping']['p'][d]] for d in\n data['mu_doping']['p']}, 'n':\n {int(d): [float(v) for v in data['mu_doping']['n'][d]]\n for d in data['mu_doping'][\n 'n']}} if 'mu_doping' in data else None\n\n seebeck_doping = {'p': {int(d): [\n _make_float_array(v) for v in data['seebeck_doping']['p'][d]]\n for d in data['seebeck_doping']['p']}, 'n':\n {int(d): [_make_float_array(v) for v in\n data['seebeck_doping']['n'][d]] for d in\n data['seebeck_doping'][\n 'n']}} if 'seebeck_doping' in data \\\n else None\n\n cond_doping = {'p': {int(d): [_make_float_array(v)\n for v in data['cond_doping']['p'][d]]\n for d in data['cond_doping']['p']}, 'n':\n {int(d): [_make_float_array(v) for v in\n data['cond_doping']['n'][d]] for\n d in data['cond_doping'][\n 'n']}} if 'cond_doping' in data else None\n\n kappa_doping = {'p': {int(d): [_make_float_array(v)\n for v in data['kappa_doping']['p'][d]]\n for d in data['kappa_doping']['p']},\n 'n': {int(d): [_make_float_array(v) for v in\n data['kappa_doping']['n'][d]]\n for d in data['kappa_doping']['n']}} \\\n if 'kappa_doping' in data else None\n\n hall_doping = {'p': {int(d): [_make_float_hall(v) for v in\n data['hall_doping']['p'][d]] for d in\n data['hall_doping']['p']}, 'n':\n {int(d): [_make_float_hall(v) for v in\n data['hall_doping']['n'][d]] for d in\n data['hall_doping'][\n 'n']}} if \"hall_doping\" in data else None\n\n dos = Dos.from_dict(data['dos']) if 'dos' in data else None\n dos_partial = data.get('dos_partial')\n carrier_conc = data.get('carrier_conc')\n vol = data.get('vol')\n warning = data.get('warning')\n\n return BoltztrapAnalyzer(gap, mu_steps, cond, seebeck, kappa, hall,\n doping, mu_doping, seebeck_doping,\n cond_doping, kappa_doping, hall_doping, dos,\n dos_partial, carrier_conc, vol, warning)\n\n\ndef read_cube_file(filename):\n with open(filename, 'rt') as f:\n natoms = 0\n count_line = 0\n for line in f:\n line = line.rstrip(\"\\n\")\n if count_line == 0 and \"CUBE\" not in line:\n raise ValueError(\"CUBE file format not recognized\")\n\n if count_line == 2:\n tokens = line.split()\n origin = [float(tokens[i]) for i in range(1,4)]\n natoms = int(tokens[0])\n if count_line == 3:\n tokens = line.split()\n a1 = [float(tokens[i]) for i in range(1,4)]\n n1 = int(tokens[0])\n elif count_line == 4:\n tokens = line.split()\n a2 = [float(tokens[i]) for i in range(1,4)]\n n2 = int(tokens[0])\n elif count_line == 5:\n tokens = line.split()\n a3 = [float(tokens[i]) for i in range(1,4)]\n n3 = int(tokens[0])\n #kpoints=[[[0 for i in range(0,n1)] for j in range(0,n2)] for l in range(0,n3)]\n elif count_line > 5:\n break\n\n count_line += 1\n\n if 'fort.30' in filename:\n energy_data = np.genfromtxt(filename,skip_header=natoms+6,skip_footer=1)\n nlines_data = len(energy_data)\n last_line = np.genfromtxt(filename,skip_header=nlines_data+natoms+6)\n energy_data = np.append(energy_data.flatten(),last_line).reshape(n1,n2,n3)\n elif 'boltztrap_BZ.cube' in filename:\n energy_data = np.loadtxt(filename,skiprows=natoms+6).reshape(n1,n2,n3)\n \n energy_data /= Energy(1, \"eV\").to(\"Ry\")\n\n return energy_data\n\n\ndef compare_sym_bands(bands_obj, bands_ref_obj, nb=None):\n \"\"\"\n Compute the mean of correlation between bzt and vasp bandstructure on\n sym line, for all bands and locally (for each branches) the difference\n squared (%) if nb is specified.\n \"\"\"\n\n nkpt = len(bands_obj.kpoints)\n if 
bands_ref_obj.is_spin_polarized:\n nbands = min(bands_obj.nb_bands, 2 * bands_ref_obj.nb_bands)\n else:\n # TODO: why is this needed? Shouldn't pmg take care of nb_bands?\n nbands = min(len(bands_obj.bands[Spin.up]),\n len(bands_ref_obj.bands[Spin.up]))\n # print(nbands)\n arr_bands = np.array(bands_obj.bands[Spin.up][:nbands])\n # arr_bands_lavg = (arr_bands-np.mean(arr_bands,axis=1).reshape(nbands,1))\n\n if bands_ref_obj.is_spin_polarized:\n arr_bands_ref_up = np.array(bands_ref_obj.bands[Spin.up])\n arr_bands_ref_dw = np.array(bands_ref_obj.bands[Spin.down])\n # print(arr_bands_ref_up.shape)\n arr_bands_ref = np.vstack((arr_bands_ref_up, arr_bands_ref_dw))\n arr_bands_ref = np.sort(arr_bands_ref, axis=0)[:nbands]\n # print(arr_bands_ref.shape)\n else:\n arr_bands_ref = np.array(bands_ref_obj.bands[Spin.up][:nbands])\n\n # arr_bands_ref_lavg =\n # (arr_bands_ref-np.mean(arr_bands_ref,axis=1).reshape(nbands,1))\n\n # err = np.sum((arr_bands_lavg-arr_bands_ref_lavg)**2,axis=1)/nkpt\n corr = np.array(\n [distance.correlation(arr_bands[idx], arr_bands_ref[idx]) for idx in\n range(nbands)])\n\n if type(nb) == int: nb = [nb]\n\n bcheck = {}\n\n if max(nb) < nbands:\n branches = [[s['start_index'], s['end_index'], s['name']] for s in\n bands_ref_obj.branches]\n\n if not bands_obj.is_metal() and not bands_ref_obj.is_metal():\n zero_ref = bands_ref_obj.get_vbm()['energy']\n zero = bands_obj.get_vbm()['energy']\n if not zero:\n vbm = bands_ref_obj.get_vbm()['band_index'][Spin.up][-1]\n zero = max(arr_bands[vbm])\n else:\n zero_ref = 0 # bands_ref_obj.efermi\n zero = 0 # bands_obj.efermi\n print(zero, zero_ref)\n\n for nbi in nb:\n bcheck[nbi] = {}\n\n bcheck[nbi]['Dist'] = np.mean(abs(arr_bands[nbi] - zero\n - arr_bands_ref[nbi] + zero_ref))\n bcheck[nbi]['Corr'] = corr[nbi]\n\n for start, end, name in branches:\n # werr.append((sum((arr_bands_corr[nb][start:end+1] -\n # arr_bands_ref_corr[nb][start:end+1])**2)/(end+1-start)*100,name))\n bcheck[nbi][name] = np.mean(abs(arr_bands[nbi][start:end + 1]\n - zero\n - arr_bands_ref[nbi][\n start:end + 1] + zero_ref))\n else:\n bcheck = \"No nb given\"\n\n return bcheck\n",
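The BoltztrapAnalyzer defined above exposes the parsed transport tensors through its `get_*` accessors. As a rough orientation, here is a minimal usage sketch that is not part of the original file: the `boltztrap/` run directory is a placeholder, and the import assumes the usual pymatgen location of this module.

# Minimal usage sketch (not from the original file). Assumes a finished
# BoltzTraP "BOLTZ" run in a hypothetical directory and the standard
# pymatgen import path for the class defined above.
from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer

run_dir = "boltztrap/"  # assumed path containing boltztrap.outputtrans, .condtens, .halltens, ...
an = BoltztrapAnalyzer.from_files(run_dir)

# Seebeck eigenvalues in microV/K, keyed by doping type and temperature.
seebeck = an.get_seebeck(output="eigs", doping_levels=True)

# Average zT under a constant relaxation time (s) and lattice thermal conductivity (W/(m*K)).
zt = an.get_zt(output="average", relaxation_time=1e-14, kl=1.0)

# Best Seebeck value over a temperature/doping window, as returned by get_extreme above.
best = an.get_extreme("seebeck", maximize=True,
                      min_temp=300, max_temp=600,
                      min_doping=1e18, max_doping=1e20)
print(best["best"])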
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\n\n\"\"\"\nThis module provides classes for analyzing phase diagrams.\n\"\"\"\n\nfrom six.moves import zip\nimport numpy as np\nimport itertools\nimport collections\n\nfrom monty.functools import lru_cache\nfrom monty.dev import deprecated\n\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.phasediagram.maker import PhaseDiagram, get_facets\nfrom pymatgen.analysis.reaction_calculator import Reaction\nfrom pymatgen.util.coord_utils import Simplex\n\n__author__ = \"Shyue Ping Ong\"\n__copyright__ = \"Copyright 2011, The Materials Project\"\n__version__ = \"1.1\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n__date__ = \"May 16, 2012\"\n\n\nclass PDAnalyzer(object):\n \"\"\"\n A class for performing analyses on Phase Diagrams.\n\n The algorithm is based on the work in the following papers:\n\n 1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from\n First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.\n doi:10.1021/cm702327g\n\n 2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities\n of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first\n principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.\n doi:10.1016/j.elecom.2010.01.010\n \"\"\"\n\n numerical_tol = 1e-8\n\n def __init__(self, pd):\n \"\"\"\n Initializes analyzer with a PhaseDiagram.\n\n Args:\n pd: Phase Diagram to analyze.\n \"\"\"\n self._pd = pd\n\n @lru_cache(1)\n def _get_facet_and_simplex(self, comp):\n \"\"\"\n Get any facet that a composition falls into. Cached so successive\n calls at same composition are fast.\n \"\"\"\n c = self._pd.pd_coords(comp)\n for f, s in zip(self._pd.facets, self._pd.simplexes):\n if s.in_simplex(c, PDAnalyzer.numerical_tol / 10):\n return f, s\n raise RuntimeError(\"No facet found for comp = {}\".format(comp))\n\n def _get_facet_chempots(self, facet):\n \"\"\"\n Calculates the chemical potentials for each element within a facet.\n\n Args:\n facet: Facet of the phase diagram.\n\n Returns:\n { element: chempot } for all elements in the phase diagram.\n \"\"\"\n complist = [self._pd.qhull_entries[i].composition for i in facet]\n energylist = [self._pd.qhull_entries[i].energy_per_atom for i in facet]\n m = [[c.get_atomic_fraction(e) for e in self._pd.elements] for c in complist]\n chempots = np.linalg.solve(m, energylist)\n return dict(zip(self._pd.elements, chempots))\n\n def get_decomposition(self, comp):\n \"\"\"\n Provides the decomposition at a particular composition.\n\n Args:\n comp: A composition\n\n Returns:\n Decomposition as a dict of {Entry: amount}\n \"\"\"\n facet, simplex = self._get_facet_and_simplex(comp)\n decomp_amts = simplex.bary_coords(self._pd.pd_coords(comp))\n return {self._pd.qhull_entries[f]: amt\n for f, amt in zip(facet, decomp_amts)\n if abs(amt) > PDAnalyzer.numerical_tol}\n\n def get_hull_energy(self, comp):\n \"\"\"\n Args:\n comp (Composition): Input composition\n\n Returns:\n Energy of lowest energy equilibrium at desired composition. Not\n normalized by atoms, i.e. 
E(Li4O2) = 2 * E(Li2O)\n \"\"\"\n e = 0\n for k, v in self.get_decomposition(comp).items():\n e += k.energy_per_atom * v\n return e * comp.num_atoms\n\n def get_decomp_and_e_above_hull(self, entry, allow_negative=False):\n \"\"\"\n Provides the decomposition and energy above convex hull for an entry.\n Due to caching, can be much faster if entries with the same composition\n are processed together.\n\n Args:\n entry: A PDEntry like object\n allow_negative: Whether to allow negative e_above_hulls. Used to\n calculate equilibrium reaction energies. Defaults to False.\n\n Returns:\n (decomp, energy above convex hull) Stable entries should have\n energy above hull of 0. The decomposition is provided as a dict of\n {Entry: amount}.\n \"\"\"\n if entry in self._pd.stable_entries:\n return {entry: 1}, 0\n\n comp = entry.composition\n facet, simplex = self._get_facet_and_simplex(comp)\n decomp_amts = simplex.bary_coords(self._pd.pd_coords(comp))\n decomp = {self._pd.qhull_entries[f]: amt\n for f, amt in zip(facet, decomp_amts)\n if abs(amt) > PDAnalyzer.numerical_tol}\n energies = [self._pd.qhull_entries[i].energy_per_atom for i in facet]\n ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)\n if allow_negative or ehull >= -PDAnalyzer.numerical_tol:\n return decomp, ehull\n raise ValueError(\"No valid decomp found!\")\n\n def get_e_above_hull(self, entry):\n \"\"\"\n Provides the energy above convex hull for an entry\n\n Args:\n entry: A PDEntry like object\n\n Returns:\n Energy above convex hull of entry. Stable entries should have\n energy above hull of 0.\n \"\"\"\n return self.get_decomp_and_e_above_hull(entry)[1]\n\n def get_equilibrium_reaction_energy(self, entry):\n \"\"\"\n Provides the reaction energy of a stable entry from the neighboring\n equilibrium stable entries (also known as the inverse distance to\n hull).\n\n Args:\n entry: A PDEntry like object\n\n Returns:\n Equilibrium reaction energy of entry. Stable entries should have\n equilibrium reaction energy <= 0.\n \"\"\"\n if entry not in self._pd.stable_entries:\n raise ValueError(\"Equilibrium reaction energy is available only \"\n \"for stable entries.\")\n if entry.is_element:\n return 0\n entries = [e for e in self._pd.stable_entries if e != entry]\n modpd = PhaseDiagram(entries, self._pd.elements)\n analyzer = PDAnalyzer(modpd)\n return analyzer.get_decomp_and_e_above_hull(entry,\n allow_negative=True)[1]\n\n def get_composition_chempots(self, comp):\n facet = self._get_facet_and_simplex(comp)[0]\n return self._get_facet_chempots(facet)\n\n @deprecated(get_composition_chempots)\n def get_facet_chempots(self, facet):\n return self._get_facet_chempots(facet)\n\n def get_transition_chempots(self, element):\n \"\"\"\n Get the critical chemical potentials for an element in the Phase\n Diagram.\n\n Args:\n element: An element. 
Has to be in the PD in the first place.\n\n Returns:\n A sorted sequence of critical chemical potentials, from less\n negative to more negative.\n \"\"\"\n if element not in self._pd.elements:\n raise ValueError(\"get_transition_chempots can only be called with \"\n \"elements in the phase diagram.\")\n\n critical_chempots = []\n for facet in self._pd.facets:\n chempots = self._get_facet_chempots(facet)\n critical_chempots.append(chempots[element])\n\n clean_pots = []\n for c in sorted(critical_chempots):\n if len(clean_pots) == 0:\n clean_pots.append(c)\n else:\n if abs(c - clean_pots[-1]) > PDAnalyzer.numerical_tol:\n clean_pots.append(c)\n clean_pots.reverse()\n return tuple(clean_pots)\n\n def get_critical_compositions(self, comp1, comp2):\n \"\"\"\n Get the critical compositions along the tieline between two\n compositions. I.e. where the decomposition products change.\n The endpoints are also returned.\n Args:\n comp1, comp2 (Composition): compositions that define the tieline\n Returns:\n [(Composition)]: list of critical compositions. All are of\n the form x * comp1 + (1-x) * comp2\n \"\"\"\n\n n1 = comp1.num_atoms\n n2 = comp2.num_atoms\n pd_els = self._pd.elements\n\n # the reduced dimensionality Simplexes don't use the\n # first element in the PD\n c1 = self._pd.pd_coords(comp1)\n c2 = self._pd.pd_coords(comp2)\n\n # none of the projections work if c1 == c2, so just return *copies*\n # of the inputs\n if np.all(c1 == c2):\n return[comp1.copy(), comp2.copy()]\n\n intersections = [c1, c2]\n for sc in self._pd.simplexes:\n intersections.extend(sc.line_intersection(c1, c2))\n intersections = np.array(intersections)\n\n # find position along line\n l = (c2 - c1)\n l /= np.sum(l ** 2) ** 0.5\n proj = np.dot(intersections - c1, l)\n\n # only take compositions between endpoints\n proj = proj[np.logical_and(proj > -self.numerical_tol,\n proj < proj[1] + self.numerical_tol)]\n proj.sort()\n\n # only unique compositions\n valid = np.ones(len(proj), dtype=np.bool)\n valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol\n proj = proj[valid]\n\n ints = c1 + l * proj[:, None]\n # reconstruct full-dimensional composition array\n cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,\n ints], axis=-1)\n # mixing fraction when compositions are normalized\n x = proj / np.dot(c2 - c1, l)\n # mixing fraction when compositions are not normalized\n x_unnormalized = x * n1 / (n2 + x * (n1 - n2))\n num_atoms = n1 + (n2 - n1) * x_unnormalized\n cs *= num_atoms[:, None]\n return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]\n\n def get_element_profile(self, element, comp, comp_tol=1e-5):\n \"\"\"\n Provides the element evolution data for a composition.\n For example, can be used to analyze Li conversion voltages by varying\n uLi and looking at the phases formed. Also can be used to analyze O2\n evolution by varying uO2.\n\n Args:\n element: An element. 
Must be in the phase diagram.\n comp: A Composition\n comp_tol: The tolerance to use when calculating decompositions.\n Phases with amounts less than this tolerance are excluded.\n Defaults to 1e-5.\n\n Returns:\n Evolution data as a list of dictionaries of the following format:\n [ {'chempot': -10.487582010000001, 'evolution': -2.0,\n 'reaction': Reaction Object], ...]\n \"\"\"\n if element not in self._pd.elements:\n raise ValueError(\"get_transition_chempots can only be called with\"\n \" elements in the phase diagram.\")\n gccomp = Composition({el: amt for el, amt in comp.items()\n if el != element})\n elref = self._pd.el_refs[element]\n elcomp = Composition(element.symbol)\n evolution = []\n\n for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:\n decomp_entries = self.get_decomposition(cc).keys()\n decomp = [k.composition for k in decomp_entries]\n rxn = Reaction([comp], decomp + [elcomp])\n rxn.normalize_to(comp)\n c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]\n amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]\n evolution.append({'chempot': c,\n 'evolution': amt,\n 'element_reference': elref,\n 'reaction': rxn, 'entries': decomp_entries})\n return evolution\n\n def get_chempot_range_map(self, elements, referenced=True, joggle=True):\n \"\"\"\n Returns a chemical potential range map for each stable entry.\n\n Args:\n elements: Sequence of elements to be considered as independent\n variables. E.g., if you want to show the stability ranges\n of all Li-Co-O phases wrt to uLi and uO, you will supply\n [Element(\"Li\"), Element(\"O\")]\n referenced: If True, gives the results with a reference being the\n energy of the elemental phase. If False, gives absolute values.\n joggle (boolean): Whether to joggle the input to avoid precision\n errors.\n\n Returns:\n Returns a dict of the form {entry: [simplices]}. 
The list of\n simplices are the sides of the N-1 dim polytope bounding the\n allowable chemical potential range of each entry.\n \"\"\"\n all_chempots = []\n pd = self._pd\n facets = pd.facets\n for facet in facets:\n chempots = self._get_facet_chempots(facet)\n all_chempots.append([chempots[el] for el in pd.elements])\n inds = [pd.elements.index(el) for el in elements]\n el_energies = {el: 0.0 for el in elements}\n if referenced:\n el_energies = {el: pd.el_refs[el].energy_per_atom\n for el in elements}\n chempot_ranges = collections.defaultdict(list)\n vertices = [list(range(len(self._pd.elements)))]\n if len(all_chempots) > len(self._pd.elements):\n vertices = get_facets(all_chempots, joggle=joggle)\n for ufacet in vertices:\n for combi in itertools.combinations(ufacet, 2):\n data1 = facets[combi[0]]\n data2 = facets[combi[1]]\n common_ent_ind = set(data1).intersection(set(data2))\n if len(common_ent_ind) == len(elements):\n common_entries = [pd.qhull_entries[i]\n for i in common_ent_ind]\n data = np.array([[all_chempots[i][j]\n - el_energies[pd.elements[j]]\n for j in inds] for i in combi])\n sim = Simplex(data)\n for entry in common_entries:\n chempot_ranges[entry].append(sim)\n\n return chempot_ranges\n\n def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):\n \"\"\"\n returns a set of chemical potentials corresponding to the vertices of the simplex\n in the chemical potential phase diagram.\n The simplex is built using all elements in the target_composition except dep_elt.\n The chemical potential of dep_elt is computed from the target composition energy.\n This method is useful to get the limiting conditions for\n defects computations for instance.\n\n Args:\n target_comp: A Composition object\n dep_elt: the element for which the chemical potential is computed from the energy of\n the stable phase at the target composition\n tol_en: a tolerance on the energy to set\n\n Returns:\n [{Element:mu}]: An array of conditions on simplex vertices for\n which each element has a chemical potential set to a given\n value. \"absolute\" values (i.e., not referenced to element energies)\n \"\"\"\n muref = np.array([self._pd.el_refs[e].energy_per_atom\n for e in self._pd.elements if e != dep_elt])\n chempot_ranges = self.get_chempot_range_map(\n [e for e in self._pd.elements if e != dep_elt])\n\n for e in self._pd.elements:\n if not e in target_comp.elements:\n target_comp = target_comp + Composition({e: 0.0})\n coeff = [-target_comp[e] for e in self._pd.elements if e != dep_elt]\n for e in chempot_ranges.keys():\n if e.composition.reduced_composition == \\\n target_comp.reduced_composition:\n multiplicator = e.composition[dep_elt] / target_comp[dep_elt]\n ef = e.energy / multiplicator\n all_coords = []\n for s in chempot_ranges[e]:\n for v in s._coords:\n elts = [e for e in self._pd.elements if e != dep_elt]\n res = {}\n for i in range(len(elts)):\n res[elts[i]] = v[i] + muref[i]\n res[dep_elt]=(np.dot(v+muref, coeff)+ef)/target_comp[dep_elt]\n already_in = False\n for di in all_coords:\n dict_equals = True\n for k in di:\n if abs(di[k]-res[k]) > tol_en:\n dict_equals = False\n break\n if dict_equals:\n already_in = True\n break\n if not already_in:\n all_coords.append(res)\n return all_coords\n\n def get_chempot_range_stability_phase(self, target_comp, open_elt):\n \"\"\"\n returns a set of chemical potentials correspoding to the max and min\n chemical potential of the open element for a given composition. 
It is\n quite common to have for instance a ternary oxide (e.g., ABO3) for\n which you want to know what are the A and B chemical potential leading\n to the highest and lowest oxygen chemical potential (reducing and\n oxidizing conditions). This is useful for defect computations.\n\n Args:\n target_comp: A Composition object\n open_elt: Element that you want to constrain to be max or min\n\n Returns:\n {Element:(mu_min,mu_max)}: Chemical potentials are given in\n \"absolute\" values (i.e., not referenced to 0)\n \"\"\"\n muref = np.array([self._pd.el_refs[e].energy_per_atom\n for e in self._pd.elements if e != open_elt])\n chempot_ranges = self.get_chempot_range_map(\n [e for e in self._pd.elements if e != open_elt])\n for e in self._pd.elements:\n if not e in target_comp.elements:\n target_comp = target_comp + Composition({e: 0.0})\n coeff = [-target_comp[e] for e in self._pd.elements if e != open_elt]\n max_open = -float('inf')\n min_open = float('inf')\n max_mus = None\n min_mus = None\n for e in chempot_ranges.keys():\n if e.composition.reduced_composition == \\\n target_comp.reduced_composition:\n multiplicator = e.composition[open_elt] / target_comp[open_elt]\n ef = e.energy / multiplicator\n all_coords = []\n for s in chempot_ranges[e]:\n for v in s._coords:\n all_coords.append(v)\n if (np.dot(v + muref, coeff) + ef) / target_comp[\n open_elt] > max_open:\n max_open = (np.dot(v + muref, coeff) + ef) / \\\n target_comp[open_elt]\n max_mus = v\n if (np.dot(v + muref, coeff) + ef) / target_comp[\n open_elt] < min_open:\n min_open = (np.dot(v + muref, coeff) + ef) / \\\n target_comp[open_elt]\n min_mus = v\n elts = [e for e in self._pd.elements if e != open_elt]\n res = {}\n for i in range(len(elts)):\n res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])\n res[open_elt] = (min_open, max_open)\n return res\n",
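To make the PDAnalyzer API above concrete, a minimal sketch of building a phase diagram and querying it follows. This is not part of the original module: the toy entries and their energies are invented, and the PDEntry/PDAnalyzer import paths are assumptions inferred from the `pymatgen.phasediagram` package layout this file itself imports from.

# Minimal usage sketch (not from the original file). Entry energies are
# made up purely to illustrate the calls; import paths for PDEntry and
# PDAnalyzer are assumed from the pymatgen.phasediagram package layout.
from pymatgen.core.composition import Composition
from pymatgen.phasediagram.maker import PhaseDiagram
from pymatgen.phasediagram.entries import PDEntry      # assumed path
from pymatgen.phasediagram.analyzer import PDAnalyzer  # assumed path

# Toy Li-O entries (total energies in eV, not physical values).
entries = [
    PDEntry(Composition("Li"), 0.0),
    PDEntry(Composition("O2"), 0.0),
    PDEntry(Composition("Li2O"), -6.0),
    PDEntry(Composition("Li2O2"), -6.5),
]

pd = PhaseDiagram(entries)
analyzer = PDAnalyzer(pd)

# Decomposition of an arbitrary composition into hull phases: {PDEntry: fraction}.
decomp = analyzer.get_decomposition(Composition("Li3O2"))

# Energy above hull (eV/atom); stable entries return 0.
for entry in entries:
    print(entry.composition.reduced_formula,
          analyzer.get_e_above_hull(entry))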
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals, print_function\nimport itertools\nimport logging\nfrom collections import defaultdict\n\nimport math\nfrom math import cos\nfrom math import sin\nfrom fractions import Fraction\n\nimport numpy as np\n\nfrom six.moves import filter, map, zip\nfrom monty.dev import deprecated\nimport spglib\n\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.symmetry.structure import SymmetrizedStructure\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.structure import PeriodicSite\nfrom pymatgen.core.operations import SymmOp\nfrom pymatgen.util.coord_utils import find_in_coord_list, pbc_diff\n\n\"\"\"\nAn interface to the excellent spglib library by Atsushi Togo\n(http://spglib.sourceforge.net/) for pymatgen.\n\nv1.0 - Now works with both ordered and disordered structure.\nv2.0 - Updated for spglib 1.6.\nv3.0 - pymatgen no longer ships with spglib. Instead, spglib (the python\n version) is now a dependency and the SpacegroupAnalyzer merely serves\n as an interface to spglib for pymatgen Structures.\n\"\"\"\n\n__author__ = \"Shyue Ping Ong\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__version__ = \"3.0\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"[email protected]\"\n__date__ = \"May 14, 2016\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SpacegroupAnalyzer(object):\n \"\"\"\n Takes a pymatgen.core.structure.Structure object and a symprec.\n Uses pyspglib to perform various symmetry finding operations.\n\n Args:\n structure (Structure/IStructure): Structure to find symmetry\n symprec (float): Tolerance for symmetry finding. Defaults to 1e-3,\n which is fairly strict and works well for properly refined\n structures with atoms in the proper symmetry coordinates. For\n structures with slight deviations from their proper atomic\n positions (e.g., structures relaxed with electronic structure\n codes), a looser tolerance of 0.1 (the value used in Materials\n Project) is often needed.\n angle_tolerance (float): Angle tolerance for symmetry finding.\n \"\"\"\n\n def __init__(self, structure, symprec=1e-3, angle_tolerance=5):\n self._symprec = symprec\n self._angle_tol = angle_tolerance\n self._structure = structure\n latt = structure.lattice.matrix\n positions = structure.frac_coords\n unique_species = []\n zs = []\n magmoms = []\n\n for species, g in itertools.groupby(structure,\n key=lambda s: s.species_and_occu):\n if species in unique_species:\n ind = unique_species.index(species)\n zs.extend([ind + 1] * len(tuple(g)))\n else:\n unique_species.append(species)\n zs.extend([len(unique_species)] * len(tuple(g)))\n\n for site in structure:\n if hasattr(site, 'magmom'):\n magmoms.append(site.magmom)\n elif site.is_ordered and hasattr(site.specie, 'spin'):\n magmoms.append(site.specie.spin)\n else:\n magmoms.append(0)\n\n self._unique_species = unique_species\n self._numbers = zs\n # For now, we are setting magmom to zero.\n self._cell = latt, positions, zs, magmoms\n\n self._space_group_data = spglib.get_symmetry_dataset(\n self._cell, symprec=self._symprec, angle_tolerance=angle_tolerance)\n\n @deprecated(message=\"get_spacegroup has been renamed \"\n \"get_space_group_operations. 
Will be removed in \"\n \"pymatgen 5.0.\")\n def get_space_group(self):\n \"\"\"\n Get the SpacegroupOperations for the Structure.\n\n Returns:\n SpacgroupOperations object.\n \"\"\"\n return self.get_space_group_operations()\n\n @deprecated(message=\"get_spacegroup_symbol has been renamed \"\n \"get_space_group_symbol. Will be removed in \"\n \"pymatgen 5.0.\")\n def get_space_group_symbol(self):\n \"\"\"\n Get the spacegroup symbol (e.g., Pnma) for structure.\n\n Returns:\n (str): Spacegroup symbol for structure.\n \"\"\"\n return self._space_group_data[\"international\"]\n\n @deprecated(message=\"get_spacegroup_number has been renamed \"\n \"get_space_group_number. Will be removed in \"\n \"pymatgen 5.0.\")\n def get_space_group_number(self):\n \"\"\"\n Get the international spacegroup number (e.g., 62) for structure.\n\n Returns:\n (int): International spacegroup number for structure.\n \"\"\"\n return int(self._space_group_data[\"number\"])\n\n def get_space_group_operations(self):\n \"\"\"\n Get the SpacegroupOperations for the Structure.\n\n Returns:\n SpacgroupOperations object.\n \"\"\"\n return SpacegroupOperations(self.get_space_group_symbol(),\n self.get_space_group_number(),\n self.get_symmetry_operations())\n\n def get_space_group_symbol(self):\n \"\"\"\n Get the spacegroup symbol (e.g., Pnma) for structure.\n\n Returns:\n (str): Spacegroup symbol for structure.\n \"\"\"\n return self._space_group_data[\"international\"]\n\n def get_space_group_number(self):\n \"\"\"\n Get the international spacegroup number (e.g., 62) for structure.\n\n Returns:\n (int): International spacegroup number for structure.\n \"\"\"\n return int(self._space_group_data[\"number\"])\n\n def get_hall(self):\n \"\"\"\n Returns Hall symbol for structure.\n\n Returns:\n (str): Hall symbol\n \"\"\"\n return self._space_group_data[\"hall\"]\n\n @deprecated(message=\"get_point_group has been renamed \"\n \"get_point_group_symbol. 
Will be removed in \"\n \"pymatgen 5.0.\")\n def get_point_group(self):\n return self.get_point_group_symbol()\n\n def get_point_group_symbol(self):\n \"\"\"\n Get the point group associated with the structure.\n\n Returns:\n (Pointgroup): Point group for structure.\n \"\"\"\n rotations = self._space_group_data[\"rotations\"]\n # passing a 0-length rotations list to spglib can segfault\n if len(rotations) == 0:\n return '1'\n return spglib.get_pointgroup(rotations)[0].strip()\n\n def get_crystal_system(self):\n \"\"\"\n Get the crystal system for the structure, e.g., (triclinic,\n orthorhombic, cubic, etc.).\n\n Returns:\n (str): Crystal system for structure.\n \"\"\"\n n = self._space_group_data[\"number\"]\n\n f = lambda i, j: i <= n <= j\n cs = {\"triclinic\": (1, 2), \"monoclinic\": (3, 15),\n \"orthorhombic\": (16, 74), \"tetragonal\": (75, 142),\n \"trigonal\": (143, 167), \"hexagonal\": (168, 194),\n \"cubic\": (195, 230)}\n\n crystal_sytem = None\n\n for k, v in cs.items():\n if f(*v):\n crystal_sytem = k\n break\n return crystal_sytem\n\n def get_lattice_type(self):\n \"\"\"\n Get the lattice for the structure, e.g., (triclinic,\n orthorhombic, cubic, etc.).This is the same than the\n crystal system with the exception of the hexagonal/rhombohedral\n lattice\n\n Returns:\n (str): Lattice type for structure.\n \"\"\"\n n = self._space_group_data[\"number\"]\n system = self.get_crystal_system()\n if n in [146, 148, 155, 160, 161, 166, 167]:\n return \"rhombohedral\"\n elif system == \"trigonal\":\n return \"hexagonal\"\n else:\n return system\n\n def get_symmetry_dataset(self):\n \"\"\"\n Returns the symmetry dataset as a dict.\n\n Returns:\n (dict): With the following properties:\n number: International space group number\n international: International symbol\n hall: Hall symbol\n transformation_matrix: Transformation matrix from lattice of\n input cell to Bravais lattice L^bravais = L^original * Tmat\n origin shift: Origin shift in the setting of \"Bravais lattice\"\n rotations, translations: Rotation matrices and translation\n vectors. Space group operations are obtained by\n [(r,t) for r, t in zip(rotations, translations)]\n wyckoffs: Wyckoff letters\n \"\"\"\n return self._space_group_data\n\n def _get_symmetry(self):\n \"\"\"\n Get the symmetry operations associated with the structure.\n\n Returns:\n Symmetry operations as a tuple of two equal length sequences.\n (rotations, translations). 
\"rotations\" is the numpy integer array\n of the rotation matrices for scaled positions\n \"translations\" gives the numpy float64 array of the translation\n vectors in scaled positions.\n \"\"\"\n d = spglib.get_symmetry(self._cell, symprec=self._symprec,\n angle_tolerance=self._angle_tol)\n # Sometimes spglib returns small translation vectors, e.g.\n # [1e-4, 2e-4, 1e-4]\n # (these are in fractional coordinates, so should be small denominator\n # fractions)\n trans = []\n for t in d[\"translations\"]:\n trans.append([float(Fraction.from_float(c).limit_denominator(1000))\n for c in t])\n trans = np.array(trans)\n\n # fractional translations of 1 are more simply 0\n trans[np.abs(trans) == 1] = 0\n return d[\"rotations\"], trans\n\n def get_symmetry_operations(self, cartesian=False):\n \"\"\"\n Return symmetry operations as a list of SymmOp objects.\n By default returns fractional coord symmops.\n But cartesian can be returned too.\n\n Returns:\n ([SymmOp]): List of symmetry operations.\n \"\"\"\n rotation, translation = self._get_symmetry()\n symmops = []\n mat = self._structure.lattice.matrix.T\n invmat = np.linalg.inv(mat)\n for rot, trans in zip(rotation, translation):\n if cartesian:\n rot = np.dot(mat, np.dot(rot, invmat))\n trans = np.dot(trans, self._structure.lattice.matrix)\n op = SymmOp.from_rotation_and_translation(rot, trans)\n symmops.append(op)\n return symmops\n\n def get_point_group_operations(self, cartesian=False):\n \"\"\"\n Return symmetry operations as a list of SymmOp objects.\n By default returns fractional coord symmops.\n But cartesian can be returned too.\n\n Args:\n cartesian (bool): Whether to return SymmOps as cartesian or\n direct coordinate operations.\n\n Returns:\n ([SymmOp]): List of point group symmetry operations.\n \"\"\"\n rotation, translation = self._get_symmetry()\n symmops = []\n mat = self._structure.lattice.matrix.T\n invmat = np.linalg.inv(mat)\n for rot in rotation:\n if cartesian:\n rot = np.dot(mat, np.dot(rot, invmat))\n op = SymmOp.from_rotation_and_translation(rot, np.array([0, 0, 0]))\n symmops.append(op)\n return symmops\n\n def get_symmetrized_structure(self):\n \"\"\"\n Get a symmetrized structure. A symmetrized structure is one where the\n sites have been grouped into symmetrically equivalent groups.\n\n Returns:\n :class:`pymatgen.symmetry.structure.SymmetrizedStructure` object.\n \"\"\"\n ds = self.get_symmetry_dataset()\n sg = SpacegroupOperations(self.get_space_group_symbol(),\n self.get_space_group_number(),\n self.get_symmetry_operations())\n return SymmetrizedStructure(self._structure, sg,\n ds[\"equivalent_atoms\"],\n ds[\"wyckoffs\"])\n\n def get_refined_structure(self):\n \"\"\"\n Get the refined structure based on detected symmetry. The refined\n structure is a *conventional* cell setting with atoms moved to the\n expected symmetry positions.\n\n Returns:\n Refined structure.\n \"\"\"\n # Atomic positions have to be specified by scaled positions for spglib.\n lattice, scaled_positions, numbers \\\n = spglib.refine_cell(self._cell, self._symprec, self._angle_tol)\n\n species = [self._unique_species[i - 1] for i in numbers]\n s = Structure(lattice, species, scaled_positions)\n return s.get_sorted_structure()\n\n def find_primitive(self):\n \"\"\"\n Find a primitive version of the unit cell.\n\n Returns:\n A primitive cell in the input cell is searched and returned\n as an Structure object. 
If no primitive cell is found, None is\n returned.\n \"\"\"\n lattice, scaled_positions, numbers = spglib.find_primitive(\n self._cell, symprec=self._symprec)\n\n species = [self._unique_species[i - 1] for i in numbers]\n\n return Structure(lattice, species, scaled_positions,\n to_unit_cell=True).get_reduced_structure()\n\n def get_ir_reciprocal_mesh(self, mesh=(10, 10, 10), is_shift=(0, 0, 0)):\n \"\"\"\n k-point mesh of the Brillouin zone generated taken into account\n symmetry.The method returns the irreducible kpoints of the mesh\n and their weights\n\n Args:\n mesh (3x1 array): The number of kpoint for the mesh needed in\n each direction\n is_shift (3x1 array): Whether to shift the kpoint grid. (1, 1,\n 1) means all points are shifted by 0.5, 0.5, 0.5.\n\n Returns:\n A list of irreducible kpoints and their weights as a list of\n tuples [(ir_kpoint, weight)], with ir_kpoint given\n in fractional coordinates\n \"\"\"\n shift = np.array([1 if i else 0 for i in is_shift])\n mapping, grid = spglib.get_ir_reciprocal_mesh(\n np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)\n\n results = []\n tmp_map = list(mapping)\n for i in np.unique(mapping):\n results.append(((grid[i] + shift * (0.5, 0.5, 0.5)) / mesh,\n tmp_map.count(i)))\n return results\n\n def get_primitive_standard_structure(self, international_monoclinic=True):\n \"\"\"\n Gives a structure with a primitive cell according to certain standards\n the standards are defined in Setyawan, W., & Curtarolo, S. (2010).\n High-throughput electronic band structure calculations:\n Challenges and tools. Computational Materials Science,\n 49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010\n\n Returns:\n The structure in a primitive standardized cell\n \"\"\"\n conv = self.get_conventional_standard_structure(\n international_monoclinic=international_monoclinic)\n lattice = self.get_lattice_type()\n\n if \"P\" in self.get_space_group_symbol() or lattice == \"hexagonal\":\n return conv\n\n if lattice == \"rhombohedral\":\n # check if the conventional representation is hexagonal or\n # rhombohedral\n lengths, angles = conv.lattice.lengths_and_angles\n if abs(lengths[0]-lengths[2]) < 0.0001:\n transf = np.eye\n else:\n transf = np.array([[-1, 1, 1], [2, 1, 1], [-1, -2, 1]],\n dtype=np.float) / 3\n\n elif \"I\" in self.get_space_group_symbol():\n transf = np.array([[-1, 1, 1], [1, -1, 1], [1, 1, -1]],\n dtype=np.float) / 2\n elif \"F\" in self.get_space_group_symbol():\n transf = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]],\n dtype=np.float) / 2\n elif \"C\" in self.get_space_group_symbol():\n if self.get_crystal_system() == \"monoclinic\":\n transf = np.array([[1, 1, 0], [-1, 1, 0], [0, 0, 2]],\n dtype=np.float) / 2\n else:\n transf = np.array([[1, -1, 0], [1, 1, 0], [0, 0, 2]],\n dtype=np.float) / 2\n else:\n transf = np.eye(3)\n\n new_sites = []\n latt = Lattice(np.dot(transf, conv.lattice.matrix))\n for s in conv:\n new_s = PeriodicSite(\n s.specie, s.coords, latt,\n to_unit_cell=True, coords_are_cartesian=True,\n properties=s.properties)\n if not any(map(new_s.is_periodic_image, new_sites)):\n new_sites.append(new_s)\n\n if lattice == \"rhombohedral\":\n prim = Structure.from_sites(new_sites)\n lengths, angles = prim.lattice.lengths_and_angles\n a = lengths[0]\n alpha = math.pi * angles[0] / 180\n new_matrix = [\n [a * cos(alpha / 2), -a * sin(alpha / 2), 0],\n [a * cos(alpha / 2), a * sin(alpha / 2), 0],\n [a * cos(alpha) / cos(alpha / 2), 0,\n a * math.sqrt(1 - (cos(alpha) ** 2 / (cos(alpha / 2) ** 2)))]]\n new_sites = []\n 
latt = Lattice(new_matrix)\n for s in prim:\n new_s = PeriodicSite(\n s.specie, s.frac_coords, latt,\n to_unit_cell=True, properties=s.properties)\n if not any(map(new_s.is_periodic_image, new_sites)):\n new_sites.append(new_s)\n return Structure.from_sites(new_sites)\n\n return Structure.from_sites(new_sites)\n\n def get_conventional_standard_structure(\n self, international_monoclinic=True):\n \"\"\"\n Gives a structure with a conventional cell according to certain\n standards. The standards are defined in Setyawan, W., & Curtarolo,\n S. (2010). High-throughput electronic band structure calculations:\n Challenges and tools. Computational Materials Science,\n 49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010\n They basically enforce as much as possible\n norm(a1)<norm(a2)<norm(a3)\n\n Returns:\n The structure in a conventional standardized cell\n \"\"\"\n tol = 1e-5\n struct = self.get_refined_structure()\n latt = struct.lattice\n latt_type = self.get_lattice_type()\n sorted_lengths = sorted(latt.abc)\n sorted_dic = sorted([{'vec': latt.matrix[i],\n 'length': latt.abc[i],\n 'orig_index': i} for i in [0, 1, 2]],\n key=lambda k: k['length'])\n\n if latt_type in (\"orthorhombic\", \"cubic\"):\n # you want to keep the c axis where it is\n # to keep the C- settings\n transf = np.zeros(shape=(3, 3))\n if self.get_space_group_symbol().startswith(\"C\"):\n transf[2] = [0, 0, 1]\n a, b = sorted(latt.abc[:2])\n sorted_dic = sorted([{'vec': latt.matrix[i],\n 'length': latt.abc[i],\n 'orig_index': i} for i in [0, 1]],\n key=lambda k: k['length'])\n for i in range(2):\n transf[i][sorted_dic[i]['orig_index']] = 1\n c = latt.abc[2]\n else:\n for i in range(len(sorted_dic)):\n transf[i][sorted_dic[i]['orig_index']] = 1\n a, b, c = sorted_lengths\n latt = Lattice.orthorhombic(a, b, c)\n\n elif latt_type == \"tetragonal\":\n # find the \"a\" vectors\n # it is basically the vector repeated two times\n transf = np.zeros(shape=(3, 3))\n a, b, c = sorted_lengths\n for d in range(len(sorted_dic)):\n transf[d][sorted_dic[d]['orig_index']] = 1\n\n if abs(b - c) < tol:\n a, c = c, a\n transf = np.dot([[0, 0, 1], [0, 1, 0], [1, 0, 0]], transf)\n latt = Lattice.tetragonal(a, c)\n elif latt_type in (\"hexagonal\", \"rhombohedral\"):\n # for the conventional cell representation,\n # we allways show the rhombohedral lattices as hexagonal\n\n # check first if we have the refined structure shows a rhombohedral\n # cell\n # if so, make a supercell\n a, b, c = latt.abc\n if np.all(np.abs([a - b, c - b, a - c]) < 0.001):\n struct.make_supercell(((1, -1, 0), (0, 1, -1), (1, 1, 1)))\n a, b, c = sorted(struct.lattice.abc)\n\n if abs(b - c) < 0.001:\n a, c = c, a\n new_matrix = [[a / 2, -a * math.sqrt(3) / 2, 0],\n [a / 2, a * math.sqrt(3) / 2, 0],\n [0, 0, c]]\n latt = Lattice(new_matrix)\n transf = np.eye(3, 3)\n\n elif latt_type == \"monoclinic\":\n # You want to keep the c axis where it is to keep the C- settings\n\n if self.get_space_group_operations().int_symbol.startswith(\"C\"):\n transf = np.zeros(shape=(3, 3))\n transf[2] = [0, 0, 1]\n sorted_dic = sorted([{'vec': latt.matrix[i],\n 'length': latt.abc[i],\n 'orig_index': i} for i in [0, 1]],\n key=lambda k: k['length'])\n a = sorted_dic[0]['length']\n b = sorted_dic[1]['length']\n c = latt.abc[2]\n new_matrix = None\n for t in itertools.permutations(list(range(2)), 2):\n m = latt.matrix\n landang = Lattice(\n [m[t[0]], m[t[1]], m[2]]).lengths_and_angles\n if landang[1][0] > 90:\n # if the angle is > 90 we invert a and b to get\n # an angle < 90\n landang = Lattice(\n 
[-m[t[0]], -m[t[1]], m[2]]).lengths_and_angles\n transf = np.zeros(shape=(3, 3))\n transf[0][t[0]] = -1\n transf[1][t[1]] = -1\n transf[2][2] = 1\n a, b, c = landang[0]\n alpha = math.pi * landang[1][0] / 180\n new_matrix = [[a, 0, 0],\n [0, b, 0],\n [0, c * cos(alpha), c * sin(alpha)]]\n continue\n\n elif landang[1][0] < 90:\n transf = np.zeros(shape=(3, 3))\n transf[0][t[0]] = 1\n transf[1][t[1]] = 1\n transf[2][2] = 1\n a, b, c = landang[0]\n alpha = math.pi * landang[1][0] / 180\n new_matrix = [[a, 0, 0],\n [0, b, 0],\n [0, c * cos(alpha), c * sin(alpha)]]\n\n if new_matrix is None:\n # this if is to treat the case\n # where alpha==90 (but we still have a monoclinic sg\n new_matrix = [[a, 0, 0],\n [0, b, 0],\n [0, 0, c]]\n transf = np.zeros(shape=(3, 3))\n for c in range(len(sorted_dic)):\n transf[c][sorted_dic[c]['orig_index']] = 1\n #if not C-setting\n else:\n # try all permutations of the axis\n # keep the ones with the non-90 angle=alpha\n # and b<c\n new_matrix = None\n for t in itertools.permutations(list(range(3)), 3):\n m = latt.matrix\n landang = Lattice(\n [m[t[0]], m[t[1]], m[t[2]]]).lengths_and_angles\n if landang[1][0] > 90 and landang[0][1] < landang[0][2]:\n landang = Lattice(\n [-m[t[0]], -m[t[1]], m[t[2]]]).lengths_and_angles\n transf = np.zeros(shape=(3, 3))\n transf[0][t[0]] = -1\n transf[1][t[1]] = -1\n transf[2][t[2]] = 1\n a, b, c = landang[0]\n alpha = math.pi * landang[1][0] / 180\n new_matrix = [[a, 0, 0],\n [0, b, 0],\n [0, c * cos(alpha), c * sin(alpha)]]\n continue\n elif landang[1][0] < 90 and landang[0][1] < landang[0][2]:\n transf = np.zeros(shape=(3, 3))\n transf[0][t[0]] = 1\n transf[1][t[1]] = 1\n transf[2][t[2]] = 1\n a, b, c = landang[0]\n alpha = math.pi * landang[1][0] / 180\n new_matrix = [[a, 0, 0],\n [0, b, 0],\n [0, c * cos(alpha), c * sin(alpha)]]\n if new_matrix is None:\n # this if is to treat the case\n # where alpha==90 (but we still have a monoclinic sg\n new_matrix = [[sorted_lengths[0], 0, 0],\n [0, sorted_lengths[1], 0],\n [0, 0, sorted_lengths[2]]]\n transf = np.zeros(shape=(3, 3))\n for c in range(len(sorted_dic)):\n transf[c][sorted_dic[c]['orig_index']] = 1\n\n if international_monoclinic:\n # The above code makes alpha the non-right angle.\n # The following will convert to proper international convention\n # that beta is the non-right angle.\n op = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]\n transf = np.dot(op, transf)\n new_matrix = np.dot(op, new_matrix)\n beta = Lattice(new_matrix).beta\n if beta < 90:\n op = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]\n transf = np.dot(op, transf)\n new_matrix = np.dot(op, new_matrix)\n\n latt = Lattice(new_matrix)\n\n elif latt_type == \"triclinic\":\n #we use a LLL Minkowski-like reduction for the triclinic cells\n struct = struct.get_reduced_structure(\"LLL\")\n\n a, b, c = latt.lengths_and_angles[0]\n alpha, beta, gamma = [math.pi * i / 180\n for i in latt.lengths_and_angles[1]]\n new_matrix = None\n test_matrix = [[a, 0, 0],\n [b * cos(gamma), b * sin(gamma), 0.0],\n [c * cos(beta),\n c * (cos(alpha) - cos(beta) * cos(gamma)) /\n sin(gamma),\n c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2\n - cos(beta) ** 2\n + 2 * cos(alpha) * cos(beta)\n * cos(gamma)) / sin(gamma)]]\n\n def is_all_acute_or_obtuse(m):\n recp_angles = np.array(Lattice(m).reciprocal_lattice.angles)\n return np.all(recp_angles <= 90) or np.all(recp_angles > 90)\n\n if is_all_acute_or_obtuse(test_matrix):\n transf = np.eye(3)\n new_matrix = test_matrix\n\n test_matrix = [[-a, 0, 0],\n [b * cos(gamma), b * sin(gamma), 0.0],\n [-c * 
cos(beta),\n -c * (cos(alpha) - cos(beta) * cos(gamma)) /\n sin(gamma),\n -c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2\n - cos(beta) ** 2\n + 2 * cos(alpha) * cos(beta)\n * cos(gamma)) / sin(gamma)]]\n\n if is_all_acute_or_obtuse(test_matrix):\n transf = [[-1, 0, 0],\n [0, 1, 0],\n [0, 0, -1]]\n new_matrix = test_matrix\n\n test_matrix = [[-a, 0, 0],\n [-b * cos(gamma), -b * sin(gamma), 0.0],\n [c * cos(beta),\n c * (cos(alpha) - cos(beta) * cos(gamma)) /\n sin(gamma),\n c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2\n - cos(beta) ** 2\n + 2 * cos(alpha) * cos(beta)\n * cos(gamma)) / sin(gamma)]]\n\n if is_all_acute_or_obtuse(test_matrix):\n transf = [[-1, 0, 0],\n [0, -1, 0],\n [0, 0, 1]]\n new_matrix = test_matrix\n\n test_matrix = [[a, 0, 0],\n [-b * cos(gamma), -b * sin(gamma), 0.0],\n [-c * cos(beta),\n -c * (cos(alpha) - cos(beta) * cos(gamma)) /\n sin(gamma),\n -c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2\n - cos(beta) ** 2\n + 2 * cos(alpha) * cos(beta)\n * cos(gamma)) / sin(gamma)]]\n if is_all_acute_or_obtuse(test_matrix):\n transf = [[1, 0, 0],\n [0, -1, 0],\n [0, 0, -1]]\n new_matrix = test_matrix\n\n latt = Lattice(new_matrix)\n\n new_coords = np.dot(transf, np.transpose(struct.frac_coords)).T\n new_struct = Structure(latt, struct.species_and_occu, new_coords,\n site_properties=struct.site_properties,\n to_unit_cell=True)\n return new_struct.get_sorted_structure()\n\n def get_kpoint_weights(self, kpoints, atol=1e-5):\n \"\"\"\n Calculate the weights for a list of kpoints.\n\n Args:\n kpoints (Sequence): Sequence of kpoints. np.arrays is fine. Note\n that the code does not check that the list of kpoints\n provided does not contain duplicates.\n atol (float): Tolerance for fractional coordinates comparisons.\n\n Returns:\n List of weights, in the SAME order as kpoints.\n \"\"\"\n kpts = np.array(kpoints)\n shift = []\n mesh = []\n for i in range(3):\n nonzero = [i for i in kpts[:, i] if abs(i) > 1e-5]\n if len(nonzero) != len(kpts):\n # gamma centered\n if not nonzero:\n mesh.append(1)\n else:\n m = np.abs(np.round(1/np.array(nonzero)))\n mesh.append(int(max(m)))\n shift.append(0)\n else:\n # Monk\n m = np.abs(np.round(0.5/np.array(nonzero)))\n mesh.append(int(max(m)))\n shift.append(1)\n\n mapping, grid = spglib.get_ir_reciprocal_mesh(\n np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)\n mapping = list(mapping)\n grid = (np.array(grid) + np.array(shift) * (0.5, 0.5, 0.5)) / mesh\n weights = []\n mapped = defaultdict(int)\n for k in kpoints:\n for i, g in enumerate(grid):\n if np.allclose(pbc_diff(k, g), (0, 0, 0), atol=atol):\n mapped[tuple(g)] += 1\n weights.append(mapping.count(mapping[i]))\n break\n if (len(mapped) != len(set(mapping))) or (\n not all([v == 1 for v in mapped.values()])):\n raise ValueError(\"Unable to find 1:1 corresponding between input \"\n \"kpoints and irreducible grid!\")\n return [w/sum(weights) for w in weights]\n\n\nclass PointGroupAnalyzer(object):\n \"\"\"\n A class to analyze the point group of a molecule. The general outline of\n the algorithm is as follows:\n\n 1. Center the molecule around its center of mass.\n 2. Compute the inertia tensor and the eigenvalues and eigenvectors.\n 3. Handle the symmetry detection based on eigenvalues.\n\n a. Linear molecules have one zero eigenvalue. Possible symmetry\n operations are C*v or D*v\n b. Asymetric top molecules have all different eigenvalues. The\n maximum rotational symmetry in such molecules is 2\n c. 
Symmetric top molecules have 1 unique eigenvalue, which gives a\n unique rotation axis. All axial point groups are possible\n except the cubic groups (T & O) and I.\n d. Spherical top molecules have all three eigenvalues equal. They\n have the rare T, O or I point groups.\n\n .. attribute:: sch_symbol\n\n Schoenflies symbol of the detected point group.\n \"\"\"\n inversion_op = SymmOp.inversion()\n\n def __init__(self, mol, tolerance=0.3, eigen_tolerance=0.01,\n matrix_tol=0.1):\n \"\"\"\n The default settings are usually sufficient.\n\n Args:\n mol (Molecule): Molecule to determine point group for.\n tolerance (float): Distance tolerance to consider sites as\n symmetrically equivalent. Defaults to 0.3 Angstrom.\n eigen_tolerance (float): Tolerance to compare eigen values of\n the inertia tensor. Defaults to 0.01.\n matrix_tol (float): Tolerance used to generate the full set of\n symmetry operations of the point group.\n \"\"\"\n self.mol = mol\n self.centered_mol = mol.get_centered_molecule()\n self.tol = tolerance\n self.eig_tol = eigen_tolerance\n self.mat_tol = matrix_tol\n self._analyze()\n if self.sch_symbol in [\"C1v\", \"C1h\"]:\n self.sch_symbol = \"Cs\"\n\n def _analyze(self):\n if len(self.centered_mol) == 1:\n self.sch_symbol = \"Kh\"\n else:\n inertia_tensor = np.zeros((3, 3))\n total_inertia = 0\n for site in self.centered_mol:\n c = site.coords\n wt = site.species_and_occu.weight\n for i in range(3):\n inertia_tensor[i, i] += wt * (c[(i + 1) % 3] ** 2\n + c[(i + 2) % 3] ** 2)\n for i, j in [(0, 1), (1, 2), (0, 2)]:\n inertia_tensor[i, j] += -wt * c[i] * c[j]\n inertia_tensor[j, i] += -wt * c[j] * c[i]\n total_inertia += wt * np.dot(c, c)\n\n # Normalize the inertia tensor so that it does not scale with size\n # of the system. This mitigates the problem of choosing a proper\n # comparison tolerance for the eigenvalues.\n inertia_tensor /= total_inertia\n eigvals, eigvecs = np.linalg.eig(inertia_tensor)\n self.principal_axes = eigvecs.T\n self.eigvals = eigvals\n v1, v2, v3 = eigvals\n eig_zero = abs(v1 * v2 * v3) < self.eig_tol ** 3\n eig_all_same = abs(v1 - v2) < self.eig_tol and abs(\n v1 - v3) < self.eig_tol\n eig_all_diff = abs(v1 - v2) > self.eig_tol and abs(\n v1 - v3) > self.eig_tol and abs(v2 - v3) > self.eig_tol\n\n self.rot_sym = []\n self.symmops = [SymmOp(np.eye(4))]\n if eig_zero:\n logger.debug(\"Linear molecule detected\")\n self._proc_linear()\n elif eig_all_same:\n logger.debug(\"Spherical top molecule detected\")\n self._proc_sph_top()\n elif eig_all_diff:\n logger.debug(\"Asymmetric top molecule detected\")\n self._proc_asym_top()\n else:\n logger.debug(\"Symmetric top molecule detected\")\n self._proc_sym_top()\n\n def _proc_linear(self):\n if self.is_valid_op(PointGroupAnalyzer.inversion_op):\n self.sch_symbol = \"D*h\"\n self.symmops.append(PointGroupAnalyzer.inversion_op)\n else:\n self.sch_symbol = \"C*v\"\n\n def _proc_asym_top(self):\n \"\"\"\n Handles assymetric top molecules, which cannot contain rotational\n symmetry larger than 2.\n \"\"\"\n self._check_R2_axes_asym()\n if len(self.rot_sym) == 0:\n logger.debug(\"No rotation symmetries detected.\")\n self._proc_no_rot_sym()\n elif len(self.rot_sym) == 3:\n logger.debug(\"Dihedral group detected.\")\n self._proc_dihedral()\n else:\n logger.debug(\"Cyclic group detected.\")\n self._proc_cyclic()\n\n def _proc_sym_top(self):\n \"\"\"\n Handles symetric top molecules which has one unique eigenvalue whose\n corresponding principal axis is a unique rotational axis. 
More complex\n handling required to look for R2 axes perpendicular to this unique\n axis.\n \"\"\"\n if abs(self.eigvals[0] - self.eigvals[1]) < self.eig_tol:\n ind = 2\n elif abs(self.eigvals[1] - self.eigvals[2]) < self.eig_tol:\n ind = 0\n else:\n ind = 1\n logger.debug(\"Eigenvalues = %s.\" % self.eigvals)\n unique_axis = self.principal_axes[ind]\n self._check_rot_sym(unique_axis)\n logger.debug(\"Rotation symmetries = %s\" % self.rot_sym)\n if len(self.rot_sym) > 0:\n self._check_perpendicular_r2_axis(unique_axis)\n\n if len(self.rot_sym) >= 2:\n self._proc_dihedral()\n elif len(self.rot_sym) == 1:\n self._proc_cyclic()\n else:\n self._proc_no_rot_sym()\n\n def _proc_no_rot_sym(self):\n \"\"\"\n Handles molecules with no rotational symmetry. Only possible point\n groups are C1, Cs and Ci.\n \"\"\"\n self.sch_symbol = \"C1\"\n if self.is_valid_op(PointGroupAnalyzer.inversion_op):\n self.sch_symbol = \"Ci\"\n self.symmops.append(PointGroupAnalyzer.inversion_op)\n else:\n for v in self.principal_axes:\n mirror_type = self._find_mirror(v)\n if not mirror_type == \"\":\n self.sch_symbol = \"Cs\"\n break\n\n def _proc_cyclic(self):\n \"\"\"\n Handles cyclic group molecules.\n \"\"\"\n main_axis, rot = max(self.rot_sym, key=lambda v: v[1])\n self.sch_symbol = \"C{}\".format(rot)\n mirror_type = self._find_mirror(main_axis)\n if mirror_type == \"h\":\n self.sch_symbol += \"h\"\n elif mirror_type == \"v\":\n self.sch_symbol += \"v\"\n elif mirror_type == \"\":\n if self.is_valid_op(SymmOp.rotoreflection(main_axis,\n angle=180 / rot)):\n self.sch_symbol = \"S{}\".format(2 * rot)\n\n def _proc_dihedral(self):\n \"\"\"\n Handles dihedral group molecules, i.e those with intersecting R2 axes\n and a main axis.\n \"\"\"\n main_axis, rot = max(self.rot_sym, key=lambda v: v[1])\n self.sch_symbol = \"D{}\".format(rot)\n mirror_type = self._find_mirror(main_axis)\n if mirror_type == \"h\":\n self.sch_symbol += \"h\"\n elif not mirror_type == \"\":\n self.sch_symbol += \"d\"\n\n def _check_R2_axes_asym(self):\n \"\"\"\n Test for 2-fold rotation along the principal axes. Used to handle\n asymetric top molecules.\n \"\"\"\n for v in self.principal_axes:\n op = SymmOp.from_axis_angle_and_translation(v, 180)\n if self.is_valid_op(op):\n self.symmops.append(op)\n self.rot_sym.append((v, 2))\n\n def _find_mirror(self, axis):\n \"\"\"\n Looks for mirror symmetry of specified type about axis. Possible\n types are \"h\" or \"vd\". Horizontal (h) mirrors are perpendicular to\n the axis while vertical (v) or diagonal (d) mirrors are parallel. 
v\n mirrors has atoms lying on the mirror plane while d mirrors do\n not.\n \"\"\"\n mirror_type = \"\"\n\n # First test whether the axis itself is the normal to a mirror plane.\n if self.is_valid_op(SymmOp.reflection(axis)):\n self.symmops.append(SymmOp.reflection(axis))\n mirror_type = \"h\"\n else:\n # Iterate through all pairs of atoms to find mirror\n for s1, s2 in itertools.combinations(self.centered_mol, 2):\n if s1.species_and_occu == s2.species_and_occu:\n normal = s1.coords - s2.coords\n if np.dot(normal, axis) < self.tol:\n op = SymmOp.reflection(normal)\n if self.is_valid_op(op):\n self.symmops.append(op)\n if len(self.rot_sym) > 1:\n mirror_type = \"d\"\n for v, r in self.rot_sym:\n if not np.linalg.norm(v - axis) < self.tol:\n if np.dot(v, normal) < self.tol:\n mirror_type = \"v\"\n break\n else:\n mirror_type = \"v\"\n break\n\n return mirror_type\n\n def _get_smallest_set_not_on_axis(self, axis):\n \"\"\"\n Returns the smallest list of atoms with the same species and\n distance from origin AND does not lie on the specified axis. This\n maximal set limits the possible rotational symmetry operations,\n since atoms lying on a test axis is irrelevant in testing rotational\n symmetryOperations.\n \"\"\"\n\n def not_on_axis(site):\n v = np.cross(site.coords, axis)\n return np.linalg.norm(v) > self.tol\n\n valid_sets = []\n origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)\n for test_set in dist_el_sites.values():\n valid_set = list(filter(not_on_axis, test_set))\n if len(valid_set) > 0:\n valid_sets.append(valid_set)\n\n return min(valid_sets, key=lambda s: len(s))\n\n def _check_rot_sym(self, axis):\n \"\"\"\n Determines the rotational symmetry about supplied axis. Used only for\n symmetric top molecules which has possible rotational symmetry\n operations > 2.\n \"\"\"\n min_set = self._get_smallest_set_not_on_axis(axis)\n max_sym = len(min_set)\n for i in range(max_sym, 0, -1):\n if max_sym % i != 0:\n continue\n op = SymmOp.from_axis_angle_and_translation(axis, 360 / i)\n rotvalid = self.is_valid_op(op)\n if rotvalid:\n self.symmops.append(op)\n self.rot_sym.append((axis, i))\n return i\n return 1\n\n def _check_perpendicular_r2_axis(self, axis):\n \"\"\"\n Checks for R2 axes perpendicular to unique axis. 
For handling\n symmetric top molecules.\n \"\"\"\n min_set = self._get_smallest_set_not_on_axis(axis)\n for s1, s2 in itertools.combinations(min_set, 2):\n test_axis = np.cross(s1.coords - s2.coords, axis)\n if np.linalg.norm(test_axis) > self.tol:\n op = SymmOp.from_axis_angle_and_translation(test_axis, 180)\n r2present = self.is_valid_op(op)\n if r2present:\n self.symmops.append(op)\n self.rot_sym.append((test_axis, 2))\n return True\n\n def _proc_sph_top(self):\n \"\"\"\n Handles Sperhical Top Molecules, which belongs to the T, O or I point\n groups.\n \"\"\"\n self._find_spherical_axes()\n if len(self.rot_sym) == 0:\n logger.debug(\"Accidental speherical top!\")\n self._proc_sym_top()\n main_axis, rot = max(self.rot_sym, key=lambda v: v[1])\n if rot < 3:\n logger.debug(\"Accidental speherical top!\")\n self._proc_sym_top()\n elif rot == 3:\n mirror_type = self._find_mirror(main_axis)\n if mirror_type != \"\":\n if self.is_valid_op(PointGroupAnalyzer.inversion_op):\n self.symmops.append(PointGroupAnalyzer.inversion_op)\n self.sch_symbol = \"Th\"\n else:\n self.sch_symbol = \"Td\"\n else:\n self.sch_symbol = \"T\"\n elif rot == 4:\n if self.is_valid_op(PointGroupAnalyzer.inversion_op):\n self.symmops.append(PointGroupAnalyzer.inversion_op)\n self.sch_symbol = \"Oh\"\n else:\n self.sch_symbol = \"O\"\n elif rot == 5:\n if self.is_valid_op(PointGroupAnalyzer.inversion_op):\n self.symmops.append(PointGroupAnalyzer.inversion_op)\n self.sch_symbol = \"Ih\"\n else:\n self.sch_symbol = \"I\"\n\n def _find_spherical_axes(self):\n \"\"\"\n Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point\n group T molecules have only one unique 3-fold and one unique 2-fold\n axis. O molecules have one unique 4, 3 and 2-fold axes. I molecules\n have a unique 5-fold axis.\n \"\"\"\n rot_present = defaultdict(bool)\n origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)\n test_set = min(dist_el_sites.values(), key=lambda s: len(s))\n coords = [s.coords for s in test_set]\n for c1, c2, c3 in itertools.combinations(coords, 3):\n for cc1, cc2 in itertools.combinations([c1, c2, c3], 2):\n if not rot_present[2]:\n test_axis = cc1 + cc2\n if np.linalg.norm(test_axis) > self.tol:\n op = SymmOp.from_axis_angle_and_translation(test_axis,\n 180)\n rot_present[2] = self.is_valid_op(op)\n if rot_present[2]:\n self.symmops.append(op)\n self.rot_sym.append((test_axis, 2))\n\n test_axis = np.cross(c2 - c1, c3 - c1)\n if np.linalg.norm(test_axis) > self.tol:\n for r in (3, 4, 5):\n if not rot_present[r]:\n op = SymmOp.from_axis_angle_and_translation(\n test_axis, 360 / r)\n rot_present[r] = self.is_valid_op(op)\n if rot_present[r]:\n self.symmops.append(op)\n self.rot_sym.append((test_axis, r))\n break\n if rot_present[2] and rot_present[3] and (\n rot_present[4] or rot_present[5]):\n break\n\n def get_pointgroup(self):\n \"\"\"\n Returns a PointGroup object for the molecule.\n \"\"\"\n return PointGroupOperations(self.sch_symbol, self.symmops, self.mat_tol)\n\n def is_valid_op(self, symmop):\n \"\"\"\n Check if a particular symmetry operation is a valid symmetry operation\n for a molecule, i.e., the operation maps all atoms to another\n equivalent atom.\n\n Args:\n symmop (SymmOp): Symmetry operation to test.\n\n Returns:\n (bool): Whether SymmOp is valid for Molecule.\n \"\"\"\n coords = self.centered_mol.cart_coords\n for site in self.centered_mol:\n coord = symmop.operate(site.coords)\n ind = find_in_coord_list(coords, coord, self.tol)\n if not (len(ind) == 1 and 
self.centered_mol[ind[0]].species_and_occu == site.species_and_occu):\n return False\n return True\n\n\ndef cluster_sites(mol, tol):\n \"\"\"\n Cluster sites based on distance and species type.\n\n Args:\n mol (Molecule): Molecule **with origin at center of mass**.\n tol (float): Tolerance to use.\n\n Returns:\n (origin_site, clustered_sites): origin_site is a site at the center\n of mass (None if there are no origin atoms). clustered_sites is a\n dict of {(avg_dist, species_and_occu): [list of sites]}\n \"\"\"\n # Cluster works for dim > 2 data. We just add a dummy 0 for second\n # coordinate.\n dists = [[np.linalg.norm(site.coords), 0] for site in mol]\n import scipy.cluster as spcluster\n f = spcluster.hierarchy.fclusterdata(dists, tol, criterion='distance')\n clustered_dists = defaultdict(list)\n for i, site in enumerate(mol):\n clustered_dists[f[i]].append(dists[i])\n avg_dist = {label: np.mean(val) for label, val in clustered_dists.items()}\n clustered_sites = defaultdict(list)\n origin_site = None\n for i, site in enumerate(mol):\n if avg_dist[f[i]] < tol:\n origin_site = site\n else:\n clustered_sites[(avg_dist[f[i]],\n site.species_and_occu)].append(site)\n return origin_site, clustered_sites\n\n\ndef generate_full_symmops(symmops, tol, max_recursion_depth=300):\n \"\"\"\n Recursive algorithm to permute through all possible combinations of the\n initially supplied symmetry operations to arrive at a complete set of\n operations mapping a single atom to all other equivalent atoms in the\n point group. This assumes that the initial number already uniquely\n identifies all operations.\n\n Args:\n symmops ([SymmOp]): Initial set of symmetry operations.\n\n Returns:\n Full set of symmetry operations.\n \"\"\"\n\n a = [o.affine_matrix for o in symmops]\n\n if len(symmops) > max_recursion_depth:\n logger.debug(\"Generation of symmetry operations in infinite loop. \" +\n \"Possible error in initial operations or tolerance too \"\n \"low.\")\n else:\n for op1, op2 in itertools.product(symmops, symmops):\n m = np.dot(op1.affine_matrix, op2.affine_matrix)\n d = np.abs(a - m) < tol\n if not np.any(np.all(np.all(d, axis=2), axis=1)):\n return generate_full_symmops(symmops + [SymmOp(m)], tol, \n max_recursion_depth)\n\n return symmops\n\n\nclass SpacegroupOperations(list):\n \"\"\"\n Represents a space group, which is a collection of symmetry operations.\n\n Args:\n int_symbol (str): International symbol of the spacegroup.\n int_number (int): International number of the spacegroup.\n symmops ([SymmOp]): Symmetry operations associated with the\n spacegroup.\n \"\"\"\n\n def __init__(self, int_symbol, int_number, symmops):\n self.int_symbol = int_symbol\n self.int_number = int_number\n super(SpacegroupOperations, self).__init__(symmops)\n\n def are_symmetrically_equivalent(self, sites1, sites2, symm_prec=1e-3):\n \"\"\"\n Given two sets of PeriodicSites, test if they are actually\n symmetrically equivalent under this space group. 
Useful, for example,\n if you want to test if selecting atoms 1 and 2 out of a set of 4 atoms\n are symmetrically the same as selecting atoms 3 and 4, etc.\n\n One use is in PartialRemoveSpecie transformation to return only\n symmetrically distinct arrangements of atoms.\n\n Args:\n sites1 ([Site]): 1st set of sites\n sites2 ([Site]): 2nd set of sites\n symm_prec (float): Tolerance in atomic distance to test if atoms\n are symmetrically similar.\n\n Returns:\n (bool): Whether the two sets of sites are symmetrically\n equivalent.\n \"\"\"\n def in_sites(site):\n for test_site in sites1:\n if test_site.is_periodic_image(site, symm_prec, False):\n return True\n return False\n\n for op in self:\n newsites2 = [PeriodicSite(site.species_and_occu,\n op.operate(site.frac_coords),\n site.lattice) for site in sites2]\n for site in newsites2:\n if not in_sites(site):\n break\n else:\n return True\n return False\n\n def __str__(self):\n return \"{} ({}) spacegroup\".format(self.int_symbol, self.int_number)\n\n\nclass PointGroupOperations(list):\n \"\"\"\n Defines a point group, which is essentially a sequence of symmetry\n operations.\n\n Args:\n sch_symbol (str): Schoenflies symbol of the point group.\n operations ([SymmOp]): Initial set of symmetry operations. It is\n sufficient to provide only just enough operations to generate\n the full set of symmetries.\n tol (float): Tolerance to generate the full set of symmetry\n operations.\n\n .. attribute:: sch_symbol\n\n Schoenflies symbol of the point group.\n \"\"\"\n def __init__(self, sch_symbol, operations, tol=0.1):\n self.sch_symbol = sch_symbol\n super(PointGroupOperations, self).__init__(\n generate_full_symmops(operations, tol))\n\n def __str__(self):\n return self.sch_symbol\n\n def __repr__(self):\n return self.__str__()\n\n",
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\nimport numpy as np\n\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.util.coord_utils import get_linear_interpolated_value\nfrom monty.json import MSONable\n\n\"\"\"\nThis module defines classes to represent the phonon density of states, etc.\n\"\"\"\n\n\nclass PhononDos(MSONable):\n \"\"\"\n Basic DOS object. All other DOS objects are extended versions of this\n object.\n\n Args:\n frequencies: A sequences of frequencies in THz\n densities: A list representing the density of states.\n \"\"\"\n\n def __init__(self, frequencies, densities):\n self.frequencies = np.array(frequencies)\n self.densities = np.array(densities)\n\n def get_smeared_densities(self, sigma):\n \"\"\"\n Returns the densities, but with a Gaussian smearing of\n std dev sigma applied.\n\n Args:\n sigma: Std dev of Gaussian smearing function.\n\n Returns:\n Gaussian-smeared densities.\n \"\"\"\n\n from scipy.ndimage.filters import gaussian_filter1d\n diff = [self.frequencies[i + 1] - self.frequencies[i]\n for i in range(len(self.frequencies) - 1)]\n avgdiff = sum(diff) / len(diff)\n\n smeared_dens = gaussian_filter1d(self.densities, sigma / avgdiff)\n return smeared_dens\n\n def __add__(self, other):\n \"\"\"\n Adds two DOS together. Checks that frequency scales are the same.\n Otherwise, a ValueError is thrown.\n\n Args:\n other: Another DOS object.\n\n Returns:\n Sum of the two DOSs.\n \"\"\"\n if not all(np.equal(self.frequencies, other.frequencies)):\n raise ValueError(\"Frequencies of both DOS are not compatible!\")\n densities = self.frequencies + other.frequencies\n return PhononDos(self.frequencies, densities)\n\n def __radd__(self, other):\n \"\"\"\n Reflected addition of two DOS objects\n\n Args:\n other: Another DOS object.\n\n Returns:\n Sum of the two DOSs.\n \"\"\"\n\n return self.__add__(other)\n\n def get_interpolated_value(self, frequency):\n \"\"\"\n Returns interpolated density for a particular frequency.\n\n Args:\n frequency: frequency to return the density for.\n \"\"\"\n return get_linear_interpolated_value(self.frequencies,\n self.densities, frequency)\n\n def __str__(self):\n \"\"\"\n Returns a string which can be easily plotted (using gnuplot).\n \"\"\"\n stringarray = [\"#{:30s} {:30s}\".format(\"Frequency\", \"Density\")]\n for i, frequency in enumerate(self.frequencies):\n stringarray.append(\"{:.5f} {:.5f}\"\n .format(frequency, self.densities[i]))\n return \"\\n\".join(stringarray)\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Returns PhononDos object from dict representation of PhononDos.\n \"\"\"\n return cls(d[\"frequencies\"], d[\"densities\"])\n\n def as_dict(self):\n \"\"\"\n Json-serializable dict representation of PhononDos.\n \"\"\"\n return {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"frequencies\": list(self.frequencies),\n \"densities\": list(self.densities)}\n\n\nclass CompletePhononDos(PhononDos):\n \"\"\"\n This wrapper class defines a total dos, and also provides a list of PDos.\n\n Args:\n structure: Structure associated with this particular DOS.\n total_dos: total Dos for structure\n pdoss: The pdoss are supplied as an {Site: Densities}\n\n .. 
attribute:: pdos\n\n Dict of partial densities of the form {Site:Densities}\n \"\"\"\n def __init__(self, structure, total_dos, pdoss):\n super(CompletePhononDos, self).__init__(\n frequencies=total_dos.frequencies, densities=total_dos.densities)\n self.pdos = pdoss\n self.structure = structure\n\n def get_site_dos(self, site):\n \"\"\"\n Get the Dos for a site.\n\n Args:\n site: Site in Structure associated with CompletePhononDos.\n\n Returns:\n PhononDos containing summed orbital densities for site.\n \"\"\"\n return PhononDos(self.frequencies, self.pdos[site])\n\n def get_element_dos(self):\n \"\"\"\n Get element projected Dos.\n\n Returns:\n dict of {Element: Dos}\n \"\"\"\n\n el_dos = {}\n for site, atom_dos in self.pdos.items():\n el = site.specie\n if el not in el_dos:\n el_dos[el] = atom_dos\n else:\n el_dos[el] += atom_dos\n return {el: PhononDos(self.frequencies, densities)\n for el, densities in el_dos.items()}\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Returns CompleteDos object from dict representation.\n \"\"\"\n tdos = PhononDos.from_dict(d)\n struct = Structure.from_dict(d[\"structure\"])\n pdoss = {}\n for at, pdos in zip(struct, d[\"pdos\"]):\n pdoss[at] = pdos\n\n return cls(struct, tdos, pdoss)\n\n def as_dict(self):\n \"\"\"\n Json-serializable dict representation of CompletePhononDos.\n \"\"\"\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"structure\": self.structure.as_dict(),\n \"frequencies\": list(self.frequencies),\n \"densities\": list(self.densities),\n \"pdos\": []}\n if len(self.pdos) > 0:\n for at in self.structure:\n d[\"pdos\"].append(list(self.pdos[at]))\n return d\n\n def __str__(self):\n return \"Complete phonon DOS for \" + str(self.structure)\n"
] | [
[
"numpy.dot",
"scipy.spatial.distance.correlation",
"numpy.abs",
"numpy.reshape",
"numpy.eye",
"numpy.sort",
"numpy.genfromtxt",
"numpy.all",
"numpy.loadtxt",
"numpy.linalg.eigh",
"numpy.trim_zeros",
"numpy.array",
"numpy.vstack"
],
[
"numpy.dot",
"numpy.linalg.solve",
"numpy.logical_and",
"numpy.all",
"numpy.array",
"numpy.sum"
],
[
"numpy.dot",
"numpy.abs",
"numpy.unique",
"numpy.linalg.inv",
"numpy.linalg.eig",
"numpy.eye",
"numpy.linalg.norm",
"numpy.transpose",
"numpy.all",
"numpy.mean",
"numpy.cross",
"numpy.array",
"scipy.cluster.hierarchy.fclusterdata",
"numpy.zeros"
],
[
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.array",
"numpy.equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
petrapoklukar/DCA | [
"e5b3f3481433306a4b33e712272f8bbf5e9d05ce"
] | [
"dca/visualization.py"
] | [
"from dca.DCA import DelaunayGraph\nfrom dca.schemes import DelaunayGraphVisualizer\nimport numpy as np\nimport os\nimport matplotlib as mpl\n\nif not \"DISPLAY\" in os.environ:\n print(\"no display found. Using non-interactive Agg backend\")\n mpl.use(\"Agg\")\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib.collections import LineCollection\nfrom typing import Optional\n\n\n# -------------------------------------------------------------------------- #\n# Matplotlib settings\n# -------------------------------------------------------------------------- #\nSMALL_SIZE = 12\nMEDIUM_SIZE = 15\n\nplt.rc(\"font\", size=SMALL_SIZE) # controls default text sizes\nplt.rc(\"axes\", titlesize=SMALL_SIZE) # fontsize of the axes title\nplt.rc(\"axes\", labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc(\"xtick\", labelsize=MEDIUM_SIZE) # fontsize of the tick labels\nplt.rc(\"ytick\", labelsize=MEDIUM_SIZE) # fontsize of the tick labels\nplt.rc(\"legend\", fontsize=MEDIUM_SIZE) # legend fontsize\nplt.rc(\"figure\", titlesize=SMALL_SIZE) # fontsize of the figure title\n\nR_color, E_color = \"C0\", \"C1\"\n\n# -------------------------------------------------------------------------- #\n# DelaunayGeomCA: visualization\n# -------------------------------------------------------------------------- #\ndef get_color(edge, num_R):\n \"\"\"\n Gets the color of the edge.\n :param edge: edge given as a list of two indices.\n :param num_R: number of R points in the graph.\n :return: color of the edge and its zorder\n \"\"\"\n R_color, E_color = \"C0\", \"C1\"\n edge = sorted(edge)\n if edge[0] < num_R:\n if edge[1] >= num_R:\n comp_color = \"gray\"\n zorder = 10\n else:\n comp_color = R_color\n zorder = 5\n else:\n comp_color = E_color\n zorder = 5\n return comp_color, zorder\n\n\ndef _plot_Delaunay_graph(\n G_visualizer: DelaunayGraphVisualizer,\n edges: np.ndarray,\n filename: str,\n root: str,\n vertices: Optional[np.ndarray] = None,\n labels: Optional[np.ndarray] = None,\n figsize: tuple = (5, 5),\n keep_range: bool = False,\n):\n \"\"\"\n Plots a Delaunay graph.\n :param G_visualizer: DelaunayGraphVisualizer object.\n :param edges: array of edges to plot.\n :param filename: filename of the image.\n :param root: root directory of the experiment.\n :param vertices: array of vertices to plot.\n :param labels: array of vertex labels.\n :param figsize: size of the figure.\n :param keep_range: whether to remember current xlim and ylim.\n :return: xlim and ylim if keep_range else None\n \"\"\"\n input_data = G_visualizer.get_input_array_data()\n Rplot_kwds = {\"alpha\": 0.7, \"s\": 50, \"linewidths\": 0}\n Eplot_kwds = {\"alpha\": 0.7, \"s\": 50, \"linewidths\": 0, \"marker\": \"X\"}\n Rvertices, Evertices = (\n input_data[: G_visualizer.num_R],\n input_data[G_visualizer.num_R :],\n )\n\n if vertices is not None:\n Rcolors = np.empty(shape=G_visualizer.num_R).astype(str)\n Rcolors[vertices[vertices < G_visualizer.num_R].astype(int)] = R_color\n Rcolors[\n np.setdiff1d(\n np.arange(G_visualizer.num_R), vertices[vertices < G_visualizer.num_R]\n )\n ] = \"gray\"\n\n Ecolors = np.empty(shape=G_visualizer.num_E).astype(str)\n Ecolors[\n vertices[vertices >= G_visualizer.num_R].astype(int) - G_visualizer.num_R\n ] = E_color\n Ecolors[\n np.setdiff1d(\n np.arange(G_visualizer.num_E).astype(int),\n vertices[vertices >= G_visualizer.num_R].astype(int)\n - G_visualizer.num_R,\n )\n ] = \"gray\"\n\n if labels is not None:\n labels = labels[vertices]\n else:\n Rcolors = 
np.repeat(R_color, G_visualizer.num_R).astype(str)\n Ecolors = np.repeat(E_color, G_visualizer.num_E).astype(str)\n\n plt.figure(figsize=figsize)\n plt.clf()\n # Plot vertices\n if labels is not None:\n plt.scatter(\n Rvertices.T[0], Rvertices.T[1], c=labels[: G_visualizer.num_R], **Rplot_kwds\n )\n plt.scatter(\n Evertices.T[0], Evertices.T[1], c=labels[G_visualizer.num_R :], **Eplot_kwds\n )\n\n else:\n plt.scatter(Rvertices.T[0], Rvertices.T[1], color=Rcolors, **Rplot_kwds)\n plt.scatter(Evertices.T[0], Evertices.T[1], color=Ecolors, **Eplot_kwds)\n\n # Plot edges\n # draw edges of correct color\n for e in edges:\n e0, e1 = int(e[0]), int(e[1])\n start = (\n Rvertices[e0]\n if e0 < G_visualizer.num_R\n else Evertices[e0 - G_visualizer.num_R]\n )\n end = (\n Rvertices[e1]\n if e1 < G_visualizer.num_R\n else Evertices[e1 - G_visualizer.num_R]\n )\n color, zorder = get_color(e, G_visualizer.num_R)\n plt.plot(\n [start[0], end[0]],\n [start[1], end[1]],\n \"-\",\n linewidth=1.0,\n color=color,\n zorder=zorder,\n )\n plt.axis(\"off\")\n plt.tight_layout()\n save_path = os.path.join(root, filename)\n if keep_range:\n assert G_visualizer.xlim is not None and G_visualizer.ylim is not None\n plt.xlim(*G_visualizer.xlim)\n plt.ylim(*G_visualizer.ylim)\n current_xlim = plt.xlim()\n current_ylim = plt.ylim()\n plt.savefig(save_path)\n plt.clf()\n plt.close()\n return current_xlim, current_ylim\n\n\ndef _plot_Delaunay_graph_colorbar(\n G_visualizer: DelaunayGraphVisualizer,\n edges: np.ndarray,\n distances: np.ndarray,\n filename: str,\n root: str,\n figsize: tuple = (5, 5),\n):\n \"\"\"\n Plots a Delaunay graph with colored edge lengths.\n :param G_visualizer: DelaunayGraphVisualizer object.\n :param edges: array of edges to plot.\n :param distances: array of edge lengths.\n :param filename: filename of the image.\n :param root: root directory of the experiment.\n :param figsize: size of the figure.\n \"\"\"\n input_data = G_visualizer.get_input_array_data()\n Rplot_kwds = {\"alpha\": 0.7, \"s\": 50, \"linewidths\": 0}\n Eplot_kwds = {\"alpha\": 0.7, \"s\": 50, \"linewidths\": 0, \"marker\": \"X\"}\n Rvertices, Evertices = (\n input_data[: G_visualizer.num_R],\n input_data[G_visualizer.num_R :],\n )\n\n # Plot vertices\n # Plot edges, color = distance\n plt.figure(figsize=figsize)\n axis = plt.gca()\n segments, colors = [], []\n for e in edges:\n start = (\n Rvertices[e[0]]\n if e[0] < G_visualizer.num_R\n else Evertices[e[0] - G_visualizer.num_R]\n )\n end = (\n Rvertices[e[1]]\n if e[1] < G_visualizer.num_R\n else Evertices[e[1] - G_visualizer.num_R]\n )\n segments.append([start, end])\n colors.append(distances[e[0], e[1]])\n colors = np.array(colors)\n lc = LineCollection(segments, cmap=\"viridis_r\")\n lc.set_array(colors)\n axis.add_artist(lc)\n cb = plt.colorbar(lc, ax=axis)\n cb.ax.set_ylabel(filename)\n\n Rcolors = np.repeat(R_color, G_visualizer.num_R).astype(str)\n Ecolors = np.repeat(E_color, G_visualizer.num_E).astype(str)\n axis.scatter(Rvertices.T[0], Rvertices.T[1], color=Rcolors, **Rplot_kwds)\n axis.scatter(Evertices.T[0], Evertices.T[1], color=Ecolors, **Eplot_kwds)\n axis.set_xlim(np.min(input_data[:, 0]) - 1, np.max(input_data[:, 0]) + 1)\n axis.set_ylim(np.min(input_data[:, 1]) - 1, np.max(input_data[:, 1]) + 1)\n save_path = os.path.join(root, filename)\n plt.savefig(save_path)\n plt.clf()\n plt.close()\n\n\ndef _plot_isolated_components(\n G: DelaunayGraph, G_visualizer: DelaunayGraphVisualizer, root: str\n):\n \"\"\"\n Plots outliers in a distilled Delaunay graph.\n :param 
G: Delaunay graph.\n :param G_visualizer: DelaunayGraphVisualizer object.\n :param root: root directory of the experiment.\n \"\"\"\n # Get outliers\n n_components = len(G.comp_stats)\n R_outliers, E_outliers = [], []\n for i in range(G.first_trivial_component_idx, n_components):\n if G.comp_stats[i].Ridx.size == 1:\n R_outliers.append(G.comp_stats[i].Ridx.item())\n if G.comp_stats[i].Eidx.size == 1:\n E_outliers.append(G.comp_stats[i].Eidx.item())\n vertices = np.concatenate([R_outliers, np.array(E_outliers) + G.num_R])\n _plot_Delaunay_graph(\n G_visualizer,\n edges=np.array([]),\n filename=\"components_isolated\",\n root=root,\n vertices=vertices,\n keep_range=True,\n )\n\n\ndef _plot_RE_components_quality(\n G: DelaunayGraph,\n root: str,\n annotate_largest: bool = True,\n min_comp_size: int = 0,\n display_smaller: bool = False,\n figsize: tuple = (10, 5),\n):\n \"\"\"\n Visualizes components quality as a scatter plot.\n :param G: Delaunay graph.\n :param root: root directory of the experiment.\n :param annotate_largest: if annotate the size (in percentage) of the largest component.\n :param min_comp_size: minimum size (number of vertices) of the components to visualize.\n :param display_smaller: if display aggregated components with size smaller than min_comp_size.\n :param figsize: size of the plot.\n \"\"\"\n n_comp = len(G.comp_stats)\n total_n_pts = G.num_R + G.num_E\n max_score, last_display_comp = 0, 0\n small_R_comp, small_E_comp, small_RE_comp = 0, 0, 0\n quality_scores, ticks_labels = [], []\n fig, ax = plt.subplots(figsize=figsize)\n for comp_id in range(n_comp):\n compR = G.comp_stats[comp_id].Ridx\n compE = G.comp_stats[comp_id].Eidx\n comp_n_points = len(compR) + len(compE)\n if comp_n_points >= min_comp_size:\n comp_quality = np.round(G.comp_stats[comp_id].comp_quality, 2)\n max_score = max(max_score, comp_quality)\n last_display_comp = comp_id + 1\n quality_scores.append(comp_quality)\n if len(compR) != 0:\n if len(compE) != 0:\n comp_color = \"gray\"\n else:\n comp_color = R_color\n else:\n comp_color = E_color\n\n ax.scatter(\n comp_id,\n comp_quality,\n c=comp_color,\n linestyle=\"--\",\n s=1000 * (comp_n_points) / total_n_pts,\n alpha=0.8,\n zorder=10,\n )\n else:\n if len(compR) != 0:\n if len(compE) != 0:\n small_RE_comp += 1\n else:\n small_R_comp += 1\n else:\n small_E_comp += 1\n\n if min_comp_size > 0 and display_smaller:\n if small_RE_comp + small_R_comp + small_E_comp > 0:\n ticks_labels = [last_display_comp]\n\n if small_RE_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_RE_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=\"gray\")\n\n if small_R_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_R_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=R_color)\n\n if small_E_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_E_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=E_color)\n\n # Annotate the largest component\n if annotate_largest:\n largest_comp_size = len(G.comp_stats[0].Ridx) + len(G.comp_stats[0].Eidx)\n ax.annotate(\n round(largest_comp_size / total_n_pts, 2),\n xy=(0, G.comp_stats[0].comp_quality + 0.03),\n ha=\"center\",\n va=\"bottom\",\n color=\"k\",\n )\n if max_score == 0:\n ax.plot(0, G.comp_stats[0].comp_quality, \"kX\")\n\n ax.plot(\n np.arange(last_display_comp),\n quality_scores,\n color=\"gray\",\n linestyle=\"--\",\n alpha=0.5,\n zorder=0,\n )\n displayed_ticks = np.arange(\n 
last_display_comp, step=max(int(last_display_comp / 10), 1)\n )\n if min_comp_size == 0:\n ax.set_xticks(displayed_ticks)\n ax.set_xticklabels(displayed_ticks)\n else:\n new_ticks = np.arange(\n last_display_comp, last_display_comp + len(ticks_labels) * 2, 2\n )\n ax.set_xticks(np.concatenate([displayed_ticks, new_ticks]))\n ax.set_xticklabels(list(displayed_ticks) + ticks_labels)\n max_score = 1.0 if max_score == 0 else max_score\n ax.set_ylim((-0.05, max_score + 0.1))\n ax.set_yticks(np.arange(0, max_score + 0.1, 0.1))\n\n # ax.tick_params(axis='x', rotation=45)\n ax.set_xlabel(\"component index\")\n ax.set_ylabel(\"component quality\")\n legend_elements = [\n Line2D(\n [0],\n [0],\n markerfacecolor=R_color,\n markersize=10,\n label=\"R\",\n marker=\"o\",\n color=\"w\",\n ),\n Line2D(\n [0],\n [0],\n markerfacecolor=E_color,\n markersize=10,\n label=\"E\",\n marker=\"o\",\n color=\"w\",\n ),\n Line2D(\n [0],\n [0],\n markerfacecolor=\"gray\",\n markersize=10,\n label=\"mix\",\n marker=\"o\",\n color=\"w\",\n ),\n ]\n ax.legend(\n handles=legend_elements,\n ncol=len(legend_elements),\n loc=\"upper center\",\n framealpha=0.5,\n )\n name = \"component_quality_min_size{0}_annotated{1}_displaysmaller{2}\".format(\n min_comp_size, int(annotate_largest), int(display_smaller)\n )\n path = os.path.join(root, name)\n plt.tight_layout()\n plt.savefig(path)\n plt.clf()\n plt.close()\n\n\ndef _plot_RE_components_consistency(\n G: DelaunayGraph,\n root: str,\n annotate_largest: bool = True,\n min_comp_size: int = 0,\n display_smaller: bool = False,\n figsize: tuple = (10, 5),\n):\n \"\"\"\n Visualizes components consistency as a scatter plot.\n :param G: Delaunay graph.\n :param root: root directory of the experiment.\n :param annotate_largest: if annotate the size (in percentage) of the largest component.\n :param min_comp_size: minimum size (number of vertices) of the components to visualize.\n :param display_smaller: if display aggregated components with size smaller than min_comp_size.\n :param figsize: size of the plot.\n \"\"\"\n n_comp = len(G.comp_stats)\n total_n_pts = G.num_R + G.num_E\n max_score, last_display_comp = 0, 0\n small_R_comp, small_E_comp, small_RE_comp = 0, 0, 0\n consistency_scores, ticks_labels = [], []\n\n fig, ax = plt.subplots(figsize=figsize)\n for comp_id in range(n_comp):\n compR = G.comp_stats[comp_id].Ridx\n compE = G.comp_stats[comp_id].Eidx\n comp_n_points = len(compR) + len(compE)\n if comp_n_points >= min_comp_size:\n comp_consistency = np.round(G.comp_stats[comp_id].comp_consistency, 2)\n max_score = max(max_score, comp_consistency)\n last_display_comp = comp_id + 1\n consistency_scores.append(comp_consistency)\n if len(compR) != 0:\n if len(compE) != 0:\n comp_color = \"gray\"\n else:\n comp_color = R_color\n else:\n comp_color = E_color\n\n ax.scatter(\n comp_id,\n comp_consistency,\n c=comp_color,\n linestyle=\"--\",\n s=1000 * (comp_n_points) / total_n_pts,\n alpha=0.8,\n zorder=10,\n )\n else:\n if len(compR) != 0:\n if len(compE) != 0:\n small_RE_comp += 1\n else:\n small_R_comp += 1\n else:\n small_E_comp += 1\n\n if min_comp_size > 0 and display_smaller:\n if small_RE_comp + small_R_comp + small_E_comp > 0:\n ticks_labels = [last_display_comp]\n\n if small_RE_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_RE_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=\"gray\")\n\n if small_R_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_R_comp)\n 
ax.axvspan(r - 2, r, alpha=0.5, color=R_color)\n\n if small_E_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_E_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=E_color)\n\n # Annotate the largest component\n if annotate_largest:\n largest_comp_size = len(G.comp_stats[0].Ridx) + len(G.comp_stats[0].Eidx)\n ax.annotate(\n round(largest_comp_size / total_n_pts, 2),\n xy=(0, G.comp_stats[0].comp_consistency + 0.03),\n ha=\"center\",\n va=\"bottom\",\n color=\"k\",\n )\n if max_score == 0:\n ax.plot(0, G.comp_stats[0].comp_consistency, \"kX\")\n\n ax.plot(\n np.arange(last_display_comp),\n consistency_scores,\n color=\"gray\",\n linestyle=\"--\",\n alpha=0.5,\n zorder=0,\n )\n displayed_ticks = np.arange(\n last_display_comp, step=max(int(last_display_comp / 10), 1)\n )\n if min_comp_size == 0:\n ax.set_xticks(displayed_ticks)\n ax.set_xticklabels(displayed_ticks)\n else:\n new_ticks = np.arange(\n last_display_comp, last_display_comp + len(ticks_labels) * 2, 2\n )\n ax.set_xticks(np.concatenate([displayed_ticks, new_ticks]))\n ax.set_xticklabels(list(displayed_ticks) + ticks_labels)\n max_score = 1.0 if max_score == 0 else max_score\n ax.set_ylim((-0.05, max_score + 0.1))\n ax.set_yticks(np.arange(0, max_score + 0.1, 0.1))\n\n # ax.tick_params(axis='x', rotation=45)\n ax.set_xlabel(\"component index\")\n ax.set_ylabel(\"component consistency\")\n legend_elements = [\n Line2D(\n [0],\n [0],\n markerfacecolor=R_color,\n markersize=10,\n label=\"R\",\n marker=\"o\",\n color=\"w\",\n ),\n Line2D(\n [0],\n [0],\n markerfacecolor=E_color,\n markersize=10,\n label=\"E\",\n marker=\"o\",\n color=\"w\",\n ),\n Line2D(\n [0],\n [0],\n markerfacecolor=\"gray\",\n markersize=10,\n label=\"mix\",\n marker=\"o\",\n color=\"w\",\n ),\n ]\n ax.legend(\n handles=legend_elements,\n ncol=len(legend_elements),\n loc=\"upper center\",\n framealpha=0.5,\n )\n name = \"component_consistency_min_size{0}_annotated{1}_displaysmaller{2}\".format(\n min_comp_size, int(annotate_largest), int(display_smaller)\n )\n path = os.path.join(root, name)\n plt.tight_layout()\n plt.savefig(path)\n plt.clf()\n plt.close()\n"
] | [
[
"matplotlib.pyplot.rc",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.round",
"numpy.concatenate",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.repeat",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.collections.LineCollection",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.use",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Parita-D/olympic-hero | [
"8a809a6308146c09235af43379f29e7e5e83827d"
] | [
"code.py"
] | [
"# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#Path of the file\r\ndata = pd.read_csv(path)\r\ndata.rename(columns={\"Total\":\"Total_Medals\"}, inplace=True)\r\ndata.head(10)\r\n#Code starts here\r\n\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\ndata['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'],'Summer', 'Winter')\r\ndata['Better_Event'] = np.where(data['Total_Summer']==data['Total_Winter'], 'Both', data['Better_Event'])\r\n\r\nbetter_event = data.Better_Event.value_counts().index[0]\r\nprint(better_event)\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\ntop_countries = data[['Country_Name', 'Total_Summer', 'Total_Winter', 'Total_Medals']]\r\n\r\ntop_countries.drop(top_countries.tail(1).index, inplace=True, axis=0)\r\nprint(top_countries)\r\n\r\ndef top_ten(top_countries,c_name):\r\n country_list=[]\r\n df = top_countries.nlargest(10, [c_name]) \r\n country_list = list(df['Country_Name'])\r\n return(country_list)\r\n\r\ntop_10_summer = top_ten(top_countries,'Total_Summer')\r\ntop_10_winter = top_ten(top_countries,'Total_Winter')\r\ntop_10 = top_ten(top_countries,'Total_Medals')\r\n\r\ncommon = list(set(top_10_summer) & set(top_10_winter) & set(top_10))\r\nprint(common)\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\n\r\nsummer_df = data[data['Country_Name'].isin(top_10_summer)]\r\nwinter_df = data[data['Country_Name'].isin(top_10_winter)]\r\ntop_df = data[data['Country_Name'].isin(top_10)]\r\n\r\nsummer_df.plot(kind='bar')\r\nwinter_df.plot(kind='bar')\r\ntop_df.plot(kind='bar')\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\nsummer_df['Golden_Ratio'] = summer_df['Gold_Summer']/summer_df['Total_Summer']\r\n\r\nsummer_max_ratio = summer_df.Golden_Ratio.max()\r\nsummer_country_gold = summer_df.loc[summer_df.Golden_Ratio==summer_df.Golden_Ratio.max(), 'Country_Name'].values[0] \r\n\r\nwinter_df['Golden_Ratio'] = winter_df['Gold_Winter']/winter_df['Total_Winter']\r\n\r\nwinter_max_ratio = winter_df.Golden_Ratio.max()\r\n\r\nwinter_country_gold = winter_df.loc[winter_df.Golden_Ratio==winter_df.Golden_Ratio.max(), 'Country_Name'].values[0]\r\n\r\ntop_df['Golden_Ratio'] = top_df['Gold_Total']/top_df['Total_Medals']\r\n\r\ntop_max_ratio = top_df.Golden_Ratio.max()\r\n\r\ntop_country_gold = top_df.loc[top_df.Golden_Ratio==top_df.Golden_Ratio.max(), 'Country_Name'].values[0]\r\n\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\ndata_1=data[:-1]\r\ndata_1['Total_Points']=data_1['Gold_Total']*3 + data_1['Silver_Total']*2 + data_1['Bronze_Total']*1\r\nmost_points = data_1.Total_Points.max()\r\nbest_country = data_1.loc[data_1.Total_Points==data_1.Total_Points.max(), 'Country_Name'].values[0] \r\n\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\n\r\nbest = data[data['Country_Name']==best_country]\r\n\r\nbest=best[['Gold_Total','Silver_Total','Bronze_Total']]\r\n\r\nbest.plot.bar(stacked=True)\r\nplt.xlabel('United States')\r\nplt.ylabel('Medals Tally')\r\nplt.xticks(rotation=45)\n\n\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"numpy.where",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
pjk645/pyGAM | [
"29425798e13651f03c1fd3cc1096071cd752403a",
"29425798e13651f03c1fd3cc1096071cd752403a"
] | [
"pygam/tests/test_GAM_methods.py",
"pygam/utils.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport sys\n\nimport numpy as np\nimport pytest\nimport scipy as sp\n\nfrom pygam import *\n\n\ndef test_LinearGAM_prediction(mcycle_X_y, mcycle_gam):\n \"\"\"\n check that we the predictions we get are correct shape\n \"\"\"\n X, y = mcycle_X_y\n preds = mcycle_gam.predict(X)\n assert(preds.shape == y.shape)\n\ndef test_LogisticGAM_accuracy(default_X_y):\n \"\"\"\n check that we can compute accuracy correctly\n \"\"\"\n X, y = default_X_y\n gam = LogisticGAM().fit(X, y)\n\n preds = gam.predict(X)\n acc0 = (preds == y).mean()\n acc1 = gam.accuracy(X, y)\n assert(acc0 == acc1)\n\ndef test_PoissonGAM_exposure(coal_X_y):\n \"\"\"\n check that we can fit a Poisson GAM with exposure, and it scales predictions\n \"\"\"\n X, y = coal_X_y\n gam = PoissonGAM().fit(X, y, exposure=np.ones_like(y))\n assert((gam.predict(X, exposure=np.ones_like(y)*2) == 2 *gam.predict(X)).all())\n\ndef test_PoissonGAM_loglike(coal_X_y):\n \"\"\"\n check that our loglikelihood is scaled by exposure\n\n predictions that are twice as large with twice the exposure\n should have lower loglikelihood\n \"\"\"\n X, y = coal_X_y\n exposure = np.ones_like(y)\n gam_high_var = PoissonGAM().fit(X, y * 2, exposure=exposure * 2)\n gam_low_var = PoissonGAM().fit(X, y, exposure=exposure)\n\n assert gam_high_var.loglikelihood(X, y * 2, exposure * 2) < gam_low_var.loglikelihood(X, y, exposure)\n\ndef test_large_GAM(coal_X_y):\n \"\"\"\n check that we can fit a GAM in py3 when we have more than 50,000 samples\n \"\"\"\n X = np.linspace(0, 100, 100000)\n y = X**2\n gam = LinearGAM().fit(X, y)\n assert(gam._is_fitted)\n\ndef test_summary(mcycle_X_y, mcycle_gam):\n \"\"\"\n check that we can get a summary if we've fitted the model, else not\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n\n try:\n gam.summary()\n except AttributeError:\n assert(True)\n\n mcycle_gam.summary()\n assert(True)\n\ndef test_more_splines_than_samples(mcycle_X_y):\n \"\"\"\n check that gridsearch returns the expected number of models\n \"\"\"\n X, y = mcycle_X_y\n n = len(X)\n\n gam = LinearGAM(s(0, n_splines=n+1)).fit(X, y)\n assert(gam._is_fitted)\n\n # TODO here is our bug:\n # we cannot display the term-by-term effective DoF because we have fewer\n # values than coefficients\n assert len(gam.statistics_['edof_per_coef']) < len(gam.coef_)\n gam.summary()\n\ndef test_deviance_residuals(mcycle_X_y, mcycle_gam):\n \"\"\"\n for linear GAMs, the deviance residuals should be equal to the y - y_pred\n \"\"\"\n X, y = mcycle_X_y\n res = mcycle_gam.deviance_residuals(X, y)\n err = y - mcycle_gam.predict(X)\n assert((res == err).all())\n\ndef test_conf_intervals_return_array(mcycle_X_y, mcycle_gam):\n \"\"\"\n make sure that the confidence_intervals method returns an array\n \"\"\"\n X, y = mcycle_X_y\n conf_ints = mcycle_gam.confidence_intervals(X)\n assert(conf_ints.ndim == 2)\n\ndef test_conf_intervals_quantiles_width_interchangable(mcycle_X_y, mcycle_gam):\n \"\"\"\n getting confidence_intervals via width or specifying quantiles\n should return the same result\n \"\"\"\n X, y = mcycle_X_y\n conf_ints_a = mcycle_gam.confidence_intervals(X, width=.9)\n conf_ints_b = mcycle_gam.confidence_intervals(X, quantiles=[.05, .95])\n assert(np.allclose(conf_ints_a, conf_ints_b))\n\ndef test_conf_intervals_ordered(mcycle_X_y, mcycle_gam):\n \"\"\"\n comfidence intervals returned via width should be ordered\n \"\"\"\n X, y = mcycle_X_y\n conf_ints = mcycle_gam.confidence_intervals(X)\n assert((conf_ints[:,0] <= conf_ints[:,1]).all())\n\ndef 
test_summary_returns_12_lines(mcycle_gam):\n \"\"\"\n check that the summary method works and returns 24 lines like:\n\n LinearGAM\n =============================================== ==========================================================\n Distribution: NormalDist Effective DoF: 11.2495\n Link Function: IdentityLink Log Likelihood: -952.605\n Number of Samples: 133 AIC: 1929.7091\n AICc: 1932.4197\n GCV: 605.6546\n Scale: 514.2013\n Pseudo R-Squared: 0.7969\n ==========================================================================================================\n Feature Function Data Type Num Splines Spline Order Linear Fit Lambda P > x Sig. Code\n ================== ============== ============= ============= =========== ========== ========== ==========\n feature 1 numerical 25 3 False 1.0 3.43e-03 **\n intercept 6.85e-02 .\n ==========================================================================================================\n Significance codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n WARNING: Fitting splines and a linear function to a feature introduces a model identifiability problem\n which can cause p-values to appear significant when they are not.\n\n WARNING: p-values calculated in this manner behave correctly for un-penalized models or models with\n known smoothing parameters, but when smoothing parameters have been estimated, the p-values\n are typically lower than they should be, meaning that the tests reject the null too readily.\n \"\"\"\n if sys.version_info.major == 2:\n from StringIO import StringIO\n if sys.version_info.major == 3:\n from io import StringIO\n stdout = sys.stdout #keep a handle on the real standard output\n sys.stdout = StringIO() #Choose a file-like object to write to\n mcycle_gam.summary()\n assert(len(sys.stdout.getvalue().split('\\n')) == 24)\n\ndef test_is_fitted_predict(mcycle_X_y):\n \"\"\"\n test predict requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.predict(X)\n\ndef test_is_fitted_predict_mu(mcycle_X_y):\n \"\"\"\n test predict_mu requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.predict_mu(X)\n\ndef test_is_fitted_dev_resid(mcycle_X_y):\n \"\"\"\n test deviance_residuals requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.deviance_residuals(X, y)\n\ndef test_is_fitted_conf_intervals(mcycle_X_y):\n \"\"\"\n test confidence_intervals requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.confidence_intervals(X)\n\ndef test_is_fitted_pdep(mcycle_X_y):\n \"\"\"\n test partial_dependence requires fitted model\n \"\"\"\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.partial_dependence(term=0)\n\ndef test_is_fitted_summary(mcycle_X_y):\n \"\"\"\n test summary requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.summary()\n\ndef test_set_params_with_external_param():\n \"\"\"\n test set_params sets a real parameter\n \"\"\"\n gam = GAM(lam=1)\n gam.set_params(lam=420)\n assert(gam.lam == 420)\n\ndef test_set_params_with_phony_param():\n \"\"\"\n test set_params should not set any phony param\n \"\"\"\n gam = GAM()\n gam.set_params(cat=420)\n assert(not hasattr(gam, 'cat'))\n\ndef test_set_params_with_phony_param_force():\n \"\"\"\n test set_params can set phony params if we use the force=True\n \"\"\"\n 
gam = GAM()\n assert(not hasattr(gam, 'cat'))\n\n gam.set_params(cat=420, force=True)\n assert(gam.cat == 420)\n\ndef test_get_params():\n \"\"\"\n test gam gets our params\n \"\"\"\n gam = GAM(lam=420)\n params = gam.get_params()\n assert(params['lam'] == 420)\n\n\nclass TestSamplingFromPosterior(object):\n\n def test_drawing_samples_from_unfitted_model(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n gam = LinearGAM()\n\n with pytest.raises(AttributeError):\n gam.sample(X, y)\n\n with pytest.raises(AttributeError):\n gam._sample_coef(X, y)\n\n with pytest.raises(AttributeError):\n gam._bootstrap_samples_of_smoothing(X, y)\n\n assert mcycle_gam._is_fitted\n\n mcycle_gam.sample(X, y, n_draws=2)\n mcycle_gam._sample_coef(X, y, n_draws=2)\n mcycle_gam._bootstrap_samples_of_smoothing(X, y, n_bootstraps=1)\n assert True\n\n def test_sample_quantity(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n for quantity in ['coefficients', 'response']:\n with pytest.raises(ValueError):\n mcycle_gam.sample(X, y, quantity=quantity, n_draws=2)\n for quantity in ['coef', 'mu', 'y']:\n mcycle_gam.sample(X, y, quantity=quantity, n_draws=2)\n assert True\n\n def test_shape_of_random_samples(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n n_samples = len(X)\n n_draws = 5\n\n sample_coef = mcycle_gam.sample(X, y, quantity='coef', n_draws=n_draws)\n sample_mu = mcycle_gam.sample(X, y, quantity='mu', n_draws=n_draws)\n sample_y = mcycle_gam.sample(X, y, quantity='y', n_draws=n_draws)\n assert sample_coef.shape == (n_draws, len(mcycle_gam.coef_))\n assert sample_mu.shape == (n_draws, n_samples)\n assert sample_y.shape == (n_draws, n_samples)\n\n n_samples_in_grid = 500\n idxs = np.random.choice(np.arange(len(X)), n_samples_in_grid)\n XX = X[idxs]\n\n sample_coef = mcycle_gam.sample(X, y, quantity='coef', n_draws=n_draws,\n sample_at_X=XX)\n sample_mu = mcycle_gam.sample(X, y, quantity='mu', n_draws=n_draws,\n sample_at_X=XX)\n sample_y = mcycle_gam.sample(X, y, quantity='y', n_draws=n_draws,\n sample_at_X=XX)\n\n assert sample_coef.shape == (n_draws, len(mcycle_gam.coef_))\n assert sample_mu.shape == (n_draws, n_samples_in_grid)\n assert sample_y.shape == (n_draws, n_samples_in_grid)\n\n def test_shape_bootstrap_samples_of_smoothing(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n\n for n_bootstraps in [1, 2]:\n coef_bootstraps, cov_bootstraps = (\n mcycle_gam._bootstrap_samples_of_smoothing(\n X, y, n_bootstraps=n_bootstraps))\n assert len(coef_bootstraps) == len(cov_bootstraps) == n_bootstraps\n for coef, cov in zip(coef_bootstraps, cov_bootstraps):\n assert coef.shape == mcycle_gam.coef_.shape\n assert cov.shape == mcycle_gam.statistics_['cov'].shape\n\n for n_draws in [1, 2]:\n coef_draws = mcycle_gam._simulate_coef_from_bootstraps(\n n_draws, coef_bootstraps, cov_bootstraps)\n assert coef_draws.shape == (n_draws, len(mcycle_gam.coef_))\n\n def test_bad_sample_params(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n with pytest.raises(ValueError):\n mcycle_gam.sample(X, y, n_draws=0)\n with pytest.raises(ValueError):\n mcycle_gam.sample(X, y, n_bootstraps=0)\n\n\ndef test_prediction_interval_unknown_scale():\n \"\"\"\n the prediction intervals should be correct to a few decimal places\n we test at a large sample limit, where the t distribution becomes normal\n \"\"\"\n n = 1000000\n X = np.linspace(0,1,n)\n y = np.random.randn(n)\n\n gam_a = LinearGAM(terms=l(0)).fit(X, y)\n gam_b = LinearGAM(s(0, n_splines=4)).fit(X, y)\n\n XX = gam_a.generate_X_grid(term=0)\n intervals_a = 
gam_a.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)\n intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)\n\n assert np.allclose(intervals_a[0], sp.stats.norm.ppf(0.1), atol=0.01)\n assert np.allclose(intervals_a[1], sp.stats.norm.ppf(0.9), atol=0.01)\n\n assert np.allclose(intervals_b[0], sp.stats.norm.ppf(0.1), atol=0.01)\n assert np.allclose(intervals_b[1], sp.stats.norm.ppf(0.9), atol=0.01)\n\ndef test_prediction_interval_known_scale():\n \"\"\"\n the prediction intervals should be correct to a few decimal places\n we test at a large sample limit.\n \"\"\"\n n = 1000000\n X = np.linspace(0,1,n)\n y = np.random.randn(n)\n\n gam_a = LinearGAM(terms=l(0), scale=1.).fit(X, y)\n gam_b = LinearGAM(s(0, n_splines=4), scale=1.).fit(X, y)\n\n XX = gam_a.generate_X_grid(term=0)\n intervals_a = gam_a.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)\n intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)\n\n assert np.allclose(intervals_a[0], sp.stats.norm.ppf(0.1), atol=0.01)\n assert np.allclose(intervals_a[1], sp.stats.norm.ppf(0.9), atol=0.01)\n\n assert np.allclose(intervals_b[0], sp.stats.norm.ppf(0.1), atol=0.01)\n assert np.allclose(intervals_b[1], sp.stats.norm.ppf(0.9), atol=0.01)\n\ndef test_pvalue_rejects_useless_feature(wage_X_y):\n \"\"\"\n check that a p-value can reject a useless feature\n \"\"\"\n X, y = wage_X_y\n\n # add empty feature\n X = np.c_[X, np.arange(X.shape[0])]\n gam = LinearGAM(s(0) + s(1) + f(2) + s(3)).fit(X, y)\n\n # now do the test, with some safety\n p_values = gam._estimate_p_values()\n print(p_values)\n assert(p_values[-2] > .5) # because -1 is intercept\n\ndef test_fit_quantile_is_close_enough(head_circumference_X_y):\n \"\"\"see that we get close to the desired quantile\n\n and check that repeating on an already fitted returns the same\n \"\"\"\n X, y = head_circumference_X_y\n\n quantile = 0.99\n tol = 1e-4\n\n gam = ExpectileGAM().fit_quantile(X, y, quantile=quantile, max_iter=20, tol=tol)\n ratio = gam._get_quantile_ratio(X, y)\n\n assert np.abs(ratio - quantile) <= tol\n\n # now check if we had to refit\n gam2 = gam.fit_quantile(X, y, quantile=quantile, max_iter=20, tol=tol)\n\n assert gam == gam2\n\n\ndef test_fit_quantile_NOT_close_enough(head_circumference_X_y):\n \"\"\"see that we DO NOT get close to the desired quantile\n \"\"\"\n X, y = head_circumference_X_y\n\n quantile = 0.99\n tol = 1e-5\n\n gam = ExpectileGAM().fit_quantile(X, y, quantile=quantile, max_iter=1, tol=tol)\n ratio = gam._get_quantile_ratio(X, y)\n\n assert np.abs(ratio - quantile) > tol\n\ndef test_fit_quantile_raises_ValueError(head_circumference_X_y):\n \"\"\"see that we DO NOT get fit on bad argument requests\n \"\"\"\n X, y = head_circumference_X_y\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, quantile=0)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, quantile=1)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, quantile=-0.1)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, quantile=1.1)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, tol=0, quantile=0.5)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, tol=-0.1, quantile=0.5)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, max_iter=0, quantile=0.5)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, max_iter=-1, quantile=0.5)\n\nclass TestRegressions(object):\n def 
test_pvalue_invariant_to_scale(self, wage_X_y):\n \"\"\"\n regression test.\n\n a bug made the F-statistic sensitive to scale changes, when it should be invariant.\n\n check that a p-value should not change when we change the scale of the response\n \"\"\"\n X, y = wage_X_y\n\n gamA = LinearGAM(s(0) + s(1) + f(2)).fit(X, y * 1000000)\n gamB = LinearGAM(s(0) + s(1) + f(2)).fit(X, y)\n\n assert np.allclose(gamA.statistics_['p_values'], gamB.statistics_['p_values'])\n\n def test_2d_y_still_allow_fitting_in_PoissonGAM(self, coal_X_y):\n \"\"\"\n regression test.\n\n there was a bug where we forgot to check the y_array before converting\n exposure to weights.\n \"\"\"\n X, y = coal_X_y\n two_d_data = np.ones_like(y).ravel()[:, None]\n\n # 2d y should cause no problems now\n gam = PoissonGAM().fit(X, y[:, None])\n assert gam._is_fitted\n\n # 2d weghts should cause no problems now\n gam = PoissonGAM().fit(X, y, weights=two_d_data)\n assert gam._is_fitted\n\n # 2d exposure should cause no problems now\n gam = PoissonGAM().fit(X, y, exposure=two_d_data)\n assert gam._is_fitted\n\n def test_non_int_exposure_produced_no_inf_in_PoissonGAM_ll(self, coal_X_y):\n \"\"\"\n regression test.\n\n there was a bug where we forgot to round the rescaled counts before\n computing the loglikelihood. since Poisson requires integer observations,\n small numerical errors caused the pmf to return -inf, which shows up\n in the loglikelihood computations, AIC, AICc..\n \"\"\"\n X, y = coal_X_y\n\n rate = 1.2 + np.cos(np.linspace(0, 2. * np.pi, len(y)))\n\n gam = PoissonGAM().fit(X, y, exposure=rate)\n\n assert np.isfinite(gam.statistics_['loglikelihood'])\n\n def test_initial_estimate_runs_for_int_obseravtions(self, toy_classification_X_y):\n \"\"\"\n regression test\n\n ._initial_estimate would fail when trying to add small numbers to\n integer observations\n\n casting the observations to float in that method fixes that\n \"\"\"\n X, y = toy_classification_X_y\n gam = LogisticGAM().fit(X, y)\n assert gam._is_fitted\n\n def test_r_squared_for_new_dataset(self, mcycle_gam, mcycle_X_y):\n \"\"\"\n regression test\n\n estimate r squared used to refer to a non-existant method when `mu=None`\n \"\"\"\n X, y = mcycle_X_y\n mcycle_gam._estimate_r2(X, y)\n\n def test_score_method(self, mcycle_gam, mcycle_X_y):\n \"\"\"\n regression test\n\n score returns calculated r^2 for X data using trained gam\n\n \"\"\"\n X, y = mcycle_X_y\n assert mcycle_gam.score(X, y) <= 1\n",
"\"\"\"\nPygam utilities\n\"\"\"\n\nfrom __future__ import division\nfrom copy import deepcopy\nimport numbers\nimport sys\nimport warnings\n\nimport scipy as sp\nfrom scipy import sparse\nimport numpy as np\nfrom numpy.linalg import LinAlgError\n\ntry:\n from sksparse.cholmod import cholesky as spcholesky\n from sksparse.test_cholmod import CholmodNotPositiveDefiniteError\n SKSPIMPORT = True\nexcept ImportError:\n SKSPIMPORT = False\n\n\nclass NotPositiveDefiniteError(ValueError):\n \"\"\"Exception class to raise if a matrix is not positive definite\n \"\"\"\n\nclass OptimizationError(ValueError):\n \"\"\"Exception class to raise if PIRLS optimization fails\n \"\"\"\n\n\ndef cholesky(A, sparse=True, verbose=True):\n \"\"\"\n Choose the best possible cholesky factorizor.\n\n if possible, import the Scikit-Sparse sparse Cholesky method.\n Permutes the output L to ensure A = L.H . L\n\n otherwise defaults to numpy's non-sparse version\n\n Parameters\n ----------\n A : array-like\n array to decompose\n sparse : boolean, default: True\n whether to return a sparse array\n verbose : bool, default: True\n whether to print warnings\n \"\"\"\n if SKSPIMPORT:\n A = sp.sparse.csc_matrix(A)\n try:\n F = spcholesky(A)\n\n # permutation matrix P\n P = sp.sparse.lil_matrix(A.shape)\n p = F.P()\n P[np.arange(len(p)), p] = 1\n\n # permute\n L = F.L()\n L = P.T.dot(L)\n except CholmodNotPositiveDefiniteError as e:\n raise NotPositiveDefiniteError('Matrix is not positive definite')\n\n if sparse:\n return L.T # upper triangular factorization\n return L.T.A # upper triangular factorization\n\n else:\n msg = 'Could not import Scikit-Sparse or Suite-Sparse.\\n'\\\n 'This will slow down optimization for models with '\\\n 'monotonicity/convexity penalties and many splines.\\n'\\\n 'See installation instructions for installing '\\\n 'Scikit-Sparse and Suite-Sparse via Conda.'\n if verbose:\n warnings.warn(msg)\n\n if sp.sparse.issparse(A):\n A = A.A\n\n try:\n L = sp.linalg.cholesky(A, lower=False)\n except LinAlgError as e:\n raise NotPositiveDefiniteError('Matrix is not positive definite')\n\n if sparse:\n return sp.sparse.csc_matrix(L)\n return L\n\n\ndef make_2d(array, verbose=True):\n \"\"\"\n tiny tool to expand 1D arrays the way i want\n\n Parameters\n ----------\n array : array-like\n\n verbose : bool, default: True\n whether to print warnings\n\n Returns\n -------\n np.array of with ndim = 2\n \"\"\"\n array = np.asarray(array)\n if array.ndim < 2:\n msg = 'Expected 2D input data array, but found {}D. '\\\n 'Expanding to 2D.'.format(array.ndim)\n if verbose:\n warnings.warn(msg)\n array = np.atleast_1d(array)[:,None]\n return array\n\n\ndef check_array(array, force_2d=False, n_feats=None, ndim=None,\n min_samples=1, name='Input data', verbose=True):\n \"\"\"\n tool to perform basic data validation.\n called by check_X and check_y.\n\n ensures that data:\n - is ndim dimensional\n - contains float-compatible data-types\n - has at least min_samples\n - has n_feats\n - is finite\n\n Parameters\n ----------\n array : array-like\n force_2d : boolean, default: False\n whether to force a 2d array. 
Setting to True forces ndim = 2\n n_feats : int, default: None\n represents number of features that the array should have.\n not enforced if n_feats is None.\n ndim : int default: None\n number of dimensions expected in the array\n min_samples : int, default: 1\n name : str, default: 'Input data'\n name to use when referring to the array\n verbose : bool, default: True\n whether to print warnings\n\n Returns\n -------\n array : validated array\n \"\"\"\n # make array\n if force_2d:\n array = make_2d(array, verbose=verbose)\n ndim = 2\n else:\n array = np.array(array)\n\n # cast to float\n dtype = array.dtype\n if dtype.kind not in ['i', 'f']:\n try:\n array = array.astype('float')\n except ValueError as e:\n raise ValueError('{} must be type int or float, '\\\n 'but found type: {}\\n'\\\n 'Try transforming data with a LabelEncoder first.'\\\n .format(name, dtype.type))\n\n # check finite\n if not(np.isfinite(array).all()):\n raise ValueError('{} must not contain Inf nor NaN'.format(name))\n\n # check ndim\n if ndim is not None:\n if array.ndim != ndim:\n raise ValueError('{} must have {} dimensions. '\\\n 'found shape {}'.format(name, ndim, array.shape))\n\n # check n_feats\n if n_feats is not None:\n m = array.shape[1]\n if m != n_feats:\n raise ValueError('{} must have {} features, '\\\n 'but found {}'.format(name, n_feats, m))\n\n # minimum samples\n n = array.shape[0]\n if n < min_samples:\n raise ValueError('{} should have at least {} samples, '\\\n 'but found {}'.format(name, min_samples, n))\n\n return array\n\n\ndef check_y(y, link, dist, min_samples=1, verbose=True):\n \"\"\"\n tool to ensure that the targets:\n - are in the domain of the link function\n - are numerical\n - have at least min_samples\n - is finite\n\n Parameters\n ----------\n y : array-like\n link : Link object\n dist : Distribution object\n min_samples : int, default: 1\n verbose : bool, default: True\n whether to print warnings\n\n Returns\n -------\n y : array containing validated y-data\n \"\"\"\n y = np.ravel(y)\n\n y = check_array(y, force_2d=False, min_samples=min_samples, ndim=1,\n name='y data', verbose=verbose)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n \n if np.any(np.isnan(link.link(y, dist))):\n raise ValueError('y data is not in domain of {} link function. ' \\\n 'Expected domain: {}, but found {}' \\\n .format(link, get_link_domain(link, dist),\n [float('%.2f'%np.min(y)),\n float('%.2f'%np.max(y))]))\n return y\n\ndef check_X(X, n_feats=None, min_samples=1, edge_knots=None, dtypes=None,\n features=None, verbose=True):\n \"\"\"\n tool to ensure that X:\n - is 2 dimensional\n - contains float-compatible data-types\n - has at least min_samples\n - has n_feats\n - has categorical features in the right range\n - is finite\n\n Parameters\n ----------\n X : array-like\n n_feats : int. 
default: None\n represents number of features that X should have.\n not enforced if n_feats is None.\n min_samples : int, default: 1\n edge_knots : list of arrays, default: None\n dtypes : list of strings, default: None\n features : list of ints,\n which features are considered by the model\n verbose : bool, default: True\n whether to print warnings\n\n Returns\n -------\n X : array with ndims == 2 containing validated X-data\n \"\"\"\n # check all features are there\n if bool(features):\n features = flatten(features)\n max_feat = max(flatten(features))\n\n if n_feats is None:\n n_feats = max_feat\n\n n_feats = max(n_feats, max_feat)\n\n # basic diagnostics\n X = check_array(X, force_2d=True, n_feats=n_feats, min_samples=min_samples,\n name='X data', verbose=verbose)\n\n # check our categorical data has no new categories\n if (edge_knots is not None) and (dtypes is not None) and (features is not None):\n\n # get a flattened list of tuples\n edge_knots = flatten(edge_knots)[::-1]\n dtypes = flatten(dtypes)\n assert len(edge_knots) % 2 == 0 # sanity check\n\n # form pairs\n n = len(edge_knots) // 2\n edge_knots = [(edge_knots.pop(), edge_knots.pop()) for _ in range(n)]\n\n # check each categorical term\n for i, ek in enumerate(edge_knots):\n dt = dtypes[i]\n feature = features[i]\n x = X[:, feature]\n\n if dt == 'categorical':\n min_ = ek[0]\n max_ = ek[-1]\n if (np.unique(x) < min_).any() or \\\n (np.unique(x) > max_).any():\n min_ += .5\n max_ -= 0.5\n raise ValueError('X data is out of domain for categorical '\\\n 'feature {}. Expected data on [{}, {}], '\\\n 'but found data on [{}, {}]'\\\n .format(i, min_, max_, x.min(), x.max()))\n\n return X\n\ndef check_X_y(X, y):\n \"\"\"\n tool to ensure input and output data have the same number of samples\n\n Parameters\n ----------\n X : array-like\n y : array-like\n\n Returns\n -------\n None\n \"\"\"\n if len(X) != len(y):\n raise ValueError('Inconsistent input and output data shapes. 
'\\\n 'found X: {} and y: {}'.format(X.shape, y.shape))\n\ndef check_lengths(*arrays):\n \"\"\"\n tool to ensure input and output data have the same number of samples\n\n Parameters\n ----------\n *arrays : iterable of arrays to be checked\n\n Returns\n -------\n None\n \"\"\"\n lengths = [len(array) for array in arrays]\n if len(np.unique(lengths)) > 1:\n raise ValueError('Inconsistent data lengths: {}'.format(lengths))\n\n\ndef check_param(param, param_name, dtype, constraint=None, iterable=True,\n max_depth=2):\n \"\"\"\n checks the dtype of a parameter,\n and whether it satisfies a numerical contraint\n\n Parameters\n ---------\n param : object\n param_name : str, name of the parameter\n dtype : str, desired dtype of the parameter\n contraint : str, default: None\n numerical constraint of the parameter.\n if None, no constraint is enforced\n iterable : bool, default: True\n whether to allow iterable param\n max_depth : int, default: 2\n maximum nesting of the iterable.\n only used if iterable == True\n Returns\n -------\n list of validated and converted parameter(s)\n \"\"\"\n msg = []\n msg.append(param_name + \" must be \"+ dtype)\n if iterable:\n msg.append(\" or nested iterable of depth \" + str(max_depth) +\n \" containing \" + dtype + \"s\")\n\n msg.append(\", but found \" + param_name + \" = {}\".format(repr(param)))\n\n if constraint is not None:\n msg = (\" \" + constraint).join(msg)\n else:\n msg = ''.join(msg)\n\n # check param is numerical\n try:\n param_dt = np.array(flatten(param))# + np.zeros_like(flatten(param), dtype='int')\n # param_dt = np.array(param).astype(dtype)\n except (ValueError, TypeError):\n raise TypeError(msg)\n\n # check iterable\n if iterable:\n if check_iterable_depth(param) > max_depth:\n raise TypeError(msg)\n if (not iterable) and isiterable(param):\n raise TypeError(msg)\n\n # check param is correct dtype\n if not (param_dt == np.array(flatten(param)).astype(float)).all():\n raise TypeError(msg)\n\n # check constraint\n if constraint is not None:\n if not (eval('np.' + repr(param_dt) + constraint)).all():\n raise ValueError(msg)\n\n return param\n\ndef get_link_domain(link, dist):\n \"\"\"\n tool to identify the domain of a given monotonic link function\n\n Parameters\n ----------\n link : Link object\n dist : Distribution object\n\n Returns\n -------\n domain : list of length 2, representing the interval of the domain.\n \"\"\"\n domain = np.array([-np.inf, -1, 0, 1, np.inf])\n domain = domain[~np.isnan(link.link(domain, dist))]\n return [domain[0], domain[-1]]\n\n\ndef load_diagonal(cov, load=None):\n \"\"\"Return the given square matrix with a small amount added to the diagonal\n to make it positive semi-definite.\n \"\"\"\n n, m = cov.shape\n assert n == m, \"matrix must be square, but found shape {}\".format((n, m))\n\n if load is None:\n load = np.sqrt(np.finfo(np.float64).eps) # machine epsilon\n return cov + np.eye(n) * load\n\n\ndef round_to_n_decimal_places(array, n=3):\n \"\"\"\n tool to keep round a float to n decimal places.\n\n n=3 by default\n\n Parameters\n ----------\n array : np.array\n n : int. 
number of decimal places to keep\n\n Returns\n -------\n array : rounded np.array\n \"\"\"\n # check if in scientific notation\n if issubclass(array.__class__, float) and '%.e'%array == str(array):\n return array # do nothing\n\n shape = np.shape(array)\n out = ((np.atleast_1d(array) * 10**n).round().astype('int') / (10.**n))\n return out.reshape(shape)\n\n\n# Credit to Hugh Bothwell from http://stackoverflow.com/questions/5084743/how-to-print-pretty-string-output-in-python\nclass TablePrinter(object):\n \"Print a list of dicts as a table\"\n def __init__(self, fmt, sep=' ', ul=None):\n \"\"\"\n @param fmt: list of tuple(heading, key, width)\n heading: str, column label\n key: dictionary key to value to print\n width: int, column width in chars\n @param sep: string, separation between columns\n @param ul: string, character to underline column label, or None for no underlining\n \"\"\"\n super(TablePrinter,self).__init__()\n self.fmt = str(sep).join('{lb}{0}:{1}{rb}'.format(key, width, lb='{', rb='}') for heading,key,width in fmt)\n self.head = {key:heading for heading,key,width in fmt}\n self.ul = {key:str(ul)*width for heading,key,width in fmt} if ul else None\n self.width = {key:width for heading,key,width in fmt}\n\n def row(self, data):\n if sys.version_info < (3,):\n return self.fmt.format(**{ k:str(data.get(k,''))[:w] for k,w in self.width.iteritems() })\n else:\n return self.fmt.format(**{ k:str(data.get(k,''))[:w] for k,w in self.width.items() })\n\n def __call__(self, dataList):\n _r = self.row\n res = [_r(data) for data in dataList]\n res.insert(0, _r(self.head))\n if self.ul:\n res.insert(1, _r(self.ul))\n return '\\n'.join(res)\n\n\ndef space_row(left, right, filler=' ', total_width=-1):\n \"\"\"space the data in a row with optional filling\n\n Arguments\n ---------\n left : str, to be aligned left\n right : str, to be aligned right\n filler : str, default ' '.\n must be of length 1\n total_width : int, width of line.\n if negative number is specified,\n then that number of spaces is used between the left and right text\n\n Returns\n -------\n str\n \"\"\"\n left = str(left)\n right = str(right)\n filler = str(filler)[:1]\n\n if total_width < 0:\n spacing = - total_width\n else:\n spacing = total_width - len(left) - len(right)\n\n return left + filler * spacing + right\n\ndef sig_code(p_value):\n \"\"\"create a significance code in the style of R's lm\n\n Arguments\n ---------\n p_value : float on [0, 1]\n\n Returns\n -------\n str\n \"\"\"\n assert 0 <= p_value <= 1, 'p_value must be on [0, 1]'\n if p_value < 0.001:\n return '***'\n if p_value < 0.01:\n return '**'\n if p_value < 0.05:\n return '*'\n if p_value < 0.1:\n return '.'\n return ' '\n\ndef gen_edge_knots(data, dtype, verbose=True):\n \"\"\"\n generate uniform knots from data including the edges of the data\n\n for discrete data, assumes k categories in [0, k-1] interval\n\n Parameters\n ----------\n data : array-like with one dimension\n dtype : str in {'categorical', 'numerical'}\n verbose : bool, default: True\n whether to print warnings\n\n Returns\n -------\n np.array containing ordered knots\n \"\"\"\n if dtype not in ['categorical', 'numerical']:\n raise ValueError('unsupported dtype: {}'.format(dtype))\n if dtype == 'categorical':\n return np.r_[np.min(data) - 0.5, np.max(data) + 0.5]\n else:\n knots = np.r_[np.min(data), np.max(data)]\n if knots[0] == knots[1] and verbose:\n warnings.warn('Data contains constant feature. 
'\\\n 'Consider removing and setting fit_intercept=True',\n stacklevel=2)\n return knots\n\ndef b_spline_basis(x, edge_knots, n_splines=20, spline_order=3, sparse=True,\n periodic=True, verbose=True):\n \"\"\"\n tool to generate b-spline basis using vectorized De Boor recursion\n the basis functions extrapolate linearly past the end-knots.\n\n Parameters\n ----------\n x : array-like, with ndims == 1.\n edge_knots : array-like contaning locations of the 2 edge knots.\n n_splines : int. number of splines to generate. must be >= spline_order+1\n default: 20\n spline_order : int. order of spline basis to create\n default: 3\n sparse : boolean. whether to return a sparse basis matrix or not.\n default: True\n periodic: bool, default: True\n whether to repeat basis functions (True) or linearly extrapolate (False).\n verbose : bool, default: True\n whether to print warnings\n\n Returns\n -------\n basis : sparse csc matrix or array containing b-spline basis functions\n with shape (len(x), n_splines)\n \"\"\"\n if np.ravel(x).ndim != 1:\n raise ValueError('Data must be 1-D, but found {}'\\\n .format(np.ravel(x).ndim))\n\n if (n_splines < 1) or not isinstance(n_splines, numbers.Integral):\n raise ValueError('n_splines must be int >= 1')\n\n if (spline_order < 0) or not isinstance(spline_order, numbers.Integral):\n raise ValueError('spline_order must be int >= 1')\n\n if n_splines < spline_order + 1:\n raise ValueError('n_splines must be >= spline_order + 1. '\\\n 'found: n_splines = {} and spline_order = {}'\\\n .format(n_splines, spline_order))\n\n if n_splines == 0 and verbose:\n warnings.warn('Requested 1 spline. This is equivalent to '\\\n 'fitting an intercept', stacklevel=2)\n\n n_splines += spline_order * periodic\n\n # rescale edge_knots to [0,1], and generate boundary knots\n edge_knots = np.sort(deepcopy(edge_knots))\n offset = edge_knots[0]\n scale = edge_knots[-1] - edge_knots[0]\n if scale == 0:\n scale = 1\n boundary_knots = np.linspace(0, 1, 1 + n_splines - spline_order)\n diff = np.diff(boundary_knots[:2])[0]\n\n # rescale x as well\n x = (np.ravel(deepcopy(x)) - offset) / scale\n\n # wrap periodic values\n if periodic:\n x = x % (1 + 1e-9)\n\n # append 0 and 1 in order to get derivatives for extrapolation\n x = np.r_[x, 0., 1.]\n\n # determine extrapolation indices\n x_extrapolte_l = (x < 0)\n x_extrapolte_r = (x > 1)\n x_interpolate = ~(x_extrapolte_r + x_extrapolte_l)\n\n # formatting\n x = np.atleast_2d(x).T\n n = len(x)\n\n # augment knots\n aug = np.arange(1, spline_order + 1) * diff\n aug_knots = np.r_[-aug[::-1],\n boundary_knots,\n 1 + aug]\n aug_knots[-1] += 1e-9 # want last knot inclusive\n\n # prepare Haar Basis\n bases = (x >= aug_knots[:-1]).astype(np.int) * \\\n (x < aug_knots[1:]).astype(np.int)\n bases[-1] = bases[-2][::-1] # force symmetric bases at 0 and 1\n\n # do recursion from Hastie et al. 
vectorized\n maxi = len(aug_knots) - 1\n for m in range(2, spline_order + 2):\n maxi -= 1\n\n # left sub-basis\n num = (x - aug_knots[:maxi])\n num *= bases[:, :maxi]\n denom = aug_knots[m-1 : maxi+m-1] - aug_knots[:maxi]\n left = num/denom\n\n # right sub-basis\n num = (aug_knots[m : maxi+m] - x) * bases[:, 1:maxi+1]\n denom = aug_knots[m:maxi+m] - aug_knots[1 : maxi+1]\n right = num/denom\n\n # track previous bases and update\n prev_bases = bases[-2:]\n bases = left + right\n\n if periodic and spline_order > 0:\n # make spline domain periodic\n bases[:, :spline_order] = np.max([bases[:, :spline_order],\n bases[:, -spline_order:]],\n axis=0)\n # remove extra splines used only for ensuring correct domain\n bases = bases[:, :-spline_order]\n\n # extrapolate\n # since we have repeated end-knots, only the last 2 basis functions are\n # non-zero at the end-knots, and they have equal and opposite gradient.\n if (any(x_extrapolte_r) or any(x_extrapolte_l)) and spline_order>0:\n bases[~x_interpolate] = 0.\n\n denom = (aug_knots[spline_order:-1] - aug_knots[: -spline_order - 1])\n left = prev_bases[:, :-1] / denom\n\n denom = (aug_knots[spline_order+1:] - aug_knots[1: -spline_order])\n right = prev_bases[:, 1:] / denom\n\n grads = (spline_order) * (left - right)\n\n if any(x_extrapolte_l):\n val = grads[0] * x[x_extrapolte_l] + bases[-2]\n bases[x_extrapolte_l] = val\n if any(x_extrapolte_r):\n val = grads[1] * (x[x_extrapolte_r] - 1) + bases[-1]\n bases[x_extrapolte_r] = val\n # get rid of the added values at 0, and 1\n bases = bases[:-2]\n\n if sparse:\n return sp.sparse.csc_matrix(bases)\n\n return bases\n\n\ndef ylogydu(y, u):\n \"\"\"\n tool to give desired output for the limit as y -> 0, which is 0\n\n Parameters\n ----------\n y : array-like of len(n)\n u : array-like of len(n)\n\n Returns\n -------\n np.array len(n)\n \"\"\"\n mask = (np.atleast_1d(y)!=0.)\n out = np.zeros_like(u)\n out[mask] = y[mask] * np.log(y[mask] / u[mask])\n return out\n\n\ndef combine(*args):\n \"\"\"\n tool to perform tree search via recursion\n useful for developing the grid in a grid search\n\n Parameters\n ----------\n args : list of lists\n\n Returns\n -------\n list of all the combinations of the elements in the input lists\n \"\"\"\n if hasattr(args, '__iter__') and (len(args) > 1):\n subtree = combine(*args[:-1])\n tree = []\n for leaf in subtree:\n for node in args[-1]:\n if hasattr(leaf, '__iter__'):\n tree.append(leaf + [node])\n else:\n tree.append([leaf] + [node])\n return tree\n else:\n return [[arg] for arg in args[0]]\n\ndef isiterable(obj, reject_string=True):\n \"\"\"convenience tool to detect if something is iterable.\n in python3, strings count as iterables to we have the option to exclude them\n\n Parameters:\n -----------\n obj : object to analyse\n reject_string : bool, whether to ignore strings\n\n Returns:\n --------\n bool, if the object is itereable.\n \"\"\"\n\n iterable = hasattr(obj, '__len__')\n\n if reject_string:\n iterable = iterable and not isinstance(obj, str)\n\n return iterable\n\ndef check_iterable_depth(obj, max_depth=100):\n \"\"\"find the maximum depth of nesting of the iterable\n\n Parameters\n ----------\n obj : iterable\n max_depth : int, default: 100\n maximum depth beyond which we stop counting\n\n Returns\n -------\n int\n \"\"\"\n def find_iterables(obj):\n iterables = []\n for item in obj:\n if isiterable(item):\n iterables += list(item)\n return iterables\n\n depth = 0\n while (depth < max_depth) and isiterable(obj) and len(obj) > 0:\n depth += 1\n obj = 
find_iterables(obj)\n return depth\n\ndef flatten(iterable):\n \"\"\"convenience tool to flatten any nested iterable\n\n example:\n\n flatten([[[],[4]],[[[5,[6,7, []]]]]])\n >>> [4, 5, 6, 7]\n\n flatten('hello')\n >>> 'hello'\n\n Parameters\n ----------\n iterable\n\n Returns\n -------\n flattened object\n \"\"\"\n if isiterable(iterable):\n flat = []\n for item in list(iterable):\n item = flatten(item)\n if not isiterable(item):\n item = [item]\n flat += item\n return flat\n else:\n return iterable\n\n\ndef tensor_product(a, b, reshape=True):\n \"\"\"\n compute the tensor protuct of two matrices a and b\n\n if a is (n, m_a), b is (n, m_b),\n then the result is\n (n, m_a * m_b) if reshape = True.\n or\n (n, m_a, m_b) otherwise\n\n Parameters\n ---------\n a : array-like of shape (n, m_a)\n\n b : array-like of shape (n, m_b)\n\n reshape : bool, default True\n whether to reshape the result to be 2-dimensional ie\n (n, m_a * m_b)\n or return a 3-dimensional tensor ie\n (n, m_a, m_b)\n\n Returns\n -------\n dense np.ndarray of shape\n (n, m_a * m_b) if reshape = True.\n or\n (n, m_a, m_b) otherwise\n \"\"\"\n assert a.ndim == 2, 'matrix a must be 2-dimensional, but found {} dimensions'.format(a.ndim)\n assert b.ndim == 2, 'matrix b must be 2-dimensional, but found {} dimensions'.format(b.ndim)\n\n na, ma = a.shape\n nb, mb = b.shape\n\n if na != nb:\n raise ValueError('both arguments must have the same number of samples')\n\n if sp.sparse.issparse(a):\n a = a.A\n\n if sp.sparse.issparse(b):\n b = b.A\n\n tensor = a[..., :, None] * b[..., None, :]\n\n if reshape:\n return tensor.reshape(na, ma * mb)\n\n return tensor\n"
] | [
[
"scipy.stats.norm.ppf",
"numpy.ones_like",
"numpy.allclose",
"numpy.linspace",
"numpy.abs",
"numpy.isfinite",
"numpy.arange",
"numpy.random.randn"
],
[
"numpy.linspace",
"numpy.asarray",
"numpy.max",
"numpy.zeros_like",
"scipy.sparse.issparse",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"numpy.finfo",
"numpy.atleast_1d",
"numpy.diff",
"numpy.ravel",
"scipy.sparse.csc_matrix",
"numpy.log",
"numpy.min",
"numpy.atleast_2d",
"numpy.array",
"numpy.isfinite",
"scipy.linalg.cholesky",
"numpy.shape",
"scipy.sparse.lil_matrix"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
NCBI-Hackathons/RNAseq_Cancer_Biomarkers | [
"4ad41888f6546f400a451633f964ed7999a05ad8"
] | [
"scripts/cm_work/model_feature_importance.py"
] | [
"from model_blender import important_gene_mask\nfrom sklearn.metrics import log_loss\nimport numpy as np\n\ndef gene_weight_finder(model, X_train, X_test, y_train, y_test):\n \"\"\"\n function that returns the most important features, weights and # of features\n\n inputs\n -------\n model: tree based model\n X_train:\n X_test:\n y_train:\n y_test\n\n outputs\n -------\n all_important: list of all genes with feature importance > 0\n \n top_20_feature_names: top 20 most important column names (gene)\n based on feature importance\n\n top_20_weights: weights of the top 20 columns\n\n num_feats: number of features that are not 0\n\n number_important: number of features with feature importance > 0\n\n log_loss: log loss score\n \"\"\"\n columns = X_train.columns\n model.fit(X_train, y_train)\n y_pred = model.predict_proba(X_test)\n ll = log_loss(y_test, y_pred)\n \n top_20_features = np.argsort(model.feature_importances_)[-20:][::-1]\n top_20_feature_names = columns[top_20_features]\n top_20_weights = model.feature_importances_[top_20_features]\n\n number_important = len(important_gene_mask(columns, model.feature_importances_))\n all_important = important_gene_mask(columns, model.feature_importances_)\n\n return all_important, top_20_feature_names, top_20_weights, number_important, ll\n\n\n\n\n"
] | [
[
"numpy.argsort",
"sklearn.metrics.log_loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Dragoncall/GPflowOpt | [
"f1c268e6b5dc4d7f458e06c59095901d55b73c32"
] | [
"gpflowopt/domain.py"
] | [
"# Copyright 2017 Joachim van der Herten\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom itertools import chain\nfrom gpflow.param import Parentable\n\nfrom .transforms import LinearTransform\n\n\nclass Domain(Parentable):\n \"\"\"\n A domain representing the mathematical space over which is optimized.\n \"\"\"\n\n def __init__(self, parameters):\n super(Domain, self).__init__()\n self._parameters = parameters\n\n @property\n def lower(self):\n \"\"\"\n Lower bound of the domain, corresponding to a numpy array with the lower value of each parameter\n \"\"\"\n return np.array(list(map(lambda param: param.lower, self._parameters))).flatten()\n\n @property\n def upper(self):\n \"\"\"\n Upper bound of the domain, corresponding to a numpy array with the upper value of each parameter\n \"\"\"\n return np.array(list(map(lambda param: param.upper, self._parameters))).flatten()\n\n def __add__(self, other):\n assert isinstance(other, Domain)\n return Domain(self._parameters + other._parameters)\n\n @property\n def size(self):\n \"\"\"\n Returns the dimensionality of the domain\n \"\"\"\n return sum(map(lambda param: param.size, self._parameters))\n\n def __setattr__(self, key, value):\n super(Domain, self).__setattr__(key, value)\n if key is not '_parent':\n if isinstance(value, Parentable):\n value._parent = self\n if isinstance(value, list):\n for val in (x for x in value if isinstance(x, Parentable)):\n val._parent = self\n\n def __eq__(self, other):\n return self._parameters == other._parameters\n\n def __contains__(self, X):\n X = np.atleast_2d(X)\n if X.shape[1] is not self.size:\n return False\n return np.all(np.logical_and(np.logical_or(self.lower < X, np.isclose(self.lower, X)),\n np.logical_or(X < self.upper, np.isclose(self.upper, X))))\n\n def __iter__(self):\n for v in chain(*map(iter, self._parameters)):\n yield v\n\n def __getitem__(self, items):\n if isinstance(items, list):\n return np.sum([self[item] for item in items])\n\n if isinstance(items, str):\n labels = [param.label for param in self._parameters]\n items = labels.index(items)\n\n return self._parameters[items]\n\n def __rshift__(self, other):\n assert(self.size == other.size)\n A = (other.upper - other.lower) / (self.upper - self.lower)\n b = -self.upper * A + other.upper\n return LinearTransform(A, b)\n\n @property\n def value(self):\n return np.vstack(map(lambda p: p.value, self._parameters)).T\n\n @value.setter\n def value(self, x):\n x = np.atleast_2d(x)\n assert (len(x.shape) == 2)\n assert (x.shape[1] == self.size)\n offset = 0\n for p in self._parameters:\n p.value = x[:, offset:offset + p.size]\n offset += p.size\n\n def _repr_html_(self):\n \"\"\"\n Build html string for table display in jupyter notebooks.\n \"\"\"\n html = [\"<table id='domain' width=100%>\"]\n\n # Table header\n columns = ['Name', 'Type', 'Values']\n header = \"<tr>\"\n header += ''.join(map(lambda l: \"<td>{0}</td>\".format(l), columns))\n header += \"</tr>\"\n html.append(header)\n\n # Add parameters\n 
html.append(self._html_table_rows())\n html.append(\"</table>\")\n\n return ''.join(html)\n\n def _html_table_rows(self):\n return ''.join(map(lambda l: l._html_table_rows(), self._parameters))\n\n\nclass Parameter(Domain):\n \"\"\"\n Abstract class representing a parameter (which corresponds to a one-dimensional domain)\n This class can be derived for continuous, discrete and categorical parameters\n \"\"\"\n\n def __init__(self, label, xinit):\n super(Parameter, self).__init__([self])\n self.label = label\n self._x = np.atleast_1d(xinit)\n\n @Domain.size.getter\n def size(self):\n \"\"\"\n One parameter has a dimensionality of 1\n :return: 1\n \"\"\"\n return 1\n\n def __iter__(self):\n yield self\n\n @Domain.value.getter\n def value(self):\n return self._x\n\n @value.setter\n def value(self, x):\n x = np.atleast_1d(x)\n self._x = x.ravel()\n\n def _html_table_rows(self):\n \"\"\"\n Html row representation of a Parameter. Should be overwritten in subclasses objects.\n \"\"\"\n return \"<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\".format(self.label, 'N/A', 'N/A')\n\n\nclass ContinuousParameter(Parameter):\n def __init__(self, label, lb, ub, xinit=None):\n self._range = np.array([lb, ub], dtype=float)\n super(ContinuousParameter, self).__init__(label, xinit or ((ub + lb) / 2.0))\n\n @Parameter.lower.getter\n def lower(self):\n return np.array([self._range[0]])\n\n @Parameter.upper.getter\n def upper(self):\n return np.array([self._range[1]])\n\n @lower.setter\n def lower(self, value):\n self._range[0] = value\n\n @upper.setter\n def upper(self, value):\n self._range[1] = value\n\n def __eq__(self, other):\n return isinstance(other, ContinuousParameter) and self.lower == other.lower and self.upper == other.upper\n\n def _html_table_rows(self):\n \"\"\"\n Html row representation of a ContinuousParameter.\n \"\"\"\n return \"<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\".format(self.label, 'Continuous', str(self._range))\n\n\nclass UnitCube(Domain):\n \"\"\"\n The unit domain [0, 1]^d\n \"\"\"\n def __init__(self, n_inputs):\n params = [ContinuousParameter('u{0}'.format(i), 0, 1) for i in np.arange(n_inputs)]\n super(UnitCube, self).__init__(params)\n"
] | [
[
"numpy.arange",
"numpy.atleast_1d",
"numpy.atleast_2d",
"numpy.array",
"numpy.sum",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AprilXiaoyanLiu/whitenoise-system | [
"0e94d2cc8114b97a61d5d2e45278428f91f1e687",
"0e94d2cc8114b97a61d5d2e45278428f91f1e687",
"0e94d2cc8114b97a61d5d2e45278428f91f1e687"
] | [
"synth/snsynth/pytorch/nn/privacy_utils.py",
"sql/tests/query/test_having.py",
"synth/snsynth/mwem.py"
] | [
"import torch\nimport torch.nn as nn\nimport math\nimport numpy as np\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\n\n\ndef pate(data, teachers, lap_scale, device=\"cpu\"):\n \"\"\"PATE implementation for GANs.\n \"\"\"\n num_teachers = len(teachers)\n labels = torch.Tensor(num_teachers, data.shape[0]).type(torch.int64).to(device)\n for i in range(num_teachers):\n output = teachers[i](data)\n pred = (output > 0.5).type(torch.Tensor).squeeze().to(device)\n # print(pred.shape)\n # print(labels[i].shape)\n labels[i] = pred\n\n votes = torch.sum(labels, dim=0).unsqueeze(1).type(torch.DoubleTensor).to(device)\n noise = torch.from_numpy(np.random.laplace(loc=0, scale=1 / lap_scale, size=votes.size())).to(\n device\n )\n noisy_votes = votes + noise\n noisy_labels = (noisy_votes > num_teachers / 2).type(torch.DoubleTensor).to(device)\n\n return noisy_labels, votes\n\n\ndef moments_acc(num_teachers, votes, lap_scale, l_list, device=\"cpu\"):\n q = (2 + lap_scale * torch.abs(2 * votes - num_teachers)) / (\n 4 * torch.exp(lap_scale * torch.abs(2 * votes - num_teachers))\n ).to(device)\n\n alpha = []\n for l_val in l_list:\n a = 2 * lap_scale ** 2 * l_val * (l_val + 1)\n t_one = (1 - q) * torch.pow((1 - q) / (1 - math.exp(2 * lap_scale) * q), l_val)\n t_two = q * torch.exp(2 * lap_scale * l_val)\n t = t_one + t_two\n alpha.append(torch.clamp(t, max=a).sum())\n\n return torch.DoubleTensor(alpha).to(device)\n",
"import os\nimport subprocess\nimport copy\nfrom snsql.sql.privacy import Privacy\nimport pytest\nimport numpy as np\n\nimport pandas as pd\nfrom pandasql import sqldf\nimport math\n\nfrom snsql.metadata import Metadata\nfrom snsql.sql import PrivateReader\nfrom snsql.sql.reader.pandas import PandasReader\n\ngit_root_dir = subprocess.check_output(\"git rev-parse --show-toplevel\".split(\" \")).decode(\"utf-8\").strip()\n\nmeta_path = os.path.join(git_root_dir, os.path.join(\"datasets\", \"PUMS.yaml\"))\ncsv_path = os.path.join(git_root_dir, os.path.join(\"datasets\", \"PUMS.csv\"))\n\nmeta = Metadata.from_file(meta_path)\nmeta[\"PUMS.PUMS\"].censor_dims = False\n\npums_schema_path = os.path.join(\"datasets\", \"PUMS.yaml\")\n\n\nclass TestBaseTypes:\n def setup_class(cls):\n meta = Metadata.from_file(meta_path)\n meta[\"PUMS.PUMS\"].censor_dims = False\n df = pd.read_csv(csv_path)\n reader = PandasReader(df, meta)\n private_reader = PrivateReader(reader, meta, privacy=Privacy(epsilon=3.0, delta=10e-3))\n cls.reader = private_reader\n\n def test_queries(self, test_databases):\n query = \"SELECT age, sex, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, sex HAVING income > 100000\"\n privacy = Privacy(10.0, 0.1)\n readers = test_databases.get_private_readers(privacy=privacy, database='PUMS', overrides={'censor_dims': False})\n for reader in readers:\n res = [len(test_databases.to_tuples(reader.execute(query))) for i in range(5)]\n assert np.mean(res) < 115 and np.mean(res) > 10 # actual is 14, but noise is huge\n\n query = \"SELECT age, sex, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, sex HAVING sex = 1\"\n res = self.reader.execute(query)\n assert len(res) == 74\n\n query = \"SELECT age, sex, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, sex HAVING income > 100000 OR sex = 1\"\n res = self.reader.execute(query)\n assert len(res) > 80 and len(res) < 150\n\n query = \"SELECT age, COUNT(*) FROM PUMS.PUMS GROUP BY age HAVING age < 30 OR age > 60\"\n res = self.reader.execute(query)\n assert len(res) == 43\n\n # # this one is indeterminate behavior based on engine, but works on PrivateReader\n # query = \"SELECT age * 1000 as age, COUNT(*) FROM PUMS.PUMS GROUP BY age HAVING age < 30000 OR age > 60000\"\n # res = self.reader.execute(query)\n # assert len(res) == 43\n\n query = \"SELECT age as age, COUNT(*) FROM PUMS.PUMS GROUP BY age HAVING age * 1000 < 30000 OR age * 2 > 120\"\n res = self.reader.execute(query)\n assert len(res) == 43\n\n query = \"SELECT age, COUNT(*) AS n FROM PUMS.PUMS GROUP BY age HAVING (age < 30 OR age > 60) AND n > 10\"\n res = self.reader.execute(query)\n assert len(res) < 25 # [len is 16 for non-private]\n\n query = \"SELECT age, COUNT(*) * 1000 AS n FROM PUMS.PUMS GROUP BY age HAVING (age < 30 OR age > 60) AND n > 10000\"\n res = self.reader.execute(query)\n assert len(res) < 25 #[len is 16 for non-private]\n\n query = \"SELECT age, COUNT(*) AS n FROM PUMS.PUMS GROUP BY age HAVING (age < 30 OR age > 60) AND n * 100 / 2 > 500\"\n res = self.reader.execute(query)\n assert len(res) < 25 #[len is 16 for non-private]\n\nclass TestOtherTypes:\n def setup_class(self):\n meta = Metadata.from_file(meta_path)\n meta[\"PUMS.PUMS\"].censor_dims = False\n meta[\"PUMS.PUMS\"][\"sex\"].type = \"int\"\n meta[\"PUMS.PUMS\"][\"educ\"].type = \"int\"\n meta[\"PUMS.PUMS\"][\"married\"].type = \"bool\"\n df = pd.read_csv(csv_path)\n reader = PandasReader(df, meta)\n private_reader = PrivateReader(reader, meta, 
privacy=Privacy(epsilon=10.0, delta=10e-3))\n self.reader = private_reader\n\n def test_queries(self):\n query = \"SELECT age, sex, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, sex HAVING income > 100000\"\n res = [len(self.reader.execute(query)) for i in range(5)]\n assert np.mean(res) < 115 and np.mean(res) > 10 # actual is 14, but noise is huge\n\n query = \"SELECT age, sex, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, sex HAVING sex = 1\"\n res = self.reader.execute(query)\n assert len(res) == 74\n\n query = \"SELECT age, sex, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, sex HAVING income > 100000 OR sex = 1\"\n res = self.reader.execute(query)\n assert len(res) > 80 and len(res) < 150\n\n query = \"SELECT age, COUNT(*) FROM PUMS.PUMS GROUP BY age HAVING age < 30 OR age > 60\"\n res = self.reader.execute(query)\n assert len(res) == 43\n\n # # this one is indeterminate behavior based on engine, works with PrivateReader\n # query = \"SELECT age * 1000 as age, COUNT(*) FROM PUMS.PUMS GROUP BY age HAVING age < 30000 OR age > 60000\"\n # res = self.reader.execute(query)\n # assert len(res) == 43\n\n query = \"SELECT age as age, COUNT(*) FROM PUMS.PUMS GROUP BY age HAVING age * 1000 < 30000 OR age * 2 > 120\"\n res = self.reader.execute(query)\n assert len(res) == 43\n\n query = \"SELECT age, COUNT(*) AS n FROM PUMS.PUMS GROUP BY age HAVING (age < 30 OR age > 60) AND n > 10\"\n res = self.reader.execute(query)\n assert len(res) < 25 #[len is 16 for non-private]\n\n query = \"SELECT age, COUNT(*) * 1000 AS n FROM PUMS.PUMS GROUP BY age HAVING (age < 30 OR age > 60) AND n > 10000\"\n res = self.reader.execute(query)\n assert len(res) < 25 #[len is 16 for non-private]\n\n query = \"SELECT age, COUNT(*) AS n FROM PUMS.PUMS GROUP BY age HAVING (age < 30 OR age > 60) AND n * 100 / 2 > 500\"\n res = self.reader.execute(query)\n assert len(res) < 25 # [len is 16 for non-private]\n\n query = \"SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married HAVING married = 1\"\n res = self.reader.execute(query)\n assert len(res) == 72\n\n query = \"SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married HAVING n > 10 OR married = 1\"\n res = self.reader.execute(query)\n assert len(res) > 75\n\n query = \"SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married HAVING married\"\n res = self.reader.execute(query)\n assert len(res) == 72\n\n query = \"SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married HAVING n > 10 OR married\"\n res = self.reader.execute(query)\n assert len(res) > 75\n\n query = \"SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married HAVING married = 0\"\n res = self.reader.execute(query)\n assert len(res) == 73\n\n query = \"SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married HAVING n > 10 OR married = 0\"\n res = self.reader.execute(query)\n assert len(res) > 75\n\n query = \"SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married HAVING NOT married\"\n res = self.reader.execute(query)\n assert len(res) == 73\n\n query = \"SELECT age, married, COUNT(*) AS n, SUM(income) AS income FROM PUMS.PUMS GROUP BY age, married HAVING n > 10 OR NOT married\"\n res = self.reader.execute(query)\n assert len(res) > 75\n",
"import math\nimport random\nimport warnings\n\nfrom functools import wraps\n\nimport numpy as np\nimport pandas as pd\n\nfrom snsynth.base import SDGYMBaseSynthesizer\n\n\nclass MWEMSynthesizer(SDGYMBaseSynthesizer):\n\n def __init__(\n self,\n epsilon=3.0,\n q_count=400,\n iterations=30,\n mult_weights_iterations=20,\n splits=[],\n split_factor=None,\n max_bin_count=500,\n custom_bin_count={},\n max_retries_exp_mechanism=1000\n ):\n \"\"\"\n N-Dimensional numpy implementation of MWEM.\n (http://users.cms.caltech.edu/~katrina/papers/mwem-nips.pdf)\n\n From the paper:\n \"[MWEM is] a broadly applicable, simple, and easy-to-implement\n algorithm, capable of substantially improving the performance of\n linear queries on many realistic datasets...\n (circa 2012)...MWEM matches the best known and nearly\n optimal theoretical accuracy guarantees for differentially private\n data analysis with linear queries.\"\n\n Linear queries used for sampling in this implementation are\n random contiguous slices of the n-dimensional numpy array.\n\n :param q_count: Number of random queries in the pool to generate.\n Must be more than # of iterations, recommended ~10-15x iterations,\n defaults to 400\n :type q_count: int, optional\n :param epsilon: Privacy epsilon for DP, defaults to 3.0\n :type epsilon: float, optional\n :param iterations: Number of iterations of MWEM, defaults to 30\n :type iterations: int, optional\n :param mult_weights_iterations: Number of iterations of MW, per\n iteration of MWEM, defaults to 20\n :type mult_weights_iterations: int, optional\n :param splits: Allows you to specify feature dependence when creating\n internal histograms.\n Columns that are known to be dependent can be kept together.\n Example: splits=[[0,1],[2,3]] where\n columns 0 and 1 are dependent, columns 2 and 3 are dependent,\n and between groupings there is independence, defaults to []\n :type splits: list, optional\n :param split_factor: If splits not specified, can instead subdivide\n pseudo-randomly. For example, split_factor=3\n will make groupings of features of size 3 for the histograms.\n Note: this will likely make synthetic data worse.\n defaults to None\n :type split_factor: int, optional\n :param max_bin_count: MWEM is not good at continuous features, and\n is not purpose built for the feature. We can, however,\n fudge it by turning a continuous feature into a discrete feature with\n artificial binning. This is the maximum number\n of bins that MWEM will create. More bins leads to a huge slow down in\n MWEM due to dimensionality exploding the histogram\n size. Note, defaults to 500\n :type max_bin_count: int, optional\n :param custom_bin_count: If you have a specific bin assignment for\n continuous features (i.e. 
column 3 -> 20 bins), specify it with\n a dict here, defaults to {}\n :type custom_bin_count: dict, optional\n \"\"\"\n self.epsilon = epsilon\n self.q_count = q_count\n self.iterations = iterations\n self.mult_weights_iterations = mult_weights_iterations\n self.synthetic_data = None\n self.data_bins = None\n self.real_data = None\n self.splits = splits\n self.split_factor = split_factor\n self.max_bin_count = max_bin_count\n self.mins_maxes = {}\n self.scale = {}\n self.custom_bin_count = custom_bin_count\n\n # Pandas check\n self.pandas = False\n self.pd_cols = None\n self.pd_index = None\n\n # Query trackers\n self.q_values = None\n self.max_retries_exp_mechanism = max_retries_exp_mechanism\n\n @wraps(SDGYMBaseSynthesizer.fit)\n def fit(self, data, categorical_columns=None, ordinal_columns=None):\n \"\"\"\n Follows sdgym schema to be compatible with their benchmark system.\n\n :param data: Dataset to use as basis for synthetic data\n :type data: np.ndarray\n :return: synthetic data, real data histograms\n :rtype: np.ndarray\n \"\"\"\n if isinstance(data, np.ndarray):\n self.data = data.copy()\n elif isinstance(data, pd.DataFrame):\n self.pandas = True\n for col in data.columns:\n data[col] = pd.to_numeric(data[col], errors=\"ignore\")\n self.data = data.to_numpy().copy()\n self.pd_cols = data.columns\n self.pd_index = data.index\n else:\n raise ValueError(\"Data must be a numpy array or pandas dataframe.\")\n if self.split_factor is not None and self.splits == []:\n self.splits = self._generate_splits(data.T.shape[0], self.split_factor)\n elif self.split_factor is None and self.splits == []:\n # Set split factor to default to shape[1]\n self.split_factor = data.shape[1]\n warnings.warn(\n \"Unset split_factor and splits, defaulting to include all columns \"\n + \"- this can lead to slow performance or out of memory error. 
\"\n + \" split_factor: \" + str(self.split_factor),\n Warning,\n )\n self.splits = self._generate_splits(data.T.shape[0], self.split_factor)\n\n self.splits = np.array(self.splits)\n if self.splits.size == 0:\n self.histograms = self._histogram_from_data_attributes(\n self.data, [np.arange(self.data.shape[1])]\n )\n else:\n self.histograms = self._histogram_from_data_attributes(self.data, self.splits)\n self.q_values = []\n for h in self.histograms:\n # h[1] is dimensions for each histogram\n self.q_values.append(self._compose_arbitrary_slices(self.q_count, h[1]))\n # Run the algorithm\n self.synthetic_histograms = self.mwem()\n\n @wraps(SDGYMBaseSynthesizer.sample)\n def sample(self, samples):\n \"\"\"\n Creates samples from the histogram data.\n Follows sdgym schema to be compatible with their benchmark system.\n NOTE: We are sampleing from each split dimensional\n group as though they are *independent* from one another.\n We have essentially created len(splits) DP histograms as\n if they are separate databases, and combine the results into\n a single sample.\n\n :param samples: Number of samples to generate\n :type samples: int\n :return: N samples\n :rtype: list(np.ndarray)\n \"\"\"\n synthesized_columns = ()\n first = True\n for fake, _, split in self.synthetic_histograms:\n s = []\n fake_indices = np.arange(len(np.ravel(fake)))\n fake_distribution = np.ravel(fake)\n norm = np.sum(fake)\n for _ in range(samples):\n s.append(np.random.choice(fake_indices, p=(fake_distribution / norm)))\n s_unraveled = []\n for ind in s:\n s_unraveled.append(np.unravel_index(ind, fake.shape))\n # Here we make scale adjustments to match the original\n # data\n np_unraveled = np.array(s_unraveled)\n for i in range(np_unraveled.shape[-1]):\n min_c, max_c = self.mins_maxes[str(split[i])]\n # TODO: Deal with the 0 edge case when scaling\n # i.e. scale factor * 0th bin is 0,\n # but should still scale appropriately\n np_unraveled[:, i] = np_unraveled[:, i] * self.scale[str(split[i])]\n np_unraveled[:, i] = np_unraveled[:, i] + min_c\n if first:\n synthesized_columns = np_unraveled\n first = False\n else:\n synthesized_columns = np.hstack((synthesized_columns, np_unraveled))\n # Recombine the independent distributions into a single dataset\n combined = synthesized_columns\n # Reorder the columns to mirror their original order\n r = self._reorder(self.splits)\n if self.pandas:\n df = pd.DataFrame(combined[:, r], columns=self.pd_cols)\n return df\n else:\n return combined[:, r]\n\n def mwem(self):\n \"\"\"\n Runner for the mwem algorithm.\n Initializes the synthetic histogram, and updates it\n for self.iterations using the exponential mechanism and\n multiplicative weights. 
Draws from the initialized query store\n for measurements.\n\n :return: synth_hist, self.histogram - synth_hist is the\n synthetic data histogram, self.histogram is original histo\n :rtype: np.ndarray, np.ndarray\n \"\"\"\n a_values = []\n for i, h in enumerate(self.histograms):\n hist = h[0]\n dimensions = h[1]\n split = h[3]\n queries = self.q_values[i]\n synth_hist = self._initialize_a(hist, dimensions)\n measurements = {}\n # NOTE: Here we perform a privacy check,\n # because if the histogram dimensions are\n # greater than the iterations, this can be\n # a big privacy risk (the sample queries will\n # otherwise be able to match the actual\n # distribution)\n # This usually occurs with a split factor of 1,\n # so that each attribute is independent of the other\n flat_dim = 1\n for j in dimensions:\n flat_dim *= j\n if 2 * flat_dim <= self.iterations:\n warnings.warn(\n \"Flattened dimensionality of synthetic histogram is less than\"\n + \" the number of iterations. This is a privacy risk.\"\n + \" Consider increasing your split_factor (especially if it is 1), \"\n + \"or decreasing the number of iterations. \"\n + \"Dim: \" + str(flat_dim) + \" Split: \" + str(split),\n Warning,\n )\n\n for i in range(self.iterations):\n # print(\"Iteration: \" + str(i))\n qi = self._exponential_mechanism(\n hist, synth_hist, queries, ((self.epsilon / (2 * self.iterations)) / len(self.histograms))\n )\n # Make sure we get a different query to measure:\n count_retries = 0\n while qi in measurements:\n if count_retries > self.max_retries_exp_mechanism:\n raise ValueError(\n \"Did not find a different query to measure via exponential mechanism. Try \"\n + \"decreasing the number of iterations or increasing the number of allowed \"\n + \"retries.\")\n\n qi = self._exponential_mechanism(\n hist, synth_hist, queries, ((self.epsilon / (2 * self.iterations)) / len(self.histograms))\n )\n count_retries += 1\n\n # NOTE: Add laplace noise here with budget\n evals = self._evaluate(queries[qi], hist)\n lap = self._laplace(\n (2 * self.iterations * len(self.histograms)) / (self.epsilon * len(dimensions))\n )\n measurements[qi] = evals + lap\n # Improve approximation with Multiplicative Weights\n synth_hist = self._multiplicative_weights(\n synth_hist, queries, measurements, hist, self.mult_weights_iterations\n )\n a_values.append((synth_hist, hist, split))\n return a_values\n\n def _initialize_a(self, histogram, dimensions):\n \"\"\"\n Initializes a uniform distribution histogram from\n the given histogram with dimensions\n\n :param histogram: Reference histogram\n :type histogram: np.ndarray\n :param dimensions: Reference dimensions\n :type dimensions: np.ndarray\n :return: New histogram, uniformly distributed according to\n reference histogram\n :rtype: np.ndarray\n \"\"\"\n # NOTE: Could actually use a distribution from real data with some budget,\n # as opposed to using this uniform dist (would take epsilon as argument,\n # and detract from it)\n n = np.sum(histogram)\n value = n / np.prod(dimensions)\n synth_hist = np.zeros_like(histogram)\n synth_hist += value\n return synth_hist\n\n def _histogram_from_data_attributes(self, data, splits=[]):\n \"\"\"\n Create a histogram from given data\n\n :param data: Reference histogram\n :type data: np.ndarray\n :return: Histogram over given data, dimensions,\n bins created (output of np.histogramdd)\n :rtype: np.ndarray, np.shape, np.ndarray\n \"\"\"\n histograms = []\n for split in splits:\n split_data = data[:, split]\n mins_data = []\n maxs_data = []\n dims_sizes = 
[]\n # Transpose for column wise iteration\n for i, column in enumerate(split_data.T):\n min_c = min(column)\n max_c = max(column)\n # TODO: Make these noisy min/max\n mins_data.append(min_c)\n maxs_data.append(max_c)\n # Dimension size (number of bins)\n bin_count = int(max_c - min_c + 1)\n # Here we track the min and max for the column,\n # for sampling\n self.mins_maxes[str(split[i])] = (min_c, max_c)\n if bin_count > self.max_bin_count:\n # Note the limitations of MWEM here, specifically in the case of continuous data.\n warnings.warn(\n \"Bin count \"\n + str(bin_count)\n + \" in column: \"\n + str(split[i])\n + \" exceeds max_bin_count, defaulting to: \"\n + str(self.max_bin_count)\n + \". Is this a continuous variable?\",\n Warning,\n )\n bin_count = self.max_bin_count\n # We track a scaling factor per column, for sampling\n self.scale[str(split[i])] = (max_c - min_c + 1) / self.max_bin_count\n else:\n self.scale[str(split[i])] = 1\n if str(split[i]) in self.custom_bin_count:\n bin_count = int(self.custom_bin_count[str(split[i])])\n self.scale[str(split[i])] = 1\n dims_sizes.append(bin_count)\n # Produce an N,D dimensional histogram, where\n # we pre-specify the bin sizes to correspond with\n # our ranges above\n histogram, bins = np.histogramdd(split_data, bins=dims_sizes)\n # Return histogram, dimensions\n histograms.append((histogram, dims_sizes, bins, split))\n return histograms\n\n def _exponential_mechanism(self, hist, synth_hist, queries, eps):\n \"\"\"\n Refer to paper for in depth description of\n Exponential Mechanism.\n Parametrized with epsilon value epsilon/(2 * iterations)\n\n :param hist: Basis histogram\n :type hist: np.ndarray\n :param synth_hist: Synthetic histogram\n :type synth_hist: np.ndarray\n :param queries: Queries to draw from\n :type queries: list\n :param eps: Budget\n :type eps: float\n :return: # of errors\n :rtype: int\n \"\"\"\n errors = [\n abs(self._evaluate(queries[i], hist) - self._evaluate(queries[i], synth_hist)) * (eps / 2.0)\n for i in range(len(queries))\n ]\n maxi = max(errors)\n errors = [math.exp(errors[i] - maxi) for i in range(len(errors))]\n r = random.random()\n e_s = sum(errors)\n c = 0\n for i in range(len(errors)):\n c += errors[i]\n if c > r * e_s:\n return i\n return len(errors) - 1\n\n def _multiplicative_weights(self, synth_hist, queries, m, hist, iterate):\n \"\"\"\n Multiplicative weights update algorithm,\n used to boost the synthetic data accuracy given measurements m.\n Run for iterate times\n\n :param synth_hist: Synthetic histogram\n :type synth_hist: np.ndarray\n :param queries: Queries to draw from\n :type queries: list\n :param m: Measurements taken from real data for each qi query\n :type m: dict\n :param hist: Basis histogram\n :type hist: np.ndarray\n :param iterate: Number of iterations to run mult weights\n :type iterate: iterate\n :return: synth_hist\n :rtype: np.ndarray\n \"\"\"\n sum_a = np.sum(synth_hist)\n for _ in range(iterate):\n for qi in m:\n error = m[qi] - self._evaluate(queries[qi], synth_hist)\n # Perform the weights update\n query_update = self._binary_replace_in_place_slice(\n np.zeros_like(synth_hist.copy()), queries[qi])\n\n # Apply the update\n a_multiplier = np.exp(query_update * error / (2.0 * sum_a))\n a_multiplier[a_multiplier == 0.0] = 1.0\n synth_hist = synth_hist * a_multiplier\n # Normalize again\n count_a = np.sum(synth_hist)\n synth_hist = synth_hist * (sum_a / count_a)\n return synth_hist\n\n def _compose_arbitrary_slices(self, num_s, dimensions):\n \"\"\"\n Here, dimensions is the 
shape of the histogram\n We want to return a list of length num_s, containing\n random slice objects, given the dimensions\n These are our linear queries\n\n :param num_s: Number of queries (slices) to generate\n :type num_s: int\n :param dimensions: Dimensions of histogram to be sliced\n :type dimensions: np.shape\n :return: Collection of random np.s_ (linear queries) for\n a dataset with dimensions\n :rtype: list\n \"\"\"\n slices_list = []\n # TODO: For analysis, generate a distribution of slice sizes,\n # by running the list of slices on a dimensional array\n # and plotting the bucket size\n slices_list = []\n for _ in range(num_s):\n inds = []\n for _, s in np.ndenumerate(dimensions):\n # Random linear sample, within dimensions\n a = np.random.randint(s)\n b = np.random.randint(s)\n l_b = min(a, b)\n u_b = max(a, b) + 1\n pre = []\n pre.append(l_b)\n pre.append(u_b)\n inds.append(pre)\n # Compose slices\n sl = []\n for ind in inds:\n sl.append(np.s_[ind[0]: ind[1]])\n slices_list.append(sl)\n return slices_list\n\n def _evaluate(self, a_slice, data):\n \"\"\"\n Evaluate a count query i.e. an arbitrary slice\n\n :param a_slice: Random slice within bounds of flattened data length\n :type a_slice: np.s_\n :param data: Data to evaluate from (synthetic dset)\n :type data: np.ndarray\n :return: Count from data within slice\n :rtype: float\n \"\"\"\n # We want to count the number of objects in an\n # arbitrary slice of our collection\n # We use np.s_[arbitrary slice] as our queries\n e = data.T[tuple(a_slice)]\n\n if isinstance(e, np.ndarray):\n return np.sum(e)\n else:\n return e\n\n def _binary_replace_in_place_slice(self, data, a_slice):\n \"\"\"\n We want to create a binary copy of the data,\n so that we can easily perform our error multiplication\n in MW. 
Convenience function.\n\n :param data: Data\n :type data: np.ndarray\n :param a_slice: Slice\n :type a_slice: np.s_\n :return: Return data, where the range specified\n by a_slice is all 1s.\n :rtype: np.ndarray\n \"\"\"\n view = data.copy()\n view.T[tuple(a_slice)] = 1.0\n return view\n\n def _reorder(self, splits):\n \"\"\"\n Given an array of dimensionality splits (column indices)\n returns the corresponding reorder array (indices to return\n columns to original order)\n Example:\n original = [[1, 2, 3, 4, 5, 6],\n [ 6, 7, 8, 9, 10, 11]]\n\n splits = [[1,3,4],[0,2,5]]\n\n mod_data = [[2 4 5 1 3 6]\n [ 7 9 10 6 8 11]]\n\n reorder = [3 0 4 1 2 5]\n\n :param splits: 2d list with splits (column indices)\n :type splits: array of arrays\n :return: 2d list with splits (column indices)\n :rtype: array of arrays\n \"\"\"\n flat = np.concatenate(np.asarray(splits)).ravel()\n reordered = np.zeros(len(flat))\n for i, ind in enumerate(flat):\n reordered[ind] = i\n return reordered.astype(int)\n\n def _generate_splits(self, n_dim, factor):\n \"\"\"\n If user specifies, do the work and figure out how to divide the dimensions\n into even splits to speed up MWEM\n Last split will contain leftovers <= sizeof(factor)\n\n :param n_dim: Total # of dimensions\n :type n_dim: int\n :param factor: Desired size of the splits\n :type factor: int\n :return: Splits\n :rtype: np.array(np.array(),...)\n \"\"\"\n # Columns indices\n indices = np.arange(n_dim)\n\n # Split intelligently\n fits = int((np.floor(len(indices) / factor)) * factor)\n even_inds = indices[:fits].copy().reshape((int(len(indices) / factor), factor))\n s1 = even_inds.tolist()\n if indices[fits:].size != 0:\n s1.append(indices[fits:])\n s2 = [np.array(l_val) for l_val in s1]\n return np.array(s2)\n\n def _laplace(self, sigma):\n \"\"\"\n Laplace mechanism\n\n :param sigma: Laplace scale param sigma\n :type sigma: float\n :return: Random value from laplace distribution [-1,1]\n :rtype: float\n \"\"\"\n return sigma * np.log(random.random()) * np.random.choice([-1, 1])\n"
] | [
[
"torch.abs",
"torch.Tensor",
"torch.sum",
"torch.exp",
"torch.nn.init.xavier_uniform_",
"torch.clamp",
"torch.DoubleTensor"
],
[
"pandas.read_csv",
"numpy.mean"
],
[
"numpy.hstack",
"numpy.unravel_index",
"numpy.random.choice",
"numpy.asarray",
"numpy.arange",
"numpy.ndenumerate",
"pandas.DataFrame",
"numpy.histogramdd",
"numpy.zeros_like",
"numpy.prod",
"numpy.exp",
"numpy.ravel",
"numpy.array",
"pandas.to_numeric",
"numpy.sum",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
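Editor's note: the record above includes the MWEMSynthesizer class, whose docstring describes a fit/sample interface over differentially private histograms. The following is a minimal usage sketch, not part of the dataset; the import path, the toy DataFrame, and the chosen hyperparameters are assumptions inferred from the code shown in the record.

import numpy as np
import pandas as pd
# Assumed import path for the MWEMSynthesizer defined in the record above.
from snsynth.mwem import MWEMSynthesizer

# Small discrete toy table; MWEM histograms work best on low-cardinality integer columns.
df = pd.DataFrame({
    "age_bucket": np.random.randint(0, 8, size=500),
    "sex": np.random.randint(0, 2, size=500),
    "educ": np.random.randint(0, 4, size=500),
})

# split_factor groups columns into joint histograms (see the class docstring);
# grouping all three columns keeps the flattened histogram larger than the iteration count.
synth = MWEMSynthesizer(epsilon=3.0, split_factor=3, iterations=30)
synth.fit(df)

# sample() returns a DataFrame with the same columns when the input was a DataFrame.
synthetic = synth.sample(100)
print(synthetic.head())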
DewanshuHaswani/fastai | [
"fa3aed62e9f7b842d335a92aa20fa7e1b2a7b266"
] | [
"mnist_pytorch/previewer.py"
] | [
"import torch\nimport matplotlib.pyplot as plt\nfrom torchvision import datasets, transforms\nfrom random import choice\n\nBATCH_SIZE=64\n\n# Load the mnist dataset\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"./data\", \n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=BATCH_SIZE\n )\n\ntrain_data = train_loader.dataset.train_data\n\nchar = choice(train_data)\n\nprint(char)\n\nplt.imshow(char.numpy())\nplt.show()\n\n"
] | [
[
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kaniblu/vhda | [
"35941097ef552568c29f66cc55d8ce1927f34978",
"35941097ef552568c29f66cc55d8ce1927f34978"
] | [
"loopers/inferencer/evaluator.py",
"embeds/fasttext.py"
] | [
"__all__ = [\"EvaluatingInferencer\"]\n\nfrom dataclasses import dataclass\nfrom typing import Sequence\n\nimport torch\nimport torch.utils.data as td\n\nimport utils\nfrom datasets import BatchData\nfrom .inferencer import Inferencer\nfrom evaluators import FinegrainedEvaluator\n\n\n@dataclass\nclass EvaluatingInferencer(Inferencer):\n evaluators: Sequence[FinegrainedEvaluator] = tuple()\n _requires_lexical_form: bool = utils.private_field(default=False)\n\n def __post_init__(self):\n super().__post_init__()\n self._requires_lexical_form = any(e.requires_lexical_form\n for e in self.evaluators)\n\n def on_run_started(self, dataloader: td.DataLoader) -> td.DataLoader:\n dataloader = super().on_run_started(dataloader)\n for evaluator in self.evaluators:\n evaluator.reset()\n return dataloader\n\n def on_batch_ended(self, batch: BatchData, pred: BatchData, outputs\n ) -> utils.TensorMap:\n stats = dict(super().on_batch_ended(batch, pred, outputs))\n batch_lex, pred_lex = None, None\n if self._requires_lexical_form:\n batch_lex = list(map(self.processor.lexicalize_global, batch))\n pred_lex = list(map(self.processor.lexicalize_global, pred))\n with torch.no_grad():\n for evaluator in self.evaluators:\n if evaluator.requires_lexical_form:\n eval_stats = evaluator.update(batch_lex, pred_lex, outputs)\n else:\n eval_stats = evaluator.update(batch, pred, outputs)\n stats.update(eval_stats or dict())\n return stats\n\n def on_run_ended(self, stats: utils.TensorMap) -> utils.TensorMap:\n stats = dict(super().on_run_ended(stats))\n with torch.no_grad():\n for evaluator in self.evaluators:\n stats.update(evaluator.get() or dict())\n return stats\n",
"__all__ = [\"FastText\", \"FastTextEmbeddings\"]\n\nimport subprocess\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport numpy as np\n\nfrom utils import UnsupportedOperationsError\nfrom .embeddings import Embeddings\n\n\n@dataclass\nclass FastText:\n ft_path: str\n model_path: str\n dtype: type = np.float32\n process: Optional[subprocess.Popen] = field(\n init=False, hash=False, compare=False, repr=False,\n default=None\n )\n\n def query(self, word):\n self.process.stdin.write(f\"{word}\\n\".encode())\n self.process.stdin.flush()\n line = self.process.stdout.readline().decode()\n line = \" \".join(line.split()[1:])\n return np.fromstring(line, dtype=self.dtype, sep=\" \")\n\n def __enter__(self):\n self.process = subprocess.Popen(\n args=[self.ft_path, \"print-word-vectors\", self.model_path],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.process is not None:\n self.process.kill()\n self.process = None\n\n\n@dataclass\nclass FastTextEmbeddings(Embeddings):\n ft: FastText\n _dim: int = field(\n init=False, compare=False, hash=False, repr=False,\n default=None\n )\n\n @property\n def dim(self) -> int:\n if self._dim is None:\n self._dim = len(self.ft.query(\".\"))\n return self._dim\n\n def preload(self):\n self.ft.__enter__()\n return self\n\n def __getitem__(self, item) -> np.ndarray:\n return self.ft.query(item)\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n raise UnsupportedOperationsError\n"
] | [
[
"torch.no_grad"
],
[
"numpy.fromstring"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
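A brief usage sketch for the subprocess-backed FastText wrapper in embeds/fasttext.py above; the module path and the binary/model paths are placeholders, not values from the record.

# Assumed module path matching the record's file_path "embeds/fasttext.py".
from embeds.fasttext import FastText

# FastText shells out to the fastText CLI ("print-word-vectors") and parses one vector per query.
with FastText(ft_path="/path/to/fasttext", model_path="/path/to/model.bin") as ft:
    vec = ft.query("hello")           # numpy array parsed from the subprocess stdout
    print(vec.shape, vec.dtype)       # dtype defaults to float32 in the dataclass

FastTextEmbeddings wraps the same object and infers its dimensionality lazily from a probe query; constructing it is omitted here because the Embeddings base class is not shown in the record.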
shamim-hussain/egt | [
"02187de16fcd672b8070191d29e9c9e7f681eb37",
"02187de16fcd672b8070191d29e9c9e7f681eb37",
"02187de16fcd672b8070191d29e9c9e7f681eb37"
] | [
"lib/base/xformer_layers/attention.py",
"lib/training/schemes/tsp/svd.py",
"lib/training/training_base.py"
] | [
"\nimport tensorflow as tf\ntfk = tf.keras\nfrom .shaping import move_dim\n\n\n\ndef move_ch2h(maybe_headed_tensor,\n channels_dim=-1, head_dim=1):\n if maybe_headed_tensor.shape.rank == 4:\n return move_dim(maybe_headed_tensor,\n from_dim=channels_dim,\n to_dim=head_dim)\n else:\n return maybe_headed_tensor\n\n\ndef merge_attention_heads(merge_type, headed_tensor):\n if merge_type == 'mean':\n return tf.reduce_mean(headed_tensor, axis=1)\n elif merge_type == 'max':\n return tf.reduce_max(headed_tensor, axis=1)\n elif merge_type == 'sum':\n return tf.reduce_sum(headed_tensor, axis=1)\n elif merge_type == 'prod':\n return tf.reduce_prod(headed_tensor, axis=1)\n else:\n raise ValueError(f'Unknown merge type \"{merge_type}\"')\n\n\ndef dot_product_attention(query, key, value, \n mask = None,\n attn_mask = None,\n scale_factor = None,\n bias = None,\n scale_logits = True,\n clip_logits_value = None,\n causal = False,\n pad = False,\n merge_heads = None,\n attn_scale_factor = None,\n return_logits = False,\n return_matrix = False,\n big_number = 1e9,\n scale_degree = False,\n ):\n\n query_shape = query.shape\n key_shape = key.shape\n value_shape = value.shape\n input_rank = query_shape.rank\n\n attention_dim = query_shape[-1]\n \n if pad:\n paddings = [(0,0)]*(input_rank-2) + [(1,0),(0,0)]\n key = tf.pad(key, paddings)\n value = tf.pad(value, paddings)\n\n # Create Priliminary Logits\n attention_logits = tf.matmul(query, key, transpose_b=True)\n\n\n # Scaling for dot product\n if scale_logits:\n attention_logits = attention_logits*(attention_dim**-.5)\n \n \n # Clipping for numerical stability\n if clip_logits_value is not None:\n if not isinstance(clip_logits_value, list):\n if isinstance(clip_logits_value, tuple):\n clip_logits_value = list(clip_logits_value)\n else:\n clip_logits_value = [-clip_logits_value, clip_logits_value, 0]\n if len(clip_logits_value) == 2:\n clip_logits_value.append(0)\n if len(clip_logits_value) < 3:\n raise ValueError\n \n # Clip before\n if clip_logits_value is not None and (not clip_logits_value[2]):\n attention_logits = tf.clip_by_value(attention_logits, *clip_logits_value[:2])\n\n # Scale factor and bias\n if scale_factor is not None:\n scale_factor = move_ch2h(scale_factor)\n attention_logits = attention_logits * scale_factor\n\n if bias is not None:\n bias = move_ch2h(bias)\n attention_logits = attention_logits + bias\n \n # Save for returning the logits\n logits_matrix = attention_logits\n\n # Clip after\n if clip_logits_value is not None and clip_logits_value[2]:\n attention_logits = tf.clip_by_value(attention_logits, *clip_logits_value[:2])\n\n # Masking\n if not mask is None:\n mask_rank = mask.shape.rank\n\n mask_slice = [Ellipsis]+[None]*(input_rank-mask_rank)+[slice(None)]\n mask = mask[mask_slice]\n\n if not mask.dtype is attention_logits.dtype:\n mask = tf.cast(mask, attention_logits.dtype)\n attention_logits = attention_logits + (mask-1)*big_number\n \n if not attn_mask is None:\n attn_mask = move_ch2h(attn_mask)\n if not attn_mask.dtype is attention_logits.dtype:\n attn_mask = tf.cast(attn_mask, attention_logits.dtype)\n attention_logits = attention_logits + (attn_mask-1)*big_number\n \n if causal:\n causal_mask_shape = [query.shape[-2], key.shape[-2]]\n if None in causal_mask_shape:\n causal_mask_shape = tf.shape(attention_logits)[-2:]\n\n causal_mask = tf.ones(causal_mask_shape,\n dtype=attention_logits.dtype)\n causal_mask = tf.linalg.band_part(causal_mask,-1,0)\n attention_logits = attention_logits + (causal_mask-1)*big_number\n \n \n # Softmax 
Attention\n attention_matrix = tf.nn.softmax(attention_logits, axis=-1)\n \n # Merge Heads\n if merge_heads is not None:\n attention_matrix = merge_attention_heads(merge_type=merge_heads,\n headed_tensor=attention_matrix)\n \n # Scale Attention Matrix\n if attn_scale_factor is not None:\n attn_scale_factor = move_ch2h(attn_scale_factor)\n attention_matrix = attention_matrix * attn_scale_factor\n \n output = tf.matmul(attention_matrix, value)\n \n if (attn_scale_factor is not None) and scale_degree:\n if mask is None:\n degree = tf.reduce_sum(attn_scale_factor,\n axis=-1, keepdims=True)\n else:\n degree = tf.reduce_sum(attn_scale_factor * mask,\n axis=-1, keepdims=True)\n output = output * tf.math.log(1+degree)\n \n if merge_heads is None:\n output.set_shape(query_shape[:-1]+value_shape[-1:])\n else:\n output.set_shape(query_shape[0:1]+query_shape[2:-1]+value_shape[-1:])\n\n\n # Format Outputs\n outputs = output\n\n if return_logits or return_matrix:\n outputs = (outputs,)\n \n if return_logits:\n logits = move_dim(logits_matrix, from_dim=1, to_dim=4)\n outputs = outputs + (logits,)\n \n if return_matrix:\n outputs = outputs + (attention_matrix,)\n\n return outputs\n",
"import tensorflow as tf\nfrom tensorflow.keras import (optimizers, losses, metrics)\nfrom tqdm import tqdm\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nimport os\n\nfrom lib.base.dotdict import HDict\nfrom lib.data.datasets.tsp import SVDDataset\nfrom lib.models.tsp.dc import DCSVDTransformer\nfrom lib.training.schemes.scheme_base import BaseSVDModelScheme\n\n\nclass TSPDCSVD(BaseSVDModelScheme):\n def get_default_config(self):\n config_dict = super().get_default_config()\n config_dict.update(\n dataset_name = 'tsp',\n batch_size = 8,\n prediction_bmult = 3,\n include_xpose = True,\n save_best_monitor = 'val_xent',\n rlr_monitor = 'val_xent',\n )\n return config_dict\n \n def get_dataset_config(self, splits=['training','validation']):\n dataset_config, _ = super().get_dataset_config()\n return dataset_config, SVDDataset\n \n def get_model_config(self):\n config = self.config\n model_config, _ = super().get_model_config()\n model_config.update(\n use_node_embeddings = (config.edge_channel_type not in\n ['residual','constrained']) ,\n )\n return model_config, DCSVDTransformer\n \n def get_loss(self):\n loss = losses.SparseCategoricalCrossentropy(from_logits=True, \n name='xentropy')\n return loss\n\n def get_metrics(self):\n xent = metrics.SparseCategoricalCrossentropy(from_logits=True,\n name='xent')\n return [xent,'acc']\n \n def do_evaluations_on_split(self,split):\n dataset = getattr(self,split)\n model = self.model\n strategy = self.strategy\n \n targs = []\n preds = []\n prog_bar = tqdm()\n def collate_fn(fmat,tmat,outp):\n bool_mask = (fmat.numpy().squeeze() >= 0)\n targ = tmat.numpy().squeeze()[bool_mask]\n pred = outp.numpy().squeeze().argmax(-1)[bool_mask]\n \n targs.append(targ)\n preds.append(pred)\n prog_bar.update()\n \n \n @tf.function\n def prediction_step(*inputs):\n return model(inputs, training=False)\n \n if self.config.distributed:\n dataset = strategy.experimental_distribute_dataset(dataset)\n \n @tf.function\n def make_predictions():\n for i,t in dataset:\n inps = tuple(i[n] for n in self.model.input_names)\n fmat = i['feature_matrix']\n tmat = t['target']\n \n if not self.config.distributed:\n outp = prediction_step(inps)\n else:\n outp = strategy.experimental_run_v2(prediction_step, args=inps)\n outp = tf.concat(outp.values, axis=0)\n fmat = tf.concat(fmat.values, axis=0)\n tmat = tf.concat(tmat.values, axis=0)\n \n tf.py_function(collate_fn, [fmat, tmat, outp], [])\n \n make_predictions()\n\n targs = np.concatenate(targs, axis=0)\n preds = np.concatenate(preds, axis=0)\n prog_bar.close()\n\n acc = accuracy_score(targs, preds)\n prec = precision_score(targs, preds)\n rec = recall_score(targs, preds)\n f1 = f1_score(targs,preds)\n\n print(f'Accuracy = {acc}')\n print(f'Precision = {prec}')\n print(f'Recall = {rec}')\n print(f'f1 = {f1}')\n \n save_path = os.path.join(self.config.predictions_path,f'{split}_evals.txt')\n with open(save_path, 'a') as fl:\n print(f'Accuracy = {acc}', file=fl)\n print(f'Precision = {prec}', file=fl)\n print(f'Recall = {rec}', file=fl)\n print(f'f1 = {f1}', file=fl)\n\n\nSCHEME = TSPDCSVD\n\n\n\n\n",
"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras import (optimizers, losses, metrics, callbacks)\n\n\nimport json\nimport os\n\nfrom lib.base.dotdict import HDict\nHDict.L.update_globals({'path':os.path})\n\nfrom lib.data.reader import ExcludeFeatures, CreateTargets\nfrom lib.base.callbacks.checkpoint import CheckpointCallback, SaveWhenCallback\n\ndef read_config_from_file(config_file):\n with open(config_file, 'r') as fp:\n return json.load(fp)\n\ndef save_config_to_file(config, config_file):\n with open(config_file, 'w') as fp:\n return json.dump(config, fp, indent='\\t')\n\nclass TrainingBase:\n def __init__(self, config=None):\n self.config_input = config\n self.config = self.get_default_config()\n if config is not None:\n for k in config.keys():\n if not k in self.config:\n raise KeyError(f'Unknown config \"{k}\"')\n self.config.update(config)\n \n self.state = self.get_default_state()\n \n self.pred_flag = False\n self.eval_flag = False\n \n def get_dataset_config(self):\n return {}, None\n \n def get_dataset(self, splits=['training','validation']):\n dataset_config, dataset_class = self.get_dataset_config()\n if dataset_class is None:\n raise NotImplementedError\n return dataset_class(**dataset_config, splits = splits)\n \n def get_excluded_features(self):\n return []\n \n def get_model_config(self):\n return {}, None\n \n def get_model(self):\n model_config, model_class = self.get_model_config()\n if model_class is None:\n raise NotImplementedError\n return model_class(**model_config)\n \n def get_optimizer(self):\n config = self.config\n \n optim_dict = dict(\n adam = optimizers.Adam ,\n rmsprop = optimizers.RMSprop ,\n sgd = optimizers.SGD ,\n )\n optim = optim_dict[config.optimizer]\n if config.gradient_clipval is None:\n return optim(learning_rate = config.initial_lr)\n else:\n return optim(learning_rate = config.initial_lr, \n clipvalue = config.gradient_clipval)\n \n def get_loss(self):\n raise NotImplementedError\n \n def get_metrics(self):\n return None\n\n def get_default_config(self):\n return HDict(\n scheme = None,\n model_name = 'unnamed_model',\n distributed = False,\n batch_size = HDict.L('c:32 if c.distributed else 128'),\n initial_lr = 5e-4,\n gradient_clipval = None,\n num_epochs = 1000,\n dataset_path = 'datasets/gnn_benchmark.h5',\n save_path = HDict.L('c:path.join(\"models\",c.model_name)'),\n checkpoint_path = HDict.L('c:path.join(c.save_path,\"checkpoint\")'),\n log_path = HDict.L('c:path.join(c.save_path,\"logs\")'),\n config_path = HDict.L('c:path.join(c.save_path,\"config\")'),\n summary_path = HDict.L('c:path.join(c.save_path,\"summary\")'),\n saved_model_path = HDict.L('c:path.join(c.save_path,\"saved\", c.model_name)'),\n rlr_factor = 0.5, \n rlr_patience = 10,\n rlr_monitor = HDict.L(\"c: c.save_best_monitor\"),\n min_lr_factor = 0.01,\n stopping_lr = 0.,\n steps_per_epoch = None,\n validation_steps = None,\n save_best = True,\n save_when = HDict.L(\"c: '' if not c.save_best \"+\n \"else 'epoch;'+c.save_best_monitor+'<=save_best_value;epoch{epoch:0>4d}'\"),\n save_best_monitor = 'val_loss',\n stopping_patience = 0,\n predictions_path = HDict.L('c:path.join(c.save_path,\"predictions\")'),\n weight_file = ':',\n prediction_bmult = 2,\n optimizer = 'adam',\n )\n \n def get_default_state(self):\n state = HDict(\n current_epoch = tf.Variable(0, trainable=False, name=\"current_epoch\"),\n global_step = tf.Variable(0, trainable=False, name=\"global_step\"),\n )\n if self.config.save_best:\n state.update(\n save_best_value = 
tf.Variable(np.inf, trainable=False, \n name=\"save_best_value\"),\n save_best_epoch = tf.Variable(0, trainable=False, \n name=\"save_best_epoch\"),\n )\n if self.config.rlr_factor<1.0:\n state.update(\n last_reduce_lr = tf.Variable(0, trainable=False, \n name=\"last_reduce_lr\"),\n )\n return state\n \n def get_state_updates(self):\n config = self.config\n updates = HDict(\n on_batch_end = [lambda model, state, *a, **kw: state.global_step.assign_add(1)],\n on_epoch_end = [lambda model, state, *a, **kw: state.current_epoch.assign_add(1)], \n )\n \n if config.save_best:\n monitor = config.save_best_monitor\n rlrp = config.rlr_patience\n rlrf = config.rlr_factor\n minlr = config.initial_lr * config.min_lr_factor\n stplr = config.stopping_lr\n def save_best_update(model, state, epoch, logs=None,\n *args, **kwargs):\n logs = logs or {}\n new_value = logs.get(monitor, np.inf)\n old_value = state.save_best_value.numpy()\n \n old_epoch = state.save_best_epoch.numpy()\n new_epoch = state.current_epoch.numpy()\n \n if new_value < old_value:\n state.save_best_value.assign(new_value)\n state.save_best_epoch.assign(new_epoch)\n print(f'\\nSAVE BEST: {monitor} improved from (epoch:{old_epoch},value:{old_value:0.5f})'+\n f' to (epoch:{new_epoch},value:{new_value:0.5f})',flush=True)\n else:\n print(f'\\nSAVE BEST: {monitor} did NOT improve from'+\n f' (epoch:{old_epoch},value:{old_value:0.5f})',flush=True)\n \n # RLR logic\n if rlrf < 1.0:\n last_reduce_lr = state.last_reduce_lr.numpy()\n epoch_gap = (new_epoch - max(old_epoch, last_reduce_lr))\n if epoch_gap >= rlrp:\n model.optimizer.lr.assign(tf.maximum(model.optimizer.lr*rlrf, minlr))\n state.last_reduce_lr.assign(new_epoch)\n print(f'\\nRLR: {monitor} did NOT improve for {epoch_gap} epochs,'+\n f' new lr = {model.optimizer.lr.numpy()}')\n \n # Stop training logic\n if model.optimizer.lr.numpy() < stplr:\n model.stop_training = True\n print(f'\\nSTOP: lr fell below {stplr}, STOPPING TRAINING!')\n \n updates.on_epoch_end.append(save_best_update)\n \n return updates\n \n def config_summary(self):\n for k,v in self.config.get_dict().items():\n print(f'{k} : {v}', flush=True)\n \n def save_config_file(self):\n os.makedirs(os.path.dirname(self.config.config_path), exist_ok=True)\n save_config_to_file(self.config.get_dict(), self.config.config_path+'.json')\n save_config_to_file(self.config_input, self.config.config_path+'_input.json')\n \n def get_targets(self):\n return ['target']\n \n def get_batched_data(self):\n targets = self.get_targets()\n if len(targets)>0:\n map_fns = CreateTargets(targets)\n else:\n map_fns = None\n \n if self.eval_flag or self.pred_flag:\n return self.dataset.get_batched_data(self.config.batch_size*self.config.prediction_bmult,\n map_fns=map_fns)\n else:\n return self.dataset.get_batched_data(self.config.batch_size,\n map_fns=map_fns)\n \n def load_data(self, splits=['training','validation']):\n self.dataset = self.get_dataset(splits)\n if not self.config.cache_dir == '':\n self.dataset.cache(self.config.cache_dir)\n self.dataset.map(ExcludeFeatures(self.get_excluded_features()))\n\n self.trainset, *others = self.get_batched_data()\n self.valset, self.testset = None, None\n if others: self.valset, *others = others\n if others: self.testset, = others\n \n def model_summary(self):\n os.makedirs(os.path.dirname(self.config.summary_path), exist_ok=True)\n with open(self.config.summary_path+'.txt', 'w') as fp:\n print_fn = lambda *a,**kw: print(*a, **kw,file=fp)\n self.model.summary(print_fn=print_fn)\n \n \n def load_model(self):\n 
self.model_config = self.get_model()\n\n if self.config.distributed:\n self.strategy = tf.distribute.MirroredStrategy()\n self.strategy_scope = self.strategy.scope\n else:\n from contextlib import nullcontext\n self.strategy = None\n self.strategy_scope = nullcontext\n\n with self.strategy_scope():\n self.model = self.model_config.get_model()\n\n self.model_summary()\n\n opt = self.get_optimizer()\n loss = self.get_loss()\n metrics = self.get_metrics()\n \n self.model.compile(opt, loss, metrics)\n \n def load_state(self):\n os.makedirs(self.config.checkpoint_path, exist_ok=True)\n self.training_callbacks = self.get_callbacks()\n mchk_callback = CheckpointCallback(save_path = self.config.checkpoint_path,\n model = self.model,\n state = self.state,\n **self.get_state_updates())\n self.state_callbacks = [mchk_callback]\n self.callbacks = self.training_callbacks + self.state_callbacks\n \n with self.strategy_scope():\n mchk_callback.load_checkpoint()\n \n \n def get_base_callbacks(self):\n cbacks = []\n os.makedirs(self.config.log_path, exist_ok=True)\n logs_callback = callbacks.TensorBoard(log_dir=self.config.log_path)\n cbacks.append(logs_callback)\n \n if self.config.save_when:\n saved_model_dir = os.path.dirname(self.config.saved_model_path)\n os.makedirs(saved_model_dir, exist_ok=True)\n svwhn_callback = SaveWhenCallback(saved_model_dir,\n when = self.config.save_when,\n state = self.state,\n verbose = 1,\n save_weights_only = True)\n cbacks.append(svwhn_callback)\n \n if self.config.stopping_patience > 0:\n estop_callback = callbacks.EarlyStopping(monitor = 'val_loss',\n verbose = 1,\n patience = self.config.stopping_patience)\n cbacks.append(estop_callback)\n \n return cbacks\n \n def get_callbacks(self):\n return self.get_base_callbacks()\n \n def get_additional_training_configs(self):\n return {}\n \n def train_model(self):\n self.model.fit(self.trainset, \n epochs = self.config.num_epochs, \n validation_data = self.valset,\n callbacks = self.callbacks,\n initial_epoch = self.state.current_epoch.numpy(),\n steps_per_epoch = self.config.steps_per_epoch,\n validation_steps = self.config.validation_steps,\n **self.get_additional_training_configs()\n )\n \n \n def execute_training(self):\n self.config_summary()\n self.save_config_file()\n self.load_data()\n self.load_model()\n self.load_state()\n self.train_model()\n self.finalize_training(skip_init=True)\n \n \n def save_model(self):\n os.makedirs(os.path.dirname(self.config.saved_model_path), exist_ok=True)\n save_path = self.config.saved_model_path+'.h5'\n self.model.save_weights(save_path)\n print(f'Saved model to {save_path}')\n \n def finalize_training(self, skip_init=False):\n if not skip_init:\n self.config_summary()\n self.load_model()\n self.load_state()\n self.save_model()\n print('DONE!!!')\n \n \n def get_latest_save_file(self):\n import re\n pattern = re.compile(r'(?<=epoch)[0-9]+')\n from pathlib import Path\n\n cur_epoch, cur_file = 0, ''\n for fp in Path(self.config.saved_model_path).parent.glob('*.h5'):\n m = pattern.search(fp.name)\n \n e = 0 if m is None else int(m.group())\n if e > cur_epoch:\n cur_epoch = e\n cur_file = str(fp)\n \n self.config.weight_file = cur_file\n \n \n def prepare_for_test(self):\n self.config_summary()\n self.load_data(splits=['training', 'validation', 'test'])\n self.load_model()\n \n if self.config.weight_file == ':':\n self.get_latest_save_file()\n \n if self.config.weight_file == '':\n self.config.weight_file = self.config.saved_model_path+'.h5'\n \n if self.config.weight_file == '-':\n 
self.load_state()\n print('LOADED TRAINING STATE FOR PREDICTIONS!')\n else:\n self.model.load_weights(self.config.weight_file, by_name=True)\n print(f'LOADED WEIGHT FILE \"{self.config.weight_file}\" FOR PREDICTIONS!')\n \n \n def make_predictions_on_split(self, split):\n raise NotImplementedError\n \n def do_evaluations_on_split(self, split):\n raise NotImplementedError\n \n def make_predictions(self):\n self.pred_flag = True\n self.prepare_for_test()\n \n os.makedirs(self.config.predictions_path, exist_ok=True)\n for split in ['trainset', 'valset', 'testset']:\n print('='*40)\n print(f'Prediction on {split}.')\n self.make_predictions_on_split(split)\n print()\n \n def do_evaluations(self):\n self.eval_flag = True\n self.prepare_for_test()\n \n os.makedirs(self.config.predictions_path, exist_ok=True)\n for split in ['trainset', 'valset', 'testset']:\n print('='*40)\n print(f'Evaluation on {split}.')\n self.do_evaluations_on_split(split)\n print()\n \n"
] | [
[
"tensorflow.clip_by_value",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.reduce_max",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.linalg.band_part",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.ones",
"tensorflow.math.log",
"tensorflow.reduce_prod",
"tensorflow.pad"
],
[
"sklearn.metrics.recall_score",
"tensorflow.concat",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"sklearn.metrics.precision_score",
"numpy.concatenate",
"tensorflow.py_function",
"sklearn.metrics.f1_score",
"tensorflow.keras.metrics.SparseCategoricalCrossentropy",
"sklearn.metrics.accuracy_score"
],
[
"tensorflow.Variable",
"tensorflow.maximum",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.distribute.MirroredStrategy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
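A minimal sketch of calling the dot_product_attention helper from lib/base/xformer_layers/attention.py above, using random rank-4 tensors in eager mode. The tensor shapes, the import path, and the choice of flags are assumptions for illustration only.

import tensorflow as tf
# from lib.base.xformer_layers.attention import dot_product_attention  # assumed import path

# [batch, heads, sequence, channels], the headed layout the helper expects.
q = tf.random.normal([2, 4, 16, 32])
k = tf.random.normal([2, 4, 16, 32])
v = tf.random.normal([2, 4, 16, 64])

# scale_logits applies the usual channels**-0.5 scaling; causal adds a lower-triangular mask;
# return_logits makes the call return a (output, logits) tuple.
out, logits = dot_product_attention(q, k, v,
                                    scale_logits=True,
                                    causal=True,
                                    return_logits=True)
print(out.shape)      # (2, 4, 16, 64): query positions with the value channel size
print(logits.shape)   # pre-softmax attention logits, heads moved to the trailing axis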
theislab/AutoGeneS | [
"22bde0d5eba013e90edb85341e0bd9c28b82e7fd"
] | [
"autogenes/core.py"
] | [
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .ga import GeneticAlgorithm\n\nfrom . import objectives as ga_objectives\n\nimport deap\nimport warnings\n\nclass AutoGeneS:\n\n PLOT_PARAMS = {\n 'small': {\n 'figsize': (10,5),\n 'all_ms': 8,\n 'sel_ms': 10\n },\n 'large': {\n 'figsize': (15,10),\n 'all_ms': 5,\n 'sel_ms': 10\n }\n }\n\n PLOT_THRESHOLD = 50\n\n def __init__(self, data):\n\n self.data = data\n\n if len(self.data.shape) != 2:\n raise ValueError(\"data is expected to have two dimensions\")\n\n if self.data.shape[0] < 2:\n raise ValueError(\"At least two rows (cell types) expected\")\n\n if self.data.shape[1] < self.data.shape[0]:\n raise ValueError(\"Number of columns (genes) must be >= number of rows (cell types)\")\n\n if not np.isfinite(self.data).all():\n raise ValueError(\"Some entries in data are not scalars\")\n\n self.__has_run = False\n self.selection = None\n self.selection_index = None\n\n def run(self, ngen=2, mode='standard', nfeatures=None, weights=None, objectives=None, seed=0, verbose=True, **kwargs):\n\n # Check modes\n\n if mode == 'standard':\n if nfeatures is not None:\n raise ValueError(\"nfeatures doesn't apply to standard mode (did you mean mode='fixed'?)\")\n\n elif mode == 'fixed':\n if nfeatures is None:\n raise ValueError(\"You need to supply nfeatures\")\n\n if nfeatures > self.data.shape[1]:\n raise ValueError(\"nfeatures must be <= the number of columns (genes)\")\n \n if nfeatures < self.data.shape[0]:\n raise ValueError(\"nfeatures must be >= the number of rows (cell types)\")\n else:\n raise ValueError(\"Invalid mode\")\n\n # Check weights and objectives\n \n if weights is None:\n if objectives is None:\n weights = (-1.0,1.0)\n objectives = ('correlation','distance')\n else:\n raise Exception(\"Need weights for objectives\")\n else:\n if objectives is not None:\n if len(weights) != len(objectives):\n raise ValueError(\"Number of weights does not match number of objectives\")\n weights_l = []\n objectives_l = []\n for i,w in enumerate(weights):\n if w == 0:\n warnings.warn(f\"Ignoring objective '{str(objectives[i])}'\")\n else:\n weights_l.append(w)\n objectives_l.append(objectives[i])\n weights=tuple(weights_l)\n objectives=tuple(objectives_l)\n else:\n raise Exception(\"Need objectives for weights\")\n \n # Store objectives\n\n self.objectives_func = []\n self.objectives_names = []\n\n for f in objectives:\n if callable(f):\n self.objectives_func.append(f)\n self.objectives_names.append(f.__name__)\n elif isinstance(f,str):\n if not hasattr(ga_objectives,f):\n raise ValueError(f\"No such objective: {f}\")\n else:\n self.objectives_names.append(f)\n self.objectives_func.append(getattr(ga_objectives,f))\n else:\n raise ValueError(\"Invalid objective\")\n\n self.objectives_num = len(self.objectives_func)\n self.weights = weights\n\n self.ga = GeneticAlgorithm(\n data=self.data, \n ngen=ngen,\n mode=mode,\n weights=weights, \n objectives_names=self.objectives_names, \n objectives_func=self.objectives_func, \n seed=seed, \n verbose=verbose,\n nfeatures=nfeatures,\n **kwargs\n )\n self.hof = self.ga.run()\n\n self.__has_run = True\n\n def resume(self):\n self.ga.resume()\n\n @property\n def pareto(self):\n self.__assert_run()\n return self.hof.items\n\n @property\n def fitness_matrix(self):\n self.__assert_run()\n\n all = []\n for i in range(self.objectives_num):\n vals = np.array(list(map(lambda x: x.fitness.values[i], self.hof.items)))\n all.append(vals)\n return np.array(all).T\n\n #\n # Plot results\n #\n\n def 
plot(self,objectives=(0,1), **kwargs):\n\n self.__assert_run()\n\n if self.objectives_num == 1:\n raise Exception(\"Cannot plot for a single objective\")\n\n obj = objectives\n\n if len(obj) != 2:\n raise ValueError(\"Must supply two objectives per plot\") \n\n if not all(map(lambda x: x in range(self.objectives_num), obj)):\n raise ValueError(f\"Invalid objectives, must be 0 <= x <= {self.objectives_num-1}\")\n\n if not kwargs:\n return self.plot(weights=self.weights)\n\n i,desc = self.__from_pareto(**kwargs)\n\n if desc == 'index': legend = f'By index'\n if desc == 'weights': legend = f\"Using weights {kwargs['weights']}\"\n if desc == 'close_to': legend = f\"Close to {kwargs['close_to'][1]}\"\n\n if 'size' in kwargs:\n if kwargs['size'] not in ['small','large']:\n raise ValueError(\"Invalid size\")\n size = kwargs['size']\n else:\n if len(self.pareto) < AutoGeneS.PLOT_THRESHOLD:\n size = 'small' \n else:\n size = 'large'\n\n df = pd.DataFrame(self.fitness_matrix).sort_values(by=obj[0])\n\n df_0 = df[obj[0]]\n df_1 = df[obj[1]]\n\n params = AutoGeneS.PLOT_PARAMS[size]\n\n plt.figure(figsize=params['figsize'])\n\n line = plt.plot(df_0,df_1)\n\n plt_all, = plt.plot(df_0.drop(i),df_1.drop(i),'bo',ms=params['all_ms'])\n plt_sel, = plt.plot(df_0[i],df_1[i],'r^',ms=params['sel_ms'])\n\n plt.xlabel(self.objectives_names[obj[0]])\n plt.ylabel(self.objectives_names[obj[1]])\n\n plt.legend([plt_all, plt_sel], [\"Option\", legend],bbox_to_anchor=(1, 1), loc='upper left')\n\n plt.show()\n\n #\n # Select individual\n #\n\n def select(self, **kwargs):\n self.__assert_run()\n\n if not kwargs:\n return self.select(weights=self.weights)\n\n i,desc = self.__from_pareto(**kwargs)\n self.selection = self.hof[i]\n self.selection_index = i\n\n return self.selection\n\n #\n # Helper\n #\n\n def __from_pareto(self,**kwargs):\n\n if sum([ x in kwargs for x in [\"weights\",\"index\",\"close_to\"]]) != 1:\n raise Exception(\"You need to provide exactly one criterion.\")\n\n if 'weights' in kwargs:\n weights = kwargs['weights']\n i_max = self.__index_by_weights(weights) \n return i_max,'weights'\n\n if 'index' in kwargs:\n index = kwargs['index']\n if isinstance(index,int):\n if index not in range(len(self.pareto)):\n raise ValueError(\"Invalid index\")\n return index,'index'\n else:\n obj,i = index\n fit = pd.DataFrame(data=self.fitness_matrix).sort_values(by=obj)\n return fit.index.values[i],'index'\n \n if 'close_to' in kwargs:\n obj,num = kwargs['close_to']\n fit = self.fitness_matrix[:,obj]\n i = np.argmin(np.abs(fit-num))\n return i,'close_to'\n\n def __index_by_weights(self,weights):\n self.__assert_run()\n if len(weights) != self.objectives_num:\n raise ValueError(f\"Number of weights does not match number of objectives\")\n\n fitness = self.fitness_matrix\n for i in range(self.objectives_num):\n max = np.max(fitness[:,i])\n if max:\n fitness[:,i] *= 1/max\n\n wfitness = fitness.dot(np.array(weights))\n return np.argmax(wfitness)\n\n def __assert_run(self):\n if not self.__has_run:\n raise Exception(\"AutoGeneS did not run yet\")\n\n def __setstate__(self,dict):\n deap.creator.FitnessGA.weights = dict['weights']\n self.__dict__.update(dict)\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.abs",
"numpy.isfinite",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.argmax",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
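A short usage sketch for the AutoGeneS core class in the record above, assuming the package-level import path and that the GeneticAlgorithm defaults in the package suffice; the toy matrix is illustrative only.

import numpy as np
# from autogenes.core import AutoGeneS  # assumed path from the record's file_path "autogenes/core.py"

# Rows are cell types, columns are genes, as enforced by the constructor checks.
data = np.random.rand(5, 200)

ag = AutoGeneS(data)
# Default standard mode optimizes ('correlation', 'distance') with weights (-1.0, 1.0).
ag.run(ngen=2, seed=0, verbose=False)

selection = ag.select()                  # picks a Pareto solution using the run's weights
print(selection)
print(ag.fitness_matrix.shape)           # (number of Pareto solutions, number of objectives)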
gargraghav/tensorflow | [
"a0ea36b9dffc563deae6fa9e2f4d2ca912a3a224"
] | [
"Learning Tensorflow/Examples/handwrittendigit_classifier.py"
] | [
"import tensorflow as tf\nimport time\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\n\nbeginTime=time.time()\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nlearning_rate = 0.01\ntraining_iterations = 30\nbatch_size = 100\ndisplay_step = 2\n\nx = tf.placeholder(\"float\", [None, 784])\ny = tf.placeholder(\"float\", [None, 10])\n\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\nwith tf.name_scope(\"Wx_b\") as scope:\n model = tf.nn.softmax(tf.matmul(x, W) + b)\n\nw_h = tf.summary.histogram(\"weights\", W)\nb_h = tf.summary.histogram(\"biases\", b)\n\nwith tf.name_scope(\"cost_function\") as scope:\n cost_function = -tf.reduce_sum(y*tf.log(model))\n tf.summary.scalar(\"cost function\", cost_function)\n\nwith tf.name_scope(\"train\") as scope:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)\n\ninit = tf.global_variables_initializer()\n\nmerged_summary_op = tf.summary.merge_all()\n\nwith tf.Session() as sess:\n sess.run(init)\n summary_writer = tf.summary.FileWriter(\"/home/raghav/PycharmProjects/tensorflow/tensorboard/\", sess.graph)\n\n for itr in range(training_iterations):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples / batch_size)\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})\n avg_cost += sess.run(cost_function, feed_dict={x: batch_xs, y: batch_ys})/total_batch\n summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})\n summary_writer.add_summary(summary_str, total_batch + i)\n if itr % display_step == 0:\n print(\"Iteration:\", '%d' % (itr + 1), \"cost=\", \"{:.9f}\".format(avg_cost))\n\n print(\"Training Completed!\")\n\n predictions = tf.equal(tf.argmax(model, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(predictions, \"float\"))\n\n print(\"\\nAccuracy: \",sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})*100)\n endTime = time.time()\n print('\\nTotal time: {:5.2f}s'.format(endTime - beginTime))\n\n f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n\n test1_index = 0\n test1_x = mnist.test.images[test1_index].reshape(1, 784)\n test1_img = mnist.test.images[test1_index].reshape((28, 28))\n test1_y = mnist.test.labels[test1_index].reshape(1, 10)\n test1_pred = sess.run(model, feed_dict={x: test1_x, y: test1_y})\n\n ax1.imshow(test1_img, cmap='gray')\n ax2.bar(list(range(0, 10)), test1_pred[0])\n\n test2_index = 6\n test2_x = mnist.test.images[test2_index].reshape(1, 784)\n test2_img = mnist.test.images[test2_index].reshape((28, 28))\n test2_y = mnist.test.labels[test2_index].reshape(1, 10)\n test2_pred = sess.run(model, feed_dict={x: test2_x, y: test2_y})\n\n ax3.imshow(test2_img, cmap='gray')\n ax4.bar(list(range(0, 10)), test2_pred[0])\n\n plt.show()"
] | [
[
"tensorflow.matmul",
"tensorflow.summary.FileWriter",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.placeholder",
"matplotlib.pyplot.subplots",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge_all",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.log",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.summary.scalar",
"matplotlib.pyplot.show",
"tensorflow.summary.histogram"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
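The TensorFlow 1.x graph in the record above computes `softmax(xW + b)` with a summed cross-entropy loss. A small NumPy sketch of the same forward pass and loss, using made-up inputs and the same zero weight initialisation, so the quantities can be checked without TensorFlow:

```python
import numpy as np

x = np.random.rand(4, 784).astype(np.float32)    # four fake flattened "images"
W = np.zeros((784, 10), dtype=np.float32)        # same zero init as tf.zeros above
b = np.zeros(10, dtype=np.float32)
y = np.eye(10, dtype=np.float32)[[3, 1, 4, 1]]   # one-hot fake labels

logits = x @ W + b
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)  # softmax
loss = -np.sum(y * np.log(probs))      # mirrors -tf.reduce_sum(y * tf.log(model))
print(loss)                            # 4 * ln(10) ≈ 9.21 with zero weights
```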
AI-Mart/PaddleNLP | [
"0ababea960427e8b70220ea06d908ed58cbed0ed"
] | [
"examples/language_model/gpt-3/static/run_pretrain_static.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPretrain GPT in static graph mode.\n\"\"\"\nimport argparse\nimport math\nimport os\nimport random\nimport time\nimport sys\n\nos.path.expandvars('$HOME')\nos.path.expanduser('~')\n\nimport numpy as np\nimport paddle\nimport paddle.distributed.fleet as fleet\nfrom paddle.distributed.fleet.meta_optimizers.sharding.utils import save_persistables\nfrom modeling import GPTModel, GPTForPretraining, GPTPretrainingCriterion\nfrom paddlenlp.transformers import GPTTokenizer, GPTChineseTokenizer\nfrom paddlenlp.ops import guard, Topology, get_rng_state_tracker\nfrom paddlenlp.utils.log import logger\nfrom paddlenlp.utils import profiler\nimport paddlenlp.ops as ops\nfrom visualdl import LogWriter\n\n# Used to load the data_tools path, should import before dataset\nfilepath = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(0, os.path.join(filepath, \"../\"))\nfrom dataset import create_pretrained_dataset\nfrom args import parse_args\nimport lr\n\nMODEL_CLASSES = {\n \"gpt\": (GPTForPretraining, GPTTokenizer),\n \"gpt-cn\": (GPTForPretraining, GPTChineseTokenizer),\n}\n\n\ndef create_data_holder(args):\n \"\"\"creat data holder\"\"\"\n tokens = paddle.static.data(\n name=\"tokens\", shape=[-1, args.max_seq_len], dtype=\"int64\")\n loss_mask = paddle.static.data(\n name=\"loss_mask\", shape=[-1, args.max_seq_len], dtype=\"float32\")\n position_ids = paddle.static.data(\n name=\"position_ids\", shape=[-1, args.max_seq_len], dtype=\"int64\")\n labels = paddle.static.data(\n name=\"labels\", shape=[-1, args.max_seq_len], dtype=\"int64\")\n return [tokens, loss_mask, position_ids, labels]\n\n\ndef dist_optimizer(args, topo):\n default_global_batch_size = topo.data_info.size * args.micro_batch_size\n if args.global_batch_size is None:\n args.global_batch_size = default_global_batch_size\n\n bsz_per_dp = args.global_batch_size // topo.data_info.size\n micro_batch_size = args.micro_batch_size\n assert args.global_batch_size % micro_batch_size == 0, \"cannot do gradient accumulate, global_batch_size: {} micro_batch_size: {}\".format(\n args.global_batch_size, micro_batch_size)\n acc_steps = bsz_per_dp // micro_batch_size\n\n exec_strategy = paddle.fluid.ExecutionStrategy()\n exec_strategy.num_threads = 2\n exec_strategy.num_iteration_per_drop_scope = 1\n\n dist_strategy = fleet.DistributedStrategy()\n dist_strategy.execution_strategy = exec_strategy\n dist_strategy.nccl_comm_num = 3\n\n dist_strategy.recompute = args.use_recompute\n dist_strategy.pipeline = args.pp_degree > 1\n\n if args.use_amp:\n dist_strategy.amp = True\n dist_strategy.amp_configs = {\n \"custom_white_list\": [\n 'softmax', 'layer_norm', 'gelu',\n \"fused_softmax_mask_upper_triangle\", \"elementwise_add\"\n ],\n \"custom_black_list\":\n [\"reduce_sum\", \"c_softmax_with_cross_entropy\", \"elementwise_div\"],\n \"init_loss_scaling\": 32768,\n \"use_dynamic_loss_scaling\": True,\n \"use_pure_fp16\": 
args.amp_level == \"O2\",\n \"use_fp16_guard\": False\n }\n if args.use_sharding:\n dist_strategy.sharding = True\n dist_strategy.sharding_configs = {\n \"segment_broadcast_MB\": 32,\n \"sharding_degree\": args.sharding_degree,\n \"mp_degree\": args.mp_degree,\n \"pp_degree\": args.pp_degree,\n \"dp_degree\": args.dp_degree,\n \"optimize_offload\": False,\n }\n if args.pp_degree > 1:\n dist_strategy.pipeline_configs = {\n \"schedule_mode\": \"1F1B\",\n \"micro_micro_batch_size\": micro_batch_size,\n \"accumulate_steps\": acc_steps,\n }\n else:\n assert acc_steps == 1, \"Only support accumulate steps in piplinemode. Please set you global_batch_size={}\".format(\n default_global_batch_size)\n\n return dist_strategy\n\n\ndef get_train_data_file(args):\n files = [\n os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)\n if (os.path.isfile(os.path.join(args.input_dir, f)) and str(f).endswith(\n \"_idx.npz\"))\n ]\n files = [x.replace(\"_idx.npz\", \"\") for x in files]\n if len(files) == 0:\n logger.warning(\n \"Not found dataset with name of xxx_ids.npy and xxx_idx.npz! Try to found old compatible xxx_ids.npz file.\"\n )\n else:\n return files\n\n files = [\n os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)\n if (os.path.isfile(os.path.join(args.input_dir, f)) and str(f).endswith(\n \"_ids.npz\"))\n ]\n\n files = [x.replace(\"_ids.npz\", \"\") for x in files]\n return files\n\n\ndef init_static_with_params(model, dygraph_params, topo, prog=None):\n from paddlenlp.utils.tools import dygraph_params_to_static\n static_params = dygraph_params_to_static(model, dygraph_params, topo)\n if prog is None:\n prog = paddle.static.default_main_program()\n paddle.static.set_program_state(prog, static_params)\n\n\ndef run_evaluate(data_loader,\n exe,\n program,\n iter_steps,\n log_writer,\n global_step,\n args,\n epoch,\n is_last,\n eval_fetch,\n task_name=\"valid\"):\n all_loss = []\n local_time = time.time()\n\n for eval_step, batch in enumerate(data_loader):\n loss_return = exe.run(program, feed=batch, fetch_list=eval_fetch)\n if is_last:\n all_loss.append(float(loss_return[0]))\n if eval_step >= iter_steps - 1:\n if not is_last:\n break\n average_loss = sum(all_loss) / len(all_loss)\n logger.info(\n \"%s step %d, epoch: %d, batch: %d, loss: %f, speed: %.0f tokens/s\"\n % (task_name, global_step, epoch, eval_step, average_loss,\n iter_steps * args.micro_batch_size * args.max_seq_len /\n (time.time() - local_time)))\n log_writer.add_scalar(task_name + \"_loss\", average_loss,\n global_step)\n break\n\n\ndef do_train(args):\n # Initialize the paddle and paddle fleet execute environment\n paddle.enable_static()\n fleet.init(is_collective=True)\n\n # Create the random seed for the worker\n random.seed(args.seed)\n np.random.seed(args.seed)\n paddle.seed(args.seed)\n get_rng_state_tracker().add('global_seed', args.seed)\n get_rng_state_tracker().add('local_seed',\n args.seed + fleet.worker_index() + 2021)\n\n if args.use_amp and args.amp_level == \"O2\":\n assert (args.mp_degree == 1 and args.pp_degree == 1\n ), \"When amp level is O2, mp_degree and pp_degree should be 1.\"\n assert (args.use_sharding == False\n ), \"When amp level is O2, use_sharding should be False.\"\n\n assert args.device in [\n \"cpu\", \"gpu\", \"xpu\"\n ], \"Invalid device! 
Available device should be cpu, gpu, or xpu.\"\n place = paddle.set_device(args.device)\n\n worker_num = fleet.worker_num()\n worker_index = fleet.worker_index()\n local_rank = 0 if fleet.local_rank() is None else int(fleet.local_rank())\n\n topo = Topology(\n device_rank=worker_index,\n world_size=worker_num,\n dp_degree=args.dp_degree,\n pp_degree=args.pp_degree,\n sharding_degree=args.sharding_degree,\n mp_degree=args.mp_degree)\n\n logger.info(\"The topo of hybrid parallelism:\\n{}\".format(topo))\n\n dist_strategy = dist_optimizer(args, topo)\n\n # Create log write, train results show on last card of pipeline.\n if topo.is_last:\n log_writer_path = os.path.join(\n args.output_dir, \"train_log\",\n \"{}_globalbsz_{}_amp_{}_recompute_{}_card_{}\".format(\n args.model_name_or_path, args.global_batch_size, args.use_amp,\n args.use_recompute, worker_index).lower())\n if os.path.exists(log_writer_path):\n import shutil\n shutil.rmtree(log_writer_path)\n log_writer = LogWriter(log_writer_path)\n\n # Define the input data in the static mode\n\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n pretrained_models_list = list(\n model_class.pretrained_init_configuration.keys())\n\n data_file = get_train_data_file(args)\n main_program = paddle.static.default_main_program()\n startup_program = paddle.static.default_startup_program()\n with paddle.static.program_guard(main_program, startup_program):\n with paddle.utils.unique_name.guard():\n with paddle.static.device_guard('gpu:0'):\n data_holders = create_data_holder(args)\n [tokens, loss_mask, position_ids, labels] = data_holders\n\n tokenizer = tokenizer_class.from_pretrained(\n args.model_name_or_path)\n eos_id = tokenizer.eos_token_id\n\n train_data_loader, valid_data_loader, test_data_loader = create_pretrained_dataset(\n args,\n data_file,\n local_rank=local_rank,\n data_world_size=topo.data_info.size,\n data_world_rank=topo.data_info.rank,\n eos_id=eos_id,\n max_seq_len=args.max_seq_len,\n places=paddle.static.cuda_places(),\n data_holders=data_holders,\n pipeline_mode=False, )\n\n if args.model_name_or_path in pretrained_models_list:\n model_config = model_class.pretrained_init_configuration[\n args.model_name_or_path]\n\n model_config[\n \"hidden_dropout_prob\"] = args.hidden_dropout_prob\n model_config[\n \"attention_probs_dropout_prob\"] = args.attention_probs_dropout_prob\n model_config[\"topo\"] = topo\n\n model = guard(f'gpu:{args.pp_degree -1}')(\n GPTForPretraining)(guard(f'gpu:0')(GPTModel)(\n **model_config))\n else:\n model, _ = GPTForPretraining.from_pretrained(\n args.model_name_or_path,\n hidden_dropout_prob=args.hidden_dropout_prob,\n attention_probs_dropout_prob=args.\n attention_probs_dropout_prob,\n topo=topo)\n # Create the model for the gpt pretrain\n preds = model(tokens, position_ids)\n\n criterion = guard(f'gpu:{args.pp_degree -1}')(\n GPTPretrainingCriterion)(topo)\n loss = criterion(preds, labels, loss_mask)\n\n # Create the learning_rate sheduler and optimizer\n if args.decay_steps is None:\n args.decay_steps = args.max_steps\n warmup_step = args.warmup_rate * args.decay_steps\n\n # TODO @ZHUI Use paddle network to support lr scheduler\n lr_scheduler = lr.CosineAnnealingWithWarmupDecay(\n max_lr=args.max_lr,\n min_lr=args.min_lr,\n warmup_step=warmup_step,\n decay_step=args.decay_steps)\n\n clip = None\n if args.grad_clip > 0:\n clip = paddle.fluid.clip.GradientClipByGlobalNorm(\n clip_norm=args.grad_clip)\n\n decay_param = [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd 
in [\"bias\", \"norm\"])\n ]\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n beta1=args.adam_beta1,\n beta2=args.adam_beta2,\n epsilon=args.adam_epsilon,\n grad_clip=clip,\n weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in decay_param)\n # alias\n optimizer.apply_optimize = optimizer._apply_optimize\n\n if args.use_recompute:\n dist_strategy.recompute = True\n dist_strategy.recompute_configs = {\n \"checkpoints\": model.gpt.checkpoints\n }\n\n # Use the fleet api to compile the distributed optimizer\n optimizer = fleet.distributed_optimizer(\n optimizer, strategy=dist_strategy)\n\n optimizer.minimize(loss)\n logger.info(f'final strategy: {fleet._final_strategy()}')\n logger.info(\"The training meta optimizer is/are %s\" %\n fleet._get_applied_meta_list())\n\n program_desc_dir = os.path.join(args.output_dir, \"program_desc\")\n if not os.path.isdir(program_desc_dir):\n os.mkdir(program_desc_dir)\n\n with open(program_desc_dir + \"/main_program.txt.%d\" % worker_index,\n 'w') as f:\n f.write(str(main_program))\n\n with open(program_desc_dir + \"/startup_program.txt.%d\" % worker_index,\n 'w') as f:\n f.write(str(startup_program))\n\n # Define the Executor for running the static model\n exe = paddle.static.Executor(place)\n exe.run(startup_program)\n test_program = main_program.clone(for_test=True)\n\n if args.use_amp and args.amp_level == \"O2\":\n optimizer.amp_init(place)\n\n if args.model_name_or_path not in pretrained_models_list:\n logger.info(\"Try to load checkpoint from %s \" % args.model_name_or_path)\n dygrah_path = os.path.join(args.model_name_or_path,\n \"model_state.pdparams\")\n static_path = os.path.join(args.model_name_or_path, \"static_vars\")\n\n flag_loaded = False\n if os.path.exists(static_path):\n if args.mp_degree > 1:\n logger.warning(\"MP should init with dygraph params\")\n else:\n logger.info(\"Loading parameters from %s\" % static_path)\n paddle.static.load(main_program, static_path, exe)\n flag_loaded = True\n\n if not flag_loaded and os.path.exists(dygrah_path):\n if args.sharding_degree > 1:\n logger.warning(\"Sharding should init with static vars\")\n else:\n logger.info(\"Loading parameters from %s\" % dygrah_path)\n init_static_with_params(\n model,\n paddle.load(\n dygrah_path, return_numpy=True),\n topo,\n main_program)\n flag_loaded = True\n\n if not flag_loaded:\n logger.error(\"No checkpoint load.\")\n\n global_step = 0\n tic_train = time.time()\n epoch = 0\n learning_rate = main_program.global_block().vars[\"learning_rate_0\"]\n while True:\n fetchs = []\n if topo.is_last:\n fetchs = [loss, learning_rate]\n\n # Bug fix, if not call valid_data_loader, the enumerate will call valid_data_loader\n # many times. 
and start a new random dataloader.\n valid_data_loader = valid_data_loader()\n test_data_loader = test_data_loader()\n\n train_reader_cost = 0.0\n train_run_cost = 0.0\n reader_start = time.time()\n for step, batch in enumerate(train_data_loader()):\n train_reader_cost += time.time() - reader_start\n train_start = time.time()\n\n global_step += 1\n\n ret = exe.run(main_program,\n feed=batch,\n fetch_list=fetchs,\n use_program_cache=True)\n # In the new 2.0 api, must call this function to change the learning_rate\n lr_scheduler.step()\n train_run_cost += time.time() - train_start\n\n # Profile for model benchmark\n profiler.add_profiler_step(args.profiler_options)\n\n if global_step % args.logging_freq == 0:\n if topo.is_last:\n loss_return, lr_return = ret\n #speed = args.logging_freq / (time.time() - tic_train)\n speed = args.logging_freq / (\n train_reader_cost + train_run_cost)\n avg_reader_cost = train_reader_cost / args.logging_freq\n logger.info(\n \"global step %d, epoch: %d, batch: %d, loss: %.9f, avg_reader_cost: %.5f sec, avg_batch_cost: %.5f sec, speed: %.2f steps/s, ips_total: %.0f tokens/s, ips: %.0f tokens/s, learning rate: %.5e\"\n % (global_step, epoch, step, loss_return[0],\n avg_reader_cost, 1. / speed, speed,\n speed * args.global_batch_size * args.max_seq_len,\n speed * args.global_batch_size * args.max_seq_len /\n worker_num, lr_return[0]))\n log_writer.add_scalar(\"loss\", loss_return[0], global_step)\n log_writer.add_scalar(\"learning_rate\", lr_return[0],\n global_step)\n tic_train = time.time()\n train_reader_cost = 0.0\n train_run_cost = 0.0\n\n if args.check_accuracy:\n if global_step >= args.max_steps:\n return\n else:\n continue\n\n if global_step % args.eval_freq == 0:\n # TODO, check the input data of validation\n eval_fetch = []\n if topo.is_last:\n eval_fetch = [loss]\n\n run_evaluate(valid_data_loader, exe, test_program,\n args.eval_iters, log_writer, global_step, args,\n epoch, topo.is_last, eval_fetch, \"valid\")\n tic_train = time.time()\n\n if global_step % args.save_steps == 0 or global_step >= args.max_steps:\n output_dir = os.path.join(args.output_dir,\n \"model_%d\" % global_step)\n logger.debug(\"saving models to {}\".format(output_dir))\n save_persistables(exe,\n os.path.join(output_dir, \"static_vars\"),\n main_program)\n if global_step <= args.save_steps:\n model.init_config[\"init_args\"][0].init_config.pop(\"topo\",\n None)\n model.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n tic_train = time.time()\n\n if global_step >= args.max_steps:\n eval_fetch = []\n if topo.is_last:\n eval_fetch = [loss]\n\n run_evaluate(test_data_loader, exe, test_program,\n args.test_iters, log_writer, global_step, args,\n epoch, topo.is_last, eval_fetch, \"test\")\n del train_data_loader\n return\n reader_start = time.time()\n\n epoch += 1\n\n\nif __name__ == \"__main__\":\n config = parse_args(MODEL_CLASSES)\n do_train(config)\n"
] | [
[
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
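The pretraining script above excludes parameters whose names contain "bias" or "norm" from weight decay when building AdamW. A tiny sketch of that name filter; the parameter names are invented stand-ins, not real GPT parameter names:

```python
names = ["linear.weight", "linear.bias", "layer_norm.weight", "embedding.weight"]

# keep only parameters whose name contains neither "bias" nor "norm"
decay_param = [n for n in names if not any(nd in n for nd in ["bias", "norm"])]
print(decay_param)  # ['linear.weight', 'embedding.weight']
```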
ozcelikfu/IC-GAN_fMRI_Reconstruction | [
"31b0dc7659afbf8d12b1e460a38ab6d8d9a4296c"
] | [
"KamitaniData/kamitani_data_handler.py"
] | [
"\nfrom scipy.io import loadmat\nimport numpy as np\nimport pandas as pd\nimport sklearn.preprocessing\nfrom sklearn import preprocessing\n\n\nclass kamitani_data_handler():\n \"\"\"Generate batches for FMRI prediction\n frames_back - how many video frames to take before FMRI frame\n frames_forward - how many video frames to take after FMRI frame\n \"\"\"\n\n def __init__(self, matlab_file ,test_img_csv = 'KamitaniData/imageID_test.csv',train_img_csv = 'KamitaniData/imageID_training.csv',voxel_spacing =3,log = 0 ):\n mat = loadmat(matlab_file)\n self.data = mat['dataSet'][:,3:]\n self.sample_meta = mat['dataSet'][:,:3]\n meta = mat['metaData']\n\n\n self.meta_keys = list(l[0] for l in meta[0][0][0][0])\n self.meta_desc = list(l[0] for l in meta[0][0][1][0])\n self.voxel_meta = np.nan_to_num(meta[0][0][2][:,3:])\n test_img_df = pd.read_csv(test_img_csv, header=None)\n train_img_df =pd.read_csv(train_img_csv, header=None)\n self.test_img_id = test_img_df[0].values\n self.train_img_id = train_img_df[0].values\n self.sample_type = {'train':1 , 'test':2 , 'test_imagine' : 3}\n self.voxel_spacing = voxel_spacing\n\n self.log = log\n\n def get_meta_field(self,field = 'DataType'):\n index = self.meta_keys.index(field)\n if(index <3): # 3 first keys are sample meta\n return self.sample_meta[:,index]\n else:\n return self.voxel_meta[index]\n\n\n def print_meta_desc(self):\n print(self.meta_desc)\n\n def get_labels(self, imag_data = 0,test_run_list = None):\n le = preprocessing.LabelEncoder()\n\n img_ids = self.get_meta_field('Label')\n type = self.get_meta_field('DataType')\n train = (type == self.sample_type['train'])\n test = (type == self.sample_type['test'])\n imag = (type == self.sample_type['test_imagine'])\n\n img_ids_train = img_ids[train]\n img_ids_test = img_ids[test]\n img_ids_imag = img_ids[imag]\n\n\n train_labels = []\n test_labels = []\n imag_labels = []\n for id in img_ids_test:\n idx = (np.abs(id - self.test_img_id)).argmin()\n test_labels.append(idx)\n\n for id in img_ids_train:\n idx = (np.abs(id - self.train_img_id)).argmin()\n train_labels.append(idx)\n\n for id in img_ids_imag:\n idx = (np.abs(id - self.test_img_id)).argmin()\n imag_labels.append(idx)\n\n if (test_run_list is not None):\n run = self.get_meta_field('Run')\n test = (self.get_meta_field('DataType') == 2).astype(bool)\n run = run[test]\n\n select = np.in1d(run, test_run_list)\n test_labels = test_labels[select]\n\n #imag_labels = le.fit_transform(img_ids_imag)\n if(imag_data):\n return np.array(train_labels), np.array(test_labels), np.array(imag_labels)\n else:\n return np.array(train_labels),np.array(test_labels)\n\n\n\n\n\n def get_data(self,normalize =1 ,roi = 'ROI_VC',imag_data = 0,test_run_list = None): # normalize 0-no, 1- per run , 2- train/test seperatly\n type = self.get_meta_field('DataType')\n train = (type == self.sample_type['train'])\n test = (type == self.sample_type['test'])\n test_imag = (type == self.sample_type['test_imagine'])\n test_all = np.logical_or(test,test_imag)\n\n roi_select = self.get_meta_field(roi).astype(bool)\n data = self.data[:,roi_select]\n\n if(self.log ==1):\n data = np.log(1+np.abs(data))*np.sign(data)\n\n\n if(normalize==1):\n\n run = self.get_meta_field('Run').astype('int')-1\n num_runs = np.max(run)+1\n data_norm = np.zeros(data.shape)\n\n for r in range(num_runs):\n data_norm[r==run] = sklearn.preprocessing.scale(data[r==run])\n train_data = data_norm[train]\n test_data = data_norm[test]\n test_all = data_norm[test_all]\n test_imag = data_norm[test_imag]\n\n else:\n 
train_data = data[train]\n test_data = data[test]\n if(normalize==2):\n train_data = sklearn.preprocessing.scale(train_data)\n test_data = sklearn.preprocessing.scale(test_data)\n\n\n if(self.log ==2):\n train_data = np.log(1+np.abs(train_data))*np.sign(train_data)\n test_data = np.log(1+np.abs(test_data))*np.sign(test_data)\n train_data = sklearn.preprocessing.scale(train_data)\n test_data = sklearn.preprocessing.scale(test_data)\n\n\n\n test_labels = self.get_labels()[1]\n imag_labels = self.get_labels(1)[2]\n num_labels = max(test_labels)+1\n test_data_avg = np.zeros([num_labels,test_data.shape[1]])\n test_imag_avg = np.zeros([num_labels,test_data.shape[1]])\n\n if(test_run_list is not None):\n run = self.get_meta_field('Run')\n test = (self.get_meta_field('DataType') == 2).astype(bool)\n run = run[test]\n\n select = np.in1d(run, test_run_list)\n test_data = test_data[select,:]\n test_labels = test_labels[select]\n\n for i in range(num_labels):\n test_data_avg[i] = np.mean(test_data[test_labels==i],axis=0)\n test_imag_avg[i] = np.mean(test_imag[imag_labels == i], axis=0)\n if(imag_data):\n return train_data, test_data, test_data_avg,test_imag,test_imag_avg\n\n else:\n return train_data, test_data, test_data_avg\n\n def get_voxel_loc(self):\n x = self.get_meta_field('voxel_x')\n y = self.get_meta_field('voxel_y')\n z = self.get_meta_field('voxel_z')\n dim = [int(x.max() -x.min()+1),int(y.max() -y.min()+1), int(z.max() -z.min()+1)]\n return [x,y,z] , dim\n\n\n"
] | [
[
"pandas.read_csv",
"numpy.abs",
"numpy.in1d",
"scipy.io.loadmat",
"numpy.nan_to_num",
"numpy.logical_or",
"numpy.sign",
"numpy.max",
"numpy.mean",
"numpy.array",
"sklearn.preprocessing.LabelEncoder",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
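`get_data(normalize=1)` in the record above z-scores the fMRI matrix run by run with `sklearn.preprocessing.scale`. A short sketch of that per-run standardisation on random stand-in data (not Kamitani data):

```python
import numpy as np
from sklearn.preprocessing import scale

rng = np.random.default_rng(0)
data = rng.normal(size=(12, 5))        # 12 samples x 5 "voxels" of random data
run = np.repeat([0, 1, 2], 4)          # run index of every sample

data_norm = np.zeros_like(data)
for r in np.unique(run):
    data_norm[run == r] = scale(data[run == r])   # zero mean, unit variance per run

print(data_norm[run == 0].mean(axis=0))           # ~0 for every voxel within a run
```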
Pluto9th/ctapipe | [
"8c4faa674a1949210cbda8cb9e2413dd6362afea",
"8c4faa674a1949210cbda8cb9e2413dd6362afea"
] | [
"ctapipe/reco/tests/test_energy_regressor.py",
"ctapipe/tools/plot_charge_resolution.py"
] | [
"from tempfile import TemporaryDirectory\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom astropy import units as u\n\nfrom ctapipe.reco.energy_regressor import EnergyRegressor\n\n\ndef test_prepare_model():\n cam_id_list = [\"FlashCam\", \"ASTRICam\"]\n feature_list = {\"FlashCam\": [[1, 10], [2, 20], [3, 30], [0.9, 9],\n ],\n \"ASTRICam\": [[10, 1], [20, 2], [30, 3], [9, 0.9],\n ]}\n target_list = {\"FlashCam\": np.array([1, 2, 3, 0.9]) * u.TeV,\n \"ASTRICam\": np.array([1, 2, 3, 0.9]) * u.TeV}\n\n reg = EnergyRegressor(cam_id_list=cam_id_list, n_estimators=10)\n reg.fit(feature_list, target_list)\n return reg, cam_id_list\n\n\ndef test_fit_save_load():\n reg, cam_id_list = test_prepare_model()\n with TemporaryDirectory() as d:\n temp_path = \"/\".join([d, \"reg_{cam_id}.pkl\"])\n reg.save(temp_path)\n reg = EnergyRegressor.load(temp_path, cam_id_list)\n return reg, cam_id_list\n\n\ndef test_predict_by_event():\n np.random.seed(3)\n\n reg, cam_id_list = test_fit_save_load()\n prediction = reg.predict_by_event([{\"ASTRICam\": [[10, 1]]},\n {\"ASTRICam\": [[20, 2]]},\n {\"ASTRICam\": [[30, 3]]}])\n assert_allclose(prediction[\"mean\"].value, [1, 2, 3], rtol=0.2)\n\n prediction = reg.predict_by_event([{\"FlashCam\": [[1, 10]]},\n {\"FlashCam\": [[2, 20]]},\n {\"FlashCam\": [[3, 30]]}])\n assert_allclose(prediction[\"mean\"].value, [1, 2, 3], rtol=0.2)\n",
"\"\"\"\nPlot charge resolutions generated by ChargeResolutionCalculator.\n\"\"\"\nimport numpy as np\nfrom traitlets import Dict, List, Unicode\nfrom ctapipe.core import Tool\nfrom ctapipe.plotting.charge_resolution import ChargeResolutionPlotter\n\n\nclass ChargeResolutionViewer(Tool):\n name = \"ChargeResolutionViewer\"\n description = (\"Plot charge resolutions generated by \"\n \"ChargeResolutionCalculator.\")\n\n input_files = List(\n Unicode, None,\n help='Input HDF5 files produced by ChargeResolutionCalculator'\n ).tag(config=True)\n\n aliases = Dict(dict(\n f='ChargeResolutionViewer.input_files',\n B='ChargeResolutionPlotter.n_bins',\n o='ChargeResolutionPlotter.output_path',\n ))\n classes = List([\n ChargeResolutionPlotter,\n ])\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.calculator = None\n self.plotter = None\n\n def setup(self):\n self.log_format = \"%(levelname)s: %(message)s [%(name)s.%(funcName)s]\"\n self.plotter = ChargeResolutionPlotter(parent=self)\n\n def start(self):\n for fp in self.input_files:\n self.plotter.plot_camera(fp)\n\n def finish(self):\n q = np.arange(1, 1000)\n self.plotter.plot_poisson(q)\n self.plotter.plot_requirement(q)\n self.plotter.save()\n\n\ndef main():\n exe = ChargeResolutionViewer()\n exe.run()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.random.seed",
"numpy.testing.assert_allclose"
],
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
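The regressor tests above check predictions with `assert_allclose(..., rtol=0.2)`. A quick sketch of how that relative tolerance behaves, with made-up numbers:

```python
import numpy as np
from numpy.testing import assert_allclose

# Passes: every element is within 20% of the expected value.
assert_allclose([1.1, 2.2, 2.9], [1.0, 2.0, 3.0], rtol=0.2)

# Fails: 1.5 is 50% away from 1.0, which exceeds rtol * |desired| = 0.2.
try:
    assert_allclose([1.5], [1.0], rtol=0.2)
except AssertionError:
    print("tolerance exceeded")
```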
lucasiscovici/plotly_py | [
"42ab769febb45fbbe0a3c677dc4306a4f59cea36",
"42ab769febb45fbbe0a3c677dc4306a4f59cea36"
] | [
"plotly_study/tests/test_orca/test_sg_scraper.py",
"plotly_study/tests/test_io/test_renderers.py"
] | [
"import plotly_study\nimport os\nimport shutil\nimport pytest\n\n\n# Fixtures\n# --------\[email protected]()\ndef setup():\n # Reset orca state\n plotly_study.io.orca.config.restore_defaults(reset_server=False)\n\n\nhere = os.path.dirname(os.path.abspath(__file__))\n\n\n# Run setup before every test function in this file\npytestmark = pytest.mark.usefixtures(\"setup\")\n\n\ndef execute_plotly_example():\n \"\"\"\n Some typical code which would go inside a gallery example.\n \"\"\"\n import plotly_study.graph_objs as go\n\n # Create random data with numpy\n import numpy as np\n\n N = 200\n random_x = np.random.randn(N)\n random_y_0 = np.random.randn(N)\n random_y_1 = np.random.randn(N) - 1\n\n # Create traces\n trace_0 = go.Scatter(x=random_x, y=random_y_0, mode=\"markers\", name=\"Above\")\n\n fig = go.Figure(data=[trace_0])\n plotly_study.io.show(fig)\n\n\ndef test_scraper():\n from plotly_study.io._sg_scraper import plotly_sg_scraper\n\n # test that monkey-patching worked ok\n assert plotly_study.io.renderers.default == \"sphinx_gallery\"\n # Use dummy values for arguments of plotly_sg_scraper\n block = \"\" # we don't need actually code\n import tempfile\n\n tempdir = tempfile.mkdtemp()\n gallery_conf = {\"src_dir\": tempdir, \"examples_dirs\": here}\n names = iter([\"0\", \"1\", \"2\"])\n block_vars = {\"image_path_iterator\": names}\n execute_plotly_example()\n res = plotly_sg_scraper(block, block_vars, gallery_conf)\n shutil.rmtree(tempdir)\n assert \".. raw:: html\" in res\n",
"import json\nimport sys\nimport base64\nimport threading\nimport time\n\nimport pytest\nimport requests\nimport numpy as np\n\nimport plotly_study.graph_objs as go\nimport plotly_study.io as pio\nfrom plotly_study.offline import get_plotlyjs\n\nif sys.version_info.major == 3 and sys.version_info.minor >= 3:\n import unittest.mock as mock\n from unittest.mock import MagicMock\nelse:\n import mock\n from mock import MagicMock\n\n\n# fixtures\n# --------\[email protected]\ndef fig1(request):\n return go.Figure(\n data=[\n {\n \"type\": \"scatter\",\n \"y\": np.array([2, 1, 3, 2, 4, 2]),\n \"marker\": {\"color\": \"green\"},\n }\n ],\n layout={\"title\": {\"text\": \"Figure title\"}},\n )\n\n\n# JSON\n# ----\ndef test_json_renderer_mimetype(fig1):\n pio.renderers.default = \"json\"\n expected = {\"application/json\": json.loads(pio.to_json(fig1, remove_uids=False))}\n\n pio.renderers.render_on_display = False\n\n with mock.patch(\"IPython.display.display\") as mock_display:\n fig1._ipython_display_()\n\n mock_display.assert_not_called()\n\n pio.renderers.render_on_display = True\n with mock.patch(\"IPython.display.display\") as mock_display:\n fig1._ipython_display_()\n\n mock_display.assert_called_once_with(expected, raw=True)\n\n\ndef test_json_renderer_show(fig1):\n pio.renderers.default = \"json\"\n expected_bundle = {\n \"application/json\": json.loads(pio.to_json(fig1, remove_uids=False))\n }\n\n with mock.patch(\"IPython.display.display\") as mock_display:\n pio.show(fig1)\n\n mock_display.assert_called_once_with(expected_bundle, raw=True)\n\n\ndef test_json_renderer_show_override(fig1):\n pio.renderers.default = \"notebook\"\n expected_bundle = {\n \"application/json\": json.loads(pio.to_json(fig1, remove_uids=False))\n }\n\n with mock.patch(\"IPython.display.display\") as mock_display:\n pio.show(fig1, renderer=\"json\")\n\n mock_display.assert_called_once_with(expected_bundle, raw=True)\n\n\n# Plotly mimetype\n# ---------------\nplotly_mimetype = \"application/vnd.plotly_study.v1+json\"\nplotly_mimetype_renderers = [\"plotly_mimetype\", \"jupyterlab\", \"vscode\", \"nteract\"]\n\n\[email protected](\"renderer\", plotly_mimetype_renderers)\ndef test_plotly_mimetype_renderer_mimetype(fig1, renderer):\n pio.renderers.default = renderer\n expected = {plotly_mimetype: json.loads(pio.to_json(fig1, remove_uids=False))}\n\n expected[plotly_mimetype][\"config\"] = {\"plotlyServerURL\": \"https://plot.ly\"}\n\n pio.renderers.render_on_display = False\n\n with mock.patch(\"IPython.display.display\") as mock_display:\n fig1._ipython_display_()\n\n mock_display.assert_not_called()\n\n pio.renderers.render_on_display = True\n with mock.patch(\"IPython.display.display\") as mock_display:\n fig1._ipython_display_()\n\n mock_display.assert_called_once_with(expected, raw=True)\n\n\[email protected](\"renderer\", plotly_mimetype_renderers)\ndef test_plotly_mimetype_renderer_show(fig1, renderer):\n pio.renderers.default = renderer\n expected = {plotly_mimetype: json.loads(pio.to_json(fig1, remove_uids=False))}\n\n expected[plotly_mimetype][\"config\"] = {\"plotlyServerURL\": \"https://plot.ly\"}\n\n with mock.patch(\"IPython.display.display\") as mock_display:\n pio.show(fig1)\n\n mock_display.assert_called_once_with(expected, raw=True)\n\n\n# Static Image\n# ------------\n# See plotly/tests/test_orca/test_image_renderers.py\n\n# HTML\n# ----\ndef assert_full_html(html):\n assert html.startswith(\"<html\")\n\n\ndef assert_not_full_html(html):\n assert not html.startswith(\"<html\")\n\n\ndef 
assert_connected(html):\n assert \"https://cdn.plot.ly/plotly-latest.min\" in html\n\n\ndef assert_offline(html):\n assert get_plotlyjs() in html\n\n\ndef assert_requirejs(html):\n assert 'require([\"plotly\"]' in html\n\n\ndef assert_not_requirejs(html):\n assert 'require([\"plotly\"]' not in html\n\n\ndef test_colab_renderer_show(fig1):\n pio.renderers.default = \"colab\"\n\n with mock.patch(\"IPython.display.display\") as mock_display:\n pio.show(fig1)\n\n # Get display call arguments\n mock_call_args = mock_display.call_args\n mock_arg1 = mock_call_args[0][0]\n\n # Check for html bundle\n assert list(mock_arg1) == [\"text/html\"]\n\n # Check html contents\n html = mock_arg1[\"text/html\"]\n assert_full_html(html)\n assert_connected(html)\n assert_not_requirejs(html)\n\n # check kwargs\n mock_kwargs = mock_call_args[1]\n assert mock_kwargs == {\"raw\": True}\n\n\[email protected](\n \"name,connected\",\n [(\"notebook\", False), (\"notebook_connected\", True), (\"kaggle\", True)],\n)\ndef test_notebook_connected_show(fig1, name, connected):\n # Set renderer\n pio.renderers.default = name\n\n # Show\n with mock.patch(\"IPython.display.display_html\") as mock_display_html:\n with mock.patch(\"IPython.display.display\") as mock_display:\n pio.show(fig1)\n\n # ### Check initialization ###\n # Get display call arguments\n mock_call_args_html = mock_display_html.call_args\n mock_arg1_html = mock_call_args_html[0][0]\n\n # Check init display contents\n bundle_display_html = mock_arg1_html\n if connected:\n assert_connected(bundle_display_html)\n else:\n assert_offline(bundle_display_html)\n\n # ### Check display call ###\n # Get display call arguments\n mock_call_args = mock_display.call_args\n mock_arg1 = mock_call_args[0][0]\n\n # Check for html bundle\n assert list(mock_arg1) == [\"text/html\"]\n\n # Check html display contents\n bundle_html = mock_arg1[\"text/html\"]\n assert_not_full_html(bundle_html)\n assert_requirejs(bundle_html)\n\n # check kwargs\n mock_kwargs = mock_call_args[1]\n assert mock_kwargs == {\"raw\": True}\n\n\n# Browser\n# -------\[email protected](\"renderer\", [\"browser\", \"chrome\", \"firefox\"])\ndef test_browser_renderer_show(fig1, renderer):\n pio.renderers.default = renderer\n renderer_obj = pio.renderers[renderer]\n\n # Setup mocks\n mock_get = MagicMock(name=\"test get\")\n mock_browser = MagicMock(name=\"test browser\")\n mock_get.return_value = mock_browser\n\n request_responses = []\n\n def perform_request(url):\n request_responses.append(requests.get(url))\n\n def open_url(url, new=0, autoraise=True):\n print(\"open url\")\n # Perform request in thread so that we don't block\n request_thread = threading.Thread(target=lambda: perform_request(url))\n request_thread.daemon = True\n request_thread.start()\n\n mock_browser.open.side_effect = open_url\n\n with mock.patch(\"webbrowser.get\", mock_get):\n pio.show(fig1)\n\n # check get args\n mock_get.assert_called_once_with(renderer_obj.using)\n\n # check open args\n mock_call_args = mock_browser.open.call_args\n mock_arg1 = mock_call_args[0][0]\n mock_arg1.startswith(\"http://127.0.0.1:\")\n\n mock_kwargs = mock_call_args[1]\n assert mock_kwargs == dict(new=renderer_obj.new, autoraise=renderer_obj.autoraise)\n\n # Give request content a little time to show up\n tries = 0\n while tries < 5 and not request_responses:\n time.sleep(0.5)\n\n # Check request content\n assert len(request_responses) == 1\n response = request_responses[0]\n assert response.status_code == 200\n html = response.content.decode(\"utf8\")\n 
assert_full_html(html)\n assert_offline(html)\n assert_not_requirejs(html)\n\n\n# Validation\n# ----------\[email protected](\"renderer\", [\"bogus\", \"json+bogus\", \"bogus+chrome\"])\ndef test_reject_invalid_renderer(renderer):\n with pytest.raises(ValueError) as e:\n pio.renderers.default = renderer\n\n e.match(\"Invalid named renderer\")\n\n\[email protected](\n \"renderer\", [\"json\", \"json+firefox\", \"chrome+colab+notebook+vscode\"]\n)\ndef test_accept_valid_renderer(renderer):\n pio.renderers.default = renderer\n"
] | [
[
"numpy.random.randn"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
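The renderer tests above rely on `unittest.mock.patch` to intercept display calls and assert on their arguments. A minimal sketch of the same pattern, patching `json.dumps` purely as an example target:

```python
import json
from unittest import mock

# Replace json.dumps for the duration of the block and assert on the call.
with mock.patch("json.dumps") as mock_dumps:
    json.dumps({"a": 1})                          # goes to the mock, not the real function
    mock_dumps.assert_called_once_with({"a": 1})
```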
guilhermemg/trace-links-tc-br | [
"965cb57d17057d1c9c3841c4aba01e72cf008cab"
] | [
"modules/models_runner/tc_br_models_runner.py"
] | [
"import pandas as pd\nimport numpy as np\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n\nfrom modules.utils import plots\nfrom modules.utils import firefox_dataset_p2 as fd\nfrom modules.utils import tokenizers as tok\nfrom modules.utils import aux_functions\n\nfrom modules.models.lda import LDA\nfrom modules.models.lsi import LSI\nfrom modules.models.bm25 import BM_25\nfrom modules.models.wordvec import WordVec_BasedModel\nfrom modules.models.zeror import ZeroR_Model\nfrom modules.models.vsm import VSM\n\nimport modules.models.model_hyperps as mh\n\nclass TC_BR_Models_Hyperp:\n \n @staticmethod\n def get_lsi_model_hyperp():\n return {\n mh.LSI_Model_Hyperp.SVD_MODEL_N_COMPONENTS.value: 20,\n mh.LSI_Model_Hyperp.VECTORIZER_NGRAM_RANGE.value: (1,1),\n mh.LSI_Model_Hyperp.VECTORIZER_MAX_FEATURES.value: 400,\n mh.LSI_Model_Hyperp.VECTORIZER.value : TfidfVectorizer(stop_words='english', use_idf=True, smooth_idf=True),\n mh.LSI_Model_Hyperp.VECTORIZER_TOKENIZER.value : tok.WordNetBased_LemmaTokenizer()\n }\n \n @staticmethod\n def get_lda_model_hyperp():\n return {\n mh.LDA_Model_Hyperp.LDA_MODEL_N_COMPONENTS.value: 20,\n mh.LDA_Model_Hyperp.LDA_MODEL_RANDOM_STATE.value : 2,\n mh.LDA_Model_Hyperp.VECTORIZER_NGRAM_RANGE.value: (1,1),\n mh.LDA_Model_Hyperp.VECTORIZER_MAX_FEATURES.value: 200,\n mh.LDA_Model_Hyperp.VECTORIZER.value : TfidfVectorizer(stop_words='english', use_idf=True, smooth_idf=True),\n mh.LDA_Model_Hyperp.VECTORIZER_TOKENIZER.value : tok.PorterStemmerBased_Tokenizer() \n }\n \n @staticmethod\n def get_bm25_model_hyperp():\n return {\n mh.BM25_Model_Hyperp.TOKENIZER.value : tok.PorterStemmerBased_Tokenizer()\n }\n \n @staticmethod\n def get_w2v_model_hyperp():\n return {\n mh.WordVec_Model_Hyperp.TOKENIZER.value : tok.PorterStemmerBased_Tokenizer(),\n mh.WordVec_Model_Hyperp.WORD_EMBEDDING.value : 'CC_BASED',\n mh.WordVec_Model_Hyperp.GEN_NAME.value : 'wordvector'\n }\n \n @staticmethod\n def get_cust_w2v_model_hyperp():\n return {\n mh.WordVec_Model_Hyperp.TOKENIZER.value : tok.PorterStemmerBased_Tokenizer(),\n mh.WordVec_Model_Hyperp.WORD_EMBEDDING.value : 'CUSTOMIZED',\n mh.WordVec_Model_Hyperp.GEN_NAME.value : 'cust_wordvector'\n }\n \n @staticmethod\n def get_vsm_model_hyperp():\n return {\n mh.VSM_Model_Hyperp.VECTORIZER_NGRAM_RANGE.value: (1,1),\n mh.VSM_Model_Hyperp.VECTORIZER_MAX_FEATURES.value: 400,\n mh.VSM_Model_Hyperp.VECTORIZER.value : TfidfVectorizer(stop_words='english', use_idf=True, smooth_idf=True),\n mh.VSM_Model_Hyperp.VECTORIZER_TOKENIZER.value : tok.WordNetBased_LemmaTokenizer()\n }\n\nclass TC_BR_Runner:\n def __init__(self, testcases=pd.DataFrame(), bugreports=pd.DataFrame()):\n self.test_cases_df = None\n self.bug_reports_df = None\n self.corpus = None\n self.query = None\n self.test_cases_names = None\n self.bug_reports_names = None\n \n self.set_basic_params(testcases, bugreports)\n \n \n def set_basic_params(self, testcases, bugreports):\n if testcases.empty:\n self.test_cases_df = fd.Datasets.read_testcases_df()\n else:\n self.test_cases_df = testcases\n \n if bugreports.empty:\n self.bug_reports_df = fd.Datasets.read_selected_bugreports_df()\n else:\n self.bug_reports_df = bugreports\n \n self.corpus = self.test_cases_df.tc_desc\n self.query = self.bug_reports_df.br_desc\n \n self.test_cases_names = self.test_cases_df.TC_Number\n self.bug_reports_names = self.bug_reports_df.Bug_Number\n \n def run_lsi_model(self, lsi_hyperp=None):\n print(\"Running LSI Model ------\")\n \n if lsi_hyperp == None:\n lsi_hyperp = 
TC_BR_Models_Hyperp.get_lsi_model_hyperp()\n\n lsi_model = LSI(**lsi_hyperp)\n lsi_model.set_name('LSI_Model_TC_BR')\n \n lsi_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n \n return lsi_model\n \n def run_lda_model(self, lda_hyperp=None):\n print(\"Running LDA Model -----\")\n \n if lda_hyperp == None:\n lda_hyperp = TC_BR_Models_Hyperp.get_lda_model_hyperp()\n\n lda_model = LDA(**lda_hyperp)\n lda_model.set_name('LDA_Model_TC_BR')\n lda_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n\n return lda_model\n \n def run_bm25_model(self, bm25_hyperp=None):\n print(\"Running BM25 Model -----\")\n \n if bm25_hyperp == None:\n bm25_hyperp = TC_BR_Models_Hyperp.get_bm25_model_hyperp()\n\n bm25_model = BM_25(**bm25_hyperp)\n bm25_model.set_name('BM25_Model_TC_BR')\n bm25_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n\n return bm25_model\n \n def run_word2vec_model(self, wv_hyperp=None):\n print(\"Running W2V Model ------\")\n \n if wv_hyperp == None:\n wv_hyperp = TC_BR_Models_Hyperp.get_w2v_model_hyperp()\n\n wv_model = WordVec_BasedModel(**wv_hyperp)\n wv_model.set_name('WordVec_Model_TC_BR')\n wv_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n\n return wv_model\n \n def run_cust_word2vec_model(self, wv_hyperp=None):\n print(\"Running Customized W2V model -----\")\n \n if wv_hyperp == None:\n wv_hyperp = TC_BR_Models_Hyperp.get_cust_w2v_model_hyperp()\n\n wv_model = WordVec_BasedModel(**wv_hyperp)\n wv_model.set_name('Customized_WordVec_Model_TC_BR')\n wv_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n\n return wv_model\n \n def run_zeror_model(self, zeror_hyperp=None):\n print(\"Running ZeroR model -----\")\n \n oracle = fd.Tc_BR_Oracles.read_oracle_expert_volunteers_intersec_df()\n \n zeror_model = ZeroR_Model(oracle)\n zeror_model.set_name('ZeroR_Model_TC_BR')\n zeror_model.recover_links()\n \n return zeror_model\n\n def run_vsm_model(self, vsm_hyperp=None):\n print('Running VSM model -----')\n \n if vsm_hyperp == None:\n vsm_hyperp = TC_BR_Models_Hyperp.get_vsm_model_hyperp()\n \n vsm_model = VSM(**vsm_hyperp)\n vsm_model.set_name('VSM_Model_TC_BR')\n vsm_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n \n return vsm_model"
] | [
[
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
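The hyper-parameter presets above configure `TfidfVectorizer(stop_words='english', use_idf=True, smooth_idf=True)` for trace-link recovery between test cases and bug reports. A compact sketch of TF-IDF plus cosine similarity on an invented test-case/bug-report pair, standing in for the model's link score:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

corpus = ["open a new tab and verify the address bar",
          "download a file and check the progress dialog"]   # stand-in test cases
query = ["address bar does not update after opening a tab"]  # stand-in bug report

vec = TfidfVectorizer(stop_words="english", use_idf=True, smooth_idf=True)
corpus_tfidf = vec.fit_transform(corpus)   # learn vocabulary/idf on the corpus
query_tfidf = vec.transform(query)         # project the query into the same space

print(cosine_similarity(query_tfidf, corpus_tfidf))  # higher score = stronger link
```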
kumasento/gconv-prune | [
"f81c417d3754102c902bd153809130e12607bd7d",
"f81c417d3754102c902bd153809130e12607bd7d"
] | [
"evaluation/early_stage/prune.py",
"evaluation/early_stage/export.py"
] | [
"\"\"\" Pruning a pre-trained model by GSP.\n\nAuthor: Ruizhe Zhao\nDate: 12/02/2019\n\nThe work-flow of this script:\n- load a pre-trained model (suffixed by 'm')\n- compute the mask based on weights\n- fine-tune the model\n\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport copy\nimport time\nimport shutil\nimport json\nimport logging\n\nlogging.getLogger().setLevel(logging.DEBUG)\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom gumi.ops.mask_conv2d import MaskConv2d\nfrom gumi.pruning import prune_utils\nfrom gumi import model_utils\nfrom gumi import models # a module contains all supported models\n\nmodel_names = sorted(\n name\n for name in models.__dict__\n if name.islower() and not name.startswith(\"__\") and callable(models.__dict__[name])\n)\n\nimport cifar_utils\nfrom utils import * # import utilities provided by pytorch-classification\nfrom parser import create_parser # argument parser for evaluation tasks\nfrom pruner import Pruner\n\nparser = create_parser()\nargs = parser.parse_args()\n\n# CUDA\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\nuse_cuda = torch.cuda.is_available()\ncudnn.benchmark = True\n\n\ndef write_summary(args, file_name=\"summary.json\", **kwargs):\n \"\"\" Write summary to a JSON file. \"\"\"\n summary_file = \"{}/{}\".format(args.checkpoint, file_name)\n with open(summary_file, \"w\") as f:\n json.dump(kwargs, f)\n\n\ndef main():\n # initialize the pruner\n pruner = Pruner(args)\n # pruner.prune(args.checkpoint)\n pruner.evaluate()\n\n # Run regularization\n pruner.prune(\n args.checkpoint, fake_mask=True, perm=args.perm, num_iters=args.num_sort_iters\n )\n pruner.evaluate()\n pruner.regularize()\n pruner.apply_mask()\n pruner.evaluate()\n\n logging.debug(\"Fine-tuning model for {} epochs\".format(args.epochs))\n best_acc = pruner.fine_tune(args.epochs)\n logging.debug(\"Fine-tuned model\")\n pruner.evaluate()\n\n write_summary(args, best_acc=best_acc)\n\n\nif __name__ == \"__main__\":\n main()\n",
"\"\"\" Export a fine-tuned mask based model to a grouped one. \"\"\"\n\nimport os\nimport sys\nimport argparse\nimport copy\nimport time\nimport shutil\nimport json\nimport itertools\nimport functools\nfrom subprocess import Popen, PIPE # launching pruning processes\nimport logging\n\nlogging.getLogger().setLevel(logging.DEBUG)\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom parser import create_parser\nfrom group_exporter import GroupExporter\n\nparser = create_parser(prog=\"Export from a mask based model to a grouped one.\")\nargs = parser.parse_args()\n\n# CUDA\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\nuse_cuda = torch.cuda.is_available()\ncudnn.benchmark = True\n\n\ndef main():\n exporter = GroupExporter(args)\n exporter.export()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.cuda.is_available"
],
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
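Both scripts in the record above select GPUs via `CUDA_VISIBLE_DEVICES` and enable `cudnn.benchmark` before building the pruner or exporter. A minimal sketch of that preamble; the device id "0" is only an example, and the check degrades to CPU when no GPU is visible:

```python
import os

# Restrict the process to one GPU; must be set before CUDA is initialised.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"   # example device id

import torch
import torch.backends.cudnn as cudnn

use_cuda = torch.cuda.is_available()       # False when no GPU is visible
cudnn.benchmark = True                     # let cuDNN auto-tune convolution kernels
print("CUDA available:", use_cuda)
```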
wj-Mcat/Paddle | [
"0a931106008f4174a8556aa4a4b9f23167c33f4d"
] | [
"python/paddle/fluid/reader.py"
] | [
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom . import core\nimport sys\nimport six\nimport numpy as np\nimport threading\nimport paddle\nfrom .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place, _in_eager_mode\nfrom .executor import global_scope\nfrom .data_feeder import DataFeeder, BatchedTensorProvider\nfrom .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler\nfrom .dataloader import BatchSampler, Dataset, IterableDataset\nfrom .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn\nfrom .dataloader.batch_sampler import _InfiniteIterableSampler\nfrom .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer\nfrom .unique_name import UniqueNameGenerator\nfrom .framework import _get_paddle_place, _get_paddle_place_list\nfrom paddle.fluid.framework import _set_expected_place, _current_expected_place\nimport logging\nimport warnings\n\n### Dygraph DataLoader configs ###\nimport os\nimport multiprocessing\nimport signal\n\n# NOTE: queue has a different name in python2 and python3\nimport queue\n\n# NOTE: [ avoid hanging & failed quickly ] These value is used in getting data from another process\nQUEUE_GET_TIMEOUT = 60\n\n__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']\n\ndata_loader_unique_name_generator = UniqueNameGenerator()\n\nKEEP_DATA_LOADER_ORDER = True\nUSE_PINNED_MEMORY = None\n\n\ndef keep_data_loader_order(*args):\n global KEEP_DATA_LOADER_ORDER\n if len(args) == 0:\n return KEEP_DATA_LOADER_ORDER\n else:\n assert len(args) == 1 and isinstance(args[0], bool)\n KEEP_DATA_LOADER_ORDER = args[0]\n\n\ndef use_pinned_memory(*args):\n global USE_PINNED_MEMORY\n if len(args) == 0:\n return USE_PINNED_MEMORY\n else:\n assert len(args) == 1 and isinstance(args[0], bool)\n USE_PINNED_MEMORY = args[0]\n\n\ndef _convert_places(places):\n if not isinstance(places, (list, tuple)):\n places = [places]\n\n ret = []\n for p in places:\n if not isinstance(p, core.Place):\n tmp = core.Place()\n tmp.set_place(p)\n p = tmp\n\n ret.append(p)\n return ret\n\n\n# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled\ndef _reader_process_loop(batch_reader, data_queue):\n try:\n # set signal handler\n core._set_process_signal_handler()\n\n # NOTE: [ mmap files clear ] When the child process exits unexpectedly,\n # some shared memory objects may have been applied for but have not yet\n # been put into the inter-process Queue. 
This part of the object needs\n # to be cleaned up when the process ends.\n CleanupFuncRegistrar.register(_cleanup_mmap)\n\n for batch in batch_reader():\n tensor_list = core._convert_to_tensor_list(batch)\n data_queue.put(tensor_list)\n core._remove_tensor_list_mmap_fds(tensor_list)\n data_queue.put(None)\n except KeyboardInterrupt:\n # NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process\n pass\n except:\n six.reraise(*sys.exc_info())\n\n\nclass DataLoaderBase(object):\n def __init__(self):\n self._places = None\n\n def __call__(self):\n return self\n\n def next(self):\n '''\n Get the next item in the DataLoader object. This method \n should not be called by users directly. It is used for\n implementing iterator protocol of Python 2.x inside\n PaddlePaddle framework.\n '''\n return self.__next__()\n\n def __iter__(self):\n raise NotImplementedError()\n\n def __next__(self):\n raise NotImplementedError()\n\n @classmethod\n def _check_input_array(cls, item):\n arr = np.asarray(item)\n if arr.dtype == np.object:\n raise TypeError(\n \"\\n\\tFaild to convert input data to a regular ndarray :\\n\\t* Usually \"\n \"this means the input data contains nested lists with different lengths. \"\n \"\\n\\t* Check the reader function passed to 'decorate_batch_generator'\"\n \" to locate the data causes this issue.\\n\\t* Please consider using \"\n \"'fluid.create_lod_tensor' to convert it to a LoD-Tensor.\")\n return arr\n\n\nclass DataLoader(object):\n \"\"\"\n DataLoader prodives an iterator which iterates given dataset\n once by the batch_sampler.\n\n DataLoader supports single-process and multi-prcess data loading,\n multi-process workers will be used to load data asynchronously if\n :attr:`num_workers` is set as a positive number.\n\n DataLoader supports map-style dataset and iterable-style dataset.\n\n For map-style datast(can get a sample from dataset with a given\n index), please see :code:`paddle.io.Dataset`.\n\n For iterable-style datast(get samples from dataset iteratively,\n like a Python iterator), please see :code:`paddle.io.IterableDataset`.\n\n For :code:`batch_sampler` please see :code:`paddle.io.BatchSampler`\n\n .. note::\n GPU tensor operation is not supported in subprocess currently,\n please don't use GPU tensor operations in pipeline which will\n be performed in subprocess, such as dataset transforms, collte_fn,\n etc. Numpy array and CPU tensor operation is supported.\n\n **Disable automatic batching**\n\n In certain cases such as some NLP tasks, instead of automatic batching,\n handling batching manually in dataset is needed by users. For these\n cases, automatic batching is disabled if both :attr:`batch_size` and\n :attr:`batch_sampler` is set as None, each data got from :attr:`dataset`\n should be batched data and will be processed with function define by\n :attr:`collate_fn` or :attr:`default_collate_fn`.\n\n\n .. note::\n When automatic batching is disabled, :attr:`default_collate_fn` will\n do nothing to data from dataset.\n\n\n Args: \n dataset(Dataset): the dataset to load data from, should be an\n instance of subclass of :code:`paddle.io.Dataset` or\n :code:`paddle.io.IterableDataset`.\n feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.\n The Tensors should be created by :code:`paddle.static.data()`.\n :attr:`feed_list` must be set if :attr:`return_list` is\n False. 
Default None.\n places(list(Place)|tuple(Place)|list(str)|optional): a list of Place,\n to put data onto, :attr:`places` can be None, if \n :attr:`places` is None, default place(CPUPlace or CUDAPlace(0))\n will be used. Default None. If ``places`` is list of string,\n the string in the list can be ``cpu``, ``gpu:x`` and ``gpu_pinned``,\n where ``x`` is the index of the GPUs.\n return_list (bool): whether the return value on each device is \n presented as a list. If :attr:`return_list=False`, the return\n value on each device would be a dict of str -> Tensor, where\n the key of the dict is the name of each fed Tensors. If \n :attr:`return_list=True`, the return value on each device would\n be a list(Tensor). :attr:`return_list` can only be True\n in dynamic graph mode. Default True.\n batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`\n to generate batch indices to draw samples from :attr:`dataset`\n and combine a batch. Default None.\n batch_size(int|None): sample number in a mini-batch, a substitution\n parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`\n is not set, a default `paddle.io.BatchSampler` will be used\n and initialize by :attr:`batch_size`, :attr:`shuffle` and\n :attr:`drop_last`. Default 1.\n shuffle(bool): whther to shuffle indices order before genrate\n batch indices, a substitution parameter for :attr:`batch_sampler`\n see :attr:`batch_size`. Default False.\n drop_last(bool): whether drop the last incomplete batch dataset size\n is not divisible by the batch size, a substitution parameter\n for :attr:`batch_sampler`, see :attr:`batch_size`. Default False\n collate_fn(callable): function to generate mini-batch data by merging\n the sample list, None for only stack each fields of sample in axis\n 0(same as :attr::`np.stack(..., axis=0)`). Default None\n num_workers(int): the number of subprocess to load data, 0 for no\n subprocess used and loading data in main process. Default 0\n use_buffer_reader (bool): whether to use bufferred reader. \n If use_buffer_reader=True, the DataLoader would prefetch next \n batch data asynchronously, so it would speed up data feeding \n and occupies a little more CPU or GPU memory, i.e., the memory\n of one batch input data. Default True.\n use_shared_memory (bool): whether to use shared memory to speed up\n putting data into inter-process queue, set :attr:`use_shared_memory`\n as True only when the shared memory space on your machine(e.g.\n space of '/dev/shm' on Linux operating sysytem) is large enough.\n Shared memory will only be enabled in multi-process mode(num_workers\n > 0). Default True.\n timeout(int): the timeout value for getting data form output queue\n of subprocesses. Default 0.\n worker_init_fn(callable): init function which will be called with\n worker id on each subproces starting if not set as None. Default\n None.\n\n Returns:\n DataLoader: an iterable object for data iterating, each elemnet of the generated data is a Tensor.\n\n Examples:\n \n .. 
code-block:: python\n\n import numpy as np\n\n import paddle\n import paddle.nn as nn\n import paddle.nn.functional as F\n from paddle.io import Dataset, BatchSampler, DataLoader\n\n BATCH_NUM = 20\n BATCH_SIZE = 16\n EPOCH_NUM = 4\n\n IMAGE_SIZE = 784\n CLASS_NUM = 10\n\n # define a random dataset\n class RandomDataset(Dataset):\n def __init__(self, num_samples):\n self.num_samples = num_samples\n\n def __getitem__(self, idx):\n image = np.random.random([IMAGE_SIZE]).astype('float32')\n label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')\n return image, label\n\n def __len__(self):\n return self.num_samples\n\n dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)\n\n class SimpleNet(nn.Layer):\n def __init__(self):\n super(SimpleNet, self).__init__()\n self.fc = nn.Linear(IMAGE_SIZE, CLASS_NUM)\n\n def forward(self, image, label=None):\n return self.fc(image)\n\n simple_net = SimpleNet()\n opt = paddle.optimizer.SGD(learning_rate=1e-3,\n parameters=simple_net.parameters())\n\n loader = DataLoader(dataset,\n batch_size=BATCH_SIZE,\n shuffle=True,\n drop_last=True,\n num_workers=2)\n\n for e in range(EPOCH_NUM):\n for i, (image, label) in enumerate(loader()):\n out = simple_net(image)\n loss = F.cross_entropy(out, label)\n avg_loss = paddle.mean(loss)\n avg_loss.backward()\n opt.minimize(avg_loss)\n simple_net.clear_gradients()\n print(\"Epoch {} batch {}: loss = {}\".format(e, i, np.mean(loss.numpy())))\n\n\n .. note::\n For reading iterable dataset with multiprocess Dataloader,\n please see :code:`paddle.io.IterableDataset`\n\n \"\"\"\n\n def __init__(self,\n dataset,\n feed_list=None,\n places=None,\n return_list=True,\n batch_sampler=None,\n batch_size=1,\n shuffle=False,\n drop_last=False,\n collate_fn=None,\n num_workers=0,\n use_buffer_reader=True,\n use_shared_memory=True,\n timeout=0,\n worker_init_fn=None,\n persistent_workers=False):\n self.return_list = return_list\n self.collate_fn = collate_fn\n self.use_buffer_reader = use_buffer_reader\n self.worker_init_fn = worker_init_fn\n\n self.dataset = dataset\n\n if not return_list and not in_dygraph_mode():\n assert feed_list is not None, \\\n \"feed_list should be set when return_list=False\"\n self.feed_list = feed_list\n\n if places is None:\n places = _current_expected_place()\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n self.places = _convert_places(places)\n\n assert num_workers >= 0, \"num_workers should be a non-negative value\"\n if num_workers > 0 and (sys.platform == 'darwin' or\n sys.platform == 'win32'):\n warnings.warn(\n \"DataLoader with multi-process mode is not supported on MacOs and Windows currently.\" \\\n \" Please use signle-process mode with num_workers = 0 instead\")\n num_workers = 0\n self.num_workers = num_workers\n\n self.use_shared_memory = use_shared_memory\n if use_shared_memory and num_workers == 0:\n self.use_shared_memory = False\n\n assert timeout >= 0, \"timeout should be a non-negative value\"\n self.timeout = timeout\n\n if isinstance(dataset, IterableDataset):\n self.dataset_kind = _DatasetKind.ITER\n if shuffle:\n raise ValueError(\n \"IterableDataset not support shuffle, but got shuffle={}\".\n format(shuffle))\n if batch_sampler is not None:\n raise ValueError(\n \"IterableDataset expect unspecified batch_sampler\")\n else:\n self.dataset_kind = _DatasetKind.MAP\n\n if batch_sampler is not None:\n assert batch_size == 1 and not shuffle and not drop_last, \\\n \"batch_size/shuffle/drop_last should 
not be set when \" \\\n \"batch_sampler is given\"\n self.batch_sampler = batch_sampler\n self.batch_size = None\n elif batch_size is None:\n self.batch_sampler = None\n self.batch_size = None\n else:\n assert batch_size > 0, \\\n \"batch_size should be None or a positive value when \" \\\n \"batch_sampler is not given\"\n self.batch_size = batch_size\n if isinstance(dataset, IterableDataset):\n self.batch_sampler = _InfiniteIterableSampler(dataset,\n batch_size)\n else:\n self.batch_sampler = BatchSampler(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n drop_last=drop_last)\n\n self.drop_last = drop_last\n self.auto_collate_batch = self.batch_sampler is not None\n\n self.pin_memory = False\n if in_dygraph_mode():\n self.pin_memory = use_pinned_memory() or True\n\n self._persistent_workers = persistent_workers\n self._iterator = None\n\n def __len__(self):\n if self.dataset_kind == _DatasetKind.ITER:\n raise ValueError(\"length of IterableDataset not supported\")\n else:\n if self.auto_collate_batch:\n return len(self.batch_sampler)\n else:\n return len(self.dataset)\n\n def __iter__(self):\n if self.num_workers == 0:\n return _DataLoaderIterSingleProcess(self)\n elif self._persistent_workers:\n if self._iterator is None:\n self._iterator = _DataLoaderIterMultiProcess(self)\n else:\n self._iterator._reset()\n return self._iterator\n else:\n return _DataLoaderIterMultiProcess(self)\n\n def __call__(self):\n return self.__iter__()\n\n @staticmethod\n def from_generator(feed_list=None,\n capacity=None,\n use_double_buffer=True,\n iterable=True,\n return_list=False,\n use_multiprocess=False,\n drop_last=True):\n \"\"\"\n .. warning::\n This API will be deprecated in the future, it is recommended to use\n :code:`paddle.io.DataLoader` which supports multi-processes acceleration.\n\n .. note::\n **The framework ensures that the data loading order of DataLoader is exactly the same as the user-defined data source.**\n\n Create a DataLoader object for loading data from Python generator. \n Data would be prefetched using Python thread and be pushed\n into a queue asynchronously.\n\n The created DataLoader object provides 3 methods to set the data source\n :code:`set_sample_generator` , :code:`set_sample_list_generator` and \n :code:`set_batch_generator` . Please see the following example codes\n to know their usages.\n \n If iterable = True, the created DataLoader object is a Python generator\n object, which is iterable using for-range loop.\n\n If iterable = False, the created DataLoader object provides \n :code:`start()` and :code:`reset()` method to control the data reading\n process.\n\n Args: \n feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.\n The Tensors should be created by :code:`fluid.data()`.\n capacity (int): capacity of the queue maintained in DataLoader.\n The unit is batch number. Set larger capacity if your reader \n is fast. \n use_double_buffer (bool): whether to use double_buffer_reader. \n If use_double_buffer=True, the DataLoader would prefetch next \n batch data asynchronously, so it would speed up data feeding \n and occupies a little more CPU or GPU memory, i.e., the memory\n of one batch input data. \n iterable (bool): whether the created DataLoader is iterable. \n return_list (bool): whether the return value on each device is \n presented as a list. It is only valid when iterable=True. \n If return_list=False, the return value on each device would \n be a dict of str -> LoDTensor, where the key of the dict is \n the name of each fed Tensors. 
If return_list=True, the \n return value on each device would be a list(LoDTensor). It is\n recommended to use return_list=False in static graph mode and\n use return_list=True in dygraph mode. \n use_multiprocess (bool): whether to use multi-process to speed up\n the data loading process in dygraph. Note: this parameter only\n can be used in the dygraph mode. In the static graph mode,\n whether this parameter is set or not has no effect.\n The Default value is False.\n drop_last (bool): whether to drop the last batches whose number is\n less than the CPU core/GPU card number. The default value is \n True. In training phase, users should not set drop_last=False,\n because all CPU cores/GPU cards must read data from DataLoader. \n In inference phase, users can set drop_last=False, so that the\n last batches whose number is less than the CPU core/GPU card\n number can be tested. \n\n Returns:\n loader (DataLoader): the created DataLoader object.\n\n Examples 1:\n \n .. code-block:: python\n\n '''\n Example in static graph mode\n '''\n import numpy as np\n\n import paddle\n import paddle.static as static\n import paddle.nn.functional as F\n\n\n BATCH_NUM = 10 \n BATCH_SIZE = 16\n EPOCH_NUM = 4\n\n CLASS_NUM = 10\n\n ITERABLE = True # whether the created DataLoader object is iterable\n USE_GPU = False # whether to use GPU\n\n DATA_FORMAT = 'batch_generator' # data format of data source user provides \n\n paddle.enable_static()\n\n def simple_net(image, label):\n fc_tmp = static.nn.fc(image, size=CLASS_NUM)\n cross_entropy = F.softmax_with_cross_entropy(image, label)\n loss = paddle.mean(cross_entropy)\n sgd = paddle.optimizer.SGD(learning_rate=1e-3)\n sgd.minimize(loss)\n return loss\n\n def get_random_images_and_labels(image_shape, label_shape):\n image = np.random.random(size=image_shape).astype('float32')\n label = np.random.random(size=label_shape).astype('int64')\n return image, label\n\n # If the data generator yields one sample each time,\n # use DataLoader.set_sample_generator to set the data source.\n def sample_generator_creator(): \n def __reader__():\n for _ in range(BATCH_NUM * BATCH_SIZE):\n image, label = get_random_images_and_labels([784], [1])\n yield image, label\n\n return __reader__\n\n # If the data generator yield list of samples each time,\n # use DataLoader.set_sample_list_generator to set the data source.\n def sample_list_generator_creator():\n def __reader__():\n for _ in range(BATCH_NUM): \n sample_list = []\n for _ in range(BATCH_SIZE):\n image, label = get_random_images_and_labels([784], [1])\n sample_list.append([image, label])\n\n yield sample_list\n\n return __reader__ \n\n # If the data generator yields a batch each time, \n # use DataLoader.set_batch_generator to set the data source.\n def batch_generator_creator():\n def __reader__():\n for _ in range(BATCH_NUM):\n batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1]) \n yield batch_image, batch_label\n\n return __reader__\n\n # If DataLoader is iterable, use for loop to train the network \n def train_iterable(exe, prog, loss, loader):\n for _ in range(EPOCH_NUM):\n for data in loader():\n exe.run(prog, feed=data, fetch_list=[loss])\n\n # If DataLoader is not iterable, use start() and reset() method to control the process \n def train_non_iterable(exe, prog, loss, loader):\n for _ in range(EPOCH_NUM):\n loader.start() # call DataLoader.start() before each epoch starts\n try:\n while True:\n exe.run(prog, fetch_list=[loss])\n except paddle.core.EOFException:\n 
loader.reset() # call DataLoader.reset() after catching EOFException \n\n def set_data_source(loader, places):\n if DATA_FORMAT == 'sample_generator':\n loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places)\n elif DATA_FORMAT == 'sample_list_generator':\n loader.set_sample_list_generator(sample_list_generator_creator(), places=places)\n elif DATA_FORMAT == 'batch_generator':\n loader.set_batch_generator(batch_generator_creator(), places=places)\n else:\n raise ValueError('Unsupported data format')\n\n image = static.data(name='image', shape=[None, 784], dtype='float32')\n label = static.data(name='label', shape=[None, 1], dtype='int64')\n\n # Define DataLoader \n loader = paddle.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE)\n\n # Define network\n loss = simple_net(image, label)\n\n # Set data source of DataLoader\n #\n # If DataLoader is iterable, places must be given and the number of places must be the same with device number. \n # - If you are using GPU, call `paddle.static.cuda_places()` to get all GPU places. \n # - If you are using CPU, call `paddle.static.cpu_places()` to get all CPU places. \n # \n # If DataLoader is not iterable, places can be None.\n places = static.cuda_places() if USE_GPU else static.cpu_places()\n set_data_source(loader, places)\n\n exe = static.Executor(places[0])\n exe.run(static.default_startup_program())\n\n prog = static.CompiledProgram(static.default_main_program()).with_data_parallel(loss_name=loss.name)\n\n if loader.iterable:\n train_iterable(exe, prog, loss, loader)\n else:\n train_non_iterable(exe, prog, loss, loader)\n\n\n Examples 2:\n\n .. code-block:: python\n\n '''\n Example in dynamic graph mode. \n '''\n import numpy as np\n\n import paddle\n import paddle.nn as nn\n import paddle.optimizer as opt\n import paddle.distributed as dist\n\n BATCH_SIZE = 16\n BATCH_NUM = 4\n EPOCH_NUM = 4\n\n IMAGE_SIZE = 784\n CLASS_NUM = 10\n\n USE_GPU = False # whether to use GPU\n\n def _get_random_images_and_labels(image_shape, label_shape):\n image = np.random.random(size=image_shape).astype('float32')\n label = np.random.random(size=label_shape).astype('int64')\n return image, label\n\n def __reader__():\n for _ in range(BATCH_NUM):\n batch_image, batch_label = _get_random_images_and_labels(\n [BATCH_SIZE, IMAGE_SIZE], [BATCH_SIZE, CLASS_NUM])\n yield batch_image, batch_label\n\n def random_batch_reader():\n return __reader__\n\n class LinearNet(nn.Layer):\n def __init__(self):\n super(LinearNet, self).__init__()\n self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)\n\n @paddle.jit.to_static\n def forward(self, x):\n return self._linear(x)\n\n # set device\n paddle.set_device('gpu' if USE_GPU else 'cpu')\n\n # create network\n layer = LinearNet()\n dp_layer = paddle.DataParallel(layer)\n loss_fn = nn.CrossEntropyLoss()\n adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())\n\n # create data loader\n loader = paddle.io.DataLoader.from_generator(capacity=5)\n loader.set_batch_generator(random_batch_reader())\n\n for epoch_id in range(EPOCH_NUM):\n for batch_id, (image, label) in enumerate(loader()):\n out = layer(image)\n loss = loss_fn(out, label)\n\n loss.backward()\n\n adam.step()\n adam.clear_grad()\n print(\"Epoch {} batch {}: loss = {}\".format(\n epoch_id, batch_id, np.mean(loss.numpy())))\n\n Examples 3:\n\n .. 
code-block:: python\n\n '''\n Example of `drop_last` using in static graph multi-cards mode\n '''\n import paddle\n import paddle.static as static\n import numpy as np\n import os\n\n # We use 2 CPU cores to run inference network \n os.environ['CPU_NUM'] = '2'\n\n paddle.enable_static()\n\n # The data source has only 3 batches, which can not be\n # divided evenly to each CPU core\n def batch_generator(): \n for i in range(3):\n yield np.array([i+1]).astype('float32'), \n\n x = static.data(name='x', shape=[None], dtype='float32') \n y = x * x\n\n def run_inference(drop_last): \n loader = paddle.io.DataLoader.from_generator(feed_list=[x],\n capacity=8, drop_last=drop_last)\n loader.set_batch_generator(batch_generator, static.cpu_places())\n\n exe = static.Executor(paddle.CPUPlace())\n prog = static.CompiledProgram(static.default_main_program())\n prog = prog.with_data_parallel()\n\n result = []\n for data in loader():\n each_ret, = exe.run(prog, feed=data, fetch_list=[y])\n result.extend(each_ret)\n return result\n\n # Set drop_last to True, so that the last batch whose\n # number is less than CPU core number would be discarded.\n print(run_inference(drop_last=True)) # [1.0, 4.0]\n\n # Set drop_last to False, so that the last batch whose\n # number is less than CPU core number can be tested.\n print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]\n \"\"\"\n if in_dygraph_mode():\n return DygraphGeneratorLoader(feed_list, capacity,\n use_double_buffer, iterable,\n return_list, use_multiprocess)\n else:\n return GeneratorLoader(feed_list, capacity, use_double_buffer,\n iterable, return_list, drop_last)\n\n @staticmethod\n def from_dataset(dataset, places, drop_last=True):\n \"\"\"\n .. warning::\n This API will be deprecated in the future, it is recommended to use\n :code:`paddle.io.DataLoader` which supports multi-processes acceleration.\n\n Create an iterable DataLoader object for loading data from Dataset. \n Dataset is only supported in Linux system currently.\n\n Args:\n dataset (InMemoryDataset|QueueDataset): the dataset object.\n places (list(CUDAPlace)|list(CPUPlace)|list(str)): places where the result \n data should be converted. If places is list of string, the string in the list \n can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where x is the index of the GPUs. \n drop_last (bool): whether to drop the last batch whose sample \n number is less than batch size. If drop_last = True, they\n would be dropped. If drop_last = False, they would be kept. \n\n Returns:\n loader (DataLoader): the created DataLoader object, which can be \n treated as a Python generator. \n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n image = static.data(name='image', shape=[None, 784], dtype='float32')\n label = static.data(name='label', shape=[None, 1], dtype='int64')\n\n dataset = paddle.distributed.QueueDataset()\n dataset.init(\n batch_size=32,\n pipe_command='cat',\n use_var=[image, label])\n dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])\n\n loader = paddle.io.DataLoader.from_dataset(dataset, static.cpu_places())\n \"\"\"\n return DatasetLoader(dataset, places, drop_last)\n\n\nclass DygraphGeneratorLoader(DataLoaderBase):\n \"\"\"\n The GeneratorLoader of dygraph\n\n The multiprocess dygraph GeneratorLoader's most functions are different from \n static graph GeneratorLoader, Separate implementation to keep code readable.\n \"\"\"\n\n def __init__(self,\n feed_list=None,\n capacity=None,\n use_double_buffer=True,\n iterable=True,\n return_list=True,\n use_multiprocess=False):\n self._batch_reader = None\n self._places = None\n self._feed_list = feed_list\n\n if not capacity:\n raise ValueError(\"Please give value to capacity.\")\n self._capacity = capacity\n self._use_double_buffer = use_double_buffer\n\n if not iterable:\n warnings.warn(\n \"Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode.\"\n )\n self._iterable = True\n if not return_list:\n warnings.warn(\n \"Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list.\"\n )\n self._return_list = True\n\n # NOTE: the multiprocessing in different platform is incompatible, we will solve it later\n self._use_multiprocess = use_multiprocess\n if self._use_multiprocess and (sys.platform == 'darwin' or\n sys.platform == 'win32'):\n warnings.warn(\n \"NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows.\"\n )\n self._use_multiprocess = False\n\n if self._use_multiprocess:\n # NOTE: the multiprocessing.Queue used to save loading data in self._process\n self._data_queue = None\n # NOTE: this process is used to load data asynchronously from self._batch_reader\n self._process = None\n\n # NOTE: the C++ LoDTensorBlockingQueue instance\n self._blocking_queue = None\n # NOTE: 1. In multiprocess mode, this thread is used to get next batch data from\n # self._data_queue, then push it into self._blocking_queue; 2. 
In singleprocess\n # mode, this thread is used to get next batch data from self._batch_reader, then \n # push it into self._blocking_queue\n self._thread = None\n self._pin_memory = True if use_pinned_memory(\n ) is None else use_pinned_memory()\n\n @property\n def queue(self):\n return self._blocking_queue\n\n @property\n def iterable(self):\n return self._iterable\n\n def _clear_and_remove_data_queue(self):\n if self._data_queue is not None:\n while True:\n try:\n self._data_queue.get_nowait()\n except queue.Empty:\n break\n global multiprocess_queue_set\n multiprocess_queue_set.remove(self._data_queue)\n\n def _wait_thread_ends(self):\n thread = self._thread\n if thread is not None:\n self._blocking_queue.close()\n thread.join()\n\n def _wait_process_ends(self):\n process = self._process\n if process is not None:\n process.join()\n # erase process id\n core._erase_process_pids(id(self))\n\n def _init_iterable(self):\n self._wait_thread_ends()\n if self._use_multiprocess:\n self._wait_process_ends()\n self._var_names = []\n self._shapes = []\n self._dtypes = []\n self._need_check_feed = []\n self._blocking_queue = core.init_lod_tensor_blocking_queue(\n core.Variable(), self._capacity, False)\n self._reader = None\n self._reader = core.create_py_reader(\n self.queue, self._var_names, self._shapes, self._dtypes,\n self._need_check_feed, self._places, self._use_double_buffer, True,\n self._pin_memory)\n\n def _start(self):\n if self._use_multiprocess:\n # clear old _data_queue and remove it from multiprocess_queue_set\n self._clear_and_remove_data_queue()\n # set data_queue and process\n self._data_queue = multiprocessing.Queue(self._capacity)\n # add _data_queue into global queue set\n global multiprocess_queue_set\n multiprocess_queue_set.add(self._data_queue)\n self._process = multiprocessing.Process(\n target=_reader_process_loop,\n args=(self._batch_reader, self._data_queue))\n self._process.daemon = True\n self._process.start()\n\n # Set child process signal handler\n # NOTE: [ avoiding hang ] 1. if the child process dies due to bus error/segfault\n # or just hang, the main process will hang waiting for data, so here need to deal \n # with SIGSEGV and SIGBUS of child process; 2. 
if the main process end before child\n # process, it shuts the all its daemonic children down with a SIGTERM (instead of \n # joining them without a timeout), so here nedd to deal with SIGTERM.\n core._set_process_pids(id(self), [self._process.pid])\n _set_SIGCHLD_handler()\n\n # Set reader_thread\n self._thread_done_event = threading.Event()\n self._thread = threading.Thread(\n target=self._reader_thread_loop_for_multiprocess,\n args=(_current_expected_place(), ))\n self._thread.daemon = True\n self._thread.start()\n else:\n self._thread = threading.Thread(\n target=self._reader_thread_loop_for_singleprocess,\n args=(_current_expected_place(), ))\n self._thread.daemon = True\n self._thread.start()\n\n def _reset(self):\n self._reader.reset()\n self._wait_thread_ends()\n if self._use_multiprocess:\n self._wait_process_ends()\n\n def __iter__(self):\n assert self.iterable, \"DataLoader is not iterable\"\n assert self._batch_reader is not None, \\\n \"Data source of DataLoader has not set yet\"\n\n self._init_iterable()\n self._start()\n return self\n\n def __next__(self):\n try:\n if _in_eager_mode():\n return core.eager.read_next_tensor_list(\n self._reader.read_next_list()[0])\n else:\n return self._reader.read_next_var_list()\n except StopIteration:\n self._reset()\n six.reraise(*sys.exc_info())\n\n def _exit_thread_expectedly(self):\n self._thread_done_event.set()\n self._blocking_queue.close()\n\n def _exit_thread_unexpectedly(self):\n self._thread_done_event.set()\n self._blocking_queue.kill()\n logging.error(\"DataLoader reader thread raised an exception!\")\n\n def _reader_thread_loop_for_multiprocess(self, legacy_expected_place):\n # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.\n _set_expected_place(legacy_expected_place)\n\n while not self._thread_done_event.is_set():\n try:\n # NOTE: [ avoid hanging ] Even with carefully designed data dependencies \n # (i.e., a put() always corresponding to a get()), hanging on get() can \n # still happen when data in queue is corrupted (e.g., due to \n # Queue.cancel_join_thread or unexpected exit). So we set a timeout whenever \n # we try to get data from `data_queue`\n # NOTE: [ avoid failed quickly ] Here, the time setting of QUEUE_GET_TIMEOUT\n # is relatively long, currently it is 60 seconds, because in some models,\n # if the reader child process starts with a heavy burden, the child process\n # has no enough time to put the data in the queue when the main process\n # start trying to get data from queue. At this time, the child thread needs\n # to wait slightly longer\n tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)\n except:\n # NOTE [ avoid handing ] After adding the shared memory mechanism, not only\n # the queue.Empty exception will occur here, but other exceptions will also\n # occur, such as mmap failure. 
If it is not handled here, it will hang.\n self._exit_thread_unexpectedly()\n logging.error(\n \"DataLoader reader thread failed to read data from the multiprocessing.Queue.\"\n )\n six.reraise(*sys.exc_info())\n\n if not self._thread_done_event.is_set():\n if tensor_list is not None:\n try:\n array = core.LoDTensorArray()\n for tensor in tensor_list:\n array.append(tensor)\n if not self._blocking_queue.push(array):\n self._blocking_queue.close()\n except:\n self._exit_thread_unexpectedly()\n six.reraise(*sys.exc_info())\n else:\n self._exit_thread_expectedly()\n\n def _reader_thread_loop_for_singleprocess(self, legacy_expected_place):\n try:\n # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.\n _set_expected_place(legacy_expected_place)\n\n for sample in self._batch_reader():\n array = core.LoDTensorArray()\n for item in sample:\n if not isinstance(item, core.LoDTensor):\n item = self._check_input_array(item)\n tmp = core.LoDTensor()\n tmp.set(item, core.CPUPlace())\n item = tmp\n\n array.append(item)\n\n if not self._blocking_queue.push(array):\n break\n\n self._blocking_queue.close()\n self._thread = None\n except Exception:\n self._blocking_queue.kill()\n self._thread = None\n logging.warning(\n \"DygraphDataLoader reader thread raised an exception.\")\n six.reraise(*sys.exc_info())\n\n def set_sample_generator(self,\n reader,\n batch_size,\n drop_last=True,\n places=None):\n assert batch_size > 0, \"batch_size must be larger than 0\"\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n self.set_sample_list_generator(\n paddle.batch(\n reader, batch_size=batch_size, drop_last=drop_last),\n places=places)\n return self\n\n def set_sample_list_generator(self, reader, places=None):\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n\n def __batch_reader_impl__():\n for batch in reader():\n slots = []\n for items in batch:\n for i, item in enumerate(items):\n if len(slots) < len(items):\n slots.append([item])\n else:\n slots[i].append(item)\n yield slots\n\n self.set_batch_generator(__batch_reader_impl__, places)\n return self\n\n def set_batch_generator(self, reader, places=None):\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n self._batch_reader = reader\n if places is None:\n places = _current_expected_place()\n self._places = _convert_places(places)\n assert len(self._places) == 1, \\\n \"Number of places must be 1 in imperative mode\"\n return self\n\n\nclass GeneratorLoader(DataLoaderBase):\n def __init__(self,\n feed_list=None,\n capacity=None,\n use_double_buffer=True,\n iterable=True,\n return_list=False,\n drop_last=True):\n self._tensor_reader = None\n self._places = None\n self._thread = None\n self._queue = None\n self._feed_list = feed_list\n self._exited = False\n self._drop_last = drop_last\n self._keep_order = keep_data_loader_order()\n if not capacity:\n raise ValueError(\"Please give value to capacity.\")\n self._iterable = iterable\n self._return_list = return_list\n if not self._feed_list:\n raise Exception(\"Feed list must be given under static mode.\")\n self._use_double_buffer = use_double_buffer\n self._capacity = capacity\n if not self._iterable:\n self._init_non_iterable()\n\n def _wait_thread_ends(self):\n # Get self._thread first to prevent data race, because __thread_main__\n # would set 
self._thread be None at the end\n thread = self._thread\n if thread is not None and self._iterable:\n self._queue.close()\n thread.join()\n\n def _init_iterable(self):\n self._wait_thread_ends()\n self._var_names = [v.name for v in self._feed_list]\n self._shapes = [v.shape for v in self._feed_list]\n self._dtypes = [v.dtype for v in self._feed_list]\n self._need_check_feed = [\n v.desc.need_check_feed() for v in self._feed_list\n ]\n self._queue = core.init_lod_tensor_blocking_queue(\n core.Variable(), self._capacity, self._keep_order)\n self._reader = None\n self._reader = core.create_py_reader(\n self.queue, self._var_names, self._shapes, self._dtypes,\n self._need_check_feed, self._places, self._use_double_buffer,\n self._drop_last, False)\n\n def _init_non_iterable(self):\n lod_levels = []\n dtypes = []\n shape_concat = []\n ranks = []\n shapes = []\n need_check_feed = []\n\n for feed_data in self._feed_list:\n dtypes.append(feed_data.dtype)\n shape_concat.extend(feed_data.shape)\n ranks.append(len(feed_data.shape))\n shapes.append(feed_data.shape)\n lod_levels.append(feed_data.lod_level)\n need_check_feed.append(int(feed_data.desc.need_check_feed()))\n\n queue_name = data_loader_unique_name_generator(\n 'lod_tensor_blocking_queue')\n reader_name = data_loader_unique_name_generator('create_py_reader')\n double_buffer_name = data_loader_unique_name_generator('double_buffer')\n\n var = global_scope().var(queue_name)\n self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,\n self._keep_order)\n\n if self._keep_order:\n block = default_main_program().current_block()\n else:\n block = default_startup_program().current_block()\n\n reader_var = block.create_var(name=reader_name)\n\n dtype_int = [int(t) for t in dtypes]\n block.append_op(\n type='create_py_reader',\n inputs={'blocking_queue': [queue_name]},\n outputs={'Out': [reader_var]},\n attrs={\n 'shape_concat': shape_concat,\n 'lod_levels': lod_levels,\n 'dtypes': dtype_int,\n 'need_check_feed': need_check_feed,\n 'ranks': ranks\n })\n\n reader_var.desc.set_dtypes(dtypes)\n reader_var.persistable = True\n reader_var.stop_gradient = True\n\n if self._keep_order:\n main_prog_var = reader_var\n reader = main_prog_var\n reader.reset = self._queue.reset\n else:\n main_prog_var = _copy_reader_var_(\n default_main_program().current_block(), reader_var)\n\n main_prog_var.stop_gradient = True\n main_prog_var.persistable = True\n\n reader = monkey_patch_reader_methods(main_prog_var)\n\n if self._use_double_buffer:\n double_buffer_reader = double_buffer(\n reader, name=double_buffer_name)\n # we return a double buffer reader. 
However, the reset method comes from\n # py_reader.\n double_buffer_reader.reset = reader.reset\n reader = double_buffer_reader\n\n self._reader = reader\n\n default_main_program().current_block().append_op(\n type='read',\n inputs={'Reader': [self._reader]},\n outputs={'Out': self._feed_list},\n attrs={'drop_last': self._drop_last})\n\n @property\n def queue(self):\n return self._queue\n\n @property\n def iterable(self):\n return self._iterable\n\n def __iter__(self):\n assert self.iterable, \"DataLoader is not iterable\"\n assert self._tensor_reader is not None, \\\n \"Data source of DataLoader has not set yet\"\n\n self._init_iterable()\n self._start()\n return self\n\n def __next__(self):\n try:\n if self._return_list:\n data = self._reader.read_next_list()\n for i in range(len(data)):\n data[i] = data[i]._move_to_list()\n return data\n else:\n return self._reader.read_next()\n except StopIteration:\n self._queue.close()\n self._reset()\n six.reraise(*sys.exc_info())\n\n def start(self):\n assert not self._iterable, \"start() cannot be called when DataLoader is iterable\"\n self._start()\n\n def reset(self):\n assert not self._iterable, \"reset() cannot be called when DataLoader is iterable\"\n self._reset()\n\n def _start(self):\n def __thread_main__(legacy_expected_place):\n try:\n # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.\n _set_expected_place(legacy_expected_place)\n\n while not self._queue.wait_for_inited(1):\n if self._exited:\n return\n\n for tensors in self._tensor_reader():\n array = core.LoDTensorArray()\n for item in tensors:\n if not isinstance(item, core.LoDTensor):\n item = self._check_input_array(item)\n tmp = core.LoDTensor()\n tmp.set(item, core.CPUPlace())\n item = tmp\n\n array.append(item)\n\n if not self._queue.push(array):\n break\n\n self._queue.close()\n self._thread = None\n except Exception as ex:\n self._queue.kill()\n self._thread = None\n logging.warning('Your reader has raised an exception!')\n six.reraise(*sys.exc_info())\n\n self._thread = threading.Thread(\n target=__thread_main__, args=(_current_expected_place(), ))\n self._thread.daemon = True\n self._thread.start()\n\n def _reset(self):\n self._queue.close()\n self._exited = True\n thread = self._thread\n if thread is not None:\n thread.join()\n\n self._exited = False\n self._reader.reset()\n\n def set_sample_generator(self,\n reader,\n batch_size,\n drop_last=True,\n places=None):\n assert batch_size > 0, \"batch_size must be larger than 0\"\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n has_lod = False\n for f in self._feed_list:\n if f.lod_level != 0:\n has_lod = True\n break\n\n if has_lod:\n self.set_sample_list_generator(\n paddle.batch(\n reader, batch_size=batch_size, drop_last=drop_last),\n places=places)\n else:\n reader = BatchedTensorProvider(\n feed_list=self._feed_list,\n place=core.CPUPlace(),\n batch_size=batch_size,\n generator=reader,\n drop_last=drop_last)\n self.set_batch_generator(reader, places=places)\n return self\n\n def set_sample_list_generator(self, reader, places=None):\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n with program_guard(Program(), Program()):\n feeder = DataFeeder(\n feed_list=self._feed_list, place=core.CPUPlace())\n paddle_reader = feeder.decorate_reader(reader, multi_devices=False)\n\n def __tensor_reader_impl__():\n for slots in paddle_reader():\n 
yield [slots[var.name] for var in self._feed_list]\n\n self.set_batch_generator(__tensor_reader_impl__, places)\n return self\n\n def set_batch_generator(self, reader, places=None):\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n self._tensor_reader = reader\n if self._iterable:\n assert places is not None, \"Places cannot be None when DataLoader is iterable\"\n self._places = _convert_places(places)\n else:\n if places is not None:\n logging.info(\n 'places would be ommited when DataLoader is not iterable')\n return self\n\n\nclass PyReader(DataLoaderBase):\n r\"\"\"\n Create a reader object for data feeding in Python. \n Data would be prefetched using Python thread and be pushed\n into a queue asynchronously. Data in the queue would be extracted \n automatically when `Executor.run(...)` is called.\n\n Args: \n feed_list (list(Variable)|tuple(Variable)): feed variable list.\n The variables should be created by :code:`fluid.layers.data()`.\n capacity (int): capacity of the queue maintained in PyReader.\n The unit is batch number. Set larger capacity if your reader \n is fast. \n use_double_buffer (bool): whether to use double_buffer_reader. \n If use_double_buffer=True, PyReader would prefetch next \n batch data asynchronously, so it would speed up data feeding \n and occupies a little more CPU or GPU memory, i.e., the memory\n of one batch input data. \n iterable (bool): whether the created PyReader is iterable. \n return_list (bool): whether the return value on each device is \n presented as a list. It is only valid when iterable=True. \n If return_list=False, the return value on each device would \n be a dict of str -> LoDTensor, where the key of the dict is \n the name of each fed variables. If return_list=True, the \n return value on each device would be a list(LoDTensor). It is\n recommended to use return_list=False in static graph mode and\n use return_list=True in dygraph mode. \n\n Returns:\n the created reader object.\n\n Return type:\n reader(Reader)\n\n Examples:\n 1. If iterable = False, the created PyReader object is almost the\n same as :code:`fluid.layers.py_reader()`. Operators would be \n inserted into the program. User should call :code:`start()` \n before each epoch and catch :code:`fluid.core.EOFException`\n thrown by :code:`Executor.run()` when epoch ends. Once the \n exception is caught, user should call :code:`reset()` to reset \n the reader manually.\n\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 5\n BATCH_SIZE = 3\n \n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def reader_creator_random_image_and_label(height, width):\n def reader():\n for i in range(ITER_NUM):\n fake_image = np.random.uniform(low=0,\n high=255,\n size=[height, width])\n fake_label = np.ones([1])\n yield fake_image, fake_label\n return reader\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n\n reader = fluid.io.PyReader(feed_list=[image, label],\n capacity=4,\n iterable=False)\n\n user_defined_reader = reader_creator_random_image_and_label(784, 784)\n reader.decorate_sample_list_generator(\n paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))\n loss = network(image, label)\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n for i in range(EPOCH_NUM):\n reader.start()\n while True:\n try:\n executor.run(feed=None)\n except fluid.core.EOFException:\n reader.reset()\n break\n\n \n 2. If iterable=True, the created PyReader object is decoupled with\n the program. No operator would be inserted into the program. \n In this case, the created reader is a Python generator, which \n is iterable. User should feed the data yielded from PyReader \n object into :code:`Executor.run(feed=...)`. \n\n .. code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 5\n BATCH_SIZE = 10\n\n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def reader_creator_random_image(height, width):\n def reader():\n for i in range(ITER_NUM):\n fake_image = np.random.uniform(low=0, high=255, size=[height, width])\n fake_label = np.ones([1])\n yield fake_image, fake_label \n return reader\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)\n\n user_defined_reader = reader_creator_random_image(784, 784)\n reader.decorate_sample_list_generator(\n paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),\n fluid.core.CPUPlace())\n \n loss = network(image, label)\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n \n for _ in range(EPOCH_NUM):\n for data in reader():\n executor.run(feed=data, fetch_list=[loss])\n\n\n 3. If return_list=True, the return values would be presented as list instead of dict. \n This is usually used in dygraph mode.\n\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n ITER_NUM = 5\n BATCH_SIZE = 10\n\n def reader_creator_random_image(height, width):\n def reader():\n for i in range(ITER_NUM):\n yield np.random.uniform(low=0, high=255, size=[height, width]), \\\n np.random.random_integers(low=0, high=9, size=[1])\n return reader\n\n place = fluid.CPUPlace()\n with fluid.dygraph.guard(place):\n py_reader = fluid.io.PyReader(capacity=2, return_list=True)\n user_defined_reader = reader_creator_random_image(784, 784)\n py_reader.decorate_sample_list_generator(\n paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),\n place)\n for image, label in py_reader():\n relu = fluid.layers.relu(image)\n \"\"\"\n\n def __init__(self,\n feed_list=None,\n capacity=None,\n use_double_buffer=True,\n iterable=True,\n return_list=False):\n self._loader = DataLoader.from_generator(\n feed_list, capacity, use_double_buffer, iterable, return_list)\n\n @property\n def queue(self):\n return self._loader.queue\n\n @property\n def iterable(self):\n return self._loader.iterable\n\n def __iter__(self):\n return self._loader.__iter__()\n\n def __next__(self):\n return self._loader.__next__()\n\n def start(self):\n '''\n Start the data feeding thread. \n Can only call when the reader object is not iterable. \n \n\tExample:\n\t .. code-block:: python\n \n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n BATCH_SIZE = 10\n\n def generator():\n for i in range(5):\n yield np.random.uniform(low=0, high=255, size=[784, 784]),\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)\n reader.decorate_sample_list_generator(\n paddle.batch(generator, batch_size=BATCH_SIZE))\n\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n for i in range(3):\n reader.start()\n while True:\n try:\n executor.run(feed=None)\n except fluid.core.EOFException:\n reader.reset()\n break\n\n\t '''\n self._loader.start()\n\n def reset(self):\n '''\n Reset the reader object when :code:`fluid.core.EOFException` raises. \n Can only call when the reader object is not iterable.\n \n Example:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n BATCH_SIZE = 10\n\n def generator():\n for i in range(5):\n yield np.random.uniform(low=0, high=255, size=[784, 784]),\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)\n reader.decorate_sample_list_generator(\n paddle.batch(generator, batch_size=BATCH_SIZE))\n\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n for i in range(3):\n reader.start()\n while True:\n try:\n executor.run(feed=None)\n except fluid.core.EOFException:\n reader.reset()\n break \n\n '''\n self._loader.reset()\n\n def decorate_sample_generator(self,\n sample_generator,\n batch_size,\n drop_last=True,\n places=None):\n '''\n Set the data source of the PyReader object.\n \n The provided :code:`sample_generator` should be a Python generator,\n which yields list(numpy.ndarray)-typed data of each sample.\n\n :code:`places` must be set when the PyReader object is iterable.\n\n If all inputs have no lods, this method is faster than \n :code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .\n\n Args:\n sample_generator (generator): Python generator that yields\n list(numpy.ndarray)-typed sample data.\n batch_size (int): batch size. Must be larger than 0.\n drop_last (bool): Whether to drop the last batch when sample number\n is less than batch_size. \n places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must\n be provided when PyReader is iterable.\n\n Example:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 15\n BATCH_SIZE = 3\n \n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def random_image_and_label_generator(height, width):\n def generator():\n for i in range(ITER_NUM):\n fake_image = np.random.uniform(low=0,\n high=255,\n size=[height, width])\n fake_label = np.array([1])\n yield fake_image, fake_label\n return generator\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)\n\n user_defined_generator = random_image_and_label_generator(784, 784)\n reader.decorate_sample_generator(user_defined_generator,\n batch_size=BATCH_SIZE,\n places=[fluid.CPUPlace()])\n loss = network(image, label)\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n\n for _ in range(EPOCH_NUM):\n for data in reader():\n executor.run(feed=data, fetch_list=[loss])\n \n '''\n self._loader.set_sample_generator(sample_generator, batch_size,\n drop_last, places)\n\n def decorate_sample_list_generator(self, reader, places=None):\n '''\n Set the data source of the PyReader object. \n\n The provided :code:`reader` should be a Python generator,\n which yields list(numpy.ndarray) typed batched data. \n \n :code:`places` must be set when the PyReader object is iterable.\n\n Args:\n reader (generator): Python generator that yields \n list(numpy.ndarray)-typed batched data. \n places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must\n be provided when PyReader is iterable.\n \n Example:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 15\n BATCH_SIZE = 3\n\n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def random_image_and_label_generator(height, width):\n def generator():\n for i in range(ITER_NUM):\n fake_image = np.random.uniform(low=0,\n high=255,\n size=[height, width])\n fake_label = np.ones([1])\n yield fake_image, fake_label\n return generator\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)\n\n user_defined_generator = random_image_and_label_generator(784, 784)\n reader.decorate_sample_list_generator(\n paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),\n fluid.core.CPUPlace())\n \n loss = network(image, label)\n executor = fluid.Executor(fluid.core.CPUPlace())\n executor.run(fluid.default_startup_program())\n\n for _ in range(EPOCH_NUM):\n for data in reader():\n executor.run(feed=data, fetch_list=[loss])\n \n '''\n self._loader.set_sample_list_generator(reader, places)\n\n def decorate_batch_generator(self, reader, places=None):\n '''\n Set the data source of the PyReader object.\n\n The provided :code:`reader` should be a Python generator,\n which yields numpy.ndarray-typed or LoDTensor-typed batched data.\n\n :code:`places` must be set when the PyReader object is iterable.\n\n Args:\n reader (generator): Python generator that yields LoDTensor-typed\n batched data.\n places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must\n be provided when PyReader is iterable.\n\n Example:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 15\n BATCH_SIZE = 3\n \n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def random_image_and_label_generator(height, width):\n def generator():\n for i in range(ITER_NUM):\n batch_image = np.random.uniform(low=0,\n high=255,\n size=[BATCH_SIZE, height, width])\n batch_label = np.ones([BATCH_SIZE, 1])\n batch_image = batch_image.astype('float32')\n batch_label = batch_label.astype('int64')\n yield batch_image, batch_label\n return generator\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)\n\n user_defined_generator = random_image_and_label_generator(784, 784)\n reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())\n \n loss = network(image, label)\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n\n for _ in range(EPOCH_NUM):\n for data in reader():\n executor.run(feed=data, fetch_list=[loss])\n\n '''\n self._loader.set_batch_generator(reader, places)\n\n\nclass DatasetLoader(DataLoaderBase):\n def __init__(self, dataset, places, drop_last):\n assert isinstance(dataset, paddle.distributed.fleet.dataset.\n DatasetBase), \"dataset must be type of DatasetBase\"\n assert not in_dygraph_mode(\n ), \"DatasetLoader is not supported in dygraph mode yet\"\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n\n thread_num = len(places)\n\n assert len(dataset.filelist) >= thread_num, \\\n \"Filelist number of dataset {} must be not less than place number {}\".format(len(dataset.filelist), thread_num)\n\n if dataset.thread_num != 0 and dataset.thread_num != thread_num:\n logging.warn('thread_num {} which is set in Dataset is ignored'.\n format(dataset.thread_num))\n\n dataset._set_thread(thread_num)\n\n if isinstance(dataset, paddle.distributed.fleet.dataset.\n InMemoryDataset) and dataset.queue_num > thread_num:\n logging.warn(\"queue_num {} which is set in Dataset is ignored\".\n format(dataset.queue_num))\n dataset._set_queue_num(thread_num)\n\n self._dataset = dataset\n use_slots = [\n slot.name for slot in dataset.proto_desc.multi_slot_desc.slots\n if slot.is_used\n ]\n\n self._iterable_dataset = core.IterableDatasetWrapper(\n dataset.dataset, use_slots,\n _convert_places(places), dataset.proto_desc.batch_size, drop_last)\n\n def __iter__(self):\n self._dataset._finish_to_run()\n self._dataset._prepare_to_run()\n self._iterable_dataset._start()\n return self\n\n def __next__(self):\n return self._iterable_dataset._next()\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
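The DataLoader docstring stored in the code field of the row above documents ``collate_fn`` (whose default behaves like ``np.stack(..., axis=0)`` over each field of the samples) and ``num_workers``, but its embedded example never passes a custom collate function. Below is a minimal sketch of that parameter under the constructor signature shown in the dump, assuming Paddle 2.x is installed; ``PairDataset`` and ``stack_collate`` are illustrative names, not part of the dumped source.

.. code-block:: python

    import numpy as np
    from paddle.io import Dataset, DataLoader

    class PairDataset(Dataset):
        # Map-style dataset yielding (image, label) samples of fixed shape.
        def __init__(self, num_samples):
            self.num_samples = num_samples

        def __getitem__(self, idx):
            image = np.random.random([784]).astype('float32')
            label = np.random.randint(0, 10, (1,)).astype('int64')
            return image, label

        def __len__(self):
            return self.num_samples

    def stack_collate(batch):
        # `batch` is the list of samples drawn for one mini-batch; mimic the
        # documented default by stacking each field along axis 0.
        images, labels = zip(*batch)
        return np.stack(images, axis=0), np.stack(labels, axis=0)

    loader = DataLoader(PairDataset(64),
                        batch_size=8,
                        shuffle=True,
                        collate_fn=stack_collate,
                        num_workers=0)  # 0: load data in the main process

    for images, labels in loader():
        print(images.shape, labels.shape)  # one batch of 8 samples per field
        break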
krishna1401/Digital-Image-Processing | [
"47a4da4bef9d08708ac84174b0fcd0ced6a8b5e2"
] | [
"edgeDetection.py"
] | [
"#Perform Edge Detection using Roberts Cross Gradient & Sobel Operators over an Image\n\nimport cv2\nimport math\nimport numpy as np\n\ndef robertCrossGradient(image):\n\t#Objective: Performing Robert Cross Gradient Edge Detection over an Image\n\t#Input: Original Image\n\t#Output: Resultant Image\n\t\n\t#Robert Cross Operator\n\t# x 0 1\n\t#\t-1 0\n\t# y 1 0\n\t#\t 0 -1\n\timage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Converting Image to Gray Scale\n\tresultant_image = image.copy()\n\tfor i in range(0,image.shape[0]-1):\n\t for j in range(0,image.shape[1]-1):\n\t gx = image[i, j+1] - image[i+1, j]\n\t gy = image[i, j] - image[i+1, j+1]\n\t resultant_image[i, j] = math.sqrt(gx*gx + gy*gy)\t\n\t\n\treturn resultant_image\n\ndef sobelOperator(image):\n #Objective: Performing Sobel Edge Detection over an Image\n\t#Input: Original Image\n\t#Output: Resultant Image\n\t\n\t#Sobel Operator\n\t\n\t# x -1 -2 -1\n\t# 0 0 0\n\t# 1 2 1\n\t\n\t#y -1 0 1\n\t# -2 0 2\n\t# -1 0 1\n\t\n\timage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Converting Image to Gray Scale\n\tresultant_image = image.copy()\n\t\n\t#Applying Padding\n\trows,cols = image.shape\n\timage = np.insert(image,0,0,axis=0) #top\n\timage = np.insert(image,rows+1,0,axis=0) #bottom\n\timage = np.insert(image,0,0,axis=1) #left\n\timage = np.insert(image,cols+1,0,axis=1) #right\n\t\n\tfor i in range(1, image.shape[0]-1):\n\t for j in range(1, image.shape[1]-1):\n\t fx = image[i+1, j-1] + 2*image[i+1, j] + image[i+1, j+1] - image[i-1, j-1] - 2*image[i-1, j] - image[i+1, j-1]\n\t fy = image[i-1, j+1] + 2*image[i, j+1] + image[i+1, j+1] - image[i-1, j-1] - 2*image[i, j-1] - image[i+1, j-1]\n\t resultant_image[i-1, j-1] = math.sqrt(fx*fx + fy*fy)\n\t\n\treturn resultant_image\n\nimg = cv2.imread('image5.jpg')\noutput = sobelOperator(img)\n\ncv2.imshow('image',output)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.insert"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
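In ``sobelOperator`` in the edgeDetection.py row above, the last term of ``fx`` is ``image[i+1, j-1]``, whereas the x-kernel written in the adjacent comment (-1 -2 -1 / 0 0 0 / 1 2 1) calls for ``image[i-1, j+1]``; the magnitudes are also written back into a uint8 copy of the image, which can overflow, and ``robertCrossGradient`` subtracts uint8 values directly, which wraps around. A minimal corrected sketch of the Sobel magnitude, assuming a 2-D grayscale numpy array as input; ``sobel_magnitude`` is an illustrative name, not from the source.

.. code-block:: python

    import numpy as np

    def sobel_magnitude(gray):
        # Sobel gradient magnitude for a 2-D grayscale array.
        gray = gray.astype('float64')              # avoid uint8 wrap-around
        padded = np.pad(gray, 1, mode='constant')  # zero padding, as in the original
        out = np.zeros_like(gray)
        rows, cols = gray.shape
        for i in range(1, rows + 1):
            for j in range(1, cols + 1):
                fx = (padded[i+1, j-1] + 2*padded[i+1, j] + padded[i+1, j+1]
                      - padded[i-1, j-1] - 2*padded[i-1, j] - padded[i-1, j+1])
                fy = (padded[i-1, j+1] + 2*padded[i, j+1] + padded[i+1, j+1]
                      - padded[i-1, j-1] - 2*padded[i, j-1] - padded[i+1, j-1])
                out[i-1, j-1] = np.hypot(fx, fy)
        return np.clip(out, 0, 255).astype('uint8')

With the original script's setup it would be called as ``sobel_magnitude(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))``; the same float64 cast and final clip would also address the wrap-around in ``robertCrossGradient``.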
strawberryfg/xraygan | [
"047474b0244e530f78b28db67564304cff692f5e"
] | [
"full_code/test_apr27.py"
] | [
"import os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport math\nimport numpy as np\nfrom scipy import linalg\nfrom os import path as osp\nimport cv2\nimport random\nimport matplotlib.pyplot as plt\nimport pdb\n\n#0. torch imports\nimport torch\nfrom torch.utils.data import DataLoader,Dataset\nfrom torch import optim,nn\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom torchvision import transforms as T\nimport torch.nn.functional as F\nimport torchvision.utils as vutils\nfrom torchvision import models\n\nfrom torchvision.models.resnet import BasicBlock, Bottleneck\nfrom torchvision.models.resnet import model_urls\n\n\n## Data parallel\n\"\"\"Encoding Data Parallel\"\"\"\nimport threading\nimport functools\nfrom torch.autograd import Variable, Function\nimport torch.cuda.comm as comm\nfrom torch.nn.parallel.data_parallel import DataParallel\nfrom torch.nn.parallel.parallel_apply import get_a_var\nfrom torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast\n\n# [DATA PARALLEL]\n\n__all__ = ['allreduce', 'DataParallelModel', 'DataParallelCriterion',\n 'patch_replication_callback']\n\ndef allreduce(*inputs):\n \"\"\"Cross GPU all reduce autograd operation for calculate mean and\n variance in SyncBN.\n \"\"\"\n return AllReduce.apply(*inputs)\n\n\nclass AllReduce(Function):\n @staticmethod\n def forward(ctx, num_inputs, *inputs):\n ctx.num_inputs = num_inputs\n ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)]\n inputs = [inputs[i:i + num_inputs]\n for i in range(0, len(inputs), num_inputs)]\n # sort before reduce sum\n inputs = sorted(inputs, key=lambda i: i[0].get_device())\n results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])\n outputs = comm.broadcast_coalesced(results, ctx.target_gpus)\n return tuple([t for tensors in outputs for t in tensors])\n\n @staticmethod\n def backward(ctx, *inputs):\n inputs = [i.data for i in inputs]\n inputs = [inputs[i:i + ctx.num_inputs]\n for i in range(0, len(inputs), ctx.num_inputs)]\n results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])\n outputs = comm.broadcast_coalesced(results, ctx.target_gpus)\n return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors])\n\n\nclass Reduce(Function):\n @staticmethod\n def forward(ctx, *inputs):\n ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))]\n inputs = sorted(inputs, key=lambda i: i.get_device())\n return comm.reduce_add(inputs)\n\n @staticmethod\n def backward(ctx, gradOutput):\n return Broadcast.apply(ctx.target_gpus, gradOutput)\n\n\nclass DataParallelModel(DataParallel):\n \"\"\"Implements data parallelism at the module level.\n\n This container parallelizes the application of the given module by\n splitting the input across the specified devices by chunking in the\n batch dimension.\n In the forward pass, the module is replicated on each device,\n and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module.\n Note that the outputs are not gathered, please use compatible\n :class:`encoding.parallel.DataParallelCriterion`.\n\n The batch size should be larger than the number of GPUs used. 
It should\n also be an integer multiple of the number of GPUs so that each chunk is\n the same size (so that each GPU processes the same number of samples).\n\n Args:\n module: module to be parallelized\n device_ids: CUDA devices (default: all devices)\n\n Reference:\n Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,\n Amit Agrawal. Context Encoding for Semantic Segmentation.\n *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*\n\n Example::\n\n >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])\n >>> y = net(x)\n \"\"\"\n def gather(self, outputs, output_device):\n return outputs\n\n def replicate(self, module, device_ids):\n modules = super(DataParallelModel, self).replicate(module, device_ids)\n execute_replication_callbacks(modules)\n return modules\n\n\n\nclass DataParallelCriterion(DataParallel):\n \"\"\"\n Calculate loss in multiple-GPUs, which balance the memory usage for\n Semantic Segmentation.\n\n The targets are splitted across the specified devices by chunking in\n the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`.\n\n Reference:\n Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,\n Amit Agrawal. Context Encoding for Semantic Segmentation.\n *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*\n\n Example::\n\n >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])\n >>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2])\n >>> y = net(x)\n >>> loss = criterion(y, target)\n \"\"\"\n def forward(self, inputs, *targets, **kwargs):\n # input should be already scatterd\n # scattering the targets instead\n # if not self.device_ids:\n # return self.module(inputs, *targets, **kwargs)\n targets, kwargs = self.scatter(targets, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n return self.module(inputs, *targets[0])\n replicas = self.replicate(self.module, self.device_ids[:len(inputs)])\n outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)\n return Reduce.apply(*outputs) / len(outputs)\n\n #return self.gather(outputs, self.output_device).mean()\n\n\ndef _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):\n assert len(modules) == len(inputs)\n assert len(targets) == len(inputs)\n if kwargs_tup:\n assert len(modules) == len(kwargs_tup)\n else:\n kwargs_tup = ({},) * len(modules)\n if devices is not None:\n assert len(modules) == len(devices)\n else:\n devices = [None] * len(modules)\n\n lock = threading.Lock()\n results = {}\n if torch_ver != \"0.3\":\n grad_enabled = torch.is_grad_enabled()\n\n def _worker(i, module, input, target, kwargs, device=None):\n if torch_ver != \"0.3\":\n torch.set_grad_enabled(grad_enabled)\n if device is None:\n device = get_a_var(input).get_device()\n try:\n with torch.cuda.device(device):\n output = module(input, *target)\n with lock:\n results[i] = output\n except Exception as e:\n with lock:\n results[i] = e\n\n if len(modules) > 1:\n threads = [threading.Thread(target=_worker,\n args=(i, module, input, target,\n kwargs, device),)\n for i, (module, input, target, kwargs, device) in\n enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]\n\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n else:\n _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])\n\n outputs = []\n for i in range(len(inputs)):\n output = results[i]\n if 
isinstance(output, Exception):\n raise output\n outputs.append(output)\n return outputs\n\n\n###########################################################################\n# Adapted from Synchronized-BatchNorm-PyTorch.\n# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch\n#\nclass CallbackContext(object):\n pass\n\n\ndef execute_replication_callbacks(modules):\n \"\"\"\n Execute an replication callback `__data_parallel_replicate__` on each module created\n by original replication.\n\n The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`\n\n Note that, as all modules are isomorphism, we assign each sub-module with a context\n (shared among multiple copies of this module on different devices).\n Through this context, different copies can share some information.\n\n We guarantee that the callback on the master copy (the first copy) will be called ahead\n of calling the callback of any slave copies.\n \"\"\"\n master_copy = modules[0]\n nr_modules = len(list(master_copy.modules()))\n ctxs = [CallbackContext() for _ in range(nr_modules)]\n\n for i, module in enumerate(modules):\n for j, m in enumerate(module.modules()):\n if hasattr(m, '__data_parallel_replicate__'):\n m.__data_parallel_replicate__(ctxs[j], i)\n\n\ndef patch_replication_callback(data_parallel):\n \"\"\"\n Monkey-patch an existing `DataParallel` object. Add the replication callback.\n Useful when you have customized `DataParallel` implementation.\n\n Examples:\n > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)\n > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])\n > patch_replication_callback(sync_bn)\n # this is equivalent to\n > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)\n > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])\n \"\"\"\n\n assert isinstance(data_parallel, DataParallel)\n\n old_replicate = data_parallel.replicate\n\n @functools.wraps(old_replicate)\n def new_replicate(module, device_ids):\n modules = old_replicate(module, device_ids)\n execute_replication_callbacks(modules)\n return modules\n\n data_parallel.replicate = new_replicate\n\n\n\n\n\n# 0. 
ResNet 18\n# ResNet Classifier\n#class BasicBlock(nn.Module):\n# expansion = 1\n\n# def __init__(self, in_planes, planes, stride=1):\n# super(BasicBlock, self).__init__()\n# self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n# self.bn1 = nn.BatchNorm2d(planes)\n# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n# self.bn2 = nn.BatchNorm2d(planes)\n\n# self.shortcut = nn.Sequential()\n# if stride != 1 or in_planes != self.expansion*planes:\n# self.shortcut = nn.Sequential(\n# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n# nn.BatchNorm2d(self.expansion*planes)\n# )\n\n# def forward(self, x):\n# out = F.relu(self.bn1(self.conv1(x)))\n# out = self.bn2(self.conv2(out))\n# out += self.shortcut(x)\n# out = F.relu(out)\n# return out\n\n\n\n# gram matrix and loss\nclass GramMatrix(nn.Module):\n def forward(self, input):\n b, c, h, w = input.size()\n F = input.view(b, c, h * w)\n G = torch.bmm(F, F.transpose(1,2)) \n G.div_(h * w)\n return G\n\nclass GramMSELoss(nn.Module):\n def forward(self, input, target):\n out = nn.MSELoss()(GramMatrix()(input), target)\n return(out)\n\n\nclass ResDeconvNet(nn.Module):\n def __init__(self, backbone):\n super(ResDeconvNet, self).__init__()\n self.backbone = backbone\n\n def forward(self, x, y):\n x = torch.cat((x, y), dim = 1)\n x = self.backbone(x)\n\n return x\n\n\n# Conv Layer\nclass ConvLayer(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride):\n super(ConvLayer, self).__init__()\n padding = kernel_size // 2\n self.reflection_pad = nn.ReflectionPad2d(padding)\n self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride) #, padding)\n\n def forward(self, x):\n out = self.reflection_pad(x)\n out = self.conv2d(out)\n return out\n\n# Upsample Conv Layer\nclass UpsampleConvLayer(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):\n super(UpsampleConvLayer, self).__init__()\n self.upsample = upsample\n if upsample:\n self.upsample = nn.Upsample(scale_factor=upsample, mode='nearest')\n reflection_padding = kernel_size // 2\n self.reflection_pad = nn.ReflectionPad2d(reflection_padding)\n self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)\n\n def forward(self, x):\n if self.upsample:\n x = self.upsample(x)\n out = self.reflection_pad(x)\n out = self.conv2d(out)\n return out\n\n# Residual Block\n# adapted from pytorch tutorial\n# https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/02-\n# intermediate/deep_residual_network/main.py\nclass ResidualBlock(nn.Module):\n def __init__(self, channels):\n super(ResidualBlock, self).__init__()\n self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)\n self.in1 = nn.InstanceNorm2d(channels, affine=True)\n self.relu = nn.ReLU()\n self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)\n self.in2 = nn.InstanceNorm2d(channels, affine=True)\n\n def forward(self, x):\n residual = x\n out = self.relu(self.in1(self.conv1(x)))\n out = self.in2(self.conv2(out))\n out = out + residual\n out = self.relu(out)\n return out \n\n\n#vgg definition that conveniently let's you grab the outputs from any layer\nclass VGG(nn.Module):\n def __init__(self, pool='max'):\n super(VGG, self).__init__()\n #vgg modules\n self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)\n self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)\n self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, 
padding=1)\n self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)\n self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)\n self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv4_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv5_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n if pool == 'max':\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool6 = nn.MaxPool2d(kernel_size=8, stride=8)\n elif pool == 'avg':\n self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool3 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool4 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool5 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool6 = nn.AvgPool2d(kernel_size=8, stride=8)\n \n def forward(self, x, out_keys):\n out = {}\n out['r11'] = F.relu(self.conv1_1(x))\n out['r12'] = F.relu(self.conv1_2(out['r11']))\n out['p1'] = self.pool1(out['r12'])\n out['r21'] = F.relu(self.conv2_1(out['p1']))\n out['r22'] = F.relu(self.conv2_2(out['r21']))\n out['p2'] = self.pool2(out['r22'])\n out['r31'] = F.relu(self.conv3_1(out['p2']))\n out['r32'] = F.relu(self.conv3_2(out['r31']))\n out['r33'] = F.relu(self.conv3_3(out['r32']))\n out['r34'] = F.relu(self.conv3_4(out['r33']))\n out['p3'] = self.pool3(out['r34'])\n out['r41'] = F.relu(self.conv4_1(out['p3']))\n out['r42'] = F.relu(self.conv4_2(out['r41']))\n out['r43'] = F.relu(self.conv4_3(out['r42']))\n out['r44'] = F.relu(self.conv4_4(out['r43']))\n out['p4'] = self.pool4(out['r44'])\n out['r51'] = F.relu(self.conv5_1(out['p4']))\n out['r52'] = F.relu(self.conv5_2(out['r51']))\n out['r53'] = F.relu(self.conv5_3(out['r52']))\n out['r54'] = F.relu(self.conv5_4(out['r53']))\n out['p5'] = self.pool5(out['r54'])\n #out['p6'] = self.pool6(out['r54'])\n return [out[key] for key in out_keys]\n\n\n\nmodel_dir = 'F:/nst/ist/Models/' #os.getcwd() + '/Models/'\n#get network\nvgg = VGG()\n\nvgg.load_state_dict(torch.load(model_dir + 'vgg_conv.pth'))\nfor param in vgg.parameters():\n param.requires_grad = False\nif torch.cuda.is_available():\n vgg = DataParallelModel(vgg).cuda()\n\n#3. 
possible l\n\n# Image Transform Network\nclass ImageTransformNet(nn.Module):\n def __init__(self):\n super(ImageTransformNet, self).__init__()\n \n # nonlineraity\n self.relu = nn.ReLU()\n self.tanh = nn.Tanh()\n\n # encoding layers\n self.conv1 = ConvLayer(6, 32, kernel_size=9, stride=1)\n self.in1_e = nn.InstanceNorm2d(32, affine=True)\n\n self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)\n self.in2_e = nn.InstanceNorm2d(64, affine=True)\n\n self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)\n self.in3_e = nn.InstanceNorm2d(128, affine=True)\n\n # residual layers\n self.res1 = ResidualBlock(128)\n self.res2 = ResidualBlock(128)\n self.res3 = ResidualBlock(128)\n self.res4 = ResidualBlock(128)\n self.res5 = ResidualBlock(128)\n\n # decoding layers\n self.deconv3 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2 )\n self.in3_d = nn.InstanceNorm2d(64, affine=True)\n\n self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2 )\n self.in2_d = nn.InstanceNorm2d(32, affine=True)\n\n self.deconv1 = UpsampleConvLayer(32, 3, kernel_size=9, stride=1)\n self.in1_d = nn.InstanceNorm2d(3, affine=True)\n\n def forward(self, x):\n # encode\n y = self.relu(self.in1_e(self.conv1(x)))\n y = self.relu(self.in2_e(self.conv2(y)))\n y = self.relu(self.in3_e(self.conv3(y)))\n\n # residual layers\n y = self.res1(y)\n y = self.res2(y)\n y = self.res3(y)\n y = self.res4(y)\n y = self.res5(y)\n\n # decode\n y = self.relu(self.in3_d(self.deconv3(y)))\n y = self.relu(self.in2_d(self.deconv2(y)))\n #y = self.tanh(self.in1_d(self.deconv1(y)))\n y = self.deconv1(y)\n\n return y\n\n def init_weights(self):\n a = 1\n\n\ndef get_deconv_net(is_train):\n backbone_nst = ImageTransformNet()# ResNetBackbone(18, is_pose_net = False)# ImageTransformNet() #ResNetBackbone(18, is_pose_net = False)\n if is_train:\n backbone_nst.init_weights()\n \n model_deconv = ResDeconvNet(backbone_nst)\n\n return model_deconv\n\n\nmodel_deconv = get_deconv_net(True)\nmodel_deconv = DataParallelModel(model_deconv).cuda()\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 16\n self.embDim = 128 * block.expansion\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 128, num_blocks[3], stride=2)\n self.linear = nn.Linear(128 * block.expansion, num_classes)\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 16)\n emb = out.view(out.size(0), -1)\n out = self.linear(emb)\n return out#, emb\n def get_embedding_dim(self):\n return self.embDim\n\ndef ResNet18():\n return ResNet(BasicBlock, [2,2,2,2])\n\n#1. 
DCGAN Generator\nclass DCGAN_generator(nn.Module):\n \"\"\"\n\n Attributes\n ----------\n ngpu : int\n The number of available GPU devices\n\n \"\"\"\n def __init__(self, ngpu):\n \"\"\"Init function\n\n Parameters\n ----------\n ngpu : int\n The number of available GPU devices\n\n \"\"\"\n super(DCGAN_generator, self).__init__()\n self.ngpu = ngpu\n \n nz = 100 # noise dimension\n ngf = 64 # number of features map on the first layer\n nc = 1 # number of channels\n\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 4, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n\n def forward(self, input):\n \"\"\"Forward function\n\n Parameters\n ----------\n input : :py:class:`torch.Tensor`\n \n Returns\n -------\n :py:class:`torch.Tensor`\n the output of the generator (i.e. an image)\n\n \"\"\"\n output = self.main(input)\n return output\n\n\nclass _netG64(nn.Module):\n def __init__(self, ngpu):\n super(_netG64, self).__init__()\n self.ngpu = ngpu\n nz = 100 # noise dimension\n ngf = 64 # number of features map on the first layer\n nc = 1 # number of channels\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16 \n nn.ConvTranspose2d(ngf * 2, ngf * 1, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 1),\n nn.ReLU(True), \n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n\n def forward(self, input):\n output = self.main(input)\n return output\n\n\n\nclass _netG(nn.Module):\n def __init__(self, ngpu):\n super(_netG, self).__init__()\n self.ngpu = ngpu\n nz = 100 # noise dimension\n ngf = 64 # number of features map on the first layer\n nc = 1 # number of channels\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 16, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n # state size. (ngf*16) x 4 x 4\n nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 8 x 8\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 16 x 16 \n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 32 x 32\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 64 x 64\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. 
(nc) x 128 x 128\n )\n\n def forward(self, input):\n \toutput = self.main(input)\n \treturn output\n\n\nclass _netG256(nn.Module):\n def __init__(self, ngpu):\n super(_netG256, self).__init__()\n self.ngpu = ngpu\n nz = 100 # noise dimension\n ngf = 64 # number of features map on the first layer\n nc = 1 # number of channels\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 32, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 32),\n nn.ReLU(True),\n # state size. (ngf*32) x 4 x 4\n nn.ConvTranspose2d(ngf * 32, ngf * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n # state size. (ngf*16) x 8 x 8\n nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 16 x 16 \n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 32 x 32\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 64 x 64\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 128 x 128\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 256 x 256\n )\n\n def forward(self, input):\n output = self.main(input)\n return output\n\n\n#2. DCGAN Discriminator\nclass DCGAN_discriminator(nn.Module):\n \"\"\" \n\n Attributes\n ----------\n ngpu : int\n The number of available GPU devices\n\n \"\"\"\n def __init__(self, ngpu):\n \"\"\"Init function\n\n Parameters\n ----------\n ngpu : int\n The number of available GPU devices\n\n \"\"\"\n super(DCGAN_discriminator, self).__init__()\n self.ngpu = ngpu\n \n ndf = 64\n nc = 1\n \n self.main = nn.Sequential(\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n \"\"\"Forward function\n\n Parameters\n ----------\n input : :py:class:`torch.Tensor`\n \n Returns\n -------\n :py:class:`torch.Tensor`\n the output of the generator (i.e. an image)\n\n \"\"\"\n output = self.main(input)\n\n return output.view(-1, 1).squeeze(1)\n\n\nclass _netD64(nn.Module):\n def __init__(self, ngpu):\n super(_netD64, self).__init__()\n self.ngpu = ngpu\n ndf = 64\n nc = 1\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, stride=2, padding=1, bias=False), \n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, stride=1, padding=0, bias=False),\n nn.Sigmoid()\n # state size. 
1\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.view(-1, 1).squeeze(1)\n\n\nclass _netD(nn.Module):\n def __init__(self, ngpu):\n super(_netD, self).__init__()\n self.ngpu = ngpu\n ndf = 64\n nc = 1\n self.main = nn.Sequential(\n # input is (nc) x 128 x 128\n nn.Conv2d(nc, ndf, 4, stride=2, padding=1, bias=False), \n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 64 x 64\n nn.Conv2d(ndf, ndf * 2, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 32 x 32\n nn.Conv2d(ndf * 2, ndf * 4, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 16 x 16 \n nn.Conv2d(ndf * 4, ndf * 8, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 8 x 8\n nn.Conv2d(ndf * 8, ndf * 16, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 16),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*16) x 4 x 4\n nn.Conv2d(ndf * 16, 1, 4, stride=1, padding=0, bias=False),\n nn.Sigmoid()\n # state size. 1\n )\n\n def forward(self, input):\n \toutput = self.main(input)\n \treturn output.view(-1, 1).squeeze(1)\n\nclass _netD256(nn.Module):\n def __init__(self, ngpu):\n super(_netD256, self).__init__()\n self.ngpu = ngpu\n ndf = 64\n nc = 1\n self.main = nn.Sequential(\n # input is (nc) x 256 x 256\n nn.Conv2d(nc, ndf, 4, stride=2, padding=1, bias=False), \n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 128 x 128\n nn.Conv2d(ndf, ndf * 2, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 64 x 64\n nn.Conv2d(ndf * 2, ndf * 4, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 32 x 32 \n nn.Conv2d(ndf * 4, ndf * 8, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 16 x 16\n nn.Conv2d(ndf * 8, ndf * 16, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 16),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*16) x 8 x 8\n nn.Conv2d(ndf * 16, ndf * 32, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 32),\n nn.LeakyReLU(0.2, inplace=True), \n # state size. (ndf*32) x 4 x 4\n nn.Conv2d(ndf * 32, 1, 4, stride=1, padding=0, bias=False),\n nn.Sigmoid()\n # state size. 1\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.view(-1, 1).squeeze(1)\n\n\n\n#3. 
ResNet\n\n\n\n\nclass ResNetBackbone(nn.Module):\n\n def __init__(self, resnet_type, num_classes = 1000):\n \n resnet_spec = {18: (BasicBlock, [2, 2, 2, 2], [64, 64, 128, 256, 512], 'resnet18'),\n 34: (BasicBlock, [3, 4, 6, 3], [64, 64, 128, 256, 512], 'resnet34'),\n 50: (Bottleneck, [3, 4, 6, 3], [64, 256, 512, 1024, 2048], 'resnet50'),\n 101: (Bottleneck, [3, 4, 23, 3], [64, 256, 512, 1024, 2048], 'resnet101'),\n 152: (Bottleneck, [3, 8, 36, 3], [64, 256, 512, 1024, 2048], 'resnet152')}\n block, layers, channels, name = resnet_spec[resnet_type]\n \n self.name = name\n self.inplanes = 64\n self.outplanes = 3\n super(ResNetBackbone, self).__init__()\n self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) #128 -> 4\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n nn.init.normal_(m.weight, mean=0, std=0.01)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n\n def forward(self, x): \n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x) \n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1) \n x = self.fc(x)\n\n return x\n \n def load_my_state_dict(model, state_dict):\n \n own_state = model.state_dict()\n for name, param in state_dict.items():\n if name not in own_state:\n continue\n #if isinstance(param, Parameter):\n # backwards compatibility for serialized parameters\n # param = param.data\n own_state[name].copy_(param)\n\n def init_weights(self):\n org_resnet = torch.utils.model_zoo.load_url(model_urls[self.name])\n # drop orginal resnet fc layer, add 'None' in case of no fc layer, that will raise error\n org_resnet.pop('fc.weight', None)\n org_resnet.pop('fc.bias', None)\n org_resnet.pop('conv1.weight', None)\n org_resnet.pop('conv1.bias', None)\n #self.load_state_dict(org_resnet)\n self.load_my_state_dict(org_resnet)\n print(\"Initialize resnet from model zoo\")\n\n#4. 
logging\nimport logging\nimport os\n\nOK = '\\033[92m'\nWARNING = '\\033[93m'\nFAIL = '\\033[91m'\nEND = '\\033[0m'\n\nPINK = '\\033[95m'\nBLUE = '\\033[94m'\nGREEN = OK\nRED = FAIL\nWHITE = END\nYELLOW = WARNING\nclass colorlogger():\n def __init__(self, log_dir, log_name='train_logs.txt'):\n # set log\n self._logger = logging.getLogger(log_name)\n self._logger.setLevel(logging.INFO)\n log_file = os.path.join(log_dir, log_name)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n file_log = logging.FileHandler(log_file, mode='a')\n file_log.setLevel(logging.INFO)\n #console_log = logging.StreamHandler()\n #console_log.setLevel(logging.INFO)\n formatter = logging.Formatter(\n \"{}%(asctime)s{} %(message)s\".format(GREEN, END),\n \"%m-%d %H:%M:%S\")\n file_log.setFormatter(formatter)\n #console_log.setFormatter(formatter)\n self._logger.addHandler(file_log)\n #self._logger.addHandler(console_log)\n\n def debug(self, msg):\n self._logger.debug(str(msg))\n\n def info(self, msg):\n self._logger.info(str(msg))\n\n def warning(self, msg):\n self._logger.warning(WARNING + 'WRN: ' + str(msg) + END)\n\n def critical(self, msg):\n self._logger.critical(RED + 'CRI: ' + str(msg) + END)\n\n def error(self, msg):\n self._logger.error(RED + 'ERR: ' + str(msg) + END)\n\n\n#5. Configurations and arguments\nroot_dir = \"E:/ml/\" # chest x-ray 14\nn_classes = 15 # 0 is normal : no finding\nbatch_size = 22\nimg_size = 128\ndisplay_per_iters = 5 # how many iterations before outputting to the console window\nsave_gan_per_iters = 5 # save gan images per this iterations\nsave_gan_img_folder_prefix = root_dir + \"train_fake/\"\nshow_train_classifier_acc_per_iters = 1000000 # how many iterations before showing train acc of classifier\nshow_test_classifier_acc_per_iters = 15 # \nsave_per_samples = 2000 # save a checkpoint per forward run of this number of samples\nmodel_ckpt_prefix = 'ecgan-chest-xray14'\n\nuse_label_smoothing = True\nsmoothing = 0.1\n\n# define device \ndevice = torch.device(\"cuda:0\")\n\n# The files that contain paths of all images\nimage_index_list_file = root_dir + \"image_index.txt\"\nlabels_file = root_dir + \"labels.txt\"\ntrain_val_list_file = root_dir + \"train_val_list.txt\"\ntest_list_file = root_dir + \"test_list.txt\"\nimg_folders = { 'images_001/', 'images_002/', 'images_003/', 'images_005/', 'images_008/', 'images_011/', 'images_006/', 'images_007/', 'images_004/', 'images_009/', 'images_010/', 'images_012/'}\nsuffix = 'images/'\nimage_index_list = [] \nlabels_list = []\nimg_index_2_label_dict = {}\nlabel_name_dict = { 'No Finding': 0,\n 'Atelectasis': 1, \n 'Cardiomegaly': 2, \n 'Effusion': 3, \n 'Infiltration': 4, \n 'Mass': 5, \n 'Nodule': 6, \n 'Pneumonia': 7, \n 'Pneumothorax': 8, \n 'Consolidation': 9, \n 'Edema': 10, \n 'Emphysema': 11, \n 'Fibrosis': 12, \n 'Pleural_Thickening': 13, \n 'Hernia': 14}\n# list of img paths \ntrain_val_list = []\ntest_list = []\ntrain_val_labels = []\ntest_labels = []\n\ndef load_image_index_and_list():\n #1. image index list (all) e.g. 00000583_023.png\n f_list = open(image_index_list_file, \"r\")\n l = f_list.readlines()\n for line in l:\n if line != '\\n':\n image_index_list.append(line[0:len(line) - 1])\n f_list.close()\n\n #2. labels e.g. 
Cardiomegaly|Effusion\n f_list = open(labels_file, \"r\")\n l = f_list.readlines()\n for line in l:\n if line != '\\n':\n labels_list.append(line[0:len(line) - 1])\n f_list.close()\n return \n\ndef build_img_2_label_dict():\n for i in range(len(image_index_list)):\n img_id = image_index_list[i]\n label = labels_list[i]\n img_index_2_label_dict.update({img_id: label})\n \ndef load_train_val_list():\n #1. original train_val_list.txt\n f_list = open(train_val_list_file, \"r\")\n l = f_list.readlines() \n for line in l:\n s = line \n if s[len(s) - 1] == '\\n':\n s = s[:len(s) - 1]\n img_name = s\n this_label = img_index_2_label_dict[img_name] \n find_or = this_label.find('|') \n if find_or != -1:\n continue\n # See if this image exists \n for folders in img_folders:\n img_path = root_dir + folders + suffix + img_name \n if not osp.exists(img_path):\n continue\n train_val_list.append(img_path) \n this_label = label_name_dict[this_label]\n train_val_labels.append(this_label)\n print('There are {:6d} images in train/val.\\n'.format(len(train_val_list)))\n f_list.close()\n\ndef load_test_list():\n #1. original test_list.txt\n f_list = open(test_list_file, \"r\")\n l = f_list.readlines() \n for line in l:\n s = line \n if s[len(s) - 1] == '\\n':\n s = s[:len(s) - 1]\n img_name = s\n this_label = img_index_2_label_dict[img_name] \n find_or = this_label.find('|') \n if find_or != -1:\n continue\n # See if this image exists \n for folders in img_folders:\n img_path = root_dir + folders + suffix + img_name \n if not osp.exists(img_path):\n continue\n test_list.append(img_path) \n this_label = label_name_dict[this_label]\n test_labels.append(this_label)\n print('There are {:6d} images in test.\\n'.format(len(test_list)))\n f_list.close()\n\n\t\nload_image_index_and_list()\nbuild_img_2_label_dict()\nload_train_val_list()\nload_test_list()\n\n# Where to log outputs\nlogger = colorlogger(\"logs/\", log_name=\"logs_all.txt\")\ndiscriminator_logger = colorlogger(\"logs/\", log_name=\"logs_D(x);1.txt\")\nfake_logger = colorlogger(\"logs/\", log_name=\"logs_D(G(z));0.txt\")\ngenerator_logger = colorlogger(\"logs/\", log_name=\"logs_D(G(z));1.txt\")\nreal_classifier_logger = colorlogger(\"logs/\", log_name=\"logs_C(x).txt\")\nfake_classifier_logger = colorlogger(\"logs/\", log_name=\"logs_C(G(z)).txt\")\ntotal_logger = colorlogger(\"logs/\", log_name=\"logs_loss_total.txt\")\ntrain_accuracy_logger = colorlogger(\"logs/\", log_name=\"logs_train_classifier_acc.txt\")\ntest_accuracy_logger = colorlogger(\"logs/\", log_name=\"logs_test_classifier_acc.txt\")\navg_kl_logger = colorlogger(\"logs/\", log_name=\"logs_avg_kl.txt\")\n\nepochs = 100\nepoch_imgs = 3483 #how many images are defined as an epoch\n\n#6. 
Randomly sample training images \ndef sample_train_images_randomly():\n inputs = []\n labels = []\n for i in range(batch_size):\n sz = len(train_val_list)\n img_id = random.randint(0, sz - 1)\n img_path = train_val_list[img_id]\n if not osp.exists(img_path):\n \tprint('Image ', img_path, 'does not exist?')\n else:\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_IGNORE_ORIENTATION)\n img = img / 256.0\n #on my laptop it needs to be divided by 256 not sure elsewhere to be in the range[0, 1]\n img = cv2.resize(img, (img_size, img_size))\n img = img.astype(np.float32)\n img = torch.from_numpy(img)\n img = img.reshape((img.shape[0], img.shape[1], 1))\n img = img.permute(2, 0, 1).data.cpu().numpy()\n inputs.append(img)\n this_label = train_val_labels[img_id]\n #print(this_label)\n labels.append(this_label) \n\n TRAIN_AUG = torch.nn.Sequential(\n \n T.RandomResizedCrop((img_size, img_size), scale=(0.75, 1.33), ratio=(0.75, 1.3333333333333333)),\n T.Normalize(\n mean=torch.tensor([0.485]),\n std=torch.tensor([0.229])),\n )\n\n inputs = np.array(inputs)\n inputs = torch.from_numpy(inputs)\n inputs = TRAIN_AUG(inputs)\n labels = np.array(labels)\n labels = labels.reshape((labels.shape[0]))\n labels = torch.from_numpy(labels).long()\n return inputs, labels\n\n#6. Randomly sample test images \ndef sample_test_images_randomly():\n inputs = []\n labels = []\n for i in range(batch_size):\n sz = len(test_list)\n img_id = random.randint(0, sz - 1)\n img_path = test_list[img_id]\n if not osp.exists(img_path):\n \tprint('Image ', img_path, 'does not exist?')\n else:\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_IGNORE_ORIENTATION)\n img = img / 256.0\n #on my laptop it needs to be divided by 256 not sure elsewhere to be in the range[0, 1]\n img = cv2.resize(img, (img_size, img_size))\n img = img.astype(np.float32)\n img = torch.from_numpy(img)\n img = img.reshape((img.shape[0], img.shape[1], 1))\n img = img.permute(2, 0, 1).data.cpu().numpy()\n inputs.append(img)\n this_label = test_labels[img_id]\n labels.append(this_label) \n\n TEST_AUG = torch.nn.Sequential(\n \n #T.RandomResizedCrop((img_size, img_size), scale=(0.75, 1.33), ratio=(0.75, 1.3333333333333333)),\n T.Normalize(\n mean=torch.tensor([0.485]),\n std=torch.tensor([0.229])),\n )\n inputs = np.array(inputs)\n inputs = torch.from_numpy(inputs)\n inputs = TEST_AUG(inputs)\n labels = np.array(labels)\n labels = labels.reshape((labels.shape[0]))\n labels = torch.from_numpy(labels).long()\n return inputs, labels\n\n\n#6. 
Sequentially sample test images \ndef sample_test_images_sequentially(lb, ub):\n inputs = []\n labels = []\n for i in range(batch_size):\n #sz = len(test_list)\n img_id = lb + i #random.randint(0, sz - 1)\n img_path = test_list[img_id]\n if not osp.exists(img_path):\n print('Image ', img_path, 'does not exist?')\n else:\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_IGNORE_ORIENTATION)\n img = img / 256.0\n #on my laptop it needs to be divided by 256 not sure elsewhere to be in the range[0, 1]\n img = cv2.resize(img, (img_size, img_size))\n img = img.astype(np.float32)\n img = torch.from_numpy(img)\n img = img.reshape((img.shape[0], img.shape[1], 1))\n img = img.permute(2, 0, 1).data.cpu().numpy()\n inputs.append(img)\n this_label = test_labels[img_id]\n labels.append(this_label) \n\n TEST_AUG = torch.nn.Sequential(\n \n T.RandomResizedCrop((img_size, img_size), scale=(0.75, 1.33), ratio=(0.75, 1.3333333333333333)),\n T.Normalize(\n mean=torch.tensor([0.485]),\n std=torch.tensor([0.229])),\n )\n inputs = np.array(inputs)\n inputs = torch.from_numpy(inputs)\n inputs = TEST_AUG(inputs)\n labels = np.array(labels)\n labels = labels.reshape((labels.shape[0]))\n labels = torch.from_numpy(labels).long()\n return inputs, labels\n\n\n\n#6.5 label smoothing\nclass LabelSmoothingCrossEntropy(nn.Module):\n def __init__(self):\n super(LabelSmoothingCrossEntropy, self).__init__()\n def forward(self, x, target, smoothing=0.1):\n confidence = 1. - smoothing\n logprobs = F.log_softmax(x, dim=-1)\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)\n smooth_loss = -logprobs.mean(dim=-1)\n loss = confidence * nll_loss + smoothing * smooth_loss\n return loss.mean()\n\n\n\ndef mixup_data(x, y, alpha=1.0, use_cuda=True):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\ndef mixup_criterion(criterion, pred, y_a, y_b, lam):\n return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\n\n\nclass LabelSmoothingCrossEntropy(nn.Module):\n def __init__(self):\n super(LabelSmoothingCrossEntropy, self).__init__()\n def forward(self, x, target, smoothing=0.2): \n confidence = 1. 
- smoothing\n logprobs = F.log_softmax(x, dim=-1)\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)\n smooth_loss = -logprobs.mean(dim=-1)\n loss = confidence * nll_loss + smoothing * smooth_loss\n return loss.mean()\n##\n# version 1: use torch.autograd\nclass LabelSmoothSoftmaxCEV1(nn.Module):\n '''\n This is the autograd version, you can also try the LabelSmoothSoftmaxCEV2 that uses derived gradients\n '''\n\n def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):\n super(LabelSmoothSoftmaxCEV1, self).__init__()\n self.lb_smooth = lb_smooth\n self.reduction = reduction\n self.lb_ignore = ignore_index\n self.log_softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, logits, label):\n '''\n Same usage method as nn.CrossEntropyLoss:\n >>> criteria = LabelSmoothSoftmaxCEV1()\n >>> logits = torch.randn(8, 19, 384, 384) # nchw, float/half\n >>> lbs = torch.randint(0, 19, (8, 384, 384)) # nhw, int64_t\n >>> loss = criteria(logits, lbs)\n '''\n # overcome ignored label\n logits = logits.float() # use fp32 to avoid nan\n with torch.no_grad():\n num_classes = logits.size(1)\n label = label.clone().detach()\n ignore = label.eq(self.lb_ignore)\n n_valid = ignore.eq(0).sum()\n label[ignore] = 0\n lb_pos, lb_neg = 1. - self.lb_smooth, self.lb_smooth / num_classes\n lb_one_hot = torch.empty_like(logits).fill_(\n lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()\n\n logs = self.log_softmax(logits)\n loss = -torch.sum(logs * lb_one_hot, dim=1)\n loss[ignore] = 0\n if self.reduction == 'mean':\n loss = loss.sum() / n_valid\n if self.reduction == 'sum':\n loss = loss.sum()\n\n return loss\n\n\n\n# models\n# _net: 128x128\n# DCGAN_: 64X64\nnetG = _netG(1) #DCGAN_generator(1) \nnetD = _netD(1) #DCGAN_discriminator(1)\nnetC = ResNetBackbone(50, num_classes = 15) #ResNet18() #normal or pneumonia\nnetC.init_weights()\n\nnetG = DataParallelModel(netG).cuda()\nnetD = DataParallelModel(netD).cuda()\nnetC = DataParallelModel(netC).cuda()\n\n# optimizers \nblr_d = 0.0001\nblr_g = 0.0001\nblr_c = 0.0001\noptD = optim.Adam(netD.parameters(), lr=blr_d, betas=(0.5, 0.999), weight_decay = 1e-3)\noptG = optim.Adam(netG.parameters(), lr=blr_g, betas=(0.5, 0.999))\noptC = optim.Adam(netC.parameters(), lr=blr_c, betas=(0.5, 0.999), weight_decay = 1e-3)\n\n# losses \n# 1) for discriminator and generator)\nbce_loss = nn.BCELoss()\nbce_loss = DataParallelCriterion(bce_loss, device_ids=[0])\n# 2) for classifier\nif use_label_smoothing:\n criterion = LabelSmoothSoftmaxCEV1(lb_smooth=smoothing, ignore_index=255, reduction='mean')\nelse:\n criterion = nn.CrossEntropyLoss() #LabelSmoothingCrossEntropy() #\ncriterion = DataParallelCriterion(criterion, device_ids=[0])\n\nadvWeight = 0.25 # adversarial weight\n\n#5. 
Loading trained weights\ndef load_my_state_dict(model, state_dict):\n \n own_state = model.state_dict()\n for name, param in state_dict.items():\n if name not in own_state:\n continue\n #own_state[name] = deepcopy(param)\n own_state[name].copy_(param)\n #print(own_state)\n return own_state\n\ndef load_model(model_path):\n ckpt = torch.load(model_path) \n start_epoch = ckpt['epoch'] + 1\n #netD.load_state_dict(ckpt['netD']) \n #netG.load_state_dict(ckpt['netG']) \n netC.load_state_dict(ckpt['netC'])\n #optD.load_state_dict(ckpt['optD'])\n #optG.load_state_dict(ckpt['optG'])\n #optC.load_state_dict(ckpt['optC'])\n total_trained_samples = ckpt['total_trained_samples']\n return start_epoch, total_trained_samples\n\n\ndef distance(X, Y, sqrt=True):\n nX = X.size(0)\n nY = Y.size(0)\n \n X = X.view(nX,-1).cuda()\n X2 = (X*X).sum(1).resize(nX,1)\n Y = Y.view(nY,-1).cuda()\n Y2 = (Y*Y).sum(1).resize(nY,1)\n\n M = torch.zeros(nX, nY)\n M.copy_(X2.expand(nX,nY) + Y2.expand(nY,nX).transpose(0,1) - 2*torch.mm(X,Y.transpose(0,1)))\n\n #del X, X2, Y, Y2\n \n if sqrt:\n M = ((M+M.abs())/2).sqrt()\n\n return M\n\ndef mmd(Mxx, Mxy, Myy, sigma = 1):\n scale = Mxx.mean()\n Mxx = torch.exp(-Mxx/(scale*2*sigma*sigma))\n Mxy = torch.exp(-Mxy/(scale*2*sigma*sigma))\n Myy = torch.exp(-Myy/(scale*2*sigma*sigma))\n a = Mxx.mean()+Myy.mean()-2*Mxy.mean() \n if a.item() > 1e-6:\n \tmmd = torch.sqrt(a)\n \t#print(mmd)\n else:\n \treturn -1\n return mmd \n\n#6. Testing loop\ndef test_all(file, epoch, best_accuracy, best_epoch):\n netD.eval()\n netG.eval()\n total_test = 0\n correct_test = 0\n test_num = 100 \n # sample some test images\n with torch.no_grad():\n for steps in range(test_num):\n inputs, labels = sample_test_images_sequentially(steps * batch_size, (steps + 1) * batch_size)\n inputs = inputs.cuda()\n labels = labels.cuda()\n outputs = netC(inputs)\n \n # accuracy\n _, predicted = torch.max(outputs.data, 1)\n total_test += labels.size(0)\n correct_test += predicted.eq(labels.data).sum().item()\n test_accuracy = 100 * correct_test / total_test\n print('Epoch {:5d} test acc {:6.2f} Current Best {:6.2f} \\n'.format(epoch, test_accuracy, best_accuracy))\n file.write('Epoch {:5d} test acc {:6.2f} Current Best {:6.2f} \\n'.format(epoch, test_accuracy, best_accuracy))\n if test_accuracy > best_accuracy:\n best_accuracy = test_accuracy\n best_epoch = epoch\n return test_accuracy, best_accuracy, best_epoch \ntotal_trained_samples = 0\ntorch.manual_seed(42)\nstart_epoch = 0\nfile = open('best.txt', 'w')\nbest_acc = 0\nbest_epoch = 0\nfor epoch in range(36, 100):\n start_epoch, total_trained_samples = load_model('../models_apr28/ecgan-chest-xray14epo_' + str(epoch) + '.pth')\n netC.eval() #classifier\n test_acc, best_acc, best_epoch = test_all(file, epoch, best_acc, best_epoch)\n \nfile.close()\n \n \n \n \n \n"
] | [
[
"torch.max",
"torch.load",
"torch.zeros",
"torch.cat",
"torch.randperm",
"torch.sum",
"torch.nn.parallel.parallel_apply.get_a_var",
"torch.set_grad_enabled",
"torch.no_grad",
"torch.cuda.is_available",
"torch.flatten",
"torch.device",
"torch.is_grad_enabled",
"torch.cuda.comm.reduce_add_coalesced",
"torch.utils.model_zoo.load_url",
"torch.autograd.Variable",
"torch.nn.CrossEntropyLoss",
"numpy.random.beta",
"torch.sqrt",
"torch.from_numpy",
"torch.nn.Sigmoid",
"torch.tensor",
"torch.nn.parallel._functions.Broadcast.apply",
"torch.nn.Sequential",
"torch.nn.LogSoftmax",
"torch.empty_like",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.BCELoss",
"torch.exp",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.InstanceNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"numpy.array",
"torch.cuda.device",
"torch.nn.ReflectionPad2d",
"torch.manual_seed",
"torch.cuda.comm.reduce_add",
"torch.nn.Tanh",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Upsample",
"torch.cuda.comm.broadcast_coalesced",
"torch.nn.ReLU",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Yongtae723/88_face | [
"7a761cb277be2a28984161be1e7ae2b73cadf085"
] | [
"wtfml/data_loaders/pl_data_module/data_module.py"
] | [
"import pytorch_lightning as pl\nfrom torch.utils.data import DataLoader\n\n\nclass plDataModule(pl.LightningDataModule):\n def __init__(\n self,\n train_dataset,\n val_dataset,\n test_dataset=None,\n num_workers=2,\n train_sampler=None,\n train_shuffle=True,\n train_batch_size=64,\n train_drop_last=False,\n val_batch_size=16,\n val_shuffle=False,\n val_sampler=None,\n train_dataloader=None,\n val_dataloader=None,\n test_dataloader=None,\n ):\n super().__init__()\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.test_dataset = test_dataset\n\n self.num_workers = num_workers\n self.train_sampler = train_sampler\n self.train_shuffle = train_shuffle\n self.train_batch_size = train_batch_size\n self.train_drop_last = train_drop_last\n\n self.val_batch_size = val_batch_size\n self.val_shuffle = val_shuffle\n self.val_sampler = val_sampler\n\n self.created_train_dataloader = train_dataloader\n self.created_val_dataloader = val_dataloader\n self.created_test_dataloader = test_dataloader\n\n def train_dataloader(self):\n if self.created_train_dataloader:\n return self.created_train_dataloader\n return DataLoader(\n self.train_dataset,\n batch_size=self.train_batch_size,\n sampler=self.train_sampler,\n drop_last=self.train_drop_last,\n num_workers=self.num_workers,\n shuffle=self.train_shuffle if not self.train_sampler else False,\n )\n\n def val_dataloader(self):\n if self.created_val_dataloader:\n return self.created_val_dataloader\n return DataLoader(\n self.val_dataset,\n batch_size=self.val_batch_size,\n sampler=self.val_sampler,\n drop_last=False,\n num_workers=self.num_workers,\n shuffle=self.val_shuffle if not self.val_sampler else False,\n )\n\n def test_dataloader(self):\n if self.created_test_dataloader:\n return self.created_test_dataloader\n if self.test_dataset:\n return DataLoader(\n self.test_dataset,\n batch_size=self.val_batch_size,\n sampler=self.val_sampler,\n drop_last=False,\n num_workers=self.num_workers,\n shuffle=self.val_shuffle if not self.val_sampler else False,\n )\n"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aimalz/justice | [
"2edcb471cd01d6659a498bcd0209cb5dae83375a",
"2edcb471cd01d6659a498bcd0209cb5dae83375a"
] | [
"justice/summarize.py",
"justice/features/dense_extracted_features.py"
] | [
"\"\"\"Tools for summarizing lightcurve data into statistics\"\"\"\n\nimport numpy as np\nimport scipy.optimize as spo\nfrom tensorflow.contrib.framework import nest\n\nfrom justice import lightcurve\nfrom justice import xform\n\n\ndef opt_alignment(\n lca: lightcurve._LC,\n lcb: lightcurve._LC,\n ivals=None,\n constraints=None,\n method='Nelder-Mead',\n options=None,\n vb=True,\n) -> xform.LCXform:\n \"\"\"\n Minimizes the arclength between two lightcurves after merging\n\n :param lca: First lightcurve.\n :param lcb: Lightcurve to try merging in\n :param ivals: initial values to try\n :param constraints: Not sure how these work, feel free to give it a try though!\n :param method: Only Nelder_Mead is tested as of now\n :param options: Only maxiter is included right now\n :param vb: Boolean verbose\n :return: best xform\n \"\"\"\n if constraints is None:\n constraints = []\n if options is None:\n options = {'maxiter': 10000}\n if ivals is None:\n ivals = np.array([0, 0, 1, 1])\n\n if method != 'Nelder-Mead':\n\n def pos_dil(xf: xform.LinearBandDataXform):\n return min(xf._dilate_time, xf._dilate_flux)\n\n constraints += [{'type': 'ineq', 'fun': pos_dil}]\n else:\n constraints = None\n\n # don't know if this way of handling constraints actually works -- untested!\n def _helper(vals):\n bd_xform = xform.LinearBandDataXform(*vals)\n lca_xform = xform.SameLCXform(bd_xform)\n lc = lca_xform.apply(lcb)\n new_lc = lca + lc\n length = new_lc.connect_the_dots()\n return length\n\n # could make this a probability by taking chi^2 error relative to\n # connect_the_dots original, but it didn't work better in the sandbox\n # notebook\n res = spo.minimize(\n _helper, ivals, constraints=constraints, method=method, options=options\n )\n if vb:\n print(res)\n res_xform = xform.SameLCXform(xform.LinearBandDataXform(*res.x))\n return res_xform\n",
"# -*- coding: utf-8 -*-\n\"\"\"Extracts dense features with linear transformations.\"\"\"\nimport enum\nimport json\nimport pathlib\nimport typing\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom justice import path_util\nfrom justice.align_model import graph_typecheck\nfrom justice.features import band_settings_params\n\n\ndef _left_mask(before_padding, window_size):\n \"\"\"Generates a mask for left-padded vectors.\n\n Mask elements are True if a valid element is present.\n\n e.g. suppose left features are [0, 0, 0, x], where the \"0\" values are padding,\n so before_padding = 3. This function will return a mask [False, False, False, True].\n (in reality everything is vectorized by batch dimension.\n\n :param before_padding: [batch_size] tensor of before_padding values.\n :param window_size: scalar window size.\n :return: [batch_size, window_size] boolean tensor mask.\n \"\"\"\n return tf.logical_not(tf.sequence_mask(before_padding, maxlen=window_size))\n\n\ndef _right_mask(after_padding, window_size):\n \"\"\"Same as above, but for right-padded vectors.\"\"\"\n return tf.sequence_mask(window_size - after_padding, maxlen=window_size)\n\n\nclass WindowFeatures(object):\n \"\"\"Helper for dealing with window-like raw features.\n\n In particular, this class generates concatenated \"dflux_dt\" values, and has a masking\n helper, which will probably be applied after doing some non-linear transformations to\n dflux_dt (or possibly raw self.dtime, self.dflux) values.\n \"\"\"\n\n def __init__(\n self,\n band_features: dict,\n batch_size: int,\n window_size: int,\n band_time_diff: float = 4.0\n ):\n \"\"\"Initializes a windowed feature extractor.\n\n :param band_features: Band features, generated by raw_value_features.\n :param batch_size: Outer batch size.\n :param window_size: Number of points in 'before' and 'after' windows.\n :param band_time_diff: Maximum difference between requested time and actual time\n in the window.\n \"\"\"\n\n def batch_shaped(t):\n return graph_typecheck.assert_shape(t, [batch_size])\n\n def batch_win_shaped(t):\n return graph_typecheck.assert_shape(t, [batch_size, window_size])\n\n def batch_2win_shaped(t):\n return graph_typecheck.assert_shape(t, [batch_size, 2 * window_size])\n\n def tile_to_2win(t):\n return tf.tile(tf.expand_dims(t, 1), [1, 2 * window_size])\n\n closest_time = batch_shaped(band_features['closest_time_in_band'])\n closest_flux = batch_shaped(band_features['closest_flux_in_band'])\n self.in_window = tf.less(\n batch_shaped(band_features[\"closest_time_diff\"]), band_time_diff\n )\n\n # Before and after flux.\n before_flux = batch_win_shaped(band_features[\"before_flux\"])\n after_flux = batch_win_shaped(band_features[\"after_flux\"])\n before_time = batch_win_shaped(band_features[\"before_time\"])\n after_time = batch_win_shaped(band_features[\"after_time\"])\n\n self.dtime = batch_2win_shaped(\n tf.concat([before_time, after_time], axis=1) - tile_to_2win(closest_time),\n )\n self.dflux = batch_2win_shaped(\n tf.concat([before_flux, after_flux], axis=1) - tile_to_2win(closest_flux),\n )\n\n # Masking tensor.\n left_mask = _left_mask(\n batch_shaped(\n band_features[\"before_padding\"]),\n window_size)\n right_mask = _right_mask(\n batch_shaped(band_features[\"after_padding\"]), window_size\n )\n\n self.mask = batch_2win_shaped(\n tf.logical_and(\n tf.concat([left_mask, right_mask], axis=1), tile_to_2win(self.in_window)\n )\n )\n\n def dflux_dt(self, clip_magnitude: typing.Optional[float]) -> tf.Tensor:\n \"\"\"Computes dflux/dt.\n\n :param 
clip_magnitude: Option for clipping the magnitude, if dt might be very small.\n :return: <float>[batch_size, 2 * window_size] dflux/dt tensor.\n \"\"\"\n result = self.dflux / self.dtime\n if clip_magnitude is not None:\n result = tf.clip_by_value(\n result, clip_value_min=-clip_magnitude, clip_value_max=clip_magnitude\n )\n return result\n\n def masked(\n self, expanded_tensor: tf.Tensor, value_if_masked: float,\n expected_extra_dims: typing.List[int]\n ):\n \"\"\"Masks a tensor which was calculated from dflux_dt.\n\n :param expanded_tensor: <float>[batch_size, window_size, ...] Tensor with first\n dimensions being batch_size and window_size.\n :param value_if_masked: Value to fill for masked positions.\n :param expected_extra_dims: Expected extra dimensions.\n :returns: Tensor of same shape as expanded_tensor, but with `value_if_masked` filled\n in masked dimensions.\n \"\"\"\n mask_shape = list(map(int, self.mask.shape))\n graph_typecheck.assert_shape(expanded_tensor, mask_shape + expected_extra_dims)\n\n value_if_masked = expanded_tensor.dtype.as_numpy_dtype(value_if_masked)\n if_masked_tensor = tf.fill(expanded_tensor.shape, value_if_masked)\n mask = self.mask\n for i in range(2, 2 + len(expected_extra_dims)):\n mask = tf.expand_dims(mask, axis=i)\n mask = tf.tile(mask, [1, 1] + expected_extra_dims)\n return tf.where(mask, expanded_tensor, if_masked_tensor)\n\n\ndef initial_layer(\n window_feature: WindowFeatures, *, clip_magnitude=10.0, include_flux_and_time=False\n) -> tf.Tensor:\n features = tf.expand_dims(window_feature.dflux_dt(clip_magnitude=clip_magnitude), 2)\n if include_flux_and_time:\n dflux = tf.expand_dims(window_feature.dflux, 2)\n dtime = tf.expand_dims(window_feature.dtime, 2)\n features = tf.concat([features, dflux, dtime],\n axis=2,\n name=\"initial_layer_concat\")\n return features\n\n\nclass CutoffData:\n def __init__(self, config_json: dict):\n self.window_size: int = config_json[\"window_size\"]\n self.band_time_diff: int = config_json[\"band_time_diff\"]\n self.embedding_size: int = config_json[\"desired_num_cutoffs\"]\n self.models_by_band = {}\n for solution in config_json[\"solutions\"]:\n band = solution[\"band\"]\n self.models_by_band.setdefault(band, {})\n self.models_by_band[band][solution[\"column\"]] = (\n solution[\"median_scale\"],\n solution[\"cutoffs\"],\n )\n\n def dflux_dt_dflux_dtime_scales(self, band: str, dtype=np.float32):\n \"\"\"Generates a vector of scalar offsets.\n\n :param band: Band name.\n :param dtype: Data type of output array.\n :return: <dtype>[3] matrix of scales per channel.\n \"\"\"\n return np.array([\n self.models_by_band[band][\"dflux_dt\"][0],\n self.models_by_band[band][\"dflux\"][0],\n self.models_by_band[band][\"dtime\"][0],\n ],\n dtype=dtype)\n\n def dflux_dt_dflux_dtime_cutoffs(self, band: str, dtype=np.float32):\n \"\"\"Generates a matrix of [dflux_dt, dflux, dtime].\n\n :param band: Band name.\n :param dtype: Data type of output array.\n :return: <dtype>[3, self.embedding_size] matrix of cutoffs.\n \"\"\"\n return np.array([\n self.models_by_band[band][\"dflux_dt\"][1],\n self.models_by_band[band][\"dflux\"][1],\n self.models_by_band[band][\"dtime\"][1],\n ],\n dtype=dtype)\n\n @classmethod\n def from_file(cls, filename: pathlib.Path):\n if not filename.is_file():\n raise EnvironmentError(\n \"Please generate tf_align_model data using the tf_align_model_input_\"\n \"feature_percentiles.ipynb notebook or run `git clone https://github.com/\"\n \"gatoatigrado/plasticc-generated-data data/tf_align_model`.\")\n with 
open(str(filename)) as f:\n return cls(json.load(f))\n\n\nclass Nonlinearity(enum.Enum):\n SIGMOID = 1\n GAUSSIAN = 2\n\n\ndef nonlinearity_fcn(typ: Nonlinearity):\n if typ == Nonlinearity.SIGMOID:\n return tf.sigmoid\n else:\n assert typ == Nonlinearity.GAUSSIAN\n return lambda arg: tf.exp(-arg * arg)\n\n\ndef initial_layer_binned(\n initial_layer_features: tf.Tensor,\n cutoff_data: CutoffData,\n band: str,\n soft_onehot: Nonlinearity = Nonlinearity.SIGMOID\n):\n batch_size, twice_window_size, channels = map(int, initial_layer_features.shape)\n nonlinearity = nonlinearity_fcn(soft_onehot)\n if channels == 3:\n scales = cutoff_data.dflux_dt_dflux_dtime_scales(band)\n cutoffs = cutoff_data.dflux_dt_dflux_dtime_cutoffs(band)\n\n cutoffs_batch_window = tf.expand_dims(tf.expand_dims(cutoffs, 0), 0)\n scales_batch_window = tf.expand_dims(\n tf.expand_dims(tf.expand_dims(scales, 0), 0), -1\n )\n init_layer_per_cutoff = tf.expand_dims(initial_layer_features, -1)\n graph_typecheck.assert_shape(\n cutoffs_batch_window, [1, 1, channels, cutoff_data.embedding_size]\n )\n graph_typecheck.assert_shape(scales_batch_window, [1, 1, channels, 1])\n graph_typecheck.assert_shape(\n init_layer_per_cutoff, [batch_size, twice_window_size, channels, 1]\n )\n result = nonlinearity(\n (init_layer_per_cutoff - cutoffs_batch_window) / scales_batch_window\n )\n return graph_typecheck.assert_shape(\n result, [batch_size, twice_window_size, channels, cutoff_data.embedding_size]\n )\n else:\n raise NotImplementedError(f\"{channels}-size data not implemented.\")\n\n\ndef cutoff_data_for_window_size(window_size):\n if window_size == 10:\n cutoff_data = CutoffData.from_file(\n path_util.tf_align_data / 'feature_extraction' /\n 'cutoffs__window_sz-10__2018-11-23.json'\n )\n else:\n raise ValueError(\"No supported cutoff data for window size\")\n return cutoff_data\n\n\ndef initial_layer_binned_defaults(\n band_features: dict,\n band: str,\n batch_size: int,\n window_size: int,\n value_if_masked: float = 0.0,\n soft_onehot: Nonlinearity = Nonlinearity.SIGMOID\n):\n cutoff_data = cutoff_data_for_window_size(window_size)\n wf = WindowFeatures(band_features, batch_size=batch_size, window_size=window_size)\n init_layer = initial_layer(wf, include_flux_and_time=True)\n binned = initial_layer_binned(\n init_layer, cutoff_data=cutoff_data, band=band, soft_onehot=soft_onehot\n )\n masked = wf.masked(\n binned,\n value_if_masked=value_if_masked,\n expected_extra_dims=[3, cutoff_data.embedding_size]\n )\n return masked\n\n\ndef per_band_model_fn(\n band_features,\n band_name,\n params,\n value_if_masked: float = 0.0,\n soft_onehot: Nonlinearity = Nonlinearity.SIGMOID\n):\n batch_size = params[\"batch_size\"]\n window_size = params[\"window_size\"]\n return initial_layer_binned_defaults(\n band_features,\n band=band_name,\n batch_size=batch_size,\n window_size=window_size,\n value_if_masked=value_if_masked,\n soft_onehot=soft_onehot\n )\n\n\ndef feature_model_fn(features, params):\n band_settings = band_settings_params.BandSettings.from_params(params)\n per_band_data = band_settings.per_band_sub_model_fn_with_band_name(\n per_band_model_fn,\n features,\n params=params,\n value_if_masked=params.get(\"value_if_masked\", 0.0),\n soft_onehot=Nonlinearity[params.get(\"input_soft_onehot\", \"sigmoid\").upper()]\n )\n return graph_typecheck.assert_shape(\n tf.stack(per_band_data, axis=4), [\n params[\"batch_size\"],\n 2 * params[\"window_size\"],\n 3,\n cutoff_data_for_window_size(params[\"window_size\"]).embedding_size,\n 
band_settings.nbands,\n ]\n )\n"
] | [
[
"numpy.array",
"scipy.optimize.minimize"
],
[
"tensorflow.clip_by_value",
"tensorflow.fill",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.expand_dims",
"tensorflow.exp",
"tensorflow.where",
"numpy.array",
"tensorflow.sequence_mask",
"tensorflow.tile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
nilkeshpatra/kmodes | [
"f4b5582e7bb872b15ec4e2c135fd40bd42642e83"
] | [
"kmodes/kprototypes.py"
] | [
"\"\"\"\nK-prototypes clustering for mixed categorical and numerical data\n\"\"\"\n\n# pylint: disable=super-on-old-class,unused-argument,attribute-defined-outside-init\n\nfrom collections import defaultdict\n\nimport numpy as np\nfrom scipy import sparse\nfrom sklearn.externals.joblib import Parallel, delayed\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.validation import check_array\n\nfrom . import kmodes\nfrom .util import get_max_value_key, encode_features, get_unique_rows, decode_centroids\nfrom .util.dissim import matching_dissim, euclidean_dissim\n\n# Number of tries we give the initialization methods to find non-empty\n# clusters before we switch to random initialization.\nMAX_INIT_TRIES = 20\n# Number of tries we give the initialization before we raise an\n# initialization error.\nRAISE_INIT_TRIES = 100\n\n\ndef move_point_num(point, to_clust, from_clust, cl_attr_sum, cl_memb_sum):\n \"\"\"Move point between clusters, numerical attributes.\"\"\"\n # Update sum of attributes in cluster.\n for iattr, curattr in enumerate(point):\n cl_attr_sum[to_clust][iattr] += curattr\n cl_attr_sum[from_clust][iattr] -= curattr\n # Update sums of memberships in cluster\n cl_memb_sum[to_clust] += 1\n cl_memb_sum[from_clust] -= 1\n return cl_attr_sum, cl_memb_sum\n\n\ndef _split_num_cat(X, categorical):\n \"\"\"Extract numerical and categorical columns.\n Convert to numpy arrays, if needed.\n\n :param X: Feature matrix\n :param categorical: Indices of categorical columns\n \"\"\"\n Xnum = np.asanyarray(X[:, [ii for ii in range(X.shape[1])\n if ii not in categorical]]).astype(np.float64)\n Xcat = np.asanyarray(X[:, categorical])\n return Xnum, Xcat\n\n\ndef _labels_cost(Xnum, Xcat, centroids, num_dissim, cat_dissim, gamma, membship=None):\n \"\"\"Calculate labels and cost function given a matrix of points and\n a list of centroids for the k-prototypes algorithm.\n \"\"\"\n\n n_points = Xnum.shape[0]\n Xnum = check_array(Xnum)\n\n cost = 0.\n labels = np.empty(n_points, dtype=np.uint8)\n for ipoint in range(n_points):\n # Numerical cost = sum of Euclidean distances\n num_costs = num_dissim(centroids[0], Xnum[ipoint])\n cat_costs = cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)\n # Gamma relates the categorical cost to the numerical cost.\n tot_costs = num_costs + gamma * cat_costs\n clust = np.argmin(tot_costs)\n labels[ipoint] = clust\n cost += tot_costs[clust]\n\n return labels, cost\n\n\ndef _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_memb_sum, cl_attr_freq,\n membship, num_dissim, cat_dissim, gamma, random_state):\n \"\"\"Single iteration of the k-prototypes algorithm\"\"\"\n moves = 0\n for ipoint in range(Xnum.shape[0]):\n clust = np.argmin(\n num_dissim(centroids[0], Xnum[ipoint]) +\n gamma * cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)\n )\n if membship[clust, ipoint]:\n # Point is already in its right place.\n continue\n\n # Move point, and update old/new cluster frequencies and centroids.\n moves += 1\n old_clust = np.argwhere(membship[:, ipoint])[0][0]\n\n # Note that membship gets updated by kmodes.move_point_cat.\n # move_point_num only updates things specific to the k-means part.\n cl_attr_sum, cl_memb_sum = move_point_num(\n Xnum[ipoint], clust, old_clust, cl_attr_sum, cl_memb_sum\n )\n cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(\n Xcat[ipoint], ipoint, clust, old_clust,\n cl_attr_freq, membship, centroids[1]\n )\n\n # Update old and new centroids for numerical attributes using\n # the means and 
sums of all values\n for iattr in range(len(Xnum[ipoint])):\n for curc in (clust, old_clust):\n if cl_memb_sum[curc]:\n centroids[0][curc, iattr] = cl_attr_sum[curc, iattr] / cl_memb_sum[curc]\n else:\n centroids[0][curc, iattr] = 0.\n\n # In case of an empty cluster, reinitialize with a random point\n # from largest cluster.\n if not cl_memb_sum[old_clust]:\n from_clust = membship.sum(axis=1).argmax()\n choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch]\n rindx = random_state.choice(choices)\n\n cl_attr_sum, cl_memb_sum = move_point_num(\n Xnum[rindx], old_clust, from_clust, cl_attr_sum, cl_memb_sum\n )\n cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(\n Xcat[rindx], rindx, old_clust, from_clust,\n cl_attr_freq, membship, centroids[1]\n )\n\n return centroids, moves\n\n\ndef k_prototypes_single(Xnum, Xcat, nnumattrs, ncatattrs, n_clusters, n_points,\n max_iter, num_dissim, cat_dissim, gamma, init, init_no,\n verbose, random_state):\n # For numerical part of initialization, we don't have a guarantee\n # that there is not an empty cluster, so we need to retry until\n # there is none.\n random_state = check_random_state(random_state)\n init_tries = 0\n while True:\n init_tries += 1\n # _____ INIT _____\n if verbose:\n print(\"Init: initializing centroids\")\n if isinstance(init, str) and init.lower() == 'huang':\n centroids = kmodes.init_huang(Xcat, n_clusters, cat_dissim, random_state)\n elif isinstance(init, str) and init.lower() == 'cao':\n centroids = kmodes.init_cao(Xcat, n_clusters, cat_dissim)\n elif isinstance(init, str) and init.lower() == 'random':\n seeds = random_state.choice(range(n_points), n_clusters)\n centroids = Xcat[seeds]\n elif isinstance(init, list):\n # Make sure inits are 2D arrays.\n init = [np.atleast_2d(cur_init).T if len(cur_init.shape) == 1\n else cur_init\n for cur_init in init]\n assert init[0].shape[0] == n_clusters, \\\n \"Wrong number of initial numerical centroids in init \" \\\n \"({}, should be {}).\".format(init[0].shape[0], n_clusters)\n assert init[0].shape[1] == nnumattrs, \\\n \"Wrong number of numerical attributes in init ({}, should be {}).\" \\\n .format(init[0].shape[1], nnumattrs)\n assert init[1].shape[0] == n_clusters, \\\n \"Wrong number of initial categorical centroids in init ({}, \" \\\n \"should be {}).\".format(init[1].shape[0], n_clusters)\n assert init[1].shape[1] == ncatattrs, \\\n \"Wrong number of categorical attributes in init ({}, should be {}).\" \\\n .format(init[1].shape[1], ncatattrs)\n centroids = [np.asarray(init[0], dtype=np.float64),\n np.asarray(init[1], dtype=np.uint8)]\n else:\n raise NotImplementedError(\"Initialization method not supported.\")\n\n if not isinstance(init, list):\n # Numerical is initialized by drawing from normal distribution,\n # categorical following the k-modes methods.\n meanx = np.mean(Xnum, axis=0)\n stdx = np.std(Xnum, axis=0)\n centroids = [\n meanx + random_state.randn(n_clusters, nnumattrs) * stdx,\n centroids\n ]\n\n if verbose:\n print(\"Init: initializing clusters\")\n membship = np.zeros((n_clusters, n_points), dtype=np.uint8)\n # Keep track of the sum of attribute values per cluster so that we\n # can do k-means on the numerical attributes.\n cl_attr_sum = np.zeros((n_clusters, nnumattrs), dtype=np.float64)\n # Same for the membership sum per cluster\n cl_memb_sum = np.zeros(n_clusters, dtype=int)\n # cl_attr_freq is a list of lists with dictionaries that contain\n # the frequencies of values per cluster and attribute.\n cl_attr_freq = [[defaultdict(int) 
for _ in range(ncatattrs)]\n for _ in range(n_clusters)]\n for ipoint in range(n_points):\n # Initial assignment to clusters\n clust = np.argmin(\n num_dissim(centroids[0], Xnum[ipoint]) + gamma *\n cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)\n )\n membship[clust, ipoint] = 1\n cl_memb_sum[clust] += 1\n # Count attribute values per cluster.\n for iattr, curattr in enumerate(Xnum[ipoint]):\n cl_attr_sum[clust, iattr] += curattr\n for iattr, curattr in enumerate(Xcat[ipoint]):\n cl_attr_freq[clust][iattr][curattr] += 1\n\n # If no empty clusters, then consider initialization finalized.\n if membship.sum(axis=1).min() > 0:\n break\n\n if init_tries == MAX_INIT_TRIES:\n # Could not get rid of empty clusters. Randomly\n # initialize instead.\n init = 'random'\n elif init_tries == RAISE_INIT_TRIES:\n raise ValueError(\n \"Clustering algorithm could not initialize. \"\n \"Consider assigning the initial clusters manually.\"\n )\n\n # Perform an initial centroid update.\n for ik in range(n_clusters):\n for iattr in range(nnumattrs):\n centroids[0][ik, iattr] = cl_attr_sum[ik, iattr] / cl_memb_sum[ik]\n for iattr in range(ncatattrs):\n centroids[1][ik, iattr] = get_max_value_key(cl_attr_freq[ik][iattr])\n\n # _____ ITERATION _____\n if verbose:\n print(\"Starting iterations...\")\n itr = 0\n labels = None\n converged = False\n cost = np.Inf\n while itr <= max_iter and not converged:\n itr += 1\n centroids, moves = _k_prototypes_iter(Xnum, Xcat, centroids,\n cl_attr_sum, cl_memb_sum, cl_attr_freq,\n membship, num_dissim, cat_dissim, gamma,\n random_state)\n\n # All points seen in this iteration\n labels, ncost = _labels_cost(Xnum, Xcat, centroids,\n num_dissim, cat_dissim, gamma, membship)\n converged = (moves == 0) or (ncost >= cost)\n cost = ncost\n if verbose:\n print(\"Run: {}, iteration: {}/{}, moves: {}, ncost: {}\"\n .format(init_no + 1, itr, max_iter, moves, ncost))\n\n return centroids, labels, cost, itr\n\n\ndef k_prototypes(X, categorical, n_clusters, max_iter, num_dissim, cat_dissim,\n gamma, init, n_init, verbose, random_state, n_jobs):\n \"\"\"k-prototypes algorithm\"\"\"\n random_state = check_random_state(random_state)\n if sparse.issparse(X):\n raise TypeError(\"k-prototypes does not support sparse data.\")\n\n # Convert pandas objects to numpy arrays.\n if 'pandas' in str(X.__class__):\n X = X.values\n\n if categorical is None or not categorical:\n raise NotImplementedError(\n \"No categorical data selected, effectively doing k-means. \"\n \"Present a list of categorical columns, or use scikit-learn's \"\n \"KMeans instead.\"\n )\n if isinstance(categorical, int):\n categorical = [categorical]\n assert len(categorical) != X.shape[1], \\\n \"All columns are categorical, use k-modes instead of k-prototypes.\"\n assert max(categorical) < X.shape[1], \\\n \"Categorical index larger than number of columns.\"\n\n ncatattrs = len(categorical)\n nnumattrs = X.shape[1] - ncatattrs\n n_points = X.shape[0]\n assert n_clusters <= n_points, \"Cannot have more clusters ({}) \" \\\n \"than data points ({}).\".format(n_clusters, n_points)\n\n Xnum, Xcat = _split_num_cat(X, categorical)\n Xnum, Xcat = check_array(Xnum), check_array(Xcat, dtype=None)\n\n # Convert the categorical values in Xcat to integers for speed.\n # Based on the unique values in Xcat, we can make a mapping to achieve this.\n Xcat, enc_map = encode_features(Xcat)\n\n # Are there more n_clusters than unique rows? 
Then set the unique\n # rows as initial values and skip iteration.\n unique = get_unique_rows(X)\n n_unique = unique.shape[0]\n if n_unique <= n_clusters:\n max_iter = 0\n n_init = 1\n n_clusters = n_unique\n init = list(_split_num_cat(unique, categorical))\n init[1], _ = encode_features(init[1], enc_map)\n\n # Estimate a good value for gamma, which determines the weighing of\n # categorical values in clusters (see Huang [1997]).\n if gamma is None:\n gamma = 0.5 * Xnum.std()\n\n results = []\n seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)\n if n_jobs == 1:\n for init_no in range(n_init):\n results.append(k_prototypes_single(Xnum, Xcat, nnumattrs, ncatattrs,\n n_clusters, n_points, max_iter,\n num_dissim, cat_dissim, gamma,\n init, init_no, verbose, seeds[init_no]))\n else:\n results = Parallel(n_jobs=n_jobs, verbose=0)(\n delayed(k_prototypes_single)(Xnum, Xcat, nnumattrs, ncatattrs,\n n_clusters, n_points, max_iter,\n num_dissim, cat_dissim, gamma,\n init, init_no, verbose, seed)\n for init_no, seed in enumerate(seeds))\n all_centroids, all_labels, all_costs, all_n_iters = zip(*results)\n\n best = np.argmin(all_costs)\n if n_init > 1 and verbose:\n print(\"Best run was number {}\".format(best + 1))\n\n # Note: return gamma in case it was automatically determined.\n return all_centroids[best], enc_map, all_labels[best], \\\n all_costs[best], all_n_iters[best], gamma\n\n\nclass KPrototypes(kmodes.KModes):\n \"\"\"k-protoypes clustering algorithm for mixed numerical/categorical data.\n\n Parameters\n -----------\n n_clusters : int, optional, default: 8\n The number of clusters to form as well as the number of\n centroids to generate.\n\n max_iter : int, default: 300\n Maximum number of iterations of the k-modes algorithm for a\n single run.\n\n num_dissim : func, default: euclidian_dissim\n Dissimilarity function used by the algorithm for numerical variables.\n Defaults to the Euclidian dissimilarity function.\n\n cat_dissim : func, default: matching_dissim\n Dissimilarity function used by the kmodes algorithm for categorical variables.\n Defaults to the matching dissimilarity function.\n\n n_init : int, default: 10\n Number of time the k-modes algorithm will be run with different\n centroid seeds. The final results will be the best output of\n n_init consecutive runs in terms of cost.\n\n init : {'Huang', 'Cao', 'random' or a list of ndarrays}, default: 'Cao'\n Method for initialization:\n 'Huang': Method in Huang [1997, 1998]\n 'Cao': Method in Cao et al. [2009]\n 'random': choose 'n_clusters' observations (rows) at random from\n data for the initial centroids.\n If a list of ndarrays is passed, it should be of length 2, with\n shapes (n_clusters, n_features) for numerical and categorical\n data respectively. These are the initial centroids.\n\n gamma : float, default: None\n Weighing factor that determines relative importance of numerical vs.\n categorical attributes (see discussion in Huang [1997]). By default,\n automatically calculated from data.\n\n verbose : integer, optional\n Verbosity mode.\n\n random_state : int, RandomState instance or None, optional, default: None\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n n_jobs : int, default: 1\n The number of jobs to use for the computation. 
This works by computing\n each of the n_init runs in parallel.\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debugging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one\n are used.\n\n Attributes\n ----------\n cluster_centroids_ : array, [n_clusters, n_features]\n Categories of cluster centroids\n\n labels_ :\n Labels of each point\n\n cost_ : float\n Clustering cost, defined as the sum distance of all points to\n their respective cluster centroids.\n\n n_iter_ : int\n The number of iterations the algorithm ran for.\n\n gamma : float\n The (potentially calculated) weighing factor.\n\n Notes\n -----\n See:\n Huang, Z.: Extensions to the k-modes algorithm for clustering large\n data sets with categorical values, Data Mining and Knowledge\n Discovery 2(3), 1998.\n\n \"\"\"\n\n def __init__(self, n_clusters=8, max_iter=100, num_dissim=euclidean_dissim,\n cat_dissim=matching_dissim, init='Huang', n_init=10, gamma=None,\n verbose=0, random_state=None, n_jobs=1):\n\n super(KPrototypes, self).__init__(n_clusters, max_iter, cat_dissim,\n init, n_init, verbose, random_state,\n n_jobs)\n\n self.num_dissim = num_dissim\n self.gamma = gamma\n\n def fit(self, X, y=None, categorical=None):\n \"\"\"Compute k-prototypes clustering.\n\n Parameters\n ----------\n X : array-like, shape=[n_samples, n_features]\n categorical : Index of columns that contain categorical data\n \"\"\"\n\n random_state = check_random_state(self.random_state)\n # If self.gamma is None, gamma will be automatically determined from\n # the data. The function below returns its value.\n self._enc_cluster_centroids, self._enc_map, self.labels_, self.cost_,\\\n self.n_iter_, self.gamma = k_prototypes(X,\n categorical,\n self.n_clusters,\n self.max_iter,\n self.num_dissim,\n self.cat_dissim,\n self.gamma,\n self.init,\n self.n_init,\n self.verbose,\n random_state,\n self.n_jobs)\n return self\n\n def predict(self, X, categorical=None):\n \"\"\"Predict the closest cluster each sample in X belongs to.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n New data to predict.\n categorical : Index of columns that contain categorical data\n\n Returns\n -------\n labels : array, shape [n_samples,]\n Index of the cluster each sample belongs to.\n \"\"\"\n assert hasattr(self, '_enc_cluster_centroids'), \"Model not yet fitted.\"\n\n Xnum, Xcat = _split_num_cat(X, categorical)\n Xnum, Xcat = check_array(Xnum), check_array(Xcat, dtype=None)\n Xcat, _ = encode_features(Xcat, enc_map=self._enc_map)\n return _labels_cost(Xnum, Xcat, self._enc_cluster_centroids,\n self.num_dissim, self.cat_dissim, self.gamma)[0]\n\n @property\n def cluster_centroids_(self):\n if hasattr(self, '_enc_cluster_centroids'):\n return [\n self._enc_cluster_centroids[0],\n decode_centroids(self._enc_cluster_centroids[1], self._enc_map)\n ]\n else:\n raise AttributeError(\"'{}' object has no attribute 'cluster_centroids_' \"\n \"because the model is not yet fitted.\")\n"
] | [
[
"scipy.sparse.issparse",
"sklearn.utils.validation.check_array",
"sklearn.externals.joblib.Parallel",
"numpy.asarray",
"sklearn.externals.joblib.delayed",
"numpy.argwhere",
"numpy.atleast_2d",
"numpy.std",
"numpy.asanyarray",
"numpy.argmin",
"numpy.mean",
"numpy.iinfo",
"numpy.zeros",
"sklearn.utils.check_random_state",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
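The k-prototypes record above documents the KPrototypes estimator and its constructor parameters; the following is a minimal usage sketch, assuming the module is importable as in the published kmodes package (kmodes.kprototypes) and using made-up toy data with a hypothetical categorical column index — illustrative only, not part of the stored record.

# Minimal k-prototypes usage sketch.
# Assumptions: kmodes.kprototypes is importable; the toy data and the
# categorical column index are hypothetical.
import numpy as np
from kmodes.kprototypes import KPrototypes

# Columns 0-1 are numerical, column 2 is categorical.
X = np.array([
    [1.0, 2.0, 'a'],
    [1.2, 1.8, 'a'],
    [8.0, 9.0, 'b'],
    [8.5, 9.5, 'b'],
], dtype=object)

kproto = KPrototypes(n_clusters=2, init='Cao', n_init=5)
kproto.fit(X, categorical=[2])
print(kproto.labels_)             # cluster index per row
print(kproto.cluster_centroids_)  # [numerical centroids, decoded categorical centroids]
print(kproto.gamma)               # estimated as 0.5 * std of the numerical part when not given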
anuragphadnis/im2latex2 | [
"3e5bcb400d7bdff9cfd8ed03b821b3b6cb809b9b",
"3e5bcb400d7bdff9cfd8ed03b821b3b6cb809b9b",
"a54a6c7208b9258218538a4d56c3a3bd3bed2ca8"
] | [
"model/utils/text.py",
"model/evaluation/text.py",
"model/decoder.py"
] | [
"import numpy as np\nfrom collections import Counter\n\n\nclass Vocab(object):\n\n def __init__(self, config):\n self.config = config\n self.load_vocab()\n\n\n def load_vocab(self):\n special_tokens = [self.config.unk, self.config.pad, self.config.end]\n self.tok_to_id = load_tok_to_id(self.config.path_vocab, special_tokens)\n self.id_to_tok = {idx: tok for tok, idx in self.tok_to_id.items()}\n self.n_tok = len(self.tok_to_id)\n\n self.id_pad = self.tok_to_id[self.config.pad]\n self.id_end = self.tok_to_id[self.config.end]\n self.id_unk = self.tok_to_id[self.config.unk]\n\n\n @property\n def form_prepro(self):\n return get_form_prepro(self.tok_to_id, self.id_unk)\n\n\ndef get_form_prepro(vocab, id_unk):\n \"\"\"Given a vocab, returns a lambda function word -> id\n\n Args:\n vocab: dict[token] = id\n\n Returns:\n lambda function(formula) -> list of ids\n\n \"\"\"\n def get_token_id(token):\n return vocab[token] if token in vocab else id_unk\n\n def f(formula):\n formula = formula.strip().split(' ')\n return [get_token_id(t) for t in formula]\n\n return f\n\n\ndef load_tok_to_id(filename, tokens=[]):\n \"\"\"\n Args:\n filename: (string) path to vocab txt file one word per line\n tokens: list of token to add to vocab after reading filename\n\n Returns:\n dict: d[token] = id\n\n \"\"\"\n tok_to_id = dict()\n with open(filename) as f:\n for idx, token in enumerate(f):\n token = token.strip()\n tok_to_id[token] = idx\n\n # add extra tokens\n for tok in tokens:\n tok_to_id[tok] = len(tok_to_id)\n\n return tok_to_id\n\n\ndef build_vocab(datasets, min_count=10):\n \"\"\"Build vocabulary from an iterable of datasets objects\n\n Args:\n datasets: a list of dataset objects\n min_count: (int) if token appears less times, do not include it.\n\n Returns:\n a set of all the words in the dataset\n\n \"\"\"\n print(\"Building vocab...\")\n c = Counter()\n for dataset in datasets:\n for _, formula in dataset:\n try:\n c.update(formula)\n except Exception:\n print(formula)\n raise Exception\n vocab = [tok for tok, count in list(c.items()) if count >= min_count]\n print((\"- done. {}/{} tokens added to vocab.\".format(len(vocab), len(c))))\n return sorted(vocab)\n\n\ndef write_vocab(vocab, filename):\n \"\"\"Writes a vocab to a file\n\n Writes one word per line.\n\n Args:\n vocab: iterable that yields word\n filename: path to vocab file\n\n Returns:\n write a word per line\n\n \"\"\"\n print(\"Writing vocab...\")\n with open(filename, \"w\") as f:\n for i, word in enumerate(vocab):\n if i != len(vocab) - 1:\n f.write(\"{}\\n\".format(word))\n else:\n f.write(word)\n print((\"- done. 
{} tokens\".format(i+1)))\n\n\ndef pad_batch_formulas(formulas, id_pad, id_end, max_len=None):\n \"\"\"Pad formulas to the max length with id_pad and adds and id_end token\n at the end of each formula\n\n Args:\n formulas: (list) of list of ints\n max_length: length maximal of formulas\n\n Returns:\n array: of shape = (batch_size, max_len) of type np.int32\n array: of shape = (batch_size) of type np.int32\n\n \"\"\"\n if max_len is None:\n max_len = max([len(x) for x in formulas])\n\n batch_formulas = id_pad * np.ones([len(formulas), max_len+1],\n dtype=np.int32)\n formula_length = np.zeros(len(formulas), dtype=np.int32)\n for idx, formula in enumerate(formulas):\n batch_formulas[idx, :len(formula)] = np.asarray(formula,\n dtype=np.int32)\n batch_formulas[idx, len(formula)] = id_end\n formula_length[idx] = len(formula) + 1\n\n return batch_formulas, formula_length\n\n\ndef load_formulas(filename):\n formulas = dict()\n with open(filename) as f:\n for idx, line in enumerate(f):\n formulas[idx] = line.strip()\n\n print((\"Loaded {} formulas from {}\".format(len(formulas), filename)))\n return formulas\n",
"import os\r\nimport sys\r\nimport numpy as np\r\nimport nltk\r\nimport distance\r\n\r\n\r\nfrom ..utils.text import load_formulas\r\nfrom ..utils.general import init_dir\r\n\r\n\r\ndef score_files(path_ref, path_hyp):\r\n \"\"\"Loads result from file and score it\r\n\r\n Args:\r\n path_ref: (string) formulas of reference\r\n path_hyp: (string) formulas of prediction.\r\n\r\n Returns:\r\n scores: (dict)\r\n\r\n \"\"\"\r\n # load formulas\r\n formulas_ref = load_formulas(path_ref)\r\n formulas_hyp = load_formulas(path_hyp)\r\n\r\n assert len(formulas_ref) == len(formulas_hyp)\r\n\r\n # tokenize\r\n refs = [ref.split(' ') for _, ref in list(formulas_ref.items())]\r\n hyps = [hyp.split(' ') for _, hyp in list(formulas_hyp.items())]\r\n\r\n # score\r\n return {\r\n \"BLEU-4\": bleu_score(refs, hyps)*100,\r\n \"EM\": exact_match_score(refs, hyps)*100,\r\n \"Edit\": edit_distance(refs, hyps)*100\r\n }\r\n\r\n\r\ndef exact_match_score(references, hypotheses):\r\n \"\"\"Computes exact match scores.\r\n\r\n Args:\r\n references: list of list of tokens (one ref)\r\n hypotheses: list of list of tokens (one hypothesis)\r\n\r\n Returns:\r\n exact_match: (float) 1 is perfect\r\n\r\n \"\"\"\r\n exact_match = 0\r\n for ref, hypo in zip(references, hypotheses):\r\n if np.array_equal(ref, hypo):\r\n exact_match += 1\r\n\r\n return exact_match / float(max(len(hypotheses), 1))\r\n\r\n\r\ndef bleu_score(references, hypotheses):\r\n \"\"\"Computes bleu score.\r\n\r\n Args:\r\n references: list of list (one hypothesis)\r\n hypotheses: list of list (one hypothesis)\r\n\r\n Returns:\r\n BLEU-4 score: (float)\r\n\r\n \"\"\"\r\n references = [[ref] for ref in references] # for corpus_bleu func\r\n BLEU_4 = nltk.translate.bleu_score.corpus_bleu(references, hypotheses,\r\n weights=(0.25, 0.25, 0.25, 0.25))\r\n return BLEU_4\r\n\r\n\r\ndef edit_distance(references, hypotheses):\r\n \"\"\"Computes Levenshtein distance between two sequences.\r\n\r\n Args:\r\n references: list of list of token (one hypothesis)\r\n hypotheses: list of list of token (one hypothesis)\r\n\r\n Returns:\r\n 1 - levenshtein distance: (higher is better, 1 is perfect)\r\n\r\n \"\"\"\r\n d_leven, len_tot = 0, 0\r\n for ref, hypo in zip(references, hypotheses):\r\n d_leven += distance.levenshtein(ref, hypo)\r\n len_tot += float(max(len(ref), len(hypo)))\r\n\r\n return 1. 
- d_leven / len_tot\r\n\r\n\r\ndef truncate_end(list_of_ids, id_end):\r\n \"\"\"Removes the end of the list starting from the first id_end token\"\"\"\r\n list_trunc = []\r\n for idx in list_of_ids:\r\n if idx == id_end:\r\n break\r\n else:\r\n list_trunc.append(idx)\r\n\r\n return list_trunc\r\n\r\n\r\ndef write_answers(references, hypotheses, rev_vocab, dir_name, id_end):\r\n \"\"\"Writes text answers in files.\r\n\r\n One file for the reference, one file for each hypotheses\r\n\r\n Args:\r\n references: list of list (one reference)\r\n hypotheses: list of list of list (multiple hypotheses)\r\n hypotheses[0] is a list of all the first hypothesis for all the\r\n dataset\r\n rev_vocab: (dict) rev_vocab[idx] = word\r\n dir_name: (string) path where to write results\r\n id_end: (int) special id of token that corresponds to the END of\r\n sentence\r\n\r\n Returns:\r\n file_names: list of the created files\r\n\r\n \"\"\"\r\n def ids_to_str(ids):\r\n ids = truncate_end(ids, id_end)\r\n s = [rev_vocab[idx] for idx in ids]\r\n return \" \".join(s)\r\n\r\n def write_file(file_name, list_of_list):\r\n with open(file_name, \"w\") as f:\r\n for l in list_of_list:\r\n f.write(ids_to_str(l) + \"\\n\")\r\n\r\n init_dir(dir_name)\r\n file_names = [dir_name + \"ref.txt\"]\r\n write_file(dir_name + \"ref.txt\", references) # one file for the ref\r\n for i in range(len(hypotheses)): # one file per hypo\r\n assert len(references) == len(hypotheses[i])\r\n write_file(dir_name + \"hyp_{}.txt\".format(i), hypotheses[i])\r\n file_names.append(dir_name + \"hyp_{}.txt\".format(i))\r\n\r\n return file_names\r\n\r\n",
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.util import nest\nimport tensorflow.contrib.layers as layers\nfrom tensorflow.contrib.rnn import GRUCell, LSTMCell\n\n\nfrom .components.dynamic_decode import dynamic_decode\nfrom .components.attention_mechanism import AttentionMechanism\nfrom .components.attention_cell import AttentionCell\nfrom .components.greedy_decoder_cell import GreedyDecoderCell\nfrom .components.beam_search_decoder_cell import BeamSearchDecoderCell\n\n\nclass Decoder(object):\n \"\"\"Implements this paper https://arxiv.org/pdf/1609.04938.pdf\"\"\"\n\n def __init__(self, config, n_tok, id_end):\n self._config = config\n self._n_tok = n_tok\n self._id_end = id_end\n self._tiles = 1 if config.decoding == \"greedy\" else config.beam_size\n\n\n def __call__(self, training, img, formula, dropout):\n \"\"\"Decodes an image into a sequence of token\n\n Args:\n training: (tf.placeholder) bool\n img: encoded image (tf.Tensor) shape = (N, H, W, C)\n formula: (tf.placeholder), shape = (N, T)\n\n Returns:\n pred_train: (tf.Tensor), shape = (?, ?, vocab_size) logits of each class\n pret_test: (structure)\n - pred.test.logits, same as pred_train\n - pred.test.ids, shape = (?, config.max_length_formula)\n\n \"\"\"\n dim_embeddings = self._config.attn_cell_config.get(\"dim_embeddings\")\n E = tf.get_variable(\"E\", initializer=embedding_initializer(),\n shape=[self._n_tok, dim_embeddings], dtype=tf.float32)\n\n start_token = tf.get_variable(\"start_token\", dtype=tf.float32,\n shape=[dim_embeddings], initializer=embedding_initializer())\n\n batch_size = tf.shape(img)[0]\n\n # training\n with tf.variable_scope(\"attn_cell\", reuse=False):\n embeddings = get_embeddings(formula, E, dim_embeddings,\n start_token, batch_size)\n attn_meca = AttentionMechanism(img,\n self._config.attn_cell_config[\"dim_e\"])\n recu_cell = LSTMCell(self._config.attn_cell_config[\"num_units\"])\n attn_cell = AttentionCell(recu_cell, attn_meca, dropout,\n self._config.attn_cell_config, self._n_tok)\n\n train_outputs, _ = tf.nn.dynamic_rnn(attn_cell, embeddings,\n initial_state=attn_cell.initial_state())\n\n # decoding\n with tf.variable_scope(\"attn_cell\", reuse=True):\n attn_meca = AttentionMechanism(img=img,\n dim_e=self._config.attn_cell_config[\"dim_e\"],\n tiles=self._tiles)\n recu_cell = LSTMCell(self._config.attn_cell_config[\"num_units\"],\n reuse=True)\n attn_cell = AttentionCell(recu_cell, attn_meca, dropout,\n self._config.attn_cell_config, self._n_tok)\n if self._config.decoding == \"greedy\":\n decoder_cell = GreedyDecoderCell(E, attn_cell, batch_size,\n start_token, id_end)\n elif self._config.decoding == \"beam_search\":\n decoder_cell = BeamSearchDecoderCell(E, attn_cell, batch_size,\n start_token, self._id_end, self._config.beam_size,\n self._config.div_gamma, self._config.div_prob)\n\n test_outputs, _ = dynamic_decode(decoder_cell,\n self._config.max_length_formula+1)\n\n return train_outputs, test_outputs\n\n\ndef get_embeddings(formula, E, dim, start_token, batch_size):\n \"\"\"Returns the embedding of the n-1 first elements in the formula concat\n with the start token\n\n Args:\n formula: (tf.placeholder) tf.uint32\n E: tf.Variable (matrix)\n dim: (int) dimension of embeddings\n start_token: tf.Variable\n batch_size: tf variable extracted from placeholder\n\n Returns:\n embeddings_train: tensor\n\n \"\"\"\n formula_ = tf.nn.embedding_lookup(E, formula)\n start_token_ = tf.reshape(start_token, [1, 1, dim])\n start_tokens = tf.tile(start_token_, multiples=[batch_size, 1, 
1])\n embeddings = tf.concat([start_tokens, formula_[:, :-1, :]], axis=1)\n\n return embeddings\n\n\ndef embedding_initializer():\n \"\"\"Returns initializer for embeddings\"\"\"\n def _initializer(shape, dtype, partition_info=None):\n E = tf.random_uniform(shape, minval=-1.0, maxval=1.0, dtype=dtype)\n E = tf.nn.l2_normalize(E, -1)\n return E\n\n return _initializer\n"
] | [
[
"numpy.asarray"
],
[
"numpy.array_equal"
],
[
"tensorflow.nn.l2_normalize",
"tensorflow.concat",
"tensorflow.nn.embedding_lookup",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.variable_scope",
"tensorflow.random_uniform",
"tensorflow.tile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
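The im2latex record above includes pad_batch_formulas, which pads token-id sequences to a common width and appends an end token to each; a short worked example follows, assuming the repository root is on PYTHONPATH so that model.utils.text is importable, with hypothetical token ids — illustrative only.

# Worked example for pad_batch_formulas.
# Assumptions: repo root on PYTHONPATH; the token ids 3, 1, 2, 5 and the
# pad/end ids are hypothetical.
from model.utils.text import pad_batch_formulas

formulas = [[3, 1, 2], [5]]                      # two token-id sequences
batch, lengths = pad_batch_formulas(formulas, id_pad=0, id_end=1)

# max_len defaults to the longest formula (3), so the output width is 4:
# each row gets id_end appended after its tokens, then id_pad fills the rest.
print(batch)    # [[3 1 2 1]
                #  [5 1 0 0]]
print(lengths)  # [4 2]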
Devanthro/ball_in_socket_estimator | [
"5793db2dfd22b693c082694c2130a16c92164d70"
] | [
"python_old/magnetic_field_simulation.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom magpylib.source.magnet import Box,Cylinder\nfrom magpylib import Collection, displaySystem, Sensor\nfrom scipy.optimize import fsolve, least_squares\nimport matplotlib.animation as manimation\nimport random\nimport MDAnalysis\nimport MDAnalysis.visualization.streamlines_3D\nimport mayavi, mayavi.mlab\n\n\niterations = 360\n\nFFMpegWriter = manimation.writers['ffmpeg']\nmetadata = dict(title='Movie Test', artist='Matplotlib',\n comment='Movie support!')\nwriter = FFMpegWriter(fps=1, metadata=metadata)\n\n# define sensor\nsensor_pos = [(-22.7,7.7,0),(-14.7,-19.4,0),(14.7,-19.4,0),(22.7,7.7,0)]\n# sensor_rot = [[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]]]\nsensors = []\ni = 0\nfor pos in sensor_pos:\n # sensors.append(Sensor(pos=pos,angle=sensor_rot[i][0], axis=sensor_rot[i][1]))\n sensors.append(Sensor(pos=pos))\n\ndef gen_magnets():\n return [Box(mag=(0,0,500),dim=(10,10,10),pos=(0,0,12)), Box(mag=(0,500,0),dim=(10,10,10),pos=(0,12,0)), Box(mag=(0,500,0),dim=(10,10,10),pos=(10.392304845,-6,0),angle=60, axis=(0,0,1)), Box(mag=(0,500,0),dim=(10,10,10),pos=(-10.392304845,-6,0),angle=-60, axis=(0,0,1))]\n # return [Box(mag=(0,500,0),dim=(10,10,10),pos=(0,12,0)), Box(mag=(0,-500,0),dim=(10,10,10),pos=(10.392304845,-6,0),angle=60, axis=(0,0,1)), Box(mag=(0,0,500),dim=(10,10,10),pos=(-10.392304845,-6,0),angle=-60, axis=(0,0,1))]\n\nc = Collection(gen_magnets())\n\n# calculate B-field on a grid\nxs = np.linspace(-30,30,33)\nzs = np.linspace(-30,30,44)\nPOS = np.array([(x,0,z) for z in zs for x in xs])\n\n# create figure\nfig = plt.figure(figsize=(9,5))\nax1 = fig.add_subplot(121, projection='3d') # 3D-axis\nax2 = fig.add_subplot(122) # 2D-axis\n\nBs = c.getB(POS).reshape(44,33,3) #<--VECTORIZED\nX,Z = np.meshgrid(xs,zs)\nU,V = Bs[:,:,0], Bs[:,:,2]\nax2.streamplot(X, Z, U, V, color=np.log(U**2+V**2))\ndisplaySystem(c, subplotAx=ax1, suppress=True, sensors=sensors,direc=True)\nplt.show()\n\nfirst = True\n\nwith writer.saving(fig, \"writer_test.mp4\", 100):\n for iter in range(0,iterations,5):\n rot = [iter,0,0]#random.uniform(-90,90),random.uniform(-90,90)\n\n c = Collection(gen_magnets())\n c.rotate(rot[0],(1,0,0), anchor=(0,0,0))\n c.rotate(rot[1],(0,1,0), anchor=(0,0,0))\n c.rotate(rot[2],(0,0,1), anchor=(0,0,0))\n b_target = []\n for sens in sensors:\n b_target.append(sens.getB(c))\n # print(b_target)\n\n fig.clear()\n ax1 = fig.add_subplot(121, projection='3d') # 3D-axis\n ax2 = fig.add_subplot(122) # 2D-axis\n\n Bs = c.getB(POS).reshape(44,33,3) #<--VECTORIZED\n X,Z = np.meshgrid(xs,zs)\n U,V = Bs[:,:,0], Bs[:,:,2]\n ax2.streamplot(X, Z, U, V, color=np.log(U**2+V**2))\n\n def func(x):\n c = Collection(gen_magnets())\n c.rotate(x[0],(1,0,0), anchor=(0,0,0))\n c.rotate(x[1],(0,1,0), anchor=(0,0,0))\n c.rotate(x[2],(0,0,1), anchor=(0,0,0))\n b_error = 0\n i = 0\n for sens in sensors:\n b_error = b_error + np.linalg.norm(sens.getB(c)-b_target[i])\n i=i+1\n # print(b_error)\n return [b_error,b_error,b_error]\n\n res = least_squares(func, [0,0,0], bounds = ((-360,-360,-360), (360, 360, 360)))\n angle_error = ((rot[0]-res.x[0])**2+(rot[1]-res.x[1])**2+(rot[2]-res.x[2])**2)**0.5\n print(\"iteration (%d/%d) target %.3f %.3f %.3f result %.3f %.3f %.3f b-field error %.3f, angle_error %.3f\"%(iter,iterations,rot[0],rot[1],rot[2],res.x[0],res.x[1],res.x[2],res.cost,angle_error))\n c = Collection(gen_magnets())\n c.rotate(rot[0],(1,0,0), anchor=(0,0,0))\n c.rotate(rot[1],(0,1,0), anchor=(0,0,0))\n 
c.rotate(rot[2],(0,0,1), anchor=(0,0,0))\n result = Collection(gen_magnets())\n result.rotate(res.x[0],(1,0,0), anchor=(0,0,0))\n result.rotate(res.x[1],(0,1,0), anchor=(0,0,0))\n result.rotate(res.x[2],(0,0,1), anchor=(0,0,0))\n d = Collection(c,result)\n displaySystem(d, subplotAx=ax1, suppress=True, sensors=sensors)\n if first:\n plt.show()\n first = False\n writer.grab_frame()\n"
] | [
[
"numpy.log",
"numpy.linspace",
"scipy.optimize.least_squares",
"numpy.array",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
niksell/phenotypes-prediction-using-genotypes-Master-Thesis | [
"c20b6ef89d0979d15266ad572c5aed56e28c4229"
] | [
"code/IO/Output.py"
] | [
"import os.path\nimport time\nimport numpy as np\nfrom DataStructure.PatientPhenotype import PatientPhenotype\nfrom DataStructure.Snp import Snp\n\nclass Output:\n \n def __init__(self,path,numberOfChromosomes):\n \n self.__path = path\n self.__numberOfChromosomes = numberOfChromosomes\n \n def writePatientsList(self,patients,kind):\n \n path = self.__path + kind\n \n try:\n write = open(path,'w')\n for patient in patients.keys():\n write.write(patient.strip() + '\\n')\n \n write.close()\n except Exception as x:\n print(\"error = \",x)\n write.close()\n \n \n def writeSnpsList(self,chromosomes):\n \n for i in range(self.__numberOfChromosomes):\n \n chro = 'chr'+str(i+1)\n try:\n path = self.__path + chro + 'snpList.txt'\n write = open(path,'w')\n\n for snp in chromosomes[chro].keys():\n write.write(snp.strip() + '\\n')\n\n write.close()\n except Exception as x:\n print(\"error = \",x)\n write.close()\n \n def writeSnpsUsed(self,snpsIds,idToName,chromosomes,name = None):\n \n if not name:\n print(\"give a name to file\")\n return\n \n path = self.__path + name + \" ( \" + time.strftime(\"%d-%m-%Y\") + \" ).txt \" \n \n i=1\n while os.path.exists(path):\n \n path = self.__path + name + \" ( \" + time.strftime(\"%d-%m-%Y\") + \" ) \" + '_' + str(i)+\".txt\"\n i += 1\n \n snps = []\n for i in snpsIds:\n snps.append(idToName[i])\n \n print(\"snpsIds = \",len(snpsIds))\n print(\"idToName = \",len(idToName))\n \n write = open(path,'w')\n try:\n for i in range(1,23):\n \n chro = 'chr'+str(i)\n chromList = chromosomes[chro]\n\n if len(list(set(chromList) - set(snps))) < len(chromList):\n write.write(\"chromosome\"+str(i)+'\\n')\n for j in snps:\n if j in chromosomes[chro]:\n write.write(j + '\\t' + chromosomes[chro][j][0] + '\\t' + chromosomes[chro][j][1] + '\\n')\n write.write('\\n')\n\n write.close()\n except Exception as x:\n print(\"error = \",x)\n write.close()\n \n def saveData(self,ids,patients,data,chroms = {}):\n \n self.__snpCodeLog(ids['patients']['idToName'],ids['snps']['idToName'],patients,data)\n \n def writeDf(self,n,m,chromosomes,ids,patients):\n \n X = np.zeros((n,m),dtype = int)\n \n for i in range(self.__numberOfChromosomes):\n \n chro = 'chr'+str(i+1)\n path = self.__path + chro +'.lgen'\n \n \n \n if os.path.exists(path):\n \n try:\n f = open(path,'r')\n \n for line in f:\n try:\n \n patient = line.split()[0].strip()\n snp = line.split()[2].strip()\n allele1 = line.split()[3].strip()\n allele2 = line.split()[4].strip()\n \n snpp = Snp(snp,allele1,allele2)\n snpp.setSnpCode(chromosomes[chro][snp][0],chromosomes[chro][snp][1])\n code = snpp.getSnpCode()\n \n p = ids['patients']['nameToId'][patient]\n s = ids['snps']['nameToId'][snp]\n \n X[p,s] = code\n \n except Exception as x:\n \n print(\"error1 = \",x)\n f.close()\n \n f.close()\n \n except Exception as x:\n print(\"error2 = \",x)\n f.close()\n \n print(\"x shape is \", X.shape)\n write = open(self.__path + 'snpCodeTest1.csv','w')\n \n write.write('patients,')\n \n for i in range(len(X.T)):\n \n s = ids['snps']['idToName'][i]\n write.write(s + ',')\n \n write.write('label' + '\\n')\n \n for i in range(len(X)):\n \n p = ids['patients']['idToName'][i]\n write.write(p + ',')\n \n for j in range(len(X.T)):\n \n s = ids['snps']['idToName'][j]\n write.write(str(X[i,j]) + ',')\n \n write.write(str(patients[p].getCase()) + '\\n')\n \n \n write.close()\n \n \n \n def __patientsLogFile(self,ids,patientKind):\n \n write = open(self.__path + patientKind + 'Ids.txt','w')\n \n write.write(str(len(ids['nameToId'])) + '\\n')\n \n for patient 
in ids['nameToId'].keys():\n \n write.write(patient.strip() + '\\t' + str(ids['nameToId'][patient]).strip() + '\\n')\n \n write.close()\n \n def __snpsLogFile(self,ids,chroms):\n \n if len(chroms.keys()) > 0:\n \n write = open(self.__path + 'SnpsIds.txt','w')\n \n write.write(str(len(ids['nameToId'])) + '\\n')\n \n for chro in chroms.keys():\n \n for snp in chroms[chro].keys():\n write.write(snp.strip() + '\\t' + str(ids['nameToId'][snp.strip()]).strip() + '\\n')\n \n write.close()\n \n def __snpCodeLog(self,patientsIds,snpsIds,patients,data):\n \n write = open(self.__path + 'snpCode.txt','w')\n \n write.write(str(len(patientsIds)) + '\\n')\n write.write(str(len(snpsIds)) + '\\n')\n \n for i in range(len(data)):\n for j in range(len(data.T)):\n allele1 = patients[patientsIds[i]].getAllele1(snpsIds[j])\n allele2 = patients[patientsIds[i]].getAllele2(snpsIds[j])\n write.write(patientsIds[i].strip() + '\\t' + snpsIds[j].strip() + '\\t' + str(data[i,j]).strip() + '\\t' \n + allele1.strip() + '\\t' + allele2.strip() + '\\n')\n \n write.close()"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HugoSenetaire/vaeac | [
"451d34dd4986c52f2f37c508f03ee3db9e7408d3"
] | [
"fashion_mnist_dropout01/model.py"
] | [
"from torch import nn\nfrom torch.optim import Adam\n\nfrom mask_generators import ImageMaskGenerator, DropoutMaskGenerator\nfrom nn_utils import ResBlock, MemoryLayer, SkipConnection\nfrom prob_utils import normal_parse_params, GaussianLoss\n\n\n# sampler from the model generative distribution\n# here we return mean of the Gaussian to avoid white noise\ndef sampler(params):\n return normal_parse_params(params).mean\n\n\ndef optimizer(parameters):\n return Adam(parameters, lr=2e-4)\n\n\nbatch_size = 16\n\nreconstruction_log_prob = GaussianLoss()\n\nmask_generator = DropoutMaskGenerator(rate=0.9)\n\n# improve train computational stability by dividing the loss\n# by this scale factor right before backpropagation\nvlb_scale_factor = 28 ** 2\nclass StupidLayer(nn.Module):\n\n def __init__(self):\n super(StupidLayer, self).__init__()\n\n def forward(self,x):\n return x[:,:,2:-2,2:-2]\n\ndef MLPBlock(dim):\n return SkipConnection(\n nn.BatchNorm2d(dim),\n nn.LeakyReLU(),\n nn.Conv2d(dim, dim, 1)\n )\n\nproposal_network = nn.Sequential(\n nn.Conv2d(2, 8, 1,padding=2), #28,28,8\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.AvgPool2d(2, 2), # 16, 16,8\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.AvgPool2d(2, 2), nn.Conv2d(8, 16, 1), # 8, 8, 16\n ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), # 8,8, 16?\n nn.AvgPool2d(2, 2), nn.Conv2d(16, 32, 1), # 4, 4, 32\n ResBlock(32, 16), ResBlock(32, 16),\n ResBlock(32, 16), ResBlock(32, 16),\n nn.AvgPool2d(2, 2), nn.Conv2d(32, 64, 1), # 2,2 64\n ResBlock(64, 32), ResBlock(64, 32),\n ResBlock(64, 32), ResBlock(64, 32),\n nn.AvgPool2d(2, 2), nn.Conv2d(64, 128, 1),\n MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128),\n)\n\nprior_network = nn.Sequential(\n MemoryLayer('#0'),\n nn.Conv2d(2, 8, 1, padding=2), # 28,28,8\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n MemoryLayer('#1'),\n nn.AvgPool2d(2, 2),# 16,16,8\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n MemoryLayer('#2'),\n nn.AvgPool2d(2, 2), nn.Conv2d(8, 16, 1),# 8,8,16\n ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8),\n MemoryLayer('#3'),\n nn.AvgPool2d(2, 2), nn.Conv2d(16, 32, 1), # 4,4 ,32\n ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16),\n MemoryLayer('#4'),\n nn.AvgPool2d(2, 2), nn.Conv2d(32, 64, 1), #2,2 64\n ResBlock(64, 32), ResBlock(64, 32),\n ResBlock(64, 32), ResBlock(64, 32),\n MemoryLayer('#5'),\n nn.AvgPool2d(2, 2), nn.Conv2d(64, 128, 1), #1,1,128\n MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128),\n)\n\ngenerative_network = nn.Sequential(\n nn.Conv2d(64, 64, 1),\n MLPBlock(64), MLPBlock(64), MLPBlock(64), MLPBlock(64),\n nn.Conv2d(64, 32, 1), nn.Upsample(scale_factor=2),\n # MemoryLayer('#7', True), nn.Conv2d(384, 128, 1),\n # ResBlock(128, 64), ResBlock(128, 64),\n # ResBlock(128, 64), ResBlock(128, 64),\n # nn.Conv2d(128, 64, 1), nn.Upsample(scale_factor=2),\n # MemoryLayer('#6', True), nn.Conv2d(192, 64, 1),\n # ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32),\n # nn.Conv2d(64, 32, 1), nn.Upsample(scale_factor=2),\n MemoryLayer('#5', True), nn.Conv2d(96, 32, 1),\n ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16),\n nn.Conv2d(32, 16, 1), nn.Upsample(scale_factor=2),\n MemoryLayer('#4', True), nn.Conv2d(48, 16, 1),\n ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8),\n nn.Conv2d(16, 8, 1), nn.Upsample(scale_factor=2),\n MemoryLayer('#3', True), 
nn.Conv2d(24, 8, 1),\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.Upsample(scale_factor=2),\n MemoryLayer('#2', True), nn.Conv2d(16, 8, 1),\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.Upsample(scale_factor=2), #32,32,8\n\n # nn.Conv2dTranspose(8,8,stride=2,padding=1) \n MemoryLayer('#1', True), nn.Conv2d(16, 8, 1),\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n StupidLayer(),\n MemoryLayer('#0', True), nn.Conv2d(10, 8, 1),\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.Conv2d(8, 2, 1),\n\n)\n"
] | [
[
"torch.optim.Adam",
"torch.nn.Conv2d",
"torch.nn.AvgPool2d",
"torch.nn.Upsample",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ncduy0303/fairseq | [
"a086afb15b7d1737cd98831e975fd21b14ef6b07"
] | [
"fairseq/modules/conformer_layer.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport torch\nfrom typing import Optional\nfrom fairseq.modules import (\n LayerNorm,\n MultiheadAttention,\n ESPNETMultiHeadedAttention,\n RelPositionMultiHeadedAttention,\n RotaryPositionMultiHeadedAttention,\n)\nfrom fairseq.utils import get_activation_fn\n\n\nclass ConvolutionModule(torch.nn.Module):\n \"\"\"Convolution block used in the conformer block\"\"\"\n\n def __init__(\n self,\n embed_dim,\n channels,\n depthwise_kernel_size,\n dropout,\n activation_fn=\"swish\",\n bias=False,\n export=False,\n ):\n \"\"\"\n Args:\n embed_dim: Embedding dimension\n channels: Number of channels in depthwise conv layers\n depthwise_kernel_size: Depthwise conv layer kernel size\n dropout: dropout value\n activation_fn: Activation function to use after depthwise convolution kernel\n bias: If bias should be added to conv layers\n export: If layernorm should be exported to jit\n \"\"\"\n super(ConvolutionModule, self).__init__()\n assert (\n depthwise_kernel_size - 1\n ) % 2 == 0, \"kernel_size should be a odd number for 'SAME' padding\"\n self.layer_norm = LayerNorm(embed_dim, export=export)\n self.pointwise_conv1 = torch.nn.Conv1d(\n embed_dim,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.glu = torch.nn.GLU(dim=1)\n self.depthwise_conv = torch.nn.Conv1d(\n channels,\n channels,\n depthwise_kernel_size,\n stride=1,\n padding=(depthwise_kernel_size - 1) // 2,\n groups=channels,\n bias=bias,\n )\n self.batch_norm = torch.nn.BatchNorm1d(channels)\n self.activation = get_activation_fn(activation_fn)(channels)\n self.pointwise_conv2 = torch.nn.Conv1d(\n channels,\n embed_dim,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.dropout = torch.nn.Dropout(dropout)\n\n def forward(self, x):\n \"\"\"\n Args:\n x: Input of shape B X T X C\n Returns:\n Tensor of shape B X T X C\n \"\"\"\n x = self.layer_norm(x)\n # exchange the temporal dimension and the feature dimension\n x = x.transpose(1, 2)\n\n # GLU mechanism\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\n x = self.glu(x) # (batch, channel, dim)\n\n # 1D Depthwise Conv\n x = self.depthwise_conv(x)\n x = self.batch_norm(x)\n x = self.activation(x)\n\n x = self.pointwise_conv2(x)\n x = self.dropout(x)\n return x.transpose(1, 2)\n\n\nclass FeedForwardModule(torch.nn.Module):\n \"\"\"Positionwise feed forward layer used in conformer\"\"\"\n\n def __init__(\n self,\n input_feat,\n hidden_units,\n dropout1,\n dropout2,\n activation_fn=\"swish\",\n bias=True,\n ):\n \"\"\"\n Args:\n input_feat: Input feature dimension\n hidden_units: Hidden unit dimension\n dropout1: dropout value for layer1\n dropout2: dropout value for layer2\n activation_fn: Name of activation function\n bias: If linear layers should have bias\n \"\"\"\n\n super(FeedForwardModule, self).__init__()\n self.layer_norm = LayerNorm(input_feat)\n self.w_1 = torch.nn.Linear(input_feat, hidden_units, bias=bias)\n self.w_2 = torch.nn.Linear(hidden_units, input_feat, bias=bias)\n self.dropout1 = torch.nn.Dropout(dropout1)\n self.dropout2 = torch.nn.Dropout(dropout2)\n self.activation = get_activation_fn(activation_fn)(hidden_units)\n\n def forward(self, x):\n \"\"\"\n Args:\n x: Input Tensor of shape T X B X C\n Returns:\n Tensor of shape T X B X C\n \"\"\"\n x = self.layer_norm(x)\n x = self.w_1(x)\n x = self.activation(x)\n x = self.dropout1(x)\n x = 
self.w_2(x)\n return self.dropout2(x)\n\n\nclass ConformerEncoderLayer(torch.nn.Module):\n \"\"\"Conformer block based on https://arxiv.org/abs/2005.08100. We currently don't support relative positional encoding in MHA\"\"\"\n\n def __init__(\n self,\n embed_dim,\n ffn_embed_dim,\n attention_heads,\n dropout,\n use_fp16,\n depthwise_conv_kernel_size=31,\n activation_fn=\"swish\",\n attn_type=None,\n pos_enc_type=\"abs\",\n ):\n \"\"\"\n Args:\n embed_dim: Input embedding dimension\n ffn_embed_dim: FFN layer dimension\n attention_heads: Number of attention heads in MHA\n dropout: dropout value\n depthwise_conv_kernel_size: Size of kernel in depthwise conv layer in convolution module\n activation_fn: Activation function name to use in convulation block and feed forward block\n attn_type: MHA implementation from ESPNET vs fairseq\n pos_enc_type: Positional encoding type - abs, rope, rel_pos\n \"\"\"\n self.pos_enc_type = pos_enc_type\n super(ConformerEncoderLayer, self).__init__()\n\n self.ffn1 = FeedForwardModule(\n embed_dim,\n ffn_embed_dim,\n dropout,\n dropout,\n )\n\n self.self_attn_layer_norm = LayerNorm(embed_dim, export=False)\n self.self_attn_dropout = torch.nn.Dropout(dropout)\n if attn_type == \"espnet\":\n if self.pos_enc_type == \"rel_pos\":\n self.self_attn = RelPositionMultiHeadedAttention(\n embed_dim,\n attention_heads,\n dropout=dropout,\n )\n elif self.pos_enc_type == \"rope\":\n self.self_attn = RotaryPositionMultiHeadedAttention(\n embed_dim, attention_heads, dropout=dropout, precision=use_fp16\n )\n elif self.pos_enc_type == \"abs\":\n self.self_attn = ESPNETMultiHeadedAttention(\n embed_dim,\n attention_heads,\n dropout=dropout,\n )\n else:\n raise Exception(f\"Unsupported attention type {self.pos_enc_type}\")\n else:\n # Default to fairseq MHA\n self.self_attn = MultiheadAttention(\n embed_dim,\n attention_heads,\n dropout=dropout,\n )\n\n self.conv_module = ConvolutionModule(\n embed_dim=embed_dim,\n channels=embed_dim,\n depthwise_kernel_size=depthwise_conv_kernel_size,\n dropout=dropout,\n activation_fn=activation_fn,\n )\n\n self.ffn2 = FeedForwardModule(\n embed_dim,\n ffn_embed_dim,\n dropout,\n dropout,\n activation_fn=activation_fn,\n )\n self.final_layer_norm = LayerNorm(embed_dim, export=False)\n\n def forward(\n self,\n x,\n encoder_padding_mask: Optional[torch.Tensor],\n position_emb: Optional[torch.Tensor] = None,\n ):\n \"\"\"\n Args:\n x: Tensor of shape T X B X C\n encoder_padding_mask: Optional mask tensor\n positions:\n Returns:\n Tensor of shape T X B X C\n \"\"\"\n residual = x\n x = self.ffn1(x)\n x = x * 0.5 + residual\n residual = x\n x = self.self_attn_layer_norm(x)\n if self.pos_enc_type == \"rel_pos\":\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask,\n pos_emb=position_emb,\n need_weights=False,\n )\n else:\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask,\n need_weights=False,\n )\n x = self.self_attn_dropout(x)\n x = x + residual\n\n residual = x\n # TBC to BTC\n x = x.transpose(0, 1)\n x = self.conv_module(x)\n # BTC to TBC\n x = x.transpose(0, 1)\n x = residual + x\n\n residual = x\n x = self.ffn2(x)\n x = x * 0.5 + residual\n\n x = self.final_layer_norm(x)\n return x, attn\n\n\nclass ConformerWav2Vec2EncoderLayer(ConformerEncoderLayer):\n \"\"\"Encoder layer for Wav2vec2 encoder\"\"\"\n\n def forward(\n self,\n x: torch.Tensor,\n self_attn_mask: torch.Tensor = None,\n self_attn_padding_mask: torch.Tensor = None,\n need_weights: bool = 
False,\n att_args=None,\n position_emb=None,\n ):\n return super().forward(x, self_attn_padding_mask, position_emb)\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.GLU",
"torch.nn.Linear",
"torch.nn.Conv1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
robertmacdavid/approx-upf | [
"3f6da80226f94b175afe0c9d463fa38abfd743b9"
] | [
"python/lookup_tables.py"
] | [
"from typing import List, Callable, Dict, Tuple, Union\n\nimport math\nimport matplotlib.pyplot as plt\n\n\nclass ApproxMultiplicationTable:\n \"\"\"\n Multiplication done using a lookup table instead of a math unit\n \"\"\"\n table_entries: Dict[Tuple[int, int], int]\n num_significant_bits: int\n\n def __init__(self, num_significant_bits: int, unbiasing: float = 0.5):\n \"\"\"\n Create a lookup table that approximately multiplies pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n \"\"\"\n self.num_significant_bits = num_significant_bits\n self.table_entries = {}\n # Populate the lookup table\n for i in range(1 << num_significant_bits):\n for j in range(1 << num_significant_bits):\n # i and j will be rounded versions of more precise numbers.\n # To unbias the rounding error, we offset i and j slightly before dividing them\n value: int = round((i + unbiasing) * (j + unbiasing))\n self.table_entries[(i, j)] = value\n\n def compute(self, a: int, b: int) -> int:\n assert a > 0 and b > 0\n # the exponent can be computed in tofino using TCAM lookup tables. If the operands are 32 bits,\n # the lookup tables will have 32 entries\n exponent: int = max(a.bit_length(), b.bit_length())\n rshift: int = max(exponent - self.num_significant_bits, 0)\n i = a >> rshift\n j = b >> rshift\n value = self.table_entries[(i, j)]\n return value << (2 * rshift)\n\n def table_size(self) -> int:\n return len(self.table_entries)\n\n\nclass ApproxDivisionTable:\n \"\"\"\n Division done using a lookup table instead of a math unit\n \"\"\"\n table_entries: Dict[Tuple[int, int], Tuple[int, int]]\n num_significant_bits: int\n MIN_LOOKUP_ENTRY = 2 ** -16 # lookup entries smaller than this will be rounded down to 0\n\n def __init__(self, num_significant_bits: int, unbiasing: float = 0.5, lookup_value_mantissa_bits: int = 8):\n \"\"\"\n Create a lookup table that approximately divides pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n :param lookup_value_mantissa_bits: significant bits of division results stored in the lookup table\n \"\"\"\n self.num_significant_bits = num_significant_bits\n self.table_entries = {}\n # populate the lookup table\n for i in range(1 << num_significant_bits):\n for j in range(1 << num_significant_bits):\n # i and j will be rounded versions of more precise numbers.\n # To unbias the rounding error, we offset i and j slightly before dividing them\n value = (i + unbiasing) / (j + unbiasing)\n exp: int\n mantissa: int\n if value < self.MIN_LOOKUP_ENTRY:\n exp = 0\n mantissa = 0\n else:\n exp = math.floor(math.log(value, 2)) - lookup_value_mantissa_bits + 1\n mantissa = round(value * 2 ** (-exp))\n self.table_entries[(i, j)] = (mantissa, exp)\n\n def compute(self, a: int, b: int) -> float:\n assert a > 0 and b > 0\n\n exponent: int = max(a.bit_length(), b.bit_length())\n rshift: int = exponent - self.num_significant_bits\n i = a >> rshift\n j = b >> rshift\n\n mantissa, exponent = self.table_entries[(i, j)]\n\n return mantissa * (2 ** exponent)\n\n def table_size(self) -> int:\n return len(self.table_entries)\n\n\ndef 
plot_relative_error(a_vals: List[int], b_vals: List[int],\n true_func: Callable[[int, int], float],\n lookup: Union[ApproxMultiplicationTable, ApproxDivisionTable]):\n fig, ax = plt.subplots()\n\n ax.set_title(\"Relative error for %s with %d entries\" % (type(lookup).__name__, lookup.table_size()))\n ax.set_ylabel(\"Relative error (0.1 = 10%)\")\n ax.set_xlabel(\"Input a to f(a,b)\")\n\n for b in b_vals:\n errors = []\n for a in a_vals:\n approx_result = lookup.compute(a, b)\n true_result = true_func(a, b)\n error = (approx_result - true_result) / true_result\n errors.append(error)\n\n line, = ax.plot(a_vals, errors, label=\"%d\" % b, linewidth=1.0)\n\n ax.legend(title=\"Input b to f(a,b)\")\n plt.show()\n\n\ndef main():\n a_vals = [i for i in range(100000, 500000)]\n b_vals = [j for j in range(100000, 500000, 100000)]\n mult_lookup = ApproxMultiplicationTable(num_significant_bits=7)\n div_loookup = ApproxDivisionTable(num_significant_bits=7)\n plot_relative_error(a_vals, b_vals, lambda a, b: a * b, mult_lookup)\n plot_relative_error(a_vals, b_vals, lambda a, b: a / b, div_loookup)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
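The approx-upf record above implements multiplication and division via small lookup tables over truncated operands; the sketch below checks their relative error on one operand pair, assuming the repo's python/ directory is importable as lookup_tables and using arbitrary test operands — illustrative only.

# Quick accuracy check for the lookup-table arithmetic.
# Assumptions: the repo's python/ directory is on the path; the operands are
# arbitrary positive integers chosen for illustration.
from lookup_tables import ApproxMultiplicationTable, ApproxDivisionTable

mult = ApproxMultiplicationTable(num_significant_bits=7)  # 2**(2*7) = 16384 entries
div = ApproxDivisionTable(num_significant_bits=7)

a, b = 123456, 7890
approx_prod = mult.compute(a, b)
print(approx_prod, a * b, abs(approx_prod - a * b) / (a * b))   # approx, exact, relative error

approx_quot = div.compute(a, b)
print(approx_quot, a / b, abs(approx_quot - a / b) / (a / b))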
A-Quarter-Mile/Muskits | [
"60d80727d2ec6b8ec405502d67796e8df319ea82",
"60d80727d2ec6b8ec405502d67796e8df319ea82",
"60d80727d2ec6b8ec405502d67796e8df319ea82",
"60d80727d2ec6b8ec405502d67796e8df319ea82"
] | [
"muskit/layers/conformer/convolution.py",
"muskit/torch_utils/nets_utils.py",
"muskit/tasks/abs_task.py",
"muskit/svs/gst/style_encoder.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2020 Johns Hopkins University (Shinji Watanabe)\n# Northwestern Polytechnical University (Pengcheng Guo)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"ConvolutionModule definition.\"\"\"\n\nfrom torch import nn\n\n\nclass ConvolutionModule(nn.Module):\n \"\"\"ConvolutionModule in Conformer model.\n Args:\n channels (int): The number of channels of conv layers.\n kernel_size (int): Kernerl size of conv layers.\n \"\"\"\n\n def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):\n \"\"\"Construct an ConvolutionModule object.\"\"\"\n super(ConvolutionModule, self).__init__()\n # kernerl_size should be a odd number for 'SAME' padding\n assert (kernel_size - 1) % 2 == 0\n\n self.pointwise_conv1 = nn.Conv1d(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.depthwise_conv = nn.Conv1d(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n groups=channels,\n bias=bias,\n )\n self.norm = nn.BatchNorm1d(channels)\n self.pointwise_conv2 = nn.Conv1d(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.activation = activation\n\n def forward(self, x):\n \"\"\"Compute convolution module.\n Args:\n x (torch.Tensor): Input tensor (#batch, time, channels).\n Returns:\n torch.Tensor: Output tensor (#batch, time, channels).\n \"\"\"\n # exchange the temporal dimension and the feature dimension\n x = x.transpose(1, 2)\n\n # GLU mechanism\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\n\n # 1D Depthwise Conv\n x = self.depthwise_conv(x)\n x = self.activation(self.norm(x))\n\n x = self.pointwise_conv2(x)\n\n return x.transpose(1, 2)\n",
"# -*- coding: utf-8 -*-\n\n\"\"\"Network related utility tools.\"\"\"\n\nimport logging\nfrom typing import Dict\n\nimport numpy as np\nimport torch\n\n\ndef to_device(m, x):\n \"\"\"Send tensor into the device of the module.\n Args:\n m (torch.nn.Module): Torch module.\n x (Tensor): Torch tensor.\n Returns:\n Tensor: Torch tensor located in the same place as torch module.\n \"\"\"\n if isinstance(m, torch.nn.Module):\n device = next(m.parameters()).device\n elif isinstance(m, torch.Tensor):\n device = m.device\n else:\n raise TypeError(\n \"Expected torch.nn.Module or torch.tensor, \" f\"bot got: {type(m)}\"\n )\n return x.to(device)\n\n\ndef pad_list(xs, pad_value):\n \"\"\"Perform padding for the list of tensors.\n Args:\n xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].\n pad_value (float): Value for padding.\n Returns:\n Tensor: Padded tensor (B, Tmax, `*`).\n Examples:\n >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]\n >>> x\n [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]\n >>> pad_list(x, 0)\n tensor([[1., 1., 1., 1.],\n [1., 1., 0., 0.],\n [1., 0., 0., 0.]])\n \"\"\"\n n_batch = len(xs)\n max_len = max(x.size(0) for x in xs)\n pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)\n\n for i in range(n_batch):\n pad[i, : xs[i].size(0)] = xs[i]\n\n return pad\n\n\ndef make_pad_mask(lengths, xs=None, length_dim=-1):\n \"\"\"Make mask tensor containing indices of padded part.\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor.\n If set, masks will be the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor.\n See the example.\n Returns:\n Tensor: Mask tensor containing indices of padded part.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n Examples:\n With only lengths.\n >>> lengths = [5, 3, 2]\n >>> make_non_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n With the reference tensor.\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 1],\n [0, 0, 0, 1]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n With the reference tensor and dimension indicator.\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_pad_mask(lengths, xs, 1)\n tensor([[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)\n >>> make_pad_mask(lengths, xs, 2)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n \"\"\"\n if length_dim == 0:\n raise 
ValueError(\"length_dim cannot be 0: {}\".format(length_dim))\n\n if not isinstance(lengths, list):\n lengths = lengths.tolist()\n bs = int(len(lengths))\n if xs is None:\n maxlen = int(max(lengths))\n else:\n maxlen = xs.size(length_dim)\n\n seq_range = torch.arange(0, maxlen, dtype=torch.int64)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)\n seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n\n if xs is not None:\n assert xs.size(0) == bs, (xs.size(0), bs)\n\n if length_dim < 0:\n length_dim = xs.dim() + length_dim\n # ind = (:, None, ..., None, :, , None, ..., None)\n ind = tuple(\n slice(None) if i in (0, length_dim) else None for i in range(xs.dim())\n )\n mask = mask[ind].expand_as(xs).to(xs.device)\n return mask\n\n\ndef make_non_pad_mask(lengths, xs=None, length_dim=-1):\n \"\"\"Make mask tensor containing indices of non-padded part.\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor.\n If set, masks will be the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor.\n See the example.\n Returns:\n ByteTensor: mask tensor containing indices of padded part.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n Examples:\n With only lengths.\n >>> lengths = [5, 3, 2]\n >>> make_non_pad_mask(lengths)\n masks = [[1, 1, 1, 1 ,1],\n [1, 1, 1, 0, 0],\n [1, 1, 0, 0, 0]]\n With the reference tensor.\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_non_pad_mask(lengths, xs)\n tensor([[[1, 1, 1, 1],\n [1, 1, 1, 1]],\n [[1, 1, 1, 0],\n [1, 1, 1, 0]],\n [[1, 1, 0, 0],\n [1, 1, 0, 0]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_non_pad_mask(lengths, xs)\n tensor([[[1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0]],\n [[1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0]],\n [[1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)\n With the reference tensor and dimension indicator.\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_non_pad_mask(lengths, xs, 1)\n tensor([[[1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0]],\n [[1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n [[1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)\n >>> make_non_pad_mask(lengths, xs, 2)\n tensor([[[1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0]],\n [[1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0]],\n [[1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)\n \"\"\"\n return ~make_pad_mask(lengths, xs, length_dim)\n\n\ndef mask_by_length(xs, lengths, fill=0):\n \"\"\"Mask tensor according to length.\n Args:\n xs (Tensor): Batch of input tensor (B, `*`).\n lengths (LongTensor or List): Batch of lengths (B,).\n fill (int or float): Value to fill masked part.\n Returns:\n Tensor: Batch of masked input tensor (B, `*`).\n Examples:\n >>> x = torch.arange(5).repeat(3, 1) + 1\n >>> x\n tensor([[1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5]])\n >>> lengths = [5, 3, 2]\n >>> mask_by_length(x, lengths)\n 
tensor([[1, 2, 3, 4, 5],\n [1, 2, 3, 0, 0],\n [1, 2, 0, 0, 0]])\n \"\"\"\n assert xs.size(0) == len(lengths)\n ret = xs.data.new(*xs.size()).fill_(fill)\n for i, l in enumerate(lengths):\n ret[i, :l] = xs[i, :l]\n return ret\n\n\ndef th_accuracy(pad_outputs, pad_targets, ignore_label):\n \"\"\"Calculate accuracy.\n Args:\n pad_outputs (Tensor): Prediction tensors (B * Lmax, D).\n pad_targets (LongTensor): Target label tensors (B, Lmax, D).\n ignore_label (int): Ignore label id.\n Returns:\n float: Accuracy value (0.0 - 1.0).\n \"\"\"\n pad_pred = pad_outputs.view(\n pad_targets.size(0), pad_targets.size(1), pad_outputs.size(1)\n ).argmax(2)\n mask = pad_targets != ignore_label\n numerator = torch.sum(\n pad_pred.masked_select(mask) == pad_targets.masked_select(mask)\n )\n denominator = torch.sum(mask)\n return float(numerator) / float(denominator)\n\n\ndef to_torch_tensor(x):\n \"\"\"Change to torch.Tensor or ComplexTensor from numpy.ndarray.\n Args:\n x: Inputs. It should be one of numpy.ndarray, Tensor, ComplexTensor, and dict.\n Returns:\n Tensor or ComplexTensor: Type converted inputs.\n Examples:\n >>> xs = np.ones(3, dtype=np.float32)\n >>> xs = to_torch_tensor(xs)\n tensor([1., 1., 1.])\n >>> xs = torch.ones(3, 4, 5)\n >>> assert to_torch_tensor(xs) is xs\n >>> xs = {'real': xs, 'imag': xs}\n >>> to_torch_tensor(xs)\n ComplexTensor(\n Real:\n tensor([1., 1., 1.])\n Imag;\n tensor([1., 1., 1.])\n )\n \"\"\"\n # If numpy, change to torch tensor\n if isinstance(x, np.ndarray):\n if x.dtype.kind == \"c\":\n # Dynamically importing because torch_complex requires python3\n from torch_complex.tensor import ComplexTensor\n\n return ComplexTensor(x)\n else:\n return torch.from_numpy(x)\n\n # If {'real': ..., 'imag': ...}, convert to ComplexTensor\n elif isinstance(x, dict):\n # Dynamically importing because torch_complex requires python3\n from torch_complex.tensor import ComplexTensor\n\n if \"real\" not in x or \"imag\" not in x:\n raise ValueError(\"has 'real' and 'imag' keys: {}\".format(list(x)))\n # Relative importing because of using python3 syntax\n return ComplexTensor(x[\"real\"], x[\"imag\"])\n\n # If torch.Tensor, as it is\n elif isinstance(x, torch.Tensor):\n return x\n\n else:\n error = (\n \"x must be numpy.ndarray, torch.Tensor or a dict like \"\n \"{{'real': torch.Tensor, 'imag': torch.Tensor}}, \"\n \"but got {}\".format(type(x))\n )\n try:\n from torch_complex.tensor import ComplexTensor\n except Exception:\n # If PY2\n raise ValueError(error)\n else:\n # If PY3\n if isinstance(x, ComplexTensor):\n return x\n else:\n raise ValueError(error)\n\n\ndef get_subsample(train_args, mode, arch):\n \"\"\"Parse the subsampling factors from the args for the specified `mode` and `arch`.\n Args:\n train_args: argument Namespace containing options.\n mode: one of ('asr', 'mt', 'st')\n arch: one of ('rnn', 'rnn-t', 'rnn_mix', 'rnn_mulenc', 'transformer')\n Returns:\n np.ndarray / List[np.ndarray]: subsampling factors.\n \"\"\"\n if arch == \"transformer\":\n return np.array([1])\n\n elif mode == \"mt\" and arch == \"rnn\":\n # +1 means input (+1) and layers outputs (train_args.elayer)\n subsample = np.ones(train_args.elayers + 1, dtype=np.int)\n logging.warning(\"Subsampling is not performed for machine translation.\")\n logging.info(\"subsample: \" + \" \".join([str(x) for x in subsample]))\n return subsample\n\n elif (\n (mode == \"asr\" and arch in (\"rnn\", \"rnn-t\"))\n or (mode == \"mt\" and arch == \"rnn\")\n or (mode == \"st\" and arch == \"rnn\")\n ):\n subsample = 
np.ones(train_args.elayers + 1, dtype=np.int)\n if train_args.etype.endswith(\"p\") and not train_args.etype.startswith(\"vgg\"):\n ss = train_args.subsample.split(\"_\")\n for j in range(min(train_args.elayers + 1, len(ss))):\n subsample[j] = int(ss[j])\n else:\n logging.warning(\n \"Subsampling is not performed for vgg*. \"\n \"It is performed in max pooling layers at CNN.\"\n )\n logging.info(\"subsample: \" + \" \".join([str(x) for x in subsample]))\n return subsample\n\n elif mode == \"asr\" and arch == \"rnn_mix\":\n subsample = np.ones(\n train_args.elayers_sd + train_args.elayers + 1, dtype=np.int\n )\n if train_args.etype.endswith(\"p\") and not train_args.etype.startswith(\"vgg\"):\n ss = train_args.subsample.split(\"_\")\n for j in range(\n min(train_args.elayers_sd + train_args.elayers + 1, len(ss))\n ):\n subsample[j] = int(ss[j])\n else:\n logging.warning(\n \"Subsampling is not performed for vgg*. \"\n \"It is performed in max pooling layers at CNN.\"\n )\n logging.info(\"subsample: \" + \" \".join([str(x) for x in subsample]))\n return subsample\n\n elif mode == \"asr\" and arch == \"rnn_mulenc\":\n subsample_list = []\n for idx in range(train_args.num_encs):\n subsample = np.ones(train_args.elayers[idx] + 1, dtype=np.int)\n if train_args.etype[idx].endswith(\"p\") and not train_args.etype[\n idx\n ].startswith(\"vgg\"):\n ss = train_args.subsample[idx].split(\"_\")\n for j in range(min(train_args.elayers[idx] + 1, len(ss))):\n subsample[j] = int(ss[j])\n else:\n logging.warning(\n \"Encoder %d: Subsampling is not performed for vgg*. \"\n \"It is performed in max pooling layers at CNN.\",\n idx + 1,\n )\n logging.info(\"subsample: \" + \" \".join([str(x) for x in subsample]))\n subsample_list.append(subsample)\n return subsample_list\n\n else:\n raise ValueError(\"Invalid options: mode={}, arch={}\".format(mode, arch))\n\n\ndef rename_state_dict(\n old_prefix: str, new_prefix: str, state_dict: Dict[str, torch.Tensor]\n):\n \"\"\"Replace keys of old prefix with new prefix in state dict.\"\"\"\n # need this list not to break the dict iterator\n old_keys = [k for k in state_dict if k.startswith(old_prefix)]\n if len(old_keys) > 0:\n logging.warning(f\"Rename: {old_prefix} -> {new_prefix}\")\n for k in old_keys:\n v = state_dict.pop(k)\n new_k = k.replace(old_prefix, new_prefix)\n state_dict[new_k] = v\n\n\ndef get_activation(act):\n \"\"\"Return activation function.\"\"\"\n # Lazy load to avoid unused import\n from muskit.layers.conformer.swish import Swish\n\n activation_funcs = {\n \"hardtanh\": torch.nn.Hardtanh,\n \"tanh\": torch.nn.Tanh,\n \"relu\": torch.nn.ReLU,\n \"selu\": torch.nn.SELU,\n \"swish\": Swish,\n }\n\n return activation_funcs[act]()\n",
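The padding utilities in the entry above are typically used together when batching variable-length sequences: pad_list stacks the tensors with a fill value, make_pad_mask / make_non_pad_mask mark padded versus valid positions, and mask_by_length zeroes the padded tail. A short sketch reproducing the docstring examples; the module name is an assumption, everything else follows directly from the docstrings above.

import torch

# Hypothetical module name; adjust to wherever the file above is saved.
from nets_utils import pad_list, make_non_pad_mask, mask_by_length

xs = [torch.ones(4), torch.ones(2), torch.ones(1)]
lengths = [4, 2, 1]

padded = pad_list(xs, 0)                  # tensor([[1., 1., 1., 1.],
                                          #         [1., 1., 0., 0.],
                                          #         [1., 0., 0., 0.]])
mask = make_non_pad_mask(lengths)         # True (or 1) on valid frames, False/0 on padding
masked = mask_by_length(padded, lengths)  # same values as `padded` here

assert padded.shape == (3, 4)
assert bool(mask[0].all()) and not bool(mask[2, 1])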
"# Adopted from ESPNet: https://github.com/espnet/espnet\n\nfrom abc import ABC\nfrom abc import abstractmethod\nimport argparse\nfrom dataclasses import dataclass\nfrom distutils.version import LooseVersion\nimport functools\nimport logging\nimport os\nfrom pathlib import Path\nimport sys\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import Union\n\nimport humanfriendly\nimport numpy as np\nimport torch\nimport torch.multiprocessing\nimport torch.nn\nimport torch.optim\nfrom torch.utils.data import DataLoader\nfrom typeguard import check_argument_types\nfrom typeguard import check_return_type\nimport wandb\nimport yaml\n\n# from muskit import __version__\nfrom muskit.iterators.abs_iter_factory import AbsIterFactory\nfrom muskit.iterators.chunk_iter_factory import ChunkIterFactory\nfrom muskit.iterators.multiple_iter_factory import MultipleIterFactory\nfrom muskit.iterators.sequence_iter_factory import SequenceIterFactory\nfrom muskit.main_funcs.collect_stats import collect_stats\nfrom muskit.optimizers.sgd import SGD\nfrom muskit.samplers.build_batch_sampler import BATCH_TYPES\nfrom muskit.samplers.build_batch_sampler import build_batch_sampler\nfrom muskit.samplers.unsorted_batch_sampler import UnsortedBatchSampler\nfrom muskit.schedulers.noam_lr import NoamLR\nfrom muskit.schedulers.warmup_lr import WarmupLR\nfrom muskit.torch_utils.load_pretrained_model import load_pretrained_model\nfrom muskit.torch_utils.model_summary import model_summary\nfrom muskit.torch_utils.pytorch_version import pytorch_cudnn_version\nfrom muskit.torch_utils.set_all_random_seed import set_all_random_seed\nfrom muskit.train.abs_muskit_model import AbsMuskitModel\nfrom muskit.train.class_choices import ClassChoices\nfrom muskit.train.dataset import AbsDataset\nfrom muskit.train.dataset import DATA_TYPES\nfrom muskit.train.dataset import MuskitDataset\nfrom muskit.train.distributed_utils import DistributedOption\nfrom muskit.train.distributed_utils import free_port\nfrom muskit.train.distributed_utils import get_master_port\nfrom muskit.train.distributed_utils import get_node_rank\nfrom muskit.train.distributed_utils import get_num_nodes\nfrom muskit.train.distributed_utils import resolve_distributed_mode\nfrom muskit.train.iterable_dataset import IterableMuskitDataset\nfrom muskit.train.trainer import Trainer\nfrom muskit.utils.build_dataclass import build_dataclass\nfrom muskit.utils import config_argparse\nfrom muskit.utils.cli_utils import get_commandline_args\nfrom muskit.utils.get_default_kwargs import get_default_kwargs\nfrom muskit.utils.nested_dict_action import NestedDictAction\nfrom muskit.utils.types import humanfriendly_parse_size_or_none\nfrom muskit.utils.types import int_or_none\nfrom muskit.utils.types import str2bool\nfrom muskit.utils.types import str2triple_str\nfrom muskit.utils.types import str_or_int\nfrom muskit.utils.types import str_or_none\nfrom muskit.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump\n\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.5.0\"):\n from torch.multiprocessing.spawn import ProcessContext\nelse:\n from torch.multiprocessing.spawn import SpawnContext as ProcessContext\n\n\ndef my_parameters_svs(model):\n r\"\"\"Returns an iterator over immediate children modules.\n\n Yields:\n Module: a child module\n \"\"\"\n for name, module in model.named_parameters():\n if \"predictor.pre\" in name:\n 
continue\n yield module\n\noptim_classes = dict(\n adam=torch.optim.Adam,\n sgd=SGD,\n adadelta=torch.optim.Adadelta,\n adagrad=torch.optim.Adagrad,\n adamax=torch.optim.Adamax,\n asgd=torch.optim.ASGD,\n lbfgs=torch.optim.LBFGS,\n rmsprop=torch.optim.RMSprop,\n rprop=torch.optim.Rprop,\n)\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.2.0\"):\n optim_classes[\"adamw\"] = torch.optim.AdamW\ntry:\n import torch_optimizer\n\n optim_classes.update(\n accagd=torch_optimizer.AccSGD,\n adabound=torch_optimizer.AdaBound,\n adamod=torch_optimizer.AdaMod,\n diffgrad=torch_optimizer.DiffGrad,\n lamb=torch_optimizer.Lamb,\n novograd=torch_optimizer.NovoGrad,\n pid=torch_optimizer.PID,\n # torch_optimizer<=0.0.1a10 doesn't support\n # qhadam=torch_optimizer.QHAdam,\n qhm=torch_optimizer.QHM,\n radam=torch_optimizer.RAdam,\n sgdw=torch_optimizer.SGDW,\n yogi=torch_optimizer.Yogi,\n )\n del torch_optimizer\nexcept ImportError:\n pass\ntry:\n import apex\n\n optim_classes.update(\n fusedadam=apex.optimizers.FusedAdam,\n fusedlamb=apex.optimizers.FusedLAMB,\n fusednovograd=apex.optimizers.FusedNovoGrad,\n fusedsgd=apex.optimizers.FusedSGD,\n )\n del apex\nexcept ImportError:\n pass\ntry:\n import fairscale\nexcept ImportError:\n fairscale = None\n\n\nscheduler_classes = dict(\n ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau,\n lambdalr=torch.optim.lr_scheduler.LambdaLR,\n steplr=torch.optim.lr_scheduler.StepLR,\n multisteplr=torch.optim.lr_scheduler.MultiStepLR,\n exponentiallr=torch.optim.lr_scheduler.ExponentialLR,\n CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR,\n)\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.1.0\"):\n scheduler_classes.update(\n noamlr=NoamLR,\n warmuplr=WarmupLR,\n )\nif LooseVersion(torch.__version__) >= LooseVersion(\"1.3.0\"):\n CosineAnnealingWarmRestarts = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts\n scheduler_classes.update(\n cycliclr=torch.optim.lr_scheduler.CyclicLR,\n onecyclelr=torch.optim.lr_scheduler.OneCycleLR,\n CosineAnnealingWarmRestarts=CosineAnnealingWarmRestarts,\n )\n# To lower keys\noptim_classes = {k.lower(): v for k, v in optim_classes.items()}\nscheduler_classes = {k.lower(): v for k, v in scheduler_classes.items()}\n\n\n@dataclass\nclass IteratorOptions:\n preprocess_fn: callable\n collate_fn: callable\n data_path_and_name_and_type: list\n shape_files: list\n batch_size: int\n batch_bins: int\n batch_type: str\n max_cache_size: float\n max_cache_fd: int\n distributed: bool\n num_batches: Optional[int]\n num_iters_per_epoch: Optional[int]\n train: bool\n\n\nclass AbsTask(ABC):\n # Use @staticmethod, or @classmethod,\n # instead of instance method to avoid God classes\n\n # If you need more than one optimizers, change this value in inheritance\n num_optimizers: int = 1\n trainer = Trainer\n class_choices_list: List[ClassChoices] = []\n\n def __init__(self):\n raise RuntimeError(\"This class can't be instantiated.\")\n\n @classmethod\n @abstractmethod\n def add_task_arguments(cls, parser: argparse.ArgumentParser):\n pass\n\n @classmethod\n @abstractmethod\n def build_collate_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]:\n \"\"\"Return \"collate_fn\", which is a callable object and given to DataLoader.\n >>> from torch.utils.data import DataLoader\n >>> loader = DataLoader(collate_fn=cls.build_collate_fn(args, train=True), ...)\n In many cases, you can use our common collate_fn.\n \"\"\"\n raise NotImplementedError\n\n 
@classmethod\n @abstractmethod\n def build_preprocess_fn(\n cls, args: argparse.Namespace, train: bool\n ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]:\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def required_data_names(\n cls, train: bool = True, inference: bool = False\n ) -> Tuple[str, ...]:\n \"\"\"Define the required names by Task\n This function is used by\n >>> cls.check_task_requirements()\n If your model is defined as following,\n >>> from muskit.train.abs_muskit_model import AbsMuskitModel\n >>> class Model(AbsMuskitModel):\n ... def forward(self, input, output, opt=None): pass\n then \"required_data_names\" should be as\n >>> required_data_names = ('input', 'output')\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def optional_data_names(\n cls, train: bool = True, inference: bool = False\n ) -> Tuple[str, ...]:\n \"\"\"Define the optional names by Task\n This function is used by\n >>> cls.check_task_requirements()\n If your model is defined as follows,\n >>> from muskit.train.abs_muskit_model import AbsMuskitModel\n >>> class Model(AbsMuskitModel):\n ... def forward(self, input, output, opt=None): pass\n then \"optional_data_names\" should be as\n >>> optional_data_names = ('opt',)\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def build_model(cls, args: argparse.Namespace) -> AbsMuskitModel:\n raise NotImplementedError\n\n @classmethod\n def get_parser(cls) -> config_argparse.ArgumentParser:\n assert check_argument_types()\n\n class ArgumentDefaultsRawTextHelpFormatter(\n argparse.RawTextHelpFormatter,\n argparse.ArgumentDefaultsHelpFormatter,\n ):\n pass\n\n parser = config_argparse.ArgumentParser(\n description=\"base parser\",\n formatter_class=ArgumentDefaultsRawTextHelpFormatter,\n )\n\n # NOTE(kamo): Use '_' instead of '-' to avoid confusion.\n # I think '-' looks really confusing if it's written in yaml.\n\n # NOTE(kamo): add_arguments(..., required=True) can't be used\n # to provide --print_config mode. Instead of it, do as\n parser.set_defaults(required=[\"output_dir\"])\n\n group = parser.add_argument_group(\"Common configuration\")\n\n group.add_argument(\n \"--print_config\",\n action=\"store_true\",\n help=\"Print the config file and exit\",\n )\n group.add_argument(\n \"--log_level\",\n type=lambda x: x.upper(),\n default=\"INFO\",\n choices=(\"ERROR\", \"WARNING\", \"INFO\", \"DEBUG\", \"NOTSET\"),\n help=\"The verbose level of logging\",\n )\n group.add_argument(\n \"--dry_run\",\n type=str2bool,\n default=False,\n help=\"Perform process without training\",\n )\n group.add_argument(\n \"--iterator_type\",\n type=str,\n choices=[\"sequence\", \"chunk\", \"task\", \"none\"],\n default=\"sequence\",\n help=\"Specify iterator type\",\n )\n\n group.add_argument(\"--output_dir\", type=str_or_none, default=None)\n group.add_argument(\n \"--ngpu\",\n type=int,\n default=0,\n help=\"The number of gpus. 
0 indicates CPU mode\",\n )\n group.add_argument(\n \"--gpu_id\",\n type=int,\n default=0,\n help=\"GPU_id, only works when ngpu=1\",\n )\n group.add_argument(\n \"--pitch_aug_min\",\n type=int,\n default=0,\n help=\"The lower bound of midi semitone when pitch augmentation\",\n )\n group.add_argument(\n \"--pitch_aug_max\",\n type=int,\n default=0,\n help=\"The upper bound of midi semitone when pitch augmentation\",\n )\n group.add_argument(\n \"--pitch_mean\",\n type=str,\n default=\"None\",\n help=\"The mean midi-value of training split, None means no-adaptive-pitch-augmentation\",\n )\n group.add_argument(\n \"--time_aug_min\",\n type=float,\n default=1,\n help=\"The lower bound of time augmentation factor\",\n )\n group.add_argument(\n \"--time_aug_max\",\n type=float,\n default=1,\n help=\"The upper bound of time augmentation factor\",\n )\n group.add_argument(\n \"--random_crop\",\n type=bool,\n default=False,\n help=\"Flag to use random crop augmentation during training\",\n )\n group.add_argument(\n \"--mask_aug\",\n type=bool,\n default=False,\n help=\"Flag to use masking augmentation during training\",\n )\n group.add_argument(\"--seed\", type=int, default=0, help=\"Random seed\")\n group.add_argument(\n \"--num_workers\",\n type=int,\n default=1,\n help=\"The number of workers used for DataLoader\",\n )\n group.add_argument(\n \"--num_att_plot\",\n type=int,\n default=3,\n help=\"The number images to plot the outputs from attention. \"\n \"This option makes sense only when attention-based model\",\n )\n\n group = parser.add_argument_group(\"distributed training related\")\n group.add_argument(\n \"--dist_backend\",\n default=\"nccl\",\n type=str,\n help=\"distributed backend\",\n )\n group.add_argument(\n \"--dist_init_method\",\n type=str,\n default=\"env://\",\n help='if init_method=\"env://\", env values of \"MASTER_PORT\", \"MASTER_ADDR\", '\n '\"WORLD_SIZE\", and \"RANK\" are referred.',\n )\n group.add_argument(\n \"--dist_world_size\",\n default=None,\n type=int_or_none,\n help=\"number of nodes for distributed training\",\n )\n group.add_argument(\n \"--dist_rank\",\n type=int_or_none,\n default=None,\n help=\"node rank for distributed training\",\n )\n group.add_argument(\n # Not starting with \"dist_\" for compatibility to launch.py\n \"--local_rank\",\n type=int_or_none,\n default=None,\n help=\"local rank for distributed training. This option is used if \"\n \"--multiprocessing_distributed=false\",\n )\n group.add_argument(\n \"--dist_master_addr\",\n default=None,\n type=str_or_none,\n help=\"The master address for distributed training. \"\n \"This value is used when dist_init_method == 'env://'\",\n )\n group.add_argument(\n \"--dist_master_port\",\n default=None,\n type=int_or_none,\n help=\"The master port for distributed training\"\n \"This value is used when dist_init_method == 'env://'\",\n )\n group.add_argument(\n \"--dist_launcher\",\n default=None,\n type=str_or_none,\n choices=[\"slurm\", \"mpi\", None],\n help=\"The launcher type for distributed training\",\n )\n group.add_argument(\n \"--multiprocessing_distributed\",\n default=False,\n type=str2bool,\n help=\"Use multi-processing distributed training to launch \"\n \"N processes per node, which has N GPUs. 
This is the \"\n \"fastest way to use PyTorch for either single node or \"\n \"multi node data parallel training\",\n )\n group.add_argument(\n \"--unused_parameters\",\n type=str2bool,\n default=False,\n help=\"Whether to use the find_unused_parameters in \"\n \"torch.nn.parallel.DistributedDataParallel \",\n )\n group.add_argument(\n \"--sharded_ddp\",\n default=False,\n type=str2bool,\n help=\"Enable sharded training provided by fairscale\",\n )\n\n group = parser.add_argument_group(\"cudnn mode related\")\n group.add_argument(\n \"--cudnn_enabled\",\n type=str2bool,\n default=torch.backends.cudnn.enabled,\n help=\"Enable CUDNN\",\n )\n group.add_argument(\n \"--cudnn_benchmark\",\n type=str2bool,\n default=torch.backends.cudnn.benchmark,\n help=\"Enable cudnn-benchmark mode\",\n )\n group.add_argument(\n \"--cudnn_deterministic\",\n type=str2bool,\n default=True,\n help=\"Enable cudnn-deterministic mode\",\n )\n\n group = parser.add_argument_group(\"collect stats mode related\")\n group.add_argument(\n \"--collect_stats\",\n type=str2bool,\n default=False,\n help='Perform on \"collect stats\" mode',\n )\n group.add_argument(\n \"--write_collected_feats\",\n type=str2bool,\n default=False,\n help='Write the output features from the model when \"collect stats\" mode',\n )\n\n group = parser.add_argument_group(\"Trainer related\")\n group.add_argument(\n \"--vocoder_checkpoint\",\n default=\"\",\n type=str,\n help=\"checkpoint file to be loaded.\",\n )\n group.add_argument(\n \"--vocoder_config\",\n default=\"\",\n type=str,\n help=\"yaml format configuration file. if not explicitly provided, \"\n \"it will be searched in the checkpoint directory. (default=None)\",\n )\n group.add_argument(\n \"--vocoder_normalize_before\",\n default=False,\n action=\"store_true\",\n help=\"whether to perform feature normalization before input to the model. \"\n \"if true, it assumes that the feature is de-normalized. this is useful when \"\n \"text2mel model and vocoder use different feature statistics.\",\n )\n\n group.add_argument(\n \"--max_epoch\",\n type=int,\n default=40,\n help=\"The maximum number epoch to train\",\n )\n group.add_argument(\n \"--patience\",\n type=int_or_none,\n default=None,\n help=\"Number of epochs to wait without improvement \"\n \"before stopping the training\",\n )\n group.add_argument(\n \"--val_scheduler_criterion\",\n type=str,\n nargs=2,\n default=(\"valid\", \"loss\"),\n help=\"The criterion used for the value given to the lr scheduler. \"\n 'Give a pair referring the phase, \"train\" or \"valid\",'\n 'and the criterion name. The mode specifying \"min\" or \"max\" can '\n \"be changed by --scheduler_conf\",\n )\n group.add_argument(\n \"--early_stopping_criterion\",\n type=str,\n nargs=3,\n default=(\"valid\", \"loss\", \"min\"),\n help=\"The criterion used for judging of early stopping. \"\n 'Give a pair referring the phase, \"train\" or \"valid\",'\n 'the criterion name and the mode, \"min\" or \"max\", e.g. \"acc,max\".',\n )\n group.add_argument(\n \"--best_model_criterion\",\n type=str2triple_str,\n nargs=\"+\",\n default=[\n (\"train\", \"loss\", \"min\"),\n (\"valid\", \"loss\", \"min\"),\n (\"train\", \"acc\", \"max\"),\n (\"valid\", \"acc\", \"max\"),\n ],\n help=\"The criterion used for judging of the best model. \"\n 'Give a pair referring the phase, \"train\" or \"valid\",'\n 'the criterion name, and the mode, \"min\" or \"max\", e.g. 
\"acc,max\".',\n )\n group.add_argument(\n \"--keep_nbest_models\",\n type=int,\n nargs=\"+\",\n default=[10],\n help=\"Remove previous snapshots excluding the n-best scored epochs\",\n )\n group.add_argument(\n \"--grad_clip\",\n type=float,\n default=5.0,\n help=\"Gradient norm threshold to clip\",\n )\n group.add_argument(\n \"--grad_clip_type\",\n type=float,\n default=2.0,\n help=\"The type of the used p-norm for gradient clip. Can be inf\",\n )\n group.add_argument(\n \"--grad_noise\",\n type=str2bool,\n default=False,\n help=\"The flag to switch to use noise injection to \"\n \"gradients during training\",\n )\n group.add_argument(\n \"--accum_grad\",\n type=int,\n default=1,\n help=\"The number of gradient accumulation\",\n )\n group.add_argument(\n \"--no_forward_run\",\n type=str2bool,\n default=False,\n help=\"Just only iterating data loading without \"\n \"model forwarding and training\",\n )\n group.add_argument(\n \"--resume\",\n type=str2bool,\n default=False,\n help=\"Enable resuming if checkpoint is existing\",\n )\n group.add_argument(\n \"--train_dtype\",\n default=\"float32\",\n choices=[\"float16\", \"float32\", \"float64\"],\n help=\"Data type for training.\",\n )\n group.add_argument(\n \"--use_amp\",\n type=str2bool,\n default=False,\n help=\"Enable Automatic Mixed Precision. This feature requires pytorch>=1.6\",\n )\n group.add_argument(\n \"--log_interval\",\n type=int_or_none,\n default=None,\n help=\"Show the logs every the number iterations in each epochs at the \"\n \"training phase. If None is given, it is decided according the number \"\n \"of training samples automatically .\",\n )\n group.add_argument(\n \"--use_tensorboard\",\n type=str2bool,\n default=True,\n help=\"Enable tensorboard logging\",\n )\n group.add_argument(\n \"--use_wandb\",\n type=str2bool,\n default=False,\n help=\"Enable wandb logging\",\n )\n group.add_argument(\n \"--wandb_project\",\n type=str,\n default=None,\n help=\"Specify wandb project\",\n )\n group.add_argument(\n \"--wandb_id\",\n type=str,\n default=None,\n help=\"Specify wandb id\",\n )\n group.add_argument(\n \"--detect_anomaly\",\n type=str2bool,\n default=False,\n help=\"Set torch.autograd.set_detect_anomaly\",\n )\n\n group = parser.add_argument_group(\"Pretraining model related\")\n group.add_argument(\"--pretrain_path\", help=\"This option is obsoleted\")\n group.add_argument(\n \"--init_param\",\n type=str,\n default=[],\n nargs=\"*\",\n help=\"Specify the file path used for initialization of parameters. 
\"\n \"The format is '<file_path>:<src_key>:<dst_key>:<exclude_keys>', \"\n \"where file_path is the model file path, \"\n \"src_key specifies the key of model states to be used in the model file, \"\n \"dst_key specifies the attribute of the model to be initialized, \"\n \"and exclude_keys excludes keys of model states for the initialization.\"\n \"e.g.\\n\"\n \" # Load all parameters\"\n \" --init_param some/where/model.pth\\n\"\n \" # Load only decoder parameters\"\n \" --init_param some/where/model.pth:decoder:decoder\\n\"\n \" # Load only decoder parameters excluding decoder.embed\"\n \" --init_param some/where/model.pth:decoder:decoder:decoder.embed\\n\"\n \" --init_param some/where/model.pth:decoder:decoder:decoder.embed\\n\",\n )\n group.add_argument(\n \"--ignore_init_mismatch\",\n type=str2bool,\n default=False,\n help=\"Ignore size mismatch when loading pre-trained model\",\n )\n group.add_argument(\n \"--freeze_param\",\n type=str,\n default=[],\n nargs=\"*\",\n help=\"Freeze parameters\",\n )\n\n group = parser.add_argument_group(\"BatchSampler related\")\n group.add_argument(\n \"--num_iters_per_epoch\",\n type=int_or_none,\n default=None,\n help=\"Restrict the number of iterations for training per epoch\",\n )\n group.add_argument(\n \"--batch_size\",\n type=int,\n default=20,\n help=\"The mini-batch size used for training. Used if batch_type='unsorted',\"\n \" 'sorted', or 'folded'.\",\n )\n group.add_argument(\n \"--valid_batch_size\",\n type=int_or_none,\n default=None,\n help=\"If not given, the value of --batch_size is used\",\n )\n group.add_argument(\n \"--batch_bins\",\n type=int,\n default=1000000,\n help=\"The number of batch bins. Used if batch_type='length' or 'numel'\",\n )\n group.add_argument(\n \"--valid_batch_bins\",\n type=int_or_none,\n default=None,\n help=\"If not given, the value of --batch_bins is used\",\n )\n\n group.add_argument(\"--train_shape_file\", type=str, action=\"append\", default=[])\n group.add_argument(\"--valid_shape_file\", type=str, action=\"append\", default=[])\n\n group = parser.add_argument_group(\"Sequence iterator related\")\n _batch_type_help = \"\"\n for key, value in BATCH_TYPES.items():\n _batch_type_help += f'\"{key}\":\\n{value}\\n'\n group.add_argument(\n \"--batch_type\",\n type=str,\n default=\"folded\",\n choices=list(BATCH_TYPES),\n help=_batch_type_help,\n )\n group.add_argument(\n \"--valid_batch_type\",\n type=str_or_none,\n default=None,\n choices=list(BATCH_TYPES) + [None],\n help=\"If not given, the value of --batch_type is used\",\n )\n group.add_argument(\"--fold_length\", type=int, action=\"append\", default=[])\n group.add_argument(\n \"--sort_in_batch\",\n type=str,\n default=\"descending\",\n choices=[\"descending\", \"ascending\"],\n help=\"Sort the samples in each mini-batches by the sample \"\n 'lengths. To enable this, \"shape_file\" must have the length information.',\n )\n group.add_argument(\n \"--sort_batch\",\n type=str,\n default=\"descending\",\n choices=[\"descending\", \"ascending\"],\n help=\"Sort mini-batches by the sample lengths\",\n )\n group.add_argument(\n \"--multiple_iterator\",\n type=str2bool,\n default=False,\n help=\"Use multiple iterator mode\",\n )\n\n group = parser.add_argument_group(\"Chunk iterator related\")\n group.add_argument(\n \"--chunk_length\",\n type=str_or_int,\n default=500,\n help=\"Specify chunk length. e.g. '300', '300,400,500', or '300-400'.\"\n \"If multiple numbers separated by command are given, \"\n \"one of them is selected randomly for each samples. 
\"\n \"If two numbers are given with '-', it indicates the range of the choices. \"\n \"Note that if the sequence length is shorter than the all chunk_lengths, \"\n \"the sample is discarded. \",\n )\n group.add_argument(\n \"--chunk_shift_ratio\",\n type=float,\n default=0.5,\n help=\"Specify the shift width of chunks. If it's less than 1, \"\n \"allows the overlapping and if bigger than 1, there are some gaps \"\n \"between each chunk.\",\n )\n group.add_argument(\n \"--num_cache_chunks\",\n type=int,\n default=1024,\n help=\"Shuffle in the specified number of chunks and generate mini-batches \"\n \"More larger this value, more randomness can be obtained.\",\n )\n\n group = parser.add_argument_group(\"Dataset related\")\n _data_path_and_name_and_type_help = (\n \"Give three words splitted by comma. It's used for the training data. \"\n \"e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. \"\n \"The first value, some/path/a.scp, indicates the file path, \"\n \"and the second, foo, is the key name used for the mini-batch data, \"\n \"and the last, sound, decides the file type. \"\n \"This option is repeatable, so you can input any number of features \"\n \"for your task. Supported file types are as follows:\\n\\n\"\n )\n for key, dic in DATA_TYPES.items():\n _data_path_and_name_and_type_help += f'\"{key}\":\\n{dic[\"help\"]}\\n\\n'\n\n group.add_argument(\n \"--train_data_path_and_name_and_type\",\n type=str2triple_str,\n action=\"append\",\n default=[],\n help=_data_path_and_name_and_type_help,\n )\n group.add_argument(\n \"--valid_data_path_and_name_and_type\",\n type=str2triple_str,\n action=\"append\",\n default=[],\n )\n group.add_argument(\n \"--allow_variable_data_keys\",\n type=str2bool,\n default=False,\n help=\"Allow the arbitrary keys for mini-batch with ignoring \"\n \"the task requirements\",\n )\n group.add_argument(\n \"--max_cache_size\",\n type=humanfriendly.parse_size,\n default=0.0,\n help=\"The maximum cache size for data loader. e.g. 10MB, 20GB.\",\n )\n group.add_argument(\n \"--max_cache_fd\",\n type=int,\n default=32,\n help=\"The maximum number of file descriptors to be kept \"\n \"as opened for ark files. \"\n \"This feature is only valid when data type is 'kaldi_ark'.\",\n )\n group.add_argument(\n \"--valid_max_cache_size\",\n type=humanfriendly_parse_size_or_none,\n default=None,\n help=\"The maximum cache size for validation data loader. e.g. 10MB, 20GB. 
\"\n \"If None, the 5 percent size of --max_cache_size\",\n )\n\n group = parser.add_argument_group(\"Optimizer related\")\n for i in range(1, cls.num_optimizers + 1):\n suf = \"\" if i == 1 else str(i)\n group.add_argument(\n f\"--optim{suf}\",\n type=lambda x: x.lower(),\n default=\"adadelta\",\n choices=list(optim_classes),\n help=\"The optimizer type\",\n )\n group.add_argument(\n f\"--optim{suf}_conf\",\n action=NestedDictAction,\n default=dict(),\n help=\"The keyword arguments for optimizer\",\n )\n group.add_argument(\n f\"--scheduler{suf}\",\n type=lambda x: str_or_none(x.lower()),\n default=None,\n choices=list(scheduler_classes) + [None],\n help=\"The lr scheduler type\",\n )\n group.add_argument(\n f\"--scheduler{suf}_conf\",\n action=NestedDictAction,\n default=dict(),\n help=\"The keyword arguments for lr scheduler\",\n )\n\n cls.trainer.add_arguments(parser)\n cls.add_task_arguments(parser)\n\n assert check_return_type(parser)\n return parser\n\n @classmethod\n def build_optimizers(\n cls,\n args: argparse.Namespace,\n model: torch.nn.Module,\n ) -> List[torch.optim.Optimizer]:\n if cls.num_optimizers != 1:\n raise RuntimeError(\n \"build_optimizers() must be overridden if num_optimizers != 1\"\n )\n\n optim_class = optim_classes.get(args.optim)\n if optim_class is None:\n raise ValueError(f\"must be one of {list(optim_classes)}: {args.optim}\")\n if args.sharded_ddp:\n if fairscale is None:\n raise RuntimeError(\"Requiring fairscale. Do 'pip install fairscale'\")\n optim = fairscale.optim.oss.OSS(\n params=model.parameters(), optim=optim_class, **args.optim_conf\n )\n else:\n optim = optim_class(model.parameters(), **args.optim_conf)\n\n optimizers = [optim]\n\n return optimizers\n\n @classmethod\n def exclude_opts(cls) -> Tuple[str, ...]:\n \"\"\"The options not to be shown by --print_config\"\"\"\n return \"required\", \"print_config\", \"config\", \"ngpu\"\n\n @classmethod\n def get_default_config(cls) -> Dict[str, Any]:\n \"\"\"Return the configuration as dict.\n This method is used by print_config()\n \"\"\"\n\n def get_class_type(name: str, classes: dict):\n _cls = classes.get(name)\n if _cls is None:\n raise ValueError(f\"must be one of {list(classes)}: {name}\")\n return _cls\n\n # This method is used only for --print_config\n assert check_argument_types()\n parser = cls.get_parser()\n args, _ = parser.parse_known_args()\n config = vars(args)\n # Excludes the options not to be shown\n for k in AbsTask.exclude_opts():\n config.pop(k)\n\n for i in range(1, cls.num_optimizers + 1):\n suf = \"\" if i == 1 else str(i)\n name = config[f\"optim{suf}\"]\n optim_class = get_class_type(name, optim_classes)\n conf = get_default_kwargs(optim_class)\n # Overwrite the default by the arguments,\n conf.update(config[f\"optim{suf}_conf\"])\n # and set it again\n config[f\"optim{suf}_conf\"] = conf\n\n name = config[f\"scheduler{suf}\"]\n if name is not None:\n scheduler_class = get_class_type(name, scheduler_classes)\n conf = get_default_kwargs(scheduler_class)\n # Overwrite the default by the arguments,\n conf.update(config[f\"scheduler{suf}_conf\"])\n # and set it again\n config[f\"scheduler{suf}_conf\"] = conf\n\n for class_choices in cls.class_choices_list:\n if getattr(args, class_choices.name) is not None:\n class_obj = class_choices.get_class(getattr(args, class_choices.name))\n conf = get_default_kwargs(class_obj)\n name = class_choices.name\n # Overwrite the default by the arguments,\n conf.update(config[f\"{name}_conf\"])\n # and set it again\n config[f\"{name}_conf\"] = 
conf\n return config\n\n @classmethod\n def check_required_command_args(cls, args: argparse.Namespace):\n assert check_argument_types()\n for k in vars(args):\n if \"-\" in k:\n raise RuntimeError(f'Use \"_\" instead of \"-\": parser.get_parser(\"{k}\")')\n\n required = \", \".join(\n f\"--{a}\" for a in args.required if getattr(args, a) is None\n )\n\n if len(required) != 0:\n parser = cls.get_parser()\n parser.print_help(file=sys.stderr)\n p = Path(sys.argv[0]).name\n print(file=sys.stderr)\n print(\n f\"{p}: error: the following arguments are required: \" f\"{required}\",\n file=sys.stderr,\n )\n sys.exit(2)\n\n @classmethod\n def check_task_requirements(\n cls,\n dataset: Union[AbsDataset, IterableMuskitDataset],\n allow_variable_data_keys: bool,\n train: bool,\n inference: bool = False,\n ) -> None:\n \"\"\"Check if the dataset satisfy the requirement of current Task\"\"\"\n assert check_argument_types()\n mes = (\n f\"If you intend to use an additional input, modify \"\n f'\"{cls.__name__}.required_data_names()\" or '\n f'\"{cls.__name__}.optional_data_names()\". '\n f\"Otherwise you need to set --allow_variable_data_keys true \"\n )\n\n for k in cls.required_data_names(train, inference):\n if not dataset.has_name(k):\n raise RuntimeError(\n f'\"{cls.required_data_names(train, inference)}\" are required for'\n f' {cls.__name__}. but \"{dataset.names()}\" are input.\\n{mes}'\n )\n if not allow_variable_data_keys:\n task_keys = cls.required_data_names(\n train, inference\n ) + cls.optional_data_names(train, inference)\n for k in dataset.names():\n if k not in task_keys:\n raise RuntimeError(\n f\"The data-name must be one of {task_keys} \"\n f'for {cls.__name__}: \"{k}\" is not allowed.\\n{mes}'\n )\n\n @classmethod\n def print_config(cls, file=sys.stdout) -> None:\n assert check_argument_types()\n # Shows the config: e.g. python train.py asr --print_config\n config = cls.get_default_config()\n file.write(yaml_no_alias_safe_dump(config, indent=4, sort_keys=False))\n\n @classmethod\n def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None):\n assert check_argument_types()\n print(get_commandline_args(), file=sys.stderr)\n if args is None:\n parser = cls.get_parser()\n args = parser.parse_args(cmd)\n # args.version = __version__\n if args.pretrain_path is not None:\n raise RuntimeError(\"--pretrain_path is deprecated. Use --init_param\")\n if args.print_config:\n cls.print_config()\n sys.exit(0)\n cls.check_required_command_args(args)\n\n # \"distributed\" is decided using the other command args\n resolve_distributed_mode(args)\n if not args.distributed or not args.multiprocessing_distributed:\n cls.main_worker(args)\n\n else:\n assert args.ngpu > 1, args.ngpu\n # Multi-processing distributed mode: e.g. 
2node-4process-4GPU\n # | Host1 | Host2 |\n # | Process1 | Process2 | <= Spawn processes\n # |Child1|Child2|Child1|Child2|\n # |GPU1 |GPU2 |GPU1 |GPU2 |\n\n # See also the following usage of --multiprocessing-distributed:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher)\n if num_nodes == 1:\n args.dist_master_addr = \"localhost\"\n args.dist_rank = 0\n # Single node distributed training with multi-GPUs\n if (\n args.dist_init_method == \"env://\"\n and get_master_port(args.dist_master_port) is None\n ):\n # Get the unused port\n args.dist_master_port = free_port()\n\n # Assume that nodes use same number of GPUs each other\n args.dist_world_size = args.ngpu * num_nodes\n node_rank = get_node_rank(args.dist_rank, args.dist_launcher)\n\n # The following block is copied from:\n # https://github.com/pytorch/pytorch/blob/master/torch/multiprocessing/spawn.py\n error_queues = []\n processes = []\n mp = torch.multiprocessing.get_context(\"spawn\")\n for i in range(args.ngpu):\n # Copy args\n local_args = argparse.Namespace(**vars(args))\n\n local_args.local_rank = i\n local_args.dist_rank = args.ngpu * node_rank + i\n local_args.ngpu = 1\n\n process = mp.Process(\n target=cls.main_worker,\n args=(local_args,),\n daemon=False,\n )\n process.start()\n processes.append(process)\n error_queues.append(mp.SimpleQueue())\n # Loop on join until it returns True or raises an exception.\n while not ProcessContext(processes, error_queues).join():\n pass\n\n @classmethod\n def main_worker(cls, args: argparse.Namespace):\n assert check_argument_types()\n\n # 0. Init distributed process\n distributed_option = build_dataclass(DistributedOption, args)\n # Setting distributed_option.dist_rank, etc.\n distributed_option.init_options()\n\n # NOTE(kamo): Don't use logging before invoking logging.basicConfig()\n if not distributed_option.distributed or distributed_option.dist_rank == 0:\n if not distributed_option.distributed:\n _rank = \"\"\n else:\n _rank = (\n f\":{distributed_option.dist_rank}/\"\n f\"{distributed_option.dist_world_size}\"\n )\n\n # NOTE(kamo):\n # logging.basicConfig() is invoked in main_worker() instead of main()\n # because it can be invoked only once in a process.\n # FIXME(kamo): Should we use logging.getLogger()?\n logging.basicConfig(\n level=args.log_level,\n format=f\"[{os.uname()[1].split('.')[0]}{_rank}]\"\n f\" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n else:\n # Suppress logging if RANK != 0\n logging.basicConfig(\n level=\"ERROR\",\n format=f\"[{os.uname()[1].split('.')[0]}\"\n f\":{distributed_option.dist_rank}/{distributed_option.dist_world_size}]\"\n f\" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n )\n # Invoking torch.distributed.init_process_group\n distributed_option.init_torch_distributed()\n\n # 1. Set random-seed\n set_all_random_seed(args.seed)\n torch.backends.cudnn.enabled = args.cudnn_enabled\n torch.backends.cudnn.benchmark = args.cudnn_benchmark\n torch.backends.cudnn.deterministic = args.cudnn_deterministic\n if args.detect_anomaly:\n logging.info(\"Invoking torch.autograd.set_detect_anomaly(True)\")\n torch.autograd.set_detect_anomaly(args.detect_anomaly)\n\n # 2. 
Build model\n model = cls.build_model(args=args)\n if not isinstance(model, AbsMuskitModel):\n raise RuntimeError(\n f\"model must inherit {AbsMuskitModel.__name__}, but got {type(model)}\"\n )\n if args.ngpu == 1 and torch.cuda.is_available():\n torch.cuda.set_device(args.gpu_id)\n logging.info(f\"GPU {args.gpu_id} is used\")\n model = model.to(\n dtype=getattr(torch, args.train_dtype),\n device=\"cuda\" if args.ngpu > 0 else \"cpu\",\n )\n for t in args.freeze_param:\n for k, p in model.named_parameters():\n if k.startswith(t + \".\") or k == t:\n logging.info(f\"Setting {k}.requires_grad = False\")\n p.requires_grad = False\n\n # 3. Build optimizer\n optimizers = cls.build_optimizers(args, model=model)\n\n # 4. Build schedulers\n schedulers = []\n for i, optim in enumerate(optimizers, 1):\n suf = \"\" if i == 1 else str(i)\n name = getattr(args, f\"scheduler{suf}\")\n conf = getattr(args, f\"scheduler{suf}_conf\")\n if name is not None:\n cls_ = scheduler_classes.get(name)\n if cls_ is None:\n raise ValueError(\n f\"must be one of {list(scheduler_classes)}: {name}\"\n )\n scheduler = cls_(optim, **conf)\n else:\n scheduler = None\n\n schedulers.append(scheduler)\n\n logging.info(pytorch_cudnn_version())\n logging.info(model_summary(model))\n for i, (o, s) in enumerate(zip(optimizers, schedulers), 1):\n suf = \"\" if i == 1 else str(i)\n logging.info(f\"Optimizer{suf}:\\n{o}\")\n logging.info(f\"Scheduler{suf}: {s}\")\n\n # 5. Dump \"args\" to config.yaml\n # NOTE(kamo): \"args\" should be saved after object-buildings are done\n # because they are allowed to modify \"args\".\n output_dir = Path(args.output_dir)\n if not distributed_option.distributed or distributed_option.dist_rank == 0:\n output_dir.mkdir(parents=True, exist_ok=True)\n with (output_dir / \"config.yaml\").open(\"w\", encoding=\"utf-8\") as f:\n logging.info(\n f'Saving the configuration in {output_dir / \"config.yaml\"}'\n )\n yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)\n\n # 6. Loads pre-trained model\n for p in args.init_param:\n logging.info(f\"Loading pretrained params from {p}\")\n load_pretrained_model(\n model=model,\n init_param=p,\n ignore_init_mismatch=args.ignore_init_mismatch,\n # NOTE(kamo): \"cuda\" for torch.load always indicates cuda:0\n # in PyTorch<=1.4\n map_location=f\"cuda:{torch.cuda.current_device()}\"\n if args.ngpu > 0\n else \"cpu\",\n )\n\n if args.dry_run:\n pass\n elif args.collect_stats:\n # Perform on collect_stats mode. 
This mode has two roles\n # - Derive the length and dimension of all input data\n # - Accumulate feats, square values, and the length for whitening\n logging.info(args)\n\n if args.valid_batch_size is None:\n args.valid_batch_size = args.batch_size\n\n if len(args.train_shape_file) != 0:\n train_key_file = args.train_shape_file[0]\n else:\n train_key_file = None\n if len(args.valid_shape_file) != 0:\n valid_key_file = args.valid_shape_file[0]\n else:\n valid_key_file = None\n\n collect_stats(\n model=model,\n train_iter=cls.build_streaming_iterator(\n data_path_and_name_and_type=args.train_data_path_and_name_and_type,\n key_file=train_key_file,\n batch_size=args.batch_size,\n dtype=args.train_dtype,\n num_workers=args.num_workers,\n allow_variable_data_keys=args.allow_variable_data_keys,\n ngpu=args.ngpu,\n preprocess_fn=cls.build_preprocess_fn(args, train=False),\n collate_fn=cls.build_collate_fn(args, train=False),\n ),\n valid_iter=cls.build_streaming_iterator(\n data_path_and_name_and_type=args.valid_data_path_and_name_and_type,\n key_file=valid_key_file,\n batch_size=args.valid_batch_size,\n dtype=args.train_dtype,\n num_workers=args.num_workers,\n allow_variable_data_keys=args.allow_variable_data_keys,\n ngpu=args.ngpu,\n preprocess_fn=cls.build_preprocess_fn(args, train=False),\n collate_fn=cls.build_collate_fn(args, train=False),\n ),\n output_dir=output_dir,\n ngpu=args.ngpu,\n log_interval=args.log_interval,\n write_collected_feats=args.write_collected_feats,\n )\n else:\n # 7. Build iterator factories\n if args.multiple_iterator:\n train_iter_factory = cls.build_multiple_iter_factory(\n args=args,\n distributed_option=distributed_option,\n mode=\"train\",\n )\n else:\n train_iter_factory = cls.build_iter_factory(\n args=args,\n distributed_option=distributed_option,\n mode=\"train\",\n )\n valid_iter_factory = cls.build_iter_factory(\n args=args,\n distributed_option=distributed_option,\n mode=\"valid\",\n )\n if args.num_att_plot != 0:\n plot_attention_iter_factory = cls.build_iter_factory(\n args=args,\n distributed_option=distributed_option,\n mode=\"plot_att\",\n )\n else:\n plot_attention_iter_factory = None\n\n # 8. 
Start training\n if args.use_wandb:\n if (\n not distributed_option.distributed\n or distributed_option.dist_rank == 0\n ):\n if args.wandb_project is None:\n project = (\n \"Muskit_\"\n + cls.__name__\n + str(Path(\".\").resolve()).replace(\"/\", \"_\")\n )\n else:\n project = args.wandb_project\n if args.wandb_id is None:\n wandb_id = str(output_dir).replace(\"/\", \"_\")\n else:\n wandb_id = args.wandb_id\n\n wandb.init(\n project=project,\n dir=output_dir,\n id=wandb_id,\n resume=\"allow\",\n )\n wandb.config.update(args)\n else:\n # wandb also supports grouping for distributed training,\n # but we only logs aggregated data,\n # so it's enough to perform on rank0 node.\n args.use_wandb = False\n\n # Don't give args to trainer.run() directly!!!\n # Instead of it, define \"Options\" object and build here.\n\n trainer_options = cls.trainer.build_options(args)\n cls.trainer.run(\n model=model,\n optimizers=optimizers,\n schedulers=schedulers,\n train_iter_factory=train_iter_factory,\n valid_iter_factory=valid_iter_factory,\n plot_attention_iter_factory=plot_attention_iter_factory,\n trainer_options=trainer_options,\n distributed_option=distributed_option,\n )\n\n @classmethod\n def build_iter_options(\n cls,\n args: argparse.Namespace,\n distributed_option: DistributedOption,\n mode: str,\n ):\n if mode == \"train\":\n preprocess_fn = cls.build_preprocess_fn(args, train=True)\n collate_fn = cls.build_collate_fn(args, train=True)\n data_path_and_name_and_type = args.train_data_path_and_name_and_type\n shape_files = args.train_shape_file\n batch_size = args.batch_size\n batch_bins = args.batch_bins\n batch_type = args.batch_type\n max_cache_size = args.max_cache_size\n max_cache_fd = args.max_cache_fd\n distributed = distributed_option.distributed\n num_batches = None\n num_iters_per_epoch = args.num_iters_per_epoch\n train = True\n\n elif mode == \"valid\":\n preprocess_fn = cls.build_preprocess_fn(args, train=False)\n collate_fn = cls.build_collate_fn(args, train=False)\n data_path_and_name_and_type = args.valid_data_path_and_name_and_type\n shape_files = args.valid_shape_file\n\n if args.valid_batch_type is None:\n batch_type = args.batch_type\n else:\n batch_type = args.valid_batch_type\n if args.valid_batch_size is None:\n batch_size = args.batch_size\n else:\n batch_size = args.valid_batch_size\n if args.valid_batch_bins is None:\n batch_bins = args.batch_bins\n else:\n batch_bins = args.valid_batch_bins\n if args.valid_max_cache_size is None:\n # Cache 5% of maximum size for validation loader\n max_cache_size = 0.05 * args.max_cache_size\n else:\n max_cache_size = args.valid_max_cache_size\n max_cache_fd = args.max_cache_fd\n distributed = distributed_option.distributed\n num_batches = None\n num_iters_per_epoch = None\n train = False\n\n elif mode == \"plot_att\":\n preprocess_fn = cls.build_preprocess_fn(args, train=False)\n collate_fn = cls.build_collate_fn(args, train=False)\n data_path_and_name_and_type = args.valid_data_path_and_name_and_type\n shape_files = args.valid_shape_file\n batch_type = \"unsorted\"\n batch_size = 1\n batch_bins = 0\n num_batches = args.num_att_plot\n max_cache_fd = args.max_cache_fd\n # num_att_plot should be a few sample ~ 3, so cache all data.\n max_cache_size = np.inf if args.max_cache_size != 0.0 else 0.0\n # always False because plot_attention performs on RANK0\n distributed = False\n num_iters_per_epoch = None\n train = False\n else:\n raise NotImplementedError(f\"mode={mode}\")\n\n return IteratorOptions(\n preprocess_fn=preprocess_fn,\n 
collate_fn=collate_fn,\n data_path_and_name_and_type=data_path_and_name_and_type,\n shape_files=shape_files,\n batch_type=batch_type,\n batch_size=batch_size,\n batch_bins=batch_bins,\n num_batches=num_batches,\n max_cache_size=max_cache_size,\n max_cache_fd=max_cache_fd,\n distributed=distributed,\n num_iters_per_epoch=num_iters_per_epoch,\n train=train,\n )\n\n @classmethod\n def build_iter_factory(\n cls,\n args: argparse.Namespace,\n distributed_option: DistributedOption,\n mode: str,\n kwargs: dict = None,\n ) -> AbsIterFactory:\n \"\"\"Build a factory object of mini-batch iterator.\n This object is invoked at every epochs to build the iterator for each epoch\n as following:\n >>> iter_factory = cls.build_iter_factory(...)\n >>> for epoch in range(1, max_epoch):\n ... for keys, batch in iter_fatory.build_iter(epoch):\n ... model(**batch)\n The mini-batches for each epochs are fully controlled by this class.\n Note that the random seed used for shuffling is decided as \"seed + epoch\" and\n the generated mini-batches can be reproduces when resuming.\n Note that the definition of \"epoch\" doesn't always indicate\n to run out of the whole training corpus.\n \"--num_iters_per_epoch\" option restricts the number of iterations for each epoch\n and the rest of samples for the originally epoch are left for the next epoch.\n e.g. If The number of mini-batches equals to 4, the following two are same:\n - 1 epoch without \"--num_iters_per_epoch\"\n - 4 epoch with \"--num_iters_per_epoch\" == 4\n \"\"\"\n assert check_argument_types()\n iter_options = cls.build_iter_options(args, distributed_option, mode)\n # Overwrite iter_options if any kwargs is given\n if kwargs is not None:\n for k, v in kwargs.items():\n setattr(iter_options, k, v)\n\n if args.iterator_type == \"sequence\":\n return cls.build_sequence_iter_factory(\n args=args,\n iter_options=iter_options,\n mode=mode,\n )\n elif args.iterator_type == \"chunk\":\n return cls.build_chunk_iter_factory(\n args=args,\n iter_options=iter_options,\n mode=mode,\n )\n elif args.iterator_type == \"task\":\n return cls.build_task_iter_factory(\n args=args,\n iter_options=iter_options,\n mode=mode,\n )\n else:\n raise RuntimeError(f\"Not supported: iterator_type={args.iterator_type}\")\n\n @classmethod\n def build_sequence_iter_factory(\n cls, args: argparse.Namespace, iter_options: IteratorOptions, mode: str\n ) -> AbsIterFactory:\n assert check_argument_types()\n\n midi_loader_mode = (\n \"xiaoice\" if \"xiaoice\" in args.config else \"format\"\n ) # NOTE(Shuai) format, xiaoice (tempo means index_nums)\n time_shift = (\n args.feats_extract_conf[\"hop_length\"] / args.feats_extract_conf[\"fs\"]\n )\n\n dataset = MuskitDataset(\n iter_options.data_path_and_name_and_type,\n float_dtype=args.train_dtype,\n preprocess=iter_options.preprocess_fn,\n max_cache_size=iter_options.max_cache_size,\n max_cache_fd=iter_options.max_cache_fd,\n mode=mode,\n midi_loader_mode=midi_loader_mode,\n time_shift=time_shift,\n pitch_aug_min=args.pitch_aug_min,\n pitch_aug_max=args.pitch_aug_max,\n pitch_mean=args.pitch_mean,\n time_aug_min=args.time_aug_min,\n time_aug_max=args.time_aug_max,\n random_crop=args.random_crop,\n mask_aug=args.mask_aug,\n )\n cls.check_task_requirements(\n dataset, args.allow_variable_data_keys, train=iter_options.train\n )\n\n if Path(\n Path(iter_options.data_path_and_name_and_type[0][0]).parent, \"utt2category\"\n ).exists():\n utt2category_file = str(\n Path(\n Path(iter_options.data_path_and_name_and_type[0][0]).parent,\n 
\"utt2category\",\n )\n )\n else:\n utt2category_file = None\n batch_sampler = build_batch_sampler(\n type=iter_options.batch_type,\n shape_files=iter_options.shape_files,\n fold_lengths=args.fold_length,\n batch_size=iter_options.batch_size,\n batch_bins=iter_options.batch_bins,\n sort_in_batch=args.sort_in_batch,\n sort_batch=args.sort_batch,\n drop_last=False,\n min_batch_size=torch.distributed.get_world_size()\n if iter_options.distributed\n else 1,\n utt2category_file=utt2category_file,\n )\n\n batches = list(batch_sampler)\n if iter_options.num_batches is not None:\n batches = batches[: iter_options.num_batches]\n\n bs_list = [len(batch) for batch in batches]\n\n logging.info(f\"[{mode}] dataset:\\n{dataset}\")\n logging.info(f\"[{mode}] Batch sampler: {batch_sampler}\")\n logging.info(\n f\"[{mode}] mini-batch sizes summary: N-batch={len(bs_list)}, \"\n f\"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}\"\n )\n\n if iter_options.distributed:\n world_size = torch.distributed.get_world_size()\n rank = torch.distributed.get_rank()\n for batch in batches:\n if len(batch) < world_size:\n raise RuntimeError(\n f\"The batch-size must be equal or more than world_size: \"\n f\"{len(batch)} < {world_size}\"\n )\n batches = [batch[rank::world_size] for batch in batches]\n\n return SequenceIterFactory(\n dataset=dataset,\n batches=batches,\n seed=args.seed,\n num_iters_per_epoch=iter_options.num_iters_per_epoch,\n shuffle=iter_options.train,\n num_workers=args.num_workers,\n collate_fn=iter_options.collate_fn,\n pin_memory=args.ngpu > 0,\n )\n\n @classmethod\n def build_chunk_iter_factory(\n cls,\n args: argparse.Namespace,\n iter_options: IteratorOptions,\n mode: str,\n ) -> AbsIterFactory:\n assert check_argument_types()\n\n dataset = MuskitDataset(\n iter_options.data_path_and_name_and_type,\n float_dtype=args.train_dtype,\n preprocess=iter_options.preprocess_fn,\n max_cache_size=iter_options.max_cache_size,\n max_cache_fd=iter_options.max_cache_fd,\n )\n cls.check_task_requirements(\n dataset, args.allow_variable_data_keys, train=iter_options.train\n )\n\n if len(iter_options.shape_files) == 0:\n key_file = iter_options.data_path_and_name_and_type[0][0]\n else:\n key_file = iter_options.shape_files[0]\n\n batch_sampler = UnsortedBatchSampler(batch_size=1, key_file=key_file)\n batches = list(batch_sampler)\n if iter_options.num_batches is not None:\n batches = batches[: iter_options.num_batches]\n logging.info(f\"[{mode}] dataset:\\n{dataset}\")\n\n if iter_options.distributed:\n world_size = torch.distributed.get_world_size()\n rank = torch.distributed.get_rank()\n if len(batches) < world_size:\n raise RuntimeError(\"Number of samples is smaller than world_size\")\n if iter_options.batch_size < world_size:\n raise RuntimeError(\"batch_size must be equal or more than world_size\")\n\n if rank < iter_options.batch_size % world_size:\n batch_size = iter_options.batch_size // world_size + 1\n else:\n batch_size = iter_options.batch_size // world_size\n num_cache_chunks = args.num_cache_chunks // world_size\n # NOTE(kamo): Split whole corpus by sample numbers without considering\n # each of the lengths, therefore the number of iteration counts are not\n # always equal to each other and the iterations are limitted\n # by the fewest iterations.\n # i.e. 
the samples over the counts are discarded.\n batches = batches[rank::world_size]\n else:\n batch_size = iter_options.batch_size\n num_cache_chunks = args.num_cache_chunks\n\n return ChunkIterFactory(\n dataset=dataset,\n batches=batches,\n seed=args.seed,\n batch_size=batch_size,\n # For chunk iterator,\n # --num_iters_per_epoch doesn't indicate the number of iterations,\n # but indicates the number of samples.\n num_samples_per_epoch=iter_options.num_iters_per_epoch,\n shuffle=iter_options.train,\n num_workers=args.num_workers,\n collate_fn=iter_options.collate_fn,\n pin_memory=args.ngpu > 0,\n chunk_length=args.chunk_length,\n chunk_shift_ratio=args.chunk_shift_ratio,\n num_cache_chunks=num_cache_chunks,\n )\n\n # NOTE(kamo): Not abstract class\n @classmethod\n def build_task_iter_factory(\n cls,\n args: argparse.Namespace,\n iter_options: IteratorOptions,\n mode: str,\n ) -> AbsIterFactory:\n \"\"\"Build task specific iterator factory\n Example:\n >>> class YourTask(AbsTask):\n ... @classmethod\n ... def add_task_arguments(cls, parser: argparse.ArgumentParser):\n ... parser.set_defaults(iterator_type=\"task\")\n ...\n ... @classmethod\n ... def build_task_iter_factory(\n ... cls,\n ... args: argparse.Namespace,\n ... iter_options: IteratorOptions,\n ... mode: str,\n ... ):\n ... return FooIterFactory(...)\n ...\n ... @classmethod\n ... def build_iter_options(\n .... args: argparse.Namespace,\n ... distributed_option: DistributedOption,\n ... mode: str\n ... ):\n ... # if you need to customize options object\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def build_multiple_iter_factory(\n cls, args: argparse.Namespace, distributed_option: DistributedOption, mode: str\n ):\n assert check_argument_types()\n iter_options = cls.build_iter_options(args, distributed_option, mode)\n assert len(iter_options.data_path_and_name_and_type) > 0, len(\n iter_options.data_path_and_name_and_type\n )\n\n # 1. Sanity check\n num_splits = None\n for path in [\n path for path, _, _ in iter_options.data_path_and_name_and_type\n ] + list(iter_options.shape_files):\n if not Path(path).is_dir():\n raise RuntimeError(f\"{path} is not a directory\")\n p = Path(path) / \"num_splits\"\n if not p.exists():\n raise FileNotFoundError(f\"{p} is not found\")\n with p.open() as f:\n _num_splits = int(f.read())\n if num_splits is not None and num_splits != _num_splits:\n raise RuntimeError(\n f\"Number of splits are mismathed: \"\n f\"{iter_options.data_path_and_name_and_type[0][0]} and {path}\"\n )\n num_splits = _num_splits\n\n for i in range(num_splits):\n p = Path(path) / f\"split.{i}\"\n if not p.exists():\n raise FileNotFoundError(f\"{p} is not found\")\n\n # 2. 
Create functions to build an iter factory for each splits\n data_path_and_name_and_type_list = [\n [\n (str(Path(p) / f\"split.{i}\"), n, t)\n for p, n, t in iter_options.data_path_and_name_and_type\n ]\n for i in range(num_splits)\n ]\n shape_files_list = [\n [str(Path(s) / f\"split.{i}\") for s in iter_options.shape_files]\n for i in range(num_splits)\n ]\n num_iters_per_epoch_list = [\n (iter_options.num_iters_per_epoch + i) // num_splits\n if iter_options.num_iters_per_epoch is not None\n else None\n for i in range(num_splits)\n ]\n max_cache_size = iter_options.max_cache_size / num_splits\n\n # Note that iter-factories are built for each epoch at runtime lazily.\n build_funcs = [\n functools.partial(\n cls.build_iter_factory,\n args,\n distributed_option,\n mode,\n kwargs=dict(\n data_path_and_name_and_type=_data_path_and_name_and_type,\n shape_files=_shape_files,\n num_iters_per_epoch=_num_iters_per_epoch,\n max_cache_size=max_cache_size,\n ),\n )\n for (\n _data_path_and_name_and_type,\n _shape_files,\n _num_iters_per_epoch,\n ) in zip(\n data_path_and_name_and_type_list,\n shape_files_list,\n num_iters_per_epoch_list,\n )\n ]\n\n # 3. Build MultipleIterFactory\n return MultipleIterFactory(\n build_funcs=build_funcs, shuffle=iter_options.train, seed=args.seed\n )\n\n @classmethod\n def build_streaming_iterator(\n cls,\n data_path_and_name_and_type,\n preprocess_fn,\n collate_fn,\n key_file: str = None,\n batch_size: int = 1,\n dtype: str = np.float32,\n num_workers: int = 1,\n allow_variable_data_keys: bool = False,\n ngpu: int = 0,\n inference: bool = False,\n ) -> DataLoader:\n \"\"\"Build DataLoader using iterable dataset\"\"\"\n assert check_argument_types()\n # For backward compatibility for pytorch DataLoader\n if collate_fn is not None:\n kwargs = dict(collate_fn=collate_fn)\n else:\n kwargs = {}\n\n # IterableDataset is supported from pytorch=1.2\n if LooseVersion(torch.__version__) >= LooseVersion(\"1.2\"):\n dataset = IterableMuskitDataset(\n data_path_and_name_and_type,\n float_dtype=dtype,\n preprocess=preprocess_fn,\n key_file=key_file,\n )\n if dataset.apply_utt2category:\n kwargs.update(batch_size=1)\n else:\n kwargs.update(batch_size=batch_size)\n else:\n dataset = MuskitDataset(\n data_path_and_name_and_type,\n float_dtype=dtype,\n preprocess=preprocess_fn,\n )\n if key_file is None:\n key_file = data_path_and_name_and_type[0][0]\n batch_sampler = UnsortedBatchSampler(\n batch_size=batch_size,\n key_file=key_file,\n drop_last=False,\n )\n kwargs.update(batch_sampler=batch_sampler)\n\n cls.check_task_requirements(\n dataset, allow_variable_data_keys, train=False, inference=inference\n )\n\n return DataLoader(\n dataset=dataset,\n pin_memory=ngpu > 0,\n num_workers=num_workers,\n **kwargs,\n )\n\n # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~\n @classmethod\n def build_model_from_file(\n cls,\n config_file: Union[Path, str],\n model_file: Union[Path, str] = None,\n device: str = \"cpu\",\n ) -> Tuple[AbsMuskitModel, argparse.Namespace]:\n \"\"\"This method is used for inference or fine-tuning.\n Args:\n config_file: The yaml file saved when training.\n model_file: The model file saved when training.\n device:\n \"\"\"\n assert check_argument_types()\n config_file = Path(config_file)\n\n with config_file.open(\"r\", encoding=\"utf-8\") as f:\n args = yaml.safe_load(f)\n args = argparse.Namespace(**args)\n model = cls.build_model(args)\n if not isinstance(model, AbsMuskitModel):\n raise RuntimeError(\n f\"model must inherit 
{AbsMuskitModel.__name__}, but got {type(model)}\"\n )\n model.to(device)\n if model_file is not None:\n logging.info(f\"Load model state dict from: {model_file}\")\n if device == \"cuda\":\n # NOTE(kamo): \"cuda\" for torch.load always indicates cuda:0\n # in PyTorch<=1.4\n device = f\"cuda:{torch.cuda.current_device()}\"\n model.load_state_dict(torch.load(model_file, map_location=device))\n\n return model, args\n",
"# Copyright 2020 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Style encoder of GST-Tacotron.\"\"\"\n\nfrom typeguard import check_argument_types\nfrom typing import Sequence\n\nimport torch\n\nfrom muskit.layers.transformer.attention import (\n MultiHeadedAttention as BaseMultiHeadedAttention, # NOQA\n)\n\n\nclass StyleEncoder(torch.nn.Module):\n \"\"\"Style encoder.\n This module is style encoder introduced in `Style Tokens: Unsupervised Style\n Modeling, Control and Transfer in End-to-End Speech Synthesis`.\n .. _`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End\n Speech Synthesis`: https://arxiv.org/abs/1803.09017\n Args:\n idim (int, optional): Dimension of the input mel-spectrogram.\n gst_tokens (int, optional): The number of GST embeddings.\n gst_token_dim (int, optional): Dimension of each GST embedding.\n gst_heads (int, optional): The number of heads in GST multihead attention.\n conv_layers (int, optional): The number of conv layers in the reference encoder.\n conv_chans_list: (Sequence[int], optional):\n List of the number of channels of conv layers in the referece encoder.\n conv_kernel_size (int, optional):\n Kernel size of conv layers in the reference encoder.\n conv_stride (int, optional):\n Stride size of conv layers in the reference encoder.\n gru_layers (int, optional): The number of GRU layers in the reference encoder.\n gru_units (int, optional): The number of GRU units in the reference encoder.\n Todo:\n * Support manual weight specification in inference.\n \"\"\"\n\n def __init__(\n self,\n idim: int = 80,\n gst_tokens: int = 10,\n gst_token_dim: int = 256,\n gst_heads: int = 4,\n conv_layers: int = 6,\n conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),\n conv_kernel_size: int = 3,\n conv_stride: int = 2,\n gru_layers: int = 1,\n gru_units: int = 128,\n ):\n \"\"\"Initilize global style encoder module.\"\"\"\n assert check_argument_types()\n super(StyleEncoder, self).__init__()\n\n self.ref_enc = ReferenceEncoder(\n idim=idim,\n conv_layers=conv_layers,\n conv_chans_list=conv_chans_list,\n conv_kernel_size=conv_kernel_size,\n conv_stride=conv_stride,\n gru_layers=gru_layers,\n gru_units=gru_units,\n )\n self.stl = StyleTokenLayer(\n ref_embed_dim=gru_units,\n gst_tokens=gst_tokens,\n gst_token_dim=gst_token_dim,\n gst_heads=gst_heads,\n )\n\n def forward(self, speech: torch.Tensor) -> torch.Tensor:\n \"\"\"Calculate forward propagation.\n Args:\n speech (Tensor): Batch of padded target features (B, Lmax, odim).\n Returns:\n Tensor: Style token embeddings (B, token_dim).\n \"\"\"\n ref_embs = self.ref_enc(speech)\n style_embs = self.stl(ref_embs)\n\n return style_embs\n\n\nclass ReferenceEncoder(torch.nn.Module):\n \"\"\"Reference encoder module.\n This module is reference encoder introduced in `Style Tokens: Unsupervised Style\n Modeling, Control and Transfer in End-to-End Speech Synthesis`.\n .. 
_`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End\n Speech Synthesis`: https://arxiv.org/abs/1803.09017\n Args:\n idim (int, optional): Dimension of the input mel-spectrogram.\n conv_layers (int, optional): The number of conv layers in the reference encoder.\n conv_chans_list: (Sequence[int], optional):\n List of the number of channels of conv layers in the referece encoder.\n conv_kernel_size (int, optional):\n Kernel size of conv layers in the reference encoder.\n conv_stride (int, optional):\n Stride size of conv layers in the reference encoder.\n gru_layers (int, optional): The number of GRU layers in the reference encoder.\n gru_units (int, optional): The number of GRU units in the reference encoder.\n \"\"\"\n\n def __init__(\n self,\n idim=80,\n conv_layers: int = 6,\n conv_chans_list: Sequence[int] = (32, 32, 64, 64, 128, 128),\n conv_kernel_size: int = 3,\n conv_stride: int = 2,\n gru_layers: int = 1,\n gru_units: int = 128,\n ):\n \"\"\"Initilize reference encoder module.\"\"\"\n assert check_argument_types()\n super(ReferenceEncoder, self).__init__()\n\n # check hyperparameters are valid\n assert conv_kernel_size % 2 == 1, \"kernel size must be odd.\"\n assert (\n len(conv_chans_list) == conv_layers\n ), \"the number of conv layers and length of channels list must be the same.\"\n\n convs = []\n padding = (conv_kernel_size - 1) // 2\n for i in range(conv_layers):\n conv_in_chans = 1 if i == 0 else conv_chans_list[i - 1]\n conv_out_chans = conv_chans_list[i]\n convs += [\n torch.nn.Conv2d(\n conv_in_chans,\n conv_out_chans,\n kernel_size=conv_kernel_size,\n stride=conv_stride,\n padding=padding,\n # Do not use bias due to the following batch norm\n bias=False,\n ),\n torch.nn.BatchNorm2d(conv_out_chans),\n torch.nn.ReLU(inplace=True),\n ]\n self.convs = torch.nn.Sequential(*convs)\n\n self.conv_layers = conv_layers\n self.kernel_size = conv_kernel_size\n self.stride = conv_stride\n self.padding = padding\n\n # get the number of GRU input units\n gru_in_units = idim\n for i in range(conv_layers):\n gru_in_units = (\n gru_in_units - conv_kernel_size + 2 * padding\n ) // conv_stride + 1\n gru_in_units *= conv_out_chans\n self.gru = torch.nn.GRU(gru_in_units, gru_units, gru_layers, batch_first=True)\n\n def forward(self, speech: torch.Tensor) -> torch.Tensor:\n \"\"\"Calculate forward propagation.\n Args:\n speech (Tensor): Batch of padded target features (B, Lmax, idim).\n Returns:\n Tensor: Reference embedding (B, gru_units)\n \"\"\"\n batch_size = speech.size(0)\n xs = speech.unsqueeze(1) # (B, 1, Lmax, idim)\n hs = self.convs(xs).transpose(1, 2) # (B, Lmax', conv_out_chans, idim')\n # NOTE(kan-bayashi): We need to care the length?\n time_length = hs.size(1)\n hs = hs.contiguous().view(batch_size, time_length, -1) # (B, Lmax', gru_units)\n self.gru.flatten_parameters()\n _, ref_embs = self.gru(hs) # (gru_layers, batch_size, gru_units)\n ref_embs = ref_embs[-1] # (batch_size, gru_units)\n\n return ref_embs\n\n\nclass StyleTokenLayer(torch.nn.Module):\n \"\"\"Style token layer module.\n This module is style token layer introduced in `Style Tokens: Unsupervised Style\n Modeling, Control and Transfer in End-to-End Speech Synthesis`.\n .. 
_`Style Tokens: Unsupervised Style Modeling, Control and Transfer in End-to-End\n Speech Synthesis`: https://arxiv.org/abs/1803.09017\n Args:\n ref_embed_dim (int, optional): Dimension of the input reference embedding.\n gst_tokens (int, optional): The number of GST embeddings.\n gst_token_dim (int, optional): Dimension of each GST embedding.\n gst_heads (int, optional): The number of heads in GST multihead attention.\n dropout_rate (float, optional): Dropout rate in multi-head attention.\n \"\"\"\n\n def __init__(\n self,\n ref_embed_dim: int = 128,\n gst_tokens: int = 10,\n gst_token_dim: int = 256,\n gst_heads: int = 4,\n dropout_rate: float = 0.0,\n ):\n \"\"\"Initilize style token layer module.\"\"\"\n assert check_argument_types()\n super(StyleTokenLayer, self).__init__()\n\n gst_embs = torch.randn(gst_tokens, gst_token_dim // gst_heads)\n self.register_parameter(\"gst_embs\", torch.nn.Parameter(gst_embs))\n self.mha = MultiHeadedAttention(\n q_dim=ref_embed_dim,\n k_dim=gst_token_dim // gst_heads,\n v_dim=gst_token_dim // gst_heads,\n n_head=gst_heads,\n n_feat=gst_token_dim,\n dropout_rate=dropout_rate,\n )\n\n def forward(self, ref_embs: torch.Tensor) -> torch.Tensor:\n \"\"\"Calculate forward propagation.\n Args:\n ref_embs (Tensor): Reference embeddings (B, ref_embed_dim).\n Returns:\n Tensor: Style token embeddings (B, gst_token_dim).\n \"\"\"\n batch_size = ref_embs.size(0)\n # (num_tokens, token_dim) -> (batch_size, num_tokens, token_dim)\n gst_embs = torch.tanh(self.gst_embs).unsqueeze(0).expand(batch_size, -1, -1)\n # NOTE(kan-bayashi): Shoule we apply Tanh?\n ref_embs = ref_embs.unsqueeze(1) # (batch_size, 1 ,ref_embed_dim)\n style_embs = self.mha(ref_embs, gst_embs, gst_embs, None)\n\n return style_embs.squeeze(1)\n\n\nclass MultiHeadedAttention(BaseMultiHeadedAttention):\n \"\"\"Multi head attention module with different input dimension.\"\"\"\n\n def __init__(self, q_dim, k_dim, v_dim, n_head, n_feat, dropout_rate=0.0):\n \"\"\"Initialize multi head attention module.\"\"\"\n # NOTE(kan-bayashi): Do not use super().__init__() here since we want to\n # overwrite BaseMultiHeadedAttention.__init__() method.\n torch.nn.Module.__init__(self)\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = torch.nn.Linear(q_dim, n_feat)\n self.linear_k = torch.nn.Linear(k_dim, n_feat)\n self.linear_v = torch.nn.Linear(v_dim, n_feat)\n self.linear_out = torch.nn.Linear(n_feat, n_feat)\n self.attn = None\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.ReLU",
"torch.nn.functional.glu",
"torch.nn.Conv1d"
],
[
"torch.sum",
"torch.from_numpy",
"numpy.ones",
"torch.arange",
"numpy.array"
],
[
"torch.cuda.set_device",
"torch.autograd.set_detect_anomaly",
"torch.load",
"numpy.min",
"torch.cuda.current_device",
"torch.utils.data.DataLoader",
"torch.multiprocessing.spawn.SpawnContext",
"torch.multiprocessing.get_context",
"numpy.max",
"numpy.mean",
"torch.cuda.is_available",
"torch.distributed.get_rank",
"torch.distributed.get_world_size"
],
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.Parameter",
"torch.nn.Module.__init__",
"torch.randn",
"torch.nn.GRU",
"torch.nn.Conv2d",
"torch.tanh",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
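Note: build_sequence_iter_factory in the record above partitions every mini-batch across distributed ranks with batch[rank::world_size], after first checking that each batch holds at least world_size samples. A minimal sketch in plain Python of that slicing (the utterance IDs are made-up placeholders, and no torch.distributed setup is needed for the illustration):

    # Illustrates the batch[rank::world_size] partitioning used in
    # build_sequence_iter_factory.
    batches = [
        ["utt1", "utt2", "utt3", "utt4"],
        ["utt5", "utt6", "utt7", "utt8"],
    ]
    world_size = 2

    for rank in range(world_size):
        per_rank = [batch[rank::world_size] for batch in batches]
        print(f"rank {rank}: {per_rank}")

    # rank 0: [['utt1', 'utt3'], ['utt5', 'utt7']]
    # rank 1: [['utt2', 'utt4'], ['utt6', 'utt8']]

Because the split is done per mini-batch rather than per corpus, the earlier check that len(batch) >= world_size is what guarantees every rank receives at least one sample from every batch.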
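Note: in the ReferenceEncoder of the style-encoder file above, the number of GRU input units is obtained by applying the standard convolution output-length formula once per conv layer to the mel dimension and then multiplying by the final channel count. A worked example in plain Python with the default hyperparameters shown in the record (idim=80, kernel size 3, stride 2, padding 1, channels 32-32-64-64-128-128):

    idim, kernel, stride, padding = 80, 3, 2, 1
    conv_chans_list = (32, 32, 64, 64, 128, 128)

    units = idim
    for out_chans in conv_chans_list:
        # same formula as in ReferenceEncoder.__init__:
        # (L - kernel + 2 * padding) // stride + 1
        units = (units - kernel + 2 * padding) // stride + 1

    print(units)              # 2   (80 -> 40 -> 20 -> 10 -> 5 -> 3 -> 2)
    print(units * out_chans)  # 256, the input size passed to torch.nn.GRU

So with the defaults the GRU consumes 2 * 128 = 256 features per frame of the downsampled sequence.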
kunal-mulki/Materials | [
"b76bba123002972e4063b9b24cd5dc3d980e16e9"
] | [
"Code/Python/bootcamp_examples.py"
] | [
"\"\"\"\nExamples for Data Bootcamp course (data input and graphics)\n\n**Warning**\nWeb data access will change in the near future, when Pandas spins\noff the web access tools into a new package.\nhttp://pandas.pydata.org/pandas-docs/stable/remote_data.html\n\nRepository of materials (including this file):\n* https://github.com/NYUDataBootcamp/Materials\n\nWritten by Dave Backus, November 2015\nCreated with Python 3.5\n\"\"\"\n\"\"\"\nCheck versions (ignore this)\n\"\"\"\nimport pandas as pd # the data package\nimport sys\n\nprint('\\nPython version:', sys.version)\nprint('Pandas version: ', pd.__version__)\n\n#%%\n\"\"\"\nExample: World Bank country indicators\n* NY.GDP.PCAP.PP.KD = gdp per capita\n* NY.GDP.MKTP.PP.KD = gdp\n* SE.ADT.LITR.ZS = adult literacy (%)\n* SP.DYN.LE00.IN = life expectancy\n* IT.CEL.SETS.P2 = cell phone penetration (per 100)\nSee: http://data.worldbank.org/\n\"\"\"\n# load packages (redundancy is ok)\nimport pandas as pd # data management tools\nfrom pandas.io import wb # World Bank api\nimport matplotlib.pyplot as plt # plotting tools\n\n# variable list\nvar = ['NY.GDP.PCAP.PP.KD', 'NY.GDP.MKTP.PP.KD', 'SP.DYN.LE00.IN']\n# country list (ISO codes)\niso = ['USA', 'FRA', 'JPN', 'CHN', 'IND', 'BRA', 'MEX']\nyear = 2013\ndf = wb.download(indicator=var, country=iso, start=year, end=year)\n\n# massage data\ndf = df.reset_index(level='year', drop=True)\ndf.columns = ['gdppc', 'gdp', 'le'] # rename variables\ndf['gdp'] = df['gdp']/10**12 # convert to trillions\ndf['gdppc'] = df['gdppc']/10**3 # convert to thousands\ndf['order'] = [5, 3, 1, 4, 2, 6, 0] # reorder countries\ndf = df.sort(columns='order', ascending=False)\n\n# GDP bar chart\nax = df['gdp'].plot(kind='barh', alpha=0.5)\nax.set_title('GDP', loc='left', fontsize=14)\nax.set_xlabel('Trillions of US Dollars')\nax.set_ylabel('')\n\n#%%\n# ditto for GDP per capita (per person)\nax = df['gdppc'].plot(kind='barh', color='m', alpha=0.5)\nax.set_title('GDP Per Capita', loc='left', fontsize=14)\nax.set_xlabel('Thousands of US Dollars')\nax.set_ylabel('')\n\n#%%\n# scatterplot of life expectancy vs gdp per capita\nplt.scatter(df['gdppc'], df['le'], s=50*df['gdp'],\n cmap=plt.get_cmap(name='Spectral'), alpha=0.5) # cmap irrelevant\nplt.title('Life expectancy vs. 
GDP per capita', loc='left', fontsize=14)\nplt.xlabel('GDP Per Capita')\nplt.ylabel('Life Expectancy')\n#plt.annotate(x=iso, xy=(df['gdppc'], df['le']))\n\n#%%\n\"\"\"\nExample: US GDP and GDP growth from FRED\n\"\"\"\nimport pandas.io.data as web # web interface for FRED\nimport datetime as dt # handles dates\nimport matplotlib.pyplot as plt # plotting\n\nfred_series = ['GDPC1'] # the real GDP code for FRED\nstart_date = dt.datetime(1960, 1, 1)\nfred = web.DataReader(fred_series, 'fred', start_date)/10**3 # convert to trillions of USD\n# print last 3 data points to see what we've got (quarterly data)\nprint(fred.tail(3))\n\n# plot GDP over time\nax = fred.plot(legend=False)\nax.set_title('US Real GDP', fontsize=14, loc='left')\nax.set_xlabel('')\nax.set_ylabel('Trillions of Chained US Dollars')\nax.legend().set_visible(False)\n\n#%%\n# quarterly growth rates\ng = fred.pct_change()*400 # 400 makes this an annual percentage\nprint(g.tail(3))\n# change label\ng.columns = ['US GDP Growth']\ngbar = g.mean()\n\n# plot growth rates\nstart = dt.datetime(1985, 1, 1)\nend = g.index[-1]\nax = g[g.index >= start].plot(kind='line')\nax.set_title('US Real GDP Growth', fontsize=14, loc='left')\nax.hlines(y=gbar, xmin=start, xmax=end)\nax.hlines(y=0, xmin=start, xmax=end, linestyles='dashed')\nax.legend().set_visible(False)\n\n#%%\n\"\"\"\nExample: US economic indicators (monthly data from FRED)\n* INDPRO: industrial production\n* PAYEMS: nonfarm employment\n* AWHMAN: average weekly hours worked in manufacturing\n* PERMIT: premits for new housing\n* NAPM: purchasing managers index\n\"\"\"\nimport pandas.io.data as web # web interface with FRED\nimport pandas as pd # data manipulation\nimport datetime as dt # handles dates\n\n# list of indicators (FRED codes)\nindicators = ['INDPRO', 'PAYEMS', 'AWHMAN', 'PERMIT', 'NAPM']\nstart_date = dt.datetime(1970, 1, 1)\ninds = web.DataReader(indicators, \"fred\", start_date)\nprint(inds.tail(3))\n\n# yoy growth rates\ng = inds.pct_change(periods=12).dropna()\n# standardize\ng_std = (g - g.mean()) / g.std()\n\n# plot\nax = g_std.plot()\nax.set_title('Various economic indicators', fontsize=14, loc='left')\n#ax.set_ylabel('Standard deviations from mean')\nax.set_xlabel('')\nax.hlines(y=[-1, 0, 1], xmin=start_date, xmax=end, linestyles='dashed')\nax.legend().set_visible(False)\n\n#%%\n\"\"\"\nGovernment debt: IMF historical data\nThanks to Itamar Snir\n\"\"\"\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# data input\nexcelFilePath = '../Temp/Debt Database Fall 2013 Vintage.xlsx'\ndf = pd.read_excel(excelFilePath, sheetname=1, na_values=['…', '….', ''])\n #, index_col=-1, encoding='utf-8')\n\n#%%\n#get most recent year in the data (instead of 2013):\nmax_year = max(df.columns.values[4:].tolist())\n\n#get a list of the years for the x-axis values\nyears = [year for year in range(1980,max_year+1)]\n#get a list of the debt to GDP for the y-axis values\ndbt_greece = df[df.country=='Greece'][years]\ndbt_greece_list = dbt_greece.values.tolist()[0]\n#plot the data\nplt.plot(years,dbt_greece_list, color='red') #set graph color\nplt.ylabel('Debt to GDP')\nplt.title ('Greece Debt to GDP Between 1980 and '+ str(max_year))\nplt.show()\n\n#%%\n\"\"\"\nUS bond yields\nVideo?\n\"\"\"\n\n\n#%%\n\"\"\"\nExample: Stock prices from Yahoo finance (VIX)\n\"\"\"\nimport pandas as pd\nimport pandas.io.data as web\nimport datetime as dt\n\n# ticker\nticker = 'aapl'\ntoday = dt.date.today()\n#one_week = dt.timedelta(days=7)\n#start = today - one_week\nstart = dt.datetime(2000, 1, 
1)\nvix = web.DataReader(ticker, 'yahoo', start)\n\nax = vix['Close'].plot()\nax.set_xlabel('')\n\n#%%\n\"\"\"\nExample: Fama-French stock returns\n* xsm = excess return on market (market minus riskfree rate)\n* smb = return on small firms minus return on big firms\n* hml = return on high book-to-market firms minus low\n* rf = riskfree rate\nAll returns are monthly percentages\n\"\"\"\nimport pandas.io.data as web\n\nff = web.DataReader('F-F_Research_Data_factors', 'famafrench')[0]\nff.columns = ['xsm', 'smb', 'hml', 'rf']\n\nff.describe\n\n# plots of mean and std\nffbar = ff.mean()\nffstd = ff.std()\n\nff.plot(kind='kde', subplots=True)\n\n#fig, ax = plt.\n#ffbar.plot(kind='barh', alpha=0.5)\n#plt.title('Mean returns', fontsize=14, loc='left')\n#\n#ffstd.plot(kind='barh', alpha=0.5)\n#plt.title('Standard deviation of returns', fontsize=14, loc='left')\n\n\n#%%\n\"\"\"\nExample: Stock options from Yahoo finance\nCurrently **broken**: asks for html5lib, which conflicts with Python 3.5\n\"\"\"\nimport pandas as pd\nimport pandas.io.data as web\nfrom pandas.io.data import Options\nimport datetime as dt\n#import matplotlib.pylab as plt\n\n# ticker\nticker = 'spy'\ntoday = dt.date.today()\none_week = dt.timedelta(days=7)\nstart = today - one_week\nstock = web.DataReader(ticker, 'yahoo', start)\n# take the last close (-1 is the last, 'Close' is the close)\natm = stock.ix[-1,'Close'] # the -1 takes the last observation\nprint('Stock price (at the money): ', atm)\n\n# get option prices for same ticker\noption = Options(ticker, 'yahoo')\nexpiry = dt.date(2016, 2, 19)\n#data_calls = option.get_call_data(expiry=expiry).dropna()\n#data_puts = option.get_put_data(expiry=expiry).dropna()\n\n# compute mid of bid and ask and arrange series for plotting\ncalls_bid = data_calls['Bid']\ncalls_ask = data_calls['Ask']\n\ncalls_strikes = data_calls['Strike']\ncalls_mid = (data_calls['Bid'] + data_calls['Ask'])/2\nputs_strikes = data_puts['Strike']\nputs_mid = (data_puts['Bid'] + data_puts['Ask'])/2\n\n# plot call and put prices v strike\nplt.plot(calls_strikes, calls_mid, 'r', lw=2, label='calls')\nplt.plot(puts_strikes, puts_mid, 'b', lw=2, label='puts')\n\n# prettify it\n#plt.axis([120, 250, 0, 50])\nplt.axvline(x=atm, color='k', linestyle='--', label='ATM')\nplt.legend(loc='best')\nplt.show()\n\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_excel",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.title",
"pandas.io.data.DataReader",
"pandas.io.data.Options",
"matplotlib.pyplot.get_cmap",
"pandas.io.wb.download",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
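Note: the bootcamp script above warns that the pandas.io.data and pandas.io.wb web-access tools were being spun off into a separate package; that package is pandas-datareader. A hedged sketch of the equivalent FRED download with it (assumes pandas-datareader is installed; the GDPC1 series code and the conversion to trillions are taken directly from the script):

    import datetime as dt

    from pandas_datareader import data as web  # successor to pandas.io.data

    start_date = dt.datetime(1960, 1, 1)
    fred = web.DataReader('GDPC1', 'fred', start_date) / 10**3  # trillions of USD
    print(fred.tail(3))

The rest of the script's FRED and World Bank examples port the same way, by swapping the pandas.io imports for their pandas-datareader counterparts.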
Vincent34/mindspore | [
"fcb2ec2779b753e95c762cf292b23bd81d1f561b",
"fcb2ec2779b753e95c762cf292b23bd81d1f561b",
"fcb2ec2779b753e95c762cf292b23bd81d1f561b",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"d02ba6a87c37ad9d0bc413413b9e9ddc8c60f43c",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"fcb2ec2779b753e95c762cf292b23bd81d1f561b",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"fcb2ec2779b753e95c762cf292b23bd81d1f561b",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"fcb2ec2779b753e95c762cf292b23bd81d1f561b",
"a39a60878a46e7e9cb02db788c0bca478f2fa6e5",
"fcb2ec2779b753e95c762cf292b23bd81d1f561b"
] | [
"tests/ut/python/dataset/test_vocab.py",
"model_zoo/official/cv/ctpn/src/ctpn.py",
"tests/st/control/inner/test_111_if_after_if_in_while.py",
"model_zoo/official/nlp/cpm/src/lr_schedule.py",
"tests/st/auto_monad/test_auto_monad_mindtester.py",
"model_zoo/research/cv/StarGAN/export.py",
"mindspore/nn/metrics/perplexity.py",
"model_zoo/research/cv/FaceRecognition/src/custom_dataset.py",
"model_zoo/official/cv/cnnctc/src/dataset.py",
"model_zoo/research/cv/CGAN/train.py",
"model_zoo/research/cv/StarGAN/eval.py",
"model_zoo/research/cv/deeplabv3plus/eval.py",
"model_zoo/official/cv/maskrcnn/src/maskrcnn/fpn_neck.py",
"mindspore/explainer/explanation/_counterfactual/hierarchical_occlusion.py",
"model_zoo/research/cv/ProtoNet/src/EvalCallBack.py",
"tests/ut/python/dataset/test_config.py",
"model_zoo/official/cv/maskrcnn_mobilenetv1/src/maskrcnn_mobilenetv1/rcnn_mask.py",
"model_zoo/official/cv/retinaface_resnet50/eval.py",
"tests/ut/python/optimizer/test_optimizer_with_loss_scale.py",
"tests/st/pynative/loss_scale/test_loss_scale.py",
"tests/ut/python/parallel/test_concat.py"
] | [
"# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\n\nimport mindspore.dataset as ds\nimport mindspore.dataset.text as text\nimport mindspore.common.dtype as mstype\nfrom mindspore import log as logger\n\n# this file contains \"home is behind the world head\" each word is 1 line\nDATA_FILE = \"../data/dataset/testVocab/words.txt\"\nVOCAB_FILE = \"../data/dataset/testVocab/vocab_list.txt\"\nSIMPLE_VOCAB_FILE = \"../data/dataset/testVocab/simple_vocab_list.txt\"\n\n\ndef test_lookup_callable():\n \"\"\"\n Test lookup is callable\n \"\"\"\n logger.info(\"test_lookup_callable\")\n vocab = text.Vocab.from_list(['深', '圳', '欢', '迎', '您'])\n lookup = text.Lookup(vocab)\n word = \"迎\"\n assert lookup(word) == 3\n\ndef test_from_list_tutorial():\n vocab = text.Vocab.from_list(\"home IS behind the world ahead !\".split(\" \"), [\"<pad>\", \"<unk>\"], True)\n lookup = text.Lookup(vocab, \"<unk>\")\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n ind = 0\n res = [2, 1, 4, 5, 6, 7]\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert d[\"text\"] == res[ind], ind\n ind += 1\n\n\ndef test_from_file_tutorial():\n vocab = text.Vocab.from_file(VOCAB_FILE, \",\", None, [\"<pad>\", \"<unk>\"], True)\n lookup = text.Lookup(vocab)\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n ind = 0\n res = [10, 11, 12, 15, 13, 14]\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert d[\"text\"] == res[ind], ind\n ind += 1\n\n\ndef test_from_dict_tutorial():\n vocab = text.Vocab.from_dict({\"home\": 3, \"behind\": 2, \"the\": 4, \"world\": 5, \"<unk>\": 6})\n lookup = text.Lookup(vocab, \"<unk>\") # any unknown token will be mapped to the id of <unk>\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n res = [3, 6, 2, 4, 5, 6]\n ind = 0\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert d[\"text\"] == res[ind], ind\n ind += 1\n\n\ndef test_from_dict_exception():\n try:\n vocab = text.Vocab.from_dict({\"home\": -1, \"behind\": 0})\n if not vocab:\n raise ValueError(\"Vocab is None\")\n except ValueError as e:\n assert \"is not within the required interval\" in str(e)\n\n\ndef test_from_list():\n def gen(texts):\n for word in texts.split(\" \"):\n yield (np.array(word, dtype='S'),)\n\n def test_config(lookup_str, vocab_input, special_tokens, special_first, unknown_token):\n try:\n vocab = text.Vocab.from_list(vocab_input, special_tokens, special_first)\n data = ds.GeneratorDataset(gen(lookup_str), column_names=[\"text\"])\n data = data.map(operations=text.Lookup(vocab, unknown_token), input_columns=[\"text\"])\n res = []\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n 
res.append(d[\"text\"].item())\n return res\n except (ValueError, RuntimeError, TypeError) as e:\n return str(e)\n\n # test basic default config, special_token=None, unknown_token=None\n assert test_config(\"w1 w2 w3\", [\"w1\", \"w2\", \"w3\"], None, True, None) == [0, 1, 2]\n # test normal operations\n assert test_config(\"w1 w2 w3 s1 s2 ephemeral\", [\"w1\", \"w2\", \"w3\"], [\"s1\", \"s2\"], True, \"s2\") == [2, 3, 4, 0, 1, 1]\n assert test_config(\"w1 w2 w3 s1 s2\", [\"w1\", \"w2\", \"w3\"], [\"s1\", \"s2\"], False, \"s2\") == [0, 1, 2, 3, 4]\n assert test_config(\"w3 w2 w1\", [\"w1\", \"w2\", \"w3\"], None, True, \"w1\") == [2, 1, 0]\n assert test_config(\"w3 w2 w1\", [\"w1\", \"w2\", \"w3\"], None, False, \"w1\") == [2, 1, 0]\n # test unknown token lookup\n assert test_config(\"w1 un1 w3 un2\", [\"w1\", \"w2\", \"w3\"], [\"<pad>\", \"<unk>\"], True, \"<unk>\") == [2, 1, 4, 1]\n assert test_config(\"w1 un1 w3 un2\", [\"w1\", \"w2\", \"w3\"], [\"<pad>\", \"<unk>\"], False, \"<unk>\") == [0, 4, 2, 4]\n\n # test exceptions\n assert \"doesn't exist in vocab.\" in test_config(\"un1\", [\"w1\"], [], False, \"unk\")\n assert \"doesn't exist in vocab and no unknown token is specified.\" in test_config(\"un1\", [\"w1\"], [], False, None)\n assert \"doesn't exist in vocab\" in test_config(\"un1\", [\"w1\"], [], False, None)\n assert \"word_list contains duplicate\" in test_config(\"w1\", [\"w1\", \"w1\"], [], True, \"w1\")\n assert \"special_tokens contains duplicate\" in test_config(\"w1\", [\"w1\", \"w2\"], [\"s1\", \"s1\"], True, \"w1\")\n assert \"special_tokens and word_list contain duplicate\" in test_config(\"w1\", [\"w1\", \"w2\"], [\"s1\", \"w1\"], True, \"w1\")\n assert \"is not of type\" in test_config(\"w1\", [\"w1\", \"w2\"], [\"s1\"], True, 123)\n\n\ndef test_from_list_lookup_empty_string():\n # \"\" is a valid word in vocab, which can be looked up by LookupOp\n vocab = text.Vocab.from_list(\"home IS behind the world ahead !\".split(\" \"), [\"<pad>\", \"\"], True)\n lookup = text.Lookup(vocab, \"\")\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n ind = 0\n res = [2, 1, 4, 5, 6, 7]\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert d[\"text\"] == res[ind], ind\n ind += 1\n\n # unknown_token of LookUp is None, it will convert to std::nullopt in C++,\n # so it has nothing to do with \"\" in vocab and C++ will skip looking up unknown_token\n vocab = text.Vocab.from_list(\"home IS behind the world ahead !\".split(\" \"), [\"<pad>\", \"\"], True)\n lookup = text.Lookup(vocab)\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n try:\n for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n pass\n except RuntimeError as e:\n assert \"token: \\\"is\\\" doesn't exist in vocab and no unknown token is specified\" in str(e)\n\n\ndef test_from_file():\n def gen(texts):\n for word in texts.split(\" \"):\n yield (np.array(word, dtype='S'),)\n\n def test_config(lookup_str, vocab_size, special_tokens, special_first):\n try:\n vocab = text.Vocab.from_file(SIMPLE_VOCAB_FILE, vocab_size=vocab_size, special_tokens=special_tokens,\n special_first=special_first)\n data = ds.GeneratorDataset(gen(lookup_str), column_names=[\"text\"])\n data = data.map(operations=text.Lookup(vocab, \"s2\"), input_columns=[\"text\"])\n res = []\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n 
res.append(d[\"text\"].item())\n return res\n except ValueError as e:\n return str(e)\n\n # test special tokens are prepended\n assert test_config(\"w1 w2 w3 s1 s2 s3\", None, [\"s1\", \"s2\", \"s3\"], True) == [3, 4, 5, 0, 1, 2]\n # test special tokens are appended\n assert test_config(\"w1 w2 w3 s1 s2 s3\", None, [\"s1\", \"s2\", \"s3\"], False) == [0, 1, 2, 8, 9, 10]\n # test special tokens are prepended when not all words in file are used\n assert test_config(\"w1 w2 w3 s1 s2 s3\", 3, [\"s1\", \"s2\", \"s3\"], False) == [0, 1, 2, 3, 4, 5]\n # text exception special_words contains duplicate words\n assert \"special_tokens contains duplicate\" in test_config(\"w1\", None, [\"s1\", \"s1\"], True)\n # test exception when vocab_size is negative\n assert \"Input vocab_size must be greater than 0\" in test_config(\"w1 w2\", 0, [], True)\n assert \"Input vocab_size must be greater than 0\" in test_config(\"w1 w2\", -1, [], True)\n\n\ndef test_lookup_cast_type():\n def gen(texts):\n for word in texts.split(\" \"):\n yield (np.array(word, dtype='S'),)\n\n def test_config(lookup_str, data_type=None):\n try:\n vocab = text.Vocab.from_list([\"w1\", \"w2\", \"w3\"], special_tokens=[\"<unk>\"], special_first=True)\n data = ds.GeneratorDataset(gen(lookup_str), column_names=[\"text\"])\n # if data_type is None, test the default value of data_type\n op = text.Lookup(vocab, \"<unk>\") if data_type is None else text.Lookup(vocab, \"<unk>\", data_type)\n data = data.map(operations=op, input_columns=[\"text\"])\n res = []\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n res.append(d[\"text\"])\n return res[0].dtype\n except (ValueError, RuntimeError, TypeError) as e:\n return str(e)\n\n # test result is correct\n assert test_config(\"w1\", mstype.int8) == np.dtype(\"int8\")\n assert test_config(\"w2\", mstype.int32) == np.dtype(\"int32\")\n assert test_config(\"w3\", mstype.int64) == np.dtype(\"int64\")\n assert test_config(\"unk\", mstype.float32) != np.dtype(\"int32\")\n assert test_config(\"unk\") == np.dtype(\"int32\")\n # test exception, data_type isn't the correct type\n assert \"tldr is not of type [<class 'mindspore._c_expression.typing.Type'>]\" in test_config(\"unk\", \"tldr\")\n assert \"Lookup : The parameter data_type must be numeric including bool.\" in \\\n test_config(\"w1\", mstype.string)\n\n\nif __name__ == '__main__':\n test_lookup_callable()\n test_from_dict_exception()\n test_from_list_tutorial()\n test_from_file_tutorial()\n test_from_dict_tutorial()\n test_from_list()\n test_from_file()\n test_lookup_cast_type()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"CPTN network definition.\"\"\"\n\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore.common import dtype as mstype\nfrom mindspore.ops import operations as P\nfrom src.CTPN.rpn import RPN\nfrom src.CTPN.anchor_generator import AnchorGenerator\nfrom src.CTPN.proposal_generator import Proposal\nfrom src.CTPN.vgg16 import VGG16FeatureExtraction\n\nclass BiLSTM(nn.Cell):\n \"\"\"\n Define a BiLSTM network which contains two LSTM layers\n\n Args:\n config(EasyDict): config for ctpn network\n batch_size(int): batch size of input data, only support 1\n \"\"\"\n def __init__(self, config, batch_size):\n super(BiLSTM, self).__init__()\n self.batch_size = batch_size\n self.batch_size = self.batch_size * config.rnn_batch_size\n self.input_size = config.input_size\n self.hidden_size = config.hidden_size\n self.num_step = config.num_step\n self.reshape = P.Reshape()\n self.cast = P.Cast()\n k = (1 / self.hidden_size) ** 0.5\n self.rnn1 = P.DynamicRNN(forget_bias=0.0)\n self.rnn_bw = P.DynamicRNN(forget_bias=0.0)\n self.w1 = Parameter(np.random.uniform(-k, k, \\\n (self.input_size + self.hidden_size, 4 * self.hidden_size)).astype(np.float32), name=\"w1\")\n self.w1_bw = Parameter(np.random.uniform(-k, k, \\\n (self.input_size + self.hidden_size, 4 * self.hidden_size)).astype(np.float32), name=\"w1_bw\")\n\n self.b1 = Parameter(np.random.uniform(-k, k, (4 * self.hidden_size)).astype(np.float32), name=\"b1\")\n self.b1_bw = Parameter(np.random.uniform(-k, k, (4 * self.hidden_size)).astype(np.float32), name=\"b1_bw\")\n\n self.h1 = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))\n self.h1_bw = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))\n\n self.c1 = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))\n self.c1_bw = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))\n self.reverse_seq = P.ReverseV2(axis=[0])\n self.concat = P.Concat()\n self.transpose = P.Transpose()\n self.concat1 = P.Concat(axis=2)\n self.dropout = nn.Dropout(0.7)\n self.use_dropout = config.use_dropout\n self.reshape = P.Reshape()\n self.transpose = P.Transpose()\n def construct(self, x):\n if self.use_dropout:\n x = self.dropout(x)\n x = self.cast(x, mstype.float16)\n bw_x = self.reverse_seq(x)\n y1, _, _, _, _, _, _, _ = self.rnn1(x, self.w1, self.b1, None, self.h1, self.c1)\n y1_bw, _, _, _, _, _, _, _ = self.rnn_bw(bw_x, self.w1_bw, self.b1_bw, None, self.h1_bw, self.c1_bw)\n y1_bw = self.reverse_seq(y1_bw)\n output = self.concat1((y1, y1_bw))\n return output\n\nclass CTPN(nn.Cell):\n \"\"\"\n Define CTPN network\n\n Args:\n config(EasyDict): config for ctpn network\n batch_size(int): batch size of input data, only support 1\n is_training(bool): whether training, default 
is True\n \"\"\"\n def __init__(self, config, batch_size, is_training=True):\n super(CTPN, self).__init__()\n self.config = config\n self.batch_size = batch_size\n self.num_step = config.num_step\n self.input_size = config.input_size\n self.hidden_size = config.hidden_size\n self.vgg16_feature_extractor = VGG16FeatureExtraction()\n self.conv = nn.Conv2d(512, 512, kernel_size=3, padding=0, pad_mode='same')\n self.rnn = BiLSTM(self.config, batch_size=self.batch_size).to_float(mstype.float16)\n self.reshape = P.Reshape()\n self.transpose = P.Transpose()\n self.cast = P.Cast()\n self.is_training = is_training\n\n # rpn block\n self.rpn_with_loss = RPN(config,\n self.batch_size,\n config.rpn_in_channels,\n config.rpn_feat_channels,\n config.num_anchors,\n config.rpn_cls_out_channels)\n self.anchor_generator = AnchorGenerator(config)\n self.featmap_size = config.feature_shapes\n self.anchor_list = self.get_anchors(self.featmap_size)\n self.proposal_generator_test = Proposal(config,\n self.batch_size,\n config.activate_num_classes,\n config.use_sigmoid_cls)\n self.proposal_generator_test.set_train_local(config, False)\n def construct(self, img_data, gt_bboxes, gt_labels, gt_valids, img_metas=None):\n x = self.vgg16_feature_extractor(img_data)\n x = self.conv(x)\n x = self.cast(x, mstype.float16)\n x = self.transpose(x, (0, 2, 1, 3))\n x = self.reshape(x, (-1, self.input_size, self.num_step))\n x = self.transpose(x, (2, 0, 1))\n x = self.rnn(x)\n rpn_loss, cls_score, bbox_pred, rpn_cls_loss, rpn_reg_loss = self.rpn_with_loss(x,\n img_metas,\n self.anchor_list,\n gt_bboxes,\n gt_labels,\n gt_valids)\n if self.training:\n return rpn_loss, cls_score, bbox_pred, rpn_cls_loss, rpn_reg_loss\n proposal, proposal_mask = self.proposal_generator_test(cls_score, bbox_pred, self.anchor_list)\n return proposal, proposal_mask\n\n def get_anchors(self, featmap_size):\n anchors = self.anchor_generator.grid_anchors(featmap_size)\n return Tensor(anchors, mstype.float16)\n\nclass CTPN_Infer(nn.Cell):\n def __init__(self, config, batch_size):\n super(CTPN_Infer, self).__init__()\n self.network = CTPN(config, batch_size=batch_size, is_training=False)\n self.network.set_train(False)\n\n def construct(self, img_data):\n output = self.network(img_data, None, None, None, None)\n return output\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nfrom mindspore.common import dtype as mstype\nfrom mindspore import nn\nfrom mindspore import Tensor\nfrom mindspore.ops import composite as C\nfrom mindspore import context\nfrom mindspore.common.parameter import Parameter\n\ncontext.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target=\"Ascend\")\n\n\nclass ForwardNet(nn.Cell):\n def __init__(self, max_cycles=10):\n super(ForwardNet, self).__init__()\n self.max_cycles = max_cycles\n self.i = Tensor(np.array(0), mstype.int32)\n self.zero = Tensor(np.array(0), mstype.int32)\n self.weight = Parameter(Tensor(np.array(0), mstype.int32))\n\n def construct(self, x, y):\n i = self.i\n out = self.zero\n while i < self.max_cycles:\n self.weight = i\n if out <= 20:\n self.weight = i\n out = x * y + out\n i = i + 1\n if out >= 30:\n self.weight = out\n out = out - 30\n return out, self.weight\n\n\nclass BackwardNet(nn.Cell):\n def __init__(self, net):\n super(BackwardNet, self).__init__(auto_prefix=False)\n self.forward_net = net\n self.grad = C.GradOperation(get_all=True)\n\n def construct(self, *inputs):\n grads = self.grad(self.forward_net)(*inputs)\n return grads\n\n\ndef test_forward():\n x = Tensor(np.array(1), mstype.int32)\n y = Tensor(np.array(3), mstype.int32)\n # Graph Mode\n context.set_context(mode=context.GRAPH_MODE)\n graph_forward_net = ForwardNet(max_cycles=10)\n graph_mode_out = graph_forward_net(x, y)\n # Pynative Mode\n context.set_context(mode=context.PYNATIVE_MODE)\n pynative_forward_net = ForwardNet(max_cycles=10)\n pynative_mode_out = pynative_forward_net(x, y)\n assert graph_mode_out == pynative_mode_out\n\n\ndef test_backward():\n x = Tensor(np.array(1), mstype.int32)\n y = Tensor(np.array(3), mstype.int32)\n # Graph Mode\n context.set_context(mode=context.GRAPH_MODE)\n graph_forward_net = ForwardNet(max_cycles=10)\n graph_backward_net = BackwardNet(graph_forward_net)\n graph_mode_grads = graph_backward_net(x, y)\n # Pynative Mode\n context.set_context(mode=context.PYNATIVE_MODE)\n pynative_forward_net = ForwardNet(max_cycles=10)\n pynative_backward_net = BackwardNet(pynative_forward_net)\n pynative_mode_grads = pynative_backward_net(x, y)\n assert graph_mode_grads == pynative_mode_grads\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"Learning rate schedule.\"\"\"\r\nimport numpy as np\r\nfrom mindspore.ops import operations as P\r\nfrom mindspore.common.tensor import Tensor\r\nfrom mindspore.common import dtype as mstype\r\nfrom mindspore.nn.learning_rate_schedule import LearningRateSchedule, WarmUpLR\r\n\r\n\r\nclass DecayLR(LearningRateSchedule):\r\n \"\"\"\r\n Implements of decay learning rate scheduler.\r\n\r\n Args:\r\n learning_rate (float): Initial learning rate.\r\n warmup_steps (int): Warmup steps.\r\n end_steps (int): A value used to calculate decayed learning rate.\r\n\r\n Returns:\r\n np.ndarray, learning rate of each step.\r\n \"\"\"\r\n\r\n def __init__(self, learning_rate, warmup_steps, end_iter):\r\n super(DecayLR, self).__init__()\r\n self.learning_rate = learning_rate\r\n self.warmup_steps = warmup_steps\r\n self.end_iter = end_iter\r\n self.cast = P.Cast()\r\n\r\n def construct(self, global_step):\r\n warmup_percent = self.cast((self.end_iter - (global_step - self.warmup_steps)), mstype.float32) / self.end_iter\r\n\r\n return self.learning_rate * warmup_percent\r\n\r\n\r\nclass CPMLearningRate(LearningRateSchedule):\r\n \"\"\"\r\n Implements of warmup-polynomial decay learning rate scheduler.\r\n\r\n Args:\r\n learning_rate (float): The initial value of learning rate.\r\n warmup_steps (int): The warm up steps of learning rate.\r\n end_steps (int): A value used to calculate decayed learning rate.\r\n\r\n Returns:\r\n Tensor. The learning rate value for the current step.\r\n \"\"\"\r\n\r\n def __init__(self, learning_rate, warmup_steps, end_steps):\r\n super(CPMLearningRate, self).__init__()\r\n self.warmup_lr = WarmUpLR(learning_rate, warmup_steps)\r\n self.decay_lr = DecayLR(learning_rate, warmup_steps, end_steps)\r\n self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))\r\n\r\n def construct(self, global_step):\r\n if global_step < self.warmup_steps:\r\n lr = self.warmup_lr(global_step)\r\n else:\r\n lr = self.decay_lr(global_step)\r\n return lr\r\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport os\nimport pytest\nimport numpy as np\nimport mindspore as ms\nimport mindspore.ops.operations as P\nfrom mindspore.nn import Cell\nfrom mindspore import context, Tensor\nfrom mindspore.common.parameter import Parameter\nfrom mindspore.common.initializer import initializer\nfrom mindspore.train.model import Model\nfrom mindspore.ops.composite import GradOperation\nfrom mindspore.common import ParameterTuple\n\ncontext.set_context(mode=context.GRAPH_MODE)\n\n\nclass _Grad(Cell):\n def __init__(self, grad, network, wrt_params=False, real_inputs_count=None):\n super().__init__()\n self.network = network\n self.grad = grad\n self.sens_param = self.grad.sens_param\n self.wrt_params = wrt_params\n self.real_inputs_count = real_inputs_count\n if self.wrt_params:\n self.params = ParameterTuple(self.network.trainable_params())\n\n def construct(self, *inputs):\n if self.real_inputs_count is None or self.sens_param is False:\n if self.wrt_params:\n return self.grad(self.network, self.params)(*inputs)\n return self.grad(self.network)(*inputs)\n\n real_inputs = inputs[:self.real_inputs_count]\n sense_param_inputs = inputs[self.real_inputs_count:]\n if self.wrt_params:\n return self.grad(self.network, self.params)(*real_inputs, sense_param_inputs)\n return self.grad(self.network)(*real_inputs, sense_param_inputs)\n\n\nclass GradOfFirstInput(_Grad):\n \"\"\"\n get grad of first input\n \"\"\"\n\n def __init__(self, network, sens_param=True, real_inputs_count=None):\n super().__init__(grad=GradOperation(sens_param=sens_param),\n network=network, real_inputs_count=real_inputs_count)\n\n\nclass GradOfAllInputs(_Grad):\n '''\n get grads of all inputs\n '''\n\n def __init__(self, network, sens_param=True, real_inputs_count=None):\n super().__init__(grad=GradOperation(get_all=True, sens_param=sens_param),\n network=network, real_inputs_count=real_inputs_count)\n\n\nclass GradOfAllInputsAndParams(_Grad):\n '''\n get grads of all inputs and params\n '''\n\n def __init__(self, network, sens_param=True, real_inputs_count=None):\n super().__init__(grad=GradOperation(get_all=True, get_by_list=True, sens_param=sens_param),\n network=network, wrt_params=True, real_inputs_count=real_inputs_count)\n\n\ndef _count_unequal_element(data_expected, data_me, rtol, atol):\n assert data_expected.shape == data_me.shape\n total_count = len(data_expected.flatten())\n error = np.abs(data_expected - data_me)\n greater = np.greater(error, atol + np.abs(data_me) * rtol)\n loss_count = np.count_nonzero(greater)\n assert (loss_count / total_count) < rtol, \\\n \"\\ndata_expected_std:{0}\\ndata_me_error:{1}\\nloss:{2}\". 
\\\n format(data_expected[greater], data_me[greater], error[greater])\n\n\ndef allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=True):\n if np.any(np.isnan(data_expected)):\n assert np.allclose(data_expected, data_me, rtol,\n atol, equal_nan=equal_nan)\n elif not np.allclose(data_expected, data_me, rtol, atol, equal_nan=equal_nan):\n _count_unequal_element(data_expected, data_me, rtol, atol)\n else:\n assert True\n\n\nclass ControlGraphSupportNotEqual(Cell):\n def construct(self, x, y, z, input_data):\n if x != y:\n out = input_data + input_data\n else:\n out = input_data - input_data\n if x == z:\n out2 = input_data * input_data\n else:\n out2 = input_data / input_data\n if x == z:\n out3_f = (lambda a: a + a)\n out3 = out3_f(input_data)\n else:\n out3_f = (lambda a: a + a + a)\n out3 = out3_f(input_data)\n return out, out2, out3\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_ctrl_if_while_graph_support_not_equal_true():\n x = np.array(0).astype(np.float32)\n y = np.array(3).astype(np.float32)\n input_shape = (512, 512, 7, 7)\n input_data = np.random.randn(*input_shape).astype(np.float32)\n net = ControlGraphSupportNotEqual()\n model = Model(net)\n out_me = model.predict(Tensor(x), Tensor(y), Tensor(x), Tensor(input_data))\n out = input_data + input_data\n out2 = input_data * input_data\n out3 = input_data + input_data\n allclose_nparray(out, out_me[0].asnumpy(), 0.0001, 0.0001)\n allclose_nparray(out2, out_me[1].asnumpy(), 0.0001, 0.0001)\n allclose_nparray(out3, out_me[2].asnumpy(), 0.0001, 0.0001)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_ctrl_if_while_graph_support_not_equal_false():\n x = np.array(0).astype(np.float32)\n y = np.array(0).astype(np.float32)\n z = np.array(3).astype(np.float32)\n input_shape = (512, 512, 7, 7)\n input_data = np.random.randn(*input_shape).astype(np.float32)\n net = ControlGraphSupportNotEqual()\n model = Model(net)\n out_me = model.predict(Tensor(x), Tensor(y), Tensor(z), Tensor(input_data))\n out = input_data - input_data\n out2 = input_data / input_data\n out3 = input_data + input_data + input_data\n allclose_nparray(out, out_me[0].asnumpy(), 0.0001, 0.0001)\n allclose_nparray(out2, out_me[1].asnumpy(), 0.0001, 0.0001)\n allclose_nparray(out3, out_me[2].asnumpy(), 0.0001, 0.0001)\n\n\nclass ControlBprop(Cell):\n def construct(self, x, y, z, input_data):\n if x != y:\n out = input_data + input_data\n else:\n out = input_data - input_data\n if x == z:\n out2 = input_data * input_data\n else:\n out2 = input_data / input_data\n if x == z:\n out3_f = (lambda a: a + a)\n out3 = out3_f(input_data)\n else:\n out3_f = (lambda a: a + a + a)\n out3 = out3_f(input_data)\n return out, out2, out3\n\n def bprop(self, x, y, z, input_data, out, dout):\n return x * 2, y * 3, z, input_data * 5.1\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_ctrl_if_while_bprop_true():\n x = np.array(0).astype(np.float32)\n y = np.array(3).astype(np.float32)\n input_shape = (512, 512, 7, 7)\n input_data = np.random.randn(*input_shape).astype(np.float32)\n net = ControlBprop()\n grad_net = GradOfAllInputs(net, sens_param=False)\n grad_net.set_train()\n grads = grad_net(Tensor(x), Tensor(y), Tensor(x), Tensor(input_data))\n allclose_nparray(x * 2, grads[0].asnumpy(), 0.0000, 0.0000)\n allclose_nparray(y * 3, 
grads[1].asnumpy(), 0.0000, 0.0000)\n allclose_nparray(x, grads[2].asnumpy(), 0.0000, 0.0000)\n allclose_nparray(input_data * 5.1, grads[3].asnumpy(), 0.0000, 0.0000)\n\n\nclass TwoInput(Cell):\n def __init__(self):\n super().__init__()\n self.op = P.Mul()\n\n def construct(self, x, y):\n x = self.op(x, y)\n return x\n\n\nclass InlineBpropTwoInput1(Cell):\n def __init__(self):\n super().__init__()\n self.f = TwoInput()\n self.f.set_grad()\n self.grad = GradOfAllInputs(self.f, sens_param=False)\n\n def construct(self, x, y):\n if x > y:\n x = self.f(x, y)\n else:\n x = self.f(x, y)\n return x\n\n def bprop(self, x, y, out, dout):\n if x > y:\n grads = self.grad(x, y)\n else:\n grads = self.grad(x, y)\n return grads[0] * 2, grads[1] * 2\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_ctrl_if_while_bprop_inlinebprop_twoinput():\n net = InlineBpropTwoInput1()\n input1 = Tensor(np.array(2).astype(np.float32))\n input2 = Tensor(np.array(1).astype(np.float32))\n grad_net = GradOfAllInputs(net, sens_param=False)\n grad_net.set_train()\n grads = grad_net(input1, input2)\n allclose_nparray(input1.asnumpy() * 2, grads[1].asnumpy(), 0, 0)\n allclose_nparray(input2.asnumpy() * 2, grads[0].asnumpy(), 0, 0)\n\n\nclass ControlOneIfOneParaOneAddn(Cell):\n def __init__(self, input_shape):\n super().__init__()\n self.addn = P.AddN()\n self.assign = P.Assign()\n self.inputdata = Parameter(initializer(\n 1, input_shape, ms.float32), name=\"global_step\")\n\n def construct(self, x, y, input_data):\n if x > y:\n out = self.inputdata\n else:\n out = self.addn([input_data, input_data, input_data])\n if x > y:\n out = self.assign(self.inputdata, input_data)\n return out\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_ctrl_if_para_addn_true():\n x = Tensor(1, ms.float32)\n y = Tensor(0, ms.float32)\n input_shape = (1024, 512, 7, 7)\n input_data = np.random.randn(*input_shape).astype(np.float32)\n net = ControlOneIfOneParaOneAddn(input_shape)\n out = net(x, y, Tensor(input_data))\n allclose_nparray(input_data[0], out.asnumpy()[0], 0.0001, 0.0001)\n\n\nclass AddnCell(Cell):\n def __init__(self):\n super().__init__()\n self.addn = P.AddN()\n\n def construct(self, x):\n x = self.addn((x, x))\n return x\n\n\nclass SideEffectMemoryCellAddnNet(Cell):\n def __init__(self):\n super().__init__()\n self.para = Parameter(Tensor([1.0], ms.float32), name=\"para\")\n self.assign = P.Assign()\n self.addn = P.AddN()\n self.addn1 = AddnCell()\n\n def construct(self, x):\n x = self.addn1(x)\n self.assign(self.para, x)\n out = self.addn((self.para, x))\n return out\n\n def grad_mindspore_impl(self, params, grad_ys):\n grad_net = GradOfAllInputsAndParams(self)\n grad_net.set_train()\n grad_out = grad_net(params, grad_ys)\n return grad_out\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_grad_memory_addn():\n net = SideEffectMemoryCellAddnNet()\n grad_ys = Tensor([18.0], ms.float32)\n inputs = Tensor([9.0], ms.float32)\n net.grad_mindspore_impl(inputs, grad_ys)\n\n\nclass SideEffectIOCellAddnNet(Cell):\n def __init__(self):\n super().__init__()\n self.para1 = Parameter(Tensor([1.0], ms.float32), name=\"para1\")\n self.para2 = Parameter(Tensor([3.0], ms.float32), name=\"para2\")\n self.print = P.Print()\n self.addn = AddnCell()\n\n def construct(self, x):\n self.print(\"para1:\", 
self.para1)\n self.print(\"para2:\", self.para2)\n x = self.addn(x)\n return x\n\n def grad_mindspore_impl(self, params, grad_ys):\n grad_net = GradOfAllInputsAndParams(self)\n grad_net.set_train()\n grad_out = grad_net(params, grad_ys)\n return grad_out\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_grad_io_addn():\n net = SideEffectIOCellAddnNet()\n grad_ys = Tensor([18.0], ms.float32)\n inputs = Tensor([9.0], ms.float32)\n net.grad_mindspore_impl(inputs, grad_ys)\n\n\nclass SideEffectReturnParameterNet(Cell):\n def __init__(self):\n super().__init__()\n self.para = Parameter(Tensor([1.0], ms.float32), name=\"para\")\n self.assign = P.Assign()\n self.addn = P.AddN()\n self.relu = P.ReLU()\n\n def construct(self, inputs):\n p1 = self.assign(self.para, inputs)\n out = self.addn((inputs, inputs, inputs))\n out = self.relu(out)\n return p1\n\n def grad_mindspore_impl(self, params, grad_ys):\n grad_net = GradOfAllInputsAndParams(self)\n grad_net.set_train()\n grad_out = grad_net(params, grad_ys)\n return grad_out\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_grad_read_dependency_return_parameter():\n net = SideEffectReturnParameterNet()\n grad_ys = Tensor([18.0], ms.float32)\n inputs = Tensor([9.0], ms.float32)\n net.grad_mindspore_impl(inputs, grad_ys)\n\n\nclass SideEffectAssignAddnReluReturnParNet(Cell):\n def __init__(self):\n super().__init__()\n self.parameter1 = Parameter(\n Tensor([1.0], ms.float32), name=\"parameter1\")\n self.assign = P.Assign()\n self.addN = P.AddN()\n self.relu = P.ReLU()\n\n def construct(self, inputs):\n p1 = self.assign(self.parameter1, inputs)\n out = self.addN((inputs, inputs, inputs))\n out = self.relu(out)\n return p1\n\n def grad_mindspore_impl(self, params, grad_ys):\n grad_net = GradOfAllInputsAndParams(self)\n grad_net.set_train()\n grad_out = grad_net(params, grad_ys)\n return grad_out\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_side_effect_grad_read_dependency_assign_addn_relu_return_parameter():\n net = SideEffectAssignAddnReluReturnParNet()\n grad_ys = Tensor([18.0], ms.float32)\n inputs = Tensor([9.0], ms.float32)\n out1 = net.grad_mindspore_impl(inputs, grad_ys)\n net = SideEffectAssignAddnReluReturnParNet()\n try:\n context.set_context(mode=context.PYNATIVE_MODE)\n out2 = net.grad_mindspore_impl(inputs, grad_ys)\n allclose_nparray(out1[0][0].asnumpy(), out2[0]\n [0].asnumpy(), 0.001, 0.001)\n allclose_nparray(out1[1][0].asnumpy(), out2[1]\n [0].asnumpy(), 0.001, 0.001)\n finally:\n context.set_context(mode=context.GRAPH_MODE)\n\n\nclass SideEffectPrintInHighOrdeAddnNet(Cell):\n def __init__(self):\n super().__init__()\n self.parameter1 = Parameter(\n Tensor([1.0], ms.float32), name=\"parameter1\")\n self.parameter2 = Parameter(\n Tensor([3.0], ms.float32), name=\"parameter2\")\n self.assign = P.Assign()\n self.addn = P.AddN()\n self.mul = P.Mul()\n self.print = P.Print()\n\n def construct(self, x):\n self.high_order_func()\n out = self.addn((self.parameter1, x, self.parameter2))\n return out\n\n def high_order_func(self):\n self.print(\"parameter1: \", self.parameter1)\n self.print(\"parameter2: \", self.parameter2)\n return True\n\n def grad_mindspore_impl(self, params, grad_ys):\n grad_net = GradOfAllInputsAndParams(self)\n grad_net.set_train()\n grad_out = grad_net(params, grad_ys)\n 
return grad_out\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_side_effect_high_order_print_in_high_order_net():\n print_file = os.getcwd() + \"/test_side_effect_high_order_print_in_high_order_net.data\"\n context.set_context(print_file_path=print_file)\n net = SideEffectPrintInHighOrdeAddnNet()\n out1 = net(Tensor([9.0], ms.float32))\n net = SideEffectPrintInHighOrdeAddnNet()\n try:\n context.set_context(mode=context.PYNATIVE_MODE)\n out2 = net(Tensor([9.0], ms.float32))\n allclose_nparray(out1.asnumpy(), out2.asnumpy(), 0.001, 0.001)\n finally:\n context.set_context(mode=context.GRAPH_MODE)\n\n\nclass SideEffectControlFlowAssignDependTwoIfNet(Cell):\n def __init__(self):\n super().__init__()\n self.parameter1 = Parameter(\n Tensor([3.0], ms.float32), name=\"parameter1\")\n self.assign = P.Assign()\n self.mul = P.Mul()\n self.addn = P.AddN()\n self.depend = P.Depend()\n\n def construct(self, x, y):\n self.assign(self.parameter1, x)\n if self.parameter1 > y:\n x = self.mul(x, x)\n p2 = self.assign(self.parameter1, x)\n if self.parameter1 > y:\n x = self.addn((x, self.parameter1))\n p3 = self.assign(self.parameter1, x)\n self.depend(p3, p2)\n return x\n\n def grad_mindspore_impl(self, params1, params2, grad_ys):\n grad_net = GradOfAllInputsAndParams(self)\n grad_net.set_train()\n grad_out = grad_net(params1, params2, grad_ys)\n return grad_out\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_side_effect_grad_control_flow_assign_depend_of_two_if():\n net = SideEffectControlFlowAssignDependTwoIfNet()\n grad_ys = Tensor([18.0], ms.float32)\n inputs1 = Tensor([9.0], ms.float32)\n inputs2 = Tensor([6.0], ms.float32)\n net.grad_mindspore_impl(inputs1, inputs2, grad_ys)\n\n\nclass SideEffectTwoAddnSwitchNet(Cell):\n def __init__(self):\n super().__init__()\n self.addN = P.AddN()\n\n def construct(self, x):\n y = x\n x = self.addN((x, x, x))\n y = self.addN((y, y))\n if x > y:\n return x\n return y\n\n def grad_mindspore_impl(self, params, grad_ys):\n grad_net = GradOfAllInputsAndParams(self)\n grad_net.set_train()\n grad_out = grad_net(params, grad_ys)\n return grad_out\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_side_effect_grad_two_addn_switch():\n net = SideEffectTwoAddnSwitchNet()\n grad_ys = Tensor([18.0], ms.float32)\n inputs = Tensor([9.0], ms.float32)\n out1 = net.grad_mindspore_impl(inputs, grad_ys)\n net = SideEffectTwoAddnSwitchNet()\n try:\n context.set_context(mode=context.PYNATIVE_MODE)\n out2 = net.grad_mindspore_impl(inputs, grad_ys)\n allclose_nparray(out1[0][0].asnumpy(), out2[0][0].asnumpy(), 0.001, 0.001)\n finally:\n context.set_context(mode=context.GRAPH_MODE)\n\n\nclass SideEffectGradIfNet(Cell):\n def __init__(self):\n super().__init__()\n self.relu = P.ReLU()\n a = np.full((1,), 5, dtype=np.float32)\n self.a = Parameter(Tensor(a), name=\"a\")\n b = np.full((1,), 4, dtype=np.float32)\n self.b = Parameter(Tensor(b), name=\"b\")\n\n def construct(self, x):\n if self.a > self.b:\n x = self.relu(x)\n out = x\n else:\n out = x + 2\n return out\n\n def grad_mindspore_impl(self, params, grad_ys):\n grad_net = GradOfFirstInput(self)\n grad_net.set_train()\n grad_out = grad_net(params, grad_ys)\n return grad_out\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email 
protected]_onecard\ndef test_side_effect_grad_if():\n context.set_context(mode=context.GRAPH_MODE)\n net = SideEffectGradIfNet()\n grad_ys = Tensor([18.0], ms.float32)\n inputs = Tensor([9.0], ms.float32)\n out1 = net.grad_mindspore_impl(inputs, grad_ys)\n net = SideEffectGradIfNet()\n try:\n context.set_context(mode=context.PYNATIVE_MODE)\n out2 = net.grad_mindspore_impl(inputs, grad_ys)\n allclose_nparray(out1.asnumpy(), out2.asnumpy(), 0.001, 0.001)\n finally:\n context.set_context(mode=context.GRAPH_MODE)\n\n\nclass OneInputBprop(Cell):\n def __init__(self):\n super().__init__()\n self.op = P.ReLU()\n\n def construct(self, x):\n return self.op(x)\n\n def bprop(self, x, out, dout):\n return (5 * x,)\n\n\nclass HighGrad(Cell):\n def __init__(self, network, grad_list, sens_param=False, real_inputs_count=None):\n super().__init__()\n self.grads = [network]\n for i in range(len(grad_list) - 1):\n _grad = grad_list[i](self.grads[i], sens_param=False)\n self.grads.append(_grad)\n self.final_grad = grad_list[-1](self.grads[-1],\n sens_param=sens_param, real_inputs_count=real_inputs_count)\n\n def construct(self, *inputs):\n return self.final_grad(*inputs)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_highgrad_one_input_sec_grad():\n net = OneInputBprop()\n x = Tensor(np.array([2, 2]).astype(np.float32))\n grad_net = HighGrad(net, [GradOfFirstInput, GradOfFirstInput])\n dxdx = grad_net(x)\n assert (dxdx.asnumpy() == np.array([5, 5]).astype(np.float32)).all()\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_highgrad_one_input_third_grad():\n net = OneInputBprop()\n x = Tensor(np.array([2, 2]).astype(np.float32))\n grad_net = HighGrad(\n net, [GradOfFirstInput, GradOfFirstInput, GradOfFirstInput])\n third_grad = grad_net(x)\n assert (third_grad.asnumpy() == np.array([0, 0]).astype(np.float32)).all()\n\n\nclass SideEffectControlFlowAssignDependWhileNet(Cell):\n def __init__(self):\n super().__init__()\n self.parameter1 = Parameter(Tensor([199.0], ms.float32), name=\"parameter1\")\n self.assign = P.Assign()\n self.assignadd = P.AssignAdd()\n self.addn = P.AddN()\n self.depend = P.Depend()\n\n def construct(self, x, y, z):\n p1 = self.assign(self.parameter1, x)\n while self.parameter1 < y:\n x = self.addn((x, x))\n p2 = self.assignadd(self.parameter1, z)\n self.depend(p2, p1)\n return x\n\n def grad_mindspore_impl(self, params1, params2, params3, grad_ys):\n grad_net = GradOfAllInputsAndParams(self)\n grad_net.set_train()\n grad_out = grad_net(params1, params2, params3, grad_ys)\n return grad_out\n\n\n# Now the case can't pass because the GPU RT problem, so only run on Ascend current time.\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_side_effect_grad_control_flow_assign_depend_while_net():\n context.set_context(mode=context.GRAPH_MODE)\n net = SideEffectControlFlowAssignDependWhileNet()\n grad_ys = Tensor([18.0], ms.float32)\n inputs1 = Tensor([9.0], ms.float32)\n inputs2 = Tensor([6.0], ms.float32)\n inputs3 = Tensor([3.0], ms.float32)\n out1 = net.grad_mindspore_impl(inputs1, inputs2, inputs3, grad_ys)\n try:\n context.set_context(mode=context.PYNATIVE_MODE)\n net = SideEffectControlFlowAssignDependWhileNet()\n out2 = net.grad_mindspore_impl(inputs1, inputs2, inputs3, grad_ys)\n allclose_nparray(out1[0][0].asnumpy(), out2[0][0].asnumpy(), 
0.001, 0.001)\n allclose_nparray(out1[1][0].asnumpy(), out2[1][0].asnumpy(), 0.001, 0.001)\n finally:\n context.set_context(mode=context.GRAPH_MODE)\n",
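# --- Editor's note (not part of the original test file) ----------------------
# A NumPy-only sketch of why the HighGrad tests above expect [5, 5] for the second
# gradient and [0, 0] for the third: OneInputBprop overrides its backward pass to
# return 5 * x, so one application of GradOfFirstInput yields g1(x) = 5 * x;
# differentiating that again gives the constant 5, and a third time gives 0.
# Central differences are enough to check the arithmetic.
import numpy as np

def g1(x):
    return 5.0 * x  # what the custom bprop reports as "the gradient"

def numdiff(f, x, eps=1e-3):
    return (f(x + eps) - f(x - eps)) / (2.0 * eps)

x = np.array([2.0, 2.0])
print(numdiff(g1, x))                          # ~[5. 5.]  (second-order test)
print(numdiff(lambda v: numdiff(g1, v), x))    # ~[0. 0.]  (third-order test)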
"# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"export file.\"\"\"\r\nimport numpy as np\r\n\r\nfrom mindspore import context, Tensor\r\nfrom mindspore.train.serialization import export, load_param_into_net\r\nfrom src.config import get_config\r\nfrom src.utils import get_network, resume_model\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n config = get_config()\r\n context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)\r\n\r\n G, D = get_network(config)\r\n\r\n # Use BatchNorm2d with batchsize=1, affine=False, training=True instead of InstanceNorm2d\r\n # Use real mean and varance rather than moving_men and moving_varance in BatchNorm2d\r\n\r\n G.set_train(True)\r\n param_G, _ = resume_model(config, G, D)\r\n load_param_into_net(G, param_G)\r\n\r\n input_array = Tensor(np.random.uniform(-1.0, 1.0, size=(1, 3, 128, 128)).astype(np.float32))\r\n input_label = Tensor(np.random.uniform(-1.0, 1.0, size=(1, 5)).astype(np.float32))\r\n G_file = f\"StarGAN_Generator\"\r\n export(G, input_array, file_name=G_file, file_format=config.file_format)\r\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Perplexity\"\"\"\nimport math\nimport numpy as np\nfrom mindspore._checkparam import Validator as validator\nfrom .metric import Metric, rearrange_inputs\n\n\nclass Perplexity(Metric):\n r\"\"\"\n Computes perplexity. Perplexity is a measurement about how well a probability distribution or a model predicts a\n sample. A low perplexity indicates the model can predict the sample well. The function is shown as follows:\n\n .. math::\n PP(W)=P(w_{1}w_{2}...w_{N})^{-\\frac{1}{N}}=\\sqrt[N]{\\frac{1}{P(w_{1}w_{2}...w_{N})}}\n\n Args:\n ignore_label (int): Index of an invalid label to be ignored when counting. If set to `None`, it will include all\n entries. Default: -1.\n\n Supported Platforms:\n ``Ascend`` ``GPU`` ``CPU``\n\n Note:\n The method `update` must be called with the form `update(preds, labels)`.\n\n Examples:\n >>> import numpy as np\n >>> from mindspore import nn, Tensor\n >>>\n >>> x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))\n >>> y = Tensor(np.array([1, 0, 1]))\n >>> metric = nn.Perplexity(ignore_label=None)\n >>> metric.clear()\n >>> metric.update(x, y)\n >>> perplexity = metric.eval()\n >>> print(perplexity)\n 2.231443166940565\n \"\"\"\n\n def __init__(self, ignore_label=None):\n super(Perplexity, self).__init__()\n\n if ignore_label is None:\n self.ignore_label = ignore_label\n else:\n self.ignore_label = validator.check_value_type(\"ignore_label\", ignore_label, [int])\n self.clear()\n\n def clear(self):\n \"\"\"Clears the internal evaluation result.\"\"\"\n self._sum_metric = 0.0\n self._num_inst = 0\n\n @rearrange_inputs\n def update(self, *inputs):\n \"\"\"\n Updates the internal evaluation result: math:preds and :math:labels.\n\n Args:\n inputs: Input `preds` and `labels`. 
`preds` and `labels` are Tensor, list or numpy.ndarray.\n `preds` is the predicted values, `labels` is the label of the data.\n The shape of `preds` and `labels` are both :math:`(N, C)`.\n\n Raises:\n ValueError: If the number of the inputs is not 2.\n RuntimeError: If preds and labels should have different length.\n RuntimeError: If label shape should not be equal to pred shape.\n \"\"\"\n if len(inputs) != 2:\n raise ValueError('Perplexity needs 2 inputs (preds, labels), but got {}.'.format(len(inputs)))\n\n preds = [self._convert_data(inputs[0])]\n labels = [self._convert_data(inputs[1])]\n\n if len(preds) != len(labels):\n raise RuntimeError('preds and labels should have the same length, but the length of preds is{}, '\n 'the length of labels is {}.'.format(len(preds), len(labels)))\n\n loss = 0.\n num = 0\n for label, pred in zip(labels, preds):\n if label.size != pred.size / pred.shape[-1]:\n raise RuntimeError(\"shape mismatch: label shape should be equal to pred shape, but got label shape \"\n \"is {}, pred shape is {}.\".format(label.shape, pred.shape))\n label = label.reshape((label.size,))\n label_expand = label.astype(int)\n label_expand = np.expand_dims(label_expand, axis=1)\n first_indices = np.arange(label_expand.shape[0])[:, None]\n pred = np.squeeze(pred[first_indices, label_expand])\n if self.ignore_label is not None:\n ignore = (label == self.ignore_label).astype(pred.dtype)\n num -= np.sum(ignore)\n pred = pred * (1 - ignore) + ignore\n loss -= np.sum(np.log(np.maximum(1e-10, pred)))\n num += pred.size\n self._sum_metric += loss\n self._num_inst += num\n\n def eval(self):\n r\"\"\"\n Returns the current evaluation result.\n\n Returns:\n float, the computed result.\n\n Raises:\n RuntimeError: If the sample size is 0.\n \"\"\"\n if self._num_inst == 0:\n raise RuntimeError('Perplexity can not be calculated, because the number of samples is 0.')\n\n return math.exp(self._sum_metric / self._num_inst)\n",
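# --- Editor's note (not part of the original metric) -------------------------
# A standalone NumPy re-computation of the docstring example above, so the quoted
# value 2.231443166940565 is traceable: perplexity = exp of the mean negative log
# probability assigned to the true labels.
import math
import numpy as np

preds = np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])
labels = np.array([1, 0, 1])

p_true = preds[np.arange(len(labels)), labels]      # [0.5, 0.3, 0.6]
loss = -np.sum(np.log(np.maximum(1e-10, p_true)))   # summed cross-entropy
print(math.exp(loss / len(labels)))                 # ~2.231443166940565, matching Perplexity.eval()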
"# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"Face Recognition dataset.\"\"\"\r\nimport sys\r\nimport os\r\nimport math\r\nimport pickle\r\nfrom collections import defaultdict\r\nimport numpy as np\r\n\r\nfrom PIL import Image, ImageFile\r\nfrom model_utils.config import config\r\nfrom mindspore.communication.management import get_group_size, get_rank\r\nImageFile.LOAD_TRUNCATED_IMAGES = True\r\n\r\n__all__ = ['DistributedCustomSampler', 'CustomDataset']\r\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')\r\n\r\nclass DistributedCustomSampler:\r\n '''DistributedCustomSampler'''\r\n def __init__(self, dataset, num_replicas=None, rank=None, is_distributed=1, shuffle=True, k=2):\r\n assert isinstance(dataset, CustomDataset), 'Custom Sampler is Only Support Custom Dataset!!!'\r\n if is_distributed:\r\n if num_replicas is None:\r\n num_replicas = get_group_size()\r\n if rank is None:\r\n rank = get_rank()\r\n else:\r\n if num_replicas is None:\r\n num_replicas = 1\r\n if rank is None:\r\n rank = 0\r\n self.dataset = dataset\r\n self.num_replicas = num_replicas\r\n self.rank = rank\r\n self.epoch = 0\r\n self.ratio = 4.0\r\n self.data_len = len(self.dataset.classes)\r\n self.num_ids = int(math.ceil(self.data_len * 1.0 / self.num_replicas))\r\n self.total_ids = self.num_ids * self.num_replicas\r\n self.num_samples = math.ceil(len(self.dataset) * 1.0 / self.num_replicas)\r\n self.total_size = self.num_samples * self.num_replicas\r\n self.shuffle = shuffle\r\n self.k = k\r\n self.epoch_gen = 1\r\n\r\n def _sample_(self, indices):\r\n \"\"\"sample\"\"\"\r\n sampled = []\r\n\r\n for indice in indices:\r\n sampled_id = indice\r\n if config.device_target == 'CPU':\r\n if self.k >= len(sampled_id):\r\n continue\r\n sampled.extend(np.random.choice(self.dataset.id2range[sampled_id][:], self.k).tolist())\r\n\r\n return sampled\r\n\r\n def __iter__(self):\r\n if self.shuffle:\r\n # Note, the self.epoch parameter does not get updated in DE\r\n self.epoch_gen = (self.epoch_gen + 1) & 0xffffffff\r\n np.random.seed(self.epoch_gen)\r\n indices = np.random.permutation(len(self.dataset.classes))\r\n indices = indices.tolist()\r\n else:\r\n indices = list(range(len(self.dataset.classes)))\r\n\r\n indices += indices[:(self.total_ids - len(indices))]\r\n assert len(indices) == self.total_ids\r\n\r\n indices = indices[self.rank*self.num_ids:(self.rank+1)*self.num_ids]\r\n assert len(indices) == self.num_ids\r\n sampled_idxs = self._sample_(indices)\r\n return iter(sampled_idxs)\r\n\r\n def __len__(self):\r\n return self.num_ids * self.k\r\n\r\n def set_epoch(self, epoch):\r\n self.epoch = epoch\r\n\r\n def merge_indices(self, list1, list2):\r\n '''merge_indices'''\r\n list_result = []\r\n ct_1, ct_2 = 0, 0\r\n for i in range(self.data_len):\r\n if (i+1) % int(self.ratio+1) == 0:\r\n 
list_result.append(list2[ct_2])\r\n ct_2 += 1\r\n else:\r\n list_result.append(list1[ct_1])\r\n ct_1 += 1\r\n return list_result\r\n\r\n\r\ndef has_file_allowed_extension(filename, extensions):\r\n \"\"\"Checks if a file is an allowed extension.\r\n\r\n Args:\r\n filename (string): path to a file\r\n extensions (tuple of strings): extensions to consider (lowercase)\r\n\r\n Returns:\r\n bool: True if the filename ends with one of given extensions\r\n \"\"\"\r\n return filename.lower().endswith(extensions)\r\n\r\n\r\ndef make_dataset(dir_1, class_to_idx, extensions=None, is_valid_file=None):\r\n '''make_dataset'''\r\n images = []\r\n dir_1 = os.path.expanduser(dir_1)\r\n if not (extensions is None) ^ (is_valid_file is None):\r\n raise ValueError(\"Both extensions and is_valid_file cannot be None or not None at the same time\")\r\n if extensions is not None:\r\n def f(x):\r\n return has_file_allowed_extension(x, extensions)\r\n is_valid_file = f\r\n for target in sorted(class_to_idx.keys()):\r\n d = os.path.join(dir_1, target)\r\n if not os.path.isdir(d):\r\n continue\r\n for root, _, fnames in sorted(os.walk(d)):\r\n for fname in sorted(fnames):\r\n path = os.path.join(root, fname)\r\n if is_valid_file(path):\r\n item = (path, class_to_idx[target])\r\n images.append(item)\r\n\r\n return images\r\n\r\nclass ImageFolderDataset:\r\n '''ImageFolderDataset'''\r\n def __init__(self, root, cache_path, is_distributed):\r\n\r\n if not os.path.isfile(cache_path):\r\n self.classes, self.classes_to_idx = self._find_classes(root)\r\n self.samples = make_dataset(root, self.classes_to_idx, IMG_EXTENSIONS, None)\r\n self.id2range = self._build_id2range()\r\n cache = dict()\r\n cache['classes'] = self.classes\r\n cache['classes_to_idx'] = self.classes_to_idx\r\n cache['samples'] = self.samples\r\n cache['id2range'] = self.id2range\r\n if is_distributed:\r\n print(\"******* TODO: All workers will write cache... 
Need to only dump when rank == 0 ******\")\r\n if get_rank() == 0:\r\n with open(cache_path, 'wb') as fw:\r\n pickle.dump(cache, fw)\r\n print('local dump cache:{}'.format(cache_path))\r\n else:\r\n with open(cache_path, 'wb') as fw:\r\n pickle.dump(cache, fw)\r\n print('local dump cache:{}'.format(cache_path))\r\n else:\r\n print('loading cache from %s'%cache_path)\r\n with open(cache_path, 'rb') as fr:\r\n cache = pickle.load(fr)\r\n self.classes, self.classes_to_idx, self.samples, self.id2range = cache['classes'], \\\r\n cache['classes_to_idx'], \\\r\n cache['samples'], cache['id2range']\r\n\r\n self.all_image_idxs = range(len(self.samples))\r\n\r\n self.classes = list(self.id2range.keys())\r\n\r\n def _find_classes(self, dir_1):\r\n \"\"\"\r\n Finds the class folders in a dataset.\r\n\r\n Args:\r\n dir (string): Root directory path.\r\n\r\n Returns:\r\n tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.\r\n\r\n Ensures:\r\n No class is a subdirectory of another.\r\n \"\"\"\r\n if sys.version_info >= (3, 5):\r\n # Faster and available in Python 3.5 and above\r\n classes = [d.name for d in os.scandir(dir_1) if d.is_dir()]\r\n else:\r\n classes = [d for d in os.listdir(dir_1) if os.path.isdir(os.path.join(dir_1, d))]\r\n classes.sort()\r\n class_to_idx = {classes[i]: i for i in range(len(classes))}\r\n return classes, class_to_idx\r\n\r\n def _build_id2range(self):\r\n '''_build_id2range'''\r\n id2range = defaultdict(list)\r\n ret_range = defaultdict(list)\r\n for idx, sample in enumerate(self.samples):\r\n label = sample[1]\r\n id2range[label].append((sample, idx))\r\n for key in id2range:\r\n id2range[key].sort(key=lambda x: int(os.path.basename(x[0][0]).split('.')[0]))\r\n for item in id2range[key]:\r\n ret_range[key].append(item[1])\r\n return ret_range\r\n\r\n def __getitem__(self, index):\r\n return self.samples[index]\r\n\r\n def __len__(self):\r\n return len(self.samples)\r\n\r\n\r\ndef pil_loader(path):\r\n \"\"\"\r\n Loads the image\r\n Args:\r\n path: path to the image\r\n Returns:\r\n Object: pil_loader\r\n \"\"\"\r\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\r\n with open(path, 'rb') as f:\r\n img = Image.open(f)\r\n return img.convert('RGB')\r\n\r\n\r\nclass CustomDataset:\r\n '''CustomDataset'''\r\n def __init__(self, root, cache_path, is_distributed=1, transform=None, target_transform=None,\r\n loader=pil_loader):\r\n self.dataset = ImageFolderDataset(root, cache_path, is_distributed)\r\n print('CustomDataset len(dataset):{}'.format(len(self.dataset)))\r\n self.loader = loader\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n self.classes = self.dataset.classes\r\n self.id2range = self.dataset.id2range\r\n\r\n def __getitem__(self, index):\r\n path, target = self.dataset[index]\r\n sample = self.loader(path)\r\n\r\n if self.transform is not None:\r\n sample = self.transform(sample)\r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n return sample, target\r\n\r\n def __len__(self):\r\n return len(self.dataset)\r\n",
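# --- Editor's note (not part of the original dataset module) -----------------
# An illustrative, pure-Python walk-through of the identity sharding done by
# DistributedCustomSampler.__iter__ above. The class count, rank layout and the
# id2range mapping below are invented toy values; only the arithmetic mirrors the
# sampler (pad the shuffled identity list to a multiple of num_replicas, slice out
# this rank's identities, then draw k images per identity).
import math
import numpy as np

num_classes, num_replicas, rank, k = 10, 4, 1, 2
id2range = {c: list(range(c * 5, c * 5 + 5)) for c in range(num_classes)}  # fake image indices

num_ids = int(math.ceil(num_classes * 1.0 / num_replicas))    # identities per rank
total_ids = num_ids * num_replicas

np.random.seed(1)
indices = np.random.permutation(num_classes).tolist()
indices += indices[:(total_ids - len(indices))]               # pad so it splits evenly
shard = indices[rank * num_ids:(rank + 1) * num_ids]          # identities for this rank

sampled = [img for ident in shard for img in np.random.choice(id2range[ident], k).tolist()]
print(shard, sampled, len(sampled) == num_ids * k)            # __len__ == num_ids * k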
"# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"cnn_ctc dataset\"\"\"\r\n\r\nimport sys\r\nimport pickle\r\nimport math\r\nimport six\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport lmdb\r\nfrom mindspore.communication.management import get_rank, get_group_size\r\nfrom src.model_utils.config import config\r\nfrom .util import CTCLabelConverter\r\n\r\n\r\nclass NormalizePAD():\r\n\r\n def __init__(self, max_size, PAD_type='right'):\r\n self.max_size = max_size\r\n self.PAD_type = PAD_type\r\n\r\n def __call__(self, img):\r\n # toTensor\r\n img = np.array(img, dtype=np.float32)\r\n img = img.transpose([2, 0, 1])\r\n img = img.astype(np.float)\r\n img = np.true_divide(img, 255)\r\n # normalize\r\n img = np.subtract(img, 0.5)\r\n img = np.true_divide(img, 0.5)\r\n\r\n _, _, w = img.shape\r\n Pad_img = np.zeros(shape=self.max_size, dtype=np.float32)\r\n Pad_img[:, :, :w] = img # right pad\r\n if self.max_size[2] != w: # add border Pad\r\n Pad_img[:, :, w:] = np.tile(np.expand_dims(img[:, :, w - 1], 2), (1, 1, self.max_size[2] - w))\r\n\r\n return Pad_img\r\n\r\n\r\nclass AlignCollate():\r\n\r\n def __init__(self, imgH=32, imgW=100):\r\n self.imgH = imgH\r\n self.imgW = imgW\r\n\r\n def __call__(self, images):\r\n\r\n resized_max_w = self.imgW\r\n input_channel = 3\r\n transform = NormalizePAD((input_channel, self.imgH, resized_max_w))\r\n\r\n resized_images = []\r\n for image in images:\r\n w, h = image.size\r\n ratio = w / float(h)\r\n if math.ceil(self.imgH * ratio) > self.imgW:\r\n resized_w = self.imgW\r\n else:\r\n resized_w = math.ceil(self.imgH * ratio)\r\n\r\n resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)\r\n resized_images.append(transform(resized_image))\r\n\r\n image_tensors = np.concatenate([np.expand_dims(t, 0) for t in resized_images], 0)\r\n\r\n return image_tensors\r\n\r\n\r\ndef get_img_from_lmdb(env, index):\r\n with env.begin(write=False) as txn:\r\n label_key = 'label-%09d'.encode() % index\r\n label = txn.get(label_key).decode('utf-8')\r\n img_key = 'image-%09d'.encode() % index\r\n imgbuf = txn.get(img_key)\r\n\r\n buf = six.BytesIO()\r\n buf.write(imgbuf)\r\n buf.seek(0)\r\n try:\r\n img = Image.open(buf).convert('RGB') # for color image\r\n\r\n except IOError:\r\n print(f'Corrupted image for {index}')\r\n # make dummy image and dummy label for corrupted image.\r\n img = Image.new('RGB', (config.IMG_W, config.IMG_H))\r\n label = '[dummy_label]'\r\n\r\n label = label.lower()\r\n\r\n return img, label\r\n\r\n\r\nclass ST_MJ_Generator_batch_fixed_length:\r\n def __init__(self):\r\n self.align_collector = AlignCollate()\r\n self.converter = CTCLabelConverter(config.CHARACTER)\r\n self.env = lmdb.open(config.TRAIN_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False,\r\n meminit=False)\r\n if not self.env:\r\n print('cannot create lmdb from %s' % 
(config.TRAIN_DATASET_PATH))\r\n raise ValueError(config.TRAIN_DATASET_PATH)\r\n\r\n with open(config.TRAIN_DATASET_INDEX_PATH, 'rb') as f:\r\n self.st_mj_filtered_index_list = pickle.load(f)\r\n\r\n print(f'num of samples in ST_MJ dataset: {len(self.st_mj_filtered_index_list)}')\r\n self.dataset_size = len(self.st_mj_filtered_index_list) // config.TRAIN_BATCH_SIZE\r\n self.batch_size = config.TRAIN_BATCH_SIZE\r\n\r\n def __len__(self):\r\n return self.dataset_size\r\n\r\n def __getitem__(self, item):\r\n img_ret = []\r\n text_ret = []\r\n\r\n for i in range(item * self.batch_size, (item + 1) * self.batch_size):\r\n index = self.st_mj_filtered_index_list[i]\r\n img, label = get_img_from_lmdb(self.env, index)\r\n\r\n img_ret.append(img)\r\n text_ret.append(label)\r\n\r\n img_ret = self.align_collector(img_ret)\r\n text_ret, length = self.converter.encode(text_ret)\r\n\r\n label_indices = []\r\n for i, _ in enumerate(length):\r\n for j in range(length[i]):\r\n label_indices.append((i, j))\r\n label_indices = np.array(label_indices, np.int64)\r\n sequence_length = np.array([config.FINAL_FEATURE_WIDTH] * config.TRAIN_BATCH_SIZE, dtype=np.int32)\r\n text_ret = text_ret.astype(np.int32)\r\n\r\n return img_ret, label_indices, text_ret, sequence_length\r\n\r\nclass ST_MJ_Generator_batch_fixed_length_para:\r\n def __init__(self):\r\n self.align_collector = AlignCollate()\r\n self.converter = CTCLabelConverter(config.CHARACTER)\r\n self.env = lmdb.open(config.TRAIN_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False,\r\n meminit=False)\r\n if not self.env:\r\n print('cannot create lmdb from %s' % (config.TRAIN_DATASET_PATH))\r\n raise ValueError(config.TRAIN_DATASET_PATH)\r\n\r\n with open(config.TRAIN_DATASET_INDEX_PATH, 'rb') as f:\r\n self.st_mj_filtered_index_list = pickle.load(f)\r\n\r\n print(f'num of samples in ST_MJ dataset: {len(self.st_mj_filtered_index_list)}')\r\n self.rank_id = get_rank()\r\n self.rank_size = get_group_size()\r\n self.dataset_size = len(self.st_mj_filtered_index_list) // config.TRAIN_BATCH_SIZE // self.rank_size\r\n self.batch_size = config.TRAIN_BATCH_SIZE\r\n\r\n def __len__(self):\r\n return self.dataset_size\r\n\r\n def __getitem__(self, item):\r\n img_ret = []\r\n text_ret = []\r\n\r\n rank_item = (item * self.rank_size) + self.rank_id\r\n for i in range(rank_item * self.batch_size, (rank_item + 1) * self.batch_size):\r\n index = self.st_mj_filtered_index_list[i]\r\n img, label = get_img_from_lmdb(self.env, index)\r\n\r\n img_ret.append(img)\r\n text_ret.append(label)\r\n\r\n img_ret = self.align_collector(img_ret)\r\n text_ret, length = self.converter.encode(text_ret)\r\n\r\n label_indices = []\r\n for i, _ in enumerate(length):\r\n for j in range(length[i]):\r\n label_indices.append((i, j))\r\n label_indices = np.array(label_indices, np.int64)\r\n sequence_length = np.array([config.FINAL_FEATURE_WIDTH] * config.TRAIN_BATCH_SIZE, dtype=np.int32)\r\n text_ret = text_ret.astype(np.int32)\r\n\r\n return img_ret, label_indices, text_ret, sequence_length\r\n\r\n\r\ndef IIIT_Generator_batch():\r\n max_len = int((26 + 1) // 2)\r\n\r\n align_collector = AlignCollate()\r\n\r\n converter = CTCLabelConverter(config.CHARACTER)\r\n\r\n env = lmdb.open(config.TEST_DATASET_PATH, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)\r\n if not env:\r\n print('cannot create lmdb from %s' % (config.TEST_DATASET_PATH))\r\n sys.exit(0)\r\n\r\n with env.begin(write=False) as txn:\r\n nSamples = int(txn.get('num-samples'.encode()))\r\n nSamples 
= nSamples\r\n\r\n # Filtering\r\n filtered_index_list = []\r\n for index in range(nSamples):\r\n index += 1 # lmdb starts with 1\r\n label_key = 'label-%09d'.encode() % index\r\n label = txn.get(label_key).decode('utf-8')\r\n\r\n if len(label) > max_len:\r\n continue\r\n\r\n illegal_sample = False\r\n for char_item in label.lower():\r\n if char_item not in config.CHARACTER:\r\n illegal_sample = True\r\n break\r\n if illegal_sample:\r\n continue\r\n\r\n filtered_index_list.append(index)\r\n\r\n img_ret = []\r\n text_ret = []\r\n\r\n print(f'num of samples in IIIT dataset: {len(filtered_index_list)}')\r\n\r\n for index in filtered_index_list:\r\n\r\n img, label = get_img_from_lmdb(env, index)\r\n\r\n img_ret.append(img)\r\n text_ret.append(label)\r\n\r\n if len(img_ret) == config.TEST_BATCH_SIZE:\r\n img_ret = align_collector(img_ret)\r\n text_ret, length = converter.encode(text_ret)\r\n\r\n label_indices = []\r\n for i, _ in enumerate(length):\r\n for j in range(length[i]):\r\n label_indices.append((i, j))\r\n label_indices = np.array(label_indices, np.int64)\r\n sequence_length = np.array([26] * config.TEST_BATCH_SIZE, dtype=np.int32)\r\n text_ret = text_ret.astype(np.int32)\r\n\r\n yield img_ret, label_indices, text_ret, sequence_length, length\r\n\r\n img_ret = []\r\n text_ret = []\r\n",
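# --- Editor's note (not part of the original dataset module) -----------------
# A minimal example of the sparse label layout produced in __getitem__ and
# IIIT_Generator_batch above: one (sample_index, char_position) pair per character.
# The lengths below stand in for whatever CTCLabelConverter.encode returns.
import numpy as np

length = [3, 2]   # e.g. two decoded labels with 3 and 2 characters
label_indices = np.array(
    [(i, j) for i, l in enumerate(length) for j in range(l)], np.int64)
print(label_indices)
# [[0 0]
#  [0 1]
#  [0 2]
#  [1 0]
#  [1 1]]
# Paired with the flattened character codes (text_ret) this is the sparse label
# format the CTC loss consumes, while sequence_length stays fixed at the model's
# final feature width.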
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"train\"\"\"\nimport os\nimport time\nimport argparse\nimport numpy as np\nfrom mindspore import nn\nfrom mindspore import Tensor\nfrom mindspore import context\nfrom mindspore.context import ParallelMode\nfrom mindspore.common import dtype as mstype\nfrom mindspore.communication.management import init, get_group_size\nimport mindspore.ops as ops\nfrom src.dataset import create_dataset\nfrom src.ckpt_util import save_ckpt\nfrom src.model import Generator, Discriminator\nfrom src.cell import GenWithLossCell, DisWithLossCell, TrainOneStepCell\n\n\ndef preLauch():\n \"\"\"parse the console argument\"\"\"\n parser = argparse.ArgumentParser(description='MindSpore cgan training')\n parser.add_argument(\"--distribute\", type=bool, default=False,\n help=\"Run distribute, default is false.\")\n parser.add_argument('--device_id', type=int, default=0,\n help='device id of Ascend (Default: 0)')\n parser.add_argument('--ckpt_dir', type=str,\n default='ckpt', help='checkpoint dir of CGAN')\n parser.add_argument('--dataset', type=str, default='data/MNIST_Data/train',\n help='dataset dir (default data/MNISt_Data/train)')\n args = parser.parse_args()\n\n # if not exists 'imgs4', 'gif' or 'ckpt_dir', make it\n if not os.path.exists(args.ckpt_dir):\n os.mkdir(args.ckpt_dir)\n # deal with the distribute analyze problem\n if args.distribute:\n device_id = args.device_id\n context.set_context(save_graphs=False,\n device_id=device_id,\n device_target=\"Ascend\",\n mode=context.GRAPH_MODE)\n init()\n args.device_num = get_group_size()\n context.set_auto_parallel_context(gradients_mean=True,\n device_num=args.device_num,\n parallel_mode=ParallelMode.DATA_PARALLEL)\n else:\n device_id = args.device_id\n args.device_num = 1\n context.set_context(save_graphs=False,\n mode=context.GRAPH_MODE,\n device_target=\"Ascend\")\n context.set_context(device_id=device_id)\n return args\n\n\ndef main():\n # before training, we should set some arguments\n args = preLauch()\n\n # training argument\n batch_size = 128\n input_dim = 100\n epoch_start = 0\n epoch_end = 51\n lr = 0.001\n\n dataset = create_dataset(args.dataset,\n flatten_size=28 * 28,\n batch_size=batch_size,\n num_parallel_workers=args.device_num)\n\n # create G Cell & D Cell\n netG = Generator(input_dim)\n netD = Discriminator(batch_size)\n # create WithLossCell\n netG_with_loss = GenWithLossCell(netG, netD)\n netD_with_loss = DisWithLossCell(netG, netD)\n # create optimizer cell\n optimizerG = nn.Adam(netG.trainable_params(), lr)\n optimizerD = nn.Adam(netD.trainable_params(), lr)\n\n net_train = TrainOneStepCell(netG_with_loss,\n netD_with_loss,\n optimizerG,\n optimizerD)\n\n netG.set_train()\n netD.set_train()\n\n # latent_code_eval = Tensor(np.random.randn(\n # 200, input_dim), dtype=mstype.float32)\n\n # label_eval = np.zeros((200, 10))\n # for i in range(200):\n # j = i // 20\n # 
label_eval[i][j] = 1\n # label_eval = Tensor(label_eval, dtype=mstype.float32)\n\n data_size = dataset.get_dataset_size()\n print(\"data-size\", data_size)\n print(\"=========== start training ===========\")\n for epoch in range(epoch_start, epoch_end):\n step = 0\n start = time.time()\n for data in dataset:\n img = data[0]\n label = data[1]\n img = ops.Reshape()(img, (batch_size, 1, 28, 28))\n latent_code = Tensor(np.random.randn(\n batch_size, input_dim), dtype=mstype.float32)\n dout, gout = net_train(img, latent_code, label)\n step += 1\n\n if step % data_size == 0:\n end = time.time()\n pref = (end-start)*1000 / data_size\n print(\"epoch {}, {:.3f} ms per step, d_loss is {:.4f}, g_loss is {:.4f}\".format(epoch,\n pref, dout.asnumpy(),\n gout.asnumpy()))\n\n save_ckpt(args, netG, netD, epoch)\n print(\"===========training success================\")\n\nif __name__ == '__main__':\n main()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"Evaluation for StarGAN\"\"\"\r\nimport os\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\nfrom mindspore import context\r\nfrom mindspore import Tensor\r\nfrom mindspore.train.serialization import load_param_into_net\r\nfrom mindspore.common import dtype as mstype\r\nimport mindspore.ops as ops\r\n\r\nfrom src.utils import resume_model, create_labels, denorm, get_network\r\nfrom src.config import get_config\r\nfrom src.dataset import dataloader\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=1)\r\n config = get_config()\r\n G, D = get_network(config)\r\n para_g, _ = resume_model(config, G, D)\r\n load_param_into_net(G, para_g)\r\n\r\n if not os.path.exists(config.result_dir):\r\n os.mkdir(config.result_dir)\r\n # Define Dataset\r\n\r\n data_path = config.celeba_image_dir\r\n attr_path = config.attr_path\r\n\r\n dataset, length = dataloader(img_path=data_path,\r\n attr_path=attr_path,\r\n batch_size=4,\r\n selected_attr=config.selected_attrs,\r\n device_num=config.num_workers,\r\n dataset=config.dataset,\r\n mode=config.mode,\r\n shuffle=False)\r\n\r\n op = ops.Concat(axis=3)\r\n ds = dataset.create_dict_iterator()\r\n print(length)\r\n print('Start Evaluating!')\r\n for i, data in enumerate(ds):\r\n result_list = ()\r\n img_real = denorm(data['image'].asnumpy())\r\n x_real = Tensor(data['image'], mstype.float32)\r\n result_list += (x_real,)\r\n c_trg_list = create_labels(data['attr'].asnumpy(), selected_attrs=config.selected_attrs)\r\n c_trg_list = Tensor(c_trg_list, mstype.float32)\r\n x_fake_list = []\r\n\r\n for c_trg in c_trg_list:\r\n\r\n x_fake = G(x_real, c_trg)\r\n x = Tensor(x_fake.asnumpy().copy())\r\n\r\n result_list += (x,)\r\n\r\n x_fake_list = op(result_list)\r\n\r\n result = denorm(x_fake_list.asnumpy())\r\n result = np.reshape(result, (-1, 768, 3))\r\n\r\n im = Image.fromarray(np.uint8(result))\r\n im.save(config.result_dir + '/test_{}.jpg'.format(i))\r\n print('Successful save image in ' + config.result_dir + '/test_{}.jpg'.format(i))\r\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"eval deeplabv3+\"\"\"\r\nimport os\r\nimport argparse\r\nimport numpy as np\r\nimport cv2\r\nfrom mindspore import Tensor\r\nimport mindspore.common.dtype as mstype\r\nimport mindspore.nn as nn\r\nfrom mindspore import context\r\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\r\nfrom src.deeplab_v3plus import DeepLabV3Plus\r\n\r\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", save_graphs=False,\r\n device_id=int(os.getenv('DEVICE_ID')))\r\n\r\n\r\ndef parse_args():\r\n \"\"\"parse_args\"\"\"\r\n parser = argparse.ArgumentParser('MindSpore DeepLabV3+ eval')\r\n\r\n # val data\r\n parser.add_argument('--data_root', type=str, default='', help='root path of val data')\r\n parser.add_argument('--data_lst', type=str, default='', help='list of val data')\r\n parser.add_argument('--batch_size', type=int, default=16, help='batch size')\r\n parser.add_argument('--crop_size', type=int, default=513, help='crop size')\r\n parser.add_argument('--image_mean', type=list, default=[103.53, 116.28, 123.675], help='image mean')\r\n parser.add_argument('--image_std', type=list, default=[57.375, 57.120, 58.395], help='image std')\r\n parser.add_argument('--scales', type=float, action='append', help='scales of evaluation')\r\n parser.add_argument('--flip', action='store_true', help='perform left-right flip')\r\n parser.add_argument('--ignore_label', type=int, default=255, help='ignore label')\r\n parser.add_argument('--num_classes', type=int, default=21, help='number of classes')\r\n\r\n # model\r\n parser.add_argument('--model', type=str, default='', help='select model')\r\n parser.add_argument('--freeze_bn', action='store_true', default=False, help='freeze bn')\r\n parser.add_argument('--ckpt_path', type=str, default='', help='model to evaluate')\r\n\r\n args, _ = parser.parse_known_args()\r\n return args\r\n\r\n\r\ndef cal_hist(a, b, n):\r\n k = (a >= 0) & (a < n)\r\n return np.bincount(n * a[k].astype(np.int32) + b[k], minlength=n ** 2).reshape(n, n)\r\n\r\n\r\ndef resize_long(img, long_size=513):\r\n h, w, _ = img.shape\r\n if h > w:\r\n new_h = long_size\r\n new_w = int(1.0 * long_size * w / h)\r\n else:\r\n new_w = long_size\r\n new_h = int(1.0 * long_size * h / w)\r\n imo = cv2.resize(img, (new_w, new_h))\r\n return imo\r\n\r\n\r\nclass BuildEvalNetwork(nn.Cell):\r\n def __init__(self, network):\r\n super(BuildEvalNetwork, self).__init__()\r\n self.network = network\r\n self.softmax = nn.Softmax(axis=1)\r\n\r\n def construct(self, input_data):\r\n output = self.network(input_data)\r\n output = self.softmax(output)\r\n return output\r\n\r\n\r\ndef pre_process(args, img_, crop_size=513):\r\n \"\"\"pre_process\"\"\"\r\n # resize\r\n img_ = resize_long(img_, crop_size)\r\n resize_h, resize_w, _ = img_.shape\r\n\r\n # mean, std\r\n image_mean = 
np.array(args.image_mean)\r\n image_std = np.array(args.image_std)\r\n img_ = (img_ - image_mean) / image_std\r\n\r\n # pad to crop_size\r\n pad_h = crop_size - img_.shape[0]\r\n pad_w = crop_size - img_.shape[1]\r\n if pad_h > 0 or pad_w > 0:\r\n img_ = cv2.copyMakeBorder(img_, 0, pad_h, 0, pad_w, cv2.BORDER_CONSTANT, value=0)\r\n\r\n # hwc to chw\r\n img_ = img_.transpose((2, 0, 1))\r\n return img_, resize_h, resize_w\r\n\r\n\r\ndef eval_batch(args, eval_net, img_lst, crop_size=513, flip=True):\r\n \"\"\"eval_batch\"\"\"\r\n result_lst = []\r\n batch_size = len(img_lst)\r\n batch_img = np.zeros((args.batch_size, 3, crop_size, crop_size), dtype=np.float32)\r\n resize_hw = []\r\n for l in range(batch_size):\r\n img_ = img_lst[l]\r\n img_, resize_h, resize_w = pre_process(args, img_, crop_size)\r\n batch_img[l] = img_\r\n resize_hw.append([resize_h, resize_w])\r\n\r\n batch_img = np.ascontiguousarray(batch_img)\r\n net_out = eval_net(Tensor(batch_img, mstype.float32))\r\n net_out = net_out.asnumpy()\r\n\r\n if flip:\r\n batch_img = batch_img[:, :, :, ::-1]\r\n net_out_flip = eval_net(Tensor(batch_img, mstype.float32))\r\n net_out += net_out_flip.asnumpy()[:, :, :, ::-1]\r\n\r\n for bs in range(batch_size):\r\n probs_ = net_out[bs][:, :resize_hw[bs][0], :resize_hw[bs][1]].transpose((1, 2, 0))\r\n ori_h, ori_w = img_lst[bs].shape[0], img_lst[bs].shape[1]\r\n probs_ = cv2.resize(probs_, (ori_w, ori_h))\r\n result_lst.append(probs_)\r\n\r\n return result_lst\r\n\r\n\r\ndef eval_batch_scales(args, eval_net, img_lst, scales,\r\n base_crop_size=513, flip=True):\r\n \"\"\"eval_batch_scales\"\"\"\r\n sizes_ = [int((base_crop_size - 1) * sc) + 1 for sc in scales]\r\n probs_lst = eval_batch(args, eval_net, img_lst, crop_size=sizes_[0], flip=flip)\r\n print(sizes_)\r\n for crop_size_ in sizes_[1:]:\r\n probs_lst_tmp = eval_batch(args, eval_net, img_lst, crop_size=crop_size_, flip=flip)\r\n for pl, _ in enumerate(probs_lst):\r\n probs_lst[pl] += probs_lst_tmp[pl]\r\n\r\n result_msk = []\r\n for i in probs_lst:\r\n result_msk.append(i.argmax(axis=2))\r\n return result_msk\r\n\r\n\r\ndef net_eval():\r\n \"\"\"net_eval\"\"\"\r\n args = parse_args()\r\n\r\n # data list\r\n with open(args.data_lst) as f:\r\n img_lst = f.readlines()\r\n\r\n # network\r\n if args.model == 'DeepLabV3plus_s16':\r\n network = DeepLabV3Plus('eval', args.num_classes, 16, args.freeze_bn)\r\n elif args.model == 'DeepLabV3plus_s8':\r\n network = DeepLabV3Plus('eval', args.num_classes, 8, args.freeze_bn)\r\n else:\r\n raise NotImplementedError('model [{:s}] not recognized'.format(args.model))\r\n\r\n eval_net = BuildEvalNetwork(network)\r\n\r\n # load model\r\n param_dict = load_checkpoint(args.ckpt_path)\r\n load_param_into_net(eval_net, param_dict)\r\n eval_net.set_train(False)\r\n\r\n # evaluate\r\n hist = np.zeros((args.num_classes, args.num_classes))\r\n batch_img_lst = []\r\n batch_msk_lst = []\r\n bi = 0\r\n image_num = 0\r\n for i, line in enumerate(img_lst):\r\n img_path, msk_path = line.strip().split(' ')\r\n img_path = os.path.join(args.data_root, img_path)\r\n msk_path = os.path.join(args.data_root, msk_path)\r\n img_ = cv2.imread(img_path)\r\n msk_ = cv2.imread(msk_path, cv2.IMREAD_GRAYSCALE)\r\n batch_img_lst.append(img_)\r\n batch_msk_lst.append(msk_)\r\n bi += 1\r\n if bi == args.batch_size:\r\n batch_res = eval_batch_scales(args, eval_net, batch_img_lst, scales=args.scales,\r\n base_crop_size=args.crop_size, flip=args.flip)\r\n for mi in range(args.batch_size):\r\n hist += cal_hist(batch_msk_lst[mi].flatten(), 
batch_res[mi].flatten(), args.num_classes)\r\n\r\n bi = 0\r\n batch_img_lst = []\r\n batch_msk_lst = []\r\n print('processed {} images'.format(i + 1))\r\n image_num = i\r\n\r\n if bi > 0:\r\n batch_res = eval_batch_scales(args, eval_net, batch_img_lst, scales=args.scales,\r\n base_crop_size=args.crop_size, flip=args.flip)\r\n for mi in range(bi):\r\n hist += cal_hist(batch_msk_lst[mi].flatten(), batch_res[mi].flatten(), args.num_classes)\r\n print('processed {} images'.format(image_num + 1))\r\n\r\n print(hist)\r\n iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\r\n print('per-class IoU', iu)\r\n print('mean IoU', np.nanmean(iu))\r\n\r\n\r\nif __name__ == '__main__':\r\n net_eval()\r\n",
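# --- Editor's note (not part of the original eval script) --------------------
# A miniature check of the metric reported by net_eval above. cal_hist accumulates
# a confusion matrix; the IoU line converts it to per-class intersection-over-union.
# The 3-class ground-truth / prediction arrays are invented for illustration.
import numpy as np

def cal_hist(a, b, n):   # same helper as defined in the script above
    k = (a >= 0) & (a < n)
    return np.bincount(n * a[k].astype(np.int32) + b[k], minlength=n ** 2).reshape(n, n)

gt = np.array([0, 0, 1, 1, 2, 2])
pred = np.array([0, 1, 1, 1, 2, 0])
hist = cal_hist(gt, pred, 3)                                  # rows: gt class, cols: predicted class

iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print('per-class IoU', iu)            # [0.3333 0.6667 0.5]
print('mean IoU', np.nanmean(iu))     # 0.5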
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MaskRcnn feature pyramid network.\"\"\"\n\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore.ops import operations as P\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.common import dtype as mstype\nfrom mindspore.common.initializer import initializer\n\n\ndef bias_init_zeros(shape):\n \"\"\"Bias init method.\"\"\"\n return Tensor(np.array(np.zeros(shape).astype(np.float32)), dtype=mstype.float32)\n\ndef _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='pad'):\n \"\"\"Conv2D wrapper.\"\"\"\n shape = (out_channels, in_channels, kernel_size, kernel_size)\n weights = initializer(\"XavierUniform\", shape=shape, dtype=mstype.float32)\n shape_bias = (out_channels,)\n biass = bias_init_zeros(shape_bias)\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n pad_mode=pad_mode, weight_init=weights, has_bias=True, bias_init=biass)\n\nclass FeatPyramidNeck(nn.Cell):\n \"\"\"\n Feature pyramid network cell, usually uses as network neck.\n\n Applies the convolution on multiple, input feature maps\n and output feature map with same channel size. 
if required num of\n output larger then num of inputs, add extra maxpooling for further\n downsampling;\n\n Args:\n in_channels (tuple) - Channel size of input feature maps.\n out_channels (int) - Channel size output.\n num_outs (int) - Num of output features.\n\n Returns:\n Tuple, with tensors of same channel size.\n\n Examples:\n neck = FeatPyramidNeck([100,200,300], 50, 4)\n input_data = (normal(0,0.1,(1,c,1280//(4*2**i), 768//(4*2**i)),\n dtype=np.float32) \\\n for i, c in enumerate(config.fpn_in_channels))\n x = neck(input_data)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n num_outs):\n super(FeatPyramidNeck, self).__init__()\n self.num_outs = num_outs\n self.in_channels = in_channels\n self.fpn_layer = len(self.in_channels)\n\n assert not self.num_outs < len(in_channels)\n\n self.lateral_convs_list_ = []\n self.fpn_convs_ = []\n\n for _, channel in enumerate(in_channels):\n l_conv = _conv(channel, out_channels, kernel_size=1, stride=1,\n padding=0, pad_mode='valid').to_float(mstype.float16)\n fpn_conv = _conv(out_channels, out_channels, kernel_size=3, stride=1,\n padding=0, pad_mode='same').to_float(mstype.float16)\n self.lateral_convs_list_.append(l_conv)\n self.fpn_convs_.append(fpn_conv)\n self.lateral_convs_list = nn.layer.CellList(self.lateral_convs_list_)\n self.fpn_convs_list = nn.layer.CellList(self.fpn_convs_)\n self.interpolate1 = P.ResizeBilinear((48, 80))\n self.interpolate2 = P.ResizeBilinear((96, 160))\n self.interpolate3 = P.ResizeBilinear((192, 320))\n self.cast = P.Cast()\n self.maxpool = P.MaxPool(kernel_size=1, strides=2, pad_mode=\"same\")\n\n def construct(self, inputs):\n x = ()\n for i in range(self.fpn_layer):\n x += (self.lateral_convs_list[i](inputs[i]),)\n\n y = (x[3],)\n y = y + (x[2] + self.cast(self.interpolate1(y[self.fpn_layer - 4]), mstype.float16),)\n y = y + (x[1] + self.cast(self.interpolate2(y[self.fpn_layer - 3]), mstype.float16),)\n y = y + (x[0] + self.cast(self.interpolate3(y[self.fpn_layer - 2]), mstype.float16),)\n\n z = ()\n for i in range(self.fpn_layer - 1, -1, -1):\n z = z + (y[i],)\n\n outs = ()\n for i in range(self.fpn_layer):\n outs = outs + (self.fpn_convs_list[i](z[i]),)\n\n for i in range(self.num_outs - self.fpn_layer):\n outs = outs + (self.maxpool(outs[3]),)\n return outs\n",
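# --- Editor's note (not part of the original network definition) -------------
# Plain-Python arithmetic making the hard-coded ResizeBilinear targets above
# explicit. The 768 x 1280 input size and backbone strides 4/8/16/32 are inferred
# from those constants, not stated in this file.
height, width = 768, 1280
strides = [4, 8, 16, 32]
levels = [(height // s, width // s) for s in strides]
print(levels)                    # [(192, 320), (96, 160), (48, 80), (24, 40)]
# Top-down pathway: each coarser map is resized to the next finer size, which is
# exactly interpolate3/2/1 = ResizeBilinear((192, 320) / (96, 160) / (48, 80)).
# When num_outs exceeds the number of inputs, every extra output halves the
# coarsest map with the stride-2 MaxPool:
print(levels[-1][0] // 2, levels[-1][1] // 2)   # 12 20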
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Hierarchical occlusion edit tree searcher.\"\"\"\nfrom enum import Enum\nimport copy\nimport re\nimport math\n\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\n\nfrom mindspore import nn\nfrom mindspore import Tensor\nfrom mindspore.ops import Squeeze\nfrom mindspore.train._utils import check_value_type\n\n\nAUTO_LAYER_MAX = 3 # maximum number of layer by auto settings\nAUTO_WIN_SIZE_MIN = 28 # minimum window size by auto settings\nAUTO_WIN_SIZE_DIV = 2 # denominator of windows size calculations by auto settings\nAUTO_STRIDE_DIV = 5 # denominator of stride calculations by auto settings\nAUTO_MASK_GAUSSIAN_RADIUS_DIV = 25 # denominator of gaussian mask radius calculations by auto settings\nDEFAULT_THRESHOLD = 0.5 # default target prediction threshold\nDEFAULT_BATCH_SIZE = 64 # default batch size for batch inference search\nMASK_GAUSSIAN_RE = r'^gaussian:(\\d+)$' # gaussian mask string pattern\n\n# minimum length of input images' short side with auto settings\nAUTO_IMAGE_SHORT_SIDE_MIN = AUTO_WIN_SIZE_MIN * AUTO_WIN_SIZE_DIV\n\n\ndef is_valid_str_mask(mask):\n \"\"\"Check if it is a valid string mask.\"\"\"\n check_value_type('mask', mask, str)\n match = re.match(MASK_GAUSSIAN_RE, mask)\n return match and int(match.group(1)) > 0\n\n\ndef compile_mask(mask, image):\n \"\"\"Compile mask to a ready to use object.\"\"\"\n if mask is None:\n return compile_str_mask(auto_str_mask(image), image)\n check_value_type('mask', mask, (str, tuple, float, np.ndarray))\n if isinstance(mask, str):\n return compile_str_mask(mask, image)\n\n if isinstance(mask, tuple):\n _check_iterable_type('mask', mask, tuple, float)\n elif isinstance(mask, np.ndarray):\n if len(image.shape) == 4 and len(mask.shape) == 3:\n mask = np.expand_dims(mask, axis=0)\n elif len(image.shape) == 3 and len(mask.shape) == 4 and mask.shape[0] == 1:\n mask = mask.squeeze(0)\n if image.shape != mask.shape:\n raise ValueError(\"Image and mask is not match in shape.\")\n return mask\n\n\ndef auto_str_mask(image):\n \"\"\"Generate auto string mask for the image.\"\"\"\n check_value_type('image', image, np.ndarray)\n short_side = np.min(image.shape[-2:])\n radius = int(round(short_side/AUTO_MASK_GAUSSIAN_RADIUS_DIV))\n if radius == 0:\n raise ValueError(f\"Input image's short side:{short_side} is too small for auto mask, \"\n f\"at least {AUTO_MASK_GAUSSIAN_RADIUS_DIV}pixels is required.\")\n return f'gaussian:{radius}'\n\n\ndef compile_str_mask(mask, image):\n \"\"\"Concert string mask to numpy.ndarray.\"\"\"\n check_value_type('mask', mask, str)\n check_value_type('image', image, np.ndarray)\n match = re.match(MASK_GAUSSIAN_RE, mask)\n if match:\n radius = int(match.group(1))\n if radius > 0:\n sigma = [0] * len(image.shape)\n sigma[-2] = radius\n sigma[-1] = radius\n return gaussian_filter(image, sigma=sigma, mode='nearest')\n raise 
ValueError(f\"Invalid string mask: '{mask}'.\")\n\n\nclass EditStep:\n \"\"\"\n Edit step that describes a box region, also represents an edit tree.\n\n Args:\n layer (int): Layer number, -1 is root layer, 0 or above is normal edit layer.\n box (tuple[int, int, int, int]): Tuple of x, y, width, height.\n \"\"\"\n def __init__(self, layer, box):\n self.layer = layer\n self.box = box\n self.network_output = 0\n self.step_change = 0\n self.children = None\n\n @property\n def x(self):\n \"\"\"X-coordinate of the box.\"\"\"\n return self.box[0]\n\n @property\n def y(self):\n \"\"\"Y-coordinate of the box.\"\"\"\n return self.box[1]\n\n @property\n def width(self):\n \"\"\"Width of the box.\"\"\"\n return self.box[2]\n\n @property\n def height(self):\n \"\"\"Height of the box.\"\"\"\n return self.box[3]\n\n @property\n def is_leaf(self):\n \"\"\"Returns True if no child edit step.\"\"\"\n return not self.children\n\n @property\n def leaf_steps(self):\n \"\"\"Returns all leaf edit steps in the tree.\"\"\"\n if self.is_leaf:\n return [self]\n steps = []\n for child in self.children:\n steps.extend(child.leaf_steps)\n return steps\n\n @property\n def max_layer(self):\n \"\"\"Maximum layer number in the edit tree.\"\"\"\n if self.is_leaf:\n return self.layer\n layer = self.layer\n for child in self.children:\n child_max_layer = child.max_layer\n if child_max_layer > layer:\n layer = child_max_layer\n return layer\n\n def add_child(self, child):\n \"\"\"Add a child edit step.\"\"\"\n if self.children is None:\n self.children = [child]\n else:\n self.children.append(child)\n\n def remove_all_children(self):\n \"\"\"Remove all child steps.\"\"\"\n self.children = None\n\n def get_layer_or_leaf_steps(self, layer):\n \"\"\"Get all edit steps of the layer and all leaf edit steps above the layer.\"\"\"\n if self.layer == layer or (self.layer < layer and self.is_leaf):\n return [self]\n steps = []\n if self.layer < layer and self.children:\n for child in self.children:\n steps.extend(child.get_layer_or_leaf_steps(layer))\n return steps\n\n def get_layer_steps(self, layer):\n \"\"\"Get all edit steps of the layer.\"\"\"\n if self.layer == layer:\n return [self]\n steps = []\n if self.layer < layer and self.children:\n for child in self.children:\n steps.extend(child.get_layer_steps(layer))\n return steps\n\n @classmethod\n def apply(cls,\n image,\n mask,\n edit_steps,\n by_masking=False,\n inplace=False):\n \"\"\"\n Apply edit steps.\n\n Args:\n image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format.\n mask (Union[str, tuple[float, float, float], float, numpy.ndarray]): The mask, type can be\n str: String mask, e.g. 'gaussian:9' - Gaussian blur with radius of 9.\n tuple[float, float, float]: RGB solid color mask,\n float: Grey scale solid color mask.\n numpy.ndarray: Image mask in CHW or NCHW(N=1) format.\n edit_steps (list[EditStep], optional): Edit steps to be applied.\n by_masking (bool): Whether it is masking mode.\n inplace (bool): Whether the modification is going to take place in the input image tensor. 
False to\n construct a new image tensor as result.\n\n Returns:\n numpy.ndarray, the result image tensor.\n\n Raises:\n TypeError: Be raised for any argument or data type problem.\n ValueError: Be raised for any argument or data value problem.\n \"\"\"\n if by_masking:\n return cls.apply_masking(image, mask, edit_steps, inplace)\n return cls.apply_unmasking(image, mask, edit_steps, inplace)\n\n @classmethod\n def apply_masking(cls,\n image,\n mask,\n edit_steps,\n inplace=False):\n \"\"\"\n Apply edit steps in masking mode.\n\n Args:\n image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format.\n mask (Union[str, tuple[float, float, float], float, numpy.ndarray]): The mask, type can be\n str: String mask, e.g. 'gaussian:9' - Gaussian blur with radius of 9.\n tuple[float, float, float]: RGB solid color mask,\n float: Grey scale solid color mask.\n numpy.ndarray: Image mask in CHW or NCHW(N=1) format.\n edit_steps (list[EditStep], optional): Edit steps to be applied.\n inplace (bool): Whether the modification is going to take place in the input image tensor. False to\n construct a new image tensor as result.\n\n Returns:\n numpy.ndarray, the result image tensor.\n\n Raises:\n TypeError: Be raised for any argument or data type problem.\n ValueError: Be raised for any argument or data value problem.\n \"\"\"\n\n cls._apply_check_args(image, mask, edit_steps)\n\n mask = compile_mask(mask, image)\n\n background = image if inplace else np.copy(image)\n\n if not edit_steps:\n return background\n\n for step in edit_steps:\n\n x_max, y_max = cls._get_step_xy_max(step, background.shape[-1], background.shape[-2])\n\n if x_max <= step.x or y_max <= step.y:\n continue\n\n if isinstance(mask, np.ndarray):\n background[..., step.y:y_max, step.x:x_max] = mask[..., step.y:y_max, step.x:x_max]\n else:\n if isinstance(mask, (int, float)):\n mask = (mask, mask, mask)\n for c in range(3):\n background[..., c, step.y:y_max, step.x:x_max] = mask[c]\n return background\n\n @classmethod\n def apply_unmasking(cls,\n image,\n mask,\n edit_steps,\n inplace=False):\n \"\"\"\n Apply edit steps in unmasking mode.\n\n Args:\n image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format.\n mask (Union[str, tuple[float, float, float], float, numpy.ndarray]): The mask, type can be\n str: String mask, e.g. 'gaussian:9' - Gaussian blur with radius of 9.\n tuple[float, float, float]: RGB solid color mask,\n float: Grey scale solid color mask.\n numpy.ndarray: Image mask in CHW or NCHW(N=1) format.\n edit_steps (list[EditStep]): Edit steps to be applied.\n inplace (bool): Whether the modification is going to take place in the input mask tensor. 
False to\n construct a new image tensor as result.\n\n Returns:\n numpy.ndarray, the result image tensor.\n\n Raises:\n TypeError: Be raised for any argument or data type problem.\n ValueError: Be raised for any argument or data value problem.\n \"\"\"\n\n cls._apply_check_args(image, mask, edit_steps)\n\n mask = compile_mask(mask, image)\n\n if isinstance(mask, np.ndarray):\n if inplace:\n background = mask\n else:\n background = np.copy(mask)\n else:\n if inplace:\n raise ValueError('Inplace cannot be True when mask is not a numpy.ndarray')\n\n background = np.zeros_like(image)\n if isinstance(mask, (int, float)):\n background.fill(mask)\n else:\n for c in range(3):\n background[..., c, :, :] = mask[c]\n\n if not edit_steps:\n return background\n\n for step in edit_steps:\n\n x_max, y_max = cls._get_step_xy_max(step, background.shape[-1], background.shape[-2])\n\n if x_max <= step.x or y_max <= step.y:\n continue\n\n background[..., step.y:y_max, step.x:x_max] = image[..., step.y:y_max, step.x:x_max]\n\n return background\n\n @staticmethod\n def _apply_check_args(image, mask, edit_steps):\n \"\"\"\n Check arguments for apply edit steps.\n\n Args:\n image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format.\n mask (Union[str, tuple[float, float, float], float, numpy.ndarray]): The mask, type can be\n str: String mask, e.g. 'gaussian:9' - Gaussian blur with radius of 9.\n tuple[float, float, float]: RGB solid color mask,\n float: Grey scale solid color mask.\n numpy.ndarray: Image mask in CHW or NCHW(N=1) format.\n edit_steps (list[EditStep], optional): Edit steps to be applied.\n\n Raises:\n TypeError: Be raised for any argument or data type problem.\n ValueError: Be raised for any argument or data value problem.\n \"\"\"\n check_value_type('image', image, np.ndarray)\n check_value_type('mask', mask, (str, tuple, float, np.ndarray))\n if isinstance(mask, tuple):\n _check_iterable_type('mask', mask, tuple, float)\n\n if edit_steps is not None:\n _check_iterable_type('edit_steps', edit_steps, (tuple, list), EditStep)\n\n @staticmethod\n def _get_step_xy_max(step, x_limit, y_limit):\n \"\"\"Get the step x and y max. 
position.\"\"\"\n x_max = step.x + step.width\n y_max = step.y + step.height\n\n if x_max > x_limit:\n x_max = x_limit\n\n if y_max > y_limit:\n y_max = y_limit\n return x_max, y_max\n\n\nclass NoValidResultError(RuntimeError):\n \"\"\"Error for no edit step layer's network output meet the threshold.\"\"\"\n\n\nclass OriginalOutputError(RuntimeError):\n \"\"\"Error for network output of the original image is not strictly larger than the threshold.\"\"\"\n\n\nclass Searcher:\n \"\"\"\n Edit step searcher.\n\n Args:\n network (Cell): Image tensor in CHW or NCHW(N=1) format.\n win_sizes (Union(list[int], optional): Moving square window size (length of side) of layers,\n None means by auto calcuation.\n strides (Union(list[int], optional): Stride of layers, None means by auto calcuation.\n threshold (float): Threshold network output value of the target class.\n by_masking (bool): Whether it is masking mode.\n\n Raises:\n ValueError: Be raised for any data or settings' value problem.\n TypeError: Be raised for any data or settings' type problem.\n RuntimeError: Be raised if this function was invoked before.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n \"\"\"\n\n def __init__(self,\n network,\n win_sizes=None,\n strides=None,\n threshold=DEFAULT_THRESHOLD,\n by_masking=False):\n\n check_value_type('network', network, nn.Cell)\n\n if win_sizes is not None:\n _check_iterable_type('win_sizes', win_sizes, list, int)\n if not win_sizes:\n raise ValueError('Argument win_sizes is empty.')\n\n for i in range(1, len(win_sizes)):\n if win_sizes[i] >= win_sizes[i-1]:\n raise ValueError('Argument win_sizes is not strictly descending.')\n\n if win_sizes[-1] <= 0:\n raise ValueError('Argument win_sizes has non-positive number.')\n elif strides is not None:\n raise ValueError('Argument win_sizes cannot be None if strides is not None.')\n\n if strides is not None:\n _check_iterable_type('strides', strides, list, int)\n for i in range(1, len(strides)):\n if strides[i] >= strides[i-1]:\n raise ValueError('Argument win_sizes is not strictly descending.')\n\n if strides[-1] <= 0:\n raise ValueError('Argument strides has non-positive number.')\n\n if len(strides) != len(win_sizes):\n raise ValueError('Length of strides and win_sizes is not equal.')\n elif win_sizes is not None:\n raise ValueError('Argument strides cannot be None if win_sizes is not None.')\n\n self._network = copy.deepcopy(network)\n self._compiled_mask = None\n self._threshold = threshold\n self._win_sizes = copy.copy(win_sizes) if win_sizes else None\n self._strides = copy.copy(strides) if strides else None\n self._by_masking = by_masking\n\n @property\n def network(self):\n \"\"\"Get the network.\"\"\"\n return self._network\n\n @property\n def by_masking(self):\n \"\"\"Check if it is masking mode.\"\"\"\n return self._by_masking\n\n @property\n def threshold(self):\n \"\"\"The network output threshold to stop the search.\"\"\"\n return self._threshold\n\n @property\n def win_sizes(self):\n \"\"\"Windows sizes in pixels.\"\"\"\n return self._win_sizes\n\n @property\n def strides(self):\n \"\"\"Strides in pixels.\"\"\"\n return self._strides\n\n @property\n def compiled_mask(self):\n \"\"\"The compiled mask after a successful search() call.\"\"\"\n return self._compiled_mask\n\n def search(self, image, class_idx, mask=None):\n \"\"\"\n Search smallest sufficient/destruction region on an image.\n\n Args:\n image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format.\n class_idx (int): Target class index.\n mask (Union[str, tuple[float, float, 
float], float], optional): The mask, type can be\n str: String mask, e.g. 'gaussian:9' - Gaussian blur with radius of 9.\n tuple[float, float, float]: RGB solid color mask,\n float: Grey scale solid color mask.\n None: By auto calculation.\n\n Returns:\n tuple[EditStep, list[float]], the root edit step and network output of each layer after applied the\n layer steps.\n\n Raise:\n TypeError: Be raised for any argument or data type problem.\n ValueError: Be raised for any argument or data value problem.\n NoValidResultError: Be raised if no valid result was found.\n OriginalOutputError: Be raised if network output of the original image is not strictly larger than\n the threshold.\n \"\"\"\n check_value_type('image', image, (Tensor, np.ndarray))\n\n if isinstance(image, Tensor):\n image = image.asnumpy()\n\n if len(image.shape) == 4:\n if image.shape[0] != 1:\n raise ValueError(\"Argument image's batch size is not 1.\")\n elif len(image.shape) == 3:\n image = np.expand_dims(image, axis=0)\n else:\n raise ValueError(\"Argument image is not in CHW or NCHW(N=1) format.\")\n\n check_value_type('class_idx', class_idx, int)\n\n if class_idx < 0:\n raise ValueError(\"Argument class_idx is less then zero.\")\n\n self._compiled_mask = compile_mask(mask, image)\n\n short_side = np.min(image.shape[-2:])\n if self._win_sizes is None:\n win_sizes, strides = self._auto_win_sizes_strides(short_side)\n else:\n win_sizes, strides = self._win_sizes, self._strides\n\n if short_side <= win_sizes[0]:\n raise ValueError(f\"Input image's short side is shorter then or \"\n f\"equals to the first window size:{win_sizes[0]}.\")\n\n self._network.set_train(False)\n\n # the search result will be store as a edit tree that attached to the root step.\n root_step = EditStep(-1, (0, 0, image.shape[-1], image.shape[-2]))\n root_job = _SearchJob(by_masking=self._by_masking,\n class_idx=class_idx,\n win_sizes=win_sizes,\n strides=strides,\n layer=0,\n search_field=root_step.box,\n pre_edit_steps=None,\n parent_step=root_step)\n self._process_root_job(image, root_job)\n return self._touch_result(image, class_idx, root_step)\n\n def _touch_result(self, image, class_idx, root_step):\n \"\"\"\n Final treatment to the search result.\n\n Args:\n image (numpy.ndarray): Image tensor in CHW or NCHW(N=1) format.\n class_idx (int): Target class index.\n root_step (EditStep): The searched root step.\n\n Returns:\n tuple[EditStep, list[float]], the root edit step and network output of each layer after applied the\n layer steps.\n\n Raise:\n NoValidResultError: Be raised if no valid result was found.\n \"\"\"\n # the leaf layer's network output may not meet the threshold,\n # we have to cutoff the unqualified layers\n layer_count = root_step.max_layer + 1\n if layer_count == 0:\n raise NoValidResultError(\"No edit step layer was found.\")\n\n # gather the network output of each layer\n layer_outputs = [None] * layer_count\n for layer in range(layer_count):\n steps = root_step.get_layer_or_leaf_steps(layer)\n if not steps:\n continue\n masked_image = EditStep.apply(image, self._compiled_mask, steps, by_masking=self._by_masking)\n output = self._network(Tensor(masked_image))\n output = output[0, class_idx].asnumpy().item()\n layer_outputs[layer] = output\n\n # determine which layer we have to cutoff\n cutoff_layer = None\n for layer in reversed(range(layer_count)):\n if layer_outputs[layer] is not None and self._is_threshold_met(layer_outputs[layer]):\n cutoff_layer = layer\n break\n\n if cutoff_layer is None or root_step.is_leaf:\n raise 
NoValidResultError(f\"No edit step layer's network output meet the threshold: {self._threshold}.\")\n\n # cutoff the layer by removing all children of the layer's steps.\n steps = root_step.get_layer_steps(cutoff_layer)\n for step in steps:\n step.remove_all_children()\n layer_outputs = layer_outputs[:cutoff_layer + 1]\n\n return root_step, layer_outputs\n\n def _process_root_job(self, sample_input, root_job):\n \"\"\"\n Process job queue.\n\n Args:\n sample_input (numpy.ndarray): Image tensor in NCHW(N=1) format.\n root_job (_SearchJob): Root search job.\n \"\"\"\n job_queue = [root_job]\n while job_queue:\n job = job_queue.pop(0)\n sub_job_queue = []\n job_edit_steps, stop_reason = self._process_job(job, sample_input, sub_job_queue)\n\n if stop_reason in (self._StopReason.THRESHOLD_MET, self._StopReason.STEP_CHANGE_MET):\n for step in job_edit_steps:\n job.parent_step.add_child(step)\n job_queue.extend(sub_job_queue)\n\n def _prepare_job(self, job, sample_input):\n \"\"\"\n Prepare a job for process.\n\n Args:\n job (_SearchJob): Search job to be processed.\n sample_input (numpy.ndarray): Image tensor in NCHW(N=1) format.\n\n Returns:\n numpy.ndarray, the image tensor workpiece.\n\n Raise:\n OriginalOutputError: Be raised if network output of the original image is not strictly larger than the\n threshold.\n \"\"\"\n # make the network output with the original image is strictly larger than the threshold\n if job.layer == 0:\n original_output = self._network(Tensor(sample_input))[0, job.class_idx].asnumpy().item()\n if original_output <= self._threshold:\n raise OriginalOutputError(f'The original output is not strictly larger the threshold: '\n f'{self._threshold}')\n\n # applying the pre-edit steps from the parent steps\n if job.pre_edit_steps:\n # use the latest leaf steps to increase the accuracy\n leaf_steps = []\n for step in job.pre_edit_steps:\n leaf_steps.extend(step.leaf_steps)\n pre_edit_steps = leaf_steps\n else:\n pre_edit_steps = None\n workpiece = EditStep.apply(sample_input,\n self._compiled_mask,\n pre_edit_steps,\n self._by_masking)\n\n job.on_start(sample_input, workpiece, self._compiled_mask, self._network)\n return workpiece\n\n def _process_job(self, job, sample_input, job_queue):\n \"\"\"\n Process a job.\n\n Args:\n job (_SearchJob): Search job to be processed.\n sample_input (numpy.ndarray): Image tensor in NCHW(N=1) format.\n job_queue (list[_SearchJob]): Job queue.\n\n Returns:\n tuple[list[EditStep], _StopReason], result edit stop and the stop reason.\n\n Raise:\n OriginalOutputError: Be raised if network output of the original image is not strictly larger than the\n threshold.\n \"\"\"\n workpiece = self._prepare_job(job, sample_input)\n\n start_output = self._network(Tensor(workpiece))[0, job.class_idx].asnumpy().item()\n last_output = start_output\n edit_steps = []\n # greedy search loop\n while True:\n\n if self._is_threshold_met(last_output):\n return edit_steps, self._StopReason.THRESHOLD_MET\n\n try:\n best_edit = job.find_best_edit()\n except _NoNewStepError:\n return edit_steps, self._StopReason.NO_NEW_STEP\n except _RepeatedStepError:\n return edit_steps, self._StopReason.REPEATED_STEP\n\n best_edit.step_change = best_edit.network_output - last_output\n\n if job.layer < job.layer_count - 1 and self._is_greedy(best_edit.step_change):\n # create net layer search job if new edit step is valid and not yet reaching\n # the final layer\n if job.pre_edit_steps:\n pre_edit_steps = list(job.pre_edit_steps)\n pre_edit_steps.extend(edit_steps)\n else:\n 
pre_edit_steps = list(edit_steps)\n\n sub_job = job.create_sub_job(best_edit, pre_edit_steps)\n job_queue.append(sub_job)\n\n edit_steps.append(best_edit)\n\n if job.layer > 0:\n # stop if the step change meet the parent step change only after layer 0\n change = best_edit.network_output - start_output\n if self._is_step_change_met(job.parent_step.step_change, change):\n return edit_steps, self._StopReason.STEP_CHANGE_MET\n\n last_output = best_edit.network_output\n\n def _is_threshold_met(self, network_output):\n \"\"\"Check if the threshold was met.\"\"\"\n if self._by_masking:\n return network_output <= self._threshold\n return network_output >= self._threshold\n\n def _is_step_change_met(self, target, step_change):\n \"\"\"Check if the change target was met.\"\"\"\n if self._by_masking:\n return step_change <= target\n return step_change >= target\n\n def _is_greedy(self, step_change):\n \"\"\"Check if it is a greedy step.\"\"\"\n if self._by_masking:\n return step_change < 0\n return step_change > 0\n\n @classmethod\n def _auto_win_sizes_strides(cls, short_side):\n \"\"\"\n Calculate auto window sizes and strides.\n\n Args:\n short_side (int): Length of search space.\n\n Returns:\n tuple[list[int], list[int]], window sizes and strides.\n \"\"\"\n win_sizes = []\n strides = []\n cur_len = int(short_side/AUTO_WIN_SIZE_DIV)\n while len(win_sizes) < AUTO_LAYER_MAX and cur_len >= AUTO_WIN_SIZE_MIN:\n stride = int(cur_len/AUTO_STRIDE_DIV)\n if stride <= 0:\n break\n win_sizes.append(cur_len)\n strides.append(stride)\n cur_len = int(cur_len/AUTO_WIN_SIZE_DIV)\n if not win_sizes:\n raise ValueError(f\"Image's short side is less then {AUTO_IMAGE_SHORT_SIDE_MIN}, \"\n f\"unable to calculates auto settings.\")\n return win_sizes, strides\n\n class _StopReason(Enum):\n \"\"\"Stop reason of search job.\"\"\"\n THRESHOLD_MET = 0 # threshold was met.\n STEP_CHANGE_MET = 1 # parent step change was met.\n NO_NEW_STEP = 2 # no new step was found.\n REPEATED_STEP = 3 # repeated step was found.\n\n\ndef _check_iterable_type(arg_name, arg_value, container_type, elem_types):\n \"\"\"Concert iterable argument data type.\"\"\"\n check_value_type(arg_name, arg_value, container_type)\n for elem in arg_value:\n check_value_type(arg_name + ' element', elem, elem_types)\n\n\nclass _NoNewStepError(Exception):\n \"\"\"Error for no new step was found.\"\"\"\n\n\nclass _RepeatedStepError(Exception):\n \"\"\"Error for repeated step was found.\"\"\"\n\n\nclass _SearchJob:\n \"\"\"\n Search job.\n\n Args:\n by_masking (bool): Whether it is masking mode.\n class_idx (int): Target class index.\n win_sizes (list[int]): Moving square window size (length of side) of layers.\n strides (list[int]): Strides of layers.\n layer (int): Layer number.\n search_field (tuple[int, int, int, int]): Search field in x, y, width, height format.\n pre_edit_steps (list[EditStep], optional): Edit steps to be applied before searching.\n parent_step (EditStep): Parent edit step.\n batch_size (int): Batch size of batched inferences.\n \"\"\"\n\n def __init__(self,\n by_masking,\n class_idx,\n win_sizes,\n strides,\n layer,\n search_field,\n pre_edit_steps,\n parent_step,\n batch_size=DEFAULT_BATCH_SIZE):\n\n if layer >= len(win_sizes):\n raise ValueError('Layer is larger then number of window sizes.')\n\n self.by_masking = by_masking\n self.class_idx = class_idx\n self.win_sizes = win_sizes\n self.strides = strides\n self.layer = layer\n self.search_field = search_field\n self.pre_edit_steps = pre_edit_steps\n self.parent_step = parent_step\n 
self.batch_size = batch_size\n self.network = None\n self.mask = None\n self.original_input = None\n\n self._workpiece = None\n self._found_best_edits = None\n self._found_uvs = None\n self._u_pixels = None\n self._v_pixels = None\n\n @property\n def layer_count(self):\n \"\"\"Number of layers.\"\"\"\n return len(self.win_sizes)\n\n def on_start(self, original_input, workpiece, mask, network):\n \"\"\"\n Notification of the start of the search job.\n\n Args:\n original_input (numpy.ndarray): The original image tensor in CHW or NCHW(N=1) format.\n workpiece (numpy.ndarray): The intermediate image tensor in CHW or NCHW(N=1) format.\n mask (Union[tuple[float, float, float], float, numpy.ndarray]): The mask, type can be\n tuple[float, float, float]: RGB solid color mask,\n float: Grey scale solid color mask.\n numpy.ndarray: Image mask, has same format of original_input.\n network (nn.Cell): Classification network.\n \"\"\"\n self.original_input = original_input\n self.mask = mask\n self.network = network\n\n self._workpiece = workpiece\n self._found_best_edits = []\n self._found_uvs = []\n self._u_pixels = self._calc_uv_pixels(self.search_field[0], self.search_field[2])\n self._v_pixels = self._calc_uv_pixels(self.search_field[1], self.search_field[3])\n\n def create_sub_job(self, parent_step, pre_edit_steps):\n \"\"\"Create next layer search job.\"\"\"\n return self.__class__(by_masking=self.by_masking,\n class_idx=self.class_idx,\n win_sizes=self.win_sizes,\n strides=self.strides,\n layer=self.layer + 1,\n search_field=copy.copy(parent_step.box),\n pre_edit_steps=pre_edit_steps,\n parent_step=parent_step,\n batch_size=self.batch_size)\n\n def find_best_edit(self):\n \"\"\"\n Find the next best edit step.\n\n Returns:\n EditStep, the next best edit step.\n \"\"\"\n workpiece = self._workpiece\n if len(workpiece.shape) == 3:\n workpiece = np.expand_dims(workpiece, axis=0)\n\n # generate input tensors with shifted masked/unmasked region and pack into a batch\n best_new_workpiece = None\n best_output = None\n best_edit = None\n best_uv = None\n batch = np.repeat(workpiece, repeats=self.batch_size, axis=0)\n batch_uvs = []\n batch_steps = []\n batch_i = 0\n win_size = self.win_sizes[self.layer]\n for u, x in enumerate(self._u_pixels):\n for v, y in enumerate(self._v_pixels):\n if (u, v) in self._found_uvs:\n continue\n\n edit_step = EditStep(self.layer, (x, y, win_size, win_size))\n\n if self.by_masking:\n EditStep.apply(batch[batch_i],\n self.mask,\n [edit_step],\n self.by_masking,\n inplace=True)\n else:\n EditStep.apply(self.original_input,\n batch[batch_i],\n [edit_step],\n self.by_masking,\n inplace=True)\n\n batch_i += 1\n batch_uvs.append((u, v))\n batch_steps.append(edit_step)\n if batch_i != self.batch_size:\n continue\n\n # the batch is full, inference and empty it\n updated = self._update_best(batch, batch_uvs, batch_steps, best_output)\n if updated:\n best_output, best_uv, best_edit, best_new_workpiece = updated\n\n batch = np.repeat(workpiece, repeats=self.batch_size, axis=0)\n batch_uvs = []\n batch_i = 0\n\n if batch_i > 0:\n # don't forget the last half full batch\n updated = self._update_best(batch, batch_uvs, batch_steps, best_output, batch_i)\n if updated:\n best_output, best_uv, best_edit, best_new_workpiece = updated\n\n if best_edit is None:\n raise _NoNewStepError\n\n if best_uv in self._found_uvs:\n raise _RepeatedStepError\n\n self._found_uvs.append(best_uv)\n self._found_best_edits.append(best_edit)\n best_edit.network_output = best_output\n\n # continue on the best 
workpiece in the next function call\n self._workpiece = best_new_workpiece\n\n return best_edit\n\n def _update_best(self, batch, batch_uvs, batch_steps, best_output, batch_i=None):\n \"\"\"Update the best edit step.\"\"\"\n squeeze = Squeeze()\n batch_output = self.network(Tensor(batch))\n batch_output = batch_output[:, self.class_idx]\n if len(batch_output.shape) > 1:\n batch_output = squeeze(batch_output)\n\n aggregation = np.argmin if self.by_masking else np.argmax\n if batch_i is None:\n batch_best_i = aggregation(batch_output.asnumpy())\n else:\n batch_best_i = aggregation(batch_output.asnumpy()[:batch_i, ...])\n batch_best_output = batch_output[int(batch_best_i)].asnumpy().item()\n\n if best_output is None or self._is_output0_better(batch_best_output, best_output):\n best_output = batch_best_output\n best_uv = batch_uvs[batch_best_i]\n best_edit = batch_steps[batch_best_i]\n best_new_workpiece = batch[batch_best_i]\n return best_output, best_uv, best_edit, best_new_workpiece\n return None\n\n def _is_output0_better(self, output0, output1):\n \"\"\"Check if the network output0 is better.\"\"\"\n if self.by_masking:\n return output0 < output1\n return output0 > output1\n\n def _calc_uv_pixels(self, begin, length):\n \"\"\"\n Calculate the pixel coordinate of shifts.\n\n Args:\n begin (int): The beginning pixel coordinate of search field.\n length (int): The length of search field.\n\n Returns:\n list[int], pixel coordinate of shifts.\n \"\"\"\n win_size = self.win_sizes[self.layer]\n stride = self.strides[self.layer]\n shift_count = self._calc_shift_count(length, win_size, stride)\n pixels = [0] * shift_count\n for i in range(shift_count):\n if i == shift_count - 1:\n pixels[i] = begin + length - win_size\n else:\n pixels[i] = begin + i*stride\n return pixels\n\n @staticmethod\n def _calc_shift_count(length, win_size, stride):\n \"\"\"\n Calculate the number of shifts in search field.\n\n Args:\n length (int): The length of search field.\n win_size (int): The length of sides of moving window.\n stride (int): The stride.\n\n Returns:\n int, number of shifts.\n \"\"\"\n if length <= win_size or win_size < stride or stride <= 0:\n raise ValueError(\"Invalid length, win_size or stride.\")\n count = int(math.ceil((length - win_size)/stride))\n if (count - 1)*stride + win_size < length:\n return count + 1\n return count\n",
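The Searcher above performs a greedy, layer-by-layer occlusion search and returns its result as an edit tree rooted at an EditStep. A minimal usage sketch follows; `MyClassifier` and the random input are placeholders (any trained MindSpore `nn.Cell` classifier and a preprocessed CHW image would take their place), and the network's output for the target class must be strictly above the threshold, otherwise `OriginalOutputError` is raised.

# Minimal usage sketch for the Searcher / EditStep API defined above.
# `MyClassifier` is a hypothetical trained nn.Cell; the random image is only a
# stand-in for a real, preprocessed float32 CHW input.
import numpy as np

network = MyClassifier()                                  # hypothetical classifier
image = np.random.rand(3, 224, 224).astype(np.float32)   # stand-in input
class_idx = 7                                             # target class to explain

searcher = Searcher(network, threshold=0.5, by_masking=False)
# mask=None -> an auto gaussian mask (radius = short side / 25) is compiled
root_step, layer_outputs = searcher.search(image, class_idx)

# Re-apply the leaf edit steps onto the compiled mask to visualise the smallest
# sufficient region found (unmasking mode).
explanation = EditStep.apply(image,
                             searcher.compiled_mask,
                             root_step.leaf_steps,
                             by_masking=False)
print(len(layer_outputs), 'layers kept, network output per layer:', layer_outputs)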
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nCallback for eval\n\"\"\"\n\nimport os\nfrom mindspore.train.callback import Callback\nfrom mindspore import save_checkpoint\nimport numpy as np\n\n\nclass EvalCallBack(Callback):\n \"\"\"\n CallBack class\n \"\"\"\n def __init__(self, options, net, eval_dataset, path):\n self.net = net\n self.eval_dataset = eval_dataset\n self.path = path\n self.avgacc = 0\n self.avgloss = 0\n self.bestacc = 0\n self.options = options\n\n\n def epoch_begin(self, run_context):\n \"\"\"\n CallBack epoch begin\n \"\"\"\n cb_param = run_context.original_args()\n cur_epoch = cb_param.cur_epoch_num\n print('=========EPOCH {} BEGIN========='.format(cur_epoch))\n\n def epoch_end(self, run_context):\n \"\"\"\n CallBack epoch end\n \"\"\"\n cb_param = run_context.original_args()\n cur_epoch = cb_param.cur_epoch_num\n cur_net = cb_param.network\n # print(cur_net)\n evalnet = self.net\n self.avgacc, self.avgloss = self.eval(self.eval_dataset, evalnet)\n\n if self.avgacc > self.bestacc:\n self.bestacc = self.avgacc\n print('Epoch {}: Avg Accuracy: {}(best) Avg Loss:{}'.format(cur_epoch, self.avgacc, self.avgloss))\n best_path = os.path.join(self.path, 'best_ck.ckpt')\n save_checkpoint(cur_net, best_path)\n\n else:\n print('Epoch {}: Avg Accuracy: {} Avg Loss:{}'.format(cur_epoch, self.avgacc, self.avgloss))\n last_path = os.path.join(self.path, 'last_ck.ckpt')\n save_checkpoint(cur_net, last_path)\n print(\"Best Acc:\", self.bestacc)\n print('=========EPOCH {} END========='.format(cur_epoch))\n\n def eval(self, inp, net):\n \"\"\"\n CallBack eval\n \"\"\"\n avg_acc = list()\n avg_loss = list()\n for _ in range(10):\n for batch in inp.create_dict_iterator():\n x = batch['data']\n y = batch['label']\n classes = batch['classes']\n acc, loss = net(x, y, classes)\n avg_acc.append(acc.asnumpy())\n avg_loss.append(loss.asnumpy())\n avg_acc = np.mean(avg_acc)\n avg_loss = np.mean(avg_loss)\n\n return avg_acc, avg_loss\n",
"# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting configuration manager\n\"\"\"\nimport os\nimport filecmp\nimport glob\nimport numpy as np\n\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.py_transforms\nimport mindspore.dataset.vision.c_transforms as c_vision\nimport mindspore.dataset.vision.py_transforms as py_vision\nfrom mindspore import log as logger\nfrom util import dataset_equal\n\nDATA_DIR = [\"../data/dataset/test_tf_file_3_images/train-0000-of-0001.data\"]\nSCHEMA_DIR = \"../data/dataset/test_tf_file_3_images/datasetSchema.json\"\n\n\ndef test_basic():\n \"\"\"\n Test basic configuration functions\n \"\"\"\n # Save original configuration values\n num_parallel_workers_original = ds.config.get_num_parallel_workers()\n prefetch_size_original = ds.config.get_prefetch_size()\n seed_original = ds.config.get_seed()\n monitor_sampling_interval_original = ds.config.get_monitor_sampling_interval()\n\n ds.config.load('../data/dataset/declient.cfg')\n\n assert ds.config.get_num_parallel_workers() == 8\n # assert ds.config.get_worker_connector_size() == 16\n assert ds.config.get_prefetch_size() == 16\n assert ds.config.get_seed() == 5489\n assert ds.config.get_monitor_sampling_interval() == 15\n\n ds.config.set_num_parallel_workers(2)\n # ds.config.set_worker_connector_size(3)\n ds.config.set_prefetch_size(4)\n ds.config.set_seed(5)\n ds.config.set_monitor_sampling_interval(45)\n\n assert ds.config.get_num_parallel_workers() == 2\n # assert ds.config.get_worker_connector_size() == 3\n assert ds.config.get_prefetch_size() == 4\n assert ds.config.get_seed() == 5\n assert ds.config.get_monitor_sampling_interval() == 45\n\n # Restore original configuration values\n ds.config.set_num_parallel_workers(num_parallel_workers_original)\n ds.config.set_prefetch_size(prefetch_size_original)\n ds.config.set_seed(seed_original)\n ds.config.set_monitor_sampling_interval(monitor_sampling_interval_original)\n\n\ndef test_get_seed():\n \"\"\"\n This gets the seed value without explicitly setting a default, expect int.\n \"\"\"\n assert isinstance(ds.config.get_seed(), int)\n\n\ndef test_pipeline():\n \"\"\"\n Test that our configuration pipeline works when we set parameters at different locations in dataset code\n \"\"\"\n # Save original configuration values\n num_parallel_workers_original = ds.config.get_num_parallel_workers()\n\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)\n data1 = data1.map(operations=[c_vision.Decode(True)], input_columns=[\"image\"])\n ds.serialize(data1, \"testpipeline.json\")\n\n data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, num_parallel_workers=num_parallel_workers_original,\n shuffle=False)\n data2 = data2.map(operations=[c_vision.Decode(True)], input_columns=[\"image\"])\n ds.serialize(data2, \"testpipeline2.json\")\n\n # check that the generated output is different\n assert 
filecmp.cmp('testpipeline.json', 'testpipeline2.json')\n\n # this test passes currently because our num_parallel_workers don't get updated.\n\n # remove generated jason files\n file_list = glob.glob('*.json')\n for f in file_list:\n try:\n os.remove(f)\n except IOError:\n logger.info(\"Error while deleting: {}\".format(f))\n\n # Restore original configuration values\n ds.config.set_num_parallel_workers(num_parallel_workers_original)\n\n\ndef test_deterministic_run_fail():\n \"\"\"\n Test RandomCrop with seed, expected to fail\n \"\"\"\n logger.info(\"test_deterministic_run_fail\")\n\n # Save original configuration values\n num_parallel_workers_original = ds.config.get_num_parallel_workers()\n seed_original = ds.config.get_seed()\n\n # when we set the seed all operations within our dataset should be deterministic\n ds.config.set_seed(0)\n ds.config.set_num_parallel_workers(1)\n # First dataset\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n # Assuming we get the same seed on calling constructor, if this op is re-used then result won't be\n # the same in between the two datasets. For example, RandomCrop constructor takes seed (0)\n # outputs a deterministic series of numbers, e,g \"a\" = [1, 2, 3, 4, 5, 6] <- pretend these are random\n random_crop_op = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])\n decode_op = c_vision.Decode()\n data1 = data1.map(operations=decode_op, input_columns=[\"image\"])\n data1 = data1.map(operations=random_crop_op, input_columns=[\"image\"])\n\n # Second dataset\n data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n data2 = data2.map(operations=decode_op, input_columns=[\"image\"])\n # If seed is set up on constructor\n data2 = data2.map(operations=random_crop_op, input_columns=[\"image\"])\n\n try:\n dataset_equal(data1, data2, 0)\n\n except Exception as e:\n # two datasets split the number out of the sequence a\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"Array\" in str(e)\n\n # Restore original configuration values\n ds.config.set_num_parallel_workers(num_parallel_workers_original)\n ds.config.set_seed(seed_original)\n\n\ndef test_seed_undeterministic():\n \"\"\"\n Test seed with num parallel workers in c, this test is expected to fail some of the time\n \"\"\"\n logger.info(\"test_seed_undeterministic\")\n\n # Save original configuration values\n num_parallel_workers_original = ds.config.get_num_parallel_workers()\n seed_original = ds.config.get_seed()\n\n ds.config.set_seed(0)\n ds.config.set_num_parallel_workers(3)\n\n # First dataset\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n # We get the seed when constructor is called\n random_crop_op = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])\n decode_op = c_vision.Decode()\n data1 = data1.map(operations=decode_op, input_columns=[\"image\"])\n data1 = data1.map(operations=random_crop_op, input_columns=[\"image\"])\n\n # Second dataset\n data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n data2 = data2.map(operations=decode_op, input_columns=[\"image\"])\n # Since seed is set up on constructor, so the two ops output deterministic sequence.\n # Assume the generated random sequence \"a\" = [1, 2, 3, 4, 5, 6] <- pretend these are random\n random_crop_op2 = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])\n data2 = data2.map(operations=random_crop_op2, input_columns=[\"image\"])\n try:\n 
dataset_equal(data1, data2, 0)\n except Exception as e:\n # two datasets both use numbers from the generated sequence \"a\"\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"Array\" in str(e)\n\n # Restore original configuration values\n ds.config.set_num_parallel_workers(num_parallel_workers_original)\n ds.config.set_seed(seed_original)\n\n\ndef test_seed_deterministic():\n \"\"\"\n Test deterministic run with setting the seed, only works with num_parallel worker = 1\n \"\"\"\n logger.info(\"test_seed_deterministic\")\n\n # Save original configuration values\n num_parallel_workers_original = ds.config.get_num_parallel_workers()\n seed_original = ds.config.get_seed()\n\n ds.config.set_seed(0)\n ds.config.set_num_parallel_workers(1)\n\n # First dataset\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n # seed will be read in during constructor call\n random_crop_op = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])\n decode_op = c_vision.Decode()\n data1 = data1.map(operations=decode_op, input_columns=[\"image\"])\n data1 = data1.map(operations=random_crop_op, input_columns=[\"image\"])\n\n # Second dataset\n data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n data2 = data2.map(operations=decode_op, input_columns=[\"image\"])\n # If seed is set up on constructor, so the two ops output deterministic sequence\n random_crop_op2 = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])\n data2 = data2.map(operations=random_crop_op2, input_columns=[\"image\"])\n\n dataset_equal(data1, data2, 0)\n\n # Restore original configuration values\n ds.config.set_num_parallel_workers(num_parallel_workers_original)\n ds.config.set_seed(seed_original)\n\n\ndef test_deterministic_run_distribution():\n \"\"\"\n Test deterministic run with with setting the seed being used in a distribution\n \"\"\"\n logger.info(\"test_deterministic_run_distribution\")\n\n # Save original configuration values\n num_parallel_workers_original = ds.config.get_num_parallel_workers()\n seed_original = ds.config.get_seed()\n\n # when we set the seed all operations within our dataset should be deterministic\n ds.config.set_seed(0)\n ds.config.set_num_parallel_workers(1)\n\n # First dataset\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n random_horizontal_flip_op = c_vision.RandomHorizontalFlip(0.1)\n decode_op = c_vision.Decode()\n data1 = data1.map(operations=decode_op, input_columns=[\"image\"])\n data1 = data1.map(operations=random_horizontal_flip_op, input_columns=[\"image\"])\n\n # Second dataset\n data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n data2 = data2.map(operations=decode_op, input_columns=[\"image\"])\n # If seed is set up on constructor, so the two ops output deterministic sequence\n random_horizontal_flip_op2 = c_vision.RandomHorizontalFlip(0.1)\n data2 = data2.map(operations=random_horizontal_flip_op2, input_columns=[\"image\"])\n\n dataset_equal(data1, data2, 0)\n\n # Restore original configuration values\n ds.config.set_num_parallel_workers(num_parallel_workers_original)\n ds.config.set_seed(seed_original)\n\n\ndef test_deterministic_python_seed():\n \"\"\"\n Test deterministic execution with seed in python\n \"\"\"\n logger.info(\"test_deterministic_python_seed\")\n\n # Save original configuration values\n num_parallel_workers_original = ds.config.get_num_parallel_workers()\n seed_original = ds.config.get_seed()\n\n 
ds.config.set_seed(0)\n ds.config.set_num_parallel_workers(1)\n\n # First dataset\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n\n transforms = [\n py_vision.Decode(),\n py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),\n py_vision.ToTensor(),\n ]\n transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)\n data1 = data1.map(operations=transform, input_columns=[\"image\"])\n data1_output = []\n # config.set_seed() calls random.seed()\n for data_one in data1.create_dict_iterator(num_epochs=1, output_numpy=True):\n data1_output.append(data_one[\"image\"])\n\n # Second dataset\n data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n data2 = data2.map(operations=transform, input_columns=[\"image\"])\n # config.set_seed() calls random.seed(), resets seed for next dataset iterator\n ds.config.set_seed(0)\n\n data2_output = []\n for data_two in data2.create_dict_iterator(num_epochs=1, output_numpy=True):\n data2_output.append(data_two[\"image\"])\n\n np.testing.assert_equal(data1_output, data2_output)\n\n # Restore original configuration values\n ds.config.set_num_parallel_workers(num_parallel_workers_original)\n ds.config.set_seed(seed_original)\n\n\ndef test_deterministic_python_seed_multi_thread():\n \"\"\"\n Test deterministic execution with seed in python, this fails with multi-thread pyfunc run\n \"\"\"\n logger.info(\"test_deterministic_python_seed_multi_thread\")\n\n # Save original configuration values\n num_parallel_workers_original = ds.config.get_num_parallel_workers()\n seed_original = ds.config.get_seed()\n ds.config.set_num_parallel_workers(3)\n ds.config.set_seed(0)\n # when we set the seed all operations within our dataset should be deterministic\n # First dataset\n data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n transforms = [\n py_vision.Decode(),\n py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),\n py_vision.ToTensor(),\n ]\n transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)\n data1 = data1.map(operations=transform, input_columns=[\"image\"], python_multiprocessing=True)\n data1_output = []\n # config.set_seed() calls random.seed()\n for data_one in data1.create_dict_iterator(num_epochs=1, output_numpy=True):\n data1_output.append(data_one[\"image\"])\n\n # Second dataset\n data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n # If seed is set up on constructor\n data2 = data2.map(operations=transform, input_columns=[\"image\"], python_multiprocessing=True)\n # config.set_seed() calls random.seed()\n ds.config.set_seed(0)\n\n data2_output = []\n for data_two in data2.create_dict_iterator(num_epochs=1, output_numpy=True):\n data2_output.append(data_two[\"image\"])\n\n try:\n np.testing.assert_equal(data1_output, data2_output)\n except Exception as e:\n # expect output to not match during multi-threaded execution\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"Array\" in str(e)\n\n # Restore original configuration values\n ds.config.set_num_parallel_workers(num_parallel_workers_original)\n ds.config.set_seed(seed_original)\n\n\ndef test_auto_num_workers_error():\n \"\"\"\n Test auto_num_workers error\n \"\"\"\n err_msg = \"\"\n try:\n ds.config.set_auto_num_workers([1, 2])\n except TypeError as e:\n err_msg = str(e)\n\n assert \"must be of type bool\" in err_msg\n\n\ndef test_auto_num_workers():\n \"\"\"\n Test auto_num_workers can be 
set.\n \"\"\"\n\n saved_config = ds.config.get_auto_num_workers()\n assert isinstance(saved_config, bool)\n # change to a different config\n flipped_config = not saved_config\n ds.config.set_auto_num_workers(flipped_config)\n assert flipped_config == ds.config.get_auto_num_workers()\n # now flip this back\n ds.config.set_auto_num_workers(saved_config)\n assert saved_config == ds.config.get_auto_num_workers()\n\n\nif __name__ == '__main__':\n test_basic()\n test_get_seed()\n test_pipeline()\n test_deterministic_run_fail()\n test_seed_undeterministic()\n test_seed_deterministic()\n test_deterministic_run_distribution()\n test_deterministic_python_seed()\n test_deterministic_python_seed_multi_thread()\n test_auto_num_workers_error()\n test_auto_num_workers()\n",
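Every test above follows the same discipline: snapshot the global dataset configuration, change it for the scenario under test, and restore it afterwards. The helper below is not part of the test file; it is only a sketch of that save/restore pattern as a context manager, built on the same `mindspore.dataset.config` getters and setters the tests call.

# Illustrative helper (not part of the test file above) for the save/restore pattern.
from contextlib import contextmanager
import mindspore.dataset as ds

@contextmanager
def dataset_config(seed=None, num_parallel_workers=None):
    # Snapshot the global config so the test cannot leak settings.
    saved_seed = ds.config.get_seed()
    saved_workers = ds.config.get_num_parallel_workers()
    try:
        if seed is not None:
            ds.config.set_seed(seed)
        if num_parallel_workers is not None:
            ds.config.set_num_parallel_workers(num_parallel_workers)
        yield
    finally:
        ds.config.set_seed(saved_seed)
        ds.config.set_num_parallel_workers(saved_workers)

# e.g. the deterministic tests above reduce to:
# with dataset_config(seed=0, num_parallel_workers=1):
#     ...build the two pipelines and compare them with dataset_equal...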
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"MaskRcnn Rcnn for mask network.\"\"\"\n\nimport numpy as np\nimport mindspore.common.dtype as mstype\nimport mindspore.nn as nn\nfrom mindspore.ops import operations as P\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.common.initializer import initializer\n\ndef _conv(in_channels, out_channels, kernel_size=1, stride=1, padding=0, pad_mode='pad'):\n \"\"\"Conv2D wrapper.\"\"\"\n shape = (out_channels, in_channels, kernel_size, kernel_size)\n weights = initializer(\"XavierUniform\", shape=shape, dtype=mstype.float32)\n shape_bias = (out_channels,)\n bias = Tensor(np.array(np.zeros(shape_bias)).astype(np.float32))\n return nn.Conv2d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n pad_mode=pad_mode, weight_init=weights, has_bias=True, bias_init=bias)\n\ndef _convTanspose(in_channels, out_channels, kernel_size=1, stride=1, padding=0, pad_mode='pad'):\n \"\"\"ConvTranspose wrapper.\"\"\"\n shape = (out_channels, in_channels, kernel_size, kernel_size)\n weights = initializer(\"XavierUniform\", shape=shape, dtype=mstype.float32)\n shape_bias = (out_channels,)\n bias = Tensor(np.array(np.zeros(shape_bias)).astype(np.float32))\n return nn.Conv2dTranspose(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding,\n pad_mode=pad_mode, weight_init=weights, has_bias=True, bias_init=bias)\n\nclass FpnMask(nn.Cell):\n \"\"\"conv layers of mask head\"\"\"\n def __init__(self, input_channels, output_channels, num_classes):\n super(FpnMask, self).__init__()\n self.mask_conv1 = _conv(input_channels, output_channels, kernel_size=3,\n pad_mode=\"same\").to_float(mstype.float16)\n self.mask_relu1 = P.ReLU()\n\n self.mask_conv2 = _conv(output_channels, output_channels, kernel_size=3,\n pad_mode=\"same\").to_float(mstype.float16)\n self.mask_relu2 = P.ReLU()\n\n self.mask_conv3 = _conv(output_channels, output_channels, kernel_size=3,\n pad_mode=\"same\").to_float(mstype.float16)\n self.mask_relu3 = P.ReLU()\n\n self.mask_conv4 = _conv(output_channels, output_channels, kernel_size=3,\n pad_mode=\"same\").to_float(mstype.float16)\n self.mask_relu4 = P.ReLU()\n\n self.mask_deconv5 = _convTanspose(output_channels, output_channels, kernel_size=2,\n stride=2, pad_mode=\"valid\").to_float(mstype.float16)\n self.mask_relu5 = P.ReLU()\n self.mask_conv6 = _conv(output_channels, num_classes, kernel_size=1, stride=1,\n pad_mode=\"valid\").to_float(mstype.float16)\n\n def construct(self, x):\n x = self.mask_conv1(x)\n x = self.mask_relu1(x)\n\n x = self.mask_conv2(x)\n x = self.mask_relu2(x)\n\n x = self.mask_conv3(x)\n x = self.mask_relu3(x)\n\n x = self.mask_conv4(x)\n x = self.mask_relu4(x)\n\n x = self.mask_deconv5(x)\n x = self.mask_relu5(x)\n\n x = self.mask_conv6(x)\n\n return x\n\nclass RcnnMask(nn.Cell):\n \"\"\"\n Rcnn for mask subnet.\n\n Args:\n config 
(dict) - Config.\n batch_size (int) - Batchsize.\n num_classes (int) - Class number.\n target_means (list) - Means for encode function. Default: (.0, .0, .0, .0]).\n target_stds (list) - Stds for encode function. Default: (0.1, 0.1, 0.2, 0.2).\n\n Returns:\n Tuple, tuple of output tensor.\n\n Examples:\n RcnnMask(config=config, representation_size = 1024, batch_size=2, num_classes = 81, \\\n target_means=(0., 0., 0., 0.), target_stds=(0.1, 0.1, 0.2, 0.2))\n \"\"\"\n def __init__(self,\n config,\n batch_size,\n num_classes,\n target_means=(0., 0., 0., 0.),\n target_stds=(0.1, 0.1, 0.2, 0.2)\n ):\n super(RcnnMask, self).__init__()\n cfg = config\n self.rcnn_loss_mask_fb_weight = Tensor(np.array(cfg.rcnn_loss_mask_fb_weight).astype(np.float16))\n self.rcnn_mask_out_channels = cfg.rcnn_mask_out_channels\n self.target_means = target_means\n self.target_stds = target_stds\n self.num_classes = num_classes\n self.in_channels = cfg.rcnn_in_channels\n\n self.fpn_mask = FpnMask(self.in_channels, self.rcnn_mask_out_channels, self.num_classes)\n\n self.logicaland = P.LogicalAnd()\n self.loss_mask = P.SigmoidCrossEntropyWithLogits()\n self.onehot = P.OneHot()\n self.greater = P.Greater()\n self.cast = P.Cast()\n self.sum_loss = P.ReduceSum()\n self.tile = P.Tile()\n self.expandims = P.ExpandDims()\n\n self.on_value = Tensor(1.0, mstype.float32)\n self.off_value = Tensor(0.0, mstype.float32)\n\n self.num_bboxes = cfg.num_expected_pos_stage2 * batch_size\n rmv_first = np.ones((self.num_bboxes, self.num_classes))\n rmv_first[:, 0] = np.zeros((self.num_bboxes,))\n self.rmv_first_tensor = Tensor(rmv_first.astype(np.float16))\n self.mean_loss = P.ReduceMean()\n\n def construct(self, mask_featuremap, labels=None, mask=None, mask_fb_targets=None):\n x_mask_fb = self.fpn_mask(mask_featuremap)\n\n if self.training:\n bbox_weights = self.cast(self.logicaland(self.greater(labels, 0), mask), mstype.int32) * labels\n mask_fb_targets = self.tile(self.expandims(mask_fb_targets, 1), (1, self.num_classes, 1, 1))\n\n loss_mask_fb = self.loss(x_mask_fb, bbox_weights, mask, mask_fb_targets)\n out = loss_mask_fb\n else:\n out = x_mask_fb\n\n return out\n\n\n def loss(self, masks_fb_pred, bbox_weights, weights, masks_fb_targets):\n \"\"\"Loss method.\"\"\"\n weights = self.cast(weights, mstype.float16)\n bbox_weights = self.cast(self.onehot(bbox_weights, self.num_classes, self.on_value, self.off_value),\n mstype.float16)\n bbox_weights = bbox_weights * self.rmv_first_tensor # * self.rmv_first_tensor exclude background\n\n # loss_mask_fb\n masks_fb_targets = self.cast(masks_fb_targets, mstype.float16)\n loss_mask_fb = self.loss_mask(masks_fb_pred, masks_fb_targets)\n loss_mask_fb = self.mean_loss(loss_mask_fb, (2, 3))\n loss_mask_fb = loss_mask_fb * bbox_weights\n loss_mask_fb = loss_mask_fb / self.sum_loss(weights, (0,))\n loss_mask_fb = self.sum_loss(loss_mask_fb, (0, 1))\n\n return loss_mask_fb\n",
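FpnMask stacks four 3x3 'same' convolutions, one stride-2 transposed convolution and a final 1x1 convolution, so an RoI feature map comes out with twice the spatial size and one channel per class. The shape sketch below assumes illustrative sizes (256 input channels, 14x14 RoI features, 81 classes) rather than values from a particular config, and a backend that supports the float16 convolutions used above.

# Shape sketch for FpnMask (sizes are illustrative, not taken from a real config).
import numpy as np
from mindspore import Tensor

num_classes = 81
head = FpnMask(input_channels=256, output_channels=256, num_classes=num_classes)

rois = Tensor(np.random.randn(8, 256, 14, 14).astype(np.float16))  # 8 RoI feature maps
mask_logits = head(rois)
# The 3x3 'same' convs keep 14x14, the 2x2/stride-2 deconv doubles it, and the
# 1x1 conv maps channels to num_classes -> expected shape (8, 81, 28, 28).
print(mask_logits.shape)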
"# Copyright 2020 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"Eval Retinaface_resnet50.\"\"\"\r\nfrom __future__ import print_function\r\nimport os\r\nimport time\r\nimport datetime\r\nimport numpy as np\r\nimport cv2\r\n\r\nfrom mindspore import Tensor, context\r\nfrom mindspore.common import set_seed\r\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\r\n\r\nfrom src.config import cfg_res50\r\nfrom src.network import RetinaFace, resnet50\r\nfrom src.utils import decode_bbox, prior_box\r\n\r\n\r\nset_seed(1)\r\n\r\nclass Timer():\r\n def __init__(self):\r\n self.start_time = 0.\r\n self.diff = 0.\r\n\r\n def start(self):\r\n self.start_time = time.time()\r\n\r\n def end(self):\r\n self.diff = time.time() - self.start_time\r\n\r\nclass DetectionEngine:\r\n def __init__(self, cfg):\r\n self.results = {}\r\n self.nms_thresh = cfg['val_nms_threshold']\r\n self.conf_thresh = cfg['val_confidence_threshold']\r\n self.iou_thresh = cfg['val_iou_threshold']\r\n self.var = cfg['variance']\r\n self.save_prefix = cfg['val_predict_save_folder']\r\n self.gt_dir = cfg['val_gt_dir']\r\n\r\n def _iou(self, a, b):\r\n A = a.shape[0]\r\n B = b.shape[0]\r\n max_xy = np.minimum(\r\n np.broadcast_to(np.expand_dims(a[:, 2:4], 1), [A, B, 2]),\r\n np.broadcast_to(np.expand_dims(b[:, 2:4], 0), [A, B, 2]))\r\n min_xy = np.maximum(\r\n np.broadcast_to(np.expand_dims(a[:, 0:2], 1), [A, B, 2]),\r\n np.broadcast_to(np.expand_dims(b[:, 0:2], 0), [A, B, 2]))\r\n inter = np.maximum((max_xy - min_xy + 1), np.zeros_like(max_xy - min_xy))\r\n inter = inter[:, :, 0] * inter[:, :, 1]\r\n\r\n area_a = np.broadcast_to(\r\n np.expand_dims(\r\n (a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1), 1),\r\n np.shape(inter))\r\n area_b = np.broadcast_to(\r\n np.expand_dims(\r\n (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1), 0),\r\n np.shape(inter))\r\n union = area_a + area_b - inter\r\n return inter / union\r\n\r\n def _nms(self, boxes, threshold=0.5):\r\n x1 = boxes[:, 0]\r\n y1 = boxes[:, 1]\r\n x2 = boxes[:, 2]\r\n y2 = boxes[:, 3]\r\n scores = boxes[:, 4]\r\n\r\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\r\n order = scores.argsort()[::-1]\r\n\r\n reserved_boxes = []\r\n while order.size > 0:\r\n i = order[0]\r\n reserved_boxes.append(i)\r\n max_x1 = np.maximum(x1[i], x1[order[1:]])\r\n max_y1 = np.maximum(y1[i], y1[order[1:]])\r\n min_x2 = np.minimum(x2[i], x2[order[1:]])\r\n min_y2 = np.minimum(y2[i], y2[order[1:]])\r\n\r\n intersect_w = np.maximum(0.0, min_x2 - max_x1 + 1)\r\n intersect_h = np.maximum(0.0, min_y2 - max_y1 + 1)\r\n intersect_area = intersect_w * intersect_h\r\n ovr = intersect_area / (areas[i] + areas[order[1:]] - intersect_area)\r\n\r\n indices = np.where(ovr <= threshold)[0]\r\n order = order[indices + 1]\r\n\r\n return reserved_boxes\r\n\r\n def write_result(self):\r\n # save result to file.\r\n import json\r\n t = 
datetime.datetime.now().strftime('_%Y_%m_%d_%H_%M_%S')\r\n try:\r\n if not os.path.isdir(self.save_prefix):\r\n os.makedirs(self.save_prefix)\r\n\r\n self.file_path = self.save_prefix + '/predict' + t + '.json'\r\n f = open(self.file_path, 'w')\r\n json.dump(self.results, f)\r\n except IOError as e:\r\n raise RuntimeError(\"Unable to open json file to dump. What(): {}\".format(str(e)))\r\n else:\r\n f.close()\r\n return self.file_path\r\n\r\n def detect(self, boxes, confs, resize, scale, image_path, priors):\r\n if boxes.shape[0] == 0:\r\n # add to result\r\n event_name, img_name = image_path.split('/')\r\n self.results[event_name][img_name[:-4]] = {'img_path': image_path,\r\n 'bboxes': []}\r\n return\r\n\r\n boxes = decode_bbox(np.squeeze(boxes.asnumpy(), 0), priors, self.var)\r\n boxes = boxes * scale / resize\r\n\r\n scores = np.squeeze(confs.asnumpy(), 0)[:, 1]\r\n # ignore low scores\r\n inds = np.where(scores > self.conf_thresh)[0]\r\n boxes = boxes[inds]\r\n scores = scores[inds]\r\n\r\n # keep top-K before NMS\r\n order = scores.argsort()[::-1]\r\n boxes = boxes[order]\r\n scores = scores[order]\r\n\r\n # do NMS\r\n dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)\r\n keep = self._nms(dets, self.nms_thresh)\r\n dets = dets[keep, :]\r\n\r\n dets[:, 2:4] = (dets[:, 2:4].astype(np.int) - dets[:, 0:2].astype(np.int)).astype(np.float) # int\r\n dets[:, 0:4] = dets[:, 0:4].astype(np.int).astype(np.float) # int\r\n\r\n\r\n # add to result\r\n event_name, img_name = image_path.split('/')\r\n if event_name not in self.results.keys():\r\n self.results[event_name] = {}\r\n self.results[event_name][img_name[:-4]] = {'img_path': image_path,\r\n 'bboxes': dets[:, :5].astype(np.float).tolist()}\r\n\r\n def _get_gt_boxes(self):\r\n from scipy.io import loadmat\r\n gt = loadmat(os.path.join(self.gt_dir, 'wider_face_val.mat'))\r\n hard = loadmat(os.path.join(self.gt_dir, 'wider_hard_val.mat'))\r\n medium = loadmat(os.path.join(self.gt_dir, 'wider_medium_val.mat'))\r\n easy = loadmat(os.path.join(self.gt_dir, 'wider_easy_val.mat'))\r\n\r\n faceboxes = gt['face_bbx_list']\r\n events = gt['event_list']\r\n files = gt['file_list']\r\n\r\n hard_gt_list = hard['gt_list']\r\n medium_gt_list = medium['gt_list']\r\n easy_gt_list = easy['gt_list']\r\n\r\n return faceboxes, events, files, hard_gt_list, medium_gt_list, easy_gt_list\r\n\r\n def _norm_pre_score(self):\r\n max_score = 0\r\n min_score = 1\r\n\r\n for event in self.results:\r\n for name in self.results[event].keys():\r\n bbox = np.array(self.results[event][name]['bboxes']).astype(np.float)\r\n if bbox.shape[0] <= 0:\r\n continue\r\n max_score = max(max_score, np.max(bbox[:, -1]))\r\n min_score = min(min_score, np.min(bbox[:, -1]))\r\n\r\n length = max_score - min_score\r\n for event in self.results:\r\n for name in self.results[event].keys():\r\n bbox = np.array(self.results[event][name]['bboxes']).astype(np.float)\r\n if bbox.shape[0] <= 0:\r\n continue\r\n bbox[:, -1] -= min_score\r\n bbox[:, -1] /= length\r\n self.results[event][name]['bboxes'] = bbox.tolist()\r\n\r\n def _image_eval(self, predict, gt, keep, iou_thresh, section_num):\r\n\r\n _predict = predict.copy()\r\n _gt = gt.copy()\r\n\r\n image_p_right = np.zeros(_predict.shape[0])\r\n image_gt_right = np.zeros(_gt.shape[0])\r\n proposal = np.ones(_predict.shape[0])\r\n\r\n # x1y1wh -> x1y1x2y2\r\n _predict[:, 2:4] = _predict[:, 0:2] + _predict[:, 2:4]\r\n _gt[:, 2:4] = _gt[:, 0:2] + _gt[:, 2:4]\r\n\r\n ious = self._iou(_predict[:, 0:4], _gt[:, 0:4])\r\n for i 
in range(_predict.shape[0]):\r\n gt_ious = ious[i, :]\r\n max_iou, max_index = gt_ious.max(), gt_ious.argmax()\r\n if max_iou >= iou_thresh:\r\n if keep[max_index] == 0:\r\n image_gt_right[max_index] = -1\r\n proposal[i] = -1\r\n elif image_gt_right[max_index] == 0:\r\n image_gt_right[max_index] = 1\r\n\r\n right_index = np.where(image_gt_right == 1)[0]\r\n image_p_right[i] = len(right_index)\r\n\r\n\r\n\r\n image_pr = np.zeros((section_num, 2), dtype=np.float)\r\n for section in range(section_num):\r\n _thresh = 1 - (section + 1)/section_num\r\n over_score_index = np.where(predict[:, 4] >= _thresh)[0]\r\n if over_score_index.shape[0] <= 0:\r\n image_pr[section, 0] = 0\r\n image_pr[section, 1] = 0\r\n else:\r\n index = over_score_index[-1]\r\n p_num = len(np.where(proposal[0:(index+1)] == 1)[0])\r\n image_pr[section, 0] = p_num\r\n image_pr[section, 1] = image_p_right[index]\r\n\r\n return image_pr\r\n\r\n\r\n def get_eval_result(self):\r\n self._norm_pre_score()\r\n facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = self._get_gt_boxes()\r\n section_num = 1000\r\n sets = ['easy', 'medium', 'hard']\r\n set_gts = [easy_gt_list, medium_gt_list, hard_gt_list]\r\n ap_key_dict = {0: \"Easy Val AP : \", 1: \"Medium Val AP : \", 2: \"Hard Val AP : \",}\r\n ap_dict = {}\r\n for _set in range(len(sets)):\r\n gt_list = set_gts[_set]\r\n count_gt = 0\r\n pr_curve = np.zeros((section_num, 2), dtype=np.float)\r\n for i, _ in enumerate(event_list):\r\n event = str(event_list[i][0][0])\r\n image_list = file_list[i][0]\r\n event_predict_dict = self.results[event]\r\n event_gt_index_list = gt_list[i][0]\r\n event_gt_box_list = facebox_list[i][0]\r\n\r\n for j, _ in enumerate(image_list):\r\n predict = np.array(event_predict_dict[str(image_list[j][0][0])]['bboxes']).astype(np.float)\r\n gt_boxes = event_gt_box_list[j][0].astype('float')\r\n keep_index = event_gt_index_list[j][0]\r\n count_gt += len(keep_index)\r\n\r\n if gt_boxes.shape[0] <= 0 or predict.shape[0] <= 0:\r\n continue\r\n keep = np.zeros(gt_boxes.shape[0])\r\n if keep_index.shape[0] > 0:\r\n keep[keep_index-1] = 1\r\n\r\n image_pr = self._image_eval(predict, gt_boxes, keep,\r\n iou_thresh=self.iou_thresh,\r\n section_num=section_num)\r\n pr_curve += image_pr\r\n\r\n precision = pr_curve[:, 1] / pr_curve[:, 0]\r\n recall = pr_curve[:, 1] / count_gt\r\n\r\n precision = np.concatenate((np.array([0.]), precision, np.array([0.])))\r\n recall = np.concatenate((np.array([0.]), recall, np.array([1.])))\r\n for i in range(precision.shape[0]-1, 0, -1):\r\n precision[i-1] = np.maximum(precision[i-1], precision[i])\r\n index = np.where(recall[1:] != recall[:-1])[0]\r\n ap = np.sum((recall[index + 1] - recall[index]) * precision[index + 1])\r\n\r\n\r\n print(ap_key_dict[_set] + '{:.4f}'.format(ap))\r\n\r\n return ap_dict\r\n\r\n\r\ndef val():\r\n context.set_context(mode=context.GRAPH_MODE, device_target='GPU', save_graphs=False)\r\n\r\n cfg = cfg_res50\r\n\r\n backbone = resnet50(1001)\r\n network = RetinaFace(phase='predict', backbone=backbone)\r\n backbone.set_train(False)\r\n network.set_train(False)\r\n\r\n # load checkpoint\r\n assert cfg['val_model'] is not None, 'val_model is None.'\r\n param_dict = load_checkpoint(cfg['val_model'])\r\n print('Load trained model done. 
{}'.format(cfg['val_model']))\r\n network.init_parameters_data()\r\n load_param_into_net(network, param_dict)\r\n\r\n # testing dataset\r\n testset_folder = cfg['val_dataset_folder']\r\n testset_label_path = cfg['val_dataset_folder'] + \"label.txt\"\r\n with open(testset_label_path, 'r') as f:\r\n _test_dataset = f.readlines()\r\n test_dataset = []\r\n for im_path in _test_dataset:\r\n if im_path.startswith('# '):\r\n test_dataset.append(im_path[2:-1]) # delete '# ...\\n'\r\n\r\n num_images = len(test_dataset)\r\n\r\n timers = {'forward_time': Timer(), 'misc': Timer()}\r\n\r\n if cfg['val_origin_size']:\r\n h_max, w_max = 0, 0\r\n for img_name in test_dataset:\r\n image_path = os.path.join(testset_folder, 'images', img_name)\r\n _img = cv2.imread(image_path, cv2.IMREAD_COLOR)\r\n if _img.shape[0] > h_max:\r\n h_max = _img.shape[0]\r\n if _img.shape[1] > w_max:\r\n w_max = _img.shape[1]\r\n\r\n h_max = (int(h_max / 32) + 1) * 32\r\n w_max = (int(w_max / 32) + 1) * 32\r\n\r\n priors = prior_box(image_sizes=(h_max, w_max),\r\n min_sizes=[[16, 32], [64, 128], [256, 512]],\r\n steps=[8, 16, 32],\r\n clip=False)\r\n else:\r\n target_size = 1600\r\n max_size = 2176\r\n priors = prior_box(image_sizes=(max_size, max_size),\r\n min_sizes=[[16, 32], [64, 128], [256, 512]],\r\n steps=[8, 16, 32],\r\n clip=False)\r\n\r\n # init detection engine\r\n detection = DetectionEngine(cfg)\r\n\r\n # testing begin\r\n print('Predict box starting')\r\n for i, img_name in enumerate(test_dataset):\r\n image_path = os.path.join(testset_folder, 'images', img_name)\r\n\r\n img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)\r\n img = np.float32(img_raw)\r\n\r\n # testing scale\r\n if cfg['val_origin_size']:\r\n resize = 1\r\n assert img.shape[0] <= h_max and img.shape[1] <= w_max\r\n image_t = np.empty((h_max, w_max, 3), dtype=img.dtype)\r\n image_t[:, :] = (104.0, 117.0, 123.0)\r\n image_t[0:img.shape[0], 0:img.shape[1]] = img\r\n img = image_t\r\n else:\r\n im_size_min = np.min(img.shape[0:2])\r\n im_size_max = np.max(img.shape[0:2])\r\n resize = float(target_size) / float(im_size_min)\r\n # prevent bigger axis from being more than max_size:\r\n if np.round(resize * im_size_max) > max_size:\r\n resize = float(max_size) / float(im_size_max)\r\n\r\n img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)\r\n\r\n assert img.shape[0] <= max_size and img.shape[1] <= max_size\r\n image_t = np.empty((max_size, max_size, 3), dtype=img.dtype)\r\n image_t[:, :] = (104.0, 117.0, 123.0)\r\n image_t[0:img.shape[0], 0:img.shape[1]] = img\r\n img = image_t\r\n\r\n scale = np.array([img.shape[1], img.shape[0], img.shape[1], img.shape[0]], dtype=img.dtype)\r\n img -= (104, 117, 123)\r\n img = img.transpose(2, 0, 1)\r\n img = np.expand_dims(img, 0)\r\n img = Tensor(img) # [1, c, h, w]\r\n\r\n timers['forward_time'].start()\r\n boxes, confs, _ = network(img) # forward pass\r\n timers['forward_time'].end()\r\n timers['misc'].start()\r\n detection.detect(boxes, confs, resize, scale, img_name, priors)\r\n timers['misc'].end()\r\n\r\n print('im_detect: {:d}/{:d} forward_pass_time: {:.4f}s misc: {:.4f}s'.format(i + 1, num_images,\r\n timers['forward_time'].diff,\r\n timers['misc'].diff))\r\n print('Predict box done.')\r\n print('Eval starting')\r\n\r\n if cfg['val_save_result']:\r\n # Save the predict result if you want.\r\n predict_result_path = detection.write_result()\r\n print('predict result path is {}'.format(predict_result_path))\r\n\r\n detection.get_eval_result()\r\n print('Eval done.')\r\n\r\n\r\nif 
__name__ == '__main__':\r\n val()\r\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\n\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore import context\nfrom mindspore.common import dtype as mstype\nfrom mindspore.nn.optim import Lamb\nfrom mindspore.nn.optim import Momentum, Adam\nfrom mindspore.nn.wrap.cell_wrapper import WithLossCell\nfrom mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell\nfrom mindspore.ops import functional as F\nfrom mindspore.ops import operations as P\nfrom mindspore.train import Model\nfrom mindspore.train.loss_scale_manager import FixedLossScaleManager, DynamicLossScaleManager\nfrom ....dataset_mock import MindData\n\ncontext.set_context(mode=context.GRAPH_MODE)\n\n\nclass MindDataSet(MindData):\n def __init__(self, dataset_types, dataset_shapes):\n super(MindDataSet, self).__init__(size=2, batch_size=32,\n np_types=dataset_types,\n output_shapes=dataset_shapes,\n input_indexs=(0, 1))\n\n def __next__(self):\n if self._size < self._iter_num:\n raise StopIteration\n self._iter_num += 1\n lst = []\n for shape_, type_ in zip(self._output_shapes, self._np_types):\n lst.append(Tensor(np.ones(shape_).astype(type_)))\n return tuple(lst)\n\n\nclass Net(nn.Cell):\n def __init__(self, in_features, out_features):\n super(Net, self).__init__()\n self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name=\"weight\")\n self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name=\"bias\")\n self.matmul = P.MatMul()\n self.add = P.Add()\n\n def construct(self, input_):\n output = self.add(self.matmul(input_, self.weight), self.bias)\n return output\n\n\nclass NetFP16(nn.Cell):\n def __init__(self, in_features, out_features):\n super(NetFP16, self).__init__()\n self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name=\"weight\")\n self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name=\"bias\")\n self.matmul = P.MatMul()\n self.add = P.Add()\n self.cast = P.Cast()\n\n def construct(self, input_):\n output = self.cast(\n self.add(self.matmul(self.cast(input_, mstype.float16), self.cast(self.weight, mstype.float16)),\n self.cast(self.bias, mstype.float16)), mstype.float32)\n return output\n\n\ndef get_axis(x):\n shape_op = P.Shape()\n shape = shape_op(x)\n length = F.tuple_len(shape)\n perm = F.make_range(0, length)\n return perm\n\n\nclass MSELoss(nn.Cell):\n def __init__(self):\n super(MSELoss, self).__init__()\n self.reduce_sum = P.ReduceSum()\n self.square = P.Square()\n self.reduce_mean = P.ReduceMean()\n\n def construct(self, data, label):\n diff = data - label\n return self.reduce_mean(self.square(diff), get_axis(diff))\n\n\ndef test_momentum_compile():\n inputs = Tensor(np.ones([15, 1]).astype(np.float32))\n label = Tensor(np.zeros([15, 1]).astype(np.float32))\n net = Net(1, 1)\n\n loss = MSELoss()\n 
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,\n scale_sense=Tensor(np.full((1), 1.0), dtype=mstype.float32))\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", output)\n\n\ndef test_compile_fp16_not_overflow():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n net = NetFP16(16, 16)\n\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,\n scale_sense=Tensor(np.full((1), 1.0), dtype=mstype.float32))\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", output)\n\n\ndef test_compile_fp16_lr_overflow():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n lr = Tensor(np.ones([1], np.float32) * 0.1)\n net = NetFP16(16, 16)\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,\n scale_sense=Tensor(np.full((1), np.finfo(np.float32).max),\n dtype=mstype.float32))\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", output)\n\n\ndef test_compile_fp16_overflow():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n net = NetFP16(16, 16)\n\n loss = MSELoss()\n optimizer = Lamb(net.trainable_params(), learning_rate=0.01)\n net_with_loss = WithLossCell(net, loss)\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,\n scale_sense=Tensor(np.full((1), np.finfo(np.float32).max),\n dtype=mstype.float32))\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", output)\n\n\ndef test_compile_fp16_lr_overflow_with_lossscale_update():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n lr = Tensor(np.ones([1], np.float32) * 0.1)\n net = NetFP16(16, 16)\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n scale_manager = DynamicLossScaleManager()\n manager = scale_manager.get_update_cell()\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", output)\n\n\ndef test_compile_f16_model_train():\n dataset_types = (np.float32, np.float32)\n dataset_shapes = ((16, 16), (16, 16))\n\n dataset = MindDataSet(dataset_types, dataset_shapes)\n net = NetFP16(16, 16)\n net.set_train()\n\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n model = Model(net, loss_fn=loss, optimizer=optimizer, metrics=None)\n model.train(2, dataset, dataset_sink_mode=False)\n\n\ndef test_compile_f16_model_train_fixed():\n dataset_types = (np.float32, np.float32)\n dataset_shapes = ((16, 16), (16, 16))\n\n dataset = MindDataSet(dataset_types, dataset_shapes)\n net = NetFP16(16, 16)\n net.set_train()\n scale_manager = FixedLossScaleManager()\n loss = MSELoss()\n optimizer = 
Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n model = Model(net, loss_fn=loss, optimizer=optimizer, metrics=None, loss_scale_manager=scale_manager)\n model.train(2, dataset)\n\n\ndef test_compile_fp16_lr_overflow_fixed_feed():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n lr = Tensor(np.ones([1], np.float32) * 0.1)\n net = NetFP16(16, 16)\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n scale_manager = FixedLossScaleManager()\n update_cell = scale_manager.get_update_cell()\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=update_cell)\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", output)\n\n\ndef test_compile_fp16_lr_overflow_dynamic_feed():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n lr = Tensor(np.ones([1], np.float32) * 0.1)\n net = NetFP16(16, 16)\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n scale_manager = DynamicLossScaleManager()\n update_cell = scale_manager.get_update_cell()\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=update_cell)\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", output)\n\n\ndef test_compile_fp16_lr_overflow_fixed_graph():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n lr = Tensor(np.ones([1], np.float32) * 0.1)\n net = NetFP16(16, 16)\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n scale_manager = FixedLossScaleManager(drop_overflow_update=True)\n update_cell = scale_manager.get_update_cell()\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=update_cell)\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", output)\n\n\ndef test_compile_fp16_lr_overflow_dynamic_graph():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n lr = Tensor(np.ones([1], np.float32) * 0.1)\n net = NetFP16(16, 16)\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n scale_manager = DynamicLossScaleManager()\n update_cell = scale_manager.get_update_cell()\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=update_cell)\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", output)\n\n\ndef adam_compile(loss_scale=1.0):\n inputs = Tensor(np.ones([15, 1]).astype(np.float32))\n label = Tensor(np.zeros([15, 1]).astype(np.float32))\n net = Net(1, 1)\n\n loss = MSELoss()\n optimizer = Adam(net.trainable_params(), learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,\n use_nesterov=False, weight_decay=0.0, loss_scale=loss_scale)\n\n net_with_loss = WithLossCell(net, loss)\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,\n scale_sense=Tensor(np.full((1), 1.0), dtype=mstype.float32))\n train_network.set_train()\n output = train_network(inputs, label)\n print(\"the result is \", 
output)\n\n\ndef test_adam_compile():\n adam_compile()\n\n\ndef test_adam_loss_scale_compile():\n \"\"\" test setting loss_scale to 1e-40 \"\"\"\n adam_compile(loss_scale=1e-40)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test_loss_scale \"\"\"\nimport numpy as np\nimport pytest\nimport mindspore.nn as nn\nfrom mindspore import context\nfrom mindspore import Tensor, Parameter\nfrom mindspore.nn.wrap.cell_wrapper import WithLossCell\nfrom mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell\nfrom mindspore.ops import operations as P\nfrom mindspore.nn.optim import Momentum, RMSProp\nfrom mindspore.ops import functional as F\nfrom mindspore.common import dtype as mstype\nfrom mindspore.train import Model\nfrom mindspore.nn.optim import Lamb\nfrom mindspore.train.loss_scale_manager import DynamicLossScaleManager\n\ndef setup_module():\n context.set_context(mode=context.PYNATIVE_MODE, device_target=\"Ascend\")\n\nclass MindData:\n \"\"\" Stub for MindData \"\"\"\n\n def __init__(self, size=None, batch_size=None, repeat_count=1,\n np_types=None, output_shapes=None, input_indexes=(), func_name=''):\n self._size = size\n self._batch_size = batch_size\n self._repeat_count = repeat_count\n self._np_types = np_types\n self._output_shapes = output_shapes\n self._input_indexes = input_indexes\n self._func_name = func_name\n self._iter_num = 0\n\n def get_dataset_size(self):\n return self._size\n\n def get_repeat_count(self):\n return self._repeat_count\n\n def get_batch_size(self):\n return self._batch_size\n\n def output_types(self):\n return self._np_types\n\n def output_shapes(self):\n return self._output_shapes\n\n def create_tuple_iterator(self, num_epochs=-1, do_copy=True):\n return self\n\n @property\n def input_indexes(self):\n return self._input_indexes\n\n @property\n def func_name(self):\n return self._func_name\n\n def send(self):\n pass\n\n def __len__(self):\n return self._size\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self._size < self._iter_num:\n raise StopIteration\n self._iter_num += 1\n next_value = []\n for shape, typ in zip(self._output_shapes, self._np_types):\n next_value.append(Tensor(np.ndarray(shape, typ)))\n\n return tuple(next_value)\n\n def next(self):\n return self.__next__()\n\n def reset(self):\n self._iter_num = 0\n\n\nclass MindDataSet(MindData):\n def __init__(self, dataset_types, dataset_shapes):\n super(MindDataSet, self).__init__(size=2, batch_size=32,\n np_types=dataset_types,\n output_shapes=dataset_shapes,\n input_indexes=(0, 1), func_name='')\n def __next__(self):\n if self._size < self._iter_num:\n raise StopIteration\n self._iter_num += 1\n res = []\n for shape, t in zip(self._output_shapes, self._np_types):\n res.append(Tensor(np.ones(shape).astype(t)))\n return tuple(res)\n\nclass NetFP16(nn.Cell):\n def __init__(self, in_features, out_features):\n super(NetFP16, self).__init__()\n self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name=\"weight\")\n self.bias = 
Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name=\"bias\")\n self.matmul = P.MatMul()\n self.add = P.Add()\n self.cast = P.Cast()\n\n def construct(self, x):\n output = self.cast(self.add(self.matmul(self.cast(x, mstype.float16),\n self.cast(self.weight, mstype.float16)),\n self.cast(self.bias, mstype.float16)), mstype.float32)\n return output\n\ndef get_axis(x):\n shape_op = P.Shape()\n shape = shape_op(x)\n length = F.tuple_len(shape)\n perm = F.make_range(0, length)\n return perm\n\nclass MSELoss(nn.Cell):\n def __init__(self):\n super(MSELoss, self).__init__()\n self.sum = P.ReduceSum()\n self.square = P.Square()\n self.reduce_mean = P.ReduceMean()\n\n def construct(self, data, label):\n diff = data - label\n return self.reduce_mean(self.square(diff), get_axis(diff))\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_loss_scale_fp16_lr_overflow():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n lr = Tensor(np.ones([1], np.float32) * 0.1)\n net = NetFP16(16, 16)\n net.set_train()\n\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,\n scale_sense=Tensor(np.full((1), np.finfo(np.float32).max),\n dtype=mstype.float32))\n output_1 = train_network(inputs, label)\n output_2 = train_network(inputs, label)\n assert output_1[0].asnumpy() == output_2[0].asnumpy()\n assert output_1[1].asnumpy() == output_2[1].asnumpy() == True\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_loss_scale_fp16_lr_overflow_set_sense_scale():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n lr = Tensor(np.ones([1], np.float32) * 0.1)\n net = NetFP16(16, 16)\n net.set_train()\n\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=lr, momentum=0.9)\n\n net_with_loss = WithLossCell(net, loss)\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,\n scale_sense=Tensor(np.full((1), np.finfo(np.float32).max),\n dtype=mstype.float32))\n output_1 = train_network(inputs, label)\n\n train_network.set_sense_scale(Tensor(np.full((1), np.finfo(np.float32).max), dtype=mstype.float32))\n output_2 = train_network(inputs, label)\n assert output_1[0].asnumpy() == output_2[0].asnumpy()\n assert output_1[1].asnumpy() == output_2[1].asnumpy() == True\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_loss_scale_fp16_model_train_overflow():\n dataset_types = (np.float32, np.float32)\n dataset_shapes = ((16, 16), (16, 16))\n dataset = MindDataSet(dataset_types, dataset_shapes)\n\n net = NetFP16(16, 16)\n net.set_train()\n\n loss = MSELoss()\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n scale_manager = DynamicLossScaleManager(init_loss_scale=16, scale_factor=2, scale_window=2)\n model = Model(net, loss_fn=loss, optimizer=optimizer, metrics=None, loss_scale_manager=scale_manager)\n model.train(2, dataset, dataset_sink_mode=False)\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_loss_scale_fp16_opt_rmsprop_overflow():\n inputs = 
Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n net = NetFP16(16, 16)\n net.set_train()\n\n loss = MSELoss()\n optimizer = RMSProp(net.trainable_params(), learning_rate=0.1)\n net_with_loss = WithLossCell(net, loss)\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,\n scale_sense=Tensor(np.full(1, np.finfo(np.float32).max),\n dtype=mstype.float32))\n output_1 = train_network(inputs, label)\n output_2 = train_network(inputs, label)\n assert output_1[0].asnumpy() == output_2[0].asnumpy()\n assert output_1[1].asnumpy() == output_2[1].asnumpy() == True\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_onecard\ndef test_loss_scale_fp16_overflow():\n inputs = Tensor(np.ones([16, 16]).astype(np.float32))\n label = Tensor(np.zeros([16, 16]).astype(np.float32))\n net = NetFP16(16, 16)\n net.set_train()\n\n loss = MSELoss()\n optimizer = Lamb(net.trainable_params(), learning_rate=0.01)\n net_with_loss = WithLossCell(net, loss)\n net_with_loss.set_grad()\n train_network = TrainOneStepWithLossScaleCell(net_with_loss, optimizer,\n scale_sense=Tensor(np.full((1), np.finfo(np.float32).max),\n dtype=mstype.float32))\n output_1 = train_network(inputs, label)\n output_2 = train_network(inputs, label)\n assert output_1[0].asnumpy() == output_2[0].asnumpy()\n assert output_1[1].asnumpy() == output_2[1].asnumpy() == True\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import context, Tensor, Parameter\nfrom mindspore.common.api import _executor\nfrom mindspore.nn import Cell, TrainOneStepCell, Momentum\nfrom mindspore.ops import operations as P\n\nclass Net(Cell):\n def __init__(self, weight, weight2, strategy1=None, strategy2=None, is_parameter=True):\n super().__init__()\n self.concat = P.Concat(axis=0).shard(strategy1)\n if is_parameter:\n self.weight = Parameter(weight, \"w1\")\n else:\n self.weight = weight\n self.mul = P.Mul().shard(strategy2)\n self.weight2 = Parameter(weight2, \"w2\")\n\n def construct(self, x, b):\n out = self.concat((self.weight, self.weight2))\n out = self.mul(x, out)\n return out\n\n\nclass Net2(Cell):\n def __init__(self, weight, strategy1=None, strategy2=None, axis=0):\n super().__init__()\n self.mul = P.Mul().shard(strategy1)\n self.concat = P.Concat(axis=axis).shard(strategy2)\n self.weight = Parameter(weight, \"w\")\n\n def construct(self, x, b):\n out = self.mul(x, b)\n out = self.concat((out, self.weight))\n return out\n\n\nclass Net3(Cell):\n def __init__(self, weight, weight2, weight3, strategy1=None, strategy2=None, is_parameter=True):\n super().__init__()\n self.concat = P.Concat(axis=0).shard(strategy1)\n if is_parameter:\n self.weight = Parameter(weight, \"w1\")\n else:\n self.weight = weight\n self.mul = P.Mul().shard(strategy2)\n self.weight2 = Parameter(weight2, \"w2\")\n self.weight3 = Parameter(weight3, \"w3\")\n\n def construct(self, x, b):\n out = self.concat((self.weight, self.weight2, self.weight3))\n out = self.mul(x, out)\n return out\n\n\n_x = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)\n_w1 = Tensor(np.ones([96, 64, 32]), dtype=ms.float32)\n_w2 = Tensor(np.ones([32, 64, 32]), dtype=ms.float32)\n_w3 = Tensor(np.ones([128, 16, 32]), dtype=ms.float32)\n_b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)\n\nw1 = Tensor(np.ones([48, 64, 32]), dtype=ms.float32)\nw2 = Tensor(np.ones([16, 64, 32]), dtype=ms.float32)\nw3 = Tensor(np.ones([64, 64, 32]), dtype=ms.float32)\n\n\ndef compile_net(net):\n context.set_context(save_graphs=False)\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_net = TrainOneStepCell(net, optimizer)\n train_net.set_auto_parallel()\n train_net.set_train()\n _executor.compile(train_net, _x, _b)\n context.reset_auto_parallel_context()\n\n\ndef test_concat_parameter():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=8, global_rank=0)\n strategy1 = ((1, 4, 2), (1, 4, 2))\n strategy2 = ((1, 4, 2), (1, 4, 2))\n net = Net(_w1, _w2, strategy1, strategy2, is_parameter=True)\n compile_net(net)\n\n\ndef test_concat_parameter_no_full_split():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=8, global_rank=0)\n strategy1 = ((1, 2, 2), (1, 2, 2))\n 
strategy2 = ((1, 4, 2), (1, 4, 2))\n net = Net(_w1, _w2, strategy1, strategy2, is_parameter=True)\n compile_net(net)\n\n\ndef test_concat_tensor_and_parameter():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=8, global_rank=0)\n strategy1 = ((1, 2, 2), (1, 2, 2))\n strategy2 = ((1, 4, 2), (1, 4, 2))\n net = Net(_w1, _w2, strategy1, strategy2, is_parameter=False)\n compile_net(net)\n\n\ndef test_concat_output():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=8, global_rank=0)\n strategy1 = ((2, 2, 2), (2, 2, 2))\n strategy2 = ((1, 4, 2), (1, 4, 2))\n net = Net2(_w1, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_concat_output_no_full_split():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=8, global_rank=0)\n strategy1 = ((2, 2, 2), (2, 2, 2))\n strategy2 = ((1, 2, 2), (1, 2, 2))\n net = Net2(_w1, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_concat_no_strategy():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=8, global_rank=0)\n strategy1 = ((2, 2, 2), (2, 2, 2))\n strategy2 = None\n net = Net2(_w3, strategy1, strategy2, axis=1)\n compile_net(net)\n\n\ndef test_concat_auto_parallel():\n context.set_auto_parallel_context(parallel_mode=\"auto_parallel\", device_num=8, global_rank=0)\n net = Net2(_w2)\n compile_net(net)\n\n\ndef test_concat_auto_parallel2():\n context.set_auto_parallel_context(parallel_mode=\"auto_parallel\", device_num=8, global_rank=0)\n strategy1 = None\n strategy2 = None\n net = Net2(_w3, strategy1, strategy2, axis=1)\n compile_net(net)\n\n\ndef test_concat_auto_parallel_3_tensor():\n context.set_auto_parallel_context(parallel_mode=\"auto_parallel\", device_num=8, global_rank=0)\n net = Net3(w1, w2, w3)\n compile_net(net)\n"
] | [
[
"numpy.array",
"numpy.dtype"
],
[
"numpy.random.uniform",
"numpy.zeros"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.abs",
"numpy.allclose",
"numpy.isnan",
"numpy.full",
"numpy.random.randn",
"numpy.count_nonzero",
"numpy.array"
],
[
"numpy.random.uniform"
],
[
"numpy.expand_dims",
"numpy.maximum",
"numpy.arange",
"numpy.squeeze",
"numpy.sum"
],
[
"numpy.random.seed",
"numpy.random.choice"
],
[
"numpy.true_divide",
"numpy.expand_dims",
"numpy.subtract",
"numpy.array",
"numpy.zeros"
],
[
"numpy.random.randn"
],
[
"numpy.reshape",
"numpy.uint8"
],
[
"numpy.diag",
"numpy.ascontiguousarray",
"numpy.nanmean",
"numpy.array",
"numpy.zeros"
],
[
"numpy.zeros"
],
[
"numpy.expand_dims",
"scipy.ndimage.gaussian_filter",
"numpy.min",
"numpy.copy",
"numpy.zeros_like",
"numpy.repeat"
],
[
"numpy.mean"
],
[
"numpy.testing.assert_equal"
],
[
"numpy.array",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.hstack",
"numpy.expand_dims",
"numpy.maximum",
"numpy.minimum",
"numpy.sum",
"numpy.min",
"numpy.ones",
"numpy.round",
"numpy.max",
"numpy.zeros_like",
"numpy.shape",
"numpy.float32",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.empty"
],
[
"numpy.finfo",
"numpy.zeros",
"numpy.full",
"numpy.ones"
],
[
"numpy.finfo",
"numpy.zeros",
"numpy.ndarray",
"numpy.ones"
],
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kngwyu/infomax-option-critic | [
"9d907c041c1d0280db9b23eb2fdf9e0033e33bf3"
] | [
"src/option_select_impl.py"
] | [
"\"\"\" Implemenation of uncertainty-aware option selection\n\"\"\"\n\n\nfrom abc import ABC, abstractmethod\nfrom typing import Tuple\n\nimport torch\n\nfrom torch import BoolTensor, LongTensor, Tensor\nfrom torch.distributions import Categorical\n\nfrom rainy.net.policy import BernoulliPolicy\n\n\ndef _debug_minmax(name: str, t: Tensor) -> None:\n print(f\"{name}: {t.max().item()}, {t.min().item()}\")\n\n\nclass OptionSelectImpl(ABC):\n worker_indices: Tensor\n EPS = 0.001\n INF = 1e9\n\n @abstractmethod\n def logmu_weight(self) -> float:\n pass\n\n def _logmu(self, qo: Tensor, logmu_o_xs: Tensor) -> Tensor:\n return qo - self.logmu_weight() * logmu_o_xs\n\n def _eval_sample_options(\n self, qo: Tensor, beta: BernoulliPolicy,\n ) -> Tuple[LongTensor, BoolTensor]:\n \"\"\"Sample options by ε-Greedy\n \"\"\"\n batch_size = qo.size(0)\n prev_options = self.eval_prev_options[:batch_size]\n current_beta = beta[self.worker_indices[:batch_size], prev_options]\n opt_terminals = current_beta.action().bool()\n use_new_options = self.eval_is_initial_states[:batch_size] | opt_terminals\n new_options = self.eval_opt_explorer.select_from_value(qo, same_device=True)\n options = torch.where(use_new_options, new_options, prev_options)\n return options, use_new_options\n\n def _sample_options(\n self, qo: Tensor, beta: BernoulliPolicy, mu_o_xs: Categorical,\n ) -> Tuple[LongTensor, BoolTensor]:\n \"\"\"\n Select new options.\n Returns options and booltensor that indicates which options ended.\n \"\"\"\n\n masks = self.storage.masks[-1]\n prev_options = self.prev_options\n current_beta = beta[self.worker_indices[: qo.size(0)], prev_options]\n opt_terminals = current_beta.action().bool()\n use_new_options = (1.0 - masks).bool() | opt_terminals\n # mask out current options\n opt_mask = torch.zeros_like(qo)\n opt_mask[self.worker_indices, prev_options] += opt_terminals * -self.INF\n if self.config.option_selector == \"epsg\":\n new_options = self.opt_explorer.select_from_value(\n qo + opt_mask, same_device=True\n )\n elif self.config.option_selector == \"logp\":\n new_options = self._logmu(qo + opt_mask, mu_o_xs.logits).argmax(-1)\n elif self.config.option_selector == \"epsg-logp\":\n value = self._logmu(qo + opt_mask, mu_o_xs.logits)\n new_options = self.opt_explorer.select_from_value(value, same_device=True)\n else:\n raise NotImplementedError(\n f\"Invalid option selector {self.config.opt_selector}\"\n )\n self.option_counter[new_options[use_new_options].cpu().numpy()] += 1\n options = torch.where(use_new_options, new_options, prev_options)\n return options, opt_terminals\n"
] | [
[
"torch.zeros_like",
"torch.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nathanielbunch/Nonstationary-Bandit-Problem-on-a-Quantum-Computer | [
"af9d4f508a42790249007d5237a2c0ee8b93e30a",
"af9d4f508a42790249007d5237a2c0ee8b93e30a"
] | [
"Classical/Bandit Problem/k-armed-bandit_non_stationary.py",
"Classical/QLearning/q_learning_neural_network.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport sys\n\nclass KBanditProblem:\n \n def __init__(self, k, stationary=True):\n self.k = k\n self.stationary = stationary\n self.values = np.random.normal(loc=0.0, scale=1, size=k)\n self.optimal = self.values.argmax() # this is called optimal becuase the things are random, and becuase it changes\n # over time, and every time a reqward is given, the distribution of rewards chnages \n # with the random reward. The optimal solution changes over time, thus it has to be \n # recalculated every time.\n \n def generate_reward(self, action):\n if not self.stationary:\n self.values += np.random.normal(loc=0.0, scale=0.01, size=self.k)\n self.optimal = self.values.argmax()\n return np.random.normal(loc=self.values[action], scale=1)\n\nclass KBanditSolution:\n \n def __init__(self, problem, steps):\n self.problem = problem\n self.steps = steps\n \n self.average_reward = 0\n self.average_rewards = np.array([])\n self.optimal_percentage = 0\n self.optimal_precentages = np.array([])\n \n def count_statistics(self, action, reward, step):\n self.average_reward += (1 / (step + 1)) * (reward - self.average_reward)\n self.optimal_percentage += (1 / (step + 1)) * ((1 if action == self.problem.optimal else 0) - self.optimal_percentage)\n self.average_rewards = np.append(self.average_rewards, self.average_reward)\n self.optimal_precentages = np.append(self.optimal_precentages, self.optimal_percentage)\n\nclass EGreedy(KBanditSolution):\n \n def solve(self, exploration_rate, initial_value):\n Q = {i: initial_value for i in range(k)} # 1. Value function\n N = {i: 0 for i in range(k)} # 2. Number of actions, for update rule\n rewards = []\n rewards_mean = []\n for i in range(self.steps): # 3. Main loop\n explore = random.uniform(0, 1) < exploration_rate # 4. Exploration\n if explore:\n action = random.randint(0, k - 1) # 5. Exploration: Choosing random action\n else:\n action = max(Q, key=Q.get) # 6. Choose action with maximum mean reward\n\n reward = self.problem.generate_reward(action) # 7. Get reward for current action\n rewards.append(reward)\n N[action] += 1 # 8. Update action number\n Q[action] += (1 / N[action]) * (reward - Q[action]) # 9. Update value dict \n if (i % 100 == 0):\n r_mean = np.mean(rewards[-100:])\n rewards_mean.append(r_mean)\n self.count_statistics(action, reward, i)\n return rewards_mean\n\n \n def plot_graph(self, values):\n plt.plot(values)\n plt.show()\n\nclass WeightedAverage(KBanditSolution):\n \n def solve(self, exploration_rate, step_size, initial_value):\n Q = {i: initial_value for i in range(k)} # 1. Value function\n N = {i: 0 for i in range(k)} # 2. Number of actions, for update rule\n\n for i in range(self.steps): # 3. Main loop\n explore = random.uniform(0, 1) < exploration_rate # 4. Exploration\n if explore:\n action = random.randint(0, k - 1) # 5. Exploration: Choosing random action\n else:\n action = max(Q, key=Q.get) # 6. Choose action with maximum mean reward\n\n reward = self.problem.generate_reward(action) # 7. Get reward for current action\n N[action] += 1 # 8. Update action number\n Q[action] += step_size * (reward - Q[action]) # 9. Update value dict \n self.count_statistics(action, reward, i)\n\nclass UCB(KBanditSolution):\n \n def count_ucb(self, q, c, step, n):\n if n == 0:\n return sys.maxsize\n return (q + (c * sqrt((log(step) / n))))\n \n def solve(self, c):\n Q = {i: 0 for i in range(k)} # 1. Value function \n N = {i: 0 for i in range(k)} # 2. 
Number of actions, for update rule\n\n for i in range(self.steps): # 3. Main loop\n Q_ucb = {i: self.count_ucb(Q[i], c, i + 1, N[i]) for i in range(k)} # 4. Count UCB\n action = max(Q_ucb, key=Q_ucb.get) # 5. Choose action with maximum UCB\n\n reward = self.problem.generate_reward(action) # 6. Get reward for current action\n N[action] += 1 # 7. Update action number\n Q[action] += (1 / N[action]) * (reward - Q[action]) # 8. Update value dict \n self.count_statistics(action, reward, i)\n \nk = 4\nsteps = 50000\nkb_problem = KBanditProblem(k, stationary=False)\n#kb_solution = KBanditSolution(kb_problem, steps)\negreedy_boi = EGreedy(kb_problem, steps)\nsolved = egreedy_boi.solve(0.01, 0)\negreedy_boi.plot_graph(solved)",
"import gym\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nenv = gym.make('FrozenLake-v0')\ntf.reset_default_graph()\n#These lines establish the feed-forward part of the network used to choose actions\ninputs1 = tf.placeholder(shape=[1,16],dtype=tf.float32)\nW = tf.Variable(tf.random_uniform([16,4],0,0.01))\nQout = tf.matmul(inputs1,W)\npredict = tf.argmax(Qout,1)\n\n#Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.\nnextQ = tf.placeholder(shape=[1,4],dtype=tf.float32)\nloss = tf.reduce_sum(tf.square(nextQ - Qout))\ntrainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\nupdateModel = trainer.minimize(loss)\ninit = tf.initialize_all_variables()\n\n# Set learning parameters\ny = .99\ne = 0.1\nnum_episodes = 2000\n#create lists to contain total rewards and steps per episode\njList = []\nrList = []\nwith tf.Session() as sess:\n sess.run(init)\n for i in range(num_episodes):\n #Reset environment and get first new observation\n s = env.reset()\n rAll = 0\n d = False\n j = 0\n #The Q-Network\n while j < 99:\n j+=1\n #Choose an action by greedily (with e chance of random action) from the Q-network\n a,allQ = sess.run([predict,Qout],feed_dict={inputs1:np.identity(16)[s:s+1]})\n if np.random.rand(1) < e:\n a[0] = env.action_space.sample()\n #Get new state and reward from environment\n s1,r,d,_ = env.step(a[0])\n #Obtain the Q' values by feeding the new state through our network\n Q1 = sess.run(Qout,feed_dict={inputs1:np.identity(16)[s1:s1+1]})\n #Obtain maxQ' and set our target value for chosen action.\n maxQ1 = np.max(Q1)\n targetQ = allQ\n #Below is the learning function:\n targetQ[0,a[0]] = r + y*maxQ1\n #Train our network using target and predicted Q values\n _,W1 = sess.run([updateModel,W],feed_dict={inputs1:np.identity(16)[s:s+1],nextQ:targetQ})\n rAll += r\n s = s1\n if d == True:\n #Reduce chance of random action as we train the model.\n e = 1./((i/50) + 10)\n break\n jList.append(j)\n rList.append(rAll)\nprint(\"Percent of succesful episodes: \" + str(sum(rList)/num_episodes) + \"%\")"
] | [
[
"matplotlib.pyplot.plot",
"numpy.random.normal",
"numpy.append",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.show"
],
[
"tensorflow.matmul",
"tensorflow.placeholder",
"numpy.max",
"tensorflow.initialize_all_variables",
"tensorflow.reset_default_graph",
"tensorflow.train.GradientDescentOptimizer",
"numpy.random.rand",
"tensorflow.square",
"tensorflow.Session",
"numpy.identity",
"tensorflow.argmax",
"tensorflow.random_uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Alwaysproblem/examples-1 | [
"9754fa63ed1931489a21ac1f5b299f945e369a5c",
"9754fa63ed1931489a21ac1f5b299f945e369a5c",
"9754fa63ed1931489a21ac1f5b299f945e369a5c",
"9754fa63ed1931489a21ac1f5b299f945e369a5c",
"9754fa63ed1931489a21ac1f5b299f945e369a5c",
"9754fa63ed1931489a21ac1f5b299f945e369a5c",
"9754fa63ed1931489a21ac1f5b299f945e369a5c"
] | [
"applications/tensorflow/cnns/models/resnet.py",
"applications/popart/bert/tests/unit/pytorch/nsp_test.py",
"code_examples/tensorflow/sharding/simple_sharding.py",
"applications/tensorflow/cnns/test_densenet.py",
"code_examples/tensorflow2/imdb/imdb_single_ipu.py",
"applications/tensorflow/cnns/training/validation.py",
"applications/tensorflow/dynamic_sparsity/ipu_sparse_ops/tools/sparse_pooling.py"
] | [
"# Copyright (c) 2019 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom models.resnet_base import ResNet\n\nimport tensorflow.compat.v1 as tf\nimport tensorflow.contrib as contrib\nfrom tensorflow.python.ipu import normalization_ops\n\n# This is all written for: NHWC\n\n\nclass TensorflowResNet(ResNet):\n def __init__(self, *args, **kwargs):\n self.dtype = tf.float16\n super(TensorflowResNet, self).__init__(*args, **kwargs)\n\n def _get_variable(self, name, shape, init):\n return tf.get_variable(name, shape, initializer=init, dtype=self.dtype)\n\n def residual(self, x, shortcut, out_filters, stride, type='B'):\n in_shape = shortcut.get_shape()\n pad = int(x.get_shape()[3] - in_shape[3])\n if pad != 0 or type == 'C':\n if type == 'A':\n shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape,\n strides=[1, stride, stride, 1])\n shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])\n else:\n shortcut = self.conv(shortcut, 1, stride, out_filters)\n shortcut = self.norm(shortcut)\n x = shortcut + x\n x = self.relu(x)\n return x\n\n def relu(self, x):\n return tf.nn.relu(x)\n\n def conv(self, x, ksize, stride, filters_out, bias=True):\n filters_in = x.get_shape()[-1]\n\n wshape = [ksize, ksize, filters_in, filters_out]\n w_init = contrib.layers.xavier_initializer(dtype=self.dtype)\n weights = self._get_variable('weights', shape=wshape, init=w_init)\n x = tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')\n if bias:\n bshape = [filters_out]\n b_init = tf.zeros_initializer()\n biases = self._get_variable('biases', shape=bshape, init=b_init)\n x = x + biases\n return x\n\n def norm(self, x, type='BATCH', groups=32, training=False):\n if type == 'BATCH':\n # Perhaps use tf.nn.fused_batch_norm instead.\n x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True,\n training=training, trainable=training,\n momentum=0.997, epsilon=1e-5)\n elif type == 'GROUP':\n x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True,\n training=training, trainable=training,\n channels_axis=-1, reduction_axes=[-3, -2])\n return x\n\n def fc(self, x, num_units_out):\n num_units_in = x.get_shape()[1]\n w_init = contrib.layers.xavier_initializer(dtype=self.dtype)\n b_init = tf.constant_initializer(0.0)\n\n with self.namescope('fc'):\n weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init)\n biases = self._get_variable('biases', shape=[num_units_out], init=b_init)\n\n x = tf.nn.xw_plus_b(x, weights, biases)\n return x\n\n def reduce_mean(self, x, indices=(1, 2)):\n x = tf.reduce_mean(x, reduction_indices=indices)\n return x\n\n def maxpool(self, x):\n x = tf.nn.max_pool(\n x,\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n return x\n\n def namescope(self, debug_string):\n return tf.variable_scope(debug_string)\n",
"# Copyright (c) 2019 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport popart\nimport numpy as np\nimport pytest\n\nfrom bert_model import BertConfig, ExecutionMode, get_model\nfrom tests.torch_bert import BertConfig as TorchBertConfig, BertForNextSentencePrediction\n\nfrom .full_graph_utils import fwd_graph, bwd_graph\n\n\n'''\nTests the full nsp graph.\n'''\nNSP_MAPPING = {}\n\nNSP_MAPPING[ExecutionMode.DEFAULT] = {\n \"bert.pooler.dense.weight\": \"NSP/PoolW\",\n \"bert.pooler.dense.bias\": \"NSP/PoolB\",\n \"cls.seq_relationship.weight\": \"NSP/NspW\",\n \"cls.seq_relationship.bias\": \"NSP/NspB\"\n}\n\nNSP_MAPPING[ExecutionMode.PHASED] = {\n \"bert.pooler.dense.weight\": \"BertModel/NSP/Pool/Dense/Weight\",\n \"bert.pooler.dense.bias\": \"BertModel/NSP/Pool/Dense/Bias\",\n \"cls.seq_relationship.weight\": \"BertModel/NSP/Classifier/Dense/Weight\",\n \"cls.seq_relationship.bias\": \"BertModel/NSP/Classifier/Dense/Bias\"\n}\n\nNSP_TRANSFORM = {\n \"bert.pooler.dense.weight\": np.transpose,\n \"cls.seq_relationship.weight\": np.transpose\n}\n\ntest_modes = [ExecutionMode.DEFAULT, pytest.param(ExecutionMode.PHASED, marks=pytest.mark.requires_remote_buffers)]\n\n\[email protected](\"mode\", test_modes)\ndef test_nsp_fwd(custom_ops, mode):\n # ------------------- PopART --------------------\n config = BertConfig(task=\"NSP\",\n vocab_length=9728,\n num_layers=2,\n batch_size=1,\n hidden_size=768,\n sequence_length=128,\n activation_type=\"relu\",\n popart_dtype=\"FLOAT\",\n no_dropout=True,\n no_attn_dropout=True,\n inference=True,\n no_mask=True,\n execution_mode=mode,\n mask_tokens=0,\n split_qkv=False)\n popart_model = get_model(config, mode)\n\n\n # ------------------- PyTorch -------------------------\n torch_model = BertForNextSentencePrediction(\n TorchBertConfig(config.vocab_length, config.hidden_size,\n num_hidden_layers=config.num_layers,\n num_attention_heads=config.attention_heads,\n intermediate_size=config.ff_size,\n hidden_act=config.activation_type,\n max_position_embeddings=config.max_positional_length,\n layer_norm_eps=config.layer_norm_eps,\n mask_tokens=config.mask_tokens,\n num_labels=2))\n\n fwd_graph(popart_model,\n torch_model,\n mode,\n NSP_MAPPING[mode],\n transform=NSP_TRANSFORM)\n\n\[email protected]\[email protected](\"mode\", test_modes)\[email protected](\"opt_type\", [\"SGD\", \"LAMB\"])\ndef test_nsp_bwd(custom_ops, mode, opt_type):\n nsp_bwd(custom_ops, mode, opt_type, 2432, 288)\n\n\ndef nsp_bwd(custom_ops, mode, opt_type, vocab_length=9728, hidden_size=768):\n if mode == ExecutionMode.PHASED:\n # Phased Execution requires atleast two transformer layers to ensure mlm and embedding are in the same virtual graph.\n num_layers = 2\n else:\n num_layers = 1\n\n # ------------------- PopART --------------------\n config = BertConfig(task=\"NSP\",\n vocab_length=vocab_length,\n num_layers=num_layers,\n batch_size=1,\n hidden_size=hidden_size,\n sequence_length=128,\n activation_type=\"relu\",\n 
popart_dtype=\"FLOAT\",\n no_dropout=True,\n no_attn_dropout=True,\n no_mask=True,\n update_embedding_dict=True,\n phased_execution_type=\"single\",\n execution_mode=mode,\n split_qkv = (opt_type == \"LAMB\"))\n popart_model = get_model(config, mode)\n\n # ------------------- PyTorch -------------------------\n torch_model = BertForNextSentencePrediction(\n TorchBertConfig(config.vocab_length, config.hidden_size,\n num_hidden_layers=config.num_layers,\n num_attention_heads=config.attention_heads,\n intermediate_size=config.ff_size,\n hidden_act=config.activation_type,\n max_position_embeddings=config.max_positional_length,\n layer_norm_eps=config.layer_norm_eps,\n mask_tokens=config.mask_tokens,\n update_embedding_dict=True,\n num_labels=2))\n l1_lambda = 0.1\n\n def popart_loss_fn(outputs):\n if mode == ExecutionMode.PHASED:\n with popart_model.scope_provider(popart_model.builder, popart_model.nsp_scope):\n loss = popart_model.builder.aiGraphcore.l1loss([outputs[0]],\n l1_lambda, debugPrefix=\"l1LossVal\",\n reduction=popart.ReductionType.Sum)\n else:\n loss = popart_model.builder.aiGraphcore.l1loss([outputs[0]], l1_lambda,\n debugPrefix=\"l1LossVal\",\n reduction=popart.ReductionType.Sum)\n popart_model.builder.virtualGraph(loss, popart_model.nsp_scope.virtualGraph)\n return loss\n\n def torch_loss_fn(outputs):\n return l1_lambda * torch.norm(outputs[0], 1)\n\n bwd_graph(popart_model,\n torch_model,\n mode,\n popart_loss_fn=popart_loss_fn,\n torch_loss_fn=torch_loss_fn,\n mapping=NSP_MAPPING[mode],\n transform=NSP_TRANSFORM,\n opt_type=opt_type)\n",
"# Copyright (c) 2019 Graphcore Ltd. All rights reserved.\n\nimport argparse\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.ipu import autoshard, ipu_compiler, scopes, utils\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--autoshard\", action=\"store_true\",\n help=\"Enables automatic sharding\")\nparser.set_defaults(autoshard=False)\nopts = parser.parse_args()\n\nNUM_SHARDS = 2\n\n# With sharding all placeholders MUST be explicitly placed on\n# the CPU device:\nwith tf.device(\"cpu\"):\n pa = tf.placeholder(np.float32, [2], name=\"a\")\n pb = tf.placeholder(np.float32, [2], name=\"b\")\n pc = tf.placeholder(np.float32, [2], name=\"c\")\n\n\n# Put part of the computation on shard 1 and part on shard 2.\n# Sharding is automatically enabled on detection of nodes\n# placed with 'scopes.ipu_shard(...)':\ndef manual_sharding(pa, pb, pc):\n with scopes.ipu_shard(0):\n o1 = pa + pb\n with scopes.ipu_shard(1):\n o2 = pa + pc\n out = o1 + o2\n return out\n\n\ndef auto_sharding(pa, pb, pc):\n # This context marks the section of the graph to autoshard.\n # In this case we want to autoshard across the whole graph\n # so this context isn't actually required.\n with autoshard.ipu_autoshard():\n o1 = pa + pb\n o2 = pa + pc\n out = o1 + o2\n return out\n\n\ndef my_graph(pa, pb, pc):\n if opts.autoshard:\n result = auto_sharding(pa, pb, pc)\n # The first argument to automatic_sharding is the number\n # of shards. The second argument is the tensor closest to\n # the input data source in the graph. In this case it\n # could be pa, pb or pc. The third argument is the\n # tensor closest to the loss of the graph. There is no\n # loss function, thus the output of the graph is the\n # closest. By defining the extremities of the graph\n # the automatic sharding mechanism can calculate which\n # edges it can split across.\n autoshard.automatic_sharding(NUM_SHARDS, pa, result)\n else:\n result = manual_sharding(pa, pb, pc)\n return result\n\n# Create the IPU section of the graph\nwith scopes.ipu_scope(\"/device:IPU:0\"):\n out = ipu_compiler.compile(my_graph, [pa, pb, pc])\n\n# Define the feed_dict input data\nfd = {pa: [1., 1.], pb: [0., 1.], pc: [1., 5.]}\n# Configure an IPU device that has NUM_SHARDS devices that we will\n# shard across.\ncfg = utils.create_ipu_config(profiling=True)\ncfg = utils.auto_select_ipus(cfg, NUM_SHARDS)\nutils.configure_ipu_system(cfg)\n\nwith tf.Session() as sess:\n result = sess.run(out, fd)\n print(result)\n",
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport logging\nimport unittest\nfrom pathlib import Path\n\nimport numpy as np\nimport tensorflow as tf\nfrom models import optimize_for_infer\nfrom models.densenet_weights import get_densenet_weights\nfrom models.official_keras.densenet_base import DenseNet\nfrom tensorflow.python.ipu import utils\nfrom tensorflow.python.keras.applications.densenet import preprocess_input, \\\n decode_predictions\nfrom tensorflow.python.keras.preprocessing import image\n\n# Set up logging\nlogging.basicConfig(format='%(asctime)s %(module)s - %(funcName)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\ntf.get_logger().setLevel(logging.ERROR)\n\nCHECKPOINT_PATH = \"../tensorflow/densenet_weights_fp16/densenet_model.ckpt\"\nIMAGE_PATH = \"../../tensorflow/image_classification/densenet/images/gorilla.jpg\"\nIMAGE_DIR = \"../../tensorflow/image_classification/densenet/images\"\n\n\nclass TestDensenet(unittest.TestCase):\n \"\"\"Test densenet model. \"\"\"\n\n @classmethod\n def setUpClass(cls):\n # Set up input to the network\n img_width = img_height = 224\n img_channels = 3\n densenet_121_blocks = (6, 12, 24, 16)\n cls.batch_size = 1\n cls.num_classes = 1000\n # Set up image input placeholder\n cls.placeholder_input = tf.placeholder(dtype=tf.float16,\n shape=(cls.batch_size, img_height, img_width, img_channels),\n name=\"image_input\")\n\n # Set compile and device options\n opts = utils.create_ipu_config(profiling=False, use_poplar_text_report=False)\n utils.auto_select_ipus(opts, [1])\n utils.configure_ipu_system(opts)\n\n # Construct Densenet model\n cls.densenet_model = DenseNet(blocks=densenet_121_blocks, num_classes=cls.num_classes,\n image_width=img_width, image_height=img_height, image_channels=img_channels)\n\n cls.densenet_model(cls.placeholder_input)\n\n # Restore weights\n checkpoint_file = CHECKPOINT_PATH\n\n if not Path(checkpoint_file + \".index\").exists():\n print('Checkpoint file does not exist, attempting to download pre-trained weights')\n checkpoint_file = get_densenet_weights(Path(checkpoint_file))\n\n # Create test session\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n saver.restore(sess, checkpoint_file)\n logging.info('Restored imagenet weights.')\n\n # Optimize inference graph\n logging.info('Starting graph optimization.')\n densenet_graph_def = tf.get_default_graph().as_graph_def()\n frozen_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(sess, densenet_graph_def,\n output_node_names=[\"output-prob\"])\n # Remove identity ops in initializers to allow fusing batch norm with conv in the next line\n frozen_graph_def = tf.compat.v1.graph_util.remove_training_nodes(frozen_graph_def)\n optimized_graph_def = optimize_for_infer.fold_batch_norms(frozen_graph_def)\n\n logging.info('Completed graph optimization.')\n\n tf.reset_default_graph()\n with tf.device('/device:IPU:0'):\n with tf.variable_scope('', use_resource=True):\n cls.output = tf.import_graph_def(optimized_graph_def, input_map={}, name=\"optimized\",\n return_elements=[\"output-prob:0\"])[0]\n\n def test_output_shape(self):\n assert self.output._shape_as_list() == [self.batch_size, self.num_classes]\n\n def test_image(self, img_path: str = IMAGE_PATH) -> None:\n img = image.load_img(img_path, target_size=(224, 224))\n img = image.img_to_array(img)\n img = np.expand_dims(img, axis=0)\n img = preprocess_input(img)\n with tf.Session() as session:\n predictions = session.run(self.output, 
feed_dict={\"optimized/image_input:0\": img})\n\n _, pred_label, pred_prob = decode_predictions(predictions, top=1)[0][0]\n assert Path(IMAGE_PATH).stem.lower() == pred_label.lower()\n assert pred_prob > 0.9\n",
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport tensorflow as tf\n\nfrom tensorflow.python import ipu\n\nfrom tensorflow.python.ipu.keras.layers import Embedding, LSTM\nfrom tensorflow.python.keras.layers import Dense\nfrom tensorflow.python.keras.layers import Input\nfrom tensorflow.python.keras.datasets import imdb\nfrom tensorflow.python.keras.preprocessing import sequence\nfrom tensorflow.python.keras.optimizer_v2.adam import Adam\n\nif tf.__version__[0] != '2':\n raise ImportError(\"TensorFlow 2 is required for this example\")\n\nmax_features = 20000\nminibatch_size = 32\n\n\n# Define the dataset.\ndef get_dataset():\n (x_train, y_train), (_, _) = imdb.load_data(num_words=max_features)\n\n x_train = sequence.pad_sequences(x_train, maxlen=80)\n\n ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n ds = ds.repeat()\n ds = ds.map(lambda x, y: (x, tf.cast(y, tf.int32)))\n ds = ds.batch(minibatch_size, drop_remainder=True)\n return ds\n\n\n# Define the model.\ndef get_model():\n input_layer = Input(shape=(80), dtype=tf.int32, batch_size=minibatch_size)\n\n x = Embedding(max_features, 128)(input_layer)\n x = LSTM(128, dropout=0.2)(x)\n x = Dense(16, activation='relu')(x)\n x = Dense(1, activation='sigmoid')(x)\n\n return ipu.keras.Model(input_layer, x)\n\n\ndef main():\n # Configure IPUs.\n cfg = ipu.utils.create_ipu_config()\n cfg = ipu.utils.auto_select_ipus(cfg, 1)\n ipu.utils.configure_ipu_system(cfg)\n\n # Set up IPU strategy.\n strategy = ipu.ipu_strategy.IPUStrategy()\n with strategy.scope():\n\n model = get_model()\n\n model.compile(loss='binary_crossentropy', optimizer=Adam(0.005))\n model.fit(get_dataset(), steps_per_epoch=768, epochs=3)\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright (c) 2019 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe validation code used in train.py.\n\nThis script can also be called to run validation on previously generated checkpoints.\nSee the README for more information.\n\n\"\"\"\n\nimport tensorflow as tf\nimport os\nimport re\nimport time\nimport argparse\nimport sys\nfrom collections import OrderedDict\nimport importlib\nfrom glob import glob\n\nimport train\nimport log as logging\nfrom Datasets import data as dataset\nfrom Datasets.imagenet_dataset import accelerator_side_preprocessing\nfrom tensorflow.python import ipu\nfrom ipu_utils import get_config\nfrom tensorflow.python.ipu.scopes import ipu_scope\nfrom tensorflow.python.ipu import loops, ipu_infeed_queue\nimport tensorflow.contrib.compiler.xla as xla\nfrom tensorflow.python.ipu.ops import cross_replica_ops\nDATASET_CONSTANTS = dataset.DATASET_CONSTANTS\n\n\ndef validation_graph_builder(model, image, label, opts):\n if opts.get('no_hostside_norm'):\n image = accelerator_side_preprocessing(image, opts=opts)\n logits = model(opts, training=False, image=image)\n\n predictions = tf.argmax(logits, 1, output_type=tf.int32)\n accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, label), tf.float16))\n\n return accuracy\n\n\ndef validation_graph(model, opts):\n valid_graph = tf.Graph()\n with valid_graph.as_default():\n # datasets must be defined outside the ipu device scope\n valid_iterator = ipu_infeed_queue.IPUInfeedQueue(dataset.data(opts, is_training=False),\n feed_name='validation_feed',\n replication_factor=opts['replicas']*opts['shards'])\n\n with ipu_scope('/device:IPU:0'):\n def comp_fn():\n def body(total_accuracy, image, label):\n accuracy = validation_graph_builder(model, image, label, opts)\n return total_accuracy + (tf.cast(accuracy, tf.float32) / opts[\"validation_batches_per_step\"])\n accuracy = loops.repeat(int(opts[\"validation_batches_per_step\"]),\n body, [tf.constant(0, tf.float32)], valid_iterator)\n if opts['replicas']*opts['shards'] > 1:\n accuracy = cross_replica_ops.cross_replica_sum(accuracy) / (opts['replicas']*opts['shards'])\n return accuracy\n\n (accuracy,) = xla.compile(comp_fn, [])\n\n accuracy = 100 * accuracy\n\n valid_saver = tf.train.Saver()\n\n ipu.utils.move_variable_initialization_to_cpu()\n valid_init = tf.global_variables_initializer()\n\n globalAMP = None\n if opts[\"available_memory_proportion\"] and len(opts[\"available_memory_proportion\"]) == 1:\n globalAMP = opts[\"available_memory_proportion\"][0]\n\n ipu_options = get_config(ipu_id=opts[\"select_ipu\"],\n prng=not opts[\"no_stochastic_rounding\"],\n shards=opts['shards'],\n number_of_replicas=opts['replicas'],\n max_cross_replica_buffer_size=opts[\"max_cross_replica_buffer_size\"],\n fp_exceptions=opts[\"fp_exceptions\"],\n half_partials=opts[\"enable_half_partials\"],\n conv_dithering=opts[\"enable_conv_dithering\"],\n xla_recompute=opts[\"xla_recompute\"],\n seed=opts[\"seed\"],\n profile = opts['profile'],\n 
availableMemoryProportion=globalAMP,\n stable_norm=opts[\"stable_norm\"],\n internalExchangeOptimisationTarget=opts[\n \"internal_exchange_optimisation_target\"\n ])\n ipu.utils.configure_ipu_system(ipu_options)\n\n valid_sess = tf.Session(graph=valid_graph, config=tf.ConfigProto())\n\n return train.GraphOps(valid_graph, valid_sess, valid_init, [accuracy], None, valid_iterator, None, valid_saver, None)\n\n\ndef validation_run(valid, filepath, i, epoch, first_run, opts):\n if filepath:\n valid.saver.restore(valid.session, filepath)\n name = filepath.split('/')[-1]\n else:\n name = None\n\n # Gather accuracy statistics\n accuracy = 0.0\n start = time.time()\n for __ in range(opts[\"validation_iterations\"]):\n try:\n a = valid.session.run(valid.ops)[0]\n except tf.errors.OpError as e:\n raise tf.errors.ResourceExhaustedError(e.node_def, e.op, e.message)\n\n accuracy += a\n val_time = time.time() - start\n accuracy /= opts[\"validation_iterations\"]\n\n valid_format = (\n \"Validation top-1 accuracy [{name}] (iteration: {iteration:6d}, epoch: {epoch:6.2f}, img/sec: {img_per_sec:6.2f},\"\n \" latency (ms): {latency:8.4g}, time: {val_time:8.6f}): {val_acc:6.3f}%\")\n\n stats = OrderedDict([\n ('name', name),\n ('iteration', i),\n ('epoch', epoch),\n ('val_acc', accuracy),\n ('val_time', val_time),\n ('img_per_sec', (opts[\"validation_iterations\"] *\n opts[\"validation_batches_per_step\"] *\n opts['validation_total_batch_size']) / val_time),\n ('latency', 1000 * val_time / (opts[\"validation_iterations\"] *\n opts[\"validation_batches_per_step\"])),\n ])\n logging.print_to_file_and_screen(valid_format.format(**stats), opts)\n del stats['name']\n logging.write_to_csv(stats, first_run, False, opts)\n\n\ndef initialise_validation(model, opts):\n # -------------- BUILD GRAPH ------------------\n valid = validation_graph(model.Model, opts)\n # ------------- INITIALIZE SESSION -----------\n\n valid.session.run(valid.iterator.initializer)\n with valid.graph.as_default():\n valid.session.run(tf.global_variables_initializer())\n\n return valid\n\n\ndef validation_only_process(model, opts):\n valid = initialise_validation(model, opts)\n\n filename_pattern = re.compile(\".*ckpt-[0-9]+$\")\n ckpt_pattern = re.compile(\".*ckpt-([0-9]+)$\")\n if opts[\"restore_path\"]:\n if os.path.isdir(opts[\"restore_path\"]):\n filenames = sorted([os.path.join(opts[\"restore_path\"], f[:-len(\".index\")])\n for f in os.listdir(opts[\"restore_path\"])\n if filename_pattern.match(f[:-len(\".index\")]) and\n f[-len(\".index\"):] == \".index\"],\n key=lambda x: int(ckpt_pattern.match(x).groups()[0]))\n else:\n filenames = sorted([f[:-len(\".index\")] for f in glob(opts['restore_path'] + '*.index')])\n else:\n filenames = [None]\n\n print(filenames)\n\n for i, filename in enumerate(filenames):\n print(filename)\n if filename:\n valid.saver.restore(valid.session, filename)\n if ckpt_pattern.match(filename):\n iteration = int(ckpt_pattern.match(filename).groups()[0])\n else:\n iteration = -1\n else:\n print(\"Warning: no restore point found - randomly initialising weights instead\")\n valid.session.run(valid.init)\n iteration = 0\n\n epoch = float(opts[\"batch_size\"] * iteration) / DATASET_CONSTANTS[opts['dataset']]['NUM_IMAGES']\n for r in range(opts[\"repeat\"]):\n validation_run(valid, None, iteration, epoch, i == 0, opts)\n\n\ndef add_main_arguments(parser):\n group = parser.add_argument_group('Main')\n group.add_argument('--model', default='resnet', help=\"Choose model\")\n group.add_argument('--restore-path', type=str,\n 
help=\"Path to a single checkpoint to restore from or directory containing multiple checkpoints\")\n group.add_argument('--repeat', type=int, default=1,\n help=\"Repeat validation for debugging puposes\")\n group.add_argument('--help', action='store_true', help='Show help information')\n return parser\n\n\ndef set_main_defaults(opts):\n opts['summary_str'] = \"\\n\"\n\n\ndef set_validation_defaults(opts):\n if not opts['validation']:\n opts['summary_str'] += \"No Validation\\n\"\n else:\n opts['validation_total_batch_size'] = opts['batch_size']*opts['shards']*opts['replicas']\n opts['summary_str'] += \"Validation\\n Batch Size: {}\\n\".format(\"{validation_total_batch_size}\")\n opts[\"validation_iterations\"] = int(DATASET_CONSTANTS[opts['dataset']]['NUM_VALIDATION_IMAGES'] /\n opts[\"validation_total_batch_size\"])\n if opts[\"batches_per_step\"] < opts[\"validation_iterations\"]:\n opts[\"validation_batches_per_step\"] = int(opts[\"validation_iterations\"] //\n int(round(opts[\"validation_iterations\"] / opts['batches_per_step'])))\n opts[\"validation_iterations\"] = int(opts[\"validation_iterations\"] / opts[\"validation_batches_per_step\"])\n else:\n opts[\"validation_batches_per_step\"] = opts[\"validation_iterations\"]\n opts[\"validation_iterations\"] = 1\n\n\ndef create_parser(model, parser):\n parser = model.add_arguments(parser)\n parser = dataset.add_arguments(parser)\n parser = train.add_training_arguments(parser)\n parser = train.add_ipu_arguments(parser)\n parser = logging.add_arguments(parser)\n return parser\n\n\ndef set_defaults(model, opts):\n set_main_defaults(opts)\n dataset.set_defaults(opts)\n model.set_defaults(opts)\n set_validation_defaults(opts)\n train.set_ipu_defaults(opts)\n logging.set_defaults(opts)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Validation for previously generated checkpoints.', add_help=False)\n parser = add_main_arguments(parser)\n args, unknown = parser.parse_known_args()\n args = vars(args)\n if args['help']:\n parser.print_help()\n else:\n try:\n model = importlib.import_module(\"Models.\" + args['model'])\n except ImportError:\n raise ValueError('Models/{}.py not found'.format(args['model']))\n\n parser = create_parser(model, parser)\n opts = vars(parser.parse_args())\n opts[\"command\"] = ' '.join(sys.argv)\n set_defaults(model, opts)\n\n if opts['dataset'] == 'imagenet':\n if opts['image_size'] is None:\n opts['image_size'] = 224\n elif 'cifar' in opts['dataset']:\n opts['image_size'] = 32\n\n logging.print_to_file_and_screen(\"Command line: \" + opts[\"command\"], opts)\n logging.print_to_file_and_screen(opts[\"summary_str\"].format(**opts), opts)\n opts[\"summary_str\"] = \"\"\n logging.print_to_file_and_screen(opts, opts)\n validation_only_process(model, opts)\n",
"# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport os\nimport argparse\nimport numpy as np\nfrom functools import partial\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.python import ipu\nfrom ipu_sparse_ops import sparse, layers, sparse_training\nfrom tensorflow.python.ipu import ipu_outfeed_queue, ipu_compiler\nfrom tensorflow.python.ipu.scopes import ipu_scope\n\ntf.disable_eager_execution()\ntf.disable_v2_behavior()\n\n\nweights = {}\n\n\ndef get_program_arguments():\n parser = argparse.ArgumentParser(description='Sparse checkpoints tool')\n parser.add_argument(\"--input-size\", type=int, default=16,\n help=\"Input size of the layer.\")\n parser.add_argument(\"--batchsize\", type=int, default=4,\n help=\"Batch size\")\n parser.add_argument(\"--output-size\", type=int, default=16,\n help=\"Output size of the layer.\")\n parser.add_argument(\"--dtype\", choices=[\"fp32\", \"fp16\"], default=\"fp32\",\n help=\"Floating point precision\")\n parser.add_argument(\"--density\", default=0.1, type=float, help=\"Density of the fc layer\")\n parser.add_argument(\"--seed\", default=0, type=int, help=\"numpy random seed\")\n parser.add_argument(\"--print-weights\", action='store_true',\n help=\"Prints the dense fc weights at different \"\n \"stages for debug purposes.\")\n parser.add_argument('--pooling-type', default='SUM', choices=['SUM', 'AVG', 'MAX'],\n help=\"Select dense gradients block pooling\")\n parser.add_argument('--block-size', default=1, type=int, choices=[1, 4, 8, 16],\n help=\"Sparse blocks size. Set to a value > 1 for block sparsity\")\n parser.add_argument('--meta-info-oversize', default=0.1, type=float,\n help=\"Sets the Popsparse matmul option 'metaInfoBucketOversizeProportion'.\")\n return parser.parse_args()\n\n\ndef model(x_fc, fc, fc_pool, opts, outfeed_queue, dtype):\n with tf.variable_scope(\"SparseOps\", reuse=tf.AUTO_REUSE, use_resource=True):\n y_fc = fc(x_fc, tf.constant(True))\n y_fc_pool = fc_pool(x_fc, tf.constant(True))\n loss = tf.reduce_sum(y_fc)\n loss_pool = tf.reduce_sum(y_fc_pool)\n output = {}\n output['dense_grad'] = tf.convert_to_tensor(fc.get_dense_grad_w(loss))\n output['pooled_dense_grad'] = tf.convert_to_tensor(fc_pool.get_dense_grad_w(loss_pool))\n out = outfeed_queue.enqueue(output)\n return out\n\n\ndef make_fc_weights(input_size, hidden_size, values):\n w = np.zeros([input_size, hidden_size])\n for value in values:\n w[np.random.randint(input_size), np.random.randint(hidden_size)] = value\n return w\n\n\ndef create_sparse_layers(opts):\n matmul_opts = {\"metaInfoBucketOversizeProportion\": opts.meta_info_oversize}\n in_blocks = opts.input_size // opts.block_size\n out_blocks = opts.output_size // opts.block_size\n identity_size = max(in_blocks, out_blocks)\n block_mask = np.identity(identity_size)[0: in_blocks, 0: out_blocks]\n block_mask[1, 3] = 1\n block_mask[0, 3] = 1\n n_blocks = np.count_nonzero(block_mask)\n el_mask = sparse.block_mask_to_element(block_mask, opts.block_size)\n n_els = np.count_nonzero(el_mask)\n masked_rhs = np.zeros_like(el_mask, dtype=np.float32 if opts.dtype == \"fp32\" else np.float16)\n values = np.random.rand(n_els)\n masked_rhs[np.nonzero(el_mask)] = values\n if opts.block_size == 1:\n triplets = sparse.triplets_from_dense(masked_rhs)\n else:\n triplets = sparse.triplets_from_dense(block_mask)\n triplets = sparse.Triplets(\n triplets.row_indices, triplets.col_indices,\n sparse.blocks_at_indices(\n triplets.row_indices, triplets.col_indices, opts.block_size, masked_rhs)\n )\n\n fc = 
layers.SparseFcLayer.from_triplets(\n opts.output_size, [opts.batchsize, opts.input_size], *triplets,\n matmul_options=matmul_opts,\n name=\"fc_None\",\n dtype=dtype,\n use_bias=False, relu=False, pooling_type='NONE')\n fc_pool = layers.SparseFcLayer.from_triplets(\n opts.output_size, [opts.batchsize, opts.input_size], *triplets,\n matmul_options=matmul_opts,\n name=\"fc_\" + opts.pooling_type,\n dtype=dtype,\n use_bias=False, relu=False, pooling_type=opts.pooling_type)\n\n return fc, fc_pool\n\n\ndef set_up_ipu_devices(opts):\n config = ipu.utils.create_ipu_config()\n config = ipu.utils.auto_select_ipus(config, 1)\n ipu.utils.configure_ipu_system(config)\n # Set the seed for the stochastic rounding\n ipu.utils.reset_ipu_seed = opts.seed\n\n\ndef make_graph(fc_weights):\n graph = tf.Graph()\n\n with graph.as_default():\n outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue(feed_name=\"sparse_outfeed\")\n fc, fc_pool = create_sparse_layers(opts)\n\n model_op = partial(model, fc=fc, fc_pool=fc_pool,\n opts=opts, outfeed_queue=outfeed_queue,\n dtype=dtype)\n\n with tf.device(\"cpu\"):\n x_fc = tf.placeholder(dtype, shape=[opts.batchsize, opts.input_size])\n\n with ipu_scope('/device:IPU:0'):\n test_op = ipu_compiler.compile(model_op, inputs=[x_fc])\n\n with tf.device(\"cpu\"):\n fc.create_placeholders()\n fc_pool.create_placeholders()\n\n dequeue = outfeed_queue.dequeue()\n ipu.utils.move_variable_initialization_to_cpu()\n\n return graph, outfeed_queue, fc, fc_pool, x_fc, test_op, dequeue\n\n\nif __name__ == \"__main__\":\n if not os.path.isdir(\"./tmp\"):\n os.mkdir(\"./tmp\")\n tmp_path = \"./tmp/test\"\n opts = get_program_arguments()\n set_up_ipu_devices(opts)\n\n dtype = tf.float32 if opts.dtype == 'fp32' else tf.float16\n\n np.random.seed(opts.seed)\n\n x_fc_in = np.random.normal(size=[opts.batchsize, opts.input_size])\n\n fc_weights = np.random.rand(10)\n\n # Create a first graph and run it to retrieve the weights from the ipu. Then create a checkpoint\n graph, outfeed_queue, fc, fc_pool, x_fc, test_op, dequeue = make_graph(fc_weights=fc_weights)\n\n with tf.Session(graph=graph) as sess:\n # init\n sess.run(tf.global_variables_initializer())\n\n # run and outfeed weights\n sess.run(test_op, feed_dict={x_fc: x_fc_in})\n results = sess.run(dequeue)\n\n unpooled = results['dense_grad'][0]\n ipu_pooled = results['pooled_dense_grad'][0]\n\n # do pooling on the host for the dense grad\n cpu_pooled = sparse_training.block_pool(unpooled, opts.block_size, opts.pooling_type)\n\n if opts.dtype == 'fp16':\n atol = 1e-2\n else:\n atol = 1e-5\n if not np.allclose(cpu_pooled, ipu_pooled, atol=atol):\n raise Exception(f\"Host and ipu pooling results don't match.\\nHost pool:\\n{cpu_pooled}\\nIpu pool:\\n{ipu_pooled}\")\n\n print(\"All results match\")\n"
] | [
[
"tensorflow.compat.v1.strided_slice",
"tensorflow.python.ipu.normalization_ops.group_norm",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.nn.xw_plus_b",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.nn.conv2d",
"tensorflow.compat.v1.constant_initializer",
"tensorflow.compat.v1.nn.relu",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.compat.v1.nn.max_pool",
"tensorflow.compat.v1.layers.batch_normalization",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.pad"
],
[
"torch.norm"
],
[
"tensorflow.python.ipu.scopes.ipu_shard",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.device",
"tensorflow.python.ipu.utils.configure_ipu_system",
"tensorflow.python.ipu.autoshard.automatic_sharding",
"tensorflow.placeholder",
"tensorflow.python.ipu.scopes.ipu_scope",
"tensorflow.python.ipu.utils.create_ipu_config",
"tensorflow.python.ipu.autoshard.ipu_autoshard",
"tensorflow.python.ipu.utils.auto_select_ipus",
"tensorflow.Session"
],
[
"tensorflow.device",
"numpy.expand_dims",
"tensorflow.python.ipu.utils.auto_select_ipus",
"tensorflow.get_default_graph",
"tensorflow.import_graph_def",
"tensorflow.python.keras.preprocessing.image.load_img",
"tensorflow.python.ipu.utils.create_ipu_config",
"tensorflow.reset_default_graph",
"tensorflow.compat.v1.graph_util.remove_training_nodes",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.python.ipu.utils.configure_ipu_system",
"tensorflow.python.keras.applications.densenet.preprocess_input",
"tensorflow.compat.v1.graph_util.convert_variables_to_constants",
"tensorflow.placeholder",
"tensorflow.get_logger",
"tensorflow.python.keras.preprocessing.image.img_to_array",
"tensorflow.python.keras.applications.densenet.decode_predictions",
"tensorflow.variable_scope"
],
[
"tensorflow.python.ipu.keras.layers.Embedding",
"tensorflow.python.ipu.utils.configure_ipu_system",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.datasets.imdb.load_data",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.cast",
"tensorflow.python.keras.preprocessing.sequence.pad_sequences",
"tensorflow.python.ipu.ipu_strategy.IPUStrategy",
"tensorflow.python.ipu.utils.create_ipu_config",
"tensorflow.python.ipu.utils.auto_select_ipus",
"tensorflow.python.keras.optimizer_v2.adam.Adam",
"tensorflow.python.ipu.keras.Model",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.ipu.keras.layers.LSTM"
],
[
"tensorflow.Graph",
"tensorflow.contrib.compiler.xla.compile",
"tensorflow.constant",
"tensorflow.python.ipu.utils.configure_ipu_system",
"tensorflow.equal",
"tensorflow.cast",
"tensorflow.errors.ResourceExhaustedError",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.python.ipu.scopes.ipu_scope",
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.python.ipu.utils.move_variable_initialization_to_cpu",
"tensorflow.python.ipu.ops.cross_replica_ops.cross_replica_sum"
],
[
"tensorflow.python.ipu.scopes.ipu_scope",
"numpy.zeros_like",
"tensorflow.python.ipu.utils.auto_select_ipus",
"tensorflow.compat.v1.constant",
"numpy.random.randint",
"tensorflow.python.ipu.ipu_compiler.compile",
"numpy.allclose",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.python.ipu.utils.create_ipu_config",
"numpy.count_nonzero",
"tensorflow.python.ipu.utils.move_variable_initialization_to_cpu",
"tensorflow.compat.v1.variable_scope",
"numpy.zeros",
"tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue",
"tensorflow.python.ipu.utils.configure_ipu_system",
"numpy.nonzero",
"tensorflow.compat.v1.Graph",
"numpy.identity",
"numpy.random.rand",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.device",
"numpy.random.seed",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"numpy.random.normal",
"tensorflow.compat.v1.placeholder"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
filangel/Eigenfaces | [
"55ddb705611ee351cc856d5a927a4dc82acaff03",
"55ddb705611ee351cc856d5a927a4dc82acaff03"
] | [
"src/app_a.py",
"src/svm_ovo.py"
] | [
"# matplotlib backtest for missing $DISPLAY\nimport matplotlib\nmatplotlib.use('Agg')\n\n# scientific computing library\nimport numpy as np\n\n# visualization tools\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# prettify plots\nplt.rcParams['figure.figsize'] = [8.0, 6.0]\nsns.set_palette(sns.color_palette(\"muted\"))\nsns.set_style(\"ticks\")\nsns_b, sns_g, sns_r, sns_v, sns_y, sns_l = sns.color_palette(\"muted\")\n\n\n# helper data preprocessor\nfrom reader import fetch_data\n\n# utility functions\nfrom utils import progress\n\n# logging module\nimport logging\nimport coloredlogs\n\n# argument parser\nimport argparse\n\n# built-in tools\nimport os\n\nSHAPE = (46, 56)\n\nif __name__ == '__main__':\n\n # argument parser instance\n parser = argparse.ArgumentParser()\n # init log level argument\n parser.add_argument('-l', '--log', type=str,\n help='<optional> Log Level (info | debug)')\n # parse arguments\n argv = parser.parse_args()\n # get log level\n _level = argv.log or ''\n\n logger = logging.getLogger(os.path.basename(__file__).replace('.py', ''))\n\n if _level.upper() == 'INFO':\n coloredlogs.install(level='IFNO', logger=logger)\n elif _level.upper() == 'DEBUG':\n coloredlogs.install(level='DEBUG', logger=logger)\n else:\n coloredlogs.install(level='WARNING', logger=logger)\n\n logger.info('Fetching data...')\n data = fetch_data()\n\n X_train, y_train = data['train']\n\n D, N = X_train.shape\n logger.debug('Number of features: D=%d' % D)\n logger.debug('Number of train data: N=%d' % N)\n\n # mean face\n mean_face = X_train.mean(axis=1).reshape(-1, 1)\n\n A = X_train - mean_face\n logger.debug('A.shape=%s' % (A.shape,))\n\n S = (1 / N) * np.dot(A.T, A)\n logger.debug('S.shape=%s' % (S.shape,))\n\n # Calculate eigenvalues `w` and eigenvectors `v`\n logger.info('Calculating eigenvalues and eigenvectors...')\n _l, _v = np.linalg.eig(S)\n\n # Indexes of eigenvalues, sorted by value\n logger.info('Sorting eigenvalues...')\n _indexes = np.argsort(_l)[::-1]\n\n # TODO\n # threshold w's\n logger.warning('TODO: threshold eigenvalues')\n\n # Sorted eigenvalues and eigenvectors\n l = _l[_indexes]\n logger.debug('l.shape=%s' % (l.shape,))\n v = _v[:, _indexes]\n logger.debug('v.shape=%s' % (v.shape,))\n\n M = np.arange(1, N + 1)\n\n error = []\n\n logger.info('Reconstruction for M in [%d,%d]...' 
% (M[0], M[-1]))\n for j, m in enumerate(M):\n\n progress(j + 1, len(M), status='Reconstruction for M=%d' % m)\n\n V = v[:, :m]\n\n _U = np.dot(A, V)\n\n U = _U / np.apply_along_axis(np.linalg.norm, 0, _U)\n\n W = np.dot(U.T, A)\n\n A_hat = np.dot(U, W)\n\n error.append(np.mean(np.sum((A - A_hat)**2)))\n # fix bug of progress bar after '\\r'\n print('')\n\n logger.info('Plotting reconstruction error versus M...')\n fig, ax1 = plt.subplots()\n\n lns1 = ax1.plot(M, error, color=sns_b, label='Reconstruction Error')\n ax1.tick_params('y', colors=sns_b)\n\n ax2 = ax1.twinx()\n lns2 = ax2.plot(M, l, color=sns_g, label='Covariance Matrix Eigenvalues')\n ax2.tick_params('y', colors=sns_g)\n\n ax1.set_title(\n 'Reconstruction Error versus Number of Principle Components $\\mathcal{M}$\\n')\n ax1.set_xlabel('$\\mathcal{M}$: Number of Principle Components')\n ax1.set_ylabel('$\\mathcal{J}$: Reconstruction Error')\n ax2.set_ylabel('Covariance Matrix Eigenvalues')\n # fix legend hack\n lns = lns1 + lns2\n labs = [l.get_label() for l in lns]\n ax1.legend(lns, labs, loc=0)\n # ax1.grid()\n fig.tight_layout()\n plt.savefig('data/out/error_versus_M.pdf',\n format='pdf', dpi=1000, transparent=True)\n logger.info('Exported at data/out/error_versus_M.pdf...')\n\n # set M\n m = 100\n V = v[:, :m]\n _U = np.dot(A, V)\n U = _U / np.apply_along_axis(np.linalg.norm, 0, _U)\n W_train = np.dot(U.T, A)\n\n # test data\n X_test, y_test = data['test']\n I, K = X_test.shape\n assert I == D, logger.error(\n 'Number of features of test and train data do not match, %d != %d' % (D, I))\n Phi = X_test - mean_face\n logger.debug('Phi.shape=%s' % (Phi.shape,))\n\n W_test = np.dot(U.T, Phi)\n logger.debug('W_test.shape=%s' % (W_test.shape,))\n\n ridx_train = np.random.randint(0, N, 3)\n R_train = W_train[:, ridx_train]\n B_train = np.dot(U, R_train)\n\n plt.rcParams['figure.figsize'] = [16.0, 12.0]\n\n logger.info('Plotting reconstructed training images...')\n fig, axes = plt.subplots(nrows=2, ncols=3)\n titles_train = ['Original Train', 'Original Train', 'Original Train',\n 'Reconstructed Train', 'Reconstructed Train', 'Reconstructed Train']\n for ax, img, title in zip(axes.flatten(), np.concatenate((A[:, ridx_train], B_train), axis=1).T, titles_train):\n _img = img + mean_face.ravel()\n ax.imshow(_img.reshape(SHAPE).T,\n cmap=plt.get_cmap('gray'), vmin=0, vmax=255)\n ax.set_title(title)\n fig.savefig('data/out/reconstructed_train_images.pdf',\n format='pdf', dpi=1000, transparent=True)\n logger.info('Exported at data/out/reconstructed_train_images.pdf...')\n\n ridx_test = np.random.randint(0, K, 3)\n R_test = W_test[:, ridx_test]\n B_test = np.dot(U, R_test)\n\n logger.info('Plotting reconstructed testing images...')\n fig, axes = plt.subplots(nrows=2, ncols=3)\n titles_test = ['Original Test', 'Original Test', 'Original Test',\n 'Reconstructed Test', 'Reconstructed Test', 'Reconstructed Test']\n for ax, img, title in zip(axes.flatten(), np.concatenate((Phi[:, ridx_test], B_test), axis=1).T, titles_test):\n _img = img + mean_face.ravel()\n ax.imshow(_img.reshape(SHAPE).T,\n cmap=plt.get_cmap('gray'), vmin=0, vmax=255)\n ax.set_title(title)\n fig.savefig('data/out/reconstructed_test_images.pdf',\n format='pdf', dpi=1000, transparent=True)\n logger.info('Exported at data/out/reconstructed_test_images.pdf...')\n",
"# matplotlib backtest for missing $DISPLAY\nimport matplotlib\nmatplotlib.use('Agg')\n\n# scientific computing library\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import confusion_matrix\n\n# visualization tools\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# prettify plots\nplt.rcParams['figure.figsize'] = [32.0, 24.0]\nsns.set_palette(sns.color_palette(\"muted\"))\nsns.set_style(\"ticks\")\n\n# helper data preprocessor\nfrom reader import fetch_data\nfrom pca import PCA\n# visualization functions\nfrom visualize import plot_confusion_matrix\n\n# utility functions\nfrom utils import progress\n\n# logging module\nimport logging\nimport coloredlogs\n\n# argument parser\nimport argparse\n\n# time module\nimport time\n\n# built-in tools\nimport pdb\nimport itertools\nimport os\n\nif __name__ == '__main__':\n\n # argument parser instance\n parser = argparse.ArgumentParser()\n # init log level argument\n parser.add_argument('-l', '--log', type=str,\n help='<optional> Log Level (info | debug)')\n parser.add_argument('-m', '--n_comps', type=int,\n help='<optional> Number of principle components')\n parser.add_argument('-s', '--standard', action='store_true',\n help='<optional> Standardize data')\n parser.add_argument('-cv', '--cross_validation', action='store_true',\n help='<optional> Cross validate SVM')\n # parse arguments\n argv = parser.parse_args()\n # get log level\n _level = argv.log or ''\n # get number of principle components\n M = argv.n_comps or 121\n # get flag of standardization\n standard = argv.standard or True\n # get flag of cross validation\n cv = argv.cross_validation or False\n\n logger = logging.getLogger(os.path.basename(__file__).replace('.py', ''))\n\n if _level.upper() == 'INFO':\n coloredlogs.install(level='IFNO', logger=logger)\n elif _level.upper() == 'DEBUG':\n coloredlogs.install(level='DEBUG', logger=logger)\n else:\n coloredlogs.install(level='WARNING', logger=logger)\n\n logger.info('Fetching data...')\n data = fetch_data(ratio=0.8)\n\n X_train, y_train = data['train']\n\n D, N = X_train.shape\n\n pca = PCA(n_comps=M, standard=standard, logger=logger)\n logger.info('Applying PCA with M=%d' % M)\n\n # normalise data\n W_train = pca.fit(X_train)\n logger.debug('W_train.shape=%s' % (W_train.shape,))\n\n X_test, y_test = data['test']\n I, K = X_test.shape\n assert I == D, logger.error(\n 'Number of features of test and train data do not match, %d != %d' % (D, I))\n\n W_test = pca.transform(X_test)\n logger.debug('W_test.shape=%s' % (W_test.shape,))\n\n classes = set(y_train.ravel())\n\n C = len(classes)\n\n combs = list(itertools.combinations(classes, 2))\n\n support_vectors_ = []\n\n if cv:\n # cross validation grid\n params = {\n 'gamma': np.logspace(-5, -3, 5), 'kernel': ['rbf', 'linear'], 'C': np.logspace(0, 2, 3)}\n mean_fit_time = {k: 0 for k in params['kernel']}\n mean_score_time = {k: 0 for k in params['kernel']}\n mean_n_support_ = 0\n\n LEADERBOARD = np.zeros((C + 1, K))\n\n for c1, c2 in combs:\n\n # preprocess training labels\n l_train = np.empty(y_train.T.shape)\n l_train[:] = np.nan\n l_train[y_train.T == c1] = 1\n l_train[y_train.T == c2] = 0\n _index = ~np.isnan(l_train).ravel()\n l_train = l_train[_index].ravel()\n\n # select the training examples\n w_train = W_train.T[_index]\n\n _classifier = SVC(kernel='linear', C=1, gamma=2e-4)\n\n if cv:\n search = GridSearchCV(_classifier, params, n_jobs=-1)\n\n 
search.fit(w_train, l_train)\n\n classifier = search.best_estimator_\n\n _results = list(zip(search.cv_results_['params'],\n search.cv_results_['mean_fit_time'],\n search.cv_results_['mean_score_time']))\n\n for kernel in params['kernel']:\n _f = filter(lambda x: kernel == x[0]['kernel'], _results)\n for _, fit_time, score_time in _f:\n mean_fit_time[kernel] += fit_time\n mean_score_time[kernel] += score_time\n mean_fit_time[kernel] /= len(search.cv_results_['params']\n ) / len(params['kernel'])\n mean_score_time[kernel] /= len(\n search.cv_results_['params']) / len(params['kernel'])\n\n mean_n_support_ += np.sum(classifier.n_support_)\n\n else:\n classifier = _classifier\n classifier.fit(w_train, l_train)\n\n support_vectors_.append(classifier.support_vectors_)\n\n scores = classifier.predict(W_test.T)\n\n for j, s in enumerate(scores):\n c = c1 if s == 1 else c2\n LEADERBOARD[c, j] += 1\n\n if cv:\n mean_n_support_ /= len(combs)\n logger.error('Mean `fit` Time %s' % mean_fit_time)\n logger.error('Mean `score` Time %s' % mean_score_time)\n logger.error('Mean Number of Support Vectors %s' % mean_n_support_)\n\n y_hat = np.argmax(LEADERBOARD, axis=0)\n\n acc = np.sum(y_test == y_hat) / K\n\n logger.error('Accuracy = %.2f%%' % (acc * 100))\n\n cnf_matrix = confusion_matrix(\n y_test.ravel(), y_hat.ravel(), labels=list(classes))\n\n # Plot non-normalized confusion matrix\n plt.figure()\n logger.info('Plotting confusion matrices...')\n plot_confusion_matrix(cnf_matrix, classes=classes,\n title='SVM One versus One - Confusion Matrix',\n cmap=plt.cm.Reds)\n plt.savefig('data/out/svm_ovo_cnf_matrix.pdf', format='pdf', dpi=300)\n # Plot normalized confusion matrix\n plt.figure()\n plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True,\n title='SVM One versus One - Normalized Confusion Matrix',\n cmap=plt.cm.Reds)\n plt.savefig('data/out/svm_ovo_cnf_matrix_norm.pdf', format='pdf', dpi=300)\n logger.info(\n 'Exported at data/out/svm_ovo_cnf_matrix.pdf & data/out/svm_ovr_cnf_matrix_norm.pdf...')\n"
] | [
[
"numpy.dot",
"numpy.arange",
"matplotlib.use",
"numpy.linalg.eig",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.get_cmap",
"numpy.concatenate",
"numpy.apply_along_axis",
"numpy.argsort",
"numpy.sum",
"numpy.random.randint"
],
[
"sklearn.model_selection.GridSearchCV",
"numpy.logspace",
"matplotlib.use",
"numpy.isnan",
"matplotlib.pyplot.savefig",
"numpy.argmax",
"sklearn.svm.SVC",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shkarupa-alex/tfmiss | [
"4fe1bb3a47327c07711f910ee53319167032b6af",
"4fe1bb3a47327c07711f910ee53319167032b6af",
"4fe1bb3a47327c07711f910ee53319167032b6af"
] | [
"tfmiss/text/wordpiecelib.py",
"tfmiss/text/unicode_transform.py",
"tfmiss/keras/losses/bitemp.py"
] | [
"# Taken from https://raw.githubusercontent.com/tensorflow/text/v2.5.0/tensorflow_text/tools/wordpiece_vocab/wordpiece_tokenizer_learner_lib.py\n#\n# coding=utf-8\n# Copyright 2021 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Algorithm for learning wordpiece vocabulary.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\nfrom typing import List, Optional\n\nParams = collections.namedtuple('Params', [\n 'upper_thresh', 'lower_thresh', 'num_iterations', 'max_input_tokens',\n 'max_token_length', 'max_unique_chars', 'vocab_size', 'slack_ratio',\n 'include_joiner_token', 'joiner', 'reserved_tokens'\n])\n\n\ndef extract_char_tokens(word_counts):\n \"\"\"Extracts all single-character tokens from word_counts.\n\n Args:\n word_counts: list of (string, int) tuples\n\n Returns:\n set of single-character strings contained within word_counts\n \"\"\"\n\n seen_chars = set()\n for word, _ in word_counts:\n for char in word:\n seen_chars.add(char)\n return seen_chars\n\n\ndef ensure_all_tokens_exist(input_tokens, output_tokens, include_joiner_token,\n joiner):\n \"\"\"Adds all tokens in input_tokens to output_tokens if not already present.\n\n Args:\n input_tokens: set of strings (tokens) we want to include\n output_tokens: string to int dictionary mapping token to count\n include_joiner_token: bool whether to include joiner token\n joiner: string used to indicate suffixes\n\n Returns:\n string to int dictionary with all tokens in input_tokens included\n \"\"\"\n\n for token in input_tokens:\n if token not in output_tokens:\n output_tokens[token] = 1\n\n if include_joiner_token:\n joined_token = joiner + token\n if joined_token not in output_tokens:\n output_tokens[joined_token] = 1\n\n return output_tokens\n\n\ndef get_split_indices(word, curr_tokens, include_joiner_token, joiner):\n \"\"\"Gets indices for valid substrings of word, for iterations > 0.\n\n For iterations > 0, rather than considering every possible substring, we only\n want to consider starting points corresponding to the start of wordpieces in\n the current vocabulary.\n\n Args:\n word: string we want to split into substrings\n curr_tokens: string to int dict of tokens in vocab (from previous iteration)\n include_joiner_token: bool whether to include joiner token\n joiner: string used to indicate suffixes\n\n Returns:\n list of ints containing valid starting indices for word\n \"\"\"\n\n indices = []\n start = 0\n while start < len(word):\n end = len(word)\n while end > start:\n subtoken = word[start:end]\n # Subtoken includes the joiner token.\n if include_joiner_token and start > 0:\n subtoken = joiner + subtoken\n # If subtoken is part of vocab, 'end' is a valid start index.\n if subtoken in curr_tokens:\n indices.append(end)\n break\n end -= 1\n\n if end == start:\n return None\n start = end\n\n return indices\n\n\ndef get_search_threshs(word_counts, upper_thresh, lower_thresh):\n \"\"\"Clips the 
thresholds for binary search based on current word counts.\n\n The upper threshold parameter typically has a large default value that can\n result in many iterations of unnecessary search. Thus we clip the upper and\n lower bounds of search to the maximum and the minimum wordcount values.\n\n Args:\n word_counts: list of (string, int) tuples\n upper_thresh: int, upper threshold for binary search\n lower_thresh: int, lower threshold for binary search\n\n Returns:\n upper_search: int, clipped upper threshold for binary search\n lower_search: int, clipped lower threshold for binary search\n \"\"\"\n\n counts = [count for _, count in word_counts]\n max_count = max(counts)\n min_count = min(counts)\n\n if upper_thresh is None:\n upper_search = max_count\n else:\n upper_search = max_count if max_count < upper_thresh else upper_thresh\n\n if lower_thresh is None:\n lower_search = min_count\n else:\n lower_search = min_count if min_count > lower_thresh else lower_thresh\n\n return upper_search, lower_search\n\n\ndef get_input_words(word_counts, reserved_tokens, max_token_length):\n \"\"\"Filters out words that are longer than max_token_length or are reserved.\n\n Args:\n word_counts: list of (string, int) tuples\n reserved_tokens: list of strings\n max_token_length: int, maximum length of a token\n\n Returns:\n list of (string, int) tuples of filtered wordcounts\n \"\"\"\n\n all_counts = []\n\n for word, count in word_counts:\n if len(word) > max_token_length or word in reserved_tokens:\n continue\n all_counts.append((word, count))\n\n return all_counts\n\n\ndef get_allowed_chars(all_counts, max_unique_chars):\n \"\"\"Get the top max_unique_chars characters within our wordcounts.\n\n We want each character to be in the vocabulary so that we can keep splitting\n down to the character level if necessary. 
However, in order not to inflate\n our vocabulary with rare characters, we only keep the top max_unique_chars\n characters.\n\n Args:\n all_counts: list of (string, int) tuples\n max_unique_chars: int, maximum number of unique single-character tokens\n\n Returns:\n set of strings containing top max_unique_chars characters in all_counts\n \"\"\"\n\n char_counts = collections.defaultdict(int)\n\n for word, count in all_counts:\n for char in word:\n char_counts[char] += count\n\n # Sort by count, then alphabetically.\n sorted_counts = sorted(sorted(char_counts.items(), key=lambda x: x[0]),\n key=lambda x: x[1], reverse=True)\n\n allowed_chars = set()\n for i in range(min(len(sorted_counts), max_unique_chars)):\n allowed_chars.add(sorted_counts[i][0])\n return allowed_chars\n\n\ndef filter_input_words(all_counts, allowed_chars, max_input_tokens):\n \"\"\"Filters out words with unallowed chars and limits words to max_input_tokens.\n\n Args:\n all_counts: list of (string, int) tuples\n allowed_chars: list of single-character strings\n max_input_tokens: int, maximum number of tokens accepted as input\n\n Returns:\n list of (string, int) tuples of filtered wordcounts\n \"\"\"\n # Ensure that the input is sorted so that if `max_input_tokens` is reached\n # the least common tokens are dropped.\n all_counts = sorted(\n all_counts, key=lambda word_and_count: word_and_count[1], reverse=True)\n filtered_counts = []\n for word, count in all_counts:\n if (max_input_tokens != -1 and\n len(filtered_counts) >= max_input_tokens):\n break\n has_unallowed_chars = False\n for char in word:\n if char not in allowed_chars:\n has_unallowed_chars = True\n break\n if has_unallowed_chars:\n continue\n filtered_counts.append((word, count))\n\n return filtered_counts\n\n\ndef generate_final_vocabulary(reserved_tokens, char_tokens, curr_tokens):\n \"\"\"Generates final vocab given reserved, single-character, and current tokens.\n\n Args:\n reserved_tokens: list of strings (tokens) that must be included in vocab\n char_tokens: set of single-character strings\n curr_tokens: string to int dict mapping token to count\n\n Returns:\n list of strings representing final vocabulary\n \"\"\"\n\n sorted_char_tokens = sorted(list(char_tokens))\n vocab_char_arrays = []\n vocab_char_arrays.extend(reserved_tokens)\n vocab_char_arrays.extend(sorted_char_tokens)\n\n # Sort by count, then alphabetically.\n sorted_tokens = sorted(sorted(curr_tokens.items(), key=lambda x: x[0]),\n key=lambda x: x[1], reverse=True)\n for token, _ in sorted_tokens:\n vocab_char_arrays.append(token)\n\n seen_tokens = set()\n # Adding unique tokens to list to maintain sorted order.\n vocab_words = []\n for word in vocab_char_arrays:\n if word in seen_tokens:\n continue\n seen_tokens.add(word)\n vocab_words.append(word)\n\n return vocab_words\n\n\ndef learn_with_thresh(word_counts, thresh, params):\n \"\"\"Wordpiece learning algorithm to produce a vocab given frequency threshold.\n\n Args:\n word_counts: list of (string, int) tuples\n thresh: int, frequency threshold for a token to be included in the vocab\n params: Params namedtuple, parameters for learning\n\n Returns:\n list of strings, vocabulary generated for the given thresh\n \"\"\"\n\n # Set of single-character tokens.\n char_tokens = extract_char_tokens(word_counts)\n curr_tokens = ensure_all_tokens_exist(char_tokens, {},\n params.include_joiner_token,\n params.joiner)\n\n for iteration in range(params.num_iterations):\n subtokens = [dict() for _ in range(params.max_token_length + 1)]\n # Populate array 
with counts of each subtoken.\n for word, count in word_counts:\n if iteration == 0:\n split_indices = range(1, len(word) + 1)\n else:\n split_indices = get_split_indices(word, curr_tokens,\n params.include_joiner_token,\n params.joiner)\n if not split_indices:\n continue\n\n start = 0\n for index in split_indices:\n for end in range(start + 1, len(word) + 1):\n subtoken = word[start:end]\n length = len(subtoken)\n if params.include_joiner_token and start > 0:\n subtoken = params.joiner + subtoken\n if subtoken in subtokens[length]:\n # Subtoken exists, increment count.\n subtokens[length][subtoken] += count\n else:\n # New subtoken, add to dict.\n subtokens[length][subtoken] = count\n start = index\n\n next_tokens = {}\n # Get all tokens that have a count above the threshold.\n for length in range(params.max_token_length, 0, -1):\n for token, count in subtokens[length].items():\n if count >= thresh:\n next_tokens[token] = count\n # Decrement the count of all prefixes.\n if len(token) > length: # This token includes the joiner.\n joiner_len = len(params.joiner)\n for i in range(1 + joiner_len, length + joiner_len):\n prefix = token[0:i]\n if prefix in subtokens[i - joiner_len]:\n subtokens[i - joiner_len][prefix] -= count\n else:\n for i in range(1, length):\n prefix = token[0:i]\n if prefix in subtokens[i]:\n subtokens[i][prefix] -= count\n\n # Add back single-character tokens.\n curr_tokens = ensure_all_tokens_exist(char_tokens, next_tokens,\n params.include_joiner_token,\n params.joiner)\n\n vocab_words = generate_final_vocabulary(params.reserved_tokens, char_tokens,\n curr_tokens)\n\n return vocab_words\n\n\ndef learn_binary_search(word_counts, lower, upper, params):\n \"\"\"Performs binary search to find wordcount frequency threshold.\n\n Given upper and lower bounds and a list of (word, count) tuples, performs\n binary search to find the threshold closest to producing a vocabulary\n of size vocab_size.\n\n Args:\n word_counts: list of (string, int) tuples\n lower: int, lower bound for binary search\n upper: int, upper bound for binary search\n params: Params namedtuple, parameters for learning\n\n Returns:\n list of strings, vocab that is closest to target vocab_size\n \"\"\"\n thresh = (upper + lower) // 2\n current_vocab = learn_with_thresh(word_counts, thresh, params)\n current_vocab_size = len(current_vocab)\n\n # Allow count to be within k% of the target count, where k is slack ratio.\n slack_count = params.slack_ratio * params.vocab_size\n if slack_count < 0:\n slack_count = 0\n\n is_within_slack = (current_vocab_size <= params.vocab_size) and (\n params.vocab_size - current_vocab_size <= slack_count)\n\n # We've created a vocab within our goal range (or, ran out of search space).\n if is_within_slack or lower >= upper or thresh <= 1:\n return current_vocab\n\n current_vocab = None\n\n if current_vocab_size > params.vocab_size:\n return learn_binary_search(word_counts, thresh + 1, upper, params)\n\n else:\n return learn_binary_search(word_counts, lower, thresh - 1, params)\n\n\ndef count_words(iterable) -> collections.Counter:\n \"\"\"Converts a iterable of arrays of words into a `Counter` of word counts.\"\"\"\n counts = collections.Counter()\n for words in iterable:\n # Convert a RaggedTensor to a flat/dense Tensor.\n words = getattr(words, 'flat_values', words)\n # Flatten any dense tensor\n words = np.reshape(words, [-1])\n counts.update(words)\n\n # Decode the words if necessary.\n example_word = next(iter(counts.keys()))\n if isinstance(example_word, bytes):\n counts = 
collections.Counter(\n {word.decode('utf-8'): count for word, count in counts.items()})\n\n return counts\n\n\ndef learn(word_counts,\n vocab_size: int,\n reserved_tokens: List[str],\n upper_thresh: Optional[int] = int(1e7),\n lower_thresh: Optional[int] = 10,\n num_iterations: int = 4,\n max_input_tokens: Optional[int] = int(5e6),\n max_token_length: int = 50,\n max_unique_chars: int = 1000,\n slack_ratio: float = 0.05,\n include_joiner_token: bool = True,\n joiner: str = '##') -> List[str]:\n \"\"\"Takes in wordcounts and returns wordpiece vocabulary.\n\n Args:\n word_counts: (word, count) pairs as a dictionary, or list of tuples.\n vocab_size: The target vocabulary size. This is the maximum size.\n reserved_tokens: A list of tokens that must be included in the vocabulary.\n upper_thresh: Initial upper bound on the token frequency threshold.\n lower_thresh: Initial lower bound on the token frequency threchold.\n num_iterations: Number of iterations to run.\n max_input_tokens: The maximum number of words in the initial vocabulary. The\n words with the lowest counts are discarded. Use `None` or `-1` for \"no\n maximum\".\n max_token_length: The maximum token length. Counts for longer words are\n discarded.\n max_unique_chars: The maximum alphabet size. This prevents rare characters\n from inflating the vocabulary. Counts for words containing characters\n ouside of the selected alphabet are discarded.\n slack_ratio: The maximum deviation acceptable from `vocab_size` for an\n acceptable vocabulary. The acceptable range of vocabulary sizes is from\n `vocab_size*(1-slack_ratio)` to `vocab_size`.\n include_joiner_token: If true, include the `joiner` token in the output\n vocabulary.\n joiner: The prefix to include on suffix tokens in the output vocabulary.\n Usually \"##\". For example 'places' could be tokenized as `['place',\n '##s']`.\n\n Returns:\n string, final vocabulary with each word separated by newline\n \"\"\"\n if isinstance(word_counts, dict):\n word_counts = word_counts.items()\n\n params = Params(upper_thresh, lower_thresh, num_iterations, max_input_tokens,\n max_token_length, max_unique_chars, vocab_size, slack_ratio,\n include_joiner_token, joiner, reserved_tokens)\n\n upper_search, lower_search = get_search_threshs(word_counts,\n params.upper_thresh,\n params.lower_thresh)\n all_counts = get_input_words(word_counts, params.reserved_tokens,\n params.max_token_length)\n allowed_chars = get_allowed_chars(all_counts, params.max_unique_chars)\n\n filtered_counts = filter_input_words(all_counts, allowed_chars,\n params.max_input_tokens)\n\n vocab = learn_binary_search(filtered_counts, lower_search, upper_search,\n params)\n\n return vocab\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tfmiss.ops import tfmiss_ops\n\n\ndef char_category(source, first=True, skip=None, name=None):\n \"\"\"Get first/last character category in unicode strings.\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, strings to make lower.\n first: boolean flag indicating which character should be tested: first or last.\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `Tensor` or `RaggedTensor` of same shape as input.\n \"\"\"\n with tf.name_scope(name or 'char_category'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n char_category(source.flat_values, first, skip)\n )\n\n return tfmiss_ops.miss_char_category(\n source=source,\n first=first,\n skip=skip or [],\n )\n\n\ndef lower_case(source, skip=None, name=None):\n \"\"\"Lowercases unicode strings.\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, strings to make lower.\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `Tensor` or `RaggedTensor` of same shape as input.\n \"\"\"\n with tf.name_scope(name or 'lower_case'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n lower_case(source.flat_values, skip)\n )\n\n return tfmiss_ops.miss_lower_case(\n source=source,\n skip=skip or [],\n )\n\n\ndef normalize_unicode(source, form, skip=None, name=None):\n \"\"\"Normalizes unicode strings.\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, strings to normalize.\n form: Scalar value, name of normalization algorithm.\n One of `\"NFD\"`, `\"NFC\"`, `\"NFKD\"`, `\"NFKC\"`.\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `Tensor` or `RaggedTensor` of same shape and size as input.\n \"\"\"\n\n with tf.name_scope(name or 'normalize_unicode'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n normalize_unicode(source.flat_values, form, skip)\n )\n\n return tfmiss_ops.miss_normalize_unicode(\n source=source,\n form=form,\n skip=skip or [],\n )\n\n\ndef replace_regex(source, pattern, rewrite, skip=None, name=None):\n \"\"\"Replaces all regex matchs from `needle` to corresponding unicode strings in `haystack`.\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, source strings for replacing.\n pattern: List of RE2 patterns to search in source\n rewrite: List of strings to replace with. 
Should have same length as `needle`.\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `Tensor` or `RaggedTensor` of same shape and size as input.\n \"\"\"\n\n with tf.name_scope(name or 'replace_regex'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n replace_regex(source.flat_values, pattern, rewrite, skip)\n )\n\n return tfmiss_ops.miss_replace_regex(\n source=source,\n pattern=pattern,\n rewrite=rewrite,\n skip=skip or [],\n )\n\n\ndef replace_string(source, needle, haystack, skip=None, name=None):\n \"\"\"Replaces all unicode substrings from `needle` to corresponding unicode strings in `haystack`.\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, source strings for replacing.\n needle: List of strings to search in source\n haystack: List of strings to replace with. Should have same length as `needle`.\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `Tensor` or `RaggedTensor` of same shape and size as input.\n \"\"\"\n\n with tf.name_scope(name or 'replace_string'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n replace_string(source.flat_values, needle, haystack, skip)\n )\n\n return tfmiss_ops.miss_replace_string(\n source=source,\n needle=needle,\n haystack=haystack,\n skip=skip or [],\n )\n\n\ndef sub_string(source, start, limit=None, skip=None, name=None):\n \"\"\"Cuts substrings starting at position `start` and spans `limit` characters.\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, source strings for cut substring.\n start: Substring start position. If negative, will be interpreted as \"from the end of string\"\n limit: Substring length. 
`None` or any negative value will be interpreted as \"to the end of string\".\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `Tensor` or `RaggedTensor` of same shape and size as input.\n \"\"\"\n\n with tf.name_scope(name or 'sub_string'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n sub_string(source.flat_values, start, limit, skip)\n )\n\n return tfmiss_ops.miss_sub_string(\n source=source,\n start=start,\n limit=-1 if limit is None else limit,\n skip=skip or [],\n )\n\n\ndef title_case(source, skip=None, name=None):\n \"\"\"Titlecases unicode strings.\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, strings to make title.\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `Tensor` or `RaggedTensor` of same shape and size as input.\n \"\"\"\n\n with tf.name_scope(name or 'title_case'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n title_case(source.flat_values, skip)\n )\n\n return tfmiss_ops.miss_title_case(\n source=source,\n skip=skip or [],\n )\n\n\ndef upper_case(source, skip=None, name=None):\n \"\"\"Uppercases unicode strings.\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, strings to make upper.\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `Tensor` or `RaggedTensor` of same shape and size as input.\n \"\"\"\n\n with tf.name_scope(name or 'upper_case'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n upper_case(source.flat_values, skip)\n )\n\n return tfmiss_ops.miss_upper_case(\n source=source,\n skip=skip or [],\n )\n\n\ndef wrap_with(source, left, right, skip=None, name=None):\n \"\"\"Wraps unicode strings with \"left\" and \"right\"\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, strings to replace digits.\n left: Scalar string to add in the beginning\n right: Scalar string to add in the ending\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `RaggedTensor` of same shape and size as input.\n \"\"\"\n\n with tf.name_scope(name or 'wrap_with'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n wrap_with(source.flat_values, left, right, skip)\n )\n\n return tfmiss_ops.miss_wrap_with(\n source=source,\n left=left,\n right=right,\n skip=skip or [],\n )\n\n\ndef zero_digits(source, skip=None, name=None):\n \"\"\"Replaces each digit in unicode strings with 0.\n\n Args:\n source: `Tensor` or `RaggedTensor` of any shape, strings to replace digits.\n skip: list of strings to pass without changes or None.\n name: A name for the operation (optional).\n Returns:\n `Tensor` or `RaggedTensor` of same shape and size as input.\n \"\"\"\n\n with tf.name_scope(name or 'zero_digits'):\n source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source', dtype=tf.string)\n\n if isinstance(source, tf.RaggedTensor):\n return source.with_flat_values(\n 
zero_digits(source.flat_values, skip)\n )\n\n return tfmiss_ops.miss_zero_digits(\n source=source,\n skip=skip or [],\n )\n",
"\"\"\"\nRobust Bi-Tempered Logistic Loss Based on Bregman Divergences.\nhttps://arxiv.org/pdf/1906.03361.pdf\n\nSource: https://github.com/google/bi-tempered-loss/\nDescription: https://ai.googleblog.com/2019/08/bi-tempered-logistic-loss-for-training.html\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport tensorflow as tf\nfrom keras import backend\nfrom keras.losses import LossFunctionWrapper\nfrom keras.utils.generic_utils import register_keras_serializable\nfrom keras.utils.losses_utils import ReductionV2 as Reduction\nfrom tensorflow.python.framework import ops\n\n\ndef log_t(u, t):\n \"\"\"Compute log_t for `u`.\"\"\"\n\n def _internal_log_t(u, t):\n return (u ** (1.0 - t) - 1.0) / (1.0 - t)\n\n return tf.cond(\n tf.equal(t, 1.0),\n lambda: tf.math.log(u),\n functools.partial(_internal_log_t, u, t)\n )\n\n\ndef exp_t(u, t):\n \"\"\"Compute exp_t for `u`.\"\"\"\n\n def _internal_exp_t(u, t):\n return tf.nn.relu(1.0 + (1.0 - t) * u) ** (1.0 / (1.0 - t))\n\n return tf.cond(\n tf.equal(t, 1.0),\n lambda: tf.exp(u),\n functools.partial(_internal_exp_t, u, t)\n )\n\n\ndef compute_normalization_fixed_point(activations, t, num_iters=5):\n \"\"\"Returns the normalization value for each example (t > 1.0).\n Args:\n activations: A multi-dimensional tensor with last dimension `num_classes`.\n t: Temperature 2 (> 1.0 for tail heaviness).\n num_iters: Number of iterations to run the method.\n Return: A tensor of same rank as activation with the last dimension being 1.\n \"\"\"\n\n mu = tf.reduce_max(activations, -1, keepdims=True)\n normalized_activations_step_0 = activations - mu\n shape_normalized_activations = tf.shape(normalized_activations_step_0)\n\n def iter_condition(i, unused_normalized_activations):\n return i < num_iters\n\n def iter_body(i, normalized_activations):\n logt_partition = tf.reduce_sum(\n exp_t(normalized_activations, t),\n -1,\n keepdims=True\n )\n normalized_activations_t = tf.reshape(\n normalized_activations_step_0 * tf.pow(logt_partition, 1.0 - t),\n shape_normalized_activations\n )\n\n return [i + 1, normalized_activations_t]\n\n _, normalized_activations_t = tf.while_loop(\n iter_condition,\n iter_body,\n [0, normalized_activations_step_0],\n maximum_iterations=num_iters\n )\n logt_partition = tf.reduce_sum(\n exp_t(normalized_activations_t, t),\n -1,\n keepdims=True\n )\n\n return -log_t(1.0 / logt_partition, t) + mu\n\n\ndef compute_normalization_binary_search(activations, t, num_iters=10):\n \"\"\"Returns the normalization value for each example (t < 1.0).\n Args:\n activations: A multi-dimensional tensor with last dimension `num_classes`.\n t: Temperature 2 (< 1.0 for finite support).\n num_iters: Number of iterations to run the method.\n Return: A tensor of same rank as activation with the last dimension being 1.\n \"\"\"\n mu = tf.reduce_max(activations, -1, keepdims=True)\n normalized_activations = activations - mu\n shape_activations = tf.shape(activations)\n effective_dim = tf.cast(\n tf.reduce_sum(\n tf.cast(\n tf.greater(normalized_activations, -1.0 / (1.0 - t)),\n tf.int32\n ),\n -1,\n keepdims=True\n ),\n tf.float32\n )\n shape_partition = tf.concat([shape_activations[:-1], [1]], 0)\n lower = tf.zeros(shape_partition)\n upper = -log_t(1.0 / effective_dim, t) * tf.ones(shape_partition)\n\n def iter_condition(i, unused_lower, unused_upper):\n return i < num_iters\n\n def iter_body(i, lower, upper):\n logt_partition = (upper + lower) / 2.0\n sum_probs = 
tf.reduce_sum(exp_t(\n normalized_activations - logt_partition, t),\n -1,\n keepdims=True\n )\n update = tf.cast(tf.less(sum_probs, 1.0), tf.float32)\n lower = tf.reshape(lower * update + (1.0 - update) * logt_partition, shape_partition)\n upper = tf.reshape(upper * (1.0 - update) + update * logt_partition, shape_partition)\n return [i + 1, lower, upper]\n\n _, lower, upper = tf.while_loop(\n iter_condition,\n iter_body, [0, lower, upper],\n maximum_iterations=num_iters\n )\n logt_partition = (upper + lower) / 2.0\n\n return logt_partition + mu\n\n\ndef compute_normalization(activations, t, num_iters=5):\n \"\"\"Returns the normalization value for each example.\n Args:\n activations: A multi-dimensional tensor with last dimension `num_classes`.\n t: Temperature 2 (< 1.0 for finite support, > 1.0 for tail heaviness).\n num_iters: Number of iterations to run the method.\n Return: A tensor of same rank as activation with the last dimension being 1.\n \"\"\"\n return tf.cond(\n tf.less(t, 1.0),\n functools.partial(compute_normalization_binary_search, activations, t, num_iters),\n functools.partial(compute_normalization_fixed_point, activations, t, num_iters)\n )\n\n\ndef _internal_bi_tempered_logistic_loss(activations, labels, t1, t2):\n \"\"\"Computes the Bi-Tempered logistic loss.\n Args:\n activations: A multi-dimensional tensor with last dimension `num_classes`.\n labels: batch_size\n t1: Temperature 1 (< 1.0 for boundedness).\n t2: Temperature 2 (> 1.0 for tail heaviness).\n Returns:\n A loss tensor for robust loss.\n \"\"\"\n if t2 == 1.0:\n normalization_constants = tf.math.log(\n tf.reduce_sum(\n tf.exp(activations),\n -1,\n keepdims=True\n )\n )\n if t1 == 1.0:\n return normalization_constants + tf.reduce_sum(\n tf.multiply(labels, tf.math.log(labels + 1e-10) - activations),\n -1,\n keepdims=True\n )\n else:\n shifted_activations = tf.exp(activations - normalization_constants)\n one_minus_t2 = 1.0\n else:\n one_minus_t2 = (1.0 - t2)\n normalization_constants = compute_normalization(activations, t2, num_iters=5)\n shifted_activations = tf.nn.relu(1.0 + one_minus_t2 * (activations - normalization_constants))\n\n one_minus_t1 = (1.0 - t1)\n\n if t1 == 1.0:\n return tf.reduce_sum(\n tf.multiply(\n tf.math.log(labels + 1e-10) - tf.math.log(tf.pow(shifted_activations, 1.0 / one_minus_t2)),\n labels\n ),\n -1,\n keepdims=True\n )\n else:\n beta = 1.0 + one_minus_t1\n logt_probs = (tf.pow(shifted_activations, one_minus_t1 / one_minus_t2) - 1.0) / one_minus_t1\n\n return tf.reduce_sum(\n tf.multiply(log_t(labels, t1) - logt_probs, labels)\n - 1.0 / beta * (tf.pow(labels, beta) - tf.pow(shifted_activations, beta / one_minus_t2)),\n -1\n )\n\n\ndef tempered_sigmoid(activations, t, num_iters=5):\n \"\"\"Tempered sigmoid function.\n Args:\n activations: Activations for the positive class for binary classification.\n t: Temperature tensor > 0.0.\n num_iters: Number of iterations to run the method.\n Returns:\n A probabilities tensor.\n \"\"\"\n t = tf.convert_to_tensor(t)\n input_shape = tf.shape(activations)\n activations_2d = tf.reshape(activations, [-1, 1])\n internal_activations = tf.concat([tf.zeros_like(activations_2d), activations_2d], 1)\n normalization_constants = tf.cond(\n # pylint: disable=g-long-lambda\n tf.equal(t, 1.0),\n lambda: tf.math.log(tf.reduce_sum(tf.exp(internal_activations), -1, keepdims=True)),\n functools.partial(compute_normalization, internal_activations, t, num_iters)\n )\n internal_probabilities = exp_t(internal_activations - normalization_constants,\n t)\n 
one_class_probabilities = tf.split(internal_probabilities, 2, axis=1)[1]\n\n return tf.reshape(one_class_probabilities, input_shape)\n\n\ndef tempered_softmax(activations, t, num_iters=5):\n \"\"\"Tempered softmax function.\n Args:\n activations: A multi-dimensional tensor with last dimension `num_classes`.\n t: Temperature tensor > 0.0.\n num_iters: Number of iterations to run the method.\n Returns:\n A probabilities tensor.\n \"\"\"\n t = tf.convert_to_tensor(t)\n normalization_constants = tf.cond(\n tf.equal(t, 1.0),\n lambda: tf.math.log(tf.reduce_sum(tf.exp(activations), -1, keepdims=True)),\n functools.partial(compute_normalization, activations, t, num_iters)\n )\n\n return exp_t(activations - normalization_constants, t)\n\n\ndef bi_tempered_binary_logistic_loss(labels, activations, t1, t2, label_smoothing=0.0, num_iters=5):\n \"\"\"Bi-Tempered binary logistic loss.\n Args:\n labels: A tensor with shape as activations and int dtype.\n activations: A tensor containing activations for class 1.\n t1: Temperature 1 (< 1.0 for boundedness).\n t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).\n label_smoothing: Label smoothing\n num_iters: Number of iterations to run the method.\n Returns:\n A loss tensor.\n \"\"\"\n with tf.name_scope('binary_bitempered_logistic'):\n activations = tf.convert_to_tensor(activations)\n labels = tf.cast(labels, activations.dtype)\n t1 = tf.convert_to_tensor(t1)\n t2 = tf.convert_to_tensor(t2)\n\n out_shape = tf.shape(labels)\n labels_2d = tf.reshape(labels, [-1, 1])\n activations_2d = tf.reshape(activations, [-1, 1])\n internal_labels = tf.concat([1.0 - labels_2d, labels_2d], 1)\n internal_logits = tf.concat([tf.zeros_like(activations_2d), activations_2d], 1)\n losses = bi_tempered_logistic_loss(internal_labels, internal_logits, t1, t2, label_smoothing, num_iters)\n\n return tf.reshape(losses, out_shape)\n\n\ndef bi_tempered_logistic_loss(labels, activations, t1, t2, label_smoothing=0.0, num_iters=5):\n \"\"\"Bi-Tempered Logistic Loss with custom gradient.\n Args:\n labels: A tensor with shape as activations and int dtype.\n activations: A multi-dimensional tensor with last dimension `num_classes`.\n t1: Temperature 1 (< 1.0 for boundedness).\n t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).\n label_smoothing: Label smoothing parameter between [0, 1).\n num_iters: Number of iterations to run the method.\n Returns:\n A loss tensor.\n \"\"\"\n with tf.name_scope('bitempered_logistic'):\n activations = tf.convert_to_tensor(activations)\n labels = tf.cast(labels, activations.dtype)\n t1 = tf.convert_to_tensor(t1)\n t2 = tf.convert_to_tensor(t2)\n\n if label_smoothing > 0.0:\n num_classes = tf.cast(tf.shape(labels)[-1], tf.float32)\n labels = (1 - num_classes / (num_classes - 1) * label_smoothing) * labels + \\\n label_smoothing / (num_classes - 1)\n\n @tf.custom_gradient\n def _custom_gradient_bi_tempered_logistic_loss(activations):\n \"\"\"Bi-Tempered Logistic Loss with custom gradient.\n Args:\n activations: A multi-dimensional tensor with last dim `num_classes`.\n Returns:\n A loss tensor, grad.\n \"\"\"\n with tf.name_scope('gradient_bitempered_logistic'):\n probabilities = tempered_softmax(activations, t2, num_iters)\n loss_values = tf.multiply(labels, log_t(labels + 1e-10, t1) - log_t(probabilities, t1)) - \\\n 1.0 / (2.0 - t1) * (tf.pow(labels, 2.0 - t1) - tf.pow(probabilities, 2.0 - t1))\n\n def grad(d_loss):\n \"\"\"Explicit gradient calculation.\n Args:\n d_loss: Infinitesimal change in the loss value.\n 
Returns: Loss gradient.\n \"\"\"\n delta_probs = probabilities - labels\n forget_factor = tf.pow(probabilities, t2 - t1)\n delta_probs_times_forget_factor = tf.multiply(delta_probs, forget_factor)\n delta_forget_sum = tf.reduce_sum(delta_probs_times_forget_factor, -1, keepdims=True)\n escorts = tf.pow(probabilities, t2)\n escorts = escorts / tf.reduce_sum(escorts, -1, keepdims=True)\n derivative = delta_probs_times_forget_factor - tf.multiply(escorts, delta_forget_sum)\n\n return tf.multiply(d_loss, derivative)\n\n return loss_values, grad\n\n loss_values = _custom_gradient_bi_tempered_logistic_loss(activations)\n\n return tf.reduce_sum(loss_values, -1)\n\n\ndef sparse_bi_tempered_logistic_loss(labels, activations, t1, t2, num_iters=5):\n \"\"\"Sparse Bi-Tempered Logistic Loss with custom gradient.\n Args:\n labels: A tensor with dtype of int32.\n activations: A multi-dimensional tensor with last dimension `num_classes`.\n t1: Temperature 1 (< 1.0 for boundedness).\n t2: Temperature 2 (> 1.0 for tail heaviness, < 1.0 for finite support).\n num_iters: Number of iterations to run the method.\n Returns:\n A loss tensor.\n \"\"\"\n with tf.name_scope('sparse_bitempered_logistic'):\n t1 = tf.convert_to_tensor(t1)\n t2 = tf.convert_to_tensor(t2)\n num_classes = tf.shape(activations)[-1]\n\n @tf.custom_gradient\n def _custom_gradient_sparse_bi_tempered_logistic_loss(activations):\n \"\"\"Sparse Bi-Tempered Logistic Loss with custom gradient.\n Args:\n activations: A multi-dimensional tensor with last dim `num_classes`.\n Returns:\n A loss tensor, grad.\n \"\"\"\n with tf.name_scope('gradient_sparse_bitempered_logistic'):\n probabilities = tempered_softmax(activations, t2, num_iters)\n loss_values = -log_t(\n tf.reshape(\n tf.gather_nd(probabilities, tf.where(tf.one_hot(labels, num_classes))),\n tf.shape(activations)[:-1]\n ),\n t1\n ) - 1.0 / (2.0 - t1) * (1.0 - tf.reduce_sum(tf.pow(probabilities, 2.0 - t1), -1))\n\n def grad(d_loss):\n \"\"\"Explicit gradient calculation.\n Args:\n d_loss: Infinitesimal change in the loss value.\n Returns: Loss gradient.\n \"\"\"\n delta_probs = probabilities - tf.one_hot(labels, num_classes)\n forget_factor = tf.pow(probabilities, t2 - t1)\n delta_probs_times_forget_factor = tf.multiply(delta_probs, forget_factor)\n delta_forget_sum = tf.reduce_sum(delta_probs_times_forget_factor, -1, keepdims=True)\n escorts = tf.pow(probabilities, t2)\n escorts = escorts / tf.reduce_sum(escorts, -1, keepdims=True)\n derivative = delta_probs_times_forget_factor - tf.multiply(escorts, delta_forget_sum)\n\n return tf.multiply(d_loss, derivative)\n\n return loss_values, grad\n\n loss_values = _custom_gradient_sparse_bi_tempered_logistic_loss(activations)\n\n return loss_values\n\n\ndef bi_tempered_binary_logistic(y_true, y_pred, t1, t2, label_smoothing=0.0, num_iters=5, from_logits=False):\n if from_logits:\n return bi_tempered_binary_logistic_loss(\n y_true, y_pred, t1=t1, t2=t2, label_smoothing=label_smoothing, num_iters=num_iters)\n\n y_pred = tf.convert_to_tensor(y_pred)\n if not isinstance(y_pred, (ops.EagerTensor, tf.Variable)) \\\n and y_pred.op.type == 'Sigmoid' and not hasattr(y_pred, '_keras_history'):\n # When sigmoid activation function is used for output operation, we\n # use logits from the sigmoid function directly to compute loss in order\n # to prevent collapsing zero when training.\n assert len(y_pred.op.inputs) == 1\n y_pred = y_pred.op.inputs[0]\n else:\n # Otherwise trying to revert sigmoid\n tf.get_logger().warning(\n 'Unable to obtain original logits for 
bi_tempered_binary_logistic loss. '\n 'Logits will be estimated from probabilities, but this can be numerical unstable.')\n epsilon_ = tf.constant(backend.epsilon(), y_pred.dtype)\n y_pred = tf.clip_by_value(y_pred, epsilon_, 1. - epsilon_)\n y_pred = -tf.math.log((1. - y_pred) / y_pred)\n\n return bi_tempered_binary_logistic_loss(\n y_true, y_pred, t1=t1, t2=t2, label_smoothing=label_smoothing, num_iters=num_iters)\n\n\ndef bi_tempered_logistic(y_true, y_pred, t1, t2, label_smoothing=0.0, num_iters=5, from_logits=False):\n if from_logits:\n return bi_tempered_logistic_loss(\n y_true, y_pred, t1=t1, t2=t2, label_smoothing=label_smoothing, num_iters=num_iters)\n\n y_pred = tf.convert_to_tensor(y_pred)\n if not isinstance(y_pred, (ops.EagerTensor, tf.Variable)) \\\n and y_pred.op.type == 'Softmax' and not hasattr(y_pred, '_keras_history'):\n # When softmax activation function is used for output operation, we\n # use logits from the softmax function directly to compute loss in order\n # to prevent collapsing zero when training.\n # See b/117284466\n assert len(y_pred.op.inputs) == 1\n y_pred = y_pred.op.inputs[0]\n else:\n # Otherwise trying to revert softmax\n tf.get_logger().warning(\n 'Unable to obtain original logits for bi_tempered_logistic loss. '\n 'Logits will be estimated from probabilities, but this can be numerical unstable.')\n epsilon_ = tf.constant(backend.epsilon(), y_pred.dtype)\n y_pred = tf.clip_by_value(y_pred, epsilon_, 1. - epsilon_)\n y_pred = tf.math.log(y_pred)\n y_pred, log_norm = tf.split(y_pred, num_or_size_splits=[-1, 1], axis=-1)\n y_pred = y_pred - log_norm\n\n return bi_tempered_logistic_loss(\n y_true, y_pred, t1=t1, t2=t2, label_smoothing=label_smoothing, num_iters=num_iters)\n\n\ndef sparse_bi_tempered_logistic(y_true, y_pred, t1, t2, num_iters=5, from_logits=False):\n if from_logits:\n return sparse_bi_tempered_logistic_loss(\n y_true, y_pred, t1=t1, t2=t2, num_iters=num_iters)\n\n y_pred = tf.convert_to_tensor(y_pred)\n if not isinstance(y_pred, (ops.EagerTensor, tf.Variable)) \\\n and y_pred.op.type == 'Softmax' and not hasattr(y_pred, '_keras_history'):\n # When softmax activation function is used for output operation, we\n # use logits from the softmax function directly to compute loss in order\n # to prevent collapsing zero when training.\n # See b/117284466\n assert len(y_pred.op.inputs) == 1\n y_pred = y_pred.op.inputs[0]\n else:\n # Otherwise trying to revert softmax\n tf.get_logger().warning(\n 'Unable to obtain original logits for sparse_bi_tempered_logistic loss. '\n 'Logits will be estimated from probabilities, but this can be numerical unstable.')\n epsilon_ = tf.constant(backend.epsilon(), y_pred.dtype)\n y_pred = tf.clip_by_value(y_pred, epsilon_, 1. 
- epsilon_)\n y_pred = tf.math.log(y_pred)\n\n return sparse_bi_tempered_logistic_loss(\n y_true, y_pred, t1=t1, t2=t2, num_iters=num_iters)\n\n\n@register_keras_serializable(package='Miss')\nclass BiTemperedBinaryLogistic(LossFunctionWrapper):\n \"\"\"Computes Bi-Tempered Binary Logistic Loss.\"\"\"\n\n def __init__(self, t1, t2, label_smoothing=0.0, num_iters=5, from_logits=False,\n reduction=Reduction.AUTO, name='bi_tempered_binary_logistic'):\n super(BiTemperedBinaryLogistic, self).__init__(\n bi_tempered_binary_logistic, name=name, reduction=reduction, from_logits=from_logits,\n t1=t1, t2=t2, label_smoothing=label_smoothing, num_iters=num_iters)\n\n\n@register_keras_serializable(package='Miss')\nclass BiTemperedLogistic(LossFunctionWrapper):\n \"\"\"Computes Bi-Tempered Logistic Loss.\"\"\"\n\n def __init__(self, t1, t2, label_smoothing=0.0, num_iters=5, from_logits=False,\n reduction=Reduction.AUTO, name='bi_tempered_logistic'):\n super(BiTemperedLogistic, self).__init__(\n bi_tempered_logistic, name=name, reduction=reduction, from_logits=from_logits,\n t1=t1, t2=t2, label_smoothing=label_smoothing, num_iters=num_iters)\n\n\n@register_keras_serializable(package='Miss')\nclass SparseBiTemperedLogistic(LossFunctionWrapper):\n \"\"\"Computes Sparse Bi-Tempered Logistic Loss.\"\"\"\n\n def __init__(self, t1, t2, num_iters=5, from_logits=False,\n reduction=Reduction.AUTO, name='sparse_bi_tempered_logistic'):\n super(SparseBiTemperedLogistic, self).__init__(\n sparse_bi_tempered_logistic, name=name, reduction=reduction, from_logits=from_logits,\n t1=t1, t2=t2, num_iters=num_iters)\n"
] | [
[
"numpy.reshape"
],
[
"tensorflow.name_scope",
"tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.equal",
"tensorflow.cast",
"tensorflow.while_loop",
"tensorflow.greater",
"tensorflow.name_scope",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.pow",
"tensorflow.exp",
"tensorflow.zeros_like",
"tensorflow.one_hot",
"tensorflow.split",
"tensorflow.clip_by_value",
"tensorflow.nn.relu",
"tensorflow.reduce_max",
"tensorflow.multiply",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.get_logger",
"tensorflow.math.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
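The second code string in the entry above (the bi-tempered logistic loss) builds everything on the tempered logarithm and exponential, log_t and exp_t. As a minimal NumPy sketch of those two definitions only — an illustration mirroring the formulas in that file, not the TensorFlow implementation itself:

import numpy as np

def log_t(u, t):
    # Tempered logarithm; reduces to log(u) as t -> 1.
    return np.log(u) if t == 1.0 else (u ** (1.0 - t) - 1.0) / (1.0 - t)

def exp_t(u, t):
    # Tempered exponential; reduces to exp(u) as t -> 1.
    return np.exp(u) if t == 1.0 else np.maximum(0.0, 1.0 + (1.0 - t) * u) ** (1.0 / (1.0 - t))

# exp_t inverts log_t for positive inputs.
u = np.array([0.2, 1.0, 3.0])
assert np.allclose(exp_t(log_t(u, 0.7), 0.7), u)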
abhi12ravi/iwpa | [
"ebe133412b7ef24453e090b6b44d8d78a540c384"
] | [
"scripts/make_predictions.py"
] | [
"import lazypredict\nimport sys\nimport numpy as np\nnp.set_printoptions(threshold=sys.maxsize)\n\n#Read data file\n\nimport pandas as pd\n\nfilepath = \"dataset/trial_1200/balanced_dataset2.csv\"\ndf = pd.read_csv(filepath)\nfeatures = df\n\n# Labels are the values we want to predict\nlabels = np.array(df['protection_level'])\n\n# Remove the labels from the features\nfeatures = features.drop('protection_level', axis = 1)\nfeatures = features.drop('page_title', axis = 1)\n# features = features.drop('page_id', axis=1)\n# features = features.drop('page_id_scrapped', axis=1)\n\n#Convert String to Floats\nfeatures['page_length'] = features['page_length'].astype(float)\nfeatures['edit_count'] = features['total_edits'].astype(float)\nfeatures['page_watchers'] = features['number_page_watchers'].astype(float)\nfeatures['page_watchers_recent_edits'] = features['number_page_watchers_recent_edits'].astype(float)\n\n# Saving feature names for later use\nfeature_list = list(features.columns)\n\n# Convert to numpy array\nfeatures = np.array(features)\n\n#Label encoding for protection_status column\n\n# 0 => unprotected\n# 1 => autoconfirmed\n# 2 => extendedconfirmed\n# 3 => sysop\nlabels_encoded = []\nfor item in labels:\n if(item ==\"unprotected\"):\n labels_encoded.append(0)\n elif(item == \"autoconfirmed\"):\n labels_encoded.append(1)\n elif(item == \"extendedconfirmed\"):\n labels_encoded.append(2)\n elif(item == \"sysop\"):\n labels_encoded.append(3) \n\n# Using Skicit-learn to split data into training and testing sets\nfrom sklearn.model_selection import train_test_split\n# Split the data into training and testing sets\ntrain_features, test_features, train_labels, test_labels = train_test_split(features, labels_encoded, test_size =0.20, random_state = 53)\n\nX_train = train_features\ny_train = train_labels\nX_test = test_features\ny_test = test_labels\n\nprint(X_train)\nfrom lazypredict.Supervised import LazyClassifier\nclf = LazyClassifier(verbose=0,ignore_warnings=True, custom_metric=None)\nmodels,predictions = clf.fit(X_train, X_test, y_train, y_test)\n\nprint(models) "
] | [
[
"numpy.array",
"numpy.set_printoptions",
"pandas.read_csv",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
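In the make_predictions.py entry above, protection_level labels are encoded with an if/elif chain. For illustration only (this is not from the repository), the same mapping — taken from the 0/1/2/3 comments in that script — can be written as a vectorized lookup with pandas:

import pandas as pd

# Same label encoding as the if/elif chain above, expressed as a dict lookup.
level_map = {"unprotected": 0, "autoconfirmed": 1, "extendedconfirmed": 2, "sysop": 3}
labels = pd.Series(["sysop", "unprotected", "autoconfirmed"])
labels_encoded = labels.map(level_map).tolist()
assert labels_encoded == [3, 0, 1]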
oz123/python-nvd3 | [
"fd4998549542343b74b82ca72cbcee97845b06ee"
] | [
"examples/lineChartXY.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExamples for Python-nvd3 is a Python wrapper for NVD3 graph library.\nNVD3 is an attempt to build re-usable charts and chart components\nfor d3.js without taking away the power that d3.js gives you.\n\nProject location : https://github.com/areski/python-nvd3\n\"\"\"\n\nfrom nvd3 import lineChart\nfrom numpy import sin, pi, linspace\n\noutput_file = open('test_lineChartXY.html', 'w')\n\ntype = \"lineChart\"\nchart = lineChart(name=type, x_is_date=False,\n x_axis_format=\".1f\", y_axis_format=\".1f\",\n width=500, height=500,\n show_legend=False)\n\n# lissajous parameters of a/b\na = [1, 3, 5, 3]\nb = [1, 5, 7, 4]\ndelta = pi / 2\nt = linspace(-pi, pi, 300)\n\nfor i in range(0, 4):\n x = sin(a[i] * t + delta)\n y = sin(b[i] * t)\n chart.add_serie(y=y, x=x, name='lissajous-n%d' % i, color='red' if i == 0 else 'black')\n\nchart.buildhtml()\noutput_file.write(chart.htmlcontent)\noutput_file.close()\n"
] | [
[
"numpy.linspace",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
furgerf/GAN-for-dermatologic-imaging | [
"e90b06c46c7693e984a4c5b067e18460113cd23b",
"e90b06c46c7693e984a4c5b067e18460113cd23b"
] | [
"src/perceptual_scores.py",
"src/two_way_evaluation.py"
] | [
"#!/usr/bin/env python\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom scipy.misc import imread\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition.pca import PCA\nfrom tqdm import tqdm\n\nfrom utils import (kernel_classifier_distance_and_std_from_activations,\n load_image_names)\n\n\nclass PerceptualScores:\n EXTRACTOR_NAMES = [\"MobileNetV2\", \"ResNet50\", \"VGG16\", \"VGG19\"]\n\n def __init__(self, config):\n # pylint: disable=no-else-raise\n self._config = config\n self._real_activations = None\n if self._config.extractor_name == \"MobileNetV2\":\n from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2\n from tensorflow.keras.applications.mobilenet_v2 import preprocess_input\n model = MobileNetV2(include_top=False, weights=\"imagenet\", alpha=1.4)\n self._preprocess = preprocess_input\n raise NotImplementedError(\"Need to update blocks...\")\n elif self._config.extractor_name == \"ResNet50\":\n from tensorflow.keras.applications.resnet50 import ResNet50\n from tensorflow.keras.applications.resnet50 import preprocess_input\n model = ResNet50(include_top=False, weights=\"imagenet\")\n self._preprocess = preprocess_input\n raise NotImplementedError(\"Need to update blocks...\")\n elif self._config.extractor_name == \"VGG16\":\n from tensorflow.keras.applications.vgg16 import VGG16\n from tensorflow.keras.applications.vgg16 import preprocess_input\n model = VGG16(include_top=False, weights=\"imagenet\")\n self._preprocess = preprocess_input\n raise NotImplementedError(\"Need to update blocks...\")\n elif self._config.extractor_name == \"VGG19\":\n from tensorflow.keras.applications.vgg19 import VGG19\n from tensorflow.keras.applications.vgg19 import preprocess_input\n model = VGG19(include_top=False, weights=\"imagenet\")\n self._extractor = Model(inputs=model.input, outputs=\n [model.get_layer(\"block{}_pool\".format(i)).output for i in range(1, 6)])\n self._preprocess = preprocess_input\n else:\n raise ValueError(\"Unknown feature extractor '{}'\".format(self._config.extractor_name))\n self._pca = None\n self._high_dimensional_kmeans = None\n self._low_dimensional_kmeans = None\n\n def _get_activations_from_images(self, all_image_names):\n activations = []\n data = tf.data.Dataset.from_tensor_slices(all_image_names).batch(self._config.batch_size)\n\n tf.logging.info(\"Computing activations for {} images\".format(len(all_image_names)))\n for image_names in tqdm(data, total=len(all_image_names) // self._config.batch_size + 1):\n images = [imread(image_name.numpy().decode(\"utf-8\"), mode=\"RGB\") for image_name in image_names]\n batch = tf.cast(tf.stack(images), dtype=tf.float32)\n activations.append([tf.reduce_mean(features, axis=[1, 2]) for features in self._extractor(self._preprocess(batch))])\n return [tf.concat([act[i] for act in activations], axis=0) for i in range(len(activations[0]))]\n\n def _get_activations_from_generator(self, generator, data_set):\n activations = []\n tf.logging.debug(\"Computing activations for newly-generated samples\")\n for batch in data_set:\n samples = tf.cast(tf.cast((generator(batch)+1) * 127.5, dtype=tf.int32), dtype=tf.float32) # denormalize to normal RGB\n activations.append([tf.reduce_mean(features, axis=[1, 2]) for features in self._extractor(self._preprocess(samples))])\n return [tf.concat([act[i] for act in activations], axis=0) for i in range(len(activations[0]))]\n\n def initialize(self, override_data_dir=None):\n assert self._real_activations is None\n\n 
data_dir = override_data_dir if override_data_dir else \\\n (self._config.target_data_dir if self._config.target_data_dir else self._config.data_dir)\n activations_file = os.path.join(\"data\", data_dir, \"activations_{}.npz\".format(self._config.extractor_name))\n if os.path.exists(activations_file):\n tf.logging.info(\"Loading activations from {}\".format(activations_file))\n with np.load(activations_file) as activations:\n self._real_activations = [tf.convert_to_tensor(activations[f]) for f in sorted(activations.files)]\n else:\n tf.logging.warning(\"Computing activations for real images in '{}'\".format(data_dir))\n self._real_activations = self._get_activations_from_images(load_image_names(data_dir))\n tf.logging.info(\"Saving activations to {}\".format(activations_file))\n np.savez(activations_file, **{\"block_{}\".format(i): act.numpy() for i, act in enumerate(self._real_activations)})\n\n tf.logging.debug(\"Fitting PCA\")\n self._pca = PCA(n_components=2)\n low_dimensional_real_activations = self._pca.fit_transform(self._real_activations[-1])\n tf.logging.debug(\"Explained variance: {} ({:.5f})\".format(\n self._pca.explained_variance_ratio_, np.sum(self._pca.explained_variance_ratio_)))\n\n high_dimensional_clusters = 7\n tf.logging.debug(\"Clustering high-dimensional activations with {} clusters\".format(high_dimensional_clusters))\n self._high_dimensional_kmeans = KMeans(n_clusters=high_dimensional_clusters)\n self._high_dimensional_kmeans.fit(self._real_activations[-1])\n tf.logging.debug(\"Inertia: {:.1f}\".format(self._high_dimensional_kmeans.inertia_))\n\n low_dimensional_clusters = 4\n tf.logging.debug(\"Clustering low-dimensional activations with {} clusters\".format(low_dimensional_clusters))\n self._low_dimensional_kmeans = KMeans(n_clusters=low_dimensional_clusters)\n self._low_dimensional_kmeans.fit(low_dimensional_real_activations)\n tf.logging.debug(\"Inertia: {:.1f}\".format(self._low_dimensional_kmeans.inertia_))\n\n def _compute_scores_from_activations(self, generated_activations):\n fid = tf.contrib.gan.eval.frechet_classifier_distance_from_activations(self._real_activations[-1], generated_activations[-1])\n mmd, _ = kernel_classifier_distance_and_std_from_activations(self._real_activations[-1], generated_activations[-1])\n low_level_fids = [\n tf.contrib.gan.eval.frechet_classifier_distance_from_activations(self._real_activations[i], generated_activations[i]) \\\n for i in range(len(self._real_activations)-1)]\n combined_fid = tf.contrib.gan.eval.frechet_classifier_distance_from_activations(\n tf.concat(self._real_activations, axis=-1), tf.concat(generated_activations, axis=-1))\n\n # high_dimensional_cluster_distances = tf.reduce_min(self._high_dimensional_kmeans.transform(generated_activations), axis=-1)\n # low_dimensional_cluster_distances = tf.reduce_min(self._low_dimensional_kmeans.transform(self._pca.transform(generated_activations)), axis=-1)\n # mean_std = lambda d: (tf.reduce_mean(d), tf.convert_to_tensor(np.std(d)))\n # return fid, k_mmd, mean_std(high_dimensional_cluster_distances), mean_std(low_dimensional_cluster_distances)\n\n return fid, mmd, -self._high_dimensional_kmeans.score(generated_activations[-1]), \\\n -self._low_dimensional_kmeans.score(self._pca.transform(generated_activations[-1])), low_level_fids, combined_fid\n\n def compute_scores_from_samples(self):\n assert os.path.exists(self._config.samples_dir)\n all_image_names = [os.path.join(self._config.samples_dir, sample) for sample in \\\n sorted(os.listdir(self._config.samples_dir)) if 
sample.endswith(\".png\")]\n\n activations_file = os.path.join(self._config.samples_dir, \"activations_{}.npz\".format(self._config.extractor_name))\n if os.path.exists(activations_file):\n tf.logging.info(\"Loading activations from {}\".format(activations_file))\n generated_activations = tf.convert_to_tensor(np.load(activations_file))\n else:\n tf.logging.warning(\"Computing activations for generated images in '{}'\".format(self._config.samples_dir))\n generated_activations = self._get_activations_from_images(all_image_names)\n tf.logging.info(\"Saving activations to {}\".format(activations_file))\n np.savez(activations_file, **{\"block_{}\".format(i): act.numpy() for i, act in enumerate(self._real_activations)})\n\n tf.logging.info(\"Computing scores\")\n return self._compute_scores_from_activations(generated_activations)\n\n def compute_scores_from_generator(self, generator, data_set):\n generated_activations = self._get_activations_from_generator(generator, data_set)\n\n tf.logging.debug(\"Computing scores\")\n return self._compute_scores_from_activations(generated_activations)\n",
"#!/usr/bin/env python\n\nimport os\nimport pickle\nimport time\nfrom abc import abstractmethod\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom evaluation import Evaluation\nfrom generator_loss_args import GeneratorLossArgs\nfrom perceptual_scores import PerceptualScores\nfrom two_way_metrics import TwoWayMetrics\nfrom utils import get_memory_usage_string, logistic\n\n\nclass TwoWayEvaluation(Evaluation):\n def __init__(self, model, config):\n assert not config.has_noise_input, \"we don't want to translate back to noise\"\n self._first_generator = None\n self._first_generator_optimizer = None\n self._first_discriminator = None\n self._first_discriminator_optimizer = None\n self._second_generator = None\n self._second_generator_optimizer = None\n self._second_discriminator = None\n self._second_discriminator_optimizer = None\n self._checkpoint = None\n self._final_checkpoint = None\n self._perceptual_scores = PerceptualScores(config) if config.target_type == \"image\" else None\n self._reverse_perceptual_scores = PerceptualScores(config) if config.input_type == \"image\" else None\n super(TwoWayEvaluation, self).__init__(model, config)\n\n def set_up_model(self):\n tf.logging.info(\"Setting up models with learing rate {} for G, {} for D\".format(\n self._model.gen_learning, self._model.disc_learning))\n self._first_generator = self._model.get_generator()\n self._first_discriminator = self._model.get_discriminator()\n has_colored_target = self._config.has_colored_target\n self._config.has_colored_target = self._config.has_colored_input\n self._second_generator = self._model.get_generator()\n self._config.has_colored_target = has_colored_target\n self._second_discriminator = self._model.get_discriminator()\n # defun gives 10 secs/epoch performance boost\n self._first_generator.call = tf.contrib.eager.defun(self._first_generator.call)\n self._first_discriminator.call = tf.contrib.eager.defun(self._first_discriminator.call)\n self._second_generator.call = tf.contrib.eager.defun(self._second_generator.call)\n self._second_discriminator.call = tf.contrib.eager.defun(self._second_discriminator.call)\n\n self._first_generator_optimizer = tf.train.AdamOptimizer(self._model.gen_learning)\n self._first_discriminator_optimizer = tf.train.AdamOptimizer(self._model.disc_learning)\n self._second_generator_optimizer = tf.train.AdamOptimizer(self._model.gen_learning)\n self._second_discriminator_optimizer = tf.train.AdamOptimizer(self._model.disc_learning)\n checkpoint = tf.train.Checkpoint(\n first_generator_optimizer=self._first_generator_optimizer,\n first_discriminator_optimizer=self._first_discriminator_optimizer,\n first_generator=self._first_generator,\n first_discriminator=self._first_discriminator,\n second_generator_optimizer=self._second_generator_optimizer,\n second_discriminator_optimizer=self._second_discriminator_optimizer,\n second_generator=self._second_generator,\n second_discriminator=self._second_discriminator)\n self._checkpoint = tf.contrib.checkpoint.CheckpointManager(checkpoint, self._config.checkpoint_dir,\n max_to_keep=None if self._config.keep_all_checkpoints else 5)\n if self._config.keep_final_checkpoints:\n final_checkpoint = tf.train.Checkpoint(\n first_generator_optimizer=self._first_generator_optimizer,\n first_discriminator_optimizer=self._first_discriminator_optimizer,\n first_generator=self._first_generator,\n first_discriminator=self._first_discriminator,\n second_generator_optimizer=self._second_generator_optimizer,\n 
second_discriminator_optimizer=self._second_discriminator_optimizer,\n second_generator=self._second_generator,\n second_discriminator=self._second_discriminator)\n self._final_checkpoint = tf.contrib.checkpoint.CheckpointManager(final_checkpoint, self._config.final_checkpoint_dir,\n max_to_keep=None if self._config.keep_all_checkpoints else 5)\n\n try:\n self._model.print_model_summary(self._first_generator, self._second_discriminator, self.epoch_sample_input)\n except Exception as ex:\n tf.logging.warning(\"Unable to print model summary ({}: {})\".format(ex.__class__.__name__, ex))\n\n if self._perceptual_scores:\n self._perceptual_scores.initialize()\n if self._reverse_perceptual_scores:\n self._reverse_perceptual_scores.initialize(self._config.data_dir)\n\n @property\n @abstractmethod\n def data_set(self):\n \"\"\"\n The data set to train on. Each batch should consist of a tuple of generator inputs in the first\n and in the second domain.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def extra_discriminator_data_set(self):\n \"\"\"\n The data set of additional real samples for the SECOND discriminator to train on.\n Only makes sense for non-conditioned discriminators.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def test_data_set(self):\n \"\"\"\n The data set to train on. Each batch should consist of a tuple of generator inputs in the first\n and in the second domain - same as the main data set.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def epoch_sample_input(self):\n \"\"\"\n Generator input for the generation of epoch samples.\n \"\"\"\n pass\n\n class TrainingResult:\n def __init__(self, gen_loss, gen_losses, disc_loss, disc_on_real, disc_on_generated,\n gen_gradients, disc_gradients):\n # pylint: disable=too-many-arguments\n self.gen_loss = gen_loss\n self.gen_losses = gen_losses\n self.disc_loss = disc_loss\n\n self.disc_on_real = disc_on_real\n self.disc_on_generated = disc_on_generated\n\n self.gen_gradients = gen_gradients\n self.disc_gradients = disc_gradients\n\n def train_generator_discriminator_pair(self, generator, discriminator, reverse_generator, batch_input, batch_target):\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n # FORWARD DIRECTION\n generated_images = generator(batch_input, training=True)\n\n if batch_input.shape[-1] == 4:\n assert batch_target.shape[-1] == 4\n disc_input_real = tf.concat([batch_input, batch_target[:, :, :, :3]], axis=-1) \\\n if self._config.conditioned_discriminator else batch_target[:, :, :, :3]\n disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) \\\n if self._config.conditioned_discriminator else generated_images\n else:\n assert batch_target.shape[-1] in [1, 3] and batch_input.shape[-1] in [1, 3]\n disc_input_real = tf.concat([batch_input, batch_target], axis=-1) \\\n if self._config.conditioned_discriminator else batch_target\n disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) \\\n if self._config.conditioned_discriminator else generated_images\n\n disc_on_real = discriminator(disc_input_real, training=True)\n disc_on_generated = discriminator(disc_input_generated, training=True)\n\n # BACKWARD DIRECTION - not training or discriminating reconstructed image\n # the input for the reconstruction may need to be augmented with the segmentation\n reconstruction_input = tf.concat([generated_images, batch_input[:, :, :, 3:]], axis=-1) if batch_input.shape[-1] > 3 else generated_images\n reconstructed_images = reverse_generator(reconstruction_input, 
training=False)\n\n if self._config.loss_identity:\n targets = batch_target\n identity_images = generator(batch_target, training=True)\n else:\n targets = None\n identity_images = None\n\n gen_losses = self._model.gen_loss(disc_on_generated, GeneratorLossArgs(generated_images, batch_input,\n targets=targets, reconstructed_images=reconstructed_images, identity_images=identity_images))\n gen_loss = sum(gen_losses.values())\n disc_loss = self._model.disc_loss(disc_on_real, disc_on_generated)\n\n # compute gradients\n gradients_of_generator = gen_tape.gradient(gen_loss, generator.variables)\n gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)\n\n return TwoWayEvaluation.TrainingResult(gen_loss, gen_losses, disc_loss, disc_on_real, disc_on_generated,\n gradients_of_generator, gradients_of_discriminator)\n\n def train_discriminator(self, generator, discriminator, batch_input, batch_target):\n with tf.GradientTape() as disc_tape:\n if generator:\n generated_images = generator(batch_input, training=True)\n\n if batch_input.shape[-1] == 4:\n assert batch_target.shape[-1] == 4\n disc_input_real = tf.concat([batch_input, batch_target[:, :, :, :3]], axis=-1) \\\n if self._config.conditioned_discriminator else batch_target[:, :, :, :3]\n if generator:\n disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) \\\n if self._config.conditioned_discriminator else generated_images\n else:\n assert batch_target.shape[-1] == 3 and batch_input.shape[-1] == 3\n disc_input_real = tf.concat([batch_input, batch_target], axis=-1) \\\n if self._config.conditioned_discriminator else batch_target\n if generator:\n disc_input_generated = tf.concat([batch_input, generated_images], axis=-1) \\\n if self._config.conditioned_discriminator else generated_images\n\n disc_on_real = discriminator(disc_input_real, training=True)\n if generator:\n disc_on_generated = discriminator(disc_input_generated, training=True)\n else:\n disc_on_generated = None\n\n disc_loss = self._model.disc_loss(disc_on_real, disc_on_generated)\n\n gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)\n\n return TwoWayEvaluation.TrainingResult(None, None, disc_loss, disc_on_real, disc_on_generated,\n None, gradients_of_discriminator)\n\n def train(self, epochs, metrics_writer):\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n try:\n tf.logging.info(\"Memory usage before training: {}\".format(get_memory_usage_string()))\n except: # pylint: disable=bare-except\n tf.logging.warning(\"Unable to get memory usage, no GPU available?\")\n\n assert not self._config.train_disc_on_previous_images \\\n and not self._config.real_image_noise_stdev, \"not implemented\"\n\n checkpoint_interval = 25 # always have same interval for easier epoch number -> checkpoint-number conversion\n gradients_interval = epochs // 5 // 25 * 25 # aim for at least 5 gradients in total but have it a multiple of 25\n gradients_interval = 25 if gradients_interval == 0 else min(gradients_interval, 150)\n if self._config.scores_every_epoch:\n scores_interval = 1\n else:\n scores_interval = epochs // 10 // 10 * 10 # aim for at least 10 percentual scores in total but have it a multiple of 10\n scores_interval = 10 if scores_interval == 0 else min(scores_interval, 25)\n tf.logging.info(\"Intervals: checkpoint {}, scores {}, gradients {}\".format(checkpoint_interval, scores_interval, gradients_interval))\n for epoch in range(epochs):\n start = time.time()\n\n metrics = 
TwoWayMetrics(epoch+1, 4)\n\n # NOTE, KEEP IN MIND: \"first\"/\"second\" refers to the input domain\n # ie \"first generator\" is the generator that receives input from the first domain (and generates for the second)\n\n if self._config.extra_disc_step_real or self._config.extra_disc_step_both:\n for batch_number, batch in enumerate(self.data_set):\n batch_first_domain, batch_second_domain = batch\n\n forward_result = self.train_discriminator(self._first_generator if self._config.extra_disc_step_both else None,\n self._second_discriminator, batch_first_domain, batch_second_domain)\n backward_result = self.train_discriminator(self._second_generator if self._config.extra_disc_step_both else None,\n self._first_discriminator, batch_second_domain, batch_first_domain)\n\n self._second_discriminator_optimizer.apply_gradients(zip(forward_result.disc_gradients, self._second_discriminator.variables))\n self._first_discriminator_optimizer.apply_gradients(zip(backward_result.disc_gradients, self._first_discriminator.variables))\n\n for batch_number, batch in enumerate(self.data_set):\n batch_first_domain, batch_second_domain = batch\n\n # evaluate models\n forward_result = self.train_generator_discriminator_pair(self._first_generator, self._second_discriminator,\n self._second_generator, batch_first_domain, batch_second_domain)\n backward_result = self.train_generator_discriminator_pair(self._second_generator, self._first_discriminator,\n self._first_generator, batch_second_domain, batch_first_domain)\n\n # store results\n metrics.add_losses((forward_result.gen_losses, backward_result.gen_losses), (backward_result.disc_loss, forward_result.disc_loss))\n metrics.add_discriminations((logistic(backward_result.disc_on_real), logistic(forward_result.disc_on_real)),\n (logistic(backward_result.disc_on_generated), logistic(forward_result.disc_on_generated)))\n\n # train\n self._first_generator_optimizer.apply_gradients(zip(forward_result.gen_gradients, self._first_generator.variables))\n self._second_discriminator_optimizer.apply_gradients(zip(forward_result.disc_gradients, self._second_discriminator.variables))\n self._second_generator_optimizer.apply_gradients(zip(backward_result.gen_gradients, self._second_generator.variables))\n self._first_discriminator_optimizer.apply_gradients(zip(backward_result.disc_gradients, self._first_discriminator.variables))\n\n if batch_number == 0:\n # work with the gradients of the first (rather than last) batch since here, the batch is full for sure\n for i, variable in enumerate(self._first_generator.variables):\n if \"batch_normalization\" in variable.name or forward_result.gen_gradients[i] is None:\n continue\n tf.contrib.summary.histogram(variable.name.replace(\":\", \"_\"), forward_result.gen_gradients[i], \"gradients/first_gen\", epoch)\n for i, variable in enumerate(self._first_discriminator.variables):\n if \"batch_normalization\" in variable.name or backward_result.disc_gradients[i] is None:\n continue\n tf.contrib.summary.histogram(variable.name.replace(\":\", \"_\"), backward_result.disc_gradients[i], \"gradients/first_disc\", epoch)\n for i, variable in enumerate(self._second_generator.variables):\n if \"batch_normalization\" in variable.name or backward_result.gen_gradients[i] is None:\n continue\n tf.contrib.summary.histogram(variable.name.replace(\":\", \"_\"), backward_result.gen_gradients[i], \"gradients/second_gen\", epoch)\n for i, variable in enumerate(self._second_discriminator.variables):\n if \"batch_normalization\" in variable.name or 
forward_result.disc_gradients[i] is None:\n continue\n tf.contrib.summary.histogram(variable.name.replace(\":\", \"_\"), forward_result.disc_gradients[i], \"gradients/second_disc\", epoch)\n\n if (epoch+1) % gradients_interval == 0 or epoch == epochs - 1:\n first_generator_gradients = [(variable.name, forward_result.gen_gradients[i].numpy()) \\\n for i, variable in enumerate(self._first_generator.variables) if \"batch_normalization\" not in variable.name]\n first_discriminator_gradients = [(variable.name, backward_result.disc_gradients[i].numpy()) \\\n for i, variable in enumerate(self._first_discriminator.variables) if \"batch_normalization\" not in variable.name]\n second_generator_gradients = [(variable.name, backward_result.gen_gradients[i].numpy()) \\\n for i, variable in enumerate(self._second_generator.variables) if \"batch_normalization\" not in variable.name]\n second_discriminator_gradients = [(variable.name, forward_result.disc_gradients[i].numpy()) \\\n for i, variable in enumerate(self._second_discriminator.variables) if \"batch_normalization\" not in variable.name]\n with open(os.path.join(self._config.gradients_dir, \"gradients_at_epoch_{:04d}.pkl\".format(epoch+1)), \"wb\") as fh:\n pickle.dump((first_generator_gradients, forward_result.gen_loss, second_generator_gradients, backward_result.gen_loss,\n first_discriminator_gradients, backward_result.disc_loss, second_discriminator_gradients, forward_result.disc_loss), fh)\n\n if self.extra_discriminator_data_set:\n assert not self._config.conditioned_discriminator and \\\n not self._config.train_disc_on_previous_images and \\\n not self._config.real_image_noise_stdev, \"not implemented\"\n\n for disc_input in self.extra_discriminator_data_set:\n with tf.GradientTape() as disc_tape:\n disc_on_real = self._second_discriminator(disc_input, training=True)\n disc_loss = self._model.disc_loss(disc_on_real, []) # no generated samples\n\n gradients_of_discriminator = disc_tape.gradient(disc_loss, self._second_discriminator.variables)\n self._second_discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, self._second_discriminator.variables))\n\n _ = self.save_epoch_samples((self._first_generator, self._second_generator),\n (self._first_discriminator, self._second_discriminator), epoch+1, (epoch+1) % 5 == 0)\n\n tf.contrib.summary.histogram(\"first_gen\", metrics.first_gen_loss, \"loss\", epoch)\n tf.contrib.summary.histogram(\"first_disc\", metrics.first_disc_loss, \"loss\", epoch)\n tf.contrib.summary.histogram(\"second_gen\", metrics.second_gen_loss, \"loss\", epoch)\n tf.contrib.summary.histogram(\"second_disc\", metrics.second_disc_loss, \"loss\", epoch)\n\n tf.contrib.summary.histogram(\"first_on_real\", metrics.first_disc_on_real, \"predictions\", epoch)\n tf.contrib.summary.histogram(\"first_on_gen\", metrics.first_disc_on_generated, \"predictions\", epoch)\n tf.contrib.summary.histogram(\"second_on_real\", metrics.second_disc_on_real, \"predictions\", epoch)\n tf.contrib.summary.histogram(\"second_on_gen\", metrics.second_disc_on_generated, \"predictions\", epoch)\n\n if (epoch+1) % checkpoint_interval == 0 or epoch == epochs - 1:\n self._checkpoint.save()\n elif epoch > epochs - 6 and self._final_checkpoint:\n self._final_checkpoint.save()\n\n if epoch == 0 or epoch == 4 or (epoch+1) % 10 == 0:\n memory_usage = \"\"\n if epoch == 0 or epoch == 4 or (epoch+1) % 50 == 0:\n try:\n memory_usage = \" - memory: \" + get_memory_usage_string()\n except: # pylint: disable=bare-except\n memory_usage = \" - Unable to 
get memory usage, no GPU available?\"\n time_remaining = (time.time()-self._config.start_time)/(epoch+1)*(epochs-epoch-1)/60\n tf.logging.info(\"{}/{}: Round time: {:.1f}m - ETA {:%H:%M} ({:.1f}h){}\".format(epoch + 1, epochs,\n (time.time()-start)/60, datetime.now() + timedelta(minutes=time_remaining), time_remaining/60,\n memory_usage))\n\n tf.logging.info(\"{}/{} FWD: Loss G {:.2f}+-{:.3f}, D {:.2f}+-{:.3f}; D on real {:.3f}+-{:.3f}, on fake {:.3f}+-{:.3f}\".format(\n epoch + 1, epochs,\n np.mean(metrics.first_gen_loss), np.std(metrics.first_gen_loss),\n np.mean(metrics.second_disc_loss), np.std(metrics.second_disc_loss),\n np.mean(metrics.second_disc_on_real), np.std(metrics.second_disc_on_real),\n np.mean(metrics.second_disc_on_generated), np.std(metrics.second_disc_on_generated)))\n tf.logging.info(\"{}/{} BWD: Loss G {:.2f}+-{:.3f}, D {:.2f}+-{:.3f}; D on real {:.3f}+-{:.3f}, on fake {:.3f}+-{:.3f}\".format(\n epoch + 1, epochs,\n np.mean(metrics.second_gen_loss), np.std(metrics.second_gen_loss),\n np.mean(metrics.first_disc_loss), np.std(metrics.first_disc_loss),\n np.mean(metrics.first_disc_on_real), np.std(metrics.first_disc_on_real),\n np.mean(metrics.first_disc_on_generated), np.std(metrics.first_disc_on_generated)))\n\n is_near_interval = \\\n (epoch+0) % scores_interval == 0 or \\\n (epoch+1) % scores_interval == 0 or \\\n (epoch+2) % scores_interval == 0\n if (epoch > 1 or self._config.scores_every_epoch) and (epoch > epochs - 6 or is_near_interval):\n first_fid = first_mmd = first_clustering_high = first_clustering_low = first_combined_fid = tf.convert_to_tensor(np.nan)\n first_low_level_fids = [tf.convert_to_tensor(np.nan)] * metrics.n_low_level_fids\n second_fid = second_mmd = second_clustering_high = second_clustering_low = second_combined_fid = tf.convert_to_tensor(np.nan)\n second_low_level_fids = [tf.convert_to_tensor(np.nan)] * metrics.n_low_level_fids\n if self._perceptual_scores:\n first_fid, first_mmd, first_clustering_high, first_clustering_low, first_low_level_fids, first_combined_fid = \\\n self._perceptual_scores.compute_scores_from_generator(self._first_generator, self.data_set.map(lambda x, y: x))\n tf.logging.warning(\"{}/{}: FWD: Computed perceptual scores: FID={:.1f}, MMD={:.3f}, clustering-high={:.3f}, clustering-low={:.3f}\".format(\n epoch + 1, epochs, first_fid, first_mmd, first_clustering_high, first_clustering_low))\n if self._reverse_perceptual_scores:\n second_fid, second_mmd, second_clustering_high, second_clustering_low, second_low_level_fids, second_combined_fid = \\\n self._reverse_perceptual_scores.compute_scores_from_generator(self._second_generator, self.data_set.map(lambda x, y: y))\n tf.logging.warning(\"{}/{}: BWD: Computed perceptual scores: FID={:.1f}, MMD={:.3f}, clustering-high={:.3f}, clustering-low={:.3f}\".format(\n epoch + 1, epochs, second_fid, second_mmd, second_clustering_high, second_clustering_low))\n metrics.add_perceptual_scores((first_fid, second_fid), (first_mmd, second_mmd), (first_clustering_high, second_clustering_high),\n (first_clustering_low, second_clustering_low), (first_low_level_fids, second_low_level_fids), (first_combined_fid, second_combined_fid))\n tf.contrib.summary.scalar(\"first_fid\", first_fid, \"perceptual\", epoch)\n tf.contrib.summary.scalar(\"first_mmd\", first_mmd, \"perceptual\", epoch)\n tf.contrib.summary.scalar(\"first_clustering_high\", first_clustering_high, \"perceptual\", epoch)\n tf.contrib.summary.scalar(\"first_clustering_low\", first_clustering_low, \"perceptual\", epoch)\n # not 
adding low-level FIDs to TB since I'm not using it anyway\n tf.contrib.summary.scalar(\"first_combined_fid\", first_combined_fid, \"perceptual\", epoch)\n tf.contrib.summary.scalar(\"second_fid\", second_fid, \"perceptual\", epoch)\n tf.contrib.summary.scalar(\"second_mmd\", second_mmd, \"perceptual\", epoch)\n tf.contrib.summary.scalar(\"second_clustering_high\", second_clustering_high, \"perceptual\", epoch)\n tf.contrib.summary.scalar(\"second_clustering_low\", second_clustering_low, \"perceptual\", epoch)\n # not adding low-level FIDs to TB since I'm not using it anyway\n tf.contrib.summary.scalar(\"second_combined_fid\", second_combined_fid, \"perceptual\", epoch)\n\n if self.test_data_set and (epoch > 1 or self._config.scores_every_epoch) and (epoch > epochs - 6 or is_near_interval):\n first_disc_on_training = self._discriminate_data_set(self._first_discriminator, self.data_set.map(lambda x, y: (y, x[:, :, :, :3])))\n first_disc_on_training_mean = np.mean(first_disc_on_training)\n first_disc_on_training_std = np.std(first_disc_on_training)\n first_disc_on_test = self._discriminate_data_set(self._first_discriminator, self.test_data_set.map(lambda x, y: (y, x[:, :, :, :3])))\n first_disc_on_test_mean = np.mean(first_disc_on_test)\n first_disc_on_test_std = np.std(first_disc_on_test)\n second_disc_on_training = self._discriminate_data_set(self._second_discriminator, self.data_set.map(lambda x, y: (x, y[:, :, :, :3])))\n second_disc_on_training_mean = np.mean(second_disc_on_training)\n second_disc_on_training_std = np.std(second_disc_on_training)\n second_disc_on_test = self._discriminate_data_set(self._second_discriminator, self.test_data_set.map(lambda x, y: (x, y[:, :, :, :3])))\n second_disc_on_test_mean = np.mean(second_disc_on_test)\n second_disc_on_test_std = np.std(second_disc_on_test)\n tf.logging.warning(\"{}/{}: First disc on training: {:.3f}+-{:.3f}, on test: {:.3f}+-{:.3f}, diff: {:.3f}\".format(\n epoch + 1, epochs, first_disc_on_training_mean, first_disc_on_training_std, first_disc_on_test_mean, first_disc_on_test_std,\n first_disc_on_training_mean - first_disc_on_test_mean))\n tf.logging.warning(\"{}/{}: Second disc on training: {:.3f}+-{:.3f}, on test: {:.3f}+-{:.3f}, diff: {:.3f}\".format(\n epoch + 1, epochs, second_disc_on_training_mean, second_disc_on_training_std, second_disc_on_test_mean, second_disc_on_test_std,\n second_disc_on_training_mean - second_disc_on_test_mean))\n metrics.add_disc_on_training_test((first_disc_on_training_mean, second_disc_on_training_mean), (first_disc_on_training_std,\n second_disc_on_training_std), (first_disc_on_test_mean, second_disc_on_test_mean), (first_disc_on_test_std, second_disc_on_test_std))\n tf.contrib.summary.scalar(\"first_disc_on_training_mean\", first_disc_on_training_mean, \"disc_overfitting\", epoch)\n tf.contrib.summary.scalar(\"first_disc_on_training_std\", first_disc_on_training_std, \"disc_overfitting\", epoch)\n tf.contrib.summary.scalar(\"first_disc_on_test_mean\", first_disc_on_test_mean, \"disc_overfitting\", epoch)\n tf.contrib.summary.scalar(\"first_disc_on_test_std\", first_disc_on_test_std, \"disc_overfitting\", epoch)\n tf.contrib.summary.scalar(\"second_disc_on_training_mean\", second_disc_on_training_mean, \"disc_overfitting\", epoch)\n tf.contrib.summary.scalar(\"second_disc_on_training_std\", second_disc_on_training_std, \"disc_overfitting\", epoch)\n tf.contrib.summary.scalar(\"second_disc_on_test_mean\", second_disc_on_test_mean, \"disc_overfitting\", epoch)\n 
tf.contrib.summary.scalar(\"second_disc_on_test_std\", second_disc_on_test_std, \"disc_overfitting\", epoch)\n\n metrics_writer.writerow(metrics.get_row_data())\n\n @abstractmethod\n def _plot_epoch_samples(self, generator, discriminator):\n pass\n\n @abstractmethod\n def _plot_hq_epoch_samples(self, generated_samples, discriminator_probabilities):\n pass\n"
] | [
[
"tensorflow.contrib.gan.eval.frechet_classifier_distance_from_activations",
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"sklearn.cluster.KMeans",
"sklearn.decomposition.pca.PCA",
"tensorflow.reduce_mean",
"tensorflow.logging.debug",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.stack",
"tensorflow.keras.applications.mobilenet_v2.MobileNetV2",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.applications.vgg19.VGG19",
"tensorflow.logging.info",
"numpy.load",
"numpy.sum"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.logging.warning",
"tensorflow.concat",
"tensorflow.train.Checkpoint",
"tensorflow.contrib.checkpoint.CheckpointManager",
"tensorflow.contrib.summary.histogram",
"numpy.std",
"tensorflow.contrib.summary.scalar",
"numpy.mean",
"tensorflow.train.AdamOptimizer",
"tensorflow.contrib.eager.defun",
"tensorflow.GradientTape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
cloudspectatordevelopment/cudamat | [
"d26cf019a7855077b7d4344ae1a3202a156c5170"
] | [
"test/test_cudamat.py"
] | [
"import numpy as np\nimport nose\nimport cudamat as cm\n\ndef setup():\n cm.cublas_init()\n\ndef teardown():\n cm.cublas_shutdown()\n\ndef test_reshape():\n m = 256\n n = 1\n cm1 = np.array(np.random.rand(n, m)*10, dtype=np.float32, order='F')\n cm2 = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n gm1 = cm.CUDAMatrix(cm1)\n gm2 = cm.CUDAMatrix(cm2)\n\n gm1.reshape((m, n))\n gm2.assign(gm1)\n gm1.reshape((n, m))\n\n gm1.copy_to_host()\n gm2.copy_to_host()\n\n assert np.max(np.abs(gm1.numpy_array - gm2.numpy_array.T)) < 10**-2, \"Error in CUDAMatrix.reshape exceeded threshold\"\n\ndef test_T_field():\n m = 256\n n = 128\n cm1 = np.array(np.random.rand(n, m)*10, dtype=np.float32, order='F')\n cm2 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n gm1 = cm.CUDAMatrix(cm1)\n gm2 = cm.CUDAMatrix(cm2)\n\n # test dot\n gm = cm.dot(gm2.T, gm1.T)\n c = np.dot(cm2.T, cm1.T)\n gm.copy_to_host()\n\n assert np.max(np.abs(gm.numpy_array - c)) < 10**-2, \"Error in CUDAMatrix.dot with TransposedCUDAMatrix exceeded threshold\"\n\n # test add_dot\n cm3 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n gm3 = cm.CUDAMatrix(cm3)\n gm3.add_dot(gm2.T, gm1.T)\n c = cm3 + np.dot(cm2.T, cm1.T)\n gm3.copy_to_host()\n\n assert np.max(np.abs(gm3.numpy_array - c)) < 10**-2, \"Error in CUDAMatrix.add_dot TransposedCUDAMatrix exceeded threshold\"\n\n # test add_sums\n gm2.add_sums(gm1.T, axis = 1)\n c = cm2 + np.atleast_2d(cm1.sum(0)).T\n gm2.copy_to_host()\n\n assert np.max(np.abs(gm2.numpy_array - c)) < 10**-2, \"Error in CUDAMatrix.add_sums TransposedCUDAMatrix exceeded threshold\"\n\ndef test_assign():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n\n m1.assign(m2)\n m1.copy_to_host()\n\n assert np.max(np.abs(m1.numpy_array - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.assign exceeded threshold\"\n\ndef test_assign_scalar():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n m1 = cm.CUDAMatrix(a)\n\n m1.assign(np.pi)\n m1.copy_to_host()\n\n assert np.max(np.abs(m1.numpy_array - np.pi)) < 10**-4, \"Error in CUDAMatrix.assign_scalar exceeded threshold\"\n\ndef test_get_row_slice():\n m = 256\n n = 128\n start = 11\n end = 54\n\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(end-start, n)*10, dtype=np.float32, order='F')\n\n c = np.array(a[start:end,:], order='F')\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.get_row_slice(start, end, target = m2)\n m3 = m1.get_row_slice(start, end)\n m2.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.get_row_slice exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.get_row_slice exceeded threshold\"\n\ndef test_set_row_slice():\n m = 256\n n = 128\n start = 11\n end = 54\n\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(end-start, n)*10, dtype=np.float32, order='F')\n\n c = a.copy()\n c[start:end,:] = b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.set_row_slice(start, end, m2)\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.set_row_slice exceeded threshold\"\n\ndef test_transpose():\n m = 6\n n = 128\n\n a = np.array(np.random.rand(m, n)*10, 
dtype=np.float32, order='F')\n b = np.array(np.random.rand(n, m), dtype=np.float32, order='F')\n\n c = a.copy().T\n\n m = cm.CUDAMatrix(a)\n mt1 = cm.CUDAMatrix(b)\n m.transpose(target = mt1)\n mt2 = m.transpose()\n\n mt1.copy_to_host()\n mt2.copy_to_host()\n\n assert np.max(np.abs(c - mt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.transpose exceeded threshold\"\n assert np.max(np.abs(c - mt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.transpose exceeded threshold\"\n\ndef test_slice():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = np.array(a[:,32:64], order='F')\n\n m1 = cm.CUDAMatrix(a)\n m2 = m1.slice(32, 64)\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.slice exceeded threshold\"\n\n\ndef test_add_col_vec():\n m = 250\n n = 120\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a + b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.add_col_vec(m2, target = m3)\n m1.add_col_vec(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_col_vec exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_col_vec exceeded threshold\"\n\ndef test_add_col_mult():\n m = 256\n n = 128\n mult = np.pi\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a + mult * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.add_col_mult(m2, mult, target = m3)\n m1.add_col_mult(m2, mult)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_col_mult exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_col_mult exceeded threshold\"\n\ndef test_add_row_vec():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a + b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.add_row_vec(m2, target = m3)\n m1.add_row_vec(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_row_vec exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_row_vec exceeded threshold\"\n\ndef test_mult_by_col():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.mult_by_col(m2, target = m3)\n m1.mult_by_col(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult_by_col exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult_by_col exceeded threshold\"\n\ndef test_mult_by_row():\n m = 256\n n = 128\n a = 
np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.mult_by_row(m2, target = m3)\n m1.mult_by_row(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult_by_row exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult_by_row exceeded threshold\"\n\ndef test_div_by_col():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F') + 0.1\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a / b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.div_by_col(m2, target = m3)\n m1.div_by_col(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div_by_col exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div_by_col exceeded threshold\"\n\ndef test_div_by_row():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F') + 0.1\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a / b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.div_by_row(m2, target = m3)\n m1.div_by_row(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div_by_row exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div_by_row exceeded threshold\"\n\ndef test_sum():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n\n mult = 0.8\n c1 = np.atleast_2d(a.sum(0)) * mult\n c2 = np.atleast_2d(a.sum(1)).T\n\n m = cm.CUDAMatrix(a)\n mt1 = cm.CUDAMatrix(t1)\n mt2 = cm.CUDAMatrix(t2)\n\n m.sum(axis = 0, target = mt1, mult = mult)\n mt1r = m.sum(axis = 0, mult = mult)\n\n m.sum(axis = 1, target = mt2)\n mt2r = m.sum(axis = 1)\n\n mt1.copy_to_host()\n mt1r.copy_to_host()\n mt2.copy_to_host()\n mt2r.copy_to_host()\n\n assert np.max(np.abs(c1 - mt1.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c1 - mt1r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n\ndef test_sum_trans():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(1, m)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.rand(n, 1)*10, dtype=np.float32, order='F')\n\n c1 = np.atleast_2d(a.T.sum(0))\n c2 = np.atleast_2d(a.T.sum(1)).T\n\n m = cm.CUDAMatrix(a)\n m.set_trans(True)\n mt1 = cm.CUDAMatrix(t1)\n mt2 = cm.CUDAMatrix(t2)\n\n m.sum(axis = 0, target = mt1)\n mt1r = m.sum(axis = 0)\n\n m.sum(axis = 1, target = mt2)\n mt2r = m.sum(axis = 1)\n\n mt1.copy_to_host()\n 
mt1r.copy_to_host()\n mt2.copy_to_host()\n mt2r.copy_to_host()\n\n assert np.max(np.abs(c1 - mt1.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c1 - mt1r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n\ndef test_mean():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n\n c1 = np.atleast_2d(a.mean(0))\n c2 = np.atleast_2d(a.mean(1)).T\n\n m = cm.CUDAMatrix(a)\n mt1 = cm.CUDAMatrix(t1)\n mt2 = cm.CUDAMatrix(t2)\n\n m.mean(axis = 0, target = mt1)\n mt1r = m.mean(axis = 0)\n\n m.mean(axis = 1, target = mt2)\n mt2r = m.mean(axis = 1)\n\n mt1.copy_to_host()\n mt1r.copy_to_host()\n mt2.copy_to_host()\n mt2r.copy_to_host()\n\n assert np.max(np.abs(c1 - mt1.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c1 - mt1r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n\ndef test_add_sums():\n m = 256\n n = 128\n\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n\n mult = np.pi\n beta = 0.7\n\n c1 = beta * t1 + mult * np.atleast_2d(a.sum(1)).T\n c2 = t2 + np.atleast_2d(a.sum(0))\n\n m = cm.CUDAMatrix(a)\n mt1 = cm.CUDAMatrix(t1)\n mt2 = cm.CUDAMatrix(t2)\n\n mt1.add_sums(m, axis = 1, mult = np.pi, beta = beta)\n mt2.add_sums(m, axis = 0)\n\n mt1.copy_to_host()\n mt2.copy_to_host()\n\n assert np.max(np.abs(c1 - mt1.numpy_array)) < 10**-3, \"Error in CUDAMatrix.add_sums exceeded threshold\"\n assert np.max(np.abs(c2 - mt2.numpy_array)) < 10**-3, \"Error in CUDAMatrix.add_sums exceeded threshold\"\n\n\ndef test_less_than():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n v = 0.1\n\n r1 = 1 * (a < b)\n r2 = 1 * (a < v)\n\n da = cm.CUDAMatrix(a)\n db = cm.CUDAMatrix(b)\n dt1 = cm.CUDAMatrix(t1)\n dt2 = cm.CUDAMatrix(t2)\n\n da.less_than(db, target = dt1)\n da.less_than(v, target = dt2)\n da.less_than(db)\n\n da.copy_to_host()\n dt1.copy_to_host()\n dt2.copy_to_host()\n\n assert np.max(np.abs(r1 - da.numpy_array)) < 10**-4, \"Error in CUDAMatrix.less_than exceeded threshold\"\n assert np.max(np.abs(r1 - dt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.less_than exceeded threshold\"\n assert np.max(np.abs(r2 - dt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.less_than exceeded threshold\"\n\ndef test_greater_than():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.randn(m, 
n)*10, dtype=np.float32, order='F')\n v = 0.1\n\n r1 = 1 * (a > b)\n r2 = 1 * (a > v)\n\n da = cm.CUDAMatrix(a)\n db = cm.CUDAMatrix(b)\n dt1 = cm.CUDAMatrix(t1)\n dt2 = cm.CUDAMatrix(t2)\n\n da.greater_than(db, target = dt1)\n da.greater_than(v, target = dt2)\n da.greater_than(db)\n\n da.copy_to_host()\n dt1.copy_to_host()\n dt2.copy_to_host()\n\n assert np.max(np.abs(r1 - da.numpy_array)) < 10**-4, \"Error in CUDAMatrix.greater_than exceeded threshold\"\n assert np.max(np.abs(r1 - dt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.greater_than exceeded threshold\"\n assert np.max(np.abs(r2 - dt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.greater_than exceeded threshold\"\n\ndef test_minimum():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n v = 0.1\n\n r1 = np.minimum(a, b)\n r2 = np.minimum(a, v)\n\n da = cm.CUDAMatrix(a)\n db = cm.CUDAMatrix(b)\n dt1 = cm.CUDAMatrix(t1)\n dt2 = cm.CUDAMatrix(t2)\n\n da.minimum(db, target = dt1)\n da.minimum(v, target = dt2)\n da.minimum(db)\n\n da.copy_to_host()\n dt1.copy_to_host()\n dt2.copy_to_host()\n\n assert np.max(np.abs(r1 - da.numpy_array)) < 10**-4, \"Error in CUDAMatrix.minimum exceeded threshold\"\n assert np.max(np.abs(r1 - dt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.minimum exceeded threshold\"\n assert np.max(np.abs(r2 - dt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.minimum exceeded threshold\"\n\ndef test_maximum():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n v = 0.1\n\n r1 = np.maximum(a, b)\n r2 = np.maximum(a, v)\n\n da = cm.CUDAMatrix(a)\n db = cm.CUDAMatrix(b)\n dt1 = cm.CUDAMatrix(t1)\n dt2 = cm.CUDAMatrix(t2)\n\n da.maximum(db, target = dt1)\n da.maximum(v, target = dt2)\n da.maximum(db)\n\n da.copy_to_host()\n dt1.copy_to_host()\n dt2.copy_to_host()\n\n assert np.max(np.abs(r1 - da.numpy_array)) < 10**-4, \"Error in CUDAMatrix.maximum exceeded threshold\"\n assert np.max(np.abs(r1 - dt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.maximum exceeded threshold\"\n assert np.max(np.abs(r2 - dt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.maximum exceeded threshold\"\n\ndef test_minmax():\n m = 256\n n = 128\n for op in 'min', 'max', 'argmin', 'argmax':\n for sign in (1, -1):\n a = np.array(np.random.randn(m, n)*10*sign, dtype=np.float32, order='F')\n t0 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n\n r0 = np.atleast_2d(getattr(a, op)(0))\n r1 = np.atleast_2d(getattr(a, op)(1))\n\n da = cm.CUDAMatrix(a)\n dr10 = cm.CUDAMatrix(t0)\n dr11 = cm.CUDAMatrix(t1)\n\n getattr(da, op)(axis = 0, target = dr10)\n getattr(da, op)(axis = 1, target = dr11)\n dr20 = getattr(da, op)(axis = 0)\n dr21 = getattr(da, op)(axis = 1)\n\n dr10.copy_to_host()\n dr11.copy_to_host()\n dr20.copy_to_host()\n dr21.copy_to_host()\n\n assert np.max(np.abs(r0 - dr10.numpy_array)) < 10**-4, \"Error in CUDAMatrix.%s exceeded threshold\" % op\n assert np.max(np.abs(r1 - dr11.numpy_array.T)) < 10**-4, \"Error in CUDAMatrix.%s exceeded threshold\" % op\n assert 
np.max(np.abs(r0 - dr20.numpy_array)) < 10**-4, \"Error in CUDAMatrix.%s exceeded threshold\" % op\n assert np.max(np.abs(r1 - dr21.numpy_array.T)) < 10**-4, \"Error in CUDAMatrix.%s exceeded threshold\" % op\n\ndef test_sign():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n a[0,0] = 0.\n a[0,1] = -0.\n t = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = np.sign(a)\n\n m1 = cm.CUDAMatrix(a)\n m3 = cm.CUDAMatrix(t)\n\n m2 = m1.sign()\n m1.sign(target = m3)\n\n m2.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.sign exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.sign exceeded threshold\"\n\ndef test_sigmoid():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = 1. / (1. + np.exp(-a))\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.apply_sigmoid(target = m2)\n m1.apply_sigmoid()\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_sigmoid exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_sigmoid exceeded threshold\"\n\ndef test_tanh():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = np.tanh(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.apply_tanh(target = m2)\n m1.apply_tanh()\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_tanh exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_tanh exceeded threshold\"\n\ndef test_soft_threshold():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n alpha = 0.5\n c = np.sign(a) * np.maximum(0, np.abs(a) - alpha)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.apply_soft_threshold(alpha, target = m2)\n m1.apply_soft_threshold(alpha)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_soft_threshold exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_soft_threshold exceeded threshold\"\n\ndef test_log():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10+0.1, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*10+0.1, dtype=np.float32, order='F')\n\n c = np.log(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.log(m1, target = m2)\n cm.log(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in cudamat.log exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in cudamat.log exceeded threshold\"\n\ndef test_exp():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n\n c = np.exp(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.exp(m1, target = m2)\n cm.exp(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in cudamat.exp exceeded threshold\"\n assert 
np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in cudamat.exp exceeded threshold\"\n\ndef test_gamma():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*5, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*5, dtype=np.float32, order='F')\n\n from scipy.special import gamma\n c = gamma(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.gamma(m1, target = m2)\n cm.gamma(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-2, \"Error in cudamat.gamma exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-2, \"Error in cudamat.gamma exceeded threshold\"\n\ndef test_lgamma():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n from scipy.special import gammaln\n c = gammaln(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.lgamma(m1, target = m2)\n cm.lgamma(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-2, \"Error in cudamat.lgamma exceeded threshold \" + str(np.max(np.abs(c - m1.numpy_array)))\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-2, \"Error in cudamat.lgamma exceeded threshold\"\n\ndef test_sqrt():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*20, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n), dtype=np.float32, order='F')\n\n c = np.sqrt(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.sqrt(m1, target = m2)\n cm.sqrt(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in cudamat.sqrt exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in cudamat.sqrt exceeded threshold\"\n\ndef test_pow():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*20, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n), dtype=np.float32, order='F')\n p = 2\n\n c = a**p\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.pow(m1, p, target = m2)\n cm.pow(m1, p)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-3, \"Error in cudamat.pow exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-3, \"Error in cudamat.pow exceeded threshold\"\n\ndef test_pow_matrix():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*20, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n), dtype=np.float32, order='F')\n p = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n\n\n c = a**p\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n mp = cm.CUDAMatrix(p)\n cm.pow(m1, mp, target = m2)\n cm.pow(m1, mp)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-2, \"Error in cudamat.pow exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-2, \"Error in cudamat.pow exceeded threshold\"\n\ndef test_reciprocal():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10+10**-3, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = 1. 
/ a\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.reciprocal(target = m2)\n m1.reciprocal()\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.reciprocal exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.reciprocal exceeded threshold\"\n\ndef test_add_mult():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = a + np.pi * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.add_mult(m2, np.pi)\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_mult exceeded threshold\"\n\ndef test_subtract_mult():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = a - np.pi * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.subtract_mult(m2, np.pi)\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.subtract_mult exceeded threshold\"\n\ndef test_add():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(1.+np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a + b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.add(m2, target = m3)\n m1.add(m2)\n\n m3.copy_to_host()\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add exceeded threshold\"\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add exceeded threshold\"\n\ndef test_subtract():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(1.+np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a - b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.subtract(m2, target = m3)\n m1.subtract(m2)\n\n m3.copy_to_host()\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.subtract exceeded threshold\"\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.subtract exceeded threshold\"\n\ndef test_divide():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(1.+np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a / b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.divide(m2, target = m3)\n m1.divide(m2)\n\n m3.copy_to_host()\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div exceeded threshold\"\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div exceeded threshold\"\n\ndef test_mult():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.mult(m2, target = m3)\n m1.mult(m2)\n\n m3.copy_to_host()\n m1.copy_to_host()\n\n assert 
np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.multiply exceeded threshold\"\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.multiply exceeded threshold\"\n\ndef test_scalar_mult():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a * alpha\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(t)\n\n m1.mult(alpha, target = m2)\n m1.mult(alpha)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult exceeded threshold\"\n\ndef test_scalar_div():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a / alpha\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(t)\n\n m1.divide(alpha, target = m2)\n m1.divide(alpha)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.divide exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.divide exceeded threshold\"\n\ndef test_add_scalar():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a + alpha\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(t)\n\n m1.add(alpha, target = m2)\n m1.add(alpha)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_scalar exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_scalar exceeded threshold\"\n\ndef test_dot():\n m = 128\n k = 256\n n = 64\n a = np.array(np.random.randn(m, k)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n c = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n alpha = 2.\n beta = 0.3\n r = beta * c + alpha * np.dot(a, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(c)\n m3 = cm.dot(m1, m2, target = m3, alpha = alpha, beta = beta)\n m3.copy_to_host()\n\n assert np.max(np.abs(r - m3.numpy_array)) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\ndef test_dot_trans():\n m = 128\n k = 256\n n = 64\n a = np.array(np.random.randn(k, m)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n\n c = np.dot(a.T, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.set_trans(True);\n m3 = cm.dot(m1, m2)\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\ndef test_dot_vect():\n m = 128\n k = 256\n n = 1\n a = np.array(np.random.randn(m, k)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n A = cm.CUDAMatrix(a)\n B = cm.CUDAMatrix(b)\n\n c = np.dot(a, b)\n C = cm.dot(A, B)\n assert np.max(np.abs(c - C.asarray())) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\n c = np.dot(a.T, b[:m])\n C = cm.dot(A.T, B.slice(0, m))\n assert np.max(np.abs(c - C.asarray())) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\n c = np.dot(b.T, a.T)\n C = cm.dot(B.T, A.T)\n assert np.max(np.abs(c - C.asarray())) < 10**-2, 
\"Error in CUDAMatrix.dot exceeded threshold\"\n\n c = np.dot(b[:m].T, a)\n C = cm.dot(B.slice(0, m).reshape((1, m)), A)\n assert np.max(np.abs(c - C.asarray())) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\ndef test_add_dot():\n m = 128\n k = 256\n n = 64\n a = np.array(np.random.randn(m, k)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n c = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n mult = 2.1\n beta = 0.8\n res = beta * c + mult * np.dot(a, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(c)\n m3.add_dot(m1, m2, mult = mult, beta = beta)\n\n m3.copy_to_host()\n\n assert np.max(np.abs(res - m3.numpy_array)) < 10**-2, \"Error in CUDAMatrix.add_dot exceeded threshold\"\n\ndef test_vdot():\n m = 64\n n = 64\n a = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n\n true_res = np.vdot(a, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n\n res = cm.vdot(m1, m2)\n\n assert np.abs(res - true_res) < 10**-2, \"Error in CUDAMatrix.vdot exceeded threshold\"\n\ndef test_subtract_dot():\n m = 128\n k = 256\n n = 64\n a = np.array(np.random.randn(m, k)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n c = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n res = c - np.dot(a, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(c)\n m3.subtract_dot(m1, m2)\n\n m3.copy_to_host()\n\n assert np.max(np.abs(res - m3.numpy_array)) < 10**-2, \"Error in CUDAMatrix.subtract_dot exceeded threshold\"\n\ndef test_random():\n cm.CUDAMatrix.init_random(1)\n m1 = cm.CUDAMatrix(np.array(np.empty((128,256)), dtype=np.float32, order='F'))\n m2 = cm.CUDAMatrix(np.array(np.empty((128,256)), dtype=np.float32, order='F'))\n\n m1.fill_with_rand()\n m1.copy_to_host()\n m2.fill_with_randn()\n m2.copy_to_host()\n\n assert np.abs(np.mean(m1.numpy_array) - 0.5) < 10**-2, \"Error in CUDAMatrix.fill_with_rand threshold\"\n assert np.abs(np.mean(m2.numpy_array)) < 10**-2, \"Error in CUDAMatrix.fill_with_randn threshold\"\n\ndef test_euclid_norm():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n m = cm.CUDAMatrix(a)\n\n n1 = np.sqrt(np.sum(a**2))\n n2 = m.euclid_norm()\n\n assert np.abs(n1-n2) < 10**-2, \"Error in CUDAMatrix.euclid_norm exceeded threshold\"\n\ndef test_manhattan_norm():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n m = cm.CUDAMatrix(a)\n\n n1 = np.sum(np.abs(a), dtype=np.double)\n n2 = m.manhattan_norm()\n\n assert np.abs(n1-n2) < 2e-2, \"Error in CUDAMatrix.manhattan_norm exceeded threshold (%f != %f)\" % (n1, n2)\n\ndef test_allfinite():\n a = cm.empty((10, 20)).assign(1).divide(0) # NaN\n b = cm.empty((10, 20)).assign(1e20).mult(1e20) # Inf\n c = cm.empty((10, 20)).assign(1) # 1.0\n\n assert (not a.allfinite()) and (not b.allfinite()) and c.allfinite(), \"CUDAMatrix.allfinite does not work\"\n\ndef test_select_columns():\n m = 256\n n = 128\n k = 8\n\n s = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n i_l = [0, 1, 2, 3, 5, 10, 12, 20]\n i = np.array(i_l).T[np.newaxis, :]\n t = np.empty((m, k))\n\n s_d = cm.CUDAMatrix(s)\n i_d = cm.CUDAMatrix(i)\n t_d = cm.CUDAMatrix(t)\n\n s_d.select_columns(i_d, t_d)\n res = s[:,i_l]\n\n assert np.max(np.abs(res - t_d.asarray())) < 10**-4, \"Error in CUDAMatrix.select_columns 
exceeded threshold\"\n\n\ndef test_where():\n    m = 256\n    n = 128\n    a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n    z = np.zeros_like(a)\n    res = np.where(a > 0, a, z);\n\n    a_d = cm.CUDAMatrix(a)\n    z_d = cm.CUDAMatrix(z)\n    res_d = cm.empty(a_d.shape)\n    a_d.greater_than(0, res_d)\n    cm.where(res_d, a_d, z_d)\n    assert np.abs(res-res_d.asarray()).max() < 1e-2, \"Error in cudamat.where\"\n\n\ndef test_correlate():\n    m = 64\n    n = 32\n    km = 17\n    kn = 11\n\n    a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n    k = np.array(np.random.randn(km, kn)*10, dtype=np.float32, order='F')\n\n    res = np.zeros_like(a)\n    for i in range(len(a)):\n        for j in range(len(a[0])):\n            for h in range(-(km//2), km//2 + 1):\n                for w in range(-(kn//2), kn//2 + 1):\n                    if i+h >= 0 and i+h < m and j+w >= 0 and j+w < n:\n                        res[i][j] += a[i + h][j + w] * k[km//2 + h][kn//2 + w]\n\n    a_d = cm.CUDAMatrix(a)\n    k_d = cm.CUDAMatrix(k)\n\n    res_d = cm.correlate(a_d, k_d)\n    assert np.abs(res-res_d.asarray()).max() < 1e-2, \"Error in cudamat.correlate\"\n\n\nif __name__ == '__main__':\n    nose.runmodule()\n"
] | [
[
"numpy.dot",
"numpy.minimum",
"numpy.sqrt",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.mean",
"numpy.exp",
"numpy.where",
"numpy.log",
"scipy.special.gamma",
"numpy.random.rand",
"scipy.special.gammaln",
"numpy.tanh",
"numpy.array",
"numpy.sum",
"numpy.vdot",
"numpy.maximum",
"numpy.abs",
"numpy.sign",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.18",
"0.19"
],
"tensorflow": []
}
] |
shun60s/BipedalWalkerHardcore-Weights-Choice | [
"76a3df3585a13881f1754274b8ded73a054d551d"
] | [
"train.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n-----------------------------------------------------------------------------\n Copyright 2017 David Griffis\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-----------------------------------------------------------------------------\nChanged:\n Add args.entropy, args.value\n Add CONV6_Net\n Add second environment and its worker\n Add load two basis models\n Add CONV_Choice1_Net\n \n\"\"\"\n\nfrom __future__ import division\nfrom setproctitle import setproctitle as ptitle\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom environment import create_env\nfrom utils import ensure_shared_grads\nfrom model import * # change to import any models\nfrom player_util import Agent\nfrom torch.autograd import Variable\nimport gym\n\n\ndef train(rank, args, shared_model, optimizer, shared_bm1_model, shared_bm2_model):\n ptitle('Training Agent: {}'.format(rank))\n gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]\n torch.manual_seed(args.seed + rank)\n if gpu_id >= 0:\n torch.cuda.manual_seed(args.seed + rank)\n\n # add second environment\n if rank >= args.workers:\n print ('training agent of second environment', rank)\n env = create_env(args.env2, args)\n else:\n env = create_env(args.env, args)\n \n if optimizer is None:\n if args.optimizer == 'RMSprop':\n optimizer = optim.RMSprop(shared_model.parameters(), lr=args.lr)\n if args.optimizer == 'Adam':\n optimizer = optim.Adam(shared_model.parameters(), lr=args.lr)\n\n env.seed(args.seed + rank)\n player = Agent(None, env, args, None)\n player.gpu_id = gpu_id\n \n if args.model == 'CONV_Choice1':\n player.model = CONV_Choice1_Net(args.stack_frames, player.env.action_space, args.discrete_number, player.env.observation_space.shape[0]) # change\n if args.basis_model1 == 'CONV6':\n player.bm1_model = CONV6_Net(args.stack_frames, player.env.action_space, player.env.observation_space.shape[0]) # change\n if args.basis_model2 == 'CONV6':\n player.bm2_model = CONV6_Net(args.stack_frames, player.env.action_space, player.env.observation_space.shape[0]) # change\n \n \n \n player.state = player.env.reset()\n player.state = torch.from_numpy(player.state).float()\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n player.state = player.state.cuda()\n player.bm1_model = player.bm1_model.cuda()\n player.bm2_model = player.bm2_model.cuda()\n player.model = player.model.cuda()\n # \n ratio_entropy =args.entropy\n ratio_value = args.value\n \n # This is no train about two basis models\n player.bm1_model.eval() # eval()はdropoutやbatch normの on/offの切替です\n player.bm2_model.eval() # eval()はdropoutやbatch normの on/offの切替です\n player.model.train() # Sets the module in training mode.\n \n while True:\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n player.model.load_state_dict(shared_model.state_dict())\n player.bm1_model.load_state_dict(shared_bm1_model.state_dict())\n player.bm2_model.load_state_dict(shared_bm2_model.state_dict())\n else:\n player.model.load_state_dict(shared_model.state_dict())\n 
player.bm1_model.load_state_dict(shared_bm1_model.state_dict())\n player.bm2_model.load_state_dict(shared_bm2_model.state_dict())\n \n if player.done:\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n # use for CONV_Choice1_Net\n player.cx = Variable(torch.zeros(1, 128).cuda())\n player.hx = Variable(torch.zeros(1, 128).cuda())\n \n if args.basis_model1 == 'CONV6':\n player.bm1_cx1 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm1_hx1 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm1_cx2 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm1_hx2 = Variable(torch.zeros(\n 1,128).cuda())\n if args.basis_model2 == 'CONV6':\n player.bm2_cx1 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm2_hx1 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm2_cx2 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm2_hx2 = Variable(torch.zeros(\n 1,128).cuda())\n else:\n # use for CONV_Choice1_Net\n player.cx = Variable(torch.zeros(1, 128))\n player.hx = Variable(torch.zeros(1, 128))\n \n if args.basis_model1 == 'CONV6':\n player.bm1_cx1 = Variable(torch.zeros(1, 128))\n player.bm1_hx1 = Variable(torch.zeros(1, 128))\n player.bm1_cx2 = Variable(torch.zeros(1, 128))\n player.bm1_hx2 = Variable(torch.zeros(1, 128))\n if args.basis_model2 == 'CONV6':\n player.bm2_cx1 = Variable(torch.zeros(1, 128))\n player.bm2_hx1 = Variable(torch.zeros(1, 128))\n player.bm2_cx2 = Variable(torch.zeros(1, 128))\n player.bm2_hx2 = Variable(torch.zeros(1, 128))\n else:\n # use for CONV_Choice1_Net\n player.cx = Variable(player.cx.data)\n player.hx = Variable(player.hx.data)\n if args.basis_model1 == 'CONV6':\n player.bm1_cx1 = Variable(player.bm1_cx1.data)\n player.bm1_hx1 = Variable(player.bm1_hx1.data)\n player.bm1_cx2 = Variable(player.bm1_cx2.data)\n player.bm1_hx2 = Variable(player.bm1_hx2.data)\n if args.basis_model2 == 'CONV6':\n player.bm2_cx1 = Variable(player.bm2_cx1.data)\n player.bm2_hx1 = Variable(player.bm2_hx1.data)\n player.bm2_cx2 = Variable(player.bm2_cx2.data)\n player.bm2_hx2 = Variable(player.bm2_hx2.data)\n \n \n # try args.num_steps times\n for step in range(args.num_steps):\n \n player.action_train() # call action_train\n \n if player.done:\n break\n \n \n \n if player.done:\n player.eps_len = 0\n state = player.env.reset()\n player.state = torch.from_numpy(state).float()\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n player.state = player.state.cuda()\n \n # --- add\n if args.use_discrete_model:\n if player.env.observation_space.shape[0] == 28 and args.discrete_number == 4:\n \n state_out_loss = 0\n for i in range(len(player.loss_state_out)):\n state_out_loss = state_out_loss + player.loss_state_out[i]\n \n #\n player.model.zero_grad()\n state_out_loss.backward()\n ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)\n optimizer.step()\n player.clear_actions()\n \n \n else: # ---\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n R = torch.zeros(1, 1).cuda()\n else:\n R = torch.zeros(1, 1)\n \n if not player.done:\n state = player.state\n \n if args.use_discrete_model:\n # --- Only compute args.model's value ---\n if args.model == 'CONV_Choice1':\n state = state.unsqueeze(0)\n # value is critic\n value, _, _ = player.model(\n (Variable(state), (player.hx, player.cx)))\n else: # continouse model\n if args.basis_model1 == 'CONV6':\n value, _, _, _ = player.model(\n (Variable(state), (player.bm1_hx1, player.bm1_cx1, player.bm1_hx2, player.bm1_cx2)))\n \n R = value.data\n \n player.values.append(Variable(R))\n policy_loss = 0\n value_loss = 0\n R = Variable(R)\n if gpu_id 
>= 0:\n with torch.cuda.device(gpu_id):\n gae = torch.zeros(1, 1).cuda()\n else:\n gae = torch.zeros(1, 1)\n \n \n for i in reversed(range(len(player.rewards))):\n R = args.gamma * R + player.rewards[i]\n advantage = R - player.values[i]\n \n # Value Loss is ...\n value_loss = value_loss + 0.5 * advantage.pow(2)\n \n # Generalized Advantage Estimataion\n # print(player.rewards[i])\n # rewards + gamma* value[i+1] - value[i]\n delta_t = player.rewards[i] + args.gamma * \\\n player.values[i + 1].data - player.values[i].data\n \n gae = gae * args.gamma * args.tau + delta_t\n \n if args.use_discrete_model:\n # Policy Loss is ....\n policy_loss = policy_loss - \\\n (player.log_probs[i].sum() * Variable(gae)) # Policy Gradient Theorem ?\n else: # continouse model\n # Policy Loss is ....\n policy_loss = policy_loss - \\\n (player.log_probs[i].sum() * Variable(gae)) - \\\n (ratio_entropy * player.entropies[i].sum())\n \n player.model.zero_grad()\n \n # --- backward ---\n (policy_loss + ratio_value * value_loss).backward()\n ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)\n optimizer.step()\n player.clear_actions()\n"
] | [
[
"torch.cuda.manual_seed",
"torch.zeros",
"torch.manual_seed",
"torch.from_numpy",
"torch.cuda.device",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shahad-bit/Disaster-Response-Pipeline | [
"76a86db14845c8d8ba8d87c81112580c96b2b0d4"
] | [
"data/process_data.py"
] | [
"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"Load disaster messages and categories from csv files.\n \n Arguments:\n messages_filepath {String} -- disaster message file path\n categories_filepath {String} -- disaster categories file path\n \n Returns:\n pandas dataframe -- merged disaster data\n \"\"\" \n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = messages.merge(categories, on='id')\n return df\n\n\n\ndef clean_data(df):\n \"\"\"Preprocess data\n \n Arguments:\n df {pandas dataframe} -- disaster data\n \"\"\" \n # create a dataframe of the 36 individual category columns\n categories = df['categories'].str.split(';', expand=True)\n\n # select the first row of the categories dataframe\n row = categories.iloc[0]\n\n # use this row to extract a list of new column names for categories.\n # one way is to apply a lambda function that takes everything \n # up to the second to last character of each string with slicing\n category_colnames = [val.split('-')[0] for val in row]\n print(category_colnames)\n\n # rename the columns of `categories`\n categories.columns = category_colnames\n\n for column in categories:\n # set each value to be the last character of the string\n categories[column] = categories[column].str[-1]\n # convert column from string to numeric\n categories[column] = categories[column].astype(int)\n\n # drop the original categories column from `df`\n df.drop(['categories'], axis=1, inplace=True)\n # concatenate the original dataframe with the new `categories` dataframe\n df = pd.concat([df, categories], axis=1)\n # drop duplicates\n df.drop_duplicates(inplace=True)\n return df\n\n\n\n\ndef save_data(df, database_filename):\n \"\"\"Store the data in mysql db.\n \n Arguments:\n df {pandas dataframe} -- disaster data\n database_filename {String} -- path to the db\n \"\"\" \n engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql('disaster_response', engine, index=False) \n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"pandas.concat",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
adesgautam/objdet | [
"7154bd5035dd51de8a49b7ae59b65277a1727263"
] | [
"yolov3/yolo_detection/yolo_files/Utils/yolo3/model.py"
] | [
"\"\"\"YOLO_v3 Model Defined in Keras.\"\"\"\n\nfrom functools import wraps\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import control_flow_ops\nimport keras\nfrom keras import backend as K\nfrom keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.regularizers import l2\n\nfrom ..yolo3.utils import compose\n\n\n@wraps(Conv2D)\ndef DarknetConv2D(*args, **kwargs):\n \"\"\"Wrapper to set Darknet parameters for Convolution2D.\"\"\"\n darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}\n darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'\n darknet_conv_kwargs.update(kwargs)\n return Conv2D(*args, **darknet_conv_kwargs)\n\ndef DarknetConv2D_BN_Leaky(*args, **kwargs):\n \"\"\"Darknet Convolution2D followed by BatchNormalization and LeakyReLU.\"\"\"\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))\n\ndef resblock_body(x, num_filters, num_blocks):\n '''A series of resblocks starting with a downsampling Convolution2D'''\n # Darknet uses left and top padding instead of 'same' mode\n x = ZeroPadding2D(((1,0),(1,0)))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)\n for i in range(num_blocks):\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)\n x = Add()([x,y])\n return x\n\ndef darknet_body(x):\n '''Darknent body having 52 Convolution2D layers'''\n x = DarknetConv2D_BN_Leaky(32, (3,3))(x)\n x = resblock_body(x, 64, 1)\n x = resblock_body(x, 128, 2)\n x = resblock_body(x, 256, 8)\n x = resblock_body(x, 512, 8)\n x = resblock_body(x, 1024, 4)\n return x\n\ndef make_last_layers(x, num_filters, out_filters):\n '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D(out_filters, (1,1)))(x)\n return x, y\n\n\ndef yolo_body(inputs, num_anchors, num_classes):\n \"\"\"Create YOLO_V3 model CNN body in Keras.\"\"\"\n darknet = Model(inputs, darknet_body(inputs))\n x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(256, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[152].output])\n x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[92].output])\n x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))\n\n return Model(inputs, [y1,y2,y3])\n\ndef tiny_yolo_body(inputs, num_anchors, num_classes):\n '''Create Tiny YOLO_v3 model CNN body in keras.'''\n x1 = compose(\n DarknetConv2D_BN_Leaky(16, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(32, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(64, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(128, 
(3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(256, (3,3)))(inputs)\n x2 = compose(\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(512, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),\n DarknetConv2D_BN_Leaky(1024, (3,3)),\n DarknetConv2D_BN_Leaky(256, (1,1)))(x1)\n y1 = compose(\n DarknetConv2D_BN_Leaky(512, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)\n\n x2 = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x2)\n y2 = compose(\n Concatenate(),\n DarknetConv2D_BN_Leaky(256, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])\n\n return Model(inputs, [y1,y2])\n\n\ndef yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):\n \"\"\"Convert final layer features to bounding box parameters.\"\"\"\n num_anchors = len(anchors)\n # Reshape to batch, height, width, num_anchors, box_params.\n anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])\n\n grid_shape = K.shape(feats)[1:3] # height, width\n grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),\n [1, grid_shape[1], 1, 1])\n grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),\n [grid_shape[0], 1, 1, 1])\n grid = K.concatenate([grid_x, grid_y])\n grid = K.cast(grid, K.dtype(feats))\n\n feats = K.reshape(\n feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])\n\n # Adjust preditions to each spatial grid point and anchor size.\n box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))\n box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))\n box_confidence = K.sigmoid(feats[..., 4:5])\n box_class_probs = K.sigmoid(feats[..., 5:])\n\n if calc_loss == True:\n return grid, feats, box_xy, box_wh\n return box_xy, box_wh, box_confidence, box_class_probs\n\n\ndef yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):\n '''Get corrected boxes'''\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n input_shape = K.cast(input_shape, K.dtype(box_yx))\n image_shape = K.cast(image_shape, K.dtype(box_yx))\n new_shape = K.round(image_shape * K.min(input_shape/image_shape))\n offset = (input_shape-new_shape)/2./input_shape\n scale = input_shape/new_shape\n box_yx = (box_yx - offset) * scale\n box_hw *= scale\n\n box_mins = box_yx - (box_hw / 2.)\n box_maxes = box_yx + (box_hw / 2.)\n boxes = K.concatenate([\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2] # x_max\n ])\n\n # Scale boxes back to original image shape.\n boxes *= K.concatenate([image_shape, image_shape])\n return boxes\n\n\ndef yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):\n '''Process Conv layer output'''\n box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,\n anchors, num_classes, input_shape)\n boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)\n boxes = K.reshape(boxes, [-1, 4])\n box_scores = box_confidence * box_class_probs\n box_scores = K.reshape(box_scores, [-1, num_classes])\n return boxes, box_scores\n\n\ndef yolo_eval(yolo_outputs,\n anchors,\n num_classes,\n image_shape,\n max_boxes=20,\n score_threshold=.6,\n iou_threshold=.5):\n \"\"\"Evaluate YOLO model on given input and return filtered boxes.\"\"\"\n num_layers = len(yolo_outputs)\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], 
[1,2,3]] # default setting\n input_shape = K.shape(yolo_outputs[0])[1:3] * 32\n boxes = []\n box_scores = []\n for l in range(num_layers):\n _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, image_shape)\n boxes.append(_boxes)\n box_scores.append(_box_scores)\n boxes = K.concatenate(boxes, axis=0)\n box_scores = K.concatenate(box_scores, axis=0)\n\n mask = box_scores >= score_threshold\n max_boxes_tensor = K.constant(max_boxes, dtype='int32')\n boxes_ = []\n scores_ = []\n classes_ = []\n for c in range(num_classes):\n # TODO: use keras backend instead of tf.\n class_boxes = tf.boolean_mask(boxes, mask[:, c])\n class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])\n nms_index = tf.image.non_max_suppression(\n class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)\n class_boxes = K.gather(class_boxes, nms_index)\n class_box_scores = K.gather(class_box_scores, nms_index)\n classes = K.ones_like(class_box_scores, 'int32') * c\n boxes_.append(class_boxes)\n scores_.append(class_box_scores)\n classes_.append(classes)\n boxes_ = K.concatenate(boxes_, axis=0)\n scores_ = K.concatenate(scores_, axis=0)\n classes_ = K.concatenate(classes_, axis=0)\n\n return boxes_, scores_, classes_\n\n\ndef preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):\n '''Preprocess true boxes to training input format\n\n Parameters\n ----------\n true_boxes: array, shape=(m, T, 5)\n Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.\n input_shape: array-like, hw, multiples of 32\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n\n Returns\n -------\n y_true: list of array, shape like yolo_outputs, xywh are reletive value\n\n '''\n assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'\n num_layers = len(anchors)//3 # default setting\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n\n true_boxes = np.array(true_boxes, dtype='float32')\n input_shape = np.array(input_shape, dtype='int32')\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]\n\n m = true_boxes.shape[0]\n grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]\n y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),\n dtype='float32') for l in range(num_layers)]\n\n # Expand dim to apply broadcasting.\n anchors = np.expand_dims(anchors, 0)\n anchor_maxes = anchors / 2.\n anchor_mins = -anchor_maxes\n valid_mask = boxes_wh[..., 0]>0\n\n for b in range(m):\n # Discard zero rows.\n wh = boxes_wh[b, valid_mask[b]]\n if len(wh)==0: continue\n # Expand dim to apply broadcasting.\n wh = np.expand_dims(wh, -2)\n box_maxes = wh / 2.\n box_mins = -box_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # Find best anchor for each true box\n best_anchor = np.argmax(iou, axis=-1)\n\n for t, n in enumerate(best_anchor):\n for l in range(num_layers):\n if n in anchor_mask[l]:\n i = 
np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')\n j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')\n k = anchor_mask[l].index(n)\n c = true_boxes[b,t, 4].astype('int32')\n y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]\n y_true[l][b, j, i, k, 4] = 1\n y_true[l][b, j, i, k, 5+c] = 1\n\n return y_true\n\n\ndef box_iou(b1, b2):\n '''Return iou tensor\n\n Parameters\n ----------\n b1: tensor, shape=(i1,...,iN, 4), xywh\n b2: tensor, shape=(j, 4), xywh\n\n Returns\n -------\n iou: tensor, shape=(i1,...,iN, j)\n\n '''\n\n # Expand dim to apply broadcasting.\n b1 = K.expand_dims(b1, -2)\n b1_xy = b1[..., :2]\n b1_wh = b1[..., 2:4]\n b1_wh_half = b1_wh/2.\n b1_mins = b1_xy - b1_wh_half\n b1_maxes = b1_xy + b1_wh_half\n\n # Expand dim to apply broadcasting.\n b2 = K.expand_dims(b2, 0)\n b2_xy = b2[..., :2]\n b2_wh = b2[..., 2:4]\n b2_wh_half = b2_wh/2.\n b2_mins = b2_xy - b2_wh_half\n b2_maxes = b2_xy + b2_wh_half\n\n intersect_mins = K.maximum(b1_mins, b2_mins)\n intersect_maxes = K.minimum(b1_maxes, b2_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n iou = intersect_area / (b1_area + b2_area - intersect_area)\n\n return iou\n\n\ndef yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):\n '''Return yolo_loss tensor\n\n Parameters\n ----------\n yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body\n y_true: list of array, the output of preprocess_true_boxes\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n ignore_thresh: float, the iou threshold whether to ignore object confidence loss\n\n Returns\n -------\n loss: tensor, shape=(1,)\n\n '''\n num_layers = len(anchors)//3 # default setting\n yolo_outputs = args[:num_layers]\n y_true = args[num_layers:]\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))\n grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]\n loss = 0\n m = K.shape(yolo_outputs[0])[0] # batch size, tensor\n mf = K.cast(m, K.dtype(yolo_outputs[0]))\n\n for l in range(num_layers):\n object_mask = y_true[l][..., 4:5]\n true_class_probs = y_true[l][..., 5:]\n\n grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)\n pred_box = K.concatenate([pred_xy, pred_wh])\n\n # Darknet raw box to calculate loss.\n raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid\n raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])\n raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]\n\n # Find ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, 'bool')\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))\n return b+1, ignore_mask\n _, ignore_mask = control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])\n ignore_mask = 
ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n # K.binary_crossentropy is helpful to avoid exp overflow.\n xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)\n wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])\n confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \\\n (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask\n class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)\n\n xy_loss = K.sum(xy_loss) / mf\n wh_loss = K.sum(wh_loss) / mf\n confidence_loss = K.sum(confidence_loss) / mf\n class_loss = K.sum(class_loss) / mf\n loss += xy_loss + wh_loss + confidence_loss + class_loss\n if print_loss:\n loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')\n return loss\n"
] | [
[
"tensorflow.boolean_mask",
"numpy.expand_dims",
"numpy.maximum",
"numpy.minimum",
"tensorflow.image.non_max_suppression",
"tensorflow.python.ops.control_flow_ops.while_loop",
"numpy.argmax",
"numpy.floor",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
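In `preprocess_true_boxes` above, each ground-truth box is matched to the anchor whose width/height overlap it best, with box and anchor both treated as if centred at the origin. Below is a minimal NumPy sketch of just that matching step, kept separate from the Keras code; the box and anchor sizes in the example are made up for illustration.

```python
import numpy as np

def best_anchor_indices(boxes_wh, anchors):
    """For each ground-truth (w, h), return the index of the anchor with the
    highest IoU when box and anchor are both centred at the origin."""
    wh = np.expand_dims(np.asarray(boxes_wh, dtype='float32'), -2)      # (T, 1, 2)
    anchors = np.expand_dims(np.asarray(anchors, dtype='float32'), 0)   # (1, N, 2)

    # Intersection of two origin-centred rectangles: min of the half-extents.
    intersect_mins = np.maximum(-wh / 2., -anchors / 2.)
    intersect_maxes = np.minimum(wh / 2., anchors / 2.)
    intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]        # (T, N)

    box_area = wh[..., 0] * wh[..., 1]                                  # (T, 1)
    anchor_area = anchors[..., 0] * anchors[..., 1]                     # (1, N)
    iou = intersect_area / (box_area + anchor_area - intersect_area)
    return np.argmax(iou, axis=-1)

# Made-up sizes (pixels): two boxes matched against three anchors.
print(best_anchor_indices([[30, 60], [200, 180]],
                          [[10, 13], [33, 58], [156, 198]]))   # -> [1 2]
```

The selected index is then looked up in `anchor_mask` to decide which output scale and which slot of `y_true` the box is written into.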
dipjyoti92/WaveRNN | [
"43c170dac7f6f27697fa4f04d44731f744c27fb4"
] | [
"gen_tacotron.py"
] | [
"import torch\nfrom models.fatchord_version import WaveRNN\nimport hparams as hp\nfrom utils.text.symbols import symbols\nfrom utils.paths import Paths\nfrom models.tacotron import Tacotron\nimport argparse\nfrom utils.text import text_to_sequence\nfrom utils.display import save_attention, simple_table\n\nif __name__ == \"__main__\" :\n\n # Parse Arguments\n parser = argparse.ArgumentParser(description='TTS Generator')\n parser.add_argument('--input_text', '-i', type=str, help='[string] Type in something here and TTS will generate it!')\n parser.add_argument('--batched', '-b', dest='batched', action='store_true', help='Fast Batched Generation')\n parser.add_argument('--unbatched', '-u', dest='batched', action='store_false', help='Slow Unbatched Generation')\n parser.add_argument('--target', '-t', type=int, help='[int] number of samples in each batch index')\n parser.add_argument('--overlap', '-o', type=int, help='[int] number of crossover samples')\n parser.add_argument('--weights_path', '-w', type=str, help='[string/path] Load in different Tacotron Weights')\n parser.add_argument('--save_attention', '-a', dest='save_attn', action='store_true', help='Save Attention Plots')\n parser.set_defaults(batched=hp.voc_gen_batched)\n parser.set_defaults(target=hp.voc_target)\n parser.set_defaults(overlap=hp.voc_overlap)\n parser.set_defaults(input_text=None)\n parser.set_defaults(weights_path=None)\n parser.set_defaults(save_attention=False)\n args = parser.parse_args()\n\n batched = args.batched\n target = args.target\n overlap = args.overlap\n input_text = args.input_text\n weights_path = args.weights_path\n save_attn = args.save_attention\n\n paths = Paths(hp.data_path, hp.voc_model_id, hp.tts_model_id)\n\n print('\\nInitialising WaveRNN Model...\\n')\n\n # Instantiate WaveRNN Model\n voc_model = WaveRNN(rnn_dims=hp.voc_rnn_dims,\n fc_dims=hp.voc_fc_dims,\n bits=hp.bits,\n pad=hp.voc_pad,\n upsample_factors=hp.voc_upsample_factors,\n feat_dims=hp.num_mels,\n compute_dims=hp.voc_compute_dims,\n res_out_dims=hp.voc_res_out_dims,\n res_blocks=hp.voc_res_blocks,\n hop_length=hp.hop_length,\n sample_rate=hp.sample_rate,\n mode=hp.voc_mode).cuda()\n\n voc_model.restore(paths.voc_latest_weights)\n\n print('\\nInitialising Tacotron Model...\\n')\n\n # Instantiate Tacotron Model\n tts_model = Tacotron(embed_dims=hp.tts_embed_dims,\n num_chars=len(symbols),\n encoder_dims=hp.tts_encoder_dims,\n decoder_dims=hp.tts_decoder_dims,\n n_mels=hp.num_mels,\n fft_bins=hp.num_mels,\n postnet_dims=hp.tts_postnet_dims,\n encoder_K=hp.tts_encoder_K,\n lstm_dims=hp.tts_lstm_dims,\n postnet_K=hp.tts_postnet_K,\n num_highways=hp.tts_num_highways,\n dropout=hp.tts_dropout).cuda()\n\n tts_restore_path = weights_path if weights_path else paths.tts_latest_weights\n tts_model.restore(tts_restore_path)\n\n if input_text :\n inputs = [text_to_sequence(input_text.strip(), hp.tts_cleaner_names)]\n else :\n with open('sentences.txt') as f :\n inputs = [text_to_sequence(l.strip(), hp.tts_cleaner_names) for l in f]\n\n voc_k = voc_model.get_step() // 1000\n tts_k = tts_model.get_step() // 1000\n\n simple_table([('WaveRNN', str(voc_k) + 'k'),\n ('Tacotron', str(tts_k) + 'k'),\n ('r', tts_model.r.item()),\n ('Generation Mode', 'Batched' if batched else 'Unbatched'),\n ('Target Samples', target if batched else 'N/A'),\n ('Overlap Samples', overlap if batched else 'N/A')])\n\n for i, x in enumerate(inputs, 1) :\n\n print(f'\\n| Generating {i}/{len(inputs)}')\n _, m, attention = tts_model.generate(x)\n\n if input_text :\n # save_path = 
f'{paths.tts_output}__input_{input_text[:10]}_{tts_k}k.wav'\n save_path = f'{paths.tts_output}output.wav'\n else :\n save_path = f'{paths.tts_output}{i}_batched{str(batched)}_{tts_k}k.wav'\n\n if save_attn : save_attention(attention, save_path)\n\n m = torch.tensor(m).unsqueeze(0)\n m = (m + 4) / 8\n\n voc_model.generate(m, save_path, batched, hp.voc_target, hp.voc_overlap, hp.mu_law)\n\n print('\\n\\nDone.\\n')\n"
] | [
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
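A subtlety in the `gen_tacotron.py` argument parsing above: `--save_attention` is registered with `dest='save_attn'`, while the script later reads `args.save_attention`, an attribute that only exists because of the separate `parser.set_defaults(save_attention=False)` call. As a result, passing `-a` never changes the value the script actually uses. The standalone argparse sketch below (not part of the repository) demonstrates that behaviour.

```python
import argparse

parser = argparse.ArgumentParser()
# dest='save_attn' means the flag populates args.save_attn, not args.save_attention.
parser.add_argument('--save_attention', '-a', dest='save_attn', action='store_true')
# set_defaults with an unrelated name simply adds a constant attribute to the namespace.
parser.set_defaults(save_attention=False)

args = parser.parse_args(['-a'])
print(args.save_attn)        # True  -- set by the -a flag
print(args.save_attention)   # False -- always the set_defaults value
```

Reading `args.save_attn` instead (or registering the flag without a custom `dest`) would make the `-a` switch effective.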
roycezhou/Anomaly-detection-and-classification-with-deep-learning | [
"12b26f7c6f97a0a5305c653ab36b5272f94696fa",
"12b26f7c6f97a0a5305c653ab36b5272f94696fa"
] | [
"src/anomaly_detection/knn/predict.py",
"src/anomaly_detection/dagmm/solver.py"
] | [
"import sys\r\nimport numpy as np\r\nfrom itertools import product\r\nimport torchvision.transforms as transforms\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nfrom Utils.transform import *\r\nfrom Utils.pillowhelper import *\r\n\r\n\r\ndef rowcolumn2coor(row, col, patch_size):\r\n \"\"\" Map row column number to pillow image coordinates: (left, upper, right, lower)\r\n \"\"\"\r\n left = col * patch_size\r\n upper = row * patch_size\r\n right = (col + 1) * patch_size\r\n lower = (row + 1) * patch_size\r\n return (left, upper, right, lower)\r\n\r\n\r\ndef main_road_knn(image, feature_extractor, z_list, thresh, num_patch, patch_ignore, patch_size, flag_equalizer=True, img_resize=224, flag_cuda=True):\r\n z_list_test = get_latent_vector_list_test(image, feature_extractor, num_patch, patch_ignore, patch_size, flag_equalizer, img_resize, flag_cuda)\r\n detected = detect_anomaly_knn(z_list, z_list_test, thresh, num_patch, patch_ignore)\r\n return detected\r\n\r\n\r\ndef get_latent_vector_list_test(image, feature_extractor, num_patch, patch_ignore, patch_size, flag_equalizer, img_resize, flag_cuda): \r\n # Extraction\r\n z_list_patches = []\r\n for i, (row, col) in enumerate(product(range(num_patch), range(num_patch))):\r\n if patch_ignore and (row, col) in patch_ignore:\r\n print('skip {}'.format((row, col)))\r\n continue\r\n print('compute {}'.format((row, col)))\r\n tmp_coor = rowcolumn2coor(row, col, patch_size)\r\n # Apply transformer\r\n tmp_transforms = transforms.Compose([\r\n EqualizerCroppedGrey(flag_equalizer, tmp_coor, img_resize),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]),\r\n RepeatChannel(3)\r\n ])\r\n tmp_patch = tmp_transforms(image)\r\n tmp_patch = tmp_patch.unsqueeze(0)\r\n if flag_cuda:\r\n tmp_patch = tmp_patch.cuda()\r\n tmp_z = feature_extractor(tmp_patch).detach().cpu().numpy()\r\n z_list_patches.append(tmp_z)\r\n \r\n tmp_transforms = transforms.Compose([\r\n EqualizerCroppedGrey(flag_equalizer, None, img_resize),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]),\r\n RepeatChannel(3)\r\n ])\r\n\r\n tmp_patch = tmp_transforms(image)\r\n tmp_patch = tmp_patch.unsqueeze(0)\r\n if flag_cuda:\r\n tmp_patch = tmp_patch.cuda()\r\n tmp_z = feature_extractor(tmp_patch).detach().cpu().numpy()\r\n z_list_patches.append(tmp_z)\r\n return z_list_patches\r\n\r\n\r\ndef detect_anomaly_knn(z_list, z_list_test, thresh, num_patch, patch_ignore):\r\n counter = 0\r\n detected = []\r\n for i, (row, col) in enumerate(product(range(num_patch), range(num_patch))):\r\n if patch_ignore and (row, col) in patch_ignore:\r\n print('skip {}'.format((row, col)))\r\n continue\r\n print('compute {}'.format((row, col)))\r\n score = np.mean(cosine_similarity(z_list[counter], z_list_test[counter]), axis=0)\r\n if score[0] < thresh[counter]:\r\n detected.append({'index': counter, 'row': row, 'col': col, 'score': score[0]})\r\n counter+=1\r\n score = np.mean(cosine_similarity(z_list[counter], z_list_test[counter]), axis=0)\r\n if score[0] < thresh[counter]:\r\n detected.append({'index': counter, 'row': None, 'col': None, 'score': score[0]})\r\n return detected",
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport os\r\nimport time\r\nimport datetime\r\nfrom torch.autograd import grad\r\nfrom torch.autograd import Variable\r\nfrom model import *\r\nimport matplotlib.pyplot as plt\r\nfrom utils import *\r\nfrom data_loader import *\r\nimport IPython\r\nfrom tqdm import tqdm\r\n\r\nclass Solver(object):\r\n DEFAULTS = {} \r\n def __init__(self, data_loader, config):\r\n # Data loader\r\n self.__dict__.update(Solver.DEFAULTS, **config)\r\n self.data_loader = data_loader\r\n\r\n # Build tensorboard if use\r\n self.build_model()\r\n if self.use_tensorboard:\r\n self.build_tensorboard()\r\n\r\n # Start with trained model\r\n if self.pretrained_model:\r\n self.load_pretrained_model()\r\n\r\n def build_model(self):\r\n # Define model\r\n self.dagmm = DaGMM(self.gmm_k)\r\n\r\n # Optimizers\r\n self.optimizer = torch.optim.Adam(self.dagmm.parameters(), lr=self.lr)\r\n\r\n # Print networks\r\n self.print_network(self.dagmm, 'DaGMM')\r\n\r\n if torch.cuda.is_available():\r\n self.dagmm.cuda()\r\n\r\n def print_network(self, model, name):\r\n num_params = 0\r\n for p in model.parameters():\r\n num_params += p.numel()\r\n print(name)\r\n print(model)\r\n print(\"The number of parameters: {}\".format(num_params))\r\n\r\n def load_pretrained_model(self):\r\n self.dagmm.load_state_dict(torch.load(os.path.join(\r\n self.model_save_path, '{}_dagmm.pth'.format(self.pretrained_model))))\r\n\r\n print(\"phi\", self.dagmm.phi,\"mu\",self.dagmm.mu, \"cov\",self.dagmm.cov)\r\n\r\n print('loaded trained models (step: {})..!'.format(self.pretrained_model))\r\n\r\n def build_tensorboard(self):\r\n from logger import Logger\r\n self.logger = Logger(self.log_path)\r\n\r\n def reset_grad(self):\r\n self.dagmm.zero_grad()\r\n\r\n def to_var(self, x, volatile=False):\r\n if torch.cuda.is_available():\r\n x = x.cuda()\r\n return Variable(x, volatile=volatile)\r\n\r\n def train(self):\r\n iters_per_epoch = len(self.data_loader)\r\n\r\n # Start with trained model if exists\r\n if self.pretrained_model:\r\n start = int(self.pretrained_model.split('_')[0])\r\n else:\r\n start = 0\r\n\r\n # Start training\r\n iter_ctr = 0\r\n start_time = time.time()\r\n\r\n\r\n\r\n self.ap_global_train = np.array([0,0,0])\r\n for e in range(start, self.num_epochs):\r\n for i, (input_data, labels) in enumerate(tqdm(self.data_loader)):\r\n iter_ctr += 1\r\n start = time.time()\r\n\r\n input_data = self.to_var(input_data)\r\n\r\n total_loss,sample_energy, recon_error, cov_diag = self.dagmm_step(input_data)\r\n # Logging\r\n loss = {}\r\n loss['total_loss'] = total_loss.data.item()\r\n loss['sample_energy'] = sample_energy.item()\r\n loss['recon_error'] = recon_error.item()\r\n loss['cov_diag'] = cov_diag.item()\r\n\r\n\r\n\r\n # Print out log info\r\n if (i+1) % self.log_step == 0:\r\n elapsed = time.time() - start_time\r\n total_time = ((self.num_epochs*iters_per_epoch)-(e*iters_per_epoch+i)) * elapsed/(e*iters_per_epoch+i+1)\r\n epoch_time = (iters_per_epoch-i)* elapsed/(e*iters_per_epoch+i+1)\r\n \r\n epoch_time = str(datetime.timedelta(seconds=epoch_time))\r\n total_time = str(datetime.timedelta(seconds=total_time))\r\n elapsed = str(datetime.timedelta(seconds=elapsed))\r\n\r\n lr_tmp = []\r\n for param_group in self.optimizer.param_groups:\r\n lr_tmp.append(param_group['lr'])\r\n tmplr = np.squeeze(np.array(lr_tmp))\r\n\r\n log = \"Elapsed {}/{} -- {} , Epoch [{}/{}], Iter [{}/{}], lr {}\".format(\r\n elapsed,epoch_time,total_time, e+1, 
self.num_epochs, i+1, iters_per_epoch, tmplr)\r\n\r\n for tag, value in loss.items():\r\n log += \", {}: {:.4f}\".format(tag, value)\r\n\r\n IPython.display.clear_output()\r\n print(log)\r\n\r\n if self.use_tensorboard:\r\n for tag, value in loss.items():\r\n self.logger.scalar_summary(tag, value, e * iters_per_epoch + i + 1)\r\n else:\r\n plt_ctr = 1\r\n if not hasattr(self,\"loss_logs\"):\r\n self.loss_logs = {}\r\n for loss_key in loss:\r\n self.loss_logs[loss_key] = [loss[loss_key]]\r\n plt.subplot(2,2,plt_ctr)\r\n plt.plot(np.array(self.loss_logs[loss_key]), label=loss_key)\r\n plt.legend()\r\n plt_ctr += 1\r\n else:\r\n for loss_key in loss:\r\n self.loss_logs[loss_key].append(loss[loss_key])\r\n plt.subplot(2,2,plt_ctr)\r\n plt.plot(np.array(self.loss_logs[loss_key]), label=loss_key)\r\n plt.legend()\r\n plt_ctr += 1\r\n\r\n plt.show()\r\n\r\n print(\"phi\", self.dagmm.phi,\"mu\",self.dagmm.mu, \"cov\",self.dagmm.cov)\r\n # Save model checkpoints\r\n if (i+1) % self.model_save_step == 0:\r\n torch.save(self.dagmm.state_dict(),\r\n os.path.join(self.model_save_path, '{}_{}_dagmm.pth'.format(e+1, i+1)))\r\n\r\n def dagmm_step(self, input_data):\r\n self.dagmm.train()\r\n enc, dec, z, gamma = self.dagmm(input_data)\r\n\r\n total_loss, sample_energy, recon_error, cov_diag = self.dagmm.loss_function(input_data, dec, z, gamma, self.lambda_energy, self.lambda_cov_diag)\r\n\r\n self.reset_grad()\r\n total_loss.backward()\r\n\r\n torch.nn.utils.clip_grad_norm_(self.dagmm.parameters(), 5)\r\n self.optimizer.step()\r\n\r\n return total_loss,sample_energy, recon_error, cov_diag\r\n\r\n def test(self):\r\n print(\"======================TEST MODE======================\")\r\n self.dagmm.eval()\r\n self.data_loader.dataset.mode=\"train\"\r\n\r\n N = 0\r\n mu_sum = 0\r\n cov_sum = 0\r\n gamma_sum = 0\r\n\r\n for it, (input_data, labels) in enumerate(self.data_loader):\r\n input_data = self.to_var(input_data)\r\n enc, dec, z, gamma = self.dagmm(input_data)\r\n phi, mu, cov = self.dagmm.compute_gmm_params(z, gamma)\r\n \r\n batch_gamma_sum = torch.sum(gamma, dim=0)\r\n \r\n gamma_sum += batch_gamma_sum\r\n mu_sum += mu * batch_gamma_sum.unsqueeze(-1) # keep sums of the numerator only\r\n cov_sum += cov * batch_gamma_sum.unsqueeze(-1).unsqueeze(-1) # keep sums of the numerator only\r\n \r\n N += input_data.size(0)\r\n \r\n train_phi = gamma_sum / N\r\n train_mu = mu_sum / gamma_sum.unsqueeze(-1)\r\n train_cov = cov_sum / gamma_sum.unsqueeze(-1).unsqueeze(-1)\r\n\r\n print(\"N:\",N)\r\n print(\"phi :\\n\",train_phi)\r\n print(\"mu :\\n\",train_mu)\r\n print(\"cov :\\n\",train_cov)\r\n\r\n train_energy = []\r\n train_labels = []\r\n train_z = []\r\n for it, (input_data, labels) in enumerate(self.data_loader):\r\n input_data = self.to_var(input_data)\r\n enc, dec, z, gamma = self.dagmm(input_data)\r\n sample_energy, cov_diag = self.dagmm.compute_energy(z, phi=train_phi, mu=train_mu, cov=train_cov, size_average=False)\r\n \r\n train_energy.append(sample_energy.data.cpu().numpy())\r\n train_z.append(z.data.cpu().numpy())\r\n train_labels.append(labels.numpy())\r\n\r\n\r\n train_energy = np.concatenate(train_energy,axis=0)\r\n train_z = np.concatenate(train_z,axis=0)\r\n train_labels = np.concatenate(train_labels,axis=0)\r\n\r\n\r\n self.data_loader.dataset.mode=\"test\"\r\n test_energy = []\r\n test_labels = []\r\n test_z = []\r\n for it, (input_data, labels) in enumerate(self.data_loader):\r\n input_data = self.to_var(input_data)\r\n enc, dec, z, gamma = self.dagmm(input_data)\r\n sample_energy, cov_diag = 
self.dagmm.compute_energy(z, size_average=False)\r\n test_energy.append(sample_energy.data.cpu().numpy())\r\n test_z.append(z.data.cpu().numpy())\r\n test_labels.append(labels.numpy())\r\n\r\n\r\n test_energy = np.concatenate(test_energy,axis=0)\r\n test_z = np.concatenate(test_z,axis=0)\r\n test_labels = np.concatenate(test_labels,axis=0)\r\n\r\n combined_energy = np.concatenate([train_energy, test_energy], axis=0)\r\n combined_labels = np.concatenate([train_labels, test_labels], axis=0)\r\n\r\n thresh = np.percentile(combined_energy, 100 - 20)\r\n print(\"Threshold :\", thresh)\r\n\r\n pred = (test_energy > thresh).astype(int)\r\n gt = test_labels.astype(int)\r\n\r\n from sklearn.metrics import precision_recall_fscore_support as prf, accuracy_score\r\n\r\n accuracy = accuracy_score(gt,pred)\r\n precision, recall, f_score, support = prf(gt, pred, average='binary')\r\n\r\n print(\"Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}\".format(accuracy, precision, recall, f_score))\r\n \r\n return accuracy, precision, recall, f_score"
] | [
[
"sklearn.metrics.pairwise.cosine_similarity"
],
[
"matplotlib.pyplot.legend",
"sklearn.metrics.accuracy_score",
"torch.sum",
"numpy.percentile",
"numpy.concatenate",
"sklearn.metrics.precision_recall_fscore_support",
"matplotlib.pyplot.subplot",
"torch.cuda.is_available",
"numpy.array",
"matplotlib.pyplot.show",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
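The kNN-style detector in `predict.py` above scores each patch by the mean cosine similarity between the test image's latent vector and the stored reference vectors for that patch, and flags the patch when the score falls below the patch's threshold. A minimal NumPy/scikit-learn sketch of that scoring rule, with made-up feature vectors standing in for the extractor output:

```python
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

def patch_is_anomalous(reference_z, test_z, thresh):
    """reference_z: (n_ref, d) latent vectors of this patch from normal images.
    test_z: (1, d) latent vector of the same patch in the test image.
    Returns (score, flag): mean cosine similarity and whether it is below thresh."""
    score = np.mean(cosine_similarity(reference_z, test_z), axis=0)[0]
    return score, score < thresh

# Hypothetical 4-d features: references cluster along one direction,
# the test vector points elsewhere, so the score is low and the patch is flagged.
rng = np.random.default_rng(0)
reference_z = rng.normal(size=(10, 4)) + np.array([5.0, 0.0, 0.0, 0.0])
test_z = np.array([[0.0, 5.0, 0.0, 0.0]])

score, flag = patch_is_anomalous(reference_z, test_z, thresh=0.8)
print('score = {:.3f}, anomalous = {}'.format(score, flag))
```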
thanhtung09t2/Hyperbox-classifier | [
"4b4cf9dfae68902bd9a742db421cacce8daf37a4"
] | [
"GFMM/agglo_onlgfmm.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 28 15:41:27 2018\n\n@author: Thanh Tung Khuat\n\nAnother method for serial combination of online learning and agglomerative learning gfmm\n\n Using Agglomerative learning to train a base model, then deploy the trained model for online learning with different training data\n \n AggloOnlineGFMM(gamma, teta_onl, teta_agglo, bthres, simil, sing, isDraw, oper, isNorm, norm_range, V_pre, W_pre, classId_pre)\n\n INPUT\n gamma Membership function slope (default: 1)\n teta_onl Maximum hyperbox size (default: 1) for online learning\n teta_agglo Maximum hyperbox size (default: 1) for agglomerative v2 learning\n bthres Similarity threshold for hyperbox concatenation (default: 0.5)\n simil Similarity measure: 'short', 'long' or 'mid' (default: 'mid')\n sing Use 'min' or 'max' (default) memberhsip in case of assymetric similarity measure (simil='mid')\n isDraw Progress plot flag (default: False)\n oper Membership calculation operation: 'min' or 'prod' (default: 'min')\n isNorm Do normalization of input training samples or not?\n norm_range New ranging of input data after normalization, for example: [0, 1]\n V_pre Hyperbox lower bounds for the model to be updated using new data\n W_pre Hyperbox upper bounds for the model to be updated using new data\n classId_pre Hyperbox class labels (crisp) for the model to be updated using new data \n \n ATTRIBUTES:\n V Hyperbox lower bounds\n W Hyperbox upper bounds\n classId Hyperbox class labels (crisp)\n cardin Hyperbox cardinalities (the number of training samples is covered by corresponding hyperboxes)\n clusters Identifiers of input objects in each hyperbox (indexes of training samples covered by corresponding hyperboxes)\n\n\"\"\"\n\nimport sys, os\nsys.path.insert(0, os.path.pardir)\n\nimport ast\nimport numpy as np\nimport time\nimport matplotlib\ntry:\n matplotlib.use('TkAgg')\nexcept:\n pass\n\nfrom functionhelper.preprocessinghelper import loadDataset, string_to_boolean, splitDatasetRndClassBasedTo2Part, splitDatasetRndTo2Part\nfrom GFMM.basebatchlearninggfmm import BaseBatchLearningGFMM\nfrom GFMM.onlinegfmm import OnlineGFMM\nfrom GFMM.accelbatchgfmm import AccelBatchGFMM\nfrom GFMM.batchgfmm_v1 import BatchGFMMV1\nfrom GFMM.batchgfmm_v2 import BatchGFMMV2\n\nclass AggloOnlineGFMM(BaseBatchLearningGFMM):\n \n def __init__(self, gamma = 1, teta_onl = 1, teta_agglo = 1, bthres = 0.5, simil = 'mid', sing = 'max', isDraw = False, oper = 'min', isNorm = False, norm_range = [0, 1], V_pre = np.array([], dtype=np.float64), W_pre = np.array([], dtype=np.float64), classId_pre = np.array([], dtype=np.int16)):\n BaseBatchLearningGFMM.__init__(self, gamma, teta_onl, isDraw, oper, isNorm, norm_range)\n \n self.teta_onl = teta_onl\n self.teta_agglo = teta_agglo\n \n self.V = V_pre\n self.W = W_pre\n self.classId = classId_pre\n \n self.bthres = bthres\n self.simil = simil\n self.sing = sing\n \n \n def fit(self, Xl_onl, Xu_onl, patClassId_onl, Xl_off, Xu_off, patClassId_off, typeOfAgglo = 1):\n \"\"\"\n The input data need to be normalized before using this function\n \n Xl_onl Input data lower bounds (rows = objects, columns = features) for online learning\n Xu_onl Input data upper bounds (rows = objects, columns = features) for online learning\n patClassId_onl Input data class labels (crisp) for online learning\n \n Xl_off Input data lower bounds (rows = objects, columns = features) for agglomerative learning\n Xu_off Input data upper bounds (rows = objects, columns = features) for agglomerative learning\n 
patClassId_off Input data class labels (crisp) for agglomerative learning\n \n typeOfAgglo The used type of agglomerative learning algorithms\n \"\"\"\n \n time_start = time.clock()\n \n # Perform agglomerative learning\n if typeOfAgglo == 1:\n aggloClassifier = AccelBatchGFMM(self.gamma, self.teta_agglo, bthres = self.bthres, simil = self.simil, sing = self.sing, isDraw = self.isDraw, oper = self.oper, isNorm = False)\n elif typeOfAgglo == 2:\n aggloClassifier = BatchGFMMV2(self.gamma, self.teta_agglo, bthres = self.bthres, simil = self.simil, sing = self.sing, isDraw = self.isDraw, oper = self.oper, isNorm = False)\n else:\n aggloClassifier = BatchGFMMV1(self.gamma, self.teta_agglo, bthres = self.bthres, simil = self.simil, sing = self.sing, isDraw = self.isDraw, oper = self.oper, isNorm = False)\n \n aggloClassifier.fit(Xl_off, Xu_off, patClassId_off)\n \n self.V = aggloClassifier.V\n self.W = aggloClassifier.W\n self.classId = aggloClassifier.classId\n \n # Perform online learning\n onlClassifier = OnlineGFMM(self.gamma, self.teta_onl, self.teta_onl, isDraw = self.isDraw, oper = self.oper, isNorm = False, norm_range = [self.loLim, self.hiLim], V = self.V, W = self.W, classId = self.classId)\n # training for online GFMM\n onlClassifier.fit(Xl_onl, Xu_onl, patClassId_onl)\n \n self.V = onlClassifier.V\n self.W = onlClassifier.W\n self.classId = onlClassifier.classId\n \n time_end = time.clock()\n self.elapsed_training_time = time_end - time_start\n \n return self\n \n\nif __name__ == '__main__':\n \"\"\"\n INPUT parameters from command line\n \n arg1: + 1 - training and testing datasets are located in separated files\n + 2 - training and testing datasets are located in the same files\n arg2: path to file containing the training dataset (arg1 = 1) or both training and testing datasets (arg1 = 2)\n arg3: + path to file containing the testing dataset (arg1 = 1)\n + percentage of the training dataset in the input file\n arg4: + True: drawing hyperboxes during the training process\n + False: no drawing\n arg5: + Maximum size of hyperboxes of online learning algorithm (teta_onl, default: 1)\n arg6: + Maximum size of hyperboxes of agglomerative learning algorithm (teta_agglo, default: 1)\n arg7: + gamma value (default: 1)\n arg8: + Similarity threshod (default: 0.5)\n arg9: + Similarity measure: 'short', 'long' or 'mid' (default: 'mid')\n arg10: + operation used to compute membership value: 'min' or 'prod' (default: 'min')\n arg11: + do normalization of datasets or not? 
True: Normilize, False: No normalize (default: True)\n arg12: + range of input values after normalization (default: [0, 1]) \n arg13: + Use 'min' or 'max' (default) memberhsip in case of assymetric similarity measure (simil='mid')\n arg14: + Type of agglomerative learning\n - 1: Accelerated agglomerative learning AGGLO-2\n - 2: Full batch learning slower version\n - 3: Full batch learning faster version\n arg15: + Percentage of online training data (default: 0.5)\n \"\"\"\n \n # Init default parameters\n if len(sys.argv) < 5:\n isDraw = False\n else:\n isDraw = string_to_boolean(sys.argv[4])\n \n if len(sys.argv) < 6:\n teta_onl = 1 \n else:\n teta_onl = float(sys.argv[5])\n \n if len(sys.argv) < 7:\n teta_agglo = 1\n else:\n teta_agglo = float(sys.argv[6])\n \n if len(sys.argv) < 8:\n gamma = 1\n else:\n gamma = float(sys.argv[7])\n \n if len(sys.argv) < 9:\n bthres = 0.5\n else:\n bthres = float(sys.argv[8])\n \n if len(sys.argv) < 10:\n simil = 'mid'\n else:\n simil = sys.argv[9]\n \n if len(sys.argv) < 11:\n oper = 'min'\n else:\n oper = sys.argv[10]\n \n if len(sys.argv) < 12:\n isNorm = True\n else:\n isNorm = string_to_boolean(sys.argv[11])\n \n if len(sys.argv) < 13:\n norm_range = [0, 1]\n else:\n norm_range = ast.literal_eval(sys.argv[12])\n \n if len(sys.argv) < 14:\n sing = 'max'\n else:\n sing = sys.argv[13]\n \n if len(sys.argv) < 15:\n typeOfAgglo = 1\n else:\n typeOfAgglo = int(sys.argv[14])\n \n if len(sys.argv) < 16:\n percentOnl = 0.5\n else:\n percentOnl = float(sys.argv[15])\n \n if sys.argv[1] == '1':\n training_file = sys.argv[2]\n testing_file = sys.argv[3]\n\n # Read training file\n Xtr, X_tmp, patClassIdTr, pat_tmp = loadDataset(training_file, 1, False)\n # Read testing file\n X_tmp, Xtest, pat_tmp, patClassIdTest = loadDataset(testing_file, 0, False)\n \n else:\n dataset_file = sys.argv[2]\n percent_Training = float(sys.argv[3])\n Xtr, Xtest, patClassIdTr, patClassIdTest = loadDataset(dataset_file, percent_Training, False)\n \n \n classifier = AggloOnlineGFMM(gamma, teta_onl, teta_agglo, bthres, simil, sing, isDraw, oper, isNorm, norm_range)\n \n Xtr_onl, Xtr_off = splitDatasetRndTo2Part(Xtr, Xtr, patClassIdTr, percentOnl)\n \n classifier.fit(Xtr_onl.lower, Xtr_onl.upper, Xtr_onl.label, Xtr_off.lower, Xtr_off.upper, Xtr_off.label)\n \n \n # Testing\n print(\"-- Testing --\")\n result = classifier.predict(Xtest, Xtest, patClassIdTest)\n if result != None:\n print(\"Number of wrong predicted samples = \", result.summis)\n numTestSample = Xtest.shape[0]\n print(\"Error Rate = \", np.round(result.summis / numTestSample * 100, 2), \"%\")\n \n"
] | [
[
"numpy.round",
"matplotlib.use",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
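Beyond the command-line entry point shown above, the two-stage training (agglomerative pre-training followed by online refinement) can be driven through the class API directly. The sketch below mirrors the `__main__` flow; the dataset file name, split ratio, and hyperparameter values are placeholders, and it assumes the repository's `GFMM` and `functionhelper` packages are importable.

```python
from GFMM.agglo_onlgfmm import AggloOnlineGFMM
from functionhelper.preprocessinghelper import loadDataset, splitDatasetRndTo2Part

# Hypothetical dataset file; 80% of the rows are used for training.
Xtr, Xtest, patClassIdTr, patClassIdTest = loadDataset('dataset.csv', 0.8, False)

classifier = AggloOnlineGFMM(gamma=1, teta_onl=0.6, teta_agglo=0.6,
                             bthres=0.5, simil='mid', sing='max',
                             isDraw=False, oper='min', isNorm=True, norm_range=[0, 1])

# Half of the training data goes to online learning, the other half to agglomerative learning.
Xtr_onl, Xtr_off = splitDatasetRndTo2Part(Xtr, Xtr, patClassIdTr, 0.5)
classifier.fit(Xtr_onl.lower, Xtr_onl.upper, Xtr_onl.label,
               Xtr_off.lower, Xtr_off.upper, Xtr_off.label, typeOfAgglo=1)

result = classifier.predict(Xtest, Xtest, patClassIdTest)
if result is not None:
    print('Error rate: {:.2f}%'.format(100 * result.summis / Xtest.shape[0]))
```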
alcinos/dps | [
"5467db1216e9f9089376d2c71f524ced2382e4f6",
"5467db1216e9f9089376d2c71f524ced2382e4f6",
"5467db1216e9f9089376d2c71f524ced2382e4f6"
] | [
"dps/hyper/parallel_session.py",
"dps/utils/tf.py",
"scripts/iclr_2018/rl_size.py"
] | [
"from __future__ import print_function\nimport os\nimport datetime\nimport subprocess\nfrom future.utils import raise_with_traceback\nimport numpy as np\nimport time\nimport progressbar\nimport shutil\nfrom collections import defaultdict\nimport sys\nimport dill\nfrom zipfile import ZipFile\nfrom contextlib import ExitStack\nimport json\n\nfrom dps import cfg\nfrom dps.parallel import ReadOnlyJob\nfrom dps.utils import (\n cd, parse_timedelta, make_symlink, ExperimentStore,\n zip_root, process_path, path_stem, redirect_stream\n)\n\n\nDEFAULT_HOST_POOL = ['ecrawf6@cs-{}.cs.mcgill.ca'.format(i) for i in range(1, 33)]\n\n\nclass ParallelSession(object):\n \"\"\" Run a Job in parallel using gnu-parallel.\n\n A directory for this job execution is created in `scratch`, and results are saved there.\n\n Parameters\n ----------\n name: str\n Name for the experiment.\n input_zip: str\n Path to a zip archive storing the Job.\n pattern: str\n Pattern to use to select which ops to run within the Job.\n scratch: str\n Path to location where the results of running the selected ops will be\n written. Must be writeable by the master process.\n local_scratch_prefix: str\n Path to scratch directory that is local to each remote host.\n ppn: int\n Number of processors per node.\n wall_time: str\n String specifying the maximum wall-time allotted to running the selected ops.\n cleanup_time: str\n String specifying the amount of cleanup time to allow per step. Affects the time-limit\n that we pass to `gnu-parallel`, as well as the time limit passed to the python script.\n slack_time: float\n String specifying the amount of slack time to allow per step. Corresponds to\n time allotted to each process to respond to the signal that the step's time is up.\n Affects the time limit that we pass to the python script.\n add_date: bool\n Whether to add current date/time to the name of the directory where results are stored.\n dry_run: bool\n If True, control script will be generated but not executed/submitted.\n parallel_exe: str\n Path to the `gnu-parallel` executable to use.\n host_pool: list of str\n A list of names of hosts to use to execute the job.\n load_avg_threshold: float\n If a host exhibits a load average greater than this, it will not be used.\n max_hosts: int\n Maximum number of hosts to use.\n env_vars: dict (str -> str)\n Dictionary mapping environment variable names to values. These will be accessible\n by the submit script, and will also be sent to the worker nodes.\n output_to_files: bool\n If True, stderr and stdout of jobs is saved in files rather than being printed to screen.\n n_retries: int\n Number of retries per job.\n gpu_set: str\n Comma-separated list of indices of gpus to use.\n copy_venv: bool\n If True, copy the virtualenv from the launching environment and use it to run the simulation.\n python_startup: bool\n If True, source script located at \"$HOME/python_startup.sh\" before running step command.\n step_time_limit: str\n String specifying time limit for each step. 
If not supplied, a time limit is inferred\n automatically based on wall_time and number of steps (giving each step an equal amount\n of time).\n ignore_gpu: bool\n If True, GPUs will be requested by as part of the job, but will not be used at run time.\n ssh_options: string\n String of options to pass to ssh.\n loud_output: bool\n Whether to capture stdout for the main execution command.\n\n \"\"\"\n def __init__(\n self, name, input_zip, pattern, scratch, local_scratch_prefix='/tmp/dps/hyper/', ppn=12, cpp=1,\n pmem=None, wall_time=\"1hour\", cleanup_time=\"1min\", slack_time=\"1min\", add_date=True, dry_run=0,\n parallel_exe=None, kind=\"parallel\", host_pool=None, load_avg_threshold=8., min_hosts=None,\n max_hosts=1, env_vars=None, output_to_files=True, n_retries=0, gpu_set=\"\", copy_venv=\"\",\n python_startup=False, step_time_limit=None, ignore_gpu=False, ssh_options=None, loud_output=True,\n rsync_verbosity=0):\n\n args = locals().copy()\n del args['self']\n\n print(\"\\nParallelSession args:\")\n print(args)\n\n launch_venv = os.getenv('VIRTUAL_ENV')\n if launch_venv:\n launch_venv = os.path.split(launch_venv)[1]\n\n if not parallel_exe:\n parallel_exe = \"$HOME/.local/bin/parallel\"\n\n if ssh_options is None:\n ssh_options = (\n \"-oPasswordAuthentication=no \"\n \"-oStrictHostKeyChecking=no \"\n \"-oConnectTimeout=5 \"\n \"-oServerAliveInterval=2\"\n )\n\n if kind == \"pbs\":\n local_scratch_prefix = \"\\\\$RAMDISK\"\n\n assert kind in \"parallel pbs slurm slurm-local\".split()\n hpc = kind != \"parallel\"\n\n # Create directory to run the job from - should be on scratch.\n scratch = os.path.abspath(os.path.expandvars(scratch))\n\n es = ExperimentStore(scratch, prefix=\"run_search\")\n\n job_dir = es.new_experiment(name, 0, add_date=add_date, force_fresh=1)\n job_dir.record_environment()\n\n with open(job_dir.path_for('run_kwargs.json'), 'w') as f:\n json.dump(args, f, default=str, indent=4, sort_keys=True)\n del f\n del args\n\n job_path = job_dir.path\n job_dir.make_directory('experiments')\n\n input_zip_stem = path_stem(input_zip)\n input_zip = shutil.copy(input_zip, job_dir.path_for(\"orig.zip\"))\n input_zip_abs = process_path(input_zip)\n input_zip_base = os.path.basename(input_zip)\n archive_root = zip_root(input_zip)\n\n self.copy_files(\n job_dir, input_zip, archive_root,\n [\"README.md\", \"sampled_configs.txt\", \"config.json\", \"config.pkl\"])\n\n # storage local to each node, from the perspective of that node\n local_scratch = os.path.join(local_scratch_prefix, os.path.basename(job_path))\n\n output_to_files = \"--output-to-files\" if output_to_files else \"\"\n\n env = os.environ.copy()\n\n env_vars = env_vars or {}\n\n env.update({e: str(v) for e, v in env_vars.items()})\n env_vars = ' '.join('--env ' + k for k in env_vars)\n\n rsync_verbosity = \"\" if not rsync_verbosity else \"-\" + \"v\" * rsync_verbosity\n\n ro_job = ReadOnlyJob(input_zip)\n indices_to_run = sorted([op.idx for op in ro_job.ready_incomplete_ops(sort=False)])\n del ro_job\n n_jobs_to_run = len(indices_to_run)\n if n_jobs_to_run == 0:\n print(\"All jobs are finished! 
Exiting.\")\n return\n\n dirty_hosts = set()\n\n if hpc:\n host_pool = []\n n_nodes = max_hosts\n n_procs = n_nodes * ppn\n n_steps = int(np.ceil(n_jobs_to_run / n_procs))\n else:\n self.__dict__.update(locals())\n\n host_pool = host_pool or DEFAULT_HOST_POOL\n if isinstance(host_pool, str):\n host_pool = host_pool.split()\n\n # Get an estimate of the number of hosts we'll have available.\n with cd(job_path):\n hosts, n_procs = self.recruit_hosts(\n hpc, min_hosts, max_hosts, host_pool,\n ppn, max_procs=np.inf)\n n_nodes = len(hosts)\n\n if n_jobs_to_run < n_procs:\n n_steps = 1\n n_nodes = int(np.ceil(n_jobs_to_run / ppn))\n n_procs = n_nodes * ppn\n hosts = hosts[:n_nodes]\n else:\n n_steps = int(np.ceil(n_jobs_to_run / n_procs))\n\n node_file = \" --sshloginfile nodefile.txt \"\n\n wall_time_seconds, total_seconds_per_step, parallel_seconds_per_step, python_seconds_per_step = \\\n self.compute_time_limits(wall_time, cleanup_time, slack_time, step_time_limit, n_steps)\n\n self.__dict__.update(locals())\n\n self.print_time_limits()\n\n def get_load_avg(self, host):\n return_code, stdout, stderr = self.ssh_execute(\"uptime\", host, robust=True)\n print(stdout)\n if return_code:\n return 1000.0, 1000.0, 1000.0\n return [float(s) for s in stdout.split(':')[-1].split(',')]\n\n def print_time_limits(self):\n print(\"\\n\" + \"~\" * 40)\n print(\"We have {wall_time_seconds} seconds to complete {n_jobs_to_run} \"\n \"sub-jobs (grouped into {n_steps} steps) using {n_procs} processors.\".format(**self.__dict__))\n print(\"Each step, we are allowing {slack_time} as slack and \"\n \"{cleanup_time} for cleanup.\".format(**self.__dict__))\n print(\"Total time per step is {total_seconds_per_step} seconds.\".format(**self.__dict__))\n print(\"Time-limit passed to parallel is {parallel_seconds_per_step} seconds.\".format(**self.__dict__))\n print(\"Time-limit passed to dps-hyper is {python_seconds_per_step} seconds.\".format(**self.__dict__))\n\n @staticmethod\n def compute_time_limits(wall_time, cleanup_time_per_step, slack_time_per_step, step_time_limit, n_steps):\n if isinstance(wall_time, str):\n wall_time = int(parse_timedelta(wall_time).total_seconds())\n assert isinstance(wall_time, int)\n assert wall_time > 0\n\n if isinstance(cleanup_time_per_step, str):\n cleanup_time_per_step = int(parse_timedelta(cleanup_time_per_step).total_seconds())\n assert isinstance(cleanup_time_per_step, int)\n assert cleanup_time_per_step > 0\n\n if isinstance(slack_time_per_step, str):\n slack_time_per_step = int(parse_timedelta(slack_time_per_step).total_seconds())\n assert isinstance(slack_time_per_step, int)\n assert slack_time_per_step > 0\n\n if step_time_limit is None:\n total_seconds_per_step = int(np.floor(wall_time / n_steps))\n else:\n if isinstance(step_time_limit, str):\n step_time_limit = int(parse_timedelta(step_time_limit).total_seconds())\n assert isinstance(step_time_limit, int)\n assert step_time_limit > 0\n\n total_seconds_per_step = step_time_limit\n\n # Subtract cleanup time and wall time.\n parallel_seconds_per_step = int(total_seconds_per_step - cleanup_time_per_step)\n python_seconds_per_step = int(\n total_seconds_per_step - cleanup_time_per_step - slack_time_per_step)\n\n assert total_seconds_per_step > 0\n assert parallel_seconds_per_step > 0\n assert python_seconds_per_step > 0\n\n return wall_time, total_seconds_per_step, parallel_seconds_per_step, python_seconds_per_step\n\n @staticmethod\n def copy_files(job_dir, input_zip, archive_root, filenames):\n # Copy files from archive\n with 
ZipFile(input_zip, 'r') as _input_zip:\n for filename in filenames:\n name_in_zip = os.path.join(archive_root, filename)\n text = None\n try:\n text = _input_zip.read(name_in_zip).decode()\n except Exception:\n print(\"No {} found in zip file.\".format(filename))\n\n if text is not None:\n with open(job_dir.path_for(filename), 'w') as f:\n f.write(text)\n\n def recruit_hosts(self, hpc, min_hosts, max_hosts, host_pool, ppn, max_procs):\n if not hpc and getattr(self, 'candidate_hosts', None) is None:\n print(\"Ranking hosts by suitability...\")\n candidate_hosts = {}\n for host in host_pool:\n if host is not ':':\n print(\"\\n\" + \"~\" * 40)\n print(\"Testing connection to host {}...\".format(host))\n failed, _, _ = self.ssh_execute(\"echo Connected to \\$HOSTNAME\", host, robust=True)\n if failed:\n print(\"Could not connect.\")\n continue\n\n load_avg, _, _ = self.get_load_avg(host)\n print(\"1 minute load average: {}\".format(load_avg))\n\n if load_avg < self.load_avg_threshold:\n candidate_hosts[host] = load_avg\n else:\n print(\"`load_avg` above threshold of {}, discarding host.\".format(self.load_avg_threshold))\n\n self.candidate_hosts = candidate_hosts\n\n hosts = []\n\n if hpc:\n candidate_hosts = host_pool\n else:\n candidate_hosts = sorted(self.candidate_hosts, key=self.candidate_hosts.__getitem__)\n\n for host in candidate_hosts:\n n_hosts_recruited = len(hosts)\n if n_hosts_recruited >= max_hosts:\n break\n\n if n_hosts_recruited * ppn >= max_procs:\n break\n\n print(\"\\n\" + (\"~\" * 40))\n print(\"Recruiting host {}...\".format(host))\n\n if not hpc:\n load_avg, _, _ = self.get_load_avg(host)\n print(\"Previous 1 minute load average: {}\".format(self.candidate_hosts[host]))\n print(\"Recalculated 1 minute load average: {}\".format(load_avg))\n self.candidate_hosts[host] = load_avg\n\n print(\"Preparing host...\")\n try:\n command = \"stat {local_scratch}\"\n create_local_scratch, _, _ = self.ssh_execute(command, host, robust=True, output=\"quiet\")\n\n if create_local_scratch:\n print(\"Creating local scratch directory...\")\n command = \"mkdir -p {local_scratch}\"\n self.ssh_execute(command, host, robust=False)\n self.dirty_hosts.add(host)\n\n command = \"cd {local_scratch} && stat {archive_root}\"\n missing_archive, _, _ = self.ssh_execute(command, host, robust=True, output=\"quiet\")\n\n if missing_archive:\n command = \"cd {local_scratch} && stat {input_zip_base}\"\n missing_zip, _, _ = self.ssh_execute(command, host, robust=True, output=\"quiet\")\n\n if missing_zip:\n print(\"Copying zip to local scratch...\")\n if host == ':':\n command = \"cp {input_zip_abs} {local_scratch}\".format(**self.__dict__)\n else:\n command = (\n \"rsync -a {rsync_verbosity} --timeout=300 -e \\\"ssh {ssh_options}\\\" \"\n \"{input_zip_abs} {host}:{local_scratch}\".format(host=host, **self.__dict__)\n )\n self.execute_command(command, frmt=False, robust=False)\n\n print(\"Unzipping...\")\n command = \"cd {local_scratch} && unzip -ouq {input_zip_base}\"\n self.ssh_execute(command, host, robust=False)\n\n print(\"Host successfully prepared.\")\n hosts.append(host)\n\n except subprocess.CalledProcessError as e:\n print(\"Preparation of host failed.\")\n print(\"Command output:\\n{}\".format(e.output))\n\n if min_hosts is not None and len(hosts) < min_hosts:\n raise Exception(\n \"Found only {} usable hosts, but minimum \"\n \"required hosts is {}.\".format(len(hosts), min_hosts))\n\n n_procs = ppn * len(hosts)\n\n print(\"\\nProceeding with {} usable hosts, translates into {} procs total \"\n 
\"(max_procs: {}, max_hosts: {}).\".format(\n len(hosts), n_procs, max_procs, max_hosts))\n\n with open('nodefile.txt', 'w') as f:\n f.write('\\n'.join(hosts))\n\n return hosts, n_procs\n\n def execute_command(\n self, command, frmt=True, shell=True, max_seconds=None,\n progress=False, robust=False, output=None):\n \"\"\" Uses `subprocess` to execute `command`. Has a few added bells and whistles.\n\n if command returns non-zero exit status:\n if robust:\n returns as normal\n else:\n raise CalledProcessError\n\n Parameters\n ----------\n command: str\n The command to execute.\n\n\n Returns\n -------\n returncode, stdout, stderr\n\n \"\"\"\n p = None\n try:\n assert isinstance(command, str)\n if frmt:\n command = command.format(**self.__dict__)\n\n if output == \"loud\":\n print(\"\\nExecuting command: \" + (\">\" * 40) + \"\\n\")\n print(command)\n\n if not shell:\n command = command.split()\n\n stdout = None if output == \"loud\" else subprocess.PIPE\n stderr = None if output == \"loud\" else subprocess.PIPE\n\n start = time.time()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n p = subprocess.Popen(command, shell=shell, universal_newlines=True,\n stdout=stdout, stderr=stderr)\n\n progress_bar = None\n if progress:\n widgets = ['[', progressbar.Timer(), '] ',\n '(', progressbar.ETA(), ') ',\n progressbar.Bar()]\n _max_value = max_seconds or progressbar.UnknownLength\n progress_bar = progressbar.ProgressBar(\n widgets=widgets, max_value=_max_value, redirect_stdout=True)\n\n interval_length = 1\n while True:\n try:\n p.wait(interval_length)\n except subprocess.TimeoutExpired:\n if progress_bar is not None:\n progress_bar.update(min(int(time.time() - start), max_seconds))\n\n if p.returncode is not None:\n break\n\n if progress_bar is not None:\n progress_bar.finish()\n\n if output == \"loud\":\n print(\"\\nCommand took {} seconds.\\n\".format(time.time() - start))\n\n _stdout = \"\" if p.stdout is None else p.stdout.read()\n _stderr = \"\" if p.stderr is None else p.stderr.read()\n\n if p.returncode != 0:\n if isinstance(command, list):\n command = ' '.join(command)\n\n print(\"The following command returned with non-zero exit code \"\n \"{}:\\n {}\".format(p.returncode, command))\n\n if output is None or (output == \"quiet\" and not robust):\n print(\"\\n\" + \"-\" * 20 + \" stdout \" + \"-\" * 20 + \"\\n\")\n print(_stdout)\n\n print(\"\\n\" + \"-\" * 20 + \" stderr \" + \"-\" * 20 + \"\\n\")\n print(_stderr)\n\n if robust:\n return p.returncode, _stdout, _stderr\n else:\n raise subprocess.CalledProcessError(p.returncode, command, _stdout, _stderr)\n\n return p.returncode, _stdout, _stderr\n\n except BaseException as e:\n if p is not None:\n p.terminate()\n p.kill()\n if progress_bar is not None:\n progress_bar.finish()\n raise_with_traceback(e)\n\n def ssh_execute(self, command, host, **kwargs):\n if host == \":\":\n cmd = command\n else:\n cmd = \"ssh {ssh_options} -T {host} \\\"{command}\\\"\".format(\n ssh_options=self.ssh_options, host=host, command=command)\n return self.execute_command(cmd, **kwargs)\n\n def _step(self, i, indices_for_step):\n if not indices_for_step:\n print(\"No jobs left to run on step {}.\".format(i))\n return\n\n _ignore_gpu = \"--ignore-gpu\" if self.ignore_gpu else \"\"\n\n indices = ' '.join(str(i) for i in indices_for_step)\n\n if \"slurm\" in self.kind:\n parallel_command = (\n \"cd {local_scratch} && \"\n \"dps-hyper run {archive_root} {pattern} {indices} --max-time {python_seconds_per_step} \"\n \"--log-root {local_scratch} --env-name experiments 
--gpu-set={gpu_set} --ppn={ppn} \"\n \"{_ignore_gpu} {output_to_files}\"\n )\n\n bind = \"--accel-bind=g\" if self.gpu_set else \"\"\n mem = \"--mem-per-cpu={}mb\".format(self.pmem) if self.pmem else \"\"\n\n command = ('timeout --signal=INT {parallel_seconds_per_step} srun --cpus-per-task {cpp} --ntasks {n_tasks} {bind} '\n '{mem} --no-kill --quit-on-interrupt sh -c \"{parallel_command}\"'.format(\n parallel_seconds_per_step=self.parallel_seconds_per_step,\n cpp=self.cpp,\n n_tasks=len(indices_for_step),\n bind=bind,\n mem=mem,\n parallel_command=parallel_command))\n else:\n workon = \"workon {launch_venv} && \" if (self.copy_venv and self.launch_venv) else \"\"\n python_startup = \"source \\$HOME/python_startup.sh && \" if self.python_startup else \"\"\n parallel_command = (\n python_startup +\n workon +\n \"cd {local_scratch} && \"\n \"dps-hyper run {archive_root} {pattern} {{}} --max-time {python_seconds_per_step} \"\n \"--log-root {local_scratch} --env-name experiments \"\n \"--idx-in-node={{%}} --gpu-set={gpu_set} --ppn={ppn} {_ignore_gpu} {output_to_files}\"\n )\n\n command = (\n '{parallel_exe} --timeout {parallel_seconds_per_step} --no-notice -j{ppn} \\\\\\n'\n ' --joblog {job_path}/job_log.txt {node_file} \\\\\\n'\n ' {env_vars} -v \\\\\\n'\n # ' --env PATH --env LD_LIBRARY_PATH {env_vars} -v \\\\\\n'\n ' \"' + parallel_command + '\" \\\\\\n'\n ' ::: {indices}'\n )\n\n command = command.format(\n indices=indices, _ignore_gpu=_ignore_gpu, **self.__dict__)\n\n self.execute_command(\n command, frmt=False, robust=True,\n max_seconds=self.parallel_seconds_per_step, progress=not self.hpc,\n output='loud' if self.loud_output else None)\n\n def _checkpoint(self, i):\n print(\"Fetching results of step {} at: \".format(i))\n print(datetime.datetime.now())\n\n for i, host in enumerate(self.hosts):\n if host == ':':\n command = \"mv {local_scratch}/experiments/* ./experiments\"\n self.execute_command(command, robust=True)\n\n command = \"rm -rf {local_scratch}/experiments\"\n self.execute_command(command, robust=True)\n\n command = \"cp -ru {local_scratch}/{archive_root} .\"\n self.execute_command(command, robust=True)\n else:\n command = (\n \"rsync -az {rsync_verbosity} --timeout=300 -e \\\"ssh {ssh_options}\\\" \"\n \"{host}:{local_scratch}/experiments/ ./experiments\".format(\n host=host, **self.__dict__)\n )\n self.execute_command(command, frmt=False, robust=True, output=\"loud\")\n\n command = \"rm -rf {local_scratch}/experiments\"\n self.ssh_execute(command, host, robust=True, output=\"loud\")\n\n command = (\n \"rsync -az {rsync_verbosity} --timeout=300 -e \\\"ssh {ssh_options}\\\" \"\n \"{host}:{local_scratch}/{archive_root} .\".format(\n host=host, **self.__dict__)\n )\n self.execute_command(command, frmt=False, robust=True, output=\"loud\")\n\n self.execute_command(\"zip -rq results {archive_root}\", robust=True)\n\n try:\n from dps.hyper import HyperSearch\n search = HyperSearch('.')\n with redirect_stream('stdout', 'results.txt', tee=False):\n search.print_summary(print_config=False, verbose=False)\n print(search.job.summary(verbose=False))\n except Exception:\n job_path = 'results.zip' if os.path.exists('results.zip') else 'orig.zip'\n assert os.path.exists(job_path)\n job = ReadOnlyJob(job_path)\n print(job.summary(verbose=False))\n\n def get_slurm_var(self, var_name):\n parallel_command = \"printenv | grep {}\".format(var_name)\n command = 'srun --ntasks 1 --no-kill sh -c \"{parallel_command}\"'.format(parallel_command=parallel_command)\n returncode, stdout, stderr = 
self.execute_command(command, frmt=False, robust=False, progress=False)\n split = stdout.split('=')\n\n if len(split) != 2:\n raise Exception(\n \"Unparseable output while getting SLURM environment \"\n \"variable {}: {}\".format(var_name, stdout))\n\n _var_name, value = split\n _var_name = _var_name.strip()\n value = value.strip()\n\n if _var_name != var_name:\n raise Exception(\n \"Got wrong variable. Wanted {}, got {} with value {}\".format(var_name, _var_name, value))\n return value\n\n def run(self):\n with ExitStack() as stack:\n if not self.hpc:\n stack.enter_context(redirect_stream('stdout', self.job_dir.path_for('stdout'), tee=True))\n stack.enter_context(redirect_stream('stderr', self.job_dir.path_for('stderr'), tee=True))\n\n self._run()\n\n def _run(self):\n if self.dry_run:\n print(\"Dry run, so not running.\")\n return\n\n if \"slurm\" in self.kind:\n # Have to jump through a hoop to get the proper node-local storage on cedar/graham.\n self.local_scratch_prefix = self.get_slurm_var(\"SLURM_TMPDIR\")\n self.local_scratch = os.path.join(\n self.local_scratch_prefix,\n os.path.basename(self.job_path))\n\n # Compute new time limits based on the actual time remaining (protect against e.g. job starting late)\n\n print(\"Time limits before adjustment:\")\n self.print_time_limits()\n\n job_id = os.getenv(\"SLURM_JOBID\")\n command = 'squeue -h -j {} -o \"%L\"'.format(job_id)\n returncode, stdout, stderr = self.execute_command(command, frmt=False, robust=False)\n days = 0\n if \"-\" in stdout:\n days, time = stdout.split(\"-\")\n days = int(days)\n else:\n time = stdout\n\n time = time.split(\":\")\n\n hours = int(time[-3]) if len(time) > 2 else 0\n minutes = int(time[-2]) if len(time) > 1 else 0\n seconds = int(time[-1])\n\n wall_time_delta = datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)\n wall_time_seconds = int(wall_time_delta.total_seconds())\n\n print(\"Actual remaining walltime: {}\".format(wall_time_delta))\n print(\"Time limits after adjustment:\")\n\n (self.wall_time_seconds, self.total_seconds_per_step,\n self.parallel_seconds_per_step, self.python_seconds_per_step) = \\\n self.compute_time_limits(\n wall_time_seconds, self.cleanup_time, self.slack_time, self.step_time_limit, self.n_steps)\n\n self.print_time_limits()\n\n with cd(self.job_path):\n print(\"\\n\" + (\"=\" * 80))\n job_start = datetime.datetime.now()\n print(\"Starting job at {}\".format(job_start))\n\n job = ReadOnlyJob(self.input_zip)\n subjobs_remaining = sorted([op.idx for op in job.ready_incomplete_ops(sort=False)])\n\n n_failures = defaultdict(int)\n dead_jobs = set()\n\n i = 0\n while subjobs_remaining:\n step_start = datetime.datetime.now()\n\n print(\"\\nStarting step {} at: \".format(i) + \"=\" * 90)\n print(\"{} ({} since start of job)\".format(step_start, step_start - job_start))\n\n if not self.host_pool:\n if self.kind == \"pbs\":\n with open(os.path.expandvars(\"$PBS_NODEFILE\"), 'r') as f:\n self.host_pool = list(set([s.strip() for s in iter(f.readline, '')]))\n print(self.host_pool)\n elif \"slurm\" in self.kind:\n p = subprocess.run(\n 'scontrol show hostnames $SLURM_JOB_NODELIST', stdout=subprocess.PIPE, shell=True)\n self.host_pool = list(set([host.strip() for host in p.stdout.decode().split('\\n') if host]))\n else:\n raise Exception(\"NotImplemented\")\n\n self.hosts, self.n_procs = self.recruit_hosts(\n self.hpc, self.min_hosts, self.max_hosts, self.host_pool,\n self.ppn, max_procs=len(subjobs_remaining))\n\n indices_for_step = 
subjobs_remaining[:self.n_procs]\n self._step(i, indices_for_step)\n self._checkpoint(i)\n\n job = ReadOnlyJob(self.archive_root)\n\n subjobs_remaining = set([op.idx for op in job.ready_incomplete_ops(sort=False)])\n\n for j in indices_for_step:\n if j in subjobs_remaining:\n n_failures[j] += 1\n if n_failures[j] > self.n_retries:\n print(\"All {} attempts at completing job with index {} have failed, \"\n \"permanently removing it from set of eligible jobs.\".format(n_failures[j], j))\n dead_jobs.add(j)\n\n subjobs_remaining = [idx for idx in subjobs_remaining if idx not in dead_jobs]\n subjobs_remaining = sorted(subjobs_remaining)\n\n i += 1\n\n print(\"Step duration: {}.\".format(datetime.datetime.now() - step_start))\n\n self.execute_command(\"rm -rf {archive_root}\", robust=True)\n\n print(\"Cleaning up dirty hosts...\")\n command = \"rm -rf {local_scratch}\"\n for host in self.dirty_hosts:\n print(\"Cleaning host {}...\".format(host))\n self.ssh_execute(command, host, robust=True)\n\n\ndef submit_job(\n archive_path, name, wall_time=\"1year\", ppn=1, cpp=1, pmem=0,\n queue=\"\", kind=\"local\", gpu_set=\"\", project=\"rpp-bengioy\", **run_kwargs):\n\n assert kind in \"pbs slurm slurm-local parallel\".split()\n\n if \"slurm\" in kind and not pmem:\n raise Exception(\"Must supply a value for pmem (per-process-memory in mb) when using SLURM\")\n\n run_kwargs.update(\n wall_time=wall_time, ppn=ppn, cpp=cpp, kind=kind,\n gpu_set=gpu_set, pmem=pmem)\n\n run_kwargs['env_vars'] = dict(TF_CPP_MIN_LOG_LEVEL=3, CUDA_VISIBLE_DEVICES='-1')\n run_kwargs['dry_run'] = False\n\n session = ParallelSession(\n name, archive_path, 'map', cfg.parallel_experiments_run_dir, **run_kwargs)\n\n job_path = session.job_path\n\n # Not strictly required if kind == \"parallel\", but do it anyway for completeness.\n with open(os.path.join(job_path, \"session.pkl\"), 'wb') as f:\n dill.dump(session, f, protocol=dill.HIGHEST_PROTOCOL, recurse=True)\n\n if kind in \"parallel slurm-local\".split():\n session.run()\n return session\n\n python_script = \"\"\"#!{}\nimport datetime\nstart = datetime.datetime.now()\nprint(\"Starting job at \" + str(start))\nimport dill\nwith open(\"./session.pkl\", \"rb\") as f:\n session = dill.load(f)\nsession.run()\nend = datetime.datetime.now()\nprint(\"Finishing job at \" + str(end))\nprint(str((end - start).total_seconds()) + \" seconds elapsed between start and finish.\")\n\n\"\"\".format(sys.executable)\n with open(os.path.join(job_path, \"run.py\"), 'w') as f:\n f.write(python_script)\n\n if kind == \"pbs\":\n resources = \"nodes={}:ppn={},walltime={}\".format(session.n_nodes, session.ppn, session.wall_time_seconds)\n if pmem:\n resources = \"{},pmem={}mb\".format(resources, pmem)\n\n email = \"[email protected]\"\n if queue:\n queue = \"-q \" + queue\n command = (\n \"qsub -N {name} -d {job_path} -w {job_path} -m abe -M {email} \"\n \"-A {project} {queue} -V -l {resources} \"\n \"-j oe output.txt run.py\".format(\n name=name, job_path=job_path, email=email, project=project,\n queue=queue, resources=resources\n )\n )\n\n elif kind == \"slurm\":\n wall_time_minutes = int(np.ceil(session.wall_time_seconds / 60))\n resources = \"--nodes={} --ntasks-per-node={} --cpus-per-task={} --time={}\".format(\n session.n_nodes, session.ppn, cpp, wall_time_minutes)\n\n if pmem:\n resources = \"{} --mem-per-cpu={}mb\".format(resources, pmem)\n\n if gpu_set:\n n_gpus = len([int(i) for i in gpu_set.split(',')])\n resources = \"{} --gres=gpu:{}\".format(resources, n_gpus)\n\n email = \"[email 
protected]\"\n if queue:\n queue = \"-p \" + queue\n command = (\n \"sbatch --job-name {name} -D {job_path} --mail-type=ALL [email protected] \"\n \"-A {project} {queue} --export=ALL {resources} \"\n \"-o stdout -e stderr run.py\".format(\n name=name, job_path=job_path, email=email, project=project,\n queue=queue, resources=resources\n )\n )\n\n else:\n raise Exception()\n\n print(\"\\n\" + \"~\" * 40)\n print(command)\n\n with cd(job_path):\n subprocess.run(command.split())\n return session\n",
"import numpy as np\nfrom collections import deque, OrderedDict, defaultdict\nimport os\nimport hashlib\nimport pprint\nimport argparse\nfrom tabulate import tabulate\nimport shutil\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\ntry:\n from tensorflow.nn import dynamic_rnn, bidirectional_dynamic_rnn\nexcept Exception:\n pass\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.util import nest\n\nimport dps\nfrom dps import cfg\nfrom dps.utils.base import _bool, popleft, Parameterized, Param, Config\nfrom dps.utils.inspect_checkpoint import get_tensors_from_checkpoint_file # noqa: F401\n\n\ndef apply_object_wise(func, signal, output_size, is_training, restore_shape=True, n_trailing_dims=1):\n \"\"\" Treat `signal` as a batch of objects. Apply function `func` separately to each object.\n The final `n_trailing_dims`-many dimensions are treated as \"within-object\" dimensions.\n By default, objects are assumed to be vectors, but this can be changed by increasing\n `n_trailing_dims`. e.g. n_trailing_dims==2 means each object is a matrix, i.e. the\n last 2 dimensions of signal are dimensions of the object.\n\n \"\"\"\n shape = tf_shape(signal)\n leading_dim = tf.reduce_prod(shape[:-n_trailing_dims])\n signal = tf.reshape(signal, (leading_dim, *shape[-n_trailing_dims:]))\n output = func(signal, output_size, is_training)\n\n if restore_shape:\n if not isinstance(output_size, tuple):\n output_size = [output_size]\n output = tf.reshape(output, (*shape[:-n_trailing_dims], *output_size))\n\n return output\n\n\ndef tf_tensor_shape(shape):\n _tuple = []\n for i in shape:\n try:\n i = int(i)\n except (ValueError, TypeError):\n i = None\n _tuple.append(i)\n return tf.TensorShape(_tuple)\n\n\ndef tf_shape(tensor):\n \"\"\" Returns a tuple whose length is equal to the length of `tensor.shape`. Static shape is\n used where possible, and dynamic shape is used everywhere else.\n\n \"\"\"\n assert isinstance(tensor, tf.Tensor)\n static_shape = tensor.shape\n dynamic_shape = tf.unstack(tf.shape(tensor))\n\n shape = []\n\n for d, s in zip(dynamic_shape, static_shape):\n if s is None or s.value is None:\n shape.append(d)\n else:\n shape.append(int(s))\n\n return tuple(shape)\n\n\ndef apply_mask_and_group_at_front(data, mask):\n \"\"\" For masking data and converting it into a format suitable for input into an RNN.\n Finds all the elements of data that correspond to \"on\" elements of the mask,\n and collects them all into a sequence for each batch element. Elements are\n collected in row-major order.\n\n\n >> data = np.arange(24).reshape(2, 2, 2, 3)\n array([[[[ 0, 1, 2],\n [ 3, 4, 5]],\n [[ 6, 7, 8],\n [ 9, 10, 11]]],\n [[[12, 13, 14],\n [15, 16, 17]],\n [[18, 19, 20],\n [21, 22, 23]]]])\n\n >> mask = np.random.randint(2, size=(2, 2, 2))\n array([[[1, 1],\n [0, 1]],\n [[1, 0],\n [0, 0]]])\n\n >> result, _, _ = apply_mask_and_group_at_front(data, mask)\n >> tf.Session.run(result)\n array([[[ 0, 1, 2],\n [ 3, 4, 5],\n [ 9, 10, 11]],\n\n [[12, 13, 14],\n [ 0, 0, 0],\n [ 0, 0, 0]]])\n\n \"\"\"\n mask = tf.cast(mask, tf.bool)\n\n batch_size = tf.shape(data)[0]\n\n if len(mask.shape) == len(data.shape):\n assert mask.shape[-1] == 1\n mask = mask[..., 0]\n\n assert len(mask.shape) == len(data.shape)-1\n # assert data.shape[1:-1] == mask.shape[1:] Doesn't work if shapes partially unknown\n\n A = data.shape[-1]\n data = tf.reshape(data, (batch_size, -1, A))\n mask = tf.reshape(mask, (batch_size, -1))\n\n # data where the mask is \"on\". 
dimension should be (total_n_on, A)\n on_data = tf.boolean_mask(data, mask)\n\n # number of \"on\" elements in each batch element\n n_on = tf.reduce_sum(tf.layers.flatten(tf.to_int32(mask)), axis=1)\n\n # create an index array that can be used to index into on_data\n seq_mask = tf.sequence_mask(n_on)\n int_seq_mask = tf.to_int32(seq_mask)\n max_n_on = tf.shape(seq_mask)[1]\n indices = tf.cumsum(tf.reshape(int_seq_mask, (-1,)), exclusive=True, reverse=False)\n\n # Make sure dummy indices at the end are within bounds\n indices = tf.minimum(indices, tf.shape(on_data)[0]-1)\n\n result = tf.gather(on_data, indices)\n result = tf.reshape(result, (batch_size, max_n_on, A))\n\n # zero out the extra elements we've gathered\n result *= tf.cast(seq_mask, result.dtype)[:, :, None]\n return result, n_on, seq_mask\n\n\ndef tf_inspect_cl():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path\")\n parser.add_argument(\"--names-only\", action=\"store_true\")\n args, _ = parser.parse_known_args()\n\n path = os.path.realpath(args.path)\n variables = get_tensors_from_checkpoint_file(path)\n\n if args.names_only:\n pprint.pprint(list(variables.keys()))\n else:\n pprint.pprint(variables)\n\n\nRNNCell = tf.nn.rnn_cell.RNNCell\n\n\ndef count_trainable_variables(variables=None, var_scope=None):\n assert (variables is None) != (var_scope is None)\n\n if var_scope is not None:\n variables = trainable_variables(var_scope, for_opt=True)\n\n return np.sum([np.prod(v.get_shape().as_list()) for v in variables])\n\n\ndef walk_variable_scopes(max_depth=None):\n def _fmt(i):\n return \"{:,}\".format(i)\n\n all_fixed = set(tf.get_collection(FIXED_COLLECTION, scope=\"\"))\n\n fixed = defaultdict(int)\n trainable = defaultdict(int)\n\n for v in trainable_variables(\"\", for_opt=False):\n n_variables = int(np.prod(v.get_shape().as_list()))\n\n if v in all_fixed:\n fixed[\"\"] += n_variables\n trainable[\"\"] += 0\n else:\n fixed[\"\"] += 0\n trainable[\"\"] += n_variables\n\n name_so_far = \"\"\n\n for token in v.name.split(\"/\")[:-1]:\n name_so_far += token\n if v in all_fixed:\n fixed[name_so_far] += n_variables\n trainable[name_so_far] += 0\n else:\n fixed[name_so_far] += 0\n trainable[name_so_far] += n_variables\n name_so_far += \"/\"\n\n table = [\"scope n_trainable n_fixed total\".split()]\n for scope in sorted(fixed, reverse=True):\n depth = sum(c == \"/\" for c in scope) + 1\n\n if max_depth is not None and depth > max_depth:\n continue\n\n table.append([\n scope,\n _fmt(trainable[scope]),\n _fmt(fixed[scope]),\n _fmt(trainable[scope] + fixed[scope])])\n\n print(\"TensorFlow variable scopes (down to maximum depth of {}):\".format(max_depth))\n print(tabulate(table, headers=\"firstrow\", tablefmt=\"fancy_grid\"))\n\n\ndef tf_normal_kl(q_mean, q_std, p_mean, p_std):\n return tf.log(p_std / q_std) + (q_std**2 + (q_mean - p_mean)**2) / (2 * p_std**2) - 0.5\n\n\ndef tf_mean_sum(t):\n \"\"\" Average over batch dimension, sum over all other dimensions \"\"\"\n return tf.reduce_mean(tf.reduce_sum(tf.layers.flatten(t), axis=1))\n\n\ndef tf_atleast_nd(array, n):\n diff = n - len(array.shape)\n if diff > 0:\n s = (Ellipsis,) + (None,) * diff\n array = array[s]\n return array\n\n\ndef resize_image_with_crop_or_pad(img, target_height, target_width):\n if tf.__version__ >= \"1.1\":\n return tf.image.resize_image_with_crop_or_pad(img, target_height, target_width)\n else:\n batch_size = tf.shape(img)[0]\n img_height = int(img.shape[1])\n img_width = int(img.shape[2])\n depth = int(img.shape[3])\n\n upper_height = 
int(np.ceil((target_height - img_height) / 2))\n upper = tf.zeros((batch_size, upper_height, img_width, depth))\n\n lower_height = int(np.floor((target_height - img_height) / 2))\n lower = tf.zeros((batch_size, lower_height, img_width, depth))\n\n img = tf.concat([upper, img, lower], axis=1)\n\n left_width = int(np.ceil((target_width - img_width) / 2))\n left = tf.zeros((batch_size, target_height, left_width, depth))\n\n right_width = int(np.floor((target_width - img_width) / 2))\n right = tf.zeros((batch_size, target_height, right_width, depth))\n\n img = tf.concat([left, img, right], axis=2)\n\n return img\n\n\ndef extract_glimpse_numpy_like(inp, glimpse_shape, glimpse_offsets, name=None, uniform_noise=None, fill_value=None):\n \"\"\" Based on: https://github.com/tensorflow/tensorflow/issues/2134#issuecomment-262525617\n\n Works like numpy with pixel coordinates starting at (0, 0), returns:\n inp[:, glimpse_offset[0] : glimpse_offset[0] + glimpse_size[0],\n glimpse_offset[1] : glimpse_offset[1] + glimpse_size[1], :]\n\n \"\"\"\n assert(len(glimpse_shape) == 2)\n inp_shape = tuple(inp.get_shape().as_list()) # includes batch and number of channels\n corrected_offsets = 2 * glimpse_offsets - np.array(inp_shape[1:3]) + np.array(glimpse_shape)\n glimpses = tf.image.extract_glimpse(\n inp, glimpse_shape, corrected_offsets, centered=True, normalized=False,\n uniform_noise=uniform_noise, name=name)\n\n if fill_value is not None:\n glimpse_offsets = tf.cast(glimpse_offsets, tf.int32)\n y_indices = tf.range(glimpse_shape[0])\n y_indices = tf.reshape(y_indices, (1, -1))\n y_indices += glimpse_offsets[:, 0:1]\n valid_y = tf.cast(tf.logical_and(0 <= y_indices, y_indices < tf.shape(inp)[1]), tf.float32)\n valid_y = tf.expand_dims(valid_y, axis=-1)\n valid_y = tf.expand_dims(valid_y, axis=-1)\n\n glimpses = valid_y * glimpses + (1 - valid_y) * fill_value\n\n x_indices = tf.range(glimpse_shape[1])\n x_indices = tf.reshape(x_indices, (1, -1))\n x_indices += glimpse_offsets[:, 1:2]\n valid_x = tf.cast(tf.logical_and(0 <= x_indices, x_indices < tf.shape(inp)[2]), tf.float32)\n valid_x = tf.expand_dims(valid_x, axis=1)\n valid_x = tf.expand_dims(valid_x, axis=-1)\n\n glimpses = valid_x * glimpses + (1 - valid_x) * fill_value\n\n return glimpses\n\n\ndef uninitialized_variables_initializer():\n print(\"\\nStarting variable init.\")\n sess = tf.get_default_session()\n\n print(\"\\nFinding uninitialized vars...\")\n import time\n start = time.time()\n global_vars = tf.global_variables()\n is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])\n uninitialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n\n print(\"Took {} seconds\".format(time.time() - start))\n print(\"\\nInitializing {} var arrays...\".format(len(uninitialized_vars)))\n start = time.time()\n uninit_init_op = tf.variables_initializer(uninitialized_vars)\n print(\"Took {} seconds.\".format(time.time() - start))\n return uninit_init_op\n\n\nFIXED_COLLECTION = \"FIXED_COLLECTION\"\n\n\ndef trainable_variables(scope, for_opt):\n if isinstance(scope, tf.VariableScope):\n scope = scope.name\n\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)\n\n if for_opt:\n fixed = set(tf.get_collection(FIXED_COLLECTION, scope=scope))\n variables = [v for v in variables if v not in fixed]\n\n return variables\n\n\nclass ScopedFunction(Parameterized):\n \"\"\"\n Parameters\n ----------\n scope: string or VariableScope instance\n The scope where we will build the variables, or a 
string giving the name of a variable\n scope to be created within the variable scope where this function is first called.\n\n Attributes\n ----------\n scope: VariableScope instance or None\n If a VariableScope is passed to __init__, it is stored here. Otherwise, the first time\n that this instance of ScopedFunction is called, a new variable scope is created inside the\n scope in which the function is called. The name of the new variable scope is given by self.name.\n initialized: bool\n False up until the end of the first time that this instance of ScopedFunction is called.\n\n \"\"\"\n fixed_values = Param(None)\n fixed_weights = Param(\"\")\n no_gradient = Param(\"\")\n\n def __init__(self, scope=None, **kwargs):\n if scope is None:\n scope = self.__class__.__name__\n\n if isinstance(scope, tf.VariableScope):\n self.name = scope.name.split(\"/\")[-1]\n self.scope = scope\n else:\n self.name = scope\n self.scope = None\n\n self.initialized = False\n self.path = None\n self.directory = None\n self.train_config = None\n self.was_loaded = None\n self.do_pretraining = False\n self.fixed_variables = False\n\n self.fixed_values = self.fixed_values or {}\n\n if isinstance(self.fixed_weights, str):\n self.fixed_weights = self.fixed_weights.split()\n\n if isinstance(self.no_gradient, str):\n self.no_gradient = self.no_gradient.split()\n\n print(\n \"\\nBuilding {}(name={}) with args:\\n{}\".format(\n self.__class__.__name__, self.name, pprint.pformat(self._params_at_creation_time)))\n\n def trainable_variables(self, for_opt):\n return trainable_variables(self.scope, for_opt)\n\n def resolve_scope(self):\n if self.scope is None:\n with tf.variable_scope(self.name):\n self.scope = tf.get_variable_scope()\n\n def _call(self, *args, **kwargs):\n raise Exception(\"NotImplemented\")\n\n def __call__(self, *args, **kwargs):\n self.resolve_scope()\n\n first_call = not self.initialized\n\n with tf.variable_scope(self.scope, reuse=self.initialized):\n if first_call:\n print(\"\\nEntering var scope '{}' for first time.\".format(self.scope.name))\n\n outp = self._call(*args, **kwargs)\n\n if first_call:\n s = \"Leaving var scope '{}' for first time.\".format(self.scope.name)\n if isinstance(outp, tf.Tensor):\n s += \" Actual output shape: {}.\".format(outp.shape)\n print(s)\n\n self._maybe_initialize()\n\n return outp\n\n def _maybe_initialize(self):\n \"\"\" Initialize the network once it has been built. 
\"\"\"\n\n if not self.initialized:\n if self.do_pretraining:\n from dps.train import load_or_train\n\n filename = \"{}_{}.chk\".format(self.name, self.param_hash)\n self.path = os.path.join(self.directory, filename)\n\n self.was_loaded = load_or_train(self.train_config, self.scope, self.path, target_var_scope=self.name)\n\n param_path = os.path.join(self.directory, \"{}_{}.params\".format(self.name, self.param_hash))\n\n if not os.path.exists(param_path):\n with open(param_path, 'w') as f:\n f.write(str(self.param_dict))\n\n if self.fixed_variables:\n for v in self.trainable_variables(False):\n tf.add_to_collection(FIXED_COLLECTION, v)\n\n self.initialized = True\n\n def maybe_build_subnet(self, network_name, key=None, builder=None, builder_name=None):\n existing = getattr(self, network_name, None)\n\n if existing is None:\n if builder is None:\n if builder_name is None:\n builder_name = \"build_\" + network_name\n\n builder = getattr(self, builder_name, None)\n\n if builder is None:\n builder = getattr(cfg, builder_name, None)\n\n if builder is None:\n raise AttributeError(\n \"No builder with name `{}` found for building subnet `{}`\".format(builder_name, network_name))\n\n network = builder(scope=network_name)\n setattr(self, network_name, network)\n\n if key is None:\n key = network_name\n\n if key in self.fixed_weights:\n network.fix_variables()\n\n return network\n else:\n return existing\n\n def set_pretraining_params(self, train_config, name_params=None, directory=None):\n if self.initialized:\n raise Exception(\"ScopedFunction with scope {} has already been initialized, \"\n \"it is an error to call `set_pretraining_params` at this point\")\n\n assert train_config is not None\n self.train_config = train_config\n\n self.do_pretraining = True\n\n if isinstance(name_params, str):\n name_params = name_params.split()\n name_params = sorted(name_params or [])\n self.param_hash = get_param_hash(train_config, name_params)\n\n self.directory = directory or os.path.join(cfg.local_experiments_dir, cfg.env_name)\n\n self.param_dict = OrderedDict((key, train_config[key]) for key in name_params)\n\n def fix_variables(self):\n if self.initialized:\n raise Exception(\"ScopedFunction with scope {} has already been initialized, \"\n \"it is an error to call `fix_variables` at this point\")\n self.fixed_variables = True\n\n def save(self, session, filename):\n updater_variables = {v.name: v for v in self.trainable_variables(for_opt=False)}\n saver = tf.train.Saver(updater_variables)\n path = saver.save(tf.get_default_session(), filename)\n return path\n\n def restore(self, session, path):\n updater_variables = {v.name: v for v in self.trainable_variables(for_opt=False)}\n saver = tf.train.Saver(updater_variables)\n saver.restore(tf.get_default_session(), path)\n\n\ndef get_param_hash(train_config, name_params):\n param_str = []\n for name in name_params:\n value = train_config[name]\n try:\n value = sorted(value)\n except (TypeError, ValueError):\n pass\n param_str.append(\"{}={}\".format(name, value))\n param_str = \"_\".join(param_str)\n param_hash = hashlib.sha1(param_str.encode()).hexdigest()\n return param_hash\n\n\nclass ScopedFunctionWrapper(ScopedFunction):\n \"\"\" Similar to ScopedFunction, but used in cases where the function we want\n to scope does not inherit from ScopedFunction. 
\"\"\"\n\n def __init__(self, function, scope=None):\n self.function = function\n super(ScopedFunctionWrapper, self).__init__(scope)\n\n def _call(self, inp, output_size, is_training):\n return self.function(inp, output_size, is_training)\n\n\nclass IdentityFunction(ScopedFunction):\n def _call(self, inp, output_size, is_training):\n return inp\n\n\nclass MLP(ScopedFunction):\n n_units = Param(None)\n fc_kwargs = Param(None)\n\n def _call(self, inp, output_size, is_training):\n from tensorflow.contrib.slim import fully_connected\n inp = tf.layers.flatten(inp)\n\n n_units = self.n_units or []\n fc_kwargs = self.fc_kwargs or {}\n fc_kwargs = fc_kwargs.copy()\n\n hidden = inp\n for i, nu in enumerate(n_units):\n hidden = fully_connected(hidden, nu, **fc_kwargs)\n\n _fc_kwargs = fc_kwargs.copy()\n _fc_kwargs['activation_fn'] = None\n\n try:\n output_dim = int(np.product([int(i) for i in output_size]))\n output_shape = output_size\n except Exception:\n output_dim = int(output_size)\n output_shape = (output_dim,)\n\n hidden = fully_connected(hidden, output_dim, **_fc_kwargs)\n hidden = tf.reshape(hidden, (tf.shape(inp)[0], *output_shape), name=\"mlp_out\")\n return hidden\n\n\nclass LeNet(ScopedFunction):\n def __init__(\n self, n_units=1024, dropout_keep_prob=0.5,\n conv_kwargs=None, fc_kwargs=None, scope=None):\n\n self.n_units = n_units\n self.dropout_keep_prob = dropout_keep_prob\n self.conv_kwargs = conv_kwargs or {}\n self.fc_kwargs = fc_kwargs or {}\n super(LeNet, self).__init__(scope)\n\n def _call(self, images, output_size, is_training):\n if len(images.shape) <= 1:\n raise Exception()\n\n if len(images.shape) == 2:\n s = int(np.sqrt(int(images.shape[1])))\n images = tf.reshape(images, (-1, s, s, 1))\n\n if len(images.shape) == 3:\n images = images[..., None]\n\n slim = tf.contrib.slim\n net = images\n net = slim.conv2d(net, 32, 5, scope='conv1', **self.conv_kwargs)\n net = slim.max_pool2d(net, 2, 2, scope='pool1')\n net = slim.conv2d(net, 64, 5, scope='conv2', **self.conv_kwargs)\n net = slim.max_pool2d(net, 2, 2, scope='pool2')\n\n trailing_dim = np.product([int(s) for s in net.shape[1:]])\n net = tf.reshape(net, (tf.shape(net)[0], trailing_dim))\n\n net = slim.fully_connected(net, self.n_units, scope='fc3', **self.fc_kwargs)\n net = slim.dropout(net, self.dropout_keep_prob, is_training=is_training, scope='dropout3')\n\n fc_kwargs = self.fc_kwargs.copy()\n fc_kwargs['activation_fn'] = None\n\n try:\n _output_size = output_size[0]\n assert len(output_size) == 1\n except Exception:\n _output_size = output_size\n\n net = slim.fully_connected(net, _output_size, scope='fc4', **fc_kwargs)\n return net\n\n\nclass VGGNet(ScopedFunction):\n\n def __init__(self, kind, scope=None):\n assert kind in 'a 16 19'.split()\n self.kind = kind\n super(VGGNet, self).__init__(scope)\n\n def _call(self, images, output_size, is_training):\n if len(images.shape) <= 1:\n raise Exception()\n if len(images.shape) == 2:\n s = int(np.sqrt(int(images.shape[1])))\n images = tf.reshape(images, (-1, s, s, 1))\n if len(images.shape) == 3:\n images = images[..., None]\n from tensorflow.contrib.slim.python.slim.nets.vgg import vgg_a, vgg_16, vgg_19\n\n try:\n _output_size = output_size[0]\n assert len(output_size) == 1\n except IndexError:\n _output_size = output_size\n\n if self.kind == 'a':\n return vgg_a(images, _output_size, is_training)\n elif self.kind == '16':\n return vgg_16(images, _output_size, is_training)\n elif self.kind == '19':\n return vgg_19(images, _output_size, is_training)\n else:\n raise 
Exception()\n\n\nclass ConvNet(ScopedFunction):\n \"\"\"\n Parameters\n ----------\n layers: list of dict\n Each entry supplies parameters for a layer of the network. Valid argument names are:\n kind\n filters (required, int)\n kernel_size (required, int or pair of ints)\n strides (defaults to 1, int or pair of ints)\n pool (defaults to False, bool, whether to apply 2x2 pooling with stride 2,\n pooling is never done on final layer)\n\n Uses 'padding' == valid.\n\n \"\"\"\n nonlinearities = dict(\n relu=tf.nn.relu,\n sigmoid=tf.nn.sigmoid,\n tanh=tf.nn.tanh,\n elu=tf.nn.elu,\n linear=lambda x: x,\n softmax=tf.nn.softmax\n )\n\n def __init__(self, layers, scope=None, **kwargs):\n self.layers = layers\n self.volumes = []\n super(ConvNet, self).__init__(scope)\n\n @staticmethod\n def _output_shape_1d(inp_dim, f, s, padding, pool):\n if padding == \"SAME\" or padding == \"RIGHT_ONLY\":\n if inp_dim % s == 0:\n p = f - s\n else:\n p = f - (inp_dim % s)\n\n out_dim = int((inp_dim + p - f) / s) + 1\n else:\n out_dim = int((inp_dim - f) / s) + 1\n return out_dim\n\n @staticmethod\n def predict_output_shape(input_shape, layers):\n \"\"\" Get spatial shape of the output given a spatial shape of the input. \"\"\"\n shape = [int(i) for i in input_shape]\n for layer in layers:\n kernel_size = layer['kernel_size']\n if isinstance(kernel_size, tuple):\n f0, f1 = kernel_size\n else:\n f0, f1 = kernel_size, kernel_size\n\n strides = layer['strides']\n if isinstance(strides, tuple):\n strides0, strides1 = strides\n else:\n strides0, strides1 = strides, strides\n\n padding = layer.get('padding', 'VALID')\n pool = layer.get('pool', False)\n\n shape[0] = ConvNet._output_shape_1d(shape[0], f0, strides0, padding, pool)\n shape[1] = ConvNet._output_shape_1d(shape[1], f1, strides1, padding, pool)\n\n return shape\n\n @staticmethod\n def predict_padding(input_shape, layer):\n \"\"\" Predict padding that would be used by the \"SAME\" tensorflow padding settings. 
\"\"\"\n shape = [int(i) for i in input_shape]\n kernel_size = layer['kernel_size']\n if isinstance(kernel_size, tuple):\n f0, f1 = kernel_size\n else:\n f0, f1 = kernel_size, kernel_size\n\n strides = layer['strides']\n if isinstance(strides, tuple):\n strides0, strides1 = strides\n else:\n strides0, strides1 = strides, strides\n\n if shape[0] % strides0 == 0:\n pad0 = max(f0 - strides0, 0)\n else:\n pad0 = max(f0 - (shape[0] % strides0), 0)\n\n if shape[1] % strides1 == 0:\n pad1 = max(f1 - strides1, 0)\n else:\n pad1 = max(f1 - (shape[1] % strides1), 0)\n\n return pad0, pad1\n\n @staticmethod\n def _apply_layer(volume, layer_spec, idx, is_final, is_training):\n from tensorflow.contrib.slim import fully_connected\n kind = layer_spec.get('kind', 'conv')\n\n if kind == 'conv':\n filters = layer_spec['filters']\n if filters is None:\n filters = tf_shape(volume)[-1]\n strides = layer_spec['strides']\n transpose = layer_spec.get('transpose', False)\n kernel_size = layer_spec['kernel_size']\n padding = layer_spec.get('padding', 'VALID')\n dropout = layer_spec.get('dropout', False)\n pool = layer_spec.get('pool', False)\n nl_string = layer_spec.get('nl', 'relu')\n nl = ConvNet.nonlinearities[nl_string or 'relu']\n\n if transpose:\n volume = tf.layers.conv2d_transpose(\n volume, filters=filters, kernel_size=kernel_size,\n strides=strides, padding=padding, name=\"fcn-conv_transpose{}\".format(idx))\n else:\n if padding == \"RIGHT_ONLY\":\n pad0, pad1 = ConvNet.predict_padding(volume.shape[1:3], layer_spec)\n paddings = [[0, 0], [0, pad0], [0, pad1], [0, 0]]\n volume = tf.pad(volume, paddings, mode=\"CONSTANT\")\n padding = \"VALID\"\n\n volume = tf.layers.conv2d(\n volume, filters=filters, kernel_size=kernel_size,\n strides=strides, padding=padding, name=\"fcn-conv{}\".format(idx))\n\n if not is_final:\n volume = nl(volume, name=\"fcn-{}{}\".format(nl_string, idx))\n\n if pool:\n volume = tf.layers.max_pooling2d(\n volume, pool_size=2, strides=2, name='fcn-pool{}'.format(idx))\n\n if dropout:\n volume = tf.contrib.slim.dropout(volume, 0.5, is_training=is_training)\n\n elif kind == 'fc':\n n_units = layer_spec['n_units']\n output_shape = layer_spec.get('output_shape', None)\n nl_string = layer_spec.get('nl', 'relu')\n nl = ConvNet.nonlinearities[nl_string or 'relu']\n\n volume = tf.layers.flatten(volume)\n volume = fully_connected(volume, n_units, activation_fn=nl)\n\n if output_shape is not None:\n batch_size = tf.shape(volume)[0]\n volume = tf.reshape(volume, (batch_size, *output_shape))\n elif kind == 'global_pool': # a global spatial pooling layer\n pool_kind = layer_spec.get('pool_kind', 'mean')\n keepdims = layer_spec.get('keepdims', False)\n\n if pool_kind == \"max\":\n volume = tf.reduce_max(volume, axis=(1, 2), keepdims=keepdims)\n elif pool_kind == \"mean\":\n volume = tf.reduce_mean(volume, axis=(1, 2), keepdims=keepdims)\n elif pool_kind == \"sum\":\n volume = tf.reduce_sum(volume, axis=(1, 2), keepdims=keepdims)\n\n layer_string = ', '.join(\"{}={}\".format(k, v) for k, v in sorted(layer_spec.items()))\n output_shape = tuple(int(i) for i in volume.shape[1:])\n print(\"CNN >>> Applying layer {} of kind {}: {}. 
Output shape: {}\".format(idx, kind, layer_string, output_shape))\n\n return volume\n\n def _call(self, inp, final_n_channels, is_training):\n volume = inp\n self.volumes = [volume]\n\n for i, layer in enumerate(self.layers):\n final = i == len(self.layers) - 1\n\n if final and final_n_channels is not None:\n layer['filters'] = final_n_channels\n\n volume = self._apply_layer(volume, layer, i, final, is_training)\n self.volumes.append(volume)\n\n return volume\n\n\nclass GridConvNet(ConvNet):\n def __init__(self, layers, n_grid_dims=2, scope=None, **kwargs):\n self.layers = layers\n self.n_grid_dims = n_grid_dims\n self.volumes = []\n super(ConvNet, self).__init__(scope)\n\n @staticmethod\n def compute_receptive_field(ndim, layers):\n j = np.array((1,)*ndim)\n r = np.array((1,)*ndim)\n receptive_fields = []\n\n for layer in layers:\n kernel_size = np.array(layer['kernel_size'])\n stride = np.array(layer['strides'])\n r = r + (kernel_size-1) * j\n j = j * stride\n receptive_fields.append(dict(size=r, translation=j))\n return receptive_fields\n\n def _call(self, inp, final_n_channels, is_training):\n volume = inp\n self.volumes = [volume]\n\n receptive_fields = self.compute_receptive_field(len(inp.shape)-2, self.layers)\n print(\"Receptive fields for {} (GridConvNet)\".format(self.name))\n pprint.pprint(receptive_fields)\n\n grid_cell_size = receptive_fields[-1][\"translation\"][:self.n_grid_dims]\n rf_size = receptive_fields[-1][\"size\"][:self.n_grid_dims]\n pre_padding = np.floor(rf_size / 2 - grid_cell_size / 2).astype('i')\n image_shape = np.array([int(i) for i in inp.shape[1:self.n_grid_dims+1]])\n n_grid_cells = np.ceil(image_shape / grid_cell_size).astype('i')\n required_image_size = rf_size + (n_grid_cells-1) * grid_cell_size\n post_padding = required_image_size - image_shape - pre_padding\n\n print(\"{} (GridConvNet):\".format(self.name))\n print(\"rf_size: {}\".format(rf_size))\n print(\"grid_cell_size: {}\".format(grid_cell_size))\n print(\"n_grid_cells: {}\".format(n_grid_cells))\n print(\"pre_padding: {}\".format(pre_padding))\n print(\"post_padding: {}\".format(post_padding))\n print(\"required_image_size: {}\".format(required_image_size))\n\n padding = (\n [[0, 0]]\n + list(zip(pre_padding, post_padding))\n + [[0, 0]] * (len(inp.shape) - 1 - self.n_grid_dims)\n )\n\n volume = tf.pad(inp, padding, mode=\"CONSTANT\")\n\n for i, layer in enumerate(self.layers):\n padding_type = layer.get('padding', 'VALID')\n if padding_type != 'VALID':\n raise Exception(\"Layer {} trying to use padding type {} in GridConvNet.\".format(i, padding_type))\n\n final = i == len(self.layers) - 1\n\n if final and final_n_channels is not None:\n layer['filters'] = final_n_channels\n\n volume = self._apply_layer(volume, layer, i, final, is_training)\n self.volumes.append(volume)\n\n return volume, n_grid_cells, grid_cell_size\n\n\nclass GridTransposeConvNet(GridConvNet):\n \"\"\" Incomplete, particularly figuring out the correct amount of padding...\"\"\"\n\n def _call(self, inp, output_size, is_training):\n volume = inp\n self.volumes = [volume]\n\n reverse_layers = self.layers[::-1]\n\n *image_shape, final_n_channels = output_size\n\n receptive_fields = self.compute_receptive_field(len(inp.shape)-2, reverse_layers)\n print(\"Inverse receptive fields for {} (GridTransposeConvNet)\".format(self.name))\n pprint.pprint(receptive_fields)\n\n grid_cell_size = receptive_fields[-1][\"translation\"][:self.n_grid_dims]\n rf_size = receptive_fields[-1][\"size\"][:self.n_grid_dims]\n pre_padding = np.floor(rf_size 
/ 2 - grid_cell_size / 2).astype('i')\n image_shape = np.array([int(i) for i in inp.shape[1:self.n_grid_dims+1]])\n n_grid_cells = np.ceil(image_shape / grid_cell_size).astype('i')\n required_image_size = rf_size + (n_grid_cells-1) * grid_cell_size\n post_padding = required_image_size - image_shape - pre_padding\n\n print(\"{} (GridTransposeConvNet):\".format(self.name))\n print(\"rf_size: {}\".format(rf_size))\n print(\"grid_cell_size: {}\".format(grid_cell_size))\n print(\"n_grid_cells: {}\".format(n_grid_cells))\n print(\"pre_padding: {}\".format(pre_padding))\n print(\"post_padding: {}\".format(post_padding))\n print(\"required_image_size: {}\".format(required_image_size))\n\n for i, layer in enumerate(self.layers):\n padding_type = layer.get('padding', 'VALID')\n if padding_type != 'VALID':\n raise Exception(\"Layer {} trying to use padding type {} in GridTransposeConvNet.\".format(i, padding_type))\n\n final = i == len(self.layers) - 1\n\n if final and final_n_channels is not None:\n layer['filters'] = final_n_channels\n\n volume = self._apply_layer(volume, layer, i, final, is_training)\n self.volumes.append(volume)\n\n slices = (\n [slice(None)]\n + [slice(pre, post) for pre, post in zip(pre_padding, post_padding)]\n + [slice(None)] * (len(inp.shape) - 1 - self.n_grid_dims)\n )\n\n volume = volume[slices]\n\n return volume, n_grid_cells, grid_cell_size\n\n\nclass RecurrentGridConvNet(GridConvNet):\n \"\"\" Operates on video rather than images. Apply a GridConvNet to each frame independently,\n and integrate information over time by using a recurrent network, where each spatial location\n has its own hidden state. The same recurrent network is used to update all spatial locations.\n\n \"\"\"\n build_cell = Param()\n bidirectional = Param()\n\n forward_cell = None\n backward_cell = None\n\n def _call(self, inp, final_n_channels, is_training):\n B, T, *rest = tf_shape(inp)\n inp = tf.reshape(inp, (B*T, *rest))\n\n processed, n_grid_cells, grid_cell_size = super()._call(inp, final_n_channels, is_training)\n\n _, H, W, C = tf_shape(processed)\n processed = tf.reshape(processed, (T, B, H, W, C))\n\n processed = tf.transpose(processed, (1, 0, 2, 3, 4))\n processed = tf.reshape(processed, (T, B*H*W, C))\n\n if self.forward_cell is None:\n self.forward_cell = self.build_cell(n_hidden=final_n_channels, scope=\"forward_cell\")\n\n if self.bidirectional:\n if self.backward_cell is None:\n self.backward_cell = self.build_cell(n_hidden=final_n_channels, scope=\"backward_cell\")\n\n (fw_output, bw_output), final_state = bidirectional_dynamic_rnn(\n self.forward_cell, self.backward_cell, processed,\n initial_state_fw=self.forward_cell.zero_state(B*H*W, tf.float32),\n initial_state_bw=self.backward_cell.zero_state(B*H*W, tf.float32),\n parallel_iterations=1, swap_memory=False, time_major=True)\n output = (fw_output + bw_output) / 2\n\n else:\n output, final_state = dynamic_rnn(\n self.forward_cell, processed, initial_state=self.forward_cell.zero_state(B*H*W, tf.float32),\n parallel_iterations=1, swap_memory=False, time_major=True)\n\n output = tf.reshape(output, (T, B, H, W, C))\n output = tf.transpose(output, (1, 0, 2, 3, 4))\n return output, n_grid_cells, grid_cell_size\n\n\ndef pool_objects(op, objects, mask):\n batch_size = tf.shape(objects)[0]\n n_objects = tf.reduce_prod(tf.shape(objects)[1:-1])\n obj_dim = int(objects.shape[-1])\n\n mask = tf.reshape(mask, (batch_size, n_objects, 1))\n\n if op == \"concat\" or op is None:\n objects *= tf.to_float(mask)\n pooled_objects = tf.reshape(objects, 
(batch_size, n_objects*obj_dim))\n elif op == \"sum\":\n objects *= tf.to_float(mask)\n pooled_objects = tf.reduce_sum(objects, axis=1, keepdims=False)\n elif op == \"max\":\n mask = tf.tile(tf.cast(mask, tf.bool), (1, 1, obj_dim))\n objects = tf.where(mask, objects, -np.inf * tf.ones_like(objects))\n pooled_objects = tf.reduce_max(objects, axis=1, keepdims=False)\n else:\n raise Exception(\"Unknown symmetric op: {}. \"\n \"Valid values are: None, concat, mean, max.\".format(op))\n return pooled_objects\n\n\nclass ObjectNetwork(ScopedFunction):\n n_repeats = Param()\n d = Param()\n symmetric_op = Param()\n layer_norm = Param()\n use_mask = Param(help=\"If True, extract mask from objects by taking first element.\")\n\n input_network = None\n object_network = None\n output_network = None\n\n def process_objects(self, batch_size, n_objects, objects, is_training):\n if self.object_network is None:\n self.object_network = dps.cfg.build_on_object_network(scope=\"object_network\")\n\n for i in range(self.n_repeats):\n prev_objects = objects\n objects = self.object_network(prev_objects, self.d, is_training)\n objects += prev_objects\n\n if self.layer_norm:\n objects = tf.contrib.layers.layer_norm(objects, self.d)\n\n return objects\n\n def _call(self, inp, output_size, is_training):\n if self.input_network is None:\n self.input_network = dps.cfg.build_on_input_network(scope=\"input_network\")\n if self.output_network is None:\n self.output_network = dps.cfg.build_on_output_network(scope=\"output_network\")\n\n if self.use_mask:\n final_dim = int(inp.shape[-1])\n mask, inp = tf.split(inp, (1, final_dim-1), axis=-1)\n inp, _, mask = apply_mask_and_group_at_front(inp, mask)\n else:\n mask = tf.ones_like(inp[..., 0])\n\n batch_size = tf.shape(inp)[0]\n n_objects = tf.reduce_prod(tf.shape(inp)[1:-1])\n obj_dim = int(inp.shape[-1])\n\n inp = tf.reshape(inp, (batch_size*n_objects, obj_dim))\n objects = self.input_network(inp, self.d, is_training)\n\n objects = self.process_objects(batch_size, n_objects, objects, is_training)\n\n objects = tf.reshape(objects, (batch_size, n_objects, self.d))\n mask = tf.reshape(mask, (batch_size, n_objects))\n pooled_objects = pool_objects(self.symmetric_op, objects, mask)\n return self.output_network(pooled_objects, output_size, is_training)\n\n\nclass AttentionalRelationNetwork(ObjectNetwork):\n \"\"\" Implements one of the \"attention blocks\" from \"Relational Deep Reinforcement Learning\". 
\"\"\"\n n_heads = Param()\n\n query_network = None\n key_network = None\n value_network = None\n\n def process_objects(self, batch_size, n_objects, objects, is_training):\n if self.query_network is None:\n self.query_network = dps.cfg.build_arn_network(scope=\"query_network\")\n if self.key_network is None:\n self.key_network = dps.cfg.build_arn_network(scope=\"key_network\")\n if self.value_network is None:\n self.value_network = dps.cfg.build_arn_network(scope=\"value_network\")\n if self.object_network is None:\n self.object_network = dps.cfg.build_arn_object_network(scope=\"object_network\")\n\n for i in range(self.n_repeats):\n a = []\n for h in range(self.n_heads):\n query = self.query_network(objects, self.d, is_training)\n query = tf.reshape(query, (batch_size, n_objects, self.d))\n if self.layer_norm:\n query = tf.contrib.layers.layer_norm(query, self.d)\n\n key = self.key_network(objects, self.d, is_training)\n key = tf.reshape(key, (batch_size, n_objects, self.d))\n if self.layer_norm:\n key = tf.contrib.layers.layer_norm(key, self.d)\n\n value = self.value_network(objects, self.d, is_training)\n value = tf.reshape(value, (batch_size, n_objects, self.d))\n if self.layer_norm:\n value = tf.contrib.layers.layer_norm(value, self.d)\n\n s = tf.matmul(query, key, transpose_b=True)\n w = tf.nn.softmax(s/np.sqrt(self.d), axis=2)\n _a = tf.matmul(w, value)\n\n a.append(_a)\n\n a = tf.concat(a, axis=2)\n a = tf.reshape(a, (batch_size * n_objects, self.n_heads * self.d))\n\n prev_objects = objects\n objects = self.object_network(a, self.d, is_training)\n objects += prev_objects\n\n if self.layer_norm:\n objects = tf.contrib.layers.layer_norm(objects, self.d)\n\n return objects\n\n\nclass RelationNetwork(ScopedFunction):\n \"\"\" TODO: make this inherit from ObjectNetwork. \"\"\"\n f = None\n g = None\n\n f_dim = Param()\n symmetric_op = Param()\n\n def _call(self, inp, output_size, is_training):\n # Assumes objects range of all but the first and last dimensions\n batch_size = tf.shape(inp)[0]\n spatial_shape = inp.shape[1:-1]\n n_objects = int(np.prod(spatial_shape))\n obj_dim = int(inp.shape[-1])\n inp = tf.reshape(inp, (batch_size, n_objects, obj_dim))\n\n if self.f is None:\n self.f = dps.cfg.build_relation_network_f(scope=\"relation_network_f\")\n\n if self.g is None:\n self.g = dps.cfg.build_relation_network_g(scope=\"relation_network_g\")\n\n f_inputs = []\n for i in range(n_objects):\n for j in range(n_objects):\n f_inputs.append(tf.concat([inp[:, i, :], inp[:, j, :]], axis=1))\n f_inputs = tf.concat(f_inputs, axis=0)\n\n f_output = self.f(f_inputs, self.f_dim, is_training)\n f_output = tf.split(f_output, n_objects**2, axis=0)\n\n if self.symmetric_op == \"concat\" or self.symmetric_op is None:\n g_input = tf.concat(f_output, axis=1)\n elif self.symmetric_op == \"mean\":\n g_input = tf.stack(f_output, axis=0)\n g_input = tf.reduce_mean(g_input, axis=0, keepdims=False)\n elif self.symmetric_op == \"max\":\n g_input = tf.stack(f_output, axis=0)\n g_input = tf.reduce_max(g_input, axis=0, keepdims=False)\n else:\n raise Exception(\"Unknown symmetric op for RelationNetwork: {}. 
\"\n \"Valid values are: None, concat, mean, max.\".format(self.symmetric_op))\n\n return self.g(g_input, output_size, is_training)\n\n\nclass VectorQuantization(ScopedFunction):\n H = Param()\n W = Param()\n K = Param()\n D = Param()\n common_embedding = Param(help=\"If True, all latent variables share a common set of embedding vectors.\")\n\n _embedding = None\n\n def __call__(self, inp, output_size, is_training):\n if self._embedding is None:\n initializer = tf.truncated_normal_initializer(stddev=0.1)\n shape = (self.K, self.D)\n if not self.common_embedding:\n shape = (self.H, self.W,) + shape\n self._embedding = tf.get_variable(\"embedding\", shape, initializer=initializer)\n\n self.z_e = inp\n\n if self.common_embedding:\n # self._embedding has shape (K, D), i.e. same dictionary used for all latents\n embedding = self._embedding[None, None, None, ...]\n elif len(self._embedding.shape) == 4:\n # self._embedding has shape (H, W, K, D), i.e. different dictionary for each latent\n embedding = self._embedding[None, ...]\n # shape of embedding should now be (1, H, W, K, D) either way\n\n z_e = self.z_e[..., None, :] # (batch, H, W, 1, D)\n sum_squared_error = tf.reduce_sum((z_e - embedding) ** 2, axis=-1)\n self.k = k = tf.argmin(sum_squared_error, axis=-1) # (batch, H, W)\n one_hot_k = tf.stop_gradient(tf.one_hot(k, self.K)[..., None])\n self.z_q = tf.reduce_sum(self._embedding[None, ...] * one_hot_k, axis=3) # (batch, H, W, D)\n\n # On the forward pass z_q gets sent through, but the gradient gets sent back to z_e\n return tf.stop_gradient(self.z_q - self.z_e) + self.z_e\n\n\nclass VQ_ConvNet(ConvNet):\n H = Param()\n W = Param()\n K = Param()\n D = Param()\n\n common_embedding = Param(help=\"If True, all latent variables share a common set of embedding vectors.\")\n\n _vq = None\n\n def _call(self, inp, output_size, is_training):\n if self._vq is None:\n self._vq = VectorQuantization(\n H=self.H, W=self.W, K=self.K, D=self.D,\n common_embedding=self.common_embedding)\n\n inp = self._vq(inp, (self.H, self.W, self.D), is_training)\n return super(VQ_ConvNet, self)._call(inp, output_size, is_training)\n\n\nclass SalienceMap(ScopedFunction):\n def __init__(\n self, n_locs, func, output_shape, std=None,\n flatten_output=False, scope=None):\n self.n_locs = n_locs\n self.func = func\n self.output_shape = output_shape\n self.std = std\n self.flatten_output = flatten_output\n super(SalienceMap, self).__init__(scope)\n\n def _call(self, inp, output_size, is_training):\n if self.std is None:\n func_output = self.func(inp, self.n_locs*5, is_training)\n else:\n func_output = self.func(inp, self.n_locs*3, is_training)\n\n y = (np.arange(self.output_shape[0]).astype('f') + 0.5) / self.output_shape[0]\n x = (np.arange(self.output_shape[1]).astype('f') + 0.5) / self.output_shape[1]\n yy, xx = tf.meshgrid(y, x, indexing='ij')\n yy = yy[None, ...]\n xx = xx[None, ...]\n output = None\n\n params = tf.nn.sigmoid(func_output/10.)\n\n per_loc_params = tf.split(params, self.n_locs, axis=1)\n for p in per_loc_params:\n if self.std is None:\n weight, mu_y, mu_x, std_y, std_x = tf.unstack(p, axis=1)\n std_y = std_y[:, None, None]\n std_x = std_x[:, None, None]\n else:\n weight, mu_y, mu_x = tf.unstack(p, axis=1)\n try:\n std_y = float(self.std)\n std_x = float(self.std)\n except (TypeError, ValueError):\n std_y, std_x = self.std\n\n weight = weight[:, None, None]\n mu_y = mu_y[:, None, None]\n mu_x = mu_x[:, None, None]\n\n new = weight * tf.exp(\n 0.5 * (\n 0.\n - ((yy - mu_y)/std_y)**2\n - ((xx - 
mu_x)/std_x)**2\n )\n )\n\n if output is None:\n output = new\n else:\n output = tf.maximum(new, output)\n\n if self.flatten_output:\n output = tf.reshape(\n output,\n (tf.shape(output)[0], int(np.prod(output.shape[1:])))\n )\n\n return output\n\n\nclass ScopedCell(ScopedFunction):\n @property\n def state_size(self):\n raise Exception(\"NotImplemented\")\n\n @property\n def output_size(self):\n raise Exception(\"NotImplemented\")\n\n def zero_state(self, batch_size, dtype):\n raise Exception(\"NotImplemented\")\n\n\nclass ScopedCellWrapper(ScopedCell):\n \"\"\" Similar to ScopedCell, but used in cases where the cell we want to scope does not inherit from ScopedCell. \"\"\"\n def __init__(self, cell, scope=None, **kwargs):\n self.cell = cell\n super(ScopedCellWrapper, self).__init__(scope=scope, **kwargs)\n\n def _call(self, inp, state):\n return self.cell(inp, state)\n\n @property\n def state_size(self):\n return self.cell.state_size\n\n @property\n def output_size(self):\n return self.cell.output_size\n\n def zero_state(self, batch_size, dtype):\n return self.cell.zero_state(batch_size, dtype)\n\n\nclass FixedController(ScopedCell):\n \"\"\" A controller that outputs a fixed sequence of actions.\n\n Parameters\n ----------\n action_sequence: ndarray (n_timesteps,) + action_shape\n t-th row gives the action this controller will select at time t.\n\n \"\"\"\n def __init__(self, action_sequence, name=\"fixed_controller\"):\n self.action_sequence = np.array(action_sequence)\n super(FixedController, self).__init__(name)\n\n def _call(self, inp, state):\n action_seq = tf.constant(self.action_sequence, tf.float32)\n int_state = tf.squeeze(tf.cast(state, tf.int32), axis=1)\n actions = tf.gather(action_seq, int_state)\n\n return actions, state + 1\n\n def __len__(self):\n return len(self.action_sequence)\n\n @property\n def state_size(self):\n return 1\n\n @property\n def output_size(self):\n return self.action_sequence.shape[1]\n\n def zero_state(self, batch_size, dtype):\n return tf.cast(tf.fill((batch_size, 1), 0), dtype)\n\n\nclass FixedDiscreteController(ScopedCell):\n \"\"\" A controller that outputs a fixed sequence of actions.\n\n Parameters\n ----------\n action_sequence: list of int\n t-th entry gives the idx of the action this controller will select at time t.\n n_actions: int\n Number of actions.\n\n \"\"\"\n def __init__(self, action_sequence, n_actions, name=\"fixed_discrete_controller\"):\n self.action_sequence = np.array(action_sequence)\n self.n_actions = n_actions\n super(FixedDiscreteController, self).__init__(name)\n\n def _call(self, inp, state):\n action_seq = tf.constant(self.action_sequence, tf.int32)\n int_state = tf.cast(state, tf.int32)\n action_idx = tf.gather(action_seq, int_state)\n actions = tf.one_hot(tf.reshape(action_idx, (-1,)), self.n_actions)\n return actions, state + 1\n\n def __len__(self):\n return len(self.action_sequence)\n\n @property\n def state_size(self):\n return 1\n\n @property\n def output_size(self):\n return self.n_actions\n\n def zero_state(self, batch_size, dtype):\n return tf.cast(tf.fill((batch_size, 1), 0), dtype)\n\n\nclass NullCell(ScopedCell):\n \"\"\" A cell with no meaningful output. 
\"\"\"\n def __init__(self, output_size=0, name=\"null_cell\"):\n self._output_size = output_size\n super(NullCell, self).__init__(name)\n\n def _call(self, inp, state):\n batch_size = tf.shape(inp)[0]\n return tf.zeros((batch_size, self.output_size)), tf.zeros((batch_size, 1))\n\n @property\n def state_size(self):\n return 1\n\n @property\n def output_size(self):\n return self._output_size\n\n def zero_state(self, batch_size, dtype):\n return tf.zeros((batch_size, self.output_size), dtype=dtype)\n\n\nclass CompositeCell(ScopedCell):\n \"\"\" A wrapper around a cell that adds additional transformations to the input and output.\n\n Parameters\n ----------\n cell: instance of RNNCell\n The cell to wrap.\n outp: callable (Tensor, int) -> Tensor\n Maps from an input tensor and an output size to an output tensor.\n output_size: int\n The size of the output, passed as the second argument when calling ``output``.\n inp: callable (Tensor, int) -> Tensor\n Maps from an input tensor and an output size to a new input tensor for cell.\n inp_size: int\n Size of the vector that `input` maps to\n Maps from an input tensor and an output size to a new input tensor for cell.\n\n \"\"\"\n def __init__(self, cell, outp, output_size, inp=None, name=\"composite_cell\"):\n self.cell = cell\n self.outp = outp\n self._output_size = output_size\n self.inp = inp\n\n super(CompositeCell, self).__init__(name)\n\n def _call(self, inp, state):\n if self.inp is not None:\n inp = self.inp(inp)\n output, new_state = self.cell(inp, state)\n return self.outp(output, self._output_size, False), new_state\n\n @property\n def state_size(self):\n return self.cell.state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n def zero_state(self, batch_size, dtype):\n return self.cell.zero_state(batch_size, dtype)\n\n\nclass FeedforwardCell(ScopedCell):\n \"\"\" A wrapper around a feedforward network that turns it into an RNNCell with a dummy state.\n\n Parameters\n ----------\n ff: callable (Tensor, int) -> Tensor\n A function that generates the tensorflow ops implementing the\n feedforward network we want to wrap. 
Maps from an input tensor\n and an output size to an output tensor.\n output_size: int\n The size of the output, passed as the second argument when calling ``output``.\n\n \"\"\"\n ignore_state = True\n\n def __init__(self, ff, output_size, name=\"feedforward_cell\"):\n self.ff = ff\n self._output_size = output_size\n\n super(FeedforwardCell, self).__init__(name)\n\n def _call(self, inp, state):\n output = self.ff(inp, self._output_size, False)\n return output, tf.zeros((tf.shape(inp)[0], 1))\n\n @property\n def state_size(self):\n return 1\n\n @property\n def output_size(self):\n return self._output_size\n\n def zero_state(self, batch_size, dtype):\n return tf.zeros((batch_size, 1))\n\n\ndef print_variables(collection, scope):\n g = tf.get_default_graph()\n variables = g.get_collection(collection, scope=scope)\n sess = tf.get_default_session()\n for v in variables:\n print(\"\\n\")\n print(v.name)\n print(sess.run(v))\n\n\ndef add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):\n \"\"\"Taken from tensorflow.\n\n Adds scaled noise from a 0-mean normal distribution to gradients.\n\n \"\"\"\n gradients, variables = zip(*grads_and_vars)\n noisy_gradients = []\n for gradient in gradients:\n if gradient is None:\n noisy_gradients.append(None)\n continue\n if isinstance(gradient, ops.IndexedSlices):\n gradient_shape = gradient.dense_shape\n else:\n gradient_shape = gradient.get_shape()\n noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale\n noisy_gradients.append(gradient + noise)\n return noisy_gradients\n\n\ndef lst_to_vec(lst):\n if isinstance(lst[0], np.ndarray):\n return np.concatenate([np.reshape(v, (-1,)) for v in lst], axis=0)\n elif isinstance(lst[0], tf.Tensor) or isinstance(lst[0], tf.Variable):\n return tf.concat([tf.reshape(v, (-1,)) for v in lst], axis=0)\n else:\n raise Exception()\n\n\ndef vec_to_lst(vec, reference):\n if isinstance(vec, np.ndarray):\n splits = np.split(vec, [r.size for r in reference])\n return [np.reshape(v, r.shape) for v, r in zip(splits, reference)]\n elif isinstance(vec, tf.Tensor) or isinstance(vec, tf.Variable):\n splits = tf.split(vec, [tf.size(r) for r in reference])\n return [tf.reshape(v, tf.shape(r)) for v, r in zip(splits, reference)]\n else:\n raise Exception()\n\n\ndef get_scheduled_values():\n sess = tf.get_default_session()\n return getattr(sess, \"scheduled_values\", {})\n\n\ndef build_scheduled_value(schedule, name=None, global_step=None, dtype=None):\n \"\"\"\n Parameters\n ----------\n schedule: str\n String which returns a schedule object when eval-ed. One exception is that\n constants can be specified by simply supplying the constant value,\n with no kind string.\n name: str\n Name to use for the output op. 
Also creates a record in\n `tf.get_default_session().scheduled_values` with this name\n dtype: object convertible to tf.DType\n Will cast output value to this dtype.\n\n \"\"\"\n op_name = name + \"_schedule\" if name else None\n\n schedule = eval_schedule(schedule)\n assert isinstance(schedule, Schedule), \"{} is not a schedule instance.\".format(schedule)\n\n global_step = tf.train.get_or_create_global_step() if global_step is None else global_step\n scheduled_value = schedule.build(global_step)\n\n if dtype is not None:\n dtype = tf.as_dtype(np.dtype(dtype))\n scheduled_value = tf.cast(scheduled_value, dtype, name=op_name)\n else:\n scheduled_value = tf.cast(scheduled_value, tf.float32, name=op_name)\n\n if name is not None:\n sess = tf.get_default_session()\n if not hasattr(sess, \"scheduled_values\"):\n sess.scheduled_values = {}\n sess.scheduled_values[name] = scheduled_value\n\n return scheduled_value\n\n\ndef eval_schedule(schedule):\n try:\n schedule = \"Constant({})\".format(float(schedule))\n except (TypeError, ValueError):\n pass\n\n if isinstance(schedule, str):\n schedule = eval(schedule)\n\n return schedule\n\n\nclass Schedule(object):\n pass\n\n\nclass RepeatSchedule(Schedule):\n def __init__(self, schedule, period):\n self.schedule = schedule\n self.period = period\n\n def build(self, t):\n return self.schedule.build(t % self.period)\n\n\nclass Exponential(Schedule):\n def __init__(self, start, end, decay_steps, decay_rate, staircase=False, log=False):\n self.start = start\n self.end = end\n self.decay_steps = decay_steps\n self.decay_rate = decay_rate\n self.staircase = staircase\n self.log = log\n\n assert isinstance(self.decay_steps, int)\n assert self.decay_steps > 1\n assert 0 <= self.decay_rate <= 1\n\n def build(self, t):\n if self.staircase:\n t = tf.to_float(t // self.decay_steps)\n else:\n t = t / self.decay_steps\n value = (self.start - self.end) * (self.decay_rate ** t) + self.end\n\n if self.log:\n value = tf.log(value + 1e-6)\n\n return value\n\n\nclass Exp(Exponential):\n pass\n\n\nclass Polynomial(Schedule):\n def __init__(self, start, end, decay_steps, power=1.0):\n self.start = start\n self.end = end\n self.decay_steps = decay_steps\n self.power = power\n\n assert isinstance(self.decay_steps, int)\n assert self.decay_steps > 1\n assert power > 0\n\n def build(self, t):\n t = tf.minimum(tf.cast(self.decay_steps, tf.int64), t)\n return (self.start - self.end) * ((1 - t / self.decay_steps) ** self.power) + self.end\n\n\nclass Poly(Polynomial):\n pass\n\n\nclass Reciprocal(Schedule):\n def __init__(self, start, end, decay_steps, gamma=1.0, staircase=False):\n self.start = start\n self.end = end\n self.decay_steps = decay_steps\n self.gamma = gamma\n self.staircase = staircase\n\n assert isinstance(self.decay_steps, int)\n assert self.decay_steps > 1\n assert self.gamma > 0\n\n def build(self, t):\n if self.staircase:\n t = tf.to_float(t // self.decay_steps)\n else:\n t = t / self.decay_steps\n return ((self.start - self.end) / (1 + t))**self.gamma + self.end\n\n\nclass Constant(Schedule):\n def __init__(self, value):\n self.value = value\n\n def build(self, t):\n return tf.constant(self.value)\n\n\n# class MixtureSchedule(Schedule):\n# def __init__(self, components, reset_n_steps, shared_clock=False, p=None, name=None):\n# self.components = components\n# self.n_components = len(components)\n# self.reset_n_steps = reset_n_steps\n# self.shared_clock = shared_clock\n# self.p = p\n#\n# def build(self, t):\n# t = t.copy()\n# n_periods = int(np.ceil(len(t) / 
self.reset_n_steps))\n# offsets = [0] * self.n_components\n# signal = []\n# for i in range(n_periods):\n# if len(signal) >= len(t):\n# break\n# selected = np.random.choice(range(self.n_components), p=self.p)\n# if self.shared_clock:\n# start = offsets[0]\n# else:\n# start = offsets[selected]\n#\n# t_ = t[start:start+self.reset_n_steps]\n# _signal = self.components[selected].build(t_)\n# signal.extend(_signal)\n#\n# if self.shared_clock:\n# offsets[0] += self.reset_n_steps\n# else:\n# offsets[selected] += self.reset_n_steps\n# signal = np.array(signal).reshape(-1)[:len(t)]\n# return signal\n#\n#\n# class ChainSchedule(Schedule):\n# def __init__(self, components, component_n_steps, shared_clock=False):\n# self.components = components\n# self.n_components = len(components)\n# self.component_n_steps = component_n_steps\n# self.shared_clock = shared_clock\n#\n# def build(self, t):\n# try:\n# int(self.component_n_steps)\n# n_steps = [self.component_n_steps] * self.n_components\n# except Exception:\n# n_steps = self.component_n_steps\n#\n# signal = []\n# offsets = [0] * self.n_components\n# for i in cycle(range(self.n_components)):\n# if len(signal) >= len(t):\n# break\n# if self.shared_clock:\n# start = offsets[0]\n# else:\n# start = offsets[i]\n#\n# t_ = t[start: start+n_steps[i]]\n# _signal = self.components[i].build(t_).astype('f')\n# signal.extend(list(_signal))\n#\n# if self.shared_clock:\n# offsets[0] += n_steps[i]\n# else:\n# offsets[i] += n_steps[i]\n#\n# return np.array(signal).reshape(-1)[:len(t)]\n\n\ndef build_optimizer(spec, learning_rate):\n \"\"\"\n\n Parameters\n ----------\n spec: str\n String of the form \"kind arg1 arg2 ...\".\n learning_rate: float\n First argument to the constructed optimizer.\n\n \"\"\"\n assert isinstance(spec, str)\n kind, *args = spec.split()\n kind = kind.lower()\n args = deque(args)\n\n if kind == \"adam\":\n beta1 = float(popleft(args, 0.9))\n beta2 = float(popleft(args, 0.999))\n epsilon = float(popleft(args, 1e-08))\n use_locking = _bool(popleft(args, False))\n opt = tf.train.AdamOptimizer(\n learning_rate, beta1=beta1, beta2=beta2,\n epsilon=epsilon, use_locking=use_locking)\n elif kind == \"rmsprop\":\n decay = float(popleft(args, 0.95))\n momentum = float(popleft(args, 0.95))\n epsilon = float(popleft(args, 1e-8))\n use_locking = _bool(popleft(args, False))\n centered = _bool(popleft(args, False))\n opt = tf.train.RMSPropOptimizer(\n learning_rate, decay=decay, momentum=momentum,\n epsilon=epsilon, use_locking=use_locking, centered=centered)\n else:\n raise Exception(\n \"No known optimizer with kind `{}` and args `{}`.\".format(kind, args))\n\n return opt\n\n\ndef masked_mean(array, mask, axis=None, keepdims=False):\n denom = tf.count_nonzero(mask, axis=axis, keepdims=keepdims)\n denom = tf.maximum(denom, 1)\n denom = tf.to_float(denom)\n return tf.reduce_sum(array * mask, axis=axis) / denom\n\n\ndef build_gradient_train_op(\n loss, tvars, optimizer_spec, lr_schedule, max_grad_norm=None,\n noise_schedule=None, global_step=None, record_prefix=None):\n \"\"\" By default, `global_step` is None, so the global step is not incremented. 
\"\"\"\n\n pure_gradients = tf.gradients(loss, tvars)\n\n clipped_gradients = pure_gradients\n if max_grad_norm is not None and max_grad_norm > 0.0:\n clipped_gradients, _ = tf.clip_by_global_norm(pure_gradients, max_grad_norm)\n\n noisy_gradients = clipped_gradients\n if noise_schedule is not None:\n grads_and_vars = zip(clipped_gradients, tvars)\n noise = build_scheduled_value(noise_schedule, 'gradient_noise')\n noisy_gradients = add_scaled_noise_to_gradients(grads_and_vars, noise)\n\n grads_and_vars = list(zip(noisy_gradients, tvars))\n\n lr = build_scheduled_value(lr_schedule, 'learning_rate')\n\n valid_lr = tf.Assert(\n tf.logical_and(tf.less(lr, 1.0), tf.less(0.0, lr)),\n [lr], name=\"valid_learning_rate\")\n\n optimizer = build_optimizer(optimizer_spec, lr)\n\n with tf.control_dependencies([valid_lr]):\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n pre = record_prefix + \"_\" if record_prefix else \"\"\n records = {\n pre + 'grad_norm_pure': tf.global_norm(pure_gradients),\n pre + 'grad_norm_processed': tf.global_norm(noisy_gradients),\n }\n\n return train_op, records\n\n\ndef tf_roll(a, n, axis=0, fill=None, reverse=False):\n \"\"\" n > 0 corresponds to taking the final n elements, and putting them at the start.\n n < 0 corresponds to taking the first -n elements, and putting them at the end.\n\n If fill is not None then it should be a value with the same type as `a`.\n The space that is \"vacated\" (starting at the beginning of\n the array if n > 0, starting at the end if n < 0) is filled with the given value\n instead of a part of the original array.\n\n fill can also be an array that this broadcastable to the required shape (we just\n element-wise multiply fill with an array of ones of the appropriate shape).\n\n\n \"\"\"\n if reverse:\n a = tf.reverse(a, axis=[axis])\n\n pre_slices = [slice(None) for i in a.shape]\n pre_slices[axis] = slice(None, -n)\n\n pre = a[pre_slices]\n\n post_slices = [slice(None) for i in a.shape]\n post_slices[axis] = slice(-n, None)\n\n post = a[post_slices]\n\n if fill is not None:\n if n > 0:\n post = fill * tf.ones_like(post, dtype=a.dtype)\n else:\n pre = fill * tf.ones_like(pre, dtype=a.dtype)\n\n r = tf.concat([post, pre], axis=axis)\n\n if reverse:\n r = tf.reverse(r, axis=[axis])\n\n return r\n\n\ndef tf_discount_matrix(base, T, n=None):\n x = tf.cast(tf.range(T), tf.float32)\n r = (x - x[:, None])\n if n is not None:\n r = tf.where(r >= n, np.inf * tf.ones_like(r), r)\n r = base ** r\n return tf.matrix_band_part(r, 0, -1)\n\n\nclass RenderHook(object):\n N = 16\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def imshow(self, ax, frame, **kwargs):\n \"\"\" If ax already has an image, uses set_array on that image instead of doing imshow.\n Allows this function to work well with animations. 
\"\"\"\n\n if frame.ndim == 3 and frame.shape[2] == 1:\n frame = frame[:, :, 0]\n frame = np.clip(frame, 0.0, 1.0)\n frame = np.where(np.isnan(frame), 0, frame)\n\n if ax.images:\n ax.images[0].set_array(frame)\n else:\n ax.imshow(frame, vmin=0.0, vmax=1.0, **kwargs)\n\n def get_feed_dict(self, updater):\n is_training = getattr(self, 'is_training', False)\n return updater.data_manager.do_val(is_training)\n\n def _fetch(self, updater, fetches=None):\n if fetches is None:\n fetches = self.fetches\n\n if isinstance(fetches, str):\n fetches = fetches.split()\n\n feed_dict = self.get_feed_dict(updater)\n\n try:\n tensors = updater.tensors\n except AttributeError:\n tensors = updater._tensors\n\n tensors_config = Config(tensors)\n to_fetch = {k: tensors_config[k] for k in fetches}\n to_fetch = nest.map_structure(lambda s: s[:self.N], to_fetch)\n\n sess = tf.get_default_session()\n fetched = sess.run(to_fetch, feed_dict=feed_dict)\n\n return fetched\n\n def path_for(self, name, updater, ext=\"pdf\"):\n local_step = (\n np.inf if dps.cfg.overwrite_plots else \"{:0>10}\".format(updater.n_updates))\n\n return updater.exp_dir.path_for(\n 'plots', name,\n 'stage={:0>4}_local_step={}.{}'.format(updater.stage_idx, local_step, ext))\n\n def savefig(self, name, fig, updater, is_dir=True):\n if is_dir:\n path = self.path_for(name, updater)\n fig.savefig(path)\n plt.close(fig)\n\n shutil.copyfile(\n path,\n os.path.join(\n os.path.dirname(path),\n 'latest_stage{:0>4}.pdf'.format(updater.stage_idx)))\n else:\n path = updater.exp_dir.path_for('plots', name + \".pdf\")\n fig.savefig(path)\n plt.close(fig)\n",
"import numpy as np\nimport os\nimport clify\nimport argparse\n\nfrom config import rl_config as config\n\nconfig.update(\n image_shape_grid=(3, 3),\n reductions=\"sum\",\n)\n\ngrid = [dict(n_train=1, do_train=False)] + [dict(n_train=x) for x in 2**np.arange(0, 18, 2)]\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--task\", choices=\"A B C D E F 0\".split(), default='')\n\nargs, _ = parser.parse_known_args()\n\nstage_1 = dict()\nstage_2 = dict(min_digits=4, max_digits=4)\nstage_3 = dict(min_digits=5, max_digits=5)\n\nif args.task == \"0\":\n grid = dict(n_train=2**np.arange(14, 18, 2))\n config.update(image_shape_grid=(2, 2))\n\nelif args.task == \"A\":\n zero_dir = \"/home/e2crawfo/rl_size_0/\"\n config.load_path = [\n os.path.join(zero_dir, d, 'weights/best_of_stage_0') for d in os.listdir(zero_dir)\n ]\n config.update(stage_1)\n\nelif args.task == \"B\":\n A_dir = \"/home/e2crawfo/rl_size_A/\"\n config.load_path = [\n os.path.join(A_dir, d, 'weights/best_of_stage_0') for d in os.listdir(A_dir)\n ]\n config.update(stage_2)\n\nelif args.task == \"C\":\n B_dir = \"/home/e2crawfo/rl_size_B/\"\n config.load_path = [\n os.path.join(B_dir, d, 'weights/best_of_stage_0') for d in os.listdir(B_dir)\n ]\n config.update(stage_3)\n\nelif args.task == \"D\":\n config.update(stage_1)\nelif args.task == \"E\":\n config.update(stage_2)\nelif args.task == \"F\":\n config.update(stage_3)\nelse:\n raise Exception()\n\n\nfrom dps.hyper import build_and_submit, default_host_pool\nclify.wrap_function(build_and_submit)(\n config=config, distributions=grid, n_param_settings=None, host_pool=default_host_pool)\n"
] | [
[
"numpy.ceil",
"numpy.floor"
],
[
"numpy.sqrt",
"tensorflow.count_nonzero",
"tensorflow.matrix_band_part",
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.variables_initializer",
"tensorflow.train.get_or_create_global_step",
"numpy.ceil",
"tensorflow.image.extract_glimpse",
"tensorflow.train.Saver",
"tensorflow.TensorShape",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.less",
"tensorflow.exp",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.one_hot",
"numpy.floor",
"tensorflow.split",
"numpy.array",
"tensorflow.reduce_mean",
"tensorflow.ones_like",
"tensorflow.clip_by_global_norm",
"numpy.split",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.global_variables",
"tensorflow.pad",
"tensorflow.train.AdamOptimizer",
"tensorflow.get_default_graph",
"tensorflow.boolean_mask",
"numpy.reshape",
"tensorflow.gather",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.contrib.slim.python.slim.nets.vgg.vgg_16",
"tensorflow.add_to_collection",
"tensorflow.layers.flatten",
"tensorflow.reduce_max",
"tensorflow.expand_dims",
"tensorflow.log",
"tensorflow.get_variable_scope",
"tensorflow.get_variable",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.is_variable_initialized",
"numpy.clip",
"tensorflow.stop_gradient",
"matplotlib.pyplot.close",
"tensorflow.reverse",
"tensorflow.nn.sigmoid",
"tensorflow.shape",
"numpy.isnan",
"tensorflow.contrib.slim.python.slim.nets.vgg.vgg_a",
"tensorflow.reduce_prod",
"tensorflow.meshgrid",
"tensorflow.size",
"tensorflow.get_default_session",
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.contrib.slim.dropout",
"tensorflow.reshape",
"tensorflow.variable_scope",
"numpy.dtype",
"tensorflow.to_int32",
"tensorflow.argmin",
"tensorflow.get_collection",
"numpy.arange",
"tensorflow.gradients",
"tensorflow.truncated_normal_initializer",
"tensorflow.contrib.slim.python.slim.nets.vgg.vgg_19",
"tensorflow.to_float",
"tensorflow.python.util.nest.map_structure",
"tensorflow.fill",
"tensorflow.python.ops.random_ops.truncated_normal",
"tensorflow.sequence_mask",
"tensorflow.global_norm",
"tensorflow.maximum",
"tensorflow.contrib.layers.layer_norm",
"numpy.prod"
],
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
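The archived utility module in the record above implements several hyperparameter schedules (Exponential, Polynomial, Reciprocal, Constant) whose TensorFlow graph code can obscure the underlying arithmetic. The short NumPy sketch below is not part of the record; it only restates the exponential schedule's closed form, value = (start - end) * decay_rate**(t / decay_steps) + end, and the function name and example values are hypothetical.

import numpy as np

def exponential_schedule(t, start, end, decay_steps, decay_rate, staircase=False):
    # value = (start - end) * decay_rate ** (t / decay_steps) + end
    t = np.asarray(t, dtype=float)
    steps = np.floor(t / decay_steps) if staircase else t / decay_steps
    return (start - end) * decay_rate ** steps + end

# e.g. a learning rate decaying from 1e-3 toward 1e-5
print(exponential_schedule(np.array([0, 1000, 10000]), 1e-3, 1e-5, 1000, 0.9))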
jiduque/scikit-fda | [
"5ea71e78854801b259aa3a01eb6b154aa63bf54b",
"5ea71e78854801b259aa3a01eb6b154aa63bf54b",
"5ea71e78854801b259aa3a01eb6b154aa63bf54b"
] | [
"tests/test_classification.py",
"skfda/exploratory/depth/multivariate.py",
"skfda/preprocessing/dim_reduction/feature_extraction/_ddg_transformer.py"
] | [
"\"\"\"Tests of classification methods.\"\"\"\n\nimport unittest\n\nimport numpy as np\nfrom sklearn.base import clone\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier as _KNeighborsClassifier\n\nfrom skfda.datasets import fetch_growth\nfrom skfda.misc.metrics import l2_distance\nfrom skfda.ml.classification import (\n DDClassifier,\n DDGClassifier,\n DTMClassifier,\n KNeighborsClassifier,\n MaximumDepthClassifier,\n NearestCentroid,\n RadiusNeighborsClassifier,\n)\nfrom skfda.ml.classification._depth_classifiers import _ArgMaxClassifier\nfrom skfda.representation import FData\n\n\nclass TestClassifiers(unittest.TestCase):\n \"\"\"Tests for classifiers.\"\"\"\n\n def setUp(self) -> None:\n \"\"\"Establish train and test data sets.\"\"\"\n X, y = fetch_growth(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.25,\n stratify=y,\n random_state=0,\n )\n self._X_train = X_train\n self._X_test = X_test\n self._y_train = y_train\n self._y_test = y_test\n\n def test_dtm_independent_copy(self) -> None:\n \"\"\"Check that copies are un-linked.\"\"\"\n clf: DTMClassifier[FData] = DTMClassifier(proportiontocut=0.25)\n clf1 = clone(clf)\n clf2: DTMClassifier[FData] = DTMClassifier(proportiontocut=0.75)\n\n clf1.proportiontocut = 0.75\n clf1.fit(self._X_train, self._y_train)\n clf2.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf1.predict(self._X_test),\n clf2.predict(self._X_test),\n )\n\n def test_dtm_classifier(self) -> None:\n \"\"\"Check DTM classifier.\"\"\"\n clf: DTMClassifier[FData] = DTMClassifier(proportiontocut=0.25)\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_centroid_classifier(self) -> None:\n \"\"\"Check NearestCentroid classifier.\"\"\"\n clf: NearestCentroid[FData] = NearestCentroid()\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_dtm_inheritance(self) -> None:\n \"\"\"Check that DTM is a subclass of NearestCentroid.\"\"\"\n clf1: NearestCentroid[FData] = NearestCentroid()\n clf2: DTMClassifier[FData] = DTMClassifier(\n proportiontocut=0,\n metric=l2_distance,\n )\n clf1.fit(self._X_train, self._y_train)\n clf2.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf1.predict(self._X_test),\n clf2.predict(self._X_test),\n )\n\n def test_maximumdepth_classifier(self) -> None:\n \"\"\"Check MaximumDepth classifier.\"\"\"\n clf: MaximumDepthClassifier[FData] = MaximumDepthClassifier()\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_dd_classifier(self) -> None:\n \"\"\"Check DD classifier.\"\"\"\n clf: DDClassifier[FData] = DDClassifier(degree=2)\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_ddg_classifier(self) -> None:\n \"\"\"Check DDG classifier.\"\"\"\n clf: DDGClassifier[FData] = DDGClassifier(_KNeighborsClassifier())\n clf.fit(self._X_train, 
self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_maximumdepth_inheritance(self) -> None:\n \"\"\"Check that MaximumDepth is a subclass of DDG.\"\"\"\n clf1: DDGClassifier[FData] = DDGClassifier(_ArgMaxClassifier())\n clf2: MaximumDepthClassifier[FData] = MaximumDepthClassifier()\n clf1.fit(self._X_train, self._y_train)\n clf2.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf1.predict(self._X_test),\n clf2.predict(self._X_test),\n )\n\n def test_kneighbors_classifier(self) -> None:\n \"\"\"Check KNeighbors classifier.\"\"\"\n clf = KNeighborsClassifier()\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n ],\n )\n\n def test_radiusneighbors_classifier(self) -> None:\n \"\"\"Check RadiusNeighbors classifier.\"\"\"\n clf = RadiusNeighborsClassifier(radius=15)\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n ],\n )\n\n def test_radiusneighbors_small_raidus(self) -> None:\n \"\"\"Check that an error is raised if radius too small.\"\"\"\n clf = RadiusNeighborsClassifier(radius=1)\n clf.fit(self._X_train, self._y_train)\n\n with np.testing.assert_raises(ValueError):\n clf.predict(self._X_test)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"Depth and outlyingness ABCs and implementations for multivariate data.\"\"\"\n\nfrom __future__ import annotations\n\nimport abc\nimport math\nfrom typing import Generic, Optional, TypeVar\n\nimport numpy as np\nimport scipy.stats\nimport sklearn\nfrom scipy.special import comb\nfrom typing_extensions import Literal\n\nT = TypeVar(\"T\", contravariant=True)\nSelfType = TypeVar(\"SelfType\")\n_Side = Literal[\"left\", \"right\"]\n\n\nclass _DepthOrOutlyingness(\n abc.ABC,\n sklearn.base.BaseEstimator, # type: ignore\n Generic[T],\n):\n \"\"\"Abstract class representing a depth or outlyingness function.\"\"\"\n\n def fit(self: SelfType, X: T, y: None = None) -> SelfType:\n \"\"\"\n Learn the distribution from the observations.\n\n Args:\n X: Functional dataset from which the distribution of the data is\n inferred.\n y: Unused. Kept only for convention.\n\n Returns:\n Fitted estimator.\n\n \"\"\"\n return self\n\n @abc.abstractmethod\n def predict(self, X: T) -> np.ndarray:\n \"\"\"\n Compute the depth or outlyingness inside the learned distribution.\n\n Args:\n X: Points whose depth is going to be evaluated.\n\n Returns:\n Depth of each observation.\n\n \"\"\"\n pass\n\n def fit_predict(self, X: T, y: None = None) -> np.ndarray:\n \"\"\"\n Compute the depth or outlyingness of each observation.\n\n This computation is done with respect to the whole dataset.\n\n Args:\n X: Dataset.\n y: Unused. Kept only for convention.\n\n Returns:\n Depth of each observation.\n\n \"\"\"\n return self.fit(X).predict(X)\n\n def __call__(\n self,\n X: T,\n *,\n distribution: Optional[T] = None,\n ) -> np.ndarray:\n \"\"\"\n Allow the depth or outlyingness to be used as a function.\n\n Args:\n X: Points whose depth is going to be evaluated.\n distribution: Functional dataset from which the distribution of\n the data is inferred. 
If ``None`` it is the same as ``X``.\n\n Returns:\n Depth of each observation.\n\n \"\"\"\n copy = sklearn.base.clone(self)\n\n if distribution is None:\n return copy.fit_predict(X)\n\n return copy.fit(distribution).predict(X)\n\n @property # noqa: WPS125\n def max(self) -> float: # noqa: WPS125\n \"\"\"\n Maximum (or supremum if there is no maximum) of the possibly predicted\n values.\n\n \"\"\"\n return 1\n\n @property # noqa: WPS125\n def min(self) -> float: # noqa: WPS125\n \"\"\"\n Minimum (or infimum if there is no maximum) of the possibly predicted\n values.\n\n \"\"\"\n return 0\n\n\nclass Depth(_DepthOrOutlyingness[T]):\n \"\"\"Abstract class representing a depth function.\"\"\"\n\n\nclass Outlyingness(_DepthOrOutlyingness[T]):\n \"\"\"Abstract class representing an outlyingness function.\"\"\"\n\n\ndef _searchsorted_one_dim(\n array: np.ndarray,\n values: np.ndarray,\n *,\n side: _Side = 'left',\n) -> np.ndarray:\n return np.searchsorted(array, values, side=side)\n\n\n_searchsorted_vectorized = np.vectorize(\n _searchsorted_one_dim,\n signature='(n),(m),()->(m)',\n excluded='side',\n)\n\n\ndef _searchsorted_ordered(\n array: np.ndarray,\n values: np.ndarray,\n *,\n side: _Side = 'left',\n) -> np.ndarray:\n return _searchsorted_vectorized(array, values, side=side)\n\n\ndef _cumulative_distribution(column: np.ndarray) -> np.ndarray:\n \"\"\"Calculate the cumulative distribution function at each point.\n\n Args:\n column (numpy.darray): Array containing the values over which the\n distribution function is calculated.\n\n Returns:\n numpy.darray: Array containing the evaluation at each point of the\n distribution function.\n\n Examples:\n >>> _cumulative_distribution(np.array([1, 4, 5, 1, 2, 2, 4, 1, 1, 3]))\n array([ 0.4, 0.9, 1. , 0.4, 0.6, 0.6, 0.9, 0.4, 0.4, 0.7])\n\n \"\"\"\n return _searchsorted_ordered(\n np.sort(column),\n column,\n side='right',\n ) / len(column)\n\n\nclass _UnivariateFraimanMuniz(Depth[np.ndarray]):\n r\"\"\"\n Univariate depth used to compute the Fraiman an Muniz depth.\n\n Each column is considered as the samples of an aleatory variable.\n The univariate depth of each of the samples of each column is calculated\n as follows:\n\n .. math::\n D(x) = 1 - \\left\\lvert \\frac{1}{2}- F(x)\\right\\rvert\n\n Where :math:`F` stands for the marginal univariate distribution function of\n each column.\n\n \"\"\"\n\n def fit(self: SelfType, X: np.ndarray, y: None = None) -> SelfType:\n self._sorted_values = np.sort(X, axis=0)\n return self\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n cum_dist = _searchsorted_ordered(\n np.moveaxis(self._sorted_values, 0, -1),\n np.moveaxis(X, 0, -1),\n side='right',\n ) / len(self._sorted_values)\n\n assert cum_dist.shape[-2] == 1\n return 1 - np.abs(0.5 - np.moveaxis(cum_dist, -1, 0)[..., 0])\n\n @property # noqa: WPS125\n def min(self) -> float: # noqa: WPS125\n return 1 / 2\n\n\nclass SimplicialDepth(Depth[np.ndarray]):\n r\"\"\"\n Simplicial depth.\n\n The simplicial depth of a point :math:`x` in :math:`\\mathbb{R}^p` given a\n distribution :math:`F` is the probability that a random simplex with its\n :math:`p + 1` points sampled from :math:`F` contains :math:`x`.\n\n References:\n Liu, R. Y. (1990). On a Notion of Data Depth Based on Random\n Simplices. 
The Annals of Statistics, 18(1), 405–414.\n\n\n \"\"\"\n\n def fit( # noqa: D102\n self,\n X: np.ndarray,\n y: None = None,\n ) -> SimplicialDepth:\n self._dim = X.shape[-1]\n\n if self._dim == 1:\n self.sorted_values = np.sort(X, axis=0)\n else:\n raise NotImplementedError(\n \"SimplicialDepth is currently only \"\n \"implemented for one-dimensional data.\",\n )\n\n return self\n\n def predict(self, X: np.ndarray) -> np.ndarray: # noqa: D102\n\n assert self._dim == X.shape[-1]\n\n if self._dim == 1:\n positions_left = _searchsorted_ordered(\n np.moveaxis(self.sorted_values, 0, -1),\n np.moveaxis(X, 0, -1),\n )\n\n positions_left = np.moveaxis(positions_left, -1, 0)[..., 0]\n\n positions_right = _searchsorted_ordered(\n np.moveaxis(self.sorted_values, 0, -1),\n np.moveaxis(X, 0, -1),\n side='right',\n )\n\n positions_right = np.moveaxis(positions_right, -1, 0)[..., 0]\n\n num_strictly_below = positions_left\n num_strictly_above = len(self.sorted_values) - positions_right\n\n total_pairs = comb(len(self.sorted_values), 2)\n\n return (\n total_pairs - comb(num_strictly_below, 2)\n - comb(num_strictly_above, 2)\n ) / total_pairs\n\n\nclass OutlyingnessBasedDepth(Depth[T]):\n r\"\"\"\n Computes depth based on an outlyingness measure.\n\n An outlyingness function :math:`O(x)` can be converted to a depth\n function as\n\n .. math::\n D(x) = \\frac{1}{1 + O(x)}\n\n if :math:`O(x)` is unbounded or as\n\n .. math::\n D(x) = 1 - \\frac{O(x)}{\\sup O(x)}\n\n if :math:`O(x)` is bounded. If the infimum value of the\n outlyiness function is not zero, it is subtracted beforehand.\n\n Args:\n outlyingness (Outlyingness): Outlyingness object.\n\n References:\n Serfling, R. (2006). Depth functions in nonparametric\n multivariate inference. DIMACS Series in Discrete Mathematics and\n Theoretical Computer Science, 72, 1.\n\n \"\"\"\n\n def __init__(self, outlyingness: Outlyingness[T]):\n self.outlyingness = outlyingness\n\n def fit( # noqa: D102\n self,\n X: T,\n y: None = None,\n ) -> OutlyingnessBasedDepth[T]:\n self.outlyingness.fit(X)\n\n return self\n\n def predict(self, X: np.ndarray) -> np.ndarray: # noqa: D102\n outlyingness_values = self.outlyingness.predict(X)\n\n min_val = self.outlyingness.min\n max_val = self.outlyingness.max\n\n if math.isinf(max_val):\n return 1 / (1 + outlyingness_values - min_val)\n\n return 1 - (outlyingness_values - min_val) / (max_val - min_val)\n\n\nclass StahelDonohoOutlyingness(Outlyingness[np.ndarray]):\n r\"\"\"\n Computes Stahel-Donoho outlyingness.\n\n Stahel-Donoho outlyingness is defined as\n\n .. math::\n \\sup_{\\|u\\|=1} \\frac{|u^T x - \\text{Med}(u^T X))|}{\\text{MAD}(u^TX)}\n\n where :math:`\\text{X}` is a sample with distribution :math:`F`,\n :math:`\\text{Med}` is the median and :math:`\\text{MAD}` is the\n median absolute deviation.\n\n References:\n Zuo, Y., Cui, H., & He, X. (2004). On the Stahel-Donoho\n estimator and depth-weighted means of multivariate data. Annals of\n Statistics, 32(1), 167–188. 
https://doi.org/10.1214/aos/1079120132\n\n \"\"\"\n\n def fit( # noqa: D102\n self,\n X: np.ndarray,\n y: None = None,\n ) -> StahelDonohoOutlyingness:\n\n dim = X.shape[-1]\n\n if dim == 1:\n self._location = np.median(X, axis=0)\n self._scale = scipy.stats.median_abs_deviation(X, axis=0)\n else:\n raise NotImplementedError(\"Only implemented for one dimension\")\n\n return self\n\n def predict(self, X: np.ndarray) -> np.ndarray: # noqa: D102\n\n dim = X.shape[-1]\n\n if dim == 1:\n # Special case, can be computed exactly\n return (\n np.abs(X - self._location)\n / self._scale\n )[..., 0]\n\n raise NotImplementedError(\"Only implemented for one dimension\")\n\n @property # noqa: WPS125\n def max(self) -> float: # noqa: WPS125\n return math.inf\n\n\nclass ProjectionDepth(OutlyingnessBasedDepth[np.ndarray]):\n r\"\"\"\n Computes Projection depth.\n\n It is defined as the depth induced by the\n :class:`Stahel-Donoho outlyingness <StahelDonohoOutlyingness>`.\n\n See also:\n :class:`StahelDonohoOutlyingness`: Stahel-Donoho outlyingness.\n\n References:\n Zuo, Y., Cui, H., & He, X. (2004). On the Stahel-Donoho\n estimator and depth-weighted means of multivariate data. Annals of\n Statistics, 32(1), 167–188. https://doi.org/10.1214/aos/1079120132\n\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__(outlyingness=StahelDonohoOutlyingness())\n",
"\"\"\"Feature extraction transformers for dimensionality reduction.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Generic, Sequence, TypeVar, Union\n\nimport numpy as np\nfrom numpy import ndarray\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.utils.validation import check_is_fitted as sklearn_check_is_fitted\n\nfrom ...._utils import _classifier_fit_depth_methods\nfrom ....exploratory.depth import Depth, ModifiedBandDepth\nfrom ....representation.grid import FData\n\nT = TypeVar(\"T\", bound=FData)\n\n\nclass DDGTransformer(\n BaseEstimator, # type: ignore\n TransformerMixin, # type: ignore\n Generic[T],\n):\n r\"\"\"Generalized depth-versus-depth (DD) transformer for functional data.\n\n This transformer takes a list of k depths and performs the following map:\n\n .. math::\n \\mathcal{X} &\\rightarrow \\mathbb{R}^G \\\\\n x &\\rightarrow \\textbf{d} = (D_1^1(x), D_1^2(x),...,D_g^k(x))\n\n Where :math:`D_i^j(x)` is the depth of the point :math:`x` with respect to\n the data in the :math:`i`-th group using the :math:`j`-th depth of the\n provided list.\n\n Note that :math:`\\mathcal{X}` is possibly multivariate, that is,\n :math:`\\mathcal{X} = \\mathcal{X}_1 \\times ... \\times \\mathcal{X}_p`.\n\n Parameters:\n depth_method:\n The depth class or sequence of depths to use when calculating\n the depth of a test sample in a class. See the documentation of\n the depths module for a list of available depths. By default it\n is ModifiedBandDepth.\n\n Examples:\n Firstly, we will import and split the Berkeley Growth Study dataset\n\n >>> from skfda.datasets import fetch_growth\n >>> from sklearn.model_selection import train_test_split\n >>> dataset = fetch_growth()\n >>> fd = dataset['data']\n >>> y = dataset['target']\n >>> X_train, X_test, y_train, y_test = train_test_split(\n ... fd, y, test_size=0.25, stratify=y, random_state=0)\n\n >>> from skfda.preprocessing.dim_reduction.feature_extraction import \\\n ... DDGTransformer\n >>> from sklearn.pipeline import make_pipeline\n >>> from sklearn.neighbors import KNeighborsClassifier\n\n We classify by first transforming our data using the defined map\n and then using KNN\n\n >>> pipe = make_pipeline(DDGTransformer(), KNeighborsClassifier())\n >>> pipe.fit(X_train, y_train)\n Pipeline(steps=[('ddgtransformer',\n DDGTransformer(depth_method=[ModifiedBandDepth()])),\n ('kneighborsclassifier', KNeighborsClassifier())])\n\n We can predict the class of new samples\n\n >>> pipe.predict(X_test)\n array([1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])\n\n Finally, we calculate the mean accuracy for the test data\n\n >>> pipe.score(X_test, y_test)\n 0.875\n\n References:\n Cuesta-Albertos, J. A., Febrero-Bande, M. and Oviedo de la Fuente, M.\n (2017). The DDG-classifier in the functional setting.\n TEST, 26. 
119-142.\n \"\"\"\n\n def __init__(\n self,\n depth_method: Union[Depth[T], Sequence[Depth[T]], None] = None,\n ) -> None:\n self.depth_method = depth_method\n\n def fit(self, X: T, y: ndarray) -> DDGTransformer[T]:\n \"\"\"Fit the model using X as training data and y as target values.\n\n Args:\n X: FDataGrid with the training data.\n y: Target values of shape = (n_samples).\n\n Returns:\n self\n \"\"\"\n if self.depth_method is None:\n self.depth_method = ModifiedBandDepth()\n\n if isinstance(self.depth_method, Depth):\n self.depth_method = [self.depth_method]\n\n classes, class_depth_methods = _classifier_fit_depth_methods(\n X, y, self.depth_method,\n )\n\n self._classes = classes\n self.class_depth_methods_ = class_depth_methods\n\n return self\n\n def transform(self, X: T) -> ndarray:\n \"\"\"Transform the provided data using the defined map.\n\n Args:\n X: FDataGrid with the test samples.\n\n Returns:\n Array of shape (n_samples, G).\n \"\"\"\n sklearn_check_is_fitted(self)\n\n return np.transpose([\n depth_method.predict(X)\n for depth_method in self.class_depth_methods_\n ])\n"
] | [
[
"sklearn.base.clone",
"numpy.testing.assert_raises",
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsClassifier"
],
[
"numpy.abs",
"numpy.median",
"numpy.sort",
"scipy.special.comb",
"sklearn.base.clone",
"numpy.vectorize",
"numpy.searchsorted",
"numpy.moveaxis"
],
[
"sklearn.utils.validation.check_is_fitted"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
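The multivariate depth module archived above describes the univariate Fraiman-Muniz depth as D(x) = 1 - |1/2 - F(x)|, with F the marginal empirical distribution function. The sketch below is an illustration only, not scikit-fda API: it computes that quantity with a plain right-continuous empirical CDF, and the function name is hypothetical.

import numpy as np

def fraiman_muniz_depth_1d(sample, points):
    # right-continuous empirical CDF, as in the archived _cumulative_distribution
    sorted_sample = np.sort(np.asarray(sample))
    ecdf = np.searchsorted(sorted_sample, points, side="right") / len(sorted_sample)
    # depth peaks near the median and is bounded below by 1/2 in the tails
    return 1 - np.abs(0.5 - ecdf)

sample = np.array([1, 4, 5, 1, 2, 2, 4, 1, 1, 3])
print(fraiman_muniz_depth_1d(sample, sample))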
theglossy1/Minesweeper | [
"9c641310e82e470a4c4e74bf91239f70b9dc7caa"
] | [
"minesweeper.py"
] | [
"import math\nimport random\n\nimport numpy as np\n\nMINE_BIT = 0b01\nFLAG_BIT = 0b10\n\nEMPTY_SLOT = 0xFF\nFLAG_SLOT = 0xFE\n\nSURROUNDING = [\n (1, 0),\n (1, 1),\n (0, 1),\n (-1, 1),\n (-1, 0),\n (-1, -1),\n (0, -1),\n (1, -1),\n]\n\n\nclass Minesweeper:\n def __init__(self, *shape, seed=None):\n if len(shape) < 1:\n shape = (10, 10)\n bomb_count = 7\n else:\n shape, bomb_count = shape[:-1], shape[-1]\n if math.prod(shape) < bomb_count:\n raise ValueError('cannot be more bombs than spaces on the board')\n self.board_matrix = np.zeros(shape, 'uint16')\n self.render_matrix = np.full(shape, EMPTY_SLOT, 'uint8')\n randomizer = random.Random(seed)\n bombs = []\n while bomb_count:\n bomb = []\n for size in shape:\n bomb.append(randomizer.randrange(size))\n bomb = tuple(bomb)\n if bomb not in bombs:\n bombs.append(bomb)\n self.board_matrix[bomb] |= MINE_BIT\n bomb_count -= 1\n\n def add_flag(self, *pos):\n self.board_matrix[pos] |= FLAG_BIT\n self.render_matrix[pos] = FLAG_SLOT\n\n def remove_flag(self, *pos):\n self.board_matrix[pos] ^= FLAG_BIT\n self.render_matrix[pos] = EMPTY_SLOT\n\n def is_flagged(self, *pos):\n return FLAG_BIT & self.board_matrix[pos]\n\n def toggle_flag(self, *pos):\n if self.is_flagged(*pos):\n self.remove_flag(*pos)\n else:\n self.add_flag(*pos)\n\n def _reveal(self, pos):\n cell = self.board_matrix[pos]\n if cell & FLAG_BIT:\n return -2\n elif cell & MINE_BIT:\n return -1\n else:\n count = 0\n shape = self.board_matrix.shape\n for direction in SURROUNDING:\n # newpos = (pos[0] + direction[0], pos[1] + direction[1])\n newpos = tuple(map(sum, ((pos[x], direction[x]) for x in range(len(direction)))))\n if all(map((lambda x: x[1] >= 0 and x[1] < shape[x[0]]), enumerate(newpos))):\n count += self.board_matrix[newpos] & MINE_BIT\n return count\n\n def reveal(self, *pos):\n count = self._reveal(pos)\n if count >= 0:\n self.render_matrix[pos] = count\n return count\n\n def recursive_reveal(self, *pos, reached=None):\n if reached is None:\n reached = set()\n if pos in reached:\n return None\n count = self.reveal(*pos)\n reached.add(pos)\n if count == 0:\n shape = self.board_matrix.shape\n for direction in SURROUNDING:\n # newpos = (pos[0] + direction[0], pos[1] + direction[1])\n newpos = tuple(map(sum, ((pos[x], direction[x]) for x in range(len(direction)))))\n if all(map((lambda x: x[1] >= 0 and x[1] < shape[x[0]]), enumerate(newpos))):\n if newpos not in reached:\n self.recursive_reveal(*newpos, reached=reached)\n return count\n\n def has_won(self):\n return all((bool(cell & FLAG_BIT) == bool(cell & MINE_BIT)) for cell in np.nditer(self.board_matrix))\n\n def reveal_all(self):\n for (pos, cell) in np.ndenumerate(self.board_matrix):\n if not cell & FLAG_BIT and not cell & MINE_BIT:\n self.reveal(*pos)\n"
] | [
[
"numpy.ndenumerate",
"numpy.zeros",
"numpy.nditer",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
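The Minesweeper implementation above stores two independent booleans per board cell (mine present, flag placed) as bits of a single integer. The snippet below is only an isolated illustration of that bit-packing, reusing the archived constants; it is not part of the file.

MINE_BIT = 0b01
FLAG_BIT = 0b10

cell = 0
cell |= MINE_BIT                                      # place a mine
cell |= FLAG_BIT                                      # player flags the cell
print(bool(cell & MINE_BIT), bool(cell & FLAG_BIT))   # True True
cell ^= FLAG_BIT                                      # toggle clears only the flag bit
print(bool(cell & MINE_BIT), bool(cell & FLAG_BIT))   # True False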
Horacehxw/Multi-label | [
"76095c72327e9aa379eaa653dbbb775ca638e6db"
] | [
"src/LDPC/pyldpc/ldpcmatrices.py"
] | [
"import numpy as np\nfrom scipy.sparse import csr_matrix\nfrom .ldpcalgebra import*\n\n__all__ = ['BinaryProduct', 'InCode', 'BinaryRank','RegularH','CodingMatrix','CodingMatrix_systematic','HtG']\n\n\ndef RegularH(n,d_v,d_c):\n\n \"\"\" ------------------------------------------------------------------------------\n\n Builds a regular Parity-Check Matrix H (n,d_v,d_c) following Callager's algorithm : \n\n ----------------------------------------------------------------------------------\n\n Paramaeters:\n\n n: Number of columns (Same as number of coding bits)\n d_v: number of ones per column (number of parity-check equations including a certain variable) \n d_c: number of ones per row (number of variables participating in a certain parity-check equation); \n\n ----------------------------------------------------------------------------------\n\n Errors: \n\n The number of ones in the matrix is the same no matter how we calculate it (rows or columns), therefore, if m is \n the number of rows in the matrix: \n\n m*d_c = n*d_v with m < n (because H is a decoding matrix) => Parameters must verify:\n\n\n 0 - all integer parameters\n 1 - d_v < d_v\n 2 - d_c divides n \n\n ---------------------------------------------------------------------------------------\n\n Returns: 2D-array (shape = (m,n))\n\n \"\"\"\n\n\n if n%d_c:\n raise ValueError('d_c must divide n. Help(RegularH) for more info.')\n\n if d_c <= d_v: \n raise ValueError('d_c must be greater than d_v. Help(RegularH) for more info.')\n\n m = (n*d_v)// d_c\n\n Set=np.zeros((m//d_v,n),dtype=int) \n a=m//d_v\n\n # Filling the first set with consecutive ones in each row of the set \n\n for i in range(a): \n for j in range(i*d_c,(i+1)*d_c): \n Set[i,j]=1\n\n #Create list of Sets and append the first reference set\n Sets=[]\n Sets.append(Set.tolist())\n\n #Create remaining sets by permutations of the first set's columns: \n i=1\n for i in range(1,d_v):\n newSet = np.transpose(np.random.permutation(np.transpose(Set))).tolist()\n Sets.append(newSet)\n\n #Returns concatenated list of sest:\n H = np.concatenate(Sets)\n return H\n\n\n\ndef CodingMatrix(MATRIX,use_sparse=1):\n\n \"\"\" \n CAUTION: RETURNS tG TRANSPOSED CODING MATRIX. \n \n Function Applies GaussJordan Algorithm on Columns and rows of MATRIX in order\n to permute Basis Change matrix using Matrix Equivalence.\n\n Let A be the treated Matrix. refAref the double row reduced echelon Matrix.\n\n refAref has the form:\n\n (e.g) : |1 0 0 0 0 0 ... 0 0 0 0| \n |0 1 0 0 0 0 ... 0 0 0 0| \n |0 0 0 0 0 0 ... 0 0 0 0| \n |0 0 0 1 0 0 ... 0 0 0 0| \n |0 0 0 0 0 0 ... 0 0 0 0| \n |0 0 0 0 0 0 ... 0 0 0 0| \n\n\n First, let P1 Q1 invertible matrices: P1.A.Q1 = refAref\n\n We would like to calculate:\n P,Q are the square invertible matrices of the appropriate size so that:\n\n P.A.Q = J. Where J is the matrix of the form (having MATRIX's shape):\n\n | I_p O | where p is MATRIX's rank and I_p Identity matrix of size p.\n | 0 0 |\n\n Therfore, we perform permuations of rows and columns in refAref (same changes\n are applied to Q1 in order to get final Q matrix)\n\n\n NOTE: P IS NOT RETURNED BECAUSE WE DO NOT NEED IT TO SOLVE H.G' = 0 \n P IS INVERTIBLE, WE GET SIMPLY RID OF IT. \n \n Then\n \n solves: inv(P).J.inv(Q).G' = 0 (1) where inv(P) = P^(-1) and \n P.H.Q = J. 
Help(PJQ) for more info.\n \n Let Y = inv(Q).G', equation becomes J.Y = 0 (2) whilst:\n \n J = | I_p O | where p is H's rank and I_p Identity matrix of size p.\n | 0 0 |\n \n Knowing that G must have full rank, a solution of (2) is Y = | 0 | Where k = n-p. \n | I-k |\n \n Because of rank-nullity theorem. \n \n -----------------\n parameters:\n \n H: Parity check matrix. \n use_sparse: (optional, default True): use scipy.sparse format to speed up calculations\n ---------------\n returns:\n \n tG: Transposed Coding Matrix. \n \n \"\"\"\n\n\n H = np.copy(MATRIX)\n m,n = H.shape\n\n if m > n: \n raise ValueError('MATRIX must have more rows than columns (a parity check matrix)')\n \n if n > 500 and use_sparse:\n sparse = 1\n \n else:\n sparse = 0\n ##### DOUBLE GAUSS-JORDAN:\n\n Href_colonnes,tQ = GaussJordan(np.transpose(H),1)\n\n Href_diag = GaussJordan(np.transpose(Href_colonnes)) \n\n Q=np.transpose(tQ)\n \n k = n - sum(Href_diag.reshape(m*n))\n\n \n Y = np.zeros(shape=(n,k)).astype(int)\n Y[n-k:,:] = np.identity(k)\n \n if sparse:\n Q = csr_matrix(Q)\n Y = csr_matrix(Y)\n\n tG = BinaryProduct(Q,Y)\n\n return tG\n \n \ndef CodingMatrix_systematic(MATRIX,use_sparse = 1):\n\n \"\"\" \n Description:\n\n Solves H.G' = 0 and finds the coding matrix G in the systematic form : [I_k A] by applying permutations on MATRIX.\n \n CAUTION: RETURNS TUPLE (Hp,tGS) WHERE Hp IS A MODIFIED VERSION OF THE GIVEN PARITY CHECK MATRIX, tGS THE TRANSPOSED \n SYSTEMATIC CODING MATRIX ASSOCIATED TO Hp. YOU MUST USE THE RETURNED TUPLE IN CODING AND DECODING, RATHER THAN THE UNCHANGED \n PARITY-CHECK MATRIX H. \n\n -------------------------------------------------\n Parameters: \n\n MATRIX: 2D-Array. Parity-check matrix.\n use_sparse: (optional, default True): use scipy.sparse matrices to speed up calculations if n>100.\n\n ------------------------------------------------\n\n >>> Returns Tuple of 2D-arrays (Hp,GS):\n Hp: Modified H: permutation of columns (The code doesn't change)\n tGS: Transposed Systematic Coding matrix associated to Hp.\n\n \"\"\"\n\n H = np.copy(MATRIX)\n m,n = H.shape\n \n if n>100 and use_sparse:\n sparse = 1\n else:\n sparse = 0 \n \n P1 = np.identity(n,dtype=int)\n \n Hrowreduced = GaussJordan(H)\n \n k = n - sum([a.any() for a in Hrowreduced ])\n\n ## After this loop, Hrowreduced will have the form H_ss : | I_(n-k) A |\n permut = np.array(list(range(n)))\n\n while(True):\n zeros = [i for i in range(min(m,n)) if not Hrowreduced[i,i]]\n if len(zeros)==0:\n \tbreak\n indice_colonne_a = min(zeros)\n list_ones = [j for j in range(indice_colonne_a+1,n) if Hrowreduced[indice_colonne_a,j] ]\n if not len(list_ones):\n break\n\n indice_colonne_b = min(list_ones)\n \n aux = np.copy(Hrowreduced[:,indice_colonne_a])\n Hrowreduced[:,indice_colonne_a] = Hrowreduced[:,indice_colonne_b]\n Hrowreduced[:,indice_colonne_b] = aux \n \n aux = np.copy(P1[:,indice_colonne_a])\n P1[:,indice_colonne_a] = P1[:,indice_colonne_b]\n P1[:,indice_colonne_b] = aux\n \n ############ NOW, Hrowreduced has the form: | I_(n-k) A | , the permutation above makes it look like : \n ########### |A I_(n-k)|\n \n P1 = P1.T\n identity = list(range(n))\n sigma = identity[n-k:]+identity[:n-k]\n \n P2 = np.zeros(shape=(n,n),dtype=int)\n P2[identity,sigma] = np.ones(n)\n \n if sparse:\n P1 = csr_matrix(P1)\n P2 = csr_matrix(P2)\n H = csr_matrix(H)\n\n P = BinaryProduct(P2,P1)\n \n if sparse:\n P = csr_matrix(P)\n \n Hp = BinaryProduct(H,np.transpose(P))\n\n GS = np.zeros((k,n),dtype=int)\n GS[:,:k] = np.identity(k)\n GS[:,k:] = 
np.transpose(Hrowreduced[:n-k,n-k:])\n \n \n return Hp,np.transpose(GS)\n \ndef HtG(invrate,k,systematic=True):\n\n \"\"\"\n Constructs tuple H,tG using approximate rate (k/n) and k.\n\n Parameters:\n\n - invrate= 1/rate must be > 2\n - k must be > 1 \n - systematic (Boolean optional, default = True) Construction method of transposed coding matrix tG. \n\n returns tuple: H,tG \n \"\"\"\n\n if invrate < 3:\n raise ValueError('invrate must be > 2')\n if k < 2: \n raise ValueError('k must be > 1')\n\n d_c = invrate\n d_v = invrate-1\n\n n = invrate*k - (d_c-2)*d_c\n\n H = RegularH(n,d_v,d_c)\n if systematic:\n H,tG = CodingMatrix_systematic(H)\n else:\n tG = CodingMatrix(H)\n\n return H,tG\n\n\n \n "
] | [
[
"scipy.sparse.csr_matrix",
"numpy.ones",
"numpy.concatenate",
"numpy.copy",
"numpy.identity",
"numpy.transpose",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
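The pyldpc module above builds a regular parity-check matrix with Gallager's construction; its docstring notes that the row count must satisfy m*d_c = n*d_v, with d_c dividing n and d_c > d_v. The sketch below is illustrative only (hypothetical names, NumPy only): it derives m and builds the first band of consecutive ones, the step the archived RegularH starts from before stacking column-permuted copies.

import numpy as np

def first_gallager_band(n, d_v, d_c):
    assert n % d_c == 0 and d_c > d_v, "need d_c | n and d_c > d_v"
    m = (n * d_v) // d_c              # rows of H, from m*d_c == n*d_v
    rows_per_band = m // d_v          # the construction stacks d_v permuted bands
    band = np.zeros((rows_per_band, n), dtype=int)
    for i in range(rows_per_band):
        band[i, i * d_c:(i + 1) * d_c] = 1   # d_c consecutive ones per row
    return m, band

m, band = first_gallager_band(n=12, d_v=2, d_c=4)
print(m)      # 6 parity-check rows in total
print(band)   # 3 x 12 first band; remaining bands are column permutations of it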
lixuekai2001/ml_for_log_data | [
"1e01c4c6c9a3ee6e20c5cfe8db44029c0aeaedd8"
] | [
"notebooks/c07_Recurrent_Neural_Networks/RNN_Depthseries.py"
] | [
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.6.0\n# kernelspec:\n# display_name: deep_ml_curriculum\n# language: python\n# name: deep_ml_curriculum\n# ---\n\n# + [markdown] colab_type=\"text\" id=\"iALm8shtXMVK\"\n# # Recurrent Neural Networks: A Case study in well logs and LSTM's\n#\n# All the models we have discussed so far were looking at the inputs as isolated instances. But there are many cases were datapoints are not isolated instances and have connection to each other. *Sequential data* are the type of data where each instance is related to the instances came before. \n#\n# A good example for this type of data is time series data. At each point in time to the value of the time series depends on the value of the prior points. Another example is depth data, like well logs.\n#\n# Recurrent Neural Networks (RNN) are a class of networks which deal with sequential data. There are many variants of Recurrent Neural Networks, including:\n#\n# - Simple Recurrect Neural Networks (Simple RNN - or often just called RNN)\n# - Gated Recurrent Unit (GRU)\n# - **Long Short-Term Memory (LSTM)**\n#\n# In this notebook we will discuss LSTM; however, the general logic behind all these methods are the same. They only differ in the way they handle information internally. \n#\n#\n# RNN's have been used for\n# - translation\n# - drawing chinese charectors\n# - composing music\n# - timeseries\n# - depth\n# - weather\n# - many more\n# -\n\n# ## A minute of Theory\n#\n# This is a hand on course, not theory so we will look at a high level view of one type of RNN, the LSTM. But lets look at the theory for a moment, to get some broad idea of how they work\n#\n\n# The figure below is from d2l.ai and shows how an RNN can operate on a text sequence to predict the next charector.\n#\n# \n#\n#\n\n# How does the model itself work? Let look at an excerpt from the open source machine learning book [d2l.ai](d2l.ai):\n#\n# \n#\n# > The figure below illustrates the computational logic of an RNN at three adjacent time steps. At any time step `t`, the computation of the hidden state can be treated as: \n#\n# > i) concatenating the input `Xt` at the current time step `t` and the hidden state `Ht−1` at the previous time step `t−1` ; \n#\n# > ii) feeding the concatenation result into a fully-connected layer with the activation function `ϕ`. \n#\n# > The output of such a fully-connected layer is the hidden state `Ht` of the current time step t . In this case, the model parameters are the concatenation of `Wxh` and `Whh` , and a bias of `bh`. The hidden state of the current time step `t` , `Ht` , will participate in computing the hidden state `Ht+1` of the next time step t+1 . 
What is more, `Ht` will also be fed into the fully-connected output layer to compute the output `Ot` of the current time step `t` .\n\n# To understand more see these visualisations:\n#\n# - [distill.pub memorization in rnns](memorization-in-rnns)\n# - [Chris Olah Understanding LSTMs](https://colah.github.io/posts/2015-08-Understanding-LSTMs/)\n#\n# And see these chapters:\n#\n# - [d2l.ai RNN's](http://d2l.ai/chapter_recurrent-neural-networks/rnn.html)\n# - [d2l.ai LSTM's](http://d2l.ai/chapter_recurrent-modern/lstm.html)\n#\n\n# # Hands on example with well logs\n#\n# You can read more [here](http://d2l.ai/chapter_recurrent-neural-networks/rnn.html), but lets dive into a hand on example first and it will begin to make more sense. We will be focusing on\n#\n# - How do RNN's represent data\n# - How do we implement them in pytorch\n# - What are the key parameters and example values\n# - Where might you use them\n\n# +\nimport torch\nfrom torch import nn, optim\nfrom torch import functional as F\nfrom torch.autograd import Variable\nimport torch\nimport torch.utils.data\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm.auto import tqdm\nimport xarray as xr\n# -\n\n# # Models\n\n# ## Low level\n#\n# [LSTMCell docs](https://pytorch.org/docs/stable/generated/torch.nn.LSTMCell.html)\n#\n# Lets look at a low level implementation, and compare it to the figure we previously saw\n#\n# \n\n# +\nfrom sklearn.preprocessing import LabelEncoder\n\n# Our input text\ntext = list(\"\"\"Machine Learning. Deep\"\"\")\ne = LabelEncoder()\ninput = e.fit_transform(text)\ninput\n# -\n\n# Visualize it\npd.DataFrame(list(zip(text, input)), columns=['char', 'int']).T\n\n# +\n# We can use the low level LSTM Cell\nrnn = nn.LSTMCell(input_size=1, hidden_size=20)\n\n# Input [Sequence Length=6, BatchSize=1, input_size=1]\ninput = torch.from_numpy(input)[:, None, None].float()\n\n# Initial states (Batch size, Hidden Size)\nhx = torch.randn(1, 20) # Initial hidden\ncx = torch.randn(1, 20) # Initial cell\n\noutput = []\n# we manually call it on each part of the sequence\nfor i in range(6):\n # We manually handle states\n hx, cx = rnn(input[i], (hx, cx))\n output.append(hx)\n \n# Seqence, Batch, Hidden size\noutput = torch.stack(output)\noutput.shape\n# -\n\n# As always you can read more about an LSTMCell in the help or docs\nhelp(nn.LSTMCell)\n\n# ## High level\n#\n# Or we can use the high level API that handles it for you\n#\n# [LSTMdocs](https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html)\n\nhelp(nn.LSTM)\n\n\n# +\n\nclass LSTM(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, output_size=1):\n super(LSTM, self).__init__()\n # Params\n self.num_layers = num_layers\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.seq_length = seq_length\n \n # High level LSTM library, nn.LSTMCell is a lower level one\n self.lstm = nn.LSTM(\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n batch_first=True,\n )\n \n # Final layer\n self.linear = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n seq_len = x.shape[1]\n \n output, (_, _) = self.lstm(x)\n h = output.view(-1, seq_len, self.hidden_size)[:, -1]\n \n return self.linear(h)\n\n\n# + [markdown] colab_type=\"text\" id=\"vaL6j3pkCen3\"\n# ## The well log classification problem \n#\n# In this scenario we are drilling downwards, and while well logs are reported instantly, there is a lag in facies of around 15 meter (see diagram), while they are interpreated by 
a petrophysicist. The problem is we would like to know the facies as soon as possible in order decide if, how, and where to drill.\n#\n# Lets apply machine learning. There are many ways to set up this problem, and geology is especially hard due to the important of context and the amount of undigitized information (much of it is in the brain of old and grizzled geologists).\n#\n# In this scenario we will apply an RNN. \n# - It will travel down the well\n# - Input are \n# - the last 200 meters of well logs \n# - and the geologist facies interpreation up to 15 meters ago\n# - The label is the facies at the point in the well\n#\n#\n# You may ask: \"Isn't it cheating? Because it knows the human labels from 15 meters above?\" \n#\n# We measure this and it gives a ~60% accuracy. So this is the naive baseline that we have to beat.\n#\n# <img src=\"images/diagram.png\" width=\"600\"/>\n#\n#\n# -\n\n# # Parameters\n\n# +\n# Params\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nshift_length = 100\nseq_length = 600\nmax_lithologies = 12\nmax_wells = 20\n\n\nprint(f'context length of {0.15*seq_length} m or {seq_length} intervals')\nprint(f'model can see human labels up to {shift_length*0.15}m above. Or {shift_length} intervals')\n# -\n\n# ## Data\n\n# + [markdown] colab_type=\"text\" id=\"kzlqXAj4EIBN\"\n# In this example we are going to look at well logs which are sequential data as well.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 255} colab_type=\"code\" id=\"uNl846nE-jjq\" outputId=\"de7b4197-6a3f-4e88-e07e-2463adba90d0\"\n\n\nimport pandas as pd\nimport xarray as xr\nxf = xr.open_zarr(\"../../data/processed/geolink_norge_dataset/geolink_norge_well_logs.zarr\")\n\n# We will use just the 30* wells\nxf = xf.where(xf['Well'].str.startswith('30')).dropna(dim='Well', how='all')\n\ndf = xf.to_dataframe().swaplevel()\ndf['LITHOLOGY_GEOLINK'] = df['LITHOLOGY_GEOLINK'].astype('category')\ndf['Well'] = df.index.get_level_values(0).astype('category')\ndf['DEPT'] = df.index.get_level_values(1)\n\n# Keep these cols\nfeature_cols = ['CALI', 'DTC', 'GR', 'RDEP', 'RHOB',\n 'RMED', 'xc', 'yc', 'DEPT']\ndf = df.dropna(how='any', subset=feature_cols+['LITHOLOGY_GEOLINK'])\ndf = df.sort_index()\ndf\n# -\n\n\n\n#\n# <div class=\"alert alert-success\">\n# <h2>Exercise</h2>\n#\n# Discussion: Are there better ways we should set this up?\n# \n# What are the benefits?\n# \n# What information are we missing?\n# \n#\n# <details>\n# <summary><b>→ Hints</b></summary>\n#\n# There is no right answer except experimentation, but on creating this demo we found:\n# \n# * Generalising to a new well is hard, and it's important to have a similar distribution in test and train. So we took the top of some wells, and the bottom of others as training. \n# * Seeing the previous labels is important, as this encodes how the particular geologist interprets facies in this well. Which can often have some subjectivity\n# * Long context help a lot, but also slow it down. 
We're using the last 200 meters, but seeing the whole well helps\n# * Using all wells, instead of just the 30* wells will help it learn to generalise\n# * Using all logs may help\n# * We could do infilling instead\n# * We could make it bi-directional\n# * We could make it a sequence to sequence model, instead of sequence to 1\n# * Transformer may do better\n# * We could normalise the logs per window or well\n# * Many more\n#\n# </details>\n#\n# </div>\n\n# ## Data processing\n\n# We will stick to a group of long wells 29, 30, 31, 35 are valid groups\n# df=df[df['Well'].str.startswith('30')]\ncounts = df['Well'].value_counts()\ncounts[counts>0]\n\n\n\n# Let take the top N lithologies, replacing the rest with \"rare\"\n# print(len(df))\nremoved_labels = df[\"LITHOLOGY_GEOLINK\"].value_counts()[max_lithologies:].index\nprint(df['LITHOLOGY_GEOLINK'].value_counts())\nprint('removed_labels', removed_labels)\nl = df[\"LITHOLOGY_GEOLINK\"].values.remove_categories(removed_labels)\ndf['LITHOLOGY_GEOLINK'] = l.add_categories('rare').fillna('rare')\ndf['LITHOLOGY_GEOLINK'].value_counts()\n\n# Lets keep the top 12 lithologies, and rename the rest as \"rare\" (if any)\nremoved_labels = list(df[\"LITHOLOGY_GEOLINK\"].value_counts()[12:].keys())\ni = df[\"LITHOLOGY_GEOLINK\"].values.remove_categories(removed_labels)\ni[i.isna()]\n\n# +\n# Remove unused categories, and order\ndf['LITHOLOGY_GEOLINK'] = df['LITHOLOGY_GEOLINK'].values.remove_unused_categories()\n\n# sort categories (leads to nicer histograms)\ni = df['LITHOLOGY_GEOLINK'].values\nlitho_sorted = i.value_counts().sort_values(ascending=True).index\ndf['LITHOLOGY_GEOLINK'] = i.reorder_categories(list(litho_sorted), ordered=True)\n\ndf['LITHOLOGY_GEOLINK'].values.categories\n# -\n\ndf['LITHOLOGY_GEOLINK'].value_counts().plot.bar()\ndf['LITHOLOGY_GEOLINK'].value_counts()\n\n# Gvie each well an number, since the model needs numbers\nwell_index = df['Well'].values\nwell_int = well_index.rename_categories(range(len(well_index.categories))).astype(int)\ndf['Well_int']= well_int\ndf[['Well_int']]\n\n# Select the N longest well logs\nwells = sorted(df['Well'].unique())\nn_wells = min(len(wells), max_wells)\nselected_wells = wells[:n_wells]\ndf = df.loc[selected_wells]\n# df\n\n# Get a list of wells, ordered by frequency\nwell_counts = df['Well'].value_counts()\nwell_counts = well_counts[well_counts>0]\nwells = list(well_counts.index)\n# well_counts.plot.bar()\n1\n# well_counts\n\n# We want to see the facies N intervals above\ndf['LITH_ABV'] = df[\"LITHOLOGY_GEOLINK\"].shift(shift_length).fillna('Shaly Silt')\ndf['LITH_ABV_INT'] = df['LITH_ABV'].values.codes\ndf[['LITHOLOGY_GEOLINK', 'LITH_ABV']]\n\n# ### Split data\n#\n# There are many ways to split the data, the best way would be to split by well, but this is too hard and leads to poor results.\n#\n# We could split randomly but this is too easy, since seeing the lithology at 1000 m gives you 90% of the answer at 1010 m.\n#\n# Lets split the wells by depth, this way the model gets some idea about each well, but can't peek ahead. We will take the top of the well as training for even numbered wells, and vice versa. 
There is a graph below showing the outcome.\n#\n\n# +\n\nfrom functools import partial\n\ndef get_depth_thresh(x, even_bottom=True):\n \"\"\"\n On even number well codes take the bottom of the well for trainin\n \"\"\"\n if len(x)==0: return x\n \n # if the well code is even take the top \n code_is_even = (x['Well'].values.codes[0]%2)==0\n if code_is_even:\n even_bottom = not even_bottom\n \n d = x['DEPT']\n thresh = np.round(d.mean())\n x['thresh'] = thresh\n if even_bottom:\n return x[d<thresh]\n else:\n return x[d>thresh]\n\n\ndf_test = df.groupby(level=0).apply(partial(get_depth_thresh, even_bottom=False))\ndf_train = df.groupby(level=0).apply(partial(get_depth_thresh, even_bottom=True))\nprint('train', df_train.shape, 'test', df_test.shape)\nprint(f'Train {len(df_train)/len(df):.0%}, test {len(df_test)/len(df):.0%}')\n\n# +\ntrain = []\ntest = []\nfor i, well in enumerate(selected_wells):\n df_well = df.loc[well]\n df_well.name = well\n i_halfway = int(len(df_well)*0.5)\n df_top = df_well.iloc[:i_halfway]\n df_bottom = df_well.iloc[i_halfway:]\n is_even = i%2==0\n if is_even==0:\n train.append(df_top)\n test.append(df_bottom)\n else:\n train.append(df_bottom)\n test.append(df_top)\n \ndf_test = pd.concat(test).set_index(['Well', 'DEPT'], drop=False)\ndf_train = pd.concat(train).set_index(['Well', 'DEPT'], drop=False)\nprint('train', df_train.shape, 'test', df_test.shape)\nprint(f'Train {len(df_train)/len(df):.0%}, test {len(df_test)/len(df):.0%}')\n\n# +\n# Plot the data split\nwell_split = []\nfor i, well in enumerate(selected_wells):\n df_well = df.loc[well]\n i_halfway = int(len(df_well)*0.5)\n is_even = i%2==0\n well_split.append(dict(\n well=well,\n top=df_well.Depth.min(),\n half=df_well.Depth.iloc[i_halfway],\n bottom=df_well.Depth.max(),\n train_top=is_even,\n ))\n \ndf_well_split = pd.DataFrame(well_split)\n\nwell_top = df_well_split[df_well_split.train_top]\nwell_bottom = df_well_split[~df_well_split.train_top]\n\n# Do the ones where train is at top\nplt.bar(\n x=well_top.well,\n height=well_top.bottom,\n color=\"green\",\n label=\"test\"\n)\nplt.bar(\n x=well_top.well,\n height=well_top.half,\n color=\"blue\",\n label=\"train\"\n)\nplt.bar(\n x=well_top.well,\n height=well_top.top,\n color=\"white\",\n)\n\n\n# Others\nplt.bar(\n x=well_bottom.well,\n height=well_bottom.bottom,\n color=\"blue\",\n)\nplt.bar(\n x=well_bottom.well,\n height=well_bottom.half,\n color=\"green\",\n)\nplt.bar(\n x=well_bottom.well,\n height=well_bottom.top,\n color=\"white\",\n)\nplt.gca().invert_yaxis()\n\nplt.legend()\nplt.title('data split')\nplt.xticks(rotation=90)\nplt.ylabel('depth')\nplt.show()\n# -\n\n# Double check there is not overlap\na=set(df_train.index)\nb=set(df_test.index)\nassert len(a.intersection(b))==0\n\n# ### Scale\n\n# We need to process the input and target data. The input data needs to be normalised with a standard scaler, and the output data needs to be converted from text to numbers. 
To convert text to numbers we use `LabelEncoder` from Scikit Learn.\n\n# +\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\n\nscaler = StandardScaler()\n\n# Make a encoder, that order by frequency\nencoder = LabelEncoder()\n\n# Instead of fitting, use the same codes as the pandas.Category\nencoder.classes_ = df[\"LITHOLOGY_GEOLINK\"].values.categories\nprint(encoder.classes_)\nfeat_cols = feature_cols = ['CALI', 'DTC', 'GR', 'RDEP', 'RHOB', 'RMED', 'xc', 'yc', 'DEPT', \"LITH_ABV_INT\"]\nscaler.fit(df[feat_cols].values)\n# -\n\n# `LabelEncoder` converts each type to a value.\n\nencoder.transform([\"Shaly Silt\"])\n\n\n# ### To pytorch sequences\n\n# We will be using depth and other measurements to determine the lithology. We dealt with the same problem in the tablular data. But in tabular data we only look at the measurements at each depth to find the class, while here we can look at the variations in the measurements as well.\n\n# +\n\ndef get_sequences(df, seq_length = 10):\n \"\"\"Take moving sequences of a dataframe\"\"\"\n \n\n x = []\n y = []\n features = scaler.transform(df.loc[:, feat_cols].values)\n targets = encoder.transform(df.loc[:, \"LITHOLOGY_GEOLINK\"])\n\n # Add prev labels, as one hot, to inputs\n one_hot_targets = np.eye(len(encoder.classes_))[targets]\n prev_one_host_targets = np.roll(one_hot_targets, shift=shift_length)\n features = np.concatenate([features, prev_one_host_targets], 1)\n\n for i in range(len(targets) - seq_length):\n xi = features[i : i + seq_length, :]\n yi = targets[i + seq_length - 1]\n x.append(xi)\n y.append(yi)\n return x, y\n\n\n# -\n\ndef to_numpy(x):\n if isinstance(x, torch.Tensor):\n x = x.cpu().detach().numpy()\n return x\n\n\n# +\nx_train, y_train = get_sequences(df_train, seq_length=seq_length)\nx_test, y_test = get_sequences(df_test, seq_length=seq_length)\n\n# What's the shape or one row of data? \nprint(x_test[0].shape, y_test[0].shape)\nx_test[0], y_test[0]\n# -\n\n# The output of a classification model is a value for each type. The type with the highest value is the one the model thinks is most likely to be associated with the input data. Therefore, the output size of the model should be the number of types.\n\noutput_size = len(df[\"LITHOLOGY_GEOLINK\"].unique())\n\n# ### Distribution\n\n# It is important that we make sure the training and test set have close distribution. For instance, if there is a certain type in test data that doesn't exist in training data, the model will not be able to predict it.\n\n\n\n# +\ndef show_distribution(y, label):\n y = to_numpy(y)\n plt.hist(y, output_size * 2, alpha=0.5, label=label, density=True)\n plt.xticks(ticks=range(len(encoder.classes_)), labels=encoder.classes_, rotation=90)\n\nshow_distribution(y_train, 'train')\nshow_distribution(y_test, 'test')\nplt.legend()\nplt.show()\n# -\n\n# ## Baseline accuracy\n#\n# When you experiment with a machine learning problem it's important to use a baseline, to check if the model is actually doing any work. 
Sometimes you can use humans, or a prior work, but in novel problems we look at a naive answer, then aim to do better.\n#\n# Below we investigate several methods of naive estimation and try to beat the best.\n#\n# [more](https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html)\n\n# +\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.metrics import accuracy_score, f1_score\nscore_fn=accuracy_score\n\ntrue = np.array(y_test)\nfor strategy in [\"most_frequent\", \"uniform\"]:\n dummy_clf = DummyClassifier(strategy=strategy)\n dummy_clf.fit(x_train, y_train)\n score = dummy_clf.score(x_test, y_test)\n print(f\"baseline accuracy={score:2.2%} for {strategy}\")\n# -\n\n# Prev litho Baseline, this is like a model that says \"the same as the last lithology\"\npred_baseline = np.roll(true, shift=shift_length)\nscore_prev_base=score_fn(true, pred_baseline)\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values or {shift_length*0.15}m above')\n\n\n# OK so which baseline do we use? The highest is the one we need to beat\n\n# ## Train\n#\n# Note that this can be simplified with libraries like pytorch lightning or fast-ai, but they are not yet approved at many companies. So we do it manually, this also helps you see the details of the training loop.\n\nclass NumpyDataset(torch.utils.data.Dataset):\n \"\"\"Dataset wrapping arrays.\n Each sample will be retrieved by indexing array along the first dimension.\n Arguments:\n *arrays (numpy.array): arrays that have the same size of the first dimension.\n \"\"\"\n\n def __init__(self, *arrays):\n self.arrays = arrays\n\n def __getitem__(self, index):\n return tuple(array[index] for array in self.arrays)\n\n def __len__(self):\n return len(self.arrays[0])\n\n\n# +\ndef train_epoch(x_train, y_train, model, bs=128, max_epoch_iters=128*128):\n model.train()\n\n training_loss = []\n training_accuracy = []\n\n # Put data into a loader\n dset_train = NumpyDataset(x_train, y_train)\n load_train = torch.utils.data.dataloader.DataLoader(\n dset_train, \n batch_size=bs, pin_memory=True,\n shuffle=True,\n )\n\n for x, y in tqdm(load_train, leave=False, desc='train'):\n # make it a pytorch gpu variable\n x = x.float().to(device)\n y = y.long().to(device)\n\n \n optimizer.zero_grad()\n preds = model(x) # Make prediction\n loss = loss_func(preds, y) # Measure error/lopss\n \n # Backprop\n loss.backward()\n optimizer.step()\n\n # Record stats\n training_loss.append(loss.item())\n accuracy = score_fn(\n to_numpy(y), to_numpy(preds).argmax(-1)\n )\n training_accuracy.append(accuracy)\n\n return [np.mean(training_loss), np.mean(training_accuracy)]\n\ndef test_epoch(x_test, y_test, model, bs=512, max_epoch_iters=128*128):\n model.eval()\n preds = []\n true = []\n\n test_loss = []\n\n dset_test = NumpyDataset(x_test[:max_epoch_iters//4], y_test[:max_epoch_iters//4])\n load_test = torch.utils.data.dataloader.DataLoader(dset_test, batch_size=bs, pin_memory=True)\n for x, y in tqdm(load_test, leave=False, desc='test'):\n x = x.float().to(device)\n y = y.long().to(device)\n \n pred = model(x)\n loss = loss_func(pred, y)\n\n preds.append(to_numpy(pred))\n true.append(to_numpy(y))\n test_loss.append(loss.item())\n\n preds = np.concatenate(preds, 0).argmax(axis=-1)\n true = np.concatenate(true, 0)\n test_accuracy = score_fn(true, preds)\n return preds, true, np.mean(test_loss), test_accuracy\n\ndef training_loop(x_train, y_train, x_test, y_test, mode, epochs=1, bs=128, max_epoch_iters=128*128):\n all_losses = []\n 
all_accuracys = []\n try:\n _, _, test_loss, test_acc = test_epoch(x_test, y_test, model, max_epoch_iters=max_epoch_iters)\n print(\n f\"Start: Test Loss = {test_loss:.2f}, Test accuracy = {test_acc:.3f}\"\n )\n for epoch in tqdm(range(epochs), desc='epochs'):\n loss, acc = train_epoch(x_train, y_train, model, bs=bs, max_epoch_iters=max_epoch_iters)\n print(f\"Epoch {epoch+1}/{epochs}: Training Loss = {loss:.2f}, Train accuracy = {acc:.3f}\")\n \n _, _, test_loss, test_acc = test_epoch(x_test, y_test, model, max_epoch_iters=max_epoch_iters)\n print(\n f\"Epoch {epoch+1}/{epochs}: Test Loss = {test_loss:.2f}, Test accuracy = {test_acc:.3f}\"\n )\n print(\"-\" * 50)\n \n all_losses.append([loss, test_loss])\n all_accuracys.append([acc, test_acc])\n \n except KeyboardInterrupt:\n # This lets you stop manually. and still get the results\n pass\n\n # Visualising the results\n all_losses = np.array(all_losses)\n plt.plot(all_losses[:, 0], label=\"Training\")\n plt.plot(all_losses[:, 1], label=\"Test\")\n plt.title(\"Loss\")\n plt.legend()\n \n plt.figure()\n all_accuracys = np.array(all_accuracys)\n plt.plot(all_accuracys[:, 0], label=\"Training\")\n plt.plot(all_accuracys[:, 1], label=\"Test\")\n plt.title(\"accuracy\")\n plt.legend()\n \n return all_losses, all_accuracys\n# -\n\n\n\n# Init the model\nmodel = LSTM(\n input_size=x_train[0].shape[-1],\n hidden_size=32,\n num_layers=2,\n output_size=output_size,\n)\nmodel = model.to(device)\nmodel\n\n# Init the optimiser, and loss function\noptimizer = optim.Adam(model.parameters(), lr=0.001)\nloss_func = torch.nn.CrossEntropyLoss().to(device)\n\n# Let's train for 10 epochs\n\ntraining_loop(x_train, y_train, x_test, y_test, model, epochs=4, bs=128)\n\n# Did it overfit?\n\n# ## Test\n\npreds, true, loss, acc = test_epoch(x_test, y_test, model)\nprint('final test acc', acc)\n\n\n\n# ### Reports\n\n# This beats the baseline, so the model is doing better than the naive answer of \"the same again\". 
But lets break it down by lithology\n\n\n\n# +\n# pred_baseline = np.roll(true, shift=shift_length)\n# df_report = classification_report(true, pred_baseline, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\n# df_report[df_report.support>0]\n# -\n\nfrom deep_ml_curriculum.classification_report import pd_classification_report\ndf_report = classification_report(true, preds, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\ndf_report[df_report.support>0]\n\n\ndef confusion_matrix(true, preds):\n cm = sklearn.metrics.confusion_matrix(true, preds, labels=range(len(encoder.classes_)))\n\n plt.figure(figsize=(10, 10))\n plt.title('Confusion Matrix')\n ax=plt.gca()\n disp = sklearn.metrics.ConfusionMatrixDisplay(confusion_matrix=cm,\n display_labels=encoder.classes_)\n disp.plot(ax=ax, xticks_rotation=90)\n plt.show()\n\n\nconfusion_matrix(true, preds)\n\n# ## Plot results\n\n# Let's have a look at model's predictions.\n\n# +\nfrom deep_ml_curriculum.visualization.well_log import plot_well_pred\n\ndef plot_well(df, model, depth_min=0, depth_max=18000, well_name=\"30_6-11\", device=device):\n logs = df.loc[well_name].sort_index()\n x_test, y_test = get_sequences(logs)\n x_test = torch.Tensor(x_test)\n preds = to_numpy(model(x_test.to(device)).argmax(axis=-1))\n acc = score_fn(y_test, preds)\n df_log_results = logs.iloc[10:].copy()\n df_log_results['pred'] = pd.Categorical(encoder.inverse_transform(preds), categories=df_log_results.LITHOLOGY_GEOLINK.values.categories)\n \n # Lets zoom in on an interesting interval a:b\n plot_well_pred(f'{well_name} acc={acc:2.2f}', df_log_results.loc[depth_min:depth_max],\n facies_true=df_log_results.loc[depth_min:depth_max].LITHOLOGY_GEOLINK.values, \n facies_pred=df_log_results.loc[depth_min:depth_max].pred.values)\n plt.show()\n return df_log_results[['LITHOLOGY_GEOLINK', 'pred']]\n \nplot_well(df, model)\n# -\n\nprint(f'{n_wells} wells. {max_lithologies} lithologies')\nprint(f'context length of {0.15*seq_length} m or {seq_length} intervals')\nprint(f'model can see human labels up to {shift_length*0.15}m above. Or {shift_length} intervals')\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n\n# The bottom half was test\nplot_well(df, model)#, depth_min=3000, depth_max=6000)\n\n# We can also zoom into a range\nplot_well(df, model, depth_min=3200, depth_max=3500)\n1\n\n\n\n# The model requires hyper parameter tuning and possibly training over 100s of epochs to reach the best results. However, in this example due to large size of dataset and the model we stopped after `10` epochs. \n#\n# There are number ways we can improve it:\n#\n# - Training for longer. Instead of stopping after 10 `epochs` go for longer. (might overfit)\n# - Increase or decrease the `hidden_size`. (might overfit)\n# - Increase the size of the sequences `seq_length` so the model get to look further in the history. (might underfit)\n# - Increase the learning rate or decrease batch size `bs` (might overfit)\n# - (advanced) Increase the size of training data by adding data from more wells to training. `max_wells` (might underfit)\n#\n# #### Exercise 2\n#\n# Try one of the options above to improve the model. (hint search for \"# CHANGE ME\", change values, then rerun notebook)\n\n# <div class=\"alert alert-success\">\n# <h2>Exercise</h2>\n#\n# Try one of the options above to improve the model. 
\n# \n# To help we've collected and summarised all the code below, so you can change and run the cells below\n# \n# \n# ```python\n# # Params\n# seq_length = 400 # CHANGE ME\n#\n# # Prepare data\n# x_train, y_train = get_sequences(df_train, seq_length=seq_length)\n# x_test, y_test = get_sequences(df_test, seq_length=seq_length)\n#\n# # Init the model\n# model = LSTM(\n# input_size=x_train[0].shape[-1],\n# hidden_size=64, # CHANGE ME\n# num_layers=3, # CHANGE ME\n# output_size=output_size,\n# ).to(device)\n#\n# # Init the optimiser, and loss function\n# optimizer = optim.Adam(model.parameters(), lr=0.001) # CHANGE ME\n# loss_func = torch.nn.CrossEntropyLoss().to(device)\n#\n# # Train\n# training_loop(x_train, y_train, x_test, y_test, model, epochs=10, bs=128) # Change me\n#\n# # Measure baseline\n# pred_baseline = np.roll(np.array(y_test), shift=shift_length)\n# score_prev_base=score_fn(y_test, pred_baseline)\n# print(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n# print(f'{n_wells} wells. {max_lithologies} lithologies')\n# print(f'context length of {0.15*seq_length} m or {seq_length} intervals')\n# print(f'model can see human labels up to {shift_length*0.15}m above. Or {shift_length} intervals')\n# print(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n#\n# # Test\n# preds, true, loss, acc = test_epoch(x_test, y_test, model)\n# print('acc', acc)\n#\n# df_report = classification_report(true, preds, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\n# display(df_report[df_report.support>0])\n#\n# plot_well(df, model)\n# confusion_matrix(true, preds)\n# 1\n# ```\n#\n# <details>\n# <summary><b>→ Hints</b></summary>\n# \n# - The model is close to over fitting to just increasing epochs, or hidden size likely wont help\n#\n# - To change a value\n# - Hint search for \"# CHANGE ME\" below\n# - Change values\n# - then run the cells\n# - wait, some values will take longer\n#\n# </details>\n#\n# <br/>\n# <br/>\n# <details>\n# <summary>\n# <b>→ Solution</b>\n# </summary>\n#\n# ```python\n# # this helps a lot\n# seq_length = 1000 \n# ```\n#\n# </details>\n#\n# </div>\n\n# +\n# Params\nseq_length = 400 # CHANGE ME\n\n# Prepare data\nx_train, y_train = get_sequences(df_train, seq_length=seq_length)\nx_test, y_test = get_sequences(df_test, seq_length=seq_length)\n\n# Init the model\nmodel = LSTM(\n input_size=x_train[0].shape[-1],\n hidden_size=64, # CHANGE ME\n num_layers=3, # CHANGE ME\n output_size=output_size,\n).to(device)\n\n# Init the optimiser, and loss function\noptimizer = optim.Adam(model.parameters(), lr=0.001) # CHANGE ME\nloss_func = torch.nn.CrossEntropyLoss().to(device)\n\n# Train\ntraining_loop(x_train, y_train, x_test, y_test, model, epochs=10, bs=128) # Change me\n\n# Measure baseline\npred_baseline = np.roll(np.array(y_test), shift=shift_length)\nscore_prev_base=score_fn(y_test, pred_baseline)\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\nprint(f'{n_wells} wells. {max_lithologies} lithologies')\nprint(f'context length of {0.15*seq_length} m or {seq_length} intervals')\nprint(f'model can see human labels up to {shift_length*0.15}m above. 
Or {shift_length} intervals')\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n\n# Test\npreds, true, loss, acc = test_epoch(x_test, y_test, model)\nprint('final test acc', acc)\n\ndf_report = classification_report(true, preds, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\ndisplay(df_report[df_report.support>0])\n\nplot_well(df, model)\nconfusion_matrix(true, preds)\n1\n# +\n# Params\nseq_length = 400 # CHANGE ME\n\n# Prepare data\nx_train, y_train = get_sequences(df_train, seq_length=seq_length)\nx_test, y_test = get_sequences(df_test, seq_length=seq_length)\n\n# Init the model\nmodel = LSTM(\n input_size=x_train[0].shape[-1],\n hidden_size=64, # CHANGE ME\n num_layers=3, # CHANGE ME\n output_size=output_size,\n).to(device)\n\n# Init the optimiser, and loss function\noptimizer = optim.Adam(model.parameters(), lr=0.001) # CHANGE ME\n\ncounts = pd.Series(y_train).value_counts().sort_index() + 1000\nweights = 1/counts.values\nweights /= weights.sum()\nloss_func = torch.nn.CrossEntropyLoss(weight=torch.from_numpy(weights).float()).to(device)\n\nplt.title('label weighting')\nplt.bar(range(weights.shape[0]), weights)\nplt.show()\n\n# Train\ntraining_loop(x_train, y_train, x_test, y_test, model, epochs=10, bs=128) # Change me\n\n# Measure baseline\npred_baseline = np.roll(np.array(y_test), shift=shift_length)\nscore_prev_base=score_fn(y_test, pred_baseline)\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\nprint(f'{n_wells} wells. {max_lithologies} lithologies')\nprint(f'context length of {0.15*seq_length} m or {seq_length} intervals')\nprint(f'model can see human labels up to {shift_length*0.15}m above. Or {shift_length} intervals')\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n\n# Test\npreds, true, loss, acc = test_epoch(x_test, y_test, model)\nprint('final test acc', acc)\n\ndf_report = classification_report(true, preds, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\ndisplay(df_report[df_report.support>0])\n\nplot_well(df, model)\nconfusion_matrix(true, preds)\n1\n# -\n\n\n\n\n\n# ## Further Reading\n# - [Introduction to RNN](http://slazebni.cs.illinois.edu/spring17/lec02_rnn.pdf)\n# - [A friendly introduction to Recurrent Neural Networks](https://www.youtube.com/watch?v=UNmqTiOnRfg)\n# - [Recurrent Neural Networks (RNN) and Long Short-Term Memory (LSTM)](https://www.youtube.com/watch?v=WCUNPb-5EYI&t=97s)\n# - [Introduction to LSTM](https://medium.com/x8-the-ai-community/a-7-minute-introduction-to-lstm-5e1480e6f52a)\n# - [LSTM and GRU](https://towardsdatascience.com/illustrated-guide-to-lstms-and-gru-s-a-step-by-step-explanation-44e9eb85bf21)\n# - [Time Series Prediction with LSTM](https://stackabuse.com/time-series-prediction-using-lstm-with-pytorch-in-python/)\n# - [Building RNN from scratch](https://medium.com/dair-ai/building-rnns-is-fun-with-pytorch-and-google-colab-3903ea9a3a79)\n#\n\n# We can also zoom into a range\nplot_well(df, model, depth_min=3200, depth_max=3500)\n1\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.Series",
"pandas.DataFrame",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.mean",
"torch.cuda.is_available",
"sklearn.preprocessing.LabelEncoder",
"numpy.roll",
"matplotlib.pyplot.gca",
"torch.nn.CrossEntropyLoss",
"torch.randn",
"torch.from_numpy",
"torch.nn.LSTMCell",
"torch.utils.data.dataloader.DataLoader",
"matplotlib.pyplot.figure",
"pandas.concat",
"sklearn.dummy.DummyClassifier",
"matplotlib.pyplot.title",
"torch.nn.Linear",
"torch.stack",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"torch.Tensor",
"torch.nn.LSTM",
"matplotlib.pyplot.bar",
"sklearn.preprocessing.StandardScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
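The well-log record above builds moving windows of scaled features (plus the one-hot label seen `shift_length` steps above) and judges the LSTM against a "previous lithology" baseline. A minimal, self-contained sketch of those two steps, using synthetic data and hypothetical sizes rather than the notebook's dataframe:

```python
import numpy as np

# Synthetic stand-ins for the notebook's scaled logs and integer lithology codes.
rng = np.random.default_rng(0)
features = rng.normal(size=(500, 10))      # 500 depth steps, 10 input channels
labels = rng.integers(0, 12, size=500)     # 12 lithology classes

seq_length, shift_length = 10, 1           # hypothetical window length / label shift

def make_windows(features, labels, seq_length):
    """Moving windows of features; the target is the label at the window's last step."""
    x, y = [], []
    for i in range(len(labels) - seq_length):
        x.append(features[i:i + seq_length])
        y.append(labels[i + seq_length - 1])
    return np.stack(x), np.array(y)

x, y = make_windows(features, labels, seq_length)
print(x.shape, y.shape)                    # (490, 10, 10) (490,)

# "Previous label" baseline: repeat the lithology seen shift_length intervals above.
baseline_pred = np.roll(y, shift=shift_length)
print(f"baseline accuracy {np.mean(baseline_pred == y):.2%}")
```

Any sequence model trained on such windows should beat this baseline before its accuracy is taken seriously.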
siyuchen95/madminer | [
"dfcbd7ee26c47dd294610c195fafce15f74c10eb"
] | [
"madminer/utils/ml/trainer.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport six\nimport logging\nfrom collections import OrderedDict\nimport numpy as np\nimport time\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.nn.utils import clip_grad_norm_\n\nlogger = logging.getLogger(__name__)\n\n\nclass EarlyStoppingException(Exception):\n pass\n\n\nclass NanException(Exception):\n pass\n\n\nclass NumpyDataset(Dataset):\n \"\"\" Dataset for numpy arrays with explicit memmap support \"\"\"\n\n def __init__(self, *arrays, **kwargs):\n\n self.dtype = kwargs.get(\"dtype\", torch.float)\n self.memmap = []\n self.data = []\n self.n = None\n\n for array in arrays:\n if self.n is None:\n self.n = array.shape[0]\n assert array.shape[0] == self.n\n\n if isinstance(array, np.memmap):\n self.memmap.append(True)\n self.data.append(array)\n else:\n self.memmap.append(False)\n tensor = torch.from_numpy(array).to(self.dtype)\n self.data.append(tensor)\n\n def __getitem__(self, index):\n items = []\n for memmap, array in zip(self.memmap, self.data):\n if memmap:\n tensor = np.array(array[index])\n items.append(torch.from_numpy(tensor).to(self.dtype))\n else:\n items.append(array[index])\n return tuple(items)\n\n def __len__(self):\n return self.n\n\n\nclass Trainer(object):\n \"\"\" Trainer class. Any subclass has to implement the forward_pass() function. \"\"\"\n\n def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):\n self._init_timer()\n self._timer(start=\"ALL\")\n self._timer(start=\"initialize model\")\n self.model = model\n self.run_on_gpu = run_on_gpu and torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if self.run_on_gpu else \"cpu\")\n self.dtype = torch.double if double_precision else torch.float\n self.n_workers = n_workers\n\n self.model = self.model.to(self.device, self.dtype)\n\n logger.info(\n \"Training on %s with %s precision\",\n \"GPU\" if self.run_on_gpu else \"CPU\",\n \"double\" if double_precision else \"single\",\n )\n\n self._timer(stop=\"initialize model\")\n self._timer(stop=\"ALL\")\n\n def train(\n self,\n data,\n loss_functions,\n loss_weights=None,\n loss_labels=None,\n epochs=50,\n batch_size=100,\n optimizer=optim.Adam,\n optimizer_kwargs=None,\n initial_lr=0.001,\n final_lr=0.0001,\n data_val=None,\n validation_split=0.25,\n early_stopping=True,\n early_stopping_patience=None,\n clip_gradient=None,\n verbose=\"some\",\n ):\n self._timer(start=\"ALL\")\n self._timer(start=\"check data\")\n\n logger.debug(\"Initialising training data\")\n self.check_data(data)\n self.report_data(data)\n if data_val is not None:\n logger.debug(\"Found external validation data set\")\n self.check_data(data_val)\n self.report_data(data_val)\n self._timer(stop=\"check data\", start=\"make dataset\")\n data_labels, dataset = self.make_dataset(data)\n if data_val is not None:\n _, dataset_val = self.make_dataset(data_val)\n else:\n dataset_val = None\n self._timer(stop=\"make dataset\", start=\"make dataloader\")\n train_loader, val_loader = self.make_dataloaders(dataset, dataset_val, validation_split, batch_size)\n\n self._timer(stop=\"make dataloader\", start=\"setup optimizer\")\n logger.debug(\"Setting up optimizer\")\n optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs\n opt = optimizer(self.model.parameters(), lr=initial_lr, **optimizer_kwargs)\n\n early_stopping = early_stopping and (validation_split is 
not None) and (epochs > 1)\n best_loss, best_model, best_epoch = None, None, None\n if early_stopping and early_stopping_patience is None:\n logger.debug(\"Using early stopping with infinite patience\")\n elif early_stopping:\n logger.debug(\"Using early stopping with patience %s\", early_stopping_patience)\n else:\n logger.debug(\"No early stopping\")\n\n self._timer(stop=\"setup optimizer\", start=\"initialize training\")\n n_losses = len(loss_functions)\n loss_weights = [1.0] * n_losses if loss_weights is None else loss_weights\n\n # Verbosity\n if verbose == \"all\": # Print output after every epoch\n n_epochs_verbose = 1\n elif verbose == \"many\": # Print output after 2%, 4%, ..., 100% progress\n n_epochs_verbose = max(int(round(epochs / 50, 0)), 1)\n elif verbose == \"some\": # Print output after 10%, 20%, ..., 100% progress\n n_epochs_verbose = max(int(round(epochs / 20, 0)), 1)\n elif verbose == \"few\": # Print output after 20%, 40%, ..., 100% progress\n n_epochs_verbose = max(int(round(epochs / 5, 0)), 1)\n elif verbose == \"none\": # Never print output\n n_epochs_verbose = epochs + 2\n else:\n raise ValueError(\"Unknown value %s for keyword verbose\", verbose)\n logger.debug(\"Will print training progress every %s epochs\", n_epochs_verbose)\n\n logger.debug(\"Beginning main training loop\")\n losses_train, losses_val = [], []\n self._timer(stop=\"initialize training\")\n\n # Loop over epochs\n for i_epoch in range(epochs):\n logger.debug(\"Training epoch %s / %s\", i_epoch + 1, epochs)\n\n self._timer(start=\"set lr\")\n lr = self.calculate_lr(i_epoch, epochs, initial_lr, final_lr)\n self.set_lr(opt, lr)\n logger.debug(\"Learning rate: %s\", lr)\n self._timer(stop=\"set lr\")\n loss_val = None\n\n try:\n loss_train, loss_val, loss_contributions_train, loss_contributions_val = self.epoch(\n i_epoch, data_labels, train_loader, val_loader, opt, loss_functions, loss_weights, clip_gradient\n )\n losses_train.append(loss_train)\n losses_val.append(loss_val)\n except NanException:\n logger.info(\"Ending training during epoch %s because NaNs appeared\", i_epoch + 1)\n break\n\n self._timer(start=\"early stopping\")\n if early_stopping:\n try:\n best_loss, best_model, best_epoch = self.check_early_stopping(\n best_loss, best_model, best_epoch, loss_val, i_epoch, early_stopping_patience\n )\n except EarlyStoppingException:\n logger.info(\"Early stopping: ending training after %s epochs\", i_epoch + 1)\n break\n self._timer(stop=\"early stopping\", start=\"report epoch\")\n\n verbose_epoch = (i_epoch + 1) % n_epochs_verbose == 0\n self.report_epoch(\n i_epoch,\n loss_labels,\n loss_train,\n loss_val,\n loss_contributions_train,\n loss_contributions_val,\n verbose=verbose_epoch,\n )\n self._timer(stop=\"report epoch\")\n\n self._timer(start=\"early stopping\")\n if early_stopping and len(losses_val) > 0:\n self.wrap_up_early_stopping(best_model, loss_val, best_loss, best_epoch)\n self._timer(stop=\"early stopping\")\n\n logger.debug(\"Training finished\")\n\n self._timer(stop=\"ALL\")\n self._report_timer()\n\n return np.array(losses_train), np.array(losses_val)\n\n @staticmethod\n def report_data(data):\n logger.debug(\"Training data:\")\n for key, value in six.iteritems(data):\n if value is None:\n logger.debug(\" %s: -\", key)\n else:\n logger.debug(\n \" %s: shape %s, first %s, mean %s, min %s, max %s\",\n key,\n value.shape,\n value[0],\n np.mean(value, axis=0),\n np.min(value, axis=0),\n np.max(value, axis=0),\n )\n\n @staticmethod\n def check_data(data):\n pass\n\n def 
make_dataset(self, data):\n data_arrays = []\n data_labels = []\n for key, value in six.iteritems(data):\n data_labels.append(key)\n data_arrays.append(value)\n dataset = NumpyDataset(*data_arrays, dtype=self.dtype)\n return data_labels, dataset\n\n def make_dataloaders(self, dataset, dataset_val, validation_split, batch_size):\n if dataset_val is None and (validation_split is None or validation_split <= 0.0):\n train_loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu, num_workers=self.n_workers\n )\n val_loader = None\n\n elif dataset_val is not None:\n train_loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu, num_workers=self.n_workers\n )\n val_loader = DataLoader(\n dataset_val, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu, num_workers=self.n_workers\n )\n\n else:\n assert 0.0 < validation_split < 1.0, \"Wrong validation split: {}\".format(validation_split)\n\n n_samples = len(dataset)\n indices = list(range(n_samples))\n split = int(np.floor(validation_split * n_samples))\n np.random.shuffle(indices)\n train_idx, valid_idx = indices[split:], indices[:split]\n\n train_sampler = SubsetRandomSampler(train_idx)\n val_sampler = SubsetRandomSampler(valid_idx)\n\n train_loader = DataLoader(\n dataset,\n sampler=train_sampler,\n batch_size=batch_size,\n pin_memory=self.run_on_gpu,\n num_workers=self.n_workers,\n )\n val_loader = DataLoader(\n dataset,\n sampler=val_sampler,\n batch_size=batch_size,\n pin_memory=self.run_on_gpu,\n num_workers=self.n_workers,\n )\n\n return train_loader, val_loader\n\n @staticmethod\n def calculate_lr(i_epoch, n_epochs, initial_lr, final_lr):\n if n_epochs == 1:\n return initial_lr\n return initial_lr * (final_lr / initial_lr) ** float(i_epoch / (n_epochs - 1.0))\n\n @staticmethod\n def set_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n def epoch(\n self,\n i_epoch,\n data_labels,\n train_loader,\n val_loader,\n optimizer,\n loss_functions,\n loss_weights,\n clip_gradient=None,\n ):\n n_losses = len(loss_functions)\n\n self.model.train()\n loss_contributions_train = np.zeros(n_losses)\n loss_train = 0.0\n\n self._timer(start=\"load training batch\")\n for i_batch, batch_data in enumerate(train_loader):\n batch_data = OrderedDict(list(zip(data_labels, batch_data)))\n self._timer(stop=\"load training batch\")\n\n batch_loss, batch_loss_contributions = self.batch_train(\n batch_data, loss_functions, loss_weights, optimizer, clip_gradient\n )\n loss_train += batch_loss\n for i, batch_loss_contribution in enumerate(batch_loss_contributions):\n loss_contributions_train[i] += batch_loss_contribution\n\n self.report_batch(i_epoch, i_batch, batch_loss)\n\n self._timer(start=\"load training batch\")\n self._timer(stop=\"load training batch\")\n\n loss_contributions_train /= len(train_loader)\n loss_train /= len(train_loader)\n\n if val_loader is not None:\n self.model.eval()\n loss_contributions_val = np.zeros(n_losses)\n loss_val = 0.0\n\n self._timer(start=\"load validation batch\")\n for i_batch, batch_data in enumerate(val_loader):\n batch_data = OrderedDict(list(zip(data_labels, batch_data)))\n self._timer(stop=\"load validation batch\")\n\n batch_loss, batch_loss_contributions = self.batch_val(batch_data, loss_functions, loss_weights)\n loss_val += batch_loss\n for i, batch_loss_contribution in enumerate(batch_loss_contributions):\n loss_contributions_val[i] += batch_loss_contribution\n\n 
self._timer(start=\"load validation batch\")\n self._timer(stop=\"load validation batch\")\n\n loss_contributions_val /= len(val_loader)\n loss_val /= len(val_loader)\n\n else:\n loss_contributions_val = None\n loss_val = None\n\n return loss_train, loss_val, loss_contributions_train, loss_contributions_val\n\n def batch_train(self, batch_data, loss_functions, loss_weights, optimizer, clip_gradient=None):\n self._timer(start=\"training forward pass\")\n loss_contributions = self.forward_pass(batch_data, loss_functions)\n self._timer(stop=\"training forward pass\", start=\"training sum losses\")\n loss = self.sum_losses(loss_contributions, loss_weights)\n self._timer(stop=\"training sum losses\", start=\"optimizer step\")\n\n self.optimizer_step(optimizer, loss, clip_gradient)\n self._timer(stop=\"optimizer step\", start=\"training sum losses\")\n\n loss = loss.item()\n loss_contributions = [contrib.item() for contrib in loss_contributions]\n self._timer(stop=\"training sum losses\")\n\n return loss, loss_contributions\n\n def batch_val(self, batch_data, loss_functions, loss_weights):\n self._timer(start=\"validation forward pass\")\n loss_contributions = self.forward_pass(batch_data, loss_functions)\n self._timer(stop=\"validation forward pass\", start=\"validation sum losses\")\n loss = self.sum_losses(loss_contributions, loss_weights)\n\n loss = loss.item()\n loss_contributions = [contrib.item() for contrib in loss_contributions]\n self._timer(stop=\"validation sum losses\")\n return loss, loss_contributions\n\n def forward_pass(self, batch_data, loss_functions):\n \"\"\"\n Forward pass of the model. Needs to be implemented by any subclass.\n\n Parameters\n ----------\n batch_data : OrderedDict with str keys and Tensor values\n The data of the minibatch.\n\n loss_functions : list of function\n Loss functions.\n\n Returns\n -------\n losses : list of Tensor\n Losses as scalar pyTorch tensors.\n\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def sum_losses(contributions, weights):\n loss = weights[0] * contributions[0]\n for _w, _l in zip(weights[1:], contributions[1:]):\n loss = loss + _w * _l\n return loss\n\n def optimizer_step(self, optimizer, loss, clip_gradient):\n self._timer(start=\"opt: zero grad\")\n optimizer.zero_grad()\n self._timer(stop=\"opt: zero grad\", start=\"opt: backward\")\n loss.backward()\n self._timer(start=\"opt: clip grad norm\", stop=\"opt: backward\")\n if clip_gradient is not None:\n clip_grad_norm_(self.model.parameters(), clip_gradient)\n self._timer(stop=\"opt: clip grad norm\", start=\"opt: step\")\n optimizer.step()\n self._timer(stop=\"opt: step\")\n\n def check_early_stopping(self, best_loss, best_model, best_epoch, loss, i_epoch, early_stopping_patience=None):\n if best_loss is None or loss < best_loss:\n best_loss = loss\n best_model = self.model.state_dict()\n best_epoch = i_epoch\n\n if early_stopping_patience is not None and i_epoch - best_epoch > early_stopping_patience >= 0:\n raise EarlyStoppingException\n\n if loss is None or not np.isfinite(loss):\n raise EarlyStoppingException\n\n return best_loss, best_model, best_epoch\n\n @staticmethod\n def report_batch(i_epoch, i_batch, loss_train):\n if i_batch in [0, 1, 10, 100, 1000]:\n logger.debug(\" Epoch {:>3d}, batch {:>3d}: loss {:>8.5f}\".format(i_epoch + 1, i_batch + 1, loss_train))\n\n @staticmethod\n def report_epoch(\n i_epoch, loss_labels, loss_train, loss_val, loss_contributions_train, loss_contributions_val, verbose=False\n ):\n logging_fn = logger.info if verbose else 
logger.debug\n\n def contribution_summary(labels, contributions):\n summary = \"\"\n for i, (label, value) in enumerate(zip(labels, contributions)):\n if i > 0:\n summary += \", \"\n summary += \"{}: {:>6.3f}\".format(label, value)\n return summary\n\n train_report = \" Epoch {:>3d}: train loss {:>8.5f} ({})\".format(\n i_epoch + 1, loss_train, contribution_summary(loss_labels, loss_contributions_train)\n )\n logging_fn(train_report)\n\n if loss_val is not None:\n val_report = \" val. loss {:>8.5f} ({})\".format(\n loss_val, contribution_summary(loss_labels, loss_contributions_val)\n )\n logging_fn(val_report)\n\n def wrap_up_early_stopping(self, best_model, currrent_loss, best_loss, best_epoch):\n if best_loss is None or not np.isfinite(best_loss):\n logger.warning(\"Best loss is None, cannot wrap up early stopping\")\n elif currrent_loss is None or not np.isfinite(currrent_loss) or best_loss < currrent_loss:\n logger.info(\n \"Early stopping after epoch %s, with loss %8.5f compared to final loss %8.5f\",\n best_epoch + 1,\n best_loss,\n currrent_loss,\n )\n self.model.load_state_dict(best_model)\n else:\n logger.info(\"Early stopping did not improve performance\")\n\n @staticmethod\n def _check_for_nans(label, *tensors):\n for tensor in tensors:\n if tensor is None:\n continue\n if torch.isnan(tensor).any():\n logger.warning(\"%s contains NaNs, aborting training!\", label)\n raise NanException\n\n def _init_timer(self):\n self.timer = OrderedDict()\n self.time_started = OrderedDict()\n\n def _timer(self, start=None, stop=None):\n if start is not None:\n self.time_started[start] = time.time()\n\n if stop is not None:\n if stop not in list(self.time_started.keys()):\n logger.warning(\"Timer for task %s has been stopped without being started before\", stop)\n return\n\n dt = time.time() - self.time_started[stop]\n del self.time_started[stop]\n\n if stop in list(self.timer.keys()):\n self.timer[stop] += dt\n else:\n self.timer[stop] = dt\n\n def _report_timer(self):\n logger.info(\"Training time spend on:\")\n for key, value in six.iteritems(self.timer):\n logger.info(\" {:>32s}: {:6.2f}h\".format(key, value / 3600.0))\n\n\nclass SingleParameterizedRatioTrainer(Trainer):\n def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):\n super(SingleParameterizedRatioTrainer, self).__init__(model, run_on_gpu, double_precision, n_workers)\n self.calculate_model_score = True\n\n def check_data(self, data):\n data_keys = list(data.keys())\n if \"x\" not in data_keys or \"theta\" not in data_keys or \"y\" not in data_keys:\n raise ValueError(\"Missing required information 'x', 'theta', or 'y' in training data!\")\n\n for key in data_keys:\n if key not in [\"x\", \"theta\", \"y\", \"r_xz\", \"t_xz\"]:\n logger.warning(\"Unknown key %s in training data! 
Ignoring it.\", key)\n\n self.calculate_model_score = \"t_xz\" in data_keys\n if self.calculate_model_score:\n logger.debug(\"Model score will be calculated\")\n else:\n logger.debug(\"Model score will not be calculated\")\n\n def forward_pass(self, batch_data, loss_functions):\n self._timer(start=\"fwd: move data\")\n theta = batch_data[\"theta\"].to(self.device, self.dtype, non_blocking=True)\n x = batch_data[\"x\"].to(self.device, self.dtype, non_blocking=True)\n y = batch_data[\"y\"].to(self.device, self.dtype, non_blocking=True)\n try:\n r_xz = batch_data[\"r_xz\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n r_xz = None\n try:\n t_xz = batch_data[\"t_xz\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n t_xz = None\n self._timer(stop=\"fwd: move data\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Training data\", theta, x, y)\n self._check_for_nans(\"Augmented training data\", r_xz, t_xz)\n self._timer(start=\"fwd: model.forward\", stop=\"fwd: check for nans\")\n\n if self.calculate_model_score:\n theta.requires_grad = True\n\n s_hat, log_r_hat, t_hat = self.model(theta, x, track_score=self.calculate_model_score, return_grad_x=False)\n self._timer(stop=\"fwd: model.forward\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Model output\", log_r_hat, s_hat)\n self._check_for_nans(\"Model score\", t_hat)\n\n self._timer(start=\"fwd: calculate losses\", stop=\"fwd: check for nans\")\n losses = [loss_function(s_hat, log_r_hat, t_hat, None, y, r_xz, t_xz, None) for loss_function in loss_functions]\n self._timer(stop=\"fwd: calculate losses\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Loss\", *losses)\n self._timer(stop=\"fwd: check for nans\")\n\n return losses\n\n\nclass DoubleParameterizedRatioTrainer(Trainer):\n def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):\n super(DoubleParameterizedRatioTrainer, self).__init__(model, run_on_gpu, double_precision, n_workers)\n self.calculate_model_score = True\n\n def check_data(self, data):\n data_keys = list(data.keys())\n if \"x\" not in data_keys or \"theta0\" not in data_keys or \"theta1\" not in data_keys or \"y\" not in data_keys:\n raise ValueError(\"Missing required information 'x', 'theta0', 'theta1', or 'y' in training data!\")\n\n for key in data_keys:\n if key not in [\"x\", \"theta0\", \"theta1\", \"y\", \"r_xz\", \"t_xz0\", \"t_xz1\"]:\n logger.warning(\"Unknown key %s in training data! 
Ignoring it.\", key)\n\n self.calculate_model_score = \"t_xz0\" in data_keys or \"t_xz1\" in data_keys\n if self.calculate_model_score:\n logger.debug(\"Model score will be calculated\")\n else:\n logger.debug(\"Model score will not be calculated\")\n\n def forward_pass(self, batch_data, loss_functions):\n self._timer(start=\"fwd: move data\")\n theta0 = batch_data[\"theta0\"].to(self.device, self.dtype, non_blocking=True)\n theta1 = batch_data[\"theta1\"].to(self.device, self.dtype, non_blocking=True)\n x = batch_data[\"x\"].to(self.device, self.dtype, non_blocking=True)\n y = batch_data[\"y\"].to(self.device, self.dtype, non_blocking=True)\n try:\n r_xz = batch_data[\"r_xz\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n r_xz = None\n try:\n t_xz0 = batch_data[\"t_xz0\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n t_xz0 = None\n try:\n t_xz1 = batch_data[\"t_xz1\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n t_xz1 = None\n self._timer(stop=\"fwd: move data\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Training data\", theta0, theta1, x, y)\n self._check_for_nans(\"Augmented training data\", r_xz, t_xz0, t_xz1)\n self._timer(start=\"fwd: model.forward\", stop=\"fwd: check for nans\")\n\n if self.calculate_model_score:\n theta0.requires_grad = True\n theta1.requires_grad = True\n\n s_hat, log_r_hat, t_hat0, t_hat1 = self.model(\n theta0, theta1, x, track_score=self.calculate_model_score, return_grad_x=False\n )\n self._timer(stop=\"fwd: model.forward\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Model output\", s_hat, log_r_hat, t_hat0, t_hat1)\n\n self._timer(start=\"fwd: calculate losses\", stop=\"fwd: check for nans\")\n losses = [\n loss_function(s_hat, log_r_hat, t_hat0, t_hat1, y, r_xz, t_xz0, t_xz1) for loss_function in loss_functions\n ]\n self._timer(stop=\"fwd: calculate losses\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Loss\", *losses)\n self._timer(stop=\"fwd: check for nans\")\n\n return losses\n\n\nclass LocalScoreTrainer(Trainer):\n def check_data(self, data):\n data_keys = list(data.keys())\n if \"x\" not in data_keys or \"t_xz\" not in data_keys:\n raise ValueError(\"Missing required information 'x' or 't_xz' in training data!\")\n\n for key in data_keys:\n if key not in [\"x\", \"t_xz\"]:\n logger.warning(\"Unknown key %s in training data! 
Ignoring it.\", key)\n\n def forward_pass(self, batch_data, loss_functions):\n self._timer(start=\"fwd: move data\")\n x = batch_data[\"x\"].to(self.device, self.dtype, non_blocking=True)\n t_xz = batch_data[\"t_xz\"].to(self.device, self.dtype, non_blocking=True)\n self._timer(stop=\"fwd: move data\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Training data\", x)\n self._check_for_nans(\"Augmented training data\", t_xz)\n\n self._timer(start=\"fwd: model.forward\", stop=\"fwd: check for nans\")\n t_hat = self.model(x)\n self._timer(stop=\"fwd: model.forward\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Model output\", t_hat)\n\n self._timer(start=\"fwd: calculate losses\", stop=\"fwd: check for nans\")\n losses = [loss_function(t_hat, t_xz) for loss_function in loss_functions]\n self._timer(stop=\"fwd: calculate losses\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Loss\", *losses)\n self._timer(stop=\"fwd: check for nans\")\n\n return losses\n\n\nclass FlowTrainer(Trainer):\n def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):\n super(FlowTrainer, self).__init__(model, run_on_gpu, double_precision, n_workers)\n self.calculate_model_score = True\n\n def check_data(self, data):\n data_keys = list(data.keys())\n if \"x\" not in data_keys or \"theta\" not in data_keys:\n raise ValueError(\"Missing required information 'x' or 'theta' in training data!\")\n\n for key in data_keys:\n if key not in [\"x\", \"theta\", \"t_xz\"]:\n logger.warning(\"Unknown key %s in training data! Ignoring it.\", key)\n\n self.calculate_model_score = \"t_xz\" in data_keys\n if self.calculate_model_score:\n logger.debug(\"Model score will be calculated\")\n else:\n logger.debug(\"Model score will not be calculated\")\n\n def forward_pass(self, batch_data, loss_functions):\n self._timer(start=\"fwd: move data\")\n x = batch_data[\"x\"].to(self.device, self.dtype, non_blocking=True)\n theta = batch_data[\"theta\"].to(self.device, self.dtype, non_blocking=True)\n try:\n t_xz = batch_data[\"t_xz\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n t_xz = None\n self._timer(stop=\"fwd: move data\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Training data\", theta, x)\n self._check_for_nans(\"Augmented training data\", t_xz)\n\n self._timer(start=\"fwd: model.forward\", stop=\"fwd: check for nans\")\n if self.calculate_model_score:\n theta.requires_grad = True\n _, log_likelihood, t_hat = self.model.log_likelihood_and_score(theta, x)\n else:\n _, log_likelihood = self.model.log_likelihood(theta, x)\n t_hat = None\n self._timer(stop=\"fwd: model.forward\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Model output\", log_likelihood, t_hat)\n\n self._timer(start=\"fwd: calculate losses\", stop=\"fwd: check for nans\")\n losses = [loss_function(log_likelihood, t_hat, t_xz) for loss_function in loss_functions]\n self._timer(stop=\"fwd: calculate losses\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Loss\", *losses)\n self._timer(stop=\"fwd: check for nans\")\n\n return losses\n"
] | [
[
"numpy.isfinite",
"numpy.min",
"torch.isnan",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SubsetRandomSampler",
"numpy.random.shuffle",
"torch.from_numpy",
"numpy.max",
"numpy.mean",
"torch.cuda.is_available",
"numpy.floor",
"torch.device",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
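The trainer record above decays the learning rate geometrically from `initial_lr` to `final_lr` across epochs and tracks the best validation loss for early stopping with an optional patience. A standalone sketch of that schedule and bookkeeping, with a stand-in validation loss and a hypothetical patience value (not the library's API):

```python
import math

def calculate_lr(i_epoch, n_epochs, initial_lr, final_lr):
    """Geometric interpolation: initial_lr at the first epoch, final_lr at the last."""
    if n_epochs == 1:
        return initial_lr
    return initial_lr * (final_lr / initial_lr) ** (i_epoch / (n_epochs - 1.0))

best_loss, best_epoch, patience = None, None, 3
for i_epoch in range(20):
    lr = calculate_lr(i_epoch, 20, initial_lr=1e-3, final_lr=1e-4)
    val_loss = 1.0 + 0.1 * math.sin(i_epoch)   # stand-in for a real validation epoch
    if best_loss is None or val_loss < best_loss:
        best_loss, best_epoch = val_loss, i_epoch   # remember the best checkpoint
    elif i_epoch - best_epoch > patience:
        print(f"stopping at epoch {i_epoch}; best epoch {best_epoch}, lr {lr:.2e}")
        break
```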
steven0820/tensorflow | [
"36ebbf1ddc3ed820b7a5572ff4ed8e9bc707b8e5",
"36ebbf1ddc3ed820b7a5572ff4ed8e9bc707b8e5",
"36ebbf1ddc3ed820b7a5572ff4ed8e9bc707b8e5",
"36ebbf1ddc3ed820b7a5572ff4ed8e9bc707b8e5"
] | [
"tensorflow/contrib/learn/python/learn/graph_actions.py",
"tensorflow/contrib/learn/python/learn/estimators/rnn.py",
"tensorflow/python/kernel_tests/concat_op_test.py",
"tensorflow/python/saved_model/example/saved_model_half_plus_two.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"High level operations on graphs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport sys\nimport threading\nimport time\n\nimport numpy as np\n\nfrom six import reraise\n\nfrom tensorflow.contrib.framework.python.ops import ops as contrib_ops\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.learn.python.learn import monitors as monitors_lib\nfrom tensorflow.contrib.learn.python.learn.utils import checkpoints\nfrom tensorflow.core.framework import summary_pb2\nfrom tensorflow.python.client import session as tf_session\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import coordinator\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import queue_runner\nfrom tensorflow.python.training import saver as tf_saver\nfrom tensorflow.python.training import session_manager as session_manager_lib\nfrom tensorflow.python.training import summary_io\nfrom tensorflow.python.training import supervisor as tf_supervisor\n\n# Singleton for SummaryWriter per logdir folder.\n_SUMMARY_WRITERS = {}\n\n# Lock protecting _SUMMARY_WRITERS\n_summary_writer_lock = threading.Lock()\n\n\ndef clear_summary_writers():\n \"\"\"Clear cached summary writers. 
Currently only used for unit tests.\"\"\"\n return summary_io.SummaryWriterCache.clear()\n\n\ndef get_summary_writer(logdir):\n \"\"\"Returns single SummaryWriter per logdir in current run.\n\n Args:\n logdir: str, folder to write summaries.\n\n Returns:\n Existing `SummaryWriter` object or new one if never wrote to given\n directory.\n \"\"\"\n return summary_io.SummaryWriterCache.get(logdir)\n\n\ndef _make_saver(graph, keep_checkpoint_max=5):\n vars_to_save = graph.get_collection(ops.GraphKeys.VARIABLES)\n if vars_to_save:\n return tf_saver.Saver(vars_to_save,\n sharded=True,\n max_to_keep=keep_checkpoint_max)\n else:\n return None\n\n\ndef _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):\n logging.info('Loading model from checkpoint: %s.', checkpoint_path)\n saver = saver or _make_saver(graph)\n if saver:\n saver.restore(session, checkpoint_path)\n else:\n logging.info('No variables found in graph, not creating Saver() object.')\n\n\ndef _run_with_monitors(session, step, tensors, feed_dict, monitors):\n \"\"\"Runs session for given tensors with monitor callbacks.\"\"\"\n for monitor in monitors:\n tensors += monitor.step_begin(step)\n tensors = list(set(tensors))\n\n outputs = session.run(tensors, feed_dict=feed_dict)\n outputs = dict(zip(\n [t.name if isinstance(t, ops.Tensor) else t for t in tensors],\n outputs))\n\n should_stop = False\n for monitor in monitors:\n induce_stop = monitor.step_end(step, outputs)\n should_stop = should_stop or induce_stop\n return outputs, should_stop\n\n\ndef _monitored_train(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor=None,\n init_op=None,\n init_feed_dict=None,\n init_fn=None,\n log_every_steps=10,\n supervisor_is_chief=True,\n supervisor_master='',\n supervisor_save_model_secs=600,\n supervisor_save_model_steps=None,\n keep_checkpoint_max=5,\n supervisor_save_summaries_steps=100,\n feed_fn=None,\n steps=None,\n fail_on_nan_loss=True,\n hooks=None,\n max_steps=None):\n \"\"\"Train a model via monitored_session.\n\n Given `graph`, a directory to write outputs to (`output_dir`), and some ops,\n run a training loop. The given `train_op` performs one step of training on the\n model. The `loss_op` represents the objective function of the training. It is\n expected to increment the `global_step_tensor`, a scalar integer tensor\n counting training steps. This function uses `Supervisor` to initialize the\n graph (from a checkpoint if one is available in `output_dir`), write summaries\n defined in the graph, and write regular checkpoints as defined by\n `supervisor_save_model_secs`.\n\n Training continues until `global_step_tensor` evaluates to `max_steps`, or, if\n `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the\n program is terminated with exit code 1.\n\n Args:\n graph: A graph to train. It is expected that this graph is not in use\n elsewhere.\n output_dir: A directory to write outputs to.\n train_op: An op that performs one training step when run.\n loss_op: A scalar loss tensor.\n global_step_tensor: A tensor representing the global step. If none is given,\n one is extracted from the graph using the same logic as in `Supervisor`.\n init_op: An op that initializes the graph. If `None`, use `Supervisor`'s\n default.\n init_feed_dict: A dictionary that maps `Tensor` objects to feed values.\n This feed dictionary will be used when `init_op` is evaluated.\n init_fn: Optional callable passed to Supervisor to initialize the model.\n log_every_steps: Output logs regularly. 
The logs contain timing data and the\n current loss. A `0` or negative value disables logging.\n supervisor_is_chief: Whether the current process is the chief supervisor in\n charge of restoring the model and running standard services.\n supervisor_master: The master string to use when preparing the session. \n supervisor_save_model_secs: Save checkpoints every this many seconds. Can\n not be specified with `supervisor_save_model_steps`.\n supervisor_save_model_steps: Save checkpoints every this many steps. Can not\n be specified with `supervisor_save_model_secs`.\n keep_checkpoint_max: The maximum number of recent checkpoint files to\n keep. As new files are created, older files are deleted. If None or 0,\n all checkpoint files are kept. This is simply passed as the max_to_keep\n arg to `tf.Saver` constructor.\n supervisor_save_summaries_steps: Save summaries every\n `supervisor_save_summaries_steps` seconds when training.\n feed_fn: A function that is called every iteration to produce a `feed_dict`\n passed to `session.run` calls. Optional.\n steps: Trains for this many steps (e.g. current global step + `steps`).\n fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`\n evaluates to `NaN`. If false, continue training as if nothing happened.\n hooks: List of `SessionRunHook` subclass instances. Used for callbacks\n inside the training loop.\n max_steps: Number of total steps for which to train model. If `None`,\n train forever. Two calls fit(steps=100) means 200 training iterations.\n On the other hand two calls of fit(max_steps=100) means, second call\n will not do any iteration since first call did all 100 steps.\n\n Returns:\n The final loss value.\n\n Raises:\n ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`\n is not provided. See `tf.contrib.framework.get_global_step` for how we\n look up the latter if not provided explicitly.\n NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever\n evaluates to `NaN`.\n ValueError: If both `steps` and `max_steps` are not `None`.\n \"\"\"\n if (steps is not None) and (max_steps is not None):\n raise ValueError('Can not provide both steps and max_steps.')\n if not output_dir:\n raise ValueError('Output directory should be non-empty %s.' % output_dir)\n if train_op is None:\n raise ValueError('Missing train_op.')\n if loss_op is None:\n raise ValueError('Missing loss_op.')\n if hooks is None:\n hooks = []\n if not isinstance(hooks, list):\n raise ValueError('Hooks should be a list.')\n with graph.as_default():\n global_step_tensor = contrib_variables.assert_or_get_global_step(\n graph, global_step_tensor)\n if global_step_tensor is None:\n raise ValueError('No \"global_step\" was provided or found in the graph.')\n\n if max_steps is not None:\n try:\n start_step = checkpoints.load_variable(output_dir,\n global_step_tensor.name)\n if max_steps <= start_step:\n logging.info('Skipping training since max_steps has already saved.')\n return None\n except: # pylint: disable=bare-except\n pass\n\n # Adapted SessionRunHooks such as ExportMonitor depend on the\n # CheckpointSaverHook to be executed before they should be executed.\n # The `hooks` param comprises of deprecated monitor hooks\n # (such as ExportMonitor). 
Appending them after the basic_session_run_hooks.\n all_hooks = []\n with graph.as_default():\n all_hooks.append(basic_session_run_hooks.NanTensorHook(\n loss_op, fail_on_nan_loss=fail_on_nan_loss))\n if log_every_steps > 0:\n all_hooks.append(basic_session_run_hooks.LoggingTensorHook({\n 'loss': loss_op.name,\n 'step': global_step_tensor.name\n }, every_n_iter=log_every_steps))\n\n def make_saver():\n return tf_saver.Saver(\n sharded=True, max_to_keep=keep_checkpoint_max, defer_build=True)\n\n scaffold = monitored_session.Scaffold(\n init_op=init_op,\n init_feed_dict=init_feed_dict,\n init_fn=init_fn,\n saver=monitored_session.Scaffold.get_or_default('saver',\n ops.GraphKeys.SAVERS,\n make_saver))\n\n if not supervisor_is_chief:\n session_creator = monitored_session.WorkerSessionCreator(\n scaffold=scaffold,\n master=supervisor_master)\n else:\n session_creator = monitored_session.ChiefSessionCreator(\n scaffold=scaffold,\n checkpoint_dir=output_dir,\n master=supervisor_master)\n summary_writer = summary_io.SummaryWriterCache.get(output_dir)\n all_hooks.append(\n basic_session_run_hooks.StepCounterHook(\n summary_writer=summary_writer))\n all_hooks.append(\n basic_session_run_hooks.SummarySaverHook(\n save_steps=supervisor_save_summaries_steps,\n summary_writer=summary_writer,\n scaffold=scaffold))\n if (supervisor_save_model_secs is not None\n or supervisor_save_model_steps is not None):\n all_hooks.append(\n basic_session_run_hooks.CheckpointSaverHook(\n output_dir,\n save_secs=supervisor_save_model_secs,\n save_steps=supervisor_save_model_steps,\n scaffold=scaffold))\n\n if steps is not None or max_steps is not None:\n all_hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))\n all_hooks.extend(hooks)\n\n with monitored_session.MonitoredSession(\n session_creator=session_creator,\n hooks=all_hooks) as super_sess:\n loss = None\n while not super_sess.should_stop():\n _, loss = super_sess.run([train_op, loss_op], feed_fn() if feed_fn else\n None)\n return loss\n\n\n# TODO(ispir): Deprecate train in favor of supervised_train\ndef train(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor=None,\n init_op=None,\n init_feed_dict=None,\n init_fn=None,\n log_every_steps=10,\n supervisor_is_chief=True,\n supervisor_master='',\n supervisor_save_model_secs=600,\n keep_checkpoint_max=5,\n supervisor_save_summaries_steps=100,\n feed_fn=None,\n steps=None,\n fail_on_nan_loss=True,\n monitors=None,\n max_steps=None):\n \"\"\"Train a model.\n\n Given `graph`, a directory to write outputs to (`output_dir`), and some ops,\n run a training loop. The given `train_op` performs one step of training on the\n model. The `loss_op` represents the objective function of the training. It is\n expected to increment the `global_step_tensor`, a scalar integer tensor\n counting training steps. This function uses `Supervisor` to initialize the\n graph (from a checkpoint if one is available in `output_dir`), write summaries\n defined in the graph, and write regular checkpoints as defined by\n `supervisor_save_model_secs`.\n\n Training continues until `global_step_tensor` evaluates to `max_steps`, or, if\n `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the\n program is terminated with exit code 1.\n\n Args:\n graph: A graph to train. 
It is expected that this graph is not in use\n elsewhere.\n output_dir: A directory to write outputs to.\n train_op: An op that performs one training step when run.\n loss_op: A scalar loss tensor.\n global_step_tensor: A tensor representing the global step. If none is given,\n one is extracted from the graph using the same logic as in `Supervisor`.\n init_op: An op that initializes the graph. If `None`, use `Supervisor`'s\n default.\n init_feed_dict: A dictionary that maps `Tensor` objects to feed values.\n This feed dictionary will be used when `init_op` is evaluated.\n init_fn: Optional callable passed to Supervisor to initialize the model.\n log_every_steps: Output logs regularly. The logs contain timing data and the\n current loss.\n supervisor_is_chief: Whether the current process is the chief supervisor in\n charge of restoring the model and running standard services.\n supervisor_master: The master string to use when preparing the session.\n supervisor_save_model_secs: Save a checkpoint every\n `supervisor_save_model_secs` seconds when training.\n keep_checkpoint_max: The maximum number of recent checkpoint files to\n keep. As new files are created, older files are deleted. If None or 0,\n all checkpoint files are kept. This is simply passed as the max_to_keep\n arg to tf.Saver constructor.\n supervisor_save_summaries_steps: Save summaries every\n `supervisor_save_summaries_steps` seconds when training.\n feed_fn: A function that is called every iteration to produce a `feed_dict`\n passed to `session.run` calls. Optional.\n steps: Trains for this many steps (e.g. current global step + `steps`).\n fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`\n evaluates to `NaN`. If false, continue training as if nothing happened.\n monitors: List of `BaseMonitor` subclass instances. Used for callbacks\n inside the training loop.\n max_steps: Number of total steps for which to train model. If `None`,\n train forever. Two calls fit(steps=100) means 200 training iterations.\n On the other hand two calls of fit(max_steps=100) means, second call\n will not do any iteration since first call did all 100 steps.\n\n Returns:\n The final loss value.\n\n Raises:\n ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`\n is not provided. See `tf.contrib.framework.get_global_step` for how we\n look up the latter if not provided explicitly.\n NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever\n evaluates to `NaN`.\n ValueError: If both `steps` and `max_steps` are not `None`.\n \"\"\"\n while True:\n try:\n return _train_internal(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor,\n init_op,\n init_feed_dict,\n init_fn,\n log_every_steps,\n supervisor_is_chief,\n supervisor_master,\n supervisor_save_model_secs,\n keep_checkpoint_max,\n supervisor_save_summaries_steps,\n feed_fn,\n steps,\n fail_on_nan_loss,\n monitors,\n max_steps)\n except errors.AbortedError:\n # Happens when PS restarts, keep training.\n logging.warning('Training got Aborted error. 
Keep training.')\n\n\ndef _train_internal(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor,\n init_op,\n init_feed_dict,\n init_fn,\n log_every_steps,\n supervisor_is_chief,\n supervisor_master,\n supervisor_save_model_secs,\n keep_checkpoint_max,\n supervisor_save_summaries_steps,\n feed_fn,\n steps,\n fail_on_nan_loss,\n monitors,\n max_steps):\n \"\"\"See train.\"\"\"\n if (steps is not None) and (max_steps is not None):\n raise ValueError('Can not provide both steps and max_steps.')\n if not output_dir:\n raise ValueError('Output directory should be non-empty %s.' % output_dir)\n if train_op is None:\n raise ValueError('Missing train_op.')\n if loss_op is None:\n raise ValueError('Missing loss_op.')\n\n with graph.as_default():\n global_step_tensor = contrib_variables.assert_or_get_global_step(\n graph, global_step_tensor)\n if global_step_tensor is None:\n raise ValueError('No \"global_step\" was provided or found in the graph.')\n\n # Get current step.\n try:\n start_step = checkpoints.load_variable(\n output_dir, global_step_tensor.name)\n except (errors.NotFoundError, ValueError):\n start_step = 0\n\n summary_writer = (get_summary_writer(output_dir)\n if supervisor_is_chief else None)\n\n # Add default chief monitors if none were provided.\n if not monitors:\n monitors = monitors_lib.get_default_monitors(\n loss_op=loss_op,\n summary_op=logging_ops.get_summary_op(),\n save_summary_steps=supervisor_save_summaries_steps,\n summary_writer=summary_writer) if supervisor_is_chief else []\n\n # TODO(ipolosukhin): Replace all functionality of Supervisor\n # with Chief-Exclusive Monitors.\n if not supervisor_is_chief:\n # Prune list of monitor to the ones runnable on all workers.\n monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]\n\n if max_steps is None:\n max_steps = (start_step + steps) if steps else None\n # Start monitors, can create graph parts.\n for monitor in monitors:\n monitor.begin(max_steps=max_steps)\n\n supervisor = tf_supervisor.Supervisor(\n graph,\n init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,\n init_feed_dict=init_feed_dict,\n is_chief=supervisor_is_chief,\n logdir=output_dir,\n saver=_make_saver(graph, keep_checkpoint_max),\n global_step=global_step_tensor,\n summary_op=None,\n summary_writer=summary_writer,\n save_model_secs=supervisor_save_model_secs,\n init_fn=init_fn)\n session = supervisor.PrepareSession(master=supervisor_master,\n start_standard_services=True)\n supervisor.StartQueueRunners(session)\n\n with session:\n get_current_step = lambda: session.run(global_step_tensor)\n\n start_step = get_current_step()\n last_step = start_step\n last_log_step = start_step\n loss_value = None\n logging.info('Training steps [%d,%s)', last_step, 'inf'\n if max_steps is None else str(max_steps))\n\n excinfo = None\n try:\n while not supervisor.ShouldStop() and (\n (max_steps is None) or (last_step < max_steps)):\n start_time = time.time()\n feed_dict = feed_fn() if feed_fn is not None else None\n\n outputs, should_stop = _run_with_monitors(\n session, last_step + 1, [train_op, loss_op], feed_dict, monitors)\n\n loss_value = outputs[loss_op.name]\n if np.isnan(loss_value):\n failure_message = 'Model diverged with loss = NaN.'\n if fail_on_nan_loss:\n logging.error(failure_message)\n raise monitors_lib.NanLossDuringTrainingError()\n else:\n logging.warning(failure_message)\n\n if should_stop:\n break\n\n this_step = get_current_step()\n\n if this_step <= last_step:\n logging.error(\n 'Global step was not incremented by train 
op at step %s'\n ': new step %d', last_step, this_step)\n\n last_step = this_step\n is_last_step = (max_steps is not None) and (last_step >= max_steps)\n if is_last_step or (last_step - last_log_step >= log_every_steps):\n logging.info(\n 'training step %d, loss = %.5f (%.3f sec/batch).',\n last_step, loss_value, float(time.time() - start_time))\n last_log_step = last_step\n except errors.OutOfRangeError as e:\n logging.warn('Got exception during tf.learn training loop possibly '\n 'due to exhausted input queue %s.', e)\n except StopIteration:\n logging.info('Exhausted input iterarator.')\n except BaseException as e: # pylint: disable=broad-except\n # Hold on to any other exceptions while we try recording a final\n # checkpoint and summary.\n excinfo = sys.exc_info()\n finally:\n try:\n # Call supervisor.Stop() from within a try block because it re-raises\n # exceptions thrown by the supervised threads.\n supervisor.Stop(close_summary_writer=False)\n\n # Save one last checkpoint and summaries\n # TODO(wicke): This should be handled by Supervisor\n\n # In case we encountered an exception in the try block before we updated\n # last_step, update it here (again).\n last_step = get_current_step()\n if supervisor_is_chief:\n ckpt_path = supervisor.save_path\n logging.info('Saving checkpoint for step %d to checkpoint: %s.',\n last_step, ckpt_path)\n supervisor.saver.save(session, ckpt_path, global_step=last_step)\n\n # Finish monitors.\n for monitor in monitors:\n monitor.end()\n\n # catch OutOfRangeError which is thrown when queue is out of data (and for\n # other reasons as well).\n except errors.OutOfRangeError as e:\n logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '\n 'due to exhausted input queue. Note: summary_op is not '\n 'expected to trigger dequeues. 
%s.', e)\n except BaseException as e: # pylint: disable=broad-except\n # If we don't already have an exception to re-raise, raise this one.\n if not excinfo:\n raise\n # Otherwise, log this one and raise the other in the finally block.\n logging.error('Got exception during tf.learn final checkpoint %s.', e)\n finally:\n if excinfo:\n reraise(*excinfo)\n return loss_value\n\n\ndef _get_first_op_from_collection(collection_name):\n elements = ops.get_collection(collection_name)\n if elements:\n return elements[0]\n return None\n\n\ndef _get_saver():\n \"\"\"Lazy init and return saver.\"\"\"\n saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)\n if saver is None and variables.all_variables():\n saver = tf_saver.Saver()\n ops.add_to_collection(ops.GraphKeys.SAVERS, saver)\n return saver\n\n\ndef _get_ready_op():\n ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)\n if ready_op is None:\n ready_op = variables.report_uninitialized_variables()\n ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)\n return ready_op\n\n\ndef _get_local_init_op():\n local_init_op = _get_first_op_from_collection(\n ops.GraphKeys.LOCAL_INIT_OP)\n if local_init_op is None:\n op_list = [variables.initialize_local_variables(),\n data_flow_ops.initialize_all_tables()]\n if op_list:\n local_init_op = control_flow_ops.group(*op_list)\n ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)\n return local_init_op\n\n\ndef _eval_results_to_str(eval_results):\n return ', '.join('%s = %s' % (k, v) for k, v in eval_results.items())\n\n\ndef _write_summary_results(output_dir, eval_results, current_global_step):\n \"\"\"Writes eval results into summary file in given dir.\"\"\"\n logging.info('Saving evaluation summary for %d step: %s', current_global_step,\n _eval_results_to_str(eval_results))\n summary_writer = get_summary_writer(output_dir)\n summary = summary_pb2.Summary()\n for key in eval_results:\n if eval_results[key] is None:\n continue\n value = summary.value.add()\n value.tag = key\n if (isinstance(eval_results[key], np.float32) or\n isinstance(eval_results[key], float)):\n value.simple_value = float(eval_results[key])\n else:\n logging.warn('Skipping summary for %s, must be a float or np.float32.',\n key)\n summary_writer.add_summary(summary, current_global_step)\n summary_writer.flush()\n\n\ndef evaluate(graph,\n output_dir,\n checkpoint_path,\n eval_dict,\n update_op=None,\n global_step_tensor=None,\n supervisor_master='',\n log_every_steps=10,\n feed_fn=None,\n max_steps=None):\n \"\"\"Evaluate a model loaded from a checkpoint.\n\n Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint\n to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval\n loop for `max_steps` steps, or until an exception (generally, an\n end-of-input signal from a reader operation) is raised from running\n `eval_dict`.\n\n In each step of evaluation, all tensors in the `eval_dict` are evaluated, and\n every `log_every_steps` steps, they are logged. At the very end of evaluation,\n a summary is evaluated (finding the summary ops using `Supervisor`'s logic)\n and written to `output_dir`.\n\n Args:\n graph: A `Graph` to train. It is expected that this graph is not in use\n elsewhere.\n output_dir: A string containing the directory to write a summary to.\n checkpoint_path: A string containing the path to a checkpoint to restore.\n Can be `None` if the graph doesn't require loading any variables.\n eval_dict: A `dict` mapping string names to tensors to evaluate. 
It is\n evaluated in every logging step. The result of the final evaluation is\n returned. If `update_op` is None, then it's evaluated in every step. If\n `max_steps` is `None`, this should depend on a reader that will raise an\n end-of-input exception when the inputs are exhausted.\n update_op: A `Tensor` which is run in every step.\n global_step_tensor: A `Variable` containing the global step. If `None`,\n one is extracted from the graph using the same logic as in `Supervisor`.\n Used to place eval summaries on training curves.\n supervisor_master: The master string to use when preparing the session.\n log_every_steps: Integer. Output logs every `log_every_steps` evaluation\n steps. The logs contain the `eval_dict` and timing information.\n feed_fn: A function that is called every iteration to produce a `feed_dict`\n passed to `session.run` calls. Optional.\n max_steps: Integer. Evaluate `eval_dict` this many times.\n\n Returns:\n A tuple `(eval_results, global_step)`:\n eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)\n that are the result of running eval_dict in the last step. `None` if no\n eval steps were run.\n global_step: The global step this evaluation corresponds to.\n\n Raises:\n ValueError: if `output_dir` is empty.\n \"\"\"\n if not output_dir:\n raise ValueError('Output directory should be non-empty %s.' % output_dir)\n with graph.as_default():\n global_step_tensor = contrib_variables.assert_or_get_global_step(\n graph, global_step_tensor)\n\n # Create or get summary op, global_step and saver.\n saver = _get_saver()\n local_init_op = _get_local_init_op()\n ready_op = _get_ready_op()\n\n session_manager = session_manager_lib.SessionManager(\n local_init_op=local_init_op,\n ready_op=ready_op)\n session, initialized = session_manager.recover_session(\n master=supervisor_master,\n saver=saver,\n checkpoint_dir=checkpoint_path)\n\n # Start queue runners.\n coord = coordinator.Coordinator()\n threads = queue_runner.start_queue_runners(session, coord)\n\n with session:\n if not initialized:\n logging.warning('Failed to initialize from %s.', checkpoint_path)\n # TODO(ipolosukhin): This should be failing, but old code relies on that.\n session.run(variables.initialize_all_variables())\n if checkpoint_path:\n _restore_from_checkpoint(session, graph, checkpoint_path, saver)\n\n current_global_step = session.run(global_step_tensor)\n eval_results = None\n # TODO(amodei): Fix this to run through the eval set exactly once.\n step = 0\n eval_step = None\n feed_dict = None\n logging.info('Eval steps [%d,%s) for training step %d.', step,\n 'inf' if max_steps is None\n else str(max_steps), current_global_step)\n try:\n try:\n while (max_steps is None) or (step < max_steps):\n step += 1\n start_time = time.time()\n feed_dict = feed_fn() if feed_fn is not None else None\n if update_op is not None:\n session.run(update_op, feed_dict=feed_dict)\n else:\n eval_results = session.run(eval_dict, feed_dict=feed_dict)\n eval_step = step\n\n # TODO(wicke): We should assert that the global step hasn't changed.\n if step % log_every_steps == 0:\n if eval_step is None or step != eval_step:\n eval_results = session.run(eval_dict, feed_dict=feed_dict)\n eval_step = step\n duration = time.time() - start_time\n logging.info('Results after %d steps (%.3f sec/batch): %s.',\n step, float(duration),\n _eval_results_to_str(eval_results))\n finally:\n if eval_results is None or step != eval_step:\n eval_results = session.run(eval_dict, feed_dict=feed_dict)\n eval_step = step\n # Stop 
session first, before queue runners.\n session.close()\n\n # Stop queue runners.\n try:\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=120)\n except (RuntimeError, errors.CancelledError) as e:\n logging.warning('Coordinator didn\\'t stop cleanly: %s', e)\n\n # catch OutOfRangeError which is thrown when queue is out of data (and for\n # other reasons as well).\n except errors.OutOfRangeError as e:\n if max_steps is None:\n logging.info('Input queue is exhausted.')\n else:\n logging.warn('Input queue is exhausted: %s.', e)\n # catch StopIteration which is thrown is DataReader is out of data.\n except StopIteration as e:\n if max_steps is None:\n logging.info('Input iterator is exhausted.')\n else:\n logging.warn('Input iterator is exhausted: %s.', e)\n\n # Save summaries for this evaluation.\n _write_summary_results(output_dir, eval_results, current_global_step)\n\n return eval_results, current_global_step\n\n\ndef run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):\n \"\"\"Run `output_dict` tensors `n` times, with the same `feed_dict` each run.\n\n Args:\n output_dict: A `dict` mapping string names to tensors to run. Must all be\n from the same graph.\n feed_dict: `dict` of input values to feed each run.\n restore_checkpoint_path: A string containing the path to a checkpoint to\n restore.\n n: Number of times to repeat.\n\n Returns:\n A list of `n` `dict` objects, each containing values read from `output_dict`\n tensors.\n \"\"\"\n return run_feeds(\n output_dict=output_dict,\n feed_dicts=itertools.repeat(feed_dict, n),\n restore_checkpoint_path=restore_checkpoint_path)\n\n\n# TODO(ptucker): Add save_checkpoint_path.\ndef run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):\n \"\"\"Run `output_dict` tensors with each input in `feed_dicts`.\n\n If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,\n init all variables.\n\n Args:\n output_dict: A `dict` mapping string names to `Tensor` objects to run.\n Tensors must all be from the same graph.\n feed_dicts: Iterable of `dict` objects of input values to feed.\n restore_checkpoint_path: A string containing the path to a checkpoint to\n restore.\n\n Yields:\n A sequence of dicts of values read from `output_dict` tensors, one item\n yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,\n values are the results read from the corresponding `Tensor` in\n `output_dict`.\n\n Raises:\n ValueError: if `output_dict` or `feed_dicts` is None or empty.\n \"\"\"\n if not output_dict:\n raise ValueError('output_dict is invalid: %s.' % output_dict)\n if not feed_dicts:\n raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)\n\n graph = contrib_ops.get_graph_from_inputs(output_dict.values())\n\n with graph.as_default() as g:\n with tf_session.Session('') as session:\n if restore_checkpoint_path:\n _restore_from_checkpoint(session, g, restore_checkpoint_path)\n else:\n session.run(variables.initialize_all_variables())\n session.run(variables.initialize_local_variables())\n session.run(data_flow_ops.initialize_all_tables())\n coord = coordinator.Coordinator()\n threads = None\n try:\n threads = queue_runner.start_queue_runners(session, coord=coord)\n for f in feed_dicts:\n yield session.run(output_dict, f)\n finally:\n coord.request_stop()\n if threads:\n coord.join(threads, stop_grace_period_secs=120)\n\n\ndef run_feeds(*args, **kwargs):\n \"\"\"See run_feeds_iter(). 
Returns a `list` instead of an iterator.\"\"\"\n return list(run_feeds_iter(*args, **kwargs))\n\n\ndef infer(restore_checkpoint_path, output_dict, feed_dict=None):\n \"\"\"Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.\n\n If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,\n init all variables.\n\n Args:\n restore_checkpoint_path: A string containing the path to a checkpoint to\n restore.\n output_dict: A `dict` mapping string names to `Tensor` objects to run.\n Tensors must all be from the same graph.\n feed_dict: `dict` object mapping `Tensor` objects to input values to feed.\n\n Returns:\n Dict of values read from `output_dict` tensors. Keys are the same as\n `output_dict`, values are the results read from the corresponding `Tensor`\n in `output_dict`.\n\n Raises:\n ValueError: if `output_dict` or `feed_dicts` is None or empty.\n \"\"\"\n return run_feeds(output_dict=output_dict,\n feed_dicts=[feed_dict] if feed_dict is not None else [None],\n restore_checkpoint_path=restore_checkpoint_path)[0]\n",
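The code entry above is the legacy `tf.contrib.learn` graph-actions module, whose `train` helper drives a caller-built graph given a `train_op`, a `loss_op` and an output directory. The sketch below shows how such a loop might be invoked. It is illustrative only: it assumes a TensorFlow 0.x/1.x-era graph-mode API, the import path of the module is an assumption, and the toy linear model, `/tmp/linear_model` directory and `feed_fn` are made up for the example rather than taken from the dataset.

```python
# Minimal, hypothetical driver for the legacy train() helper shown above.
# Assumes TensorFlow 0.x/1.x graph-mode APIs; the import path below is an
# assumption about where this module lives.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import graph_actions  # assumed path

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 1], name='x')
    y = tf.placeholder(tf.float32, shape=[None, 1], name='y')
    w = tf.Variable(tf.zeros([1, 1]))
    b = tf.Variable(tf.zeros([1]))
    loss_op = tf.reduce_mean(tf.square(tf.matmul(x, w) + b - y))
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
        loss_op, global_step=global_step)

def feed_fn():
    # Called once per iteration to produce the feed_dict for session.run,
    # as described in the train() docstring above.
    xs = np.random.rand(32, 1).astype(np.float32)
    return {x: xs, y: 0.5 * xs + 2.0}

final_loss = graph_actions.train(
    graph,
    output_dir='/tmp/linear_model',   # checkpoints and summaries go here
    train_op=train_op,
    loss_op=loss_op,
    global_step_tensor=global_step,   # passed explicitly for clarity
    feed_fn=feed_fn,
    steps=100)
```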
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Recurrent Neural Network estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.learn.python.learn import models\nfrom tensorflow.contrib.learn.python.learn.estimators import _sklearn\nfrom tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator\n\n\ndef null_input_op_fn(x):\n \"\"\"This function does no transformation on the inputs, used as default.\"\"\"\n return x\n\n\nclass TensorFlowRNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):\n \"\"\"TensorFlow RNN Classifier model.\"\"\"\n\n def __init__(self,\n rnn_size,\n n_classes,\n cell_type='gru',\n num_layers=1,\n input_op_fn=null_input_op_fn,\n initial_state=None,\n bidirectional=False,\n sequence_length=None,\n attn_length=None,\n attn_size=None,\n attn_vec_size=None,\n batch_size=32,\n steps=50,\n optimizer='Adagrad',\n learning_rate=0.1,\n class_weight=None,\n clip_gradients=5.0,\n continue_training=False,\n config=None,\n verbose=1):\n \"\"\"Initializes a TensorFlowRNNClassifier instance.\n\n Args:\n rnn_size: The size for rnn cell, e.g. size of your word embeddings.\n cell_type: The type of rnn cell, including rnn, gru, and lstm.\n num_layers: The number of layers of the rnn model.\n input_op_fn: Function that will transform the input tensor, such as\n creating word embeddings, byte list, etc. This takes\n an argument x for input and returns transformed x.\n bidirectional: boolean, Whether this is a bidirectional rnn.\n sequence_length: If sequence_length is provided, dynamic calculation\n is performed. This saves computational time when unrolling past max\n sequence length.\n initial_state: An initial state for the RNN. This must be a tensor of\n appropriate type and shape [batch_size x cell.state_size].\n attn_length: integer, the size of attention vector attached to rnn cells.\n attn_size: integer, the size of an attention window attached to rnn cells.\n attn_vec_size: integer, the number of convolutional features calculated on\n attention state and the size of the hidden layer built from base cell state.\n n_classes: Number of classes in the target.\n batch_size: Mini batch size.\n steps: Number of steps to run over data.\n optimizer: Optimizer name (or class), for example \"SGD\", \"Adam\",\n \"Adagrad\".\n learning_rate: If this is constant float value, no decay function is\n used. Instead, a customized decay function can be passed that accepts\n global_step as parameter and returns a Tensor.\n e.g. exponential decay function:\n\n ````python\n def exp_decay(global_step):\n return tf.train.exponential_decay(\n learning_rate=0.1, global_step,\n decay_steps=2, decay_rate=0.001)\n ````\n\n class_weight: None or list of n_classes floats. Weight associated with\n classes for loss computation. 
If not given, all classes are\n supposed to have weight one.\n continue_training: when continue_training is True, once initialized\n model will be continually trained on every call of fit.\n config: RunConfig object that controls the configurations of the session,\n e.g. num_cores, gpu_memory_fraction, etc.\n \"\"\"\n\n self.rnn_size = rnn_size\n self.cell_type = cell_type\n self.input_op_fn = input_op_fn\n self.bidirectional = bidirectional\n self.num_layers = num_layers\n self.sequence_length = sequence_length\n self.initial_state = initial_state\n self.attn_length = attn_length\n self.attn_size = attn_size\n self.attn_vec_size = attn_vec_size\n super(TensorFlowRNNClassifier, self).__init__(\n model_fn=self._model_fn,\n n_classes=n_classes,\n batch_size=batch_size,\n steps=steps,\n optimizer=optimizer,\n learning_rate=learning_rate,\n class_weight=class_weight,\n clip_gradients=clip_gradients,\n continue_training=continue_training,\n config=config,\n verbose=verbose)\n\n def _model_fn(self, x, y): # pylint: disable=method-hidden\n return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,\n self.input_op_fn, self.bidirectional,\n models.logistic_regression,\n self.sequence_length, self.initial_state,\n self.attn_length, self.attn_size,\n self.attn_vec_size)(x, y)\n\n @property\n def bias_(self):\n \"\"\"Returns bias of the rnn layer.\"\"\"\n return self.get_variable_value('logistic_regression/bias')\n\n @property\n def weights_(self):\n \"\"\"Returns weights of the rnn layer.\"\"\"\n return self.get_variable_value('logistic_regression/weights')\n\n\nclass TensorFlowRNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):\n \"\"\"TensorFlow RNN Regressor model.\"\"\"\n\n def __init__(self,\n rnn_size,\n cell_type='gru',\n num_layers=1,\n input_op_fn=null_input_op_fn,\n initial_state=None,\n bidirectional=False,\n sequence_length=None,\n attn_length=None,\n attn_size=None,\n attn_vec_size=None,\n n_classes=0,\n batch_size=32,\n steps=50,\n optimizer='Adagrad',\n learning_rate=0.1,\n clip_gradients=5.0,\n continue_training=False,\n config=None,\n verbose=1):\n \"\"\"Initializes a TensorFlowRNNRegressor instance.\n\n Args:\n rnn_size: The size for rnn cell, e.g. size of your word embeddings.\n cell_type: The type of rnn cell, including rnn, gru, and lstm.\n num_layers: The number of layers of the rnn model.\n input_op_fn: Function that will transform the input tensor, such as\n creating word embeddings, byte list, etc. This takes\n an argument x for input and returns transformed x.\n bidirectional: boolean, Whether this is a bidirectional rnn.\n sequence_length: If sequence_length is provided, dynamic calculation\n is performed. This saves computational time when unrolling past max\n sequence length.\n attn_length: integer, the size of attention vector attached to rnn cells.\n attn_size: integer, the size of an attention window attached to rnn cells.\n attn_vec_size: integer, the number of convolutional features calculated on\n attention state and the size of the hidden layer built from base cell state.\n initial_state: An initial state for the RNN. This must be a tensor of\n appropriate type and shape [batch_size x cell.state_size].\n batch_size: Mini batch size.\n steps: Number of steps to run over data.\n optimizer: Optimizer name (or class), for example \"SGD\", \"Adam\",\n \"Adagrad\".\n learning_rate: If this is constant float value, no decay function is\n used. 
Instead, a customized decay function can be passed that accepts\n global_step as parameter and returns a Tensor.\n e.g. exponential decay function:\n\n ````python\n def exp_decay(global_step):\n return tf.train.exponential_decay(\n learning_rate=0.1, global_step,\n decay_steps=2, decay_rate=0.001)\n ````\n\n continue_training: when continue_training is True, once initialized\n model will be continually trained on every call of fit.\n config: RunConfig object that controls the configurations of the\n session, e.g. num_cores, gpu_memory_fraction, etc.\n verbose: Controls the verbosity, possible values:\n\n * 0: the algorithm and debug information is muted.\n * 1: trainer prints the progress.\n * 2: log device placement is printed.\n \"\"\"\n self.rnn_size = rnn_size\n self.cell_type = cell_type\n self.input_op_fn = input_op_fn\n self.bidirectional = bidirectional\n self.num_layers = num_layers\n self.sequence_length = sequence_length\n self.initial_state = initial_state\n self.attn_length = attn_length\n self.attn_size = attn_size\n self.attn_vec_size = attn_vec_size\n super(TensorFlowRNNRegressor, self).__init__(\n model_fn=self._model_fn,\n n_classes=n_classes,\n batch_size=batch_size,\n steps=steps,\n optimizer=optimizer,\n learning_rate=learning_rate,\n clip_gradients=clip_gradients,\n continue_training=continue_training,\n config=config,\n verbose=verbose)\n\n def _model_fn(self, x, y): # pylint: disable=method-hidden\n return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,\n self.input_op_fn, self.bidirectional,\n models.linear_regression, self.sequence_length,\n self.initial_state, self.attn_length,\n self.attn_size, self.attn_vec_size)(x, y)\n\n @property\n def bias_(self):\n \"\"\"Returns bias of the rnn layer.\"\"\"\n return self.get_variable_value('linear_regression/bias')\n\n @property\n def weights_(self):\n \"\"\"Returns weights of the rnn layer.\"\"\"\n return self.get_variable_value('linear_regression/weights')\n",
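The entry above defines the deprecated `TensorFlowRNNClassifier` and `TensorFlowRNNRegressor` wrappers. As a rough illustration of the constructor arguments documented in their docstrings, the sketch below builds a GRU classifier on random sequence data. It assumes the sklearn-style `fit`/`predict` methods of the `TensorFlowEstimator` base class (not shown in this entry), an exposed import path under `tf.contrib.learn`, and that `input_op_fn` should return a list of per-timestep tensors; treat it as a sketch under those assumptions rather than a verified recipe.

```python
# Hypothetical usage of TensorFlowRNNClassifier, assuming the legacy
# tf.contrib.learn estimator API (fit/predict) and an input_op_fn that
# turns a [batch, time, features] tensor into a per-timestep list.
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn  # assumed: classifier exposed here

def sequence_input_op_fn(x):
    # [batch, time, features] -> list of `time` tensors of shape [batch, features].
    # tf.unpack was renamed tf.unstack in later TensorFlow releases.
    return tf.unpack(tf.transpose(x, [1, 0, 2]))

x_train = np.random.rand(128, 10, 8).astype(np.float32)  # 10 steps, 8 features
y_train = np.random.randint(0, 2, size=128)

classifier = learn.TensorFlowRNNClassifier(
    rnn_size=16,
    n_classes=2,
    cell_type='gru',
    num_layers=1,
    input_op_fn=sequence_input_op_fn,
    batch_size=32,
    steps=200,
    learning_rate=0.1)
classifier.fit(x_train, y_train)
predictions = classifier.predict(x_train[:5])
```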
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functional tests for Concat Op.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import gen_array_ops\n\n\nclass ConcatOpTest(tf.test.TestCase):\n\n def testHStack(self):\n with self.test_session():\n p1 = tf.placeholder(tf.float32, shape=[4, 4])\n p2 = tf.placeholder(tf.float32, shape=[4, 4])\n c = tf.concat(0, [p1, p2])\n params = {\n p1: np.random.rand(4, 4).astype(\"f\"),\n p2: np.random.rand(4, 4).astype(\"f\")\n }\n result = c.eval(feed_dict=params)\n\n self.assertEqual(result.shape, c.get_shape())\n self.assertAllEqual(result[:4, :], params[p1])\n self.assertAllEqual(result[4:, :], params[p2])\n\n def testVStack(self):\n with self.test_session():\n p1 = tf.placeholder(tf.float32, shape=[4, 4])\n p2 = tf.placeholder(tf.float32, shape=[4, 4])\n c = tf.concat(1, [p1, p2])\n params = {\n p1: np.random.rand(4, 4).astype(\"f\"),\n p2: np.random.rand(4, 4).astype(\"f\")\n }\n result = c.eval(feed_dict=params)\n\n self.assertEqual(result.shape, c.get_shape())\n self.assertAllEqual(result[:, :4], params[p1])\n self.assertAllEqual(result[:, 4:], params[p2])\n\n def testInt32GPU(self):\n with self.test_session(use_gpu=True):\n p1 = np.random.rand(2, 3).astype(\"i\")\n p2 = np.random.rand(2, 3).astype(\"i\")\n x1 = tf.constant(p1)\n x2 = tf.constant(p2)\n c = tf.concat(0, [x1, x2])\n result = c.eval()\n self.assertAllEqual(result[:2, :], p1)\n self.assertAllEqual(result[2:, :], p2)\n\n def testRefType(self):\n with self.test_session():\n p1 = np.random.rand(4, 4).astype(\"f\")\n p2 = np.random.rand(4, 4).astype(\"f\")\n v1 = tf.Variable(p1)\n v2 = tf.Variable(p2)\n c = tf.concat(0, [v1, v2])\n tf.initialize_all_variables().run()\n result = c.eval()\n\n self.assertEqual(result.shape, c.get_shape())\n self.assertAllEqual(result[:4, :], p1)\n self.assertAllEqual(result[4:, :], p2)\n\n def _testRandom(self, dtype, use_gpu=False):\n # Random dims of rank 5\n shape = np.random.randint(1, 5, size=5)\n # Random number of tensors, but always > 1.\n num_tensors = np.random.randint(2, 10)\n # Random dim to concat on\n concat_dim = np.random.randint(5)\n params = {}\n if dtype == tf.bfloat16:\n dtype_feed = tf.float32\n else:\n dtype_feed = dtype\n with self.test_session(use_gpu=use_gpu):\n p = []\n for i in np.arange(num_tensors):\n input_shape = shape\n input_shape[concat_dim] = np.random.randint(1, 5)\n placeholder = tf.placeholder(dtype_feed, shape=input_shape)\n p.append(placeholder)\n\n t = dtype_feed.as_numpy_dtype\n params[placeholder] = np.random.rand(*input_shape).astype(t)\n\n if dtype != dtype_feed:\n concat_inputs = [tf.cast(p_i, dtype) for p_i in p]\n else:\n concat_inputs = p\n c = tf.concat(concat_dim, concat_inputs)\n if dtype != dtype_feed:\n c = 
tf.cast(c, dtype_feed)\n result = c.eval(feed_dict=params)\n\n self.assertEqual(result.shape, c.get_shape())\n cur_offset = 0\n\n for i in np.arange(num_tensors):\n # The index into the result is the ':' along all dimensions\n # except the concat_dim. slice(0, size) is used for ':', and\n # a list of slices is used to index into result.\n ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]\n ind[concat_dim] = slice(cur_offset,\n cur_offset + params[p[i]].shape[concat_dim])\n cur_offset += params[p[i]].shape[concat_dim]\n if dtype == dtype_feed:\n self.assertAllEqual(result[ind], params[p[i]])\n else:\n self.assertAllClose(result[ind], params[p[i]], 0.01)\n\n def testRandom(self):\n self._testRandom(tf.float32)\n self._testRandom(tf.float32, use_gpu=True)\n self._testRandom(tf.int16)\n self._testRandom(tf.int32, use_gpu=True)\n self._testRandom(tf.bfloat16)\n self._testRandom(tf.bfloat16, use_gpu=True)\n\n def testInvalidConcatDimTypeAndShape(self):\n a = tf.Variable(tf.constant(1.0, shape=[1]))\n b = tf.Variable(tf.constant(2.0, shape=[1]))\n with self.assertRaises(ValueError):\n tf.concat(a, b)\n with self.assertRaises(TypeError):\n tf.concat(4.2, 1)\n with self.assertRaises(ValueError):\n tf.concat(a, 1)\n with self.assertRaises(TypeError):\n tf.concat(a, [a, b])\n with self.assertRaises(ValueError):\n tf.concat([3], [a, b])\n with self.assertRaises(ValueError):\n tf.concat(0, [])\n # An integer tensor for shape dim should throw no error.\n tf.concat(tf.constant(0, shape=[]), 1)\n # A non-scalar tensor for shape should throw ValueError.\n with self.assertRaises(ValueError):\n tf.concat(tf.constant(0, shape=[1]), 1)\n\n def _testGradientsSimple(self, use_gpu):\n with self.test_session(use_gpu=use_gpu):\n inp = []\n inp_tensors = []\n for x in [1, 2, 6]:\n shape = [10, x, 2]\n t = np.random.rand(*shape).astype(\"f\")\n inp.append(t)\n inp_tensors.append(\n tf.constant([float(y) for y in t.flatten()],\n shape=shape, dtype=tf.float32))\n c = tf.concat(1, inp_tensors)\n output_shape = [10, 9, 2]\n grad_inp = np.random.rand(*output_shape).astype(\"f\")\n grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],\n shape=output_shape)\n grad = tf.gradients([c], inp_tensors, [grad_tensor])\n concated_grad = tf.concat(1, grad)\n result = concated_grad.eval()\n self.assertAllEqual(result, grad_inp)\n\n def testGradientsSimpleAll(self):\n self._testGradientsSimple(use_gpu=True)\n self._testGradientsSimple(use_gpu=False)\n\n def _testGradientsFirstDim(self, use_gpu):\n with self.test_session(use_gpu=use_gpu):\n inp = []\n inp_tensors = []\n for x in [1, 2, 6]:\n shape = [x, 10, 2]\n t = np.random.rand(*shape).astype(\"f\")\n inp.append(t)\n inp_tensors.append(\n tf.constant([float(y) for y in t.flatten()],\n shape=shape, dtype=tf.float32))\n c = tf.concat(0, inp_tensors)\n output_shape = [9, 10, 2]\n grad_inp = np.random.rand(*output_shape).astype(\"f\")\n grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],\n shape=output_shape)\n grad = tf.gradients([c], inp_tensors, [grad_tensor])\n concated_grad = tf.concat(0, grad)\n result = concated_grad.eval()\n\n self.assertAllEqual(result, grad_inp)\n\n def testGradientsFirstDimAll(self):\n self._testGradientsFirstDim(use_gpu=False)\n self._testGradientsFirstDim(use_gpu=True)\n\n def _testGradientsLastDim(self, use_gpu):\n with self.test_session(use_gpu=use_gpu):\n inp = []\n inp_tensors = []\n for x in [1, 2, 6]:\n shape = [10, 2, x]\n t = np.random.rand(*shape).astype(\"f\")\n inp.append(t)\n inp_tensors.append(\n 
tf.constant([float(y) for y in t.flatten()],\n shape=shape, dtype=tf.float32))\n c = tf.concat(2, inp_tensors)\n output_shape = [10, 2, 9]\n grad_inp = np.random.rand(*output_shape).astype(\"f\")\n grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],\n shape=output_shape)\n grad = tf.gradients([c], inp_tensors, [grad_tensor])\n concated_grad = tf.concat(2, grad)\n result = concated_grad.eval()\n\n self.assertAllEqual(result, grad_inp)\n\n def testGradientsLastDimAll(self):\n self._testGradientsLastDim(use_gpu=False)\n self._testGradientsLastDim(use_gpu=True)\n\n def _RunAndVerifyGradientsRandom(self, use_gpu):\n # Random dims of rank 5\n input_shape = np.random.randint(1, 5, size=5)\n # Random number of tensors\n num_tensors = np.random.randint(12, 20)\n # Random dim to concat on\n concat_dim = np.random.randint(5)\n concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)\n with self.test_session(use_gpu=use_gpu):\n inp = []\n inp_tensors = []\n for x in concat_dim_sizes:\n shape = input_shape\n shape[concat_dim] = x\n t = np.random.rand(*shape).astype(\"f\")\n inp.append(t)\n inp_tensors.append(\n tf.constant([float(y) for y in t.flatten()],\n shape=shape, dtype=tf.float32))\n c = tf.concat(concat_dim, inp_tensors)\n output_shape = input_shape\n output_shape[concat_dim] = concat_dim_sizes.sum()\n grad_inp = np.random.rand(*output_shape).astype(\"f\")\n grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],\n shape=output_shape)\n grad = tf.gradients([c], inp_tensors, [grad_tensor])\n concated_grad = tf.concat(concat_dim, grad)\n result = concated_grad.eval()\n\n self.assertAllEqual(result, grad_inp)\n\n def testGradientsRandom(self):\n for _ in range(5):\n self._RunAndVerifyGradientsRandom(use_gpu=False)\n self._RunAndVerifyGradientsRandom(use_gpu=True)\n\n def testShapeError(self):\n # Rank doesn't match.\n with self.assertRaises(ValueError):\n tf.concat(1, [tf.constant(10.0, shape=[4, 4, 4, 4]),\n tf.constant(20.0, shape=[4, 4, 4])])\n\n # Dimensions don't match in a non-concat dim.\n with self.assertRaises(ValueError):\n tf.concat(1, [tf.constant(10.0, shape=[1, 2, 1]),\n tf.constant(20.0, shape=[3, 2, 1])])\n\n # concat_dim out of range.\n with self.assertRaises(ValueError):\n tf.concat(3, [tf.constant(10.0, shape=[4, 4, 4]),\n tf.constant(20.0, shape=[4, 4, 4])])\n\n # concat_dim < 0\n with self.assertRaises(ValueError):\n tf.concat(-1, [tf.constant(10.0, shape=[4, 4, 4]),\n tf.constant(20.0, shape=[4, 4, 4])])\n\n def testShapeWithUnknownConcatDim(self):\n p1 = tf.placeholder(tf.float32)\n c1 = tf.constant(10.0, shape=[4, 4, 4, 4])\n p2 = tf.placeholder(tf.float32)\n c2 = tf.constant(20.0, shape=[4, 4, 4, 4])\n dim = tf.placeholder(tf.int32)\n concat = tf.concat(dim, [p1, c1, p2, c2])\n self.assertEqual(4, concat.get_shape().ndims)\n\n # All dimensions unknown.\n concat2 = tf.concat(dim, [p1, p2])\n self.assertEqual(None, concat2.get_shape())\n\n # Rank doesn't match.\n c3 = tf.constant(30.0, shape=[4, 4, 4])\n with self.assertRaises(ValueError):\n tf.concat(dim, [p1, c1, p2, c3])\n\n def testZeroSize(self):\n # Verify that concat doesn't crash and burn for zero size inputs\n np.random.seed(7)\n for use_gpu in False, True:\n with self.test_session(use_gpu=use_gpu) as sess:\n for shape0 in (), (2,):\n axis = len(shape0)\n for shape1 in (), (3,):\n for n0 in 0, 1, 2:\n for n1 in 0, 1, 2:\n x0 = np.random.randn(*(shape0 + (n0,) + shape1))\n x1 = np.random.randn(*(shape0 + (n1,) + shape1))\n correct = np.concatenate([x0, x1], axis=axis)\n # TODO(irving): 
Make tf.concat handle map, then drop list().\n xs = list(map(tf.constant, [x0, x1]))\n c = tf.concat(axis, xs)\n self.assertAllEqual(c.eval(), correct)\n # Check gradients\n dc = np.random.randn(*c.get_shape().as_list())\n dxs = sess.run(tf.gradients(c, xs, dc))\n self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))\n\n def testTensorConcatDim0Grad(self):\n x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]\n output_shape = [44, 7, 3]\n x_vals = [np.random.random_sample(x_shape).astype(\n np.float64) for x_shape in x_shapes]\n with self.test_session():\n xs = [tf.constant(x_val) for x_val in x_vals]\n output = tf.concat(0, xs)\n err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)\n self.assertLess(err, 1e-11)\n\n def testTensorConcatDim1Grad(self):\n x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]\n output_shape = [20, 11, 3]\n x_vals = [np.random.random_sample(x_shape).astype(\n np.float64) for x_shape in x_shapes]\n with self.test_session():\n xs = [tf.constant(x_val) for x_val in x_vals]\n output = tf.concat(1, xs)\n err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)\n self.assertLess(err, 1e-11)\n\n def testIndexedSlicesConcatDim0Grad(self):\n x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]\n output_shape = [4, 7, 3]\n x_vals = [np.random.random_sample(x_shape).astype(\n np.float64) for x_shape in x_shapes]\n with self.test_session():\n xs = [tf.constant(x_val) for x_val in x_vals]\n x_concat = tf.concat(0, xs)\n output = tf.gather(x_concat, [1, 2, 0, 5])\n err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)\n self.assertLess(err, 1e-11)\n\n def testIndexedSlicesConcatDim1Grad(self):\n x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]\n output_shape = [4, 11, 3]\n x_vals = [np.random.random_sample(x_shape).astype(\n np.float64) for x_shape in x_shapes]\n with self.test_session():\n xs = [tf.constant(x_val) for x_val in x_vals]\n x_concat = tf.concat(1, xs)\n output = tf.gather(x_concat, [1, 2, 0, 5])\n err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)\n self.assertLess(err, 1e-11)\n\n def testIndexedSlicesConcatDim2Grad(self):\n x_shapes = [[20, 7, 3], [20, 7, 1], [20, 7, 2]]\n output_shape = [4, 7, 6]\n x_vals = [np.random.random_sample(x_shape).astype(\n np.float64) for x_shape in x_shapes]\n with self.test_session():\n xs = [tf.constant(x_val) for x_val in x_vals]\n x_concat = tf.concat(2, xs)\n output = tf.gather(x_concat, [1, 2, 0, 5])\n err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)\n self.assertLess(err, 1e-11)\n\n def testConcatTuple(self):\n c1 = np.random.rand(4, 4)\n c2 = np.random.rand(4, 4)\n with self.test_session():\n concat_list_t = tf.concat(0, [c1, c2])\n concat_tuple_t = tf.concat(0, (c1, c2))\n self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())\n\n def testConcatNoScalars(self):\n with self.test_session():\n scalar = tf.constant(7)\n dim = tf.placeholder(tf.int32)\n with self.assertRaisesRegexp(\n ValueError, r\"Can't concatenate scalars \\(use tf\\.pack instead\\)\"):\n tf.concat(dim, [scalar, scalar, scalar])\n\n def testConcatGradNumNodes(self):\n g = tf.Graph()\n n = 10\n with g.as_default():\n x = tf.constant([1, 1])\n y = tf.concat(0, [x] * n)\n before = len(g.get_operations())\n _ = tf.gradients([y], [x], [y])\n after = len(g.get_operations())\n self.assertEqual(2 * n + 2, after - before)\n print(\"graph = \", [x.name for x in g.get_operations()])\n\n def testConcatLargeTensors(self):\n # CPU-only test, because it fails on GPUs with <= 
4GB memory.\n with tf.device(\"/cpu:0\"):\n a = tf.ones([2**31 + 6], dtype=tf.int8)\n b = tf.zeros([1024], dtype=tf.int8)\n onezeros = tf.concat(0, [a, b])\n with self.test_session(use_gpu=False):\n # TODO(dga): Add more depth to this test to validate correctness,\n # not just non-crashingness, once other large tensor fixes have gone in.\n _ = onezeros.eval()\n\n # important as gpu implementation could fail if\n # shared memory is not large for all the inputs\n def testConcatLargeNumberOfTensors(self):\n with self.test_session(use_gpu=True):\n for concat_dim in range(2):\n params = {}\n p = []\n shape = np.array([7, 13])\n if tf.test.is_gpu_available():\n num_tensors = 10000\n else:\n num_tensors = 1000\n for i in np.arange(num_tensors):\n input_shape = shape\n placeholder = tf.placeholder(tf.float32, shape=input_shape)\n p.append(placeholder)\n\n params[placeholder] = np.random.rand(*input_shape).astype(np.float32)\n\n concat_inputs = p\n c = tf.concat(concat_dim, concat_inputs)\n result = c.eval(feed_dict=params)\n\n self.assertEqual(result.shape, c.get_shape())\n cur_offset = 0\n\n for i in np.arange(num_tensors):\n # The index into the result is the ':' along all dimensions\n # except the concat_dim. slice(0, size) is used for ':', and\n # a list of slices is used to index into result.\n index = [slice(0, params[p[i]].shape[j]) for j in np.arange(2)]\n index[concat_dim] = slice(cur_offset,\n cur_offset + params[p[i]].shape[concat_dim])\n cur_offset += params[p[i]].shape[concat_dim]\n self.assertAllEqual(result[index], params[p[i]])\n\n\nclass ConcatOffsetTest(tf.test.TestCase):\n\n def testBasic(self):\n for use_gpu in [False, True]:\n with self.test_session(use_gpu=use_gpu) as sess:\n cdim = tf.constant(1, tf.int32)\n s0 = tf.constant([2, 3, 5], tf.int32)\n s1 = tf.constant([2, 7, 5], tf.int32)\n s2 = tf.constant([2, 20, 5], tf.int32)\n off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])\n ans = sess.run(off)\n self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])\n\n def testNotVector(self):\n with self.test_session() as sess:\n cdim = tf.constant(1, tf.int32)\n s0 = tf.constant([[2, 3, 5]], tf.int32)\n s1 = tf.constant([[2, 7, 5]], tf.int32)\n off = gen_array_ops._concat_offset(cdim, [s0, s1])\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n r\"should be a vector\"):\n sess.run(off)\n\n def testConcatDimOutOfRange(self):\n with self.test_session() as sess:\n cdim = tf.constant(4, tf.int32)\n s0 = tf.constant([2, 3, 5], tf.int32)\n s1 = tf.constant([2, 7, 5], tf.int32)\n off = gen_array_ops._concat_offset(cdim, [s0, s1])\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n r\"Concat dim is out of range: 4 vs. 3\"):\n sess.run(off)\n\n def testDimMismatch(self):\n with self.test_session() as sess:\n cdim = tf.constant(1, tf.int32)\n s0 = tf.constant([2, 3, 5], tf.int32)\n s1 = tf.constant([2, 7, 5, 10], tf.int32)\n off = gen_array_ops._concat_offset(cdim, [s0, s1])\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n r\"should contain 3 elem\"):\n sess.run(off)\n\n def testSizeMismatch(self):\n with self.test_session() as sess:\n cdim = tf.constant(1, tf.int32)\n s0 = tf.constant([2, 3, 5], tf.int32)\n s1 = tf.constant([2, 7, 10], tf.int32)\n off = gen_array_ops._concat_offset(cdim, [s0, s1])\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n r\"mismatch: 5 vs. 10\"):\n sess.run(off)\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
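The test file above exercises the pre-1.0 `tf.concat(concat_dim, values)` calling convention, with the concatenation axis passed first. Below is a minimal sketch of that convention, mirroring the HStack/VStack cases; note that from TensorFlow 1.0 onward the argument order is reversed to `tf.concat(values, axis)`.

```python
# Minimal sketch of the legacy concat calling convention exercised above
# (TensorFlow < 1.0: axis argument first).
import numpy as np
import tensorflow as tf

a = np.random.rand(4, 4).astype(np.float32)
b = np.random.rand(4, 4).astype(np.float32)

with tf.Session() as sess:
    stacked_rows = tf.concat(0, [a, b])  # shape [8, 4]
    stacked_cols = tf.concat(1, [a, b])  # shape [4, 8]
    r, c = sess.run([stacked_rows, stacked_cols])
    assert r.shape == (8, 4) and c.shape == (4, 8)
    # Rows of `a` come first along the concatenated dimension.
    np.testing.assert_allclose(r[:4, :], a)
```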
"## Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Exports an example linear regression inference graph.\n\nExports a TensorFlow graph to /tmp/saved_model/half_plus_two/ based on the\nSavedModel format.\n\nThis graph calculates,\n y = a*x + b\nwhere a and b are variables with a=0.5 and b=2.\n\nOutput from this program is typically used to exercise SavedModel load and\nexecution code.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.core.protobuf import meta_graph_pb2\nfrom tensorflow.python.saved_model import builder as saved_model_builder\nfrom tensorflow.python.saved_model import constants\nfrom tensorflow.python.saved_model import utils\n\n\ndef _generate_saved_model_for_half_plus_two(export_dir, as_text=False):\n \"\"\"Generates SavedModel for half plus two.\n\n Args:\n export_dir: The directory to which the SavedModel should be written.\n as_text: Writes the SavedModel protocol buffer in text format to disk.\n \"\"\"\n builder = saved_model_builder.SavedModelBuilder(export_dir)\n\n with tf.Session(graph=tf.Graph()) as sess:\n # Set up the model parameters as variables to exercise variable loading\n # functionality upon restore.\n a = tf.Variable(0.5, name=\"a\")\n b = tf.Variable(2.0, name=\"b\")\n\n # Create a placeholder for serialized tensorflow.Example messages to be fed.\n serialized_tf_example = tf.placeholder(tf.string, name=\"tf_example\")\n\n # Parse the tensorflow.Example looking for a feature named \"x\" with a single\n # floating point value.\n feature_configs = {\"x\": tf.FixedLenFeature([1], dtype=tf.float32),}\n tf_example = tf.parse_example(serialized_tf_example, feature_configs)\n # Use tf.identity() to assign name\n x = tf.identity(tf_example[\"x\"], name=\"x\")\n y = tf.add(tf.mul(a, x), b, name=\"y\")\n\n # Set up the signature for regression with input and output tensor\n # specification.\n input_tensor = meta_graph_pb2.TensorInfo()\n input_tensor.name = serialized_tf_example.name\n signature_inputs = {\"input\": input_tensor}\n\n output_tensor = meta_graph_pb2.TensorInfo()\n output_tensor.name = tf.identity(y).name\n signature_outputs = {\"output\": output_tensor}\n signature_def = utils.build_signature_def(signature_inputs,\n signature_outputs, \"regression\")\n\n # Initialize all variables and then save the SavedModel.\n sess.run(tf.initialize_all_variables())\n builder.add_meta_graph_and_variables(\n sess, [constants.TAG_SERVING],\n signature_def_map={\"regression\": signature_def})\n builder.save(as_text)\n\n\ndef main(_):\n export_dir_pb = \"/tmp/saved_model/half_plus_two\"\n _generate_saved_model_for_half_plus_two(export_dir_pb)\n\n export_dir_pbtxt = \"/tmp/saved_model/half_plus_two_pbtxt\"\n _generate_saved_model_for_half_plus_two(export_dir_pbtxt, as_text=True)\n\n\nif __name__ == 
\"__main__\":\n tf.app.run()\n"
] | [
[
"tensorflow.python.training.monitored_session.MonitoredSession",
"tensorflow.python.training.basic_session_run_hooks.StepCounterHook",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.training.basic_session_run_hooks.SummarySaverHook",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.contrib.learn.python.learn.utils.checkpoints.load_variable",
"tensorflow.contrib.learn.python.learn.monitors.NanLossDuringTrainingError",
"tensorflow.python.training.queue_runner.start_queue_runners",
"tensorflow.python.ops.logging_ops.get_summary_op",
"tensorflow.python.training.summary_io.SummaryWriterCache.clear",
"tensorflow.python.training.monitored_session.WorkerSessionCreator",
"tensorflow.python.training.summary_io.SummaryWriterCache.get",
"tensorflow.python.ops.variables.report_uninitialized_variables",
"tensorflow.contrib.framework.python.ops.variables.assert_or_get_global_step",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.training.basic_session_run_hooks.LoggingTensorHook",
"tensorflow.python.training.session_manager.SessionManager",
"tensorflow.python.training.basic_session_run_hooks.CheckpointSaverHook",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.ops.data_flow_ops.initialize_all_tables",
"tensorflow.python.training.basic_session_run_hooks.NanTensorHook",
"tensorflow.python.training.monitored_session.ChiefSessionCreator",
"tensorflow.python.ops.variables.initialize_local_variables",
"tensorflow.python.ops.variables.all_variables",
"numpy.isnan",
"tensorflow.python.training.coordinator.Coordinator",
"tensorflow.python.client.session.Session",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.python.ops.variables.initialize_all_variables",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.training.basic_session_run_hooks.StopAtStepHook",
"tensorflow.core.framework.summary_pb2.Summary",
"tensorflow.python.training.monitored_session.Scaffold.get_or_default",
"tensorflow.python.training.saver.Saver"
],
[
"tensorflow.contrib.learn.python.learn.models.get_rnn_model"
],
[
"tensorflow.test.compute_gradient_error",
"tensorflow.device",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.cast",
"numpy.random.random_sample",
"numpy.concatenate",
"numpy.random.randn",
"numpy.random.randint",
"tensorflow.Graph",
"tensorflow.Variable",
"numpy.arange",
"tensorflow.gradients",
"tensorflow.test.main",
"tensorflow.gather",
"tensorflow.initialize_all_variables",
"tensorflow.placeholder",
"numpy.random.rand",
"tensorflow.python.ops.gen_array_ops._concat_offset",
"numpy.array",
"tensorflow.constant",
"numpy.random.seed",
"tensorflow.ones",
"tensorflow.test.is_gpu_available"
],
[
"tensorflow.Graph",
"tensorflow.python.saved_model.builder.SavedModelBuilder",
"tensorflow.FixedLenFeature",
"tensorflow.Variable",
"tensorflow.parse_example",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.mul",
"tensorflow.initialize_all_variables",
"tensorflow.core.protobuf.meta_graph_pb2.TensorInfo",
"tensorflow.python.saved_model.utils.build_signature_def",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OakCityLabs/numpy | [
"09f5c5a64eb019b3e058c7183ca1ead6190bdbc8"
] | [
"numpy/distutils/system_info.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nThis file defines a set of system_info classes for getting\ninformation about various resources (libraries, library directories,\ninclude directories, etc.) in the system. Usage:\n info_dict = get_info(<name>)\n where <name> is a string 'atlas','x11','fftw','lapack','blas',\n 'lapack_src', 'blas_src', etc. For a complete list of allowed names,\n see the definition of get_info() function below.\n\n Returned info_dict is a dictionary which is compatible with\n distutils.setup keyword arguments. If info_dict == {}, then the\n asked resource is not available (system_info could not find it).\n\n Several *_info classes specify an environment variable to specify\n the locations of software. When setting the corresponding environment\n variable to 'None' then the software will be ignored, even when it\n is available in system.\n\nGlobal parameters:\n system_info.search_static_first - search static libraries (.a)\n in precedence to shared ones (.so, .sl) if enabled.\n system_info.verbosity - output the results to stdout if enabled.\n\nThe file 'site.cfg' is looked for in\n\n1) Directory of main setup.py file being run.\n2) Home directory of user running the setup.py file as ~/.numpy-site.cfg\n3) System wide directory (location of this file...)\n\nThe first one found is used to get system configuration options The\nformat is that used by ConfigParser (i.e., Windows .INI style). The\nsection ALL is not intended for general use.\n\nAppropriate defaults are used if nothing is specified.\n\nThe order of finding the locations of resources is the following:\n 1. environment variable\n 2. section in site.cfg\n 3. DEFAULT section in site.cfg\n 4. System default search paths (see ``default_*`` variables below).\nOnly the first complete match is returned.\n\nCurrently, the following classes are available, along with their section names:\n\n Numeric_info:Numeric\n _numpy_info:Numeric\n _pkg_config_info:None\n accelerate_info:accelerate\n agg2_info:agg2\n amd_info:amd\n atlas_3_10_blas_info:atlas\n atlas_3_10_blas_threads_info:atlas\n atlas_3_10_info:atlas\n atlas_3_10_threads_info:atlas\n atlas_blas_info:atlas\n atlas_blas_threads_info:atlas\n atlas_info:atlas\n atlas_threads_info:atlas\n blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix)\n blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS)\n blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix)\n blas_info:blas\n blas_mkl_info:mkl\n blas_opt_info:ALL # usage recommended\n blas_src_info:blas_src\n blis_info:blis\n boost_python_info:boost_python\n dfftw_info:fftw\n dfftw_threads_info:fftw\n djbfft_info:djbfft\n f2py_info:ALL\n fft_opt_info:ALL\n fftw2_info:fftw\n fftw3_info:fftw3\n fftw_info:fftw\n fftw_threads_info:fftw\n flame_info:flame\n freetype2_info:freetype2\n gdk_2_info:gdk_2\n gdk_info:gdk\n gdk_pixbuf_2_info:gdk_pixbuf_2\n gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2\n gdk_x11_2_info:gdk_x11_2\n gtkp_2_info:gtkp_2\n gtkp_x11_2_info:gtkp_x11_2\n lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix)\n lapack_atlas_3_10_info:atlas\n lapack_atlas_3_10_threads_info:atlas\n lapack_atlas_info:atlas\n lapack_atlas_threads_info:atlas\n lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK)\n lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix)\n lapack_info:lapack\n lapack_mkl_info:mkl\n lapack_opt_info:ALL # usage recommended\n lapack_src_info:lapack_src\n 
mkl_info:mkl\n numarray_info:numarray\n numerix_info:numerix\n numpy_info:numpy\n openblas64__info:openblas64_\n openblas64__lapack_info:openblas64_\n openblas_clapack_info:openblas\n openblas_ilp64_info:openblas_ilp64\n openblas_ilp64_lapack_info:openblas_ilp64\n openblas_info:openblas\n openblas_lapack_info:openblas\n sfftw_info:fftw\n sfftw_threads_info:fftw\n system_info:ALL\n umfpack_info:umfpack\n wx_info:wx\n x11_info:x11\n xft_info:xft\n\nNote that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER\nand NPY_LAPACK_ORDER environment variables to determine the order in which\nspecific BLAS and LAPACK libraries are searched for.\n\nThis search (or autodetection) can be bypassed by defining the environment\nvariables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the\nexact linker flags to use (language will be set to F77). Building against\nNetlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK\nimplementations at runtime. If using this to build NumPy itself, it is\nrecommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a\nCBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized\notherwise).\n\nExample:\n----------\n[DEFAULT]\n# default section\nlibrary_dirs = /usr/lib:/usr/local/lib:/opt/lib\ninclude_dirs = /usr/include:/usr/local/include:/opt/include\nsrc_dirs = /usr/local/src:/opt/src\n# search static libraries (.a) in preference to shared ones (.so)\nsearch_static_first = 0\n\n[fftw]\nlibraries = rfftw, fftw\n\n[atlas]\nlibrary_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas\n# for overriding the names of the atlas libraries\nlibraries = lapack, f77blas, cblas, atlas\n\n[x11]\nlibrary_dirs = /usr/X11R6/lib\ninclude_dirs = /usr/X11R6/include\n----------\n\nNote that the ``libraries`` key is the default setting for libraries.\n\nAuthors:\n Pearu Peterson <[email protected]>, February 2002\n David M. Cooke <[email protected]>, April 2002\n\nCopyright 2002 Pearu Peterson all rights reserved,\nPearu Peterson <[email protected]>\nPermission to use, modify, and distribute this software is given under the\nterms of the NumPy (BSD style) license. See LICENSE.txt that came with\nthis distribution for specifics.\n\nNO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.\n\n\"\"\"\nimport sys\nimport os\nimport re\nimport copy\nimport warnings\nimport subprocess\nimport textwrap\n\nfrom glob import glob\nfrom functools import reduce\nfrom configparser import NoOptionError\nfrom configparser import RawConfigParser as ConfigParser\n# It seems that some people are importing ConfigParser from here so is\n# good to keep its class name. 
Use of RawConfigParser is needed in\n# order to be able to load path names with percent in them, like\n# `feature%2Fcool` which is common on git flow branch names.\n\nfrom distutils.errors import DistutilsError\nfrom distutils.dist import Distribution\nimport sysconfig\nfrom numpy.distutils import log\nfrom distutils.util import get_platform\n\nfrom numpy.distutils.exec_command import (\n find_executable, filepath_from_subprocess_output,\n )\nfrom numpy.distutils.misc_util import (is_sequence, is_string,\n get_shared_lib_extension)\nfrom numpy.distutils.command.config import config as cmd_config\nfrom numpy.distutils import customized_ccompiler as _customized_ccompiler\nfrom numpy.distutils import _shell_utils\nimport distutils.ccompiler\nimport tempfile\nimport shutil\n\n__all__ = ['system_info']\n\n# Determine number of bits\nimport platform\n_bits = {'32bit': 32, '64bit': 64}\nplatform_bits = _bits[platform.architecture()[0]]\n\n\nglobal_compiler = None\n\ndef customized_ccompiler():\n global global_compiler\n if not global_compiler:\n global_compiler = _customized_ccompiler()\n return global_compiler\n\n\ndef _c_string_literal(s):\n \"\"\"\n Convert a python string into a literal suitable for inclusion into C code\n \"\"\"\n # only these three characters are forbidden in C strings\n s = s.replace('\\\\', r'\\\\')\n s = s.replace('\"', r'\\\"')\n s = s.replace('\\n', r'\\n')\n return '\"{}\"'.format(s)\n\n\ndef libpaths(paths, bits):\n \"\"\"Return a list of library paths valid on 32 or 64 bit systems.\n\n Inputs:\n paths : sequence\n A sequence of strings (typically paths)\n bits : int\n An integer, the only valid values are 32 or 64. A ValueError exception\n is raised otherwise.\n\n Examples:\n\n Consider a list of directories\n >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']\n\n For a 32-bit platform, this is already valid:\n >>> np.distutils.system_info.libpaths(paths,32)\n ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']\n\n On 64 bits, we prepend the '64' postfix\n >>> np.distutils.system_info.libpaths(paths,64)\n ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',\n '/usr/lib64', '/usr/lib']\n \"\"\"\n if bits not in (32, 64):\n raise ValueError(\"Invalid bit size in libpaths: 32 or 64 only\")\n\n # Handle 32bit case\n if bits == 32:\n return paths\n\n # Handle 64bit case\n out = []\n for p in paths:\n out.extend([p + '64', p])\n\n return out\n\n\nif sys.platform == 'win32':\n default_lib_dirs = ['C:\\\\',\n os.path.join(sysconfig.get_config_var('exec_prefix'),\n 'libs')]\n default_runtime_dirs = []\n default_include_dirs = []\n default_src_dirs = ['.']\n default_x11_lib_dirs = []\n default_x11_include_dirs = []\n _include_dirs = [\n 'include',\n 'include/suitesparse',\n ]\n _lib_dirs = [\n 'lib',\n ]\n\n _include_dirs = [d.replace('/', os.sep) for d in _include_dirs]\n _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs]\n def add_system_root(library_root):\n \"\"\"Add a package manager root to the include directories\"\"\"\n global default_lib_dirs\n global default_include_dirs\n\n library_root = os.path.normpath(library_root)\n\n default_lib_dirs.extend(\n os.path.join(library_root, d) for d in _lib_dirs)\n default_include_dirs.extend(\n os.path.join(library_root, d) for d in _include_dirs)\n\n # VCpkg is the de-facto package manager on windows for C/C++\n # libraries. 
If it is on the PATH, then we append its paths here.\n vcpkg = shutil.which('vcpkg')\n if vcpkg:\n vcpkg_dir = os.path.dirname(vcpkg)\n if platform.architecture()[0] == '32bit':\n specifier = 'x86'\n else:\n specifier = 'x64'\n\n vcpkg_installed = os.path.join(vcpkg_dir, 'installed')\n for vcpkg_root in [\n os.path.join(vcpkg_installed, specifier + '-windows'),\n os.path.join(vcpkg_installed, specifier + '-windows-static'),\n ]:\n add_system_root(vcpkg_root)\n\n # Conda is another popular package manager that provides libraries\n conda = shutil.which('conda')\n if conda:\n conda_dir = os.path.dirname(conda)\n add_system_root(os.path.join(conda_dir, '..', 'Library'))\n add_system_root(os.path.join(conda_dir, 'Library'))\n\nelse:\n default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',\n '/opt/local/lib', '/sw/lib'], platform_bits)\n default_runtime_dirs = []\n default_include_dirs = ['/usr/local/include',\n '/opt/include',\n # path of umfpack under macports\n '/opt/local/include/ufsparse',\n '/opt/local/include', '/sw/include',\n '/usr/include/suitesparse']\n default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']\n\n default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',\n '/usr/lib'], platform_bits)\n default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include']\n\n if os.path.exists('/usr/lib/X11'):\n globbed_x11_dir = glob('/usr/lib/*/libX11.so')\n if globbed_x11_dir:\n x11_so_dir = os.path.split(globbed_x11_dir[0])[0]\n default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])\n default_x11_include_dirs.extend(['/usr/lib/X11/include',\n '/usr/include/X11'])\n\n # iOS: we need to cancel this call\n if (not os.getenv('PLATFORM').startswith('iphone')):\n with open(os.devnull, 'w') as tmp:\n try:\n p = subprocess.Popen([\"gcc\", \"-print-multiarch\"], stdout=subprocess.PIPE,\n stderr=tmp)\n except (OSError, DistutilsError):\n # OSError if gcc is not installed, or SandboxViolation (DistutilsError\n # subclass) if an old setuptools bug is triggered (see gh-3160).\n pass\n else:\n triplet = str(p.communicate()[0].decode().strip())\n if p.returncode == 0:\n # gcc supports the \"-print-multiarch\" option\n default_x11_lib_dirs += [os.path.join(\"/usr/lib/\", triplet)]\n default_lib_dirs += [os.path.join(\"/usr/lib/\", triplet)]\n\n\nif os.path.join(sys.prefix, 'lib') not in default_lib_dirs:\n default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))\n default_include_dirs.append(os.path.join(sys.prefix, 'include'))\n default_src_dirs.append(os.path.join(sys.prefix, 'src'))\n\ndefault_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]\ndefault_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]\ndefault_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]\ndefault_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]\n\nso_ext = get_shared_lib_extension()\n\n\ndef get_standard_file(fname):\n \"\"\"Returns a list of files named 'fname' from\n 1) System-wide directory (directory-location of this module)\n 2) Users HOME directory (os.environ['HOME'])\n 3) Local directory\n \"\"\"\n # System-wide file\n filenames = []\n try:\n f = __file__\n except NameError:\n f = sys.argv[0]\n sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],\n fname)\n if os.path.isfile(sysfile):\n filenames.append(sysfile)\n\n # Home directory\n # And look for the user config file\n try:\n f = os.path.expanduser('~')\n except KeyError:\n pass\n else:\n user_file = os.path.join(f, fname)\n if 
os.path.isfile(user_file):\n filenames.append(user_file)\n\n # Local file\n if os.path.isfile(fname):\n filenames.append(os.path.abspath(fname))\n\n return filenames\n\n\ndef _parse_env_order(base_order, env):\n \"\"\" Parse an environment variable `env` by splitting with \",\" and only returning elements from `base_order`\n\n This method will sequence the environment variable and check for their\n individual elements in `base_order`.\n\n The items in the environment variable may be negated via '^item' or '!itema,itemb'.\n It must start with ^/! to negate all options.\n\n Raises\n ------\n ValueError: for mixed negated and non-negated orders or multiple negated orders\n\n Parameters\n ----------\n base_order : list of str\n the base list of orders\n env : str\n the environment variable to be parsed, if none is found, `base_order` is returned\n\n Returns\n -------\n allow_order : list of str\n allowed orders in lower-case\n unknown_order : list of str\n for values not overlapping with `base_order`\n \"\"\"\n order_str = os.environ.get(env, None)\n\n # ensure all base-orders are lower-case (for easier comparison)\n base_order = [order.lower() for order in base_order]\n if order_str is None:\n return base_order, []\n\n neg = order_str.startswith('^') or order_str.startswith('!')\n # Check format\n order_str_l = list(order_str)\n sum_neg = order_str_l.count('^') + order_str_l.count('!')\n if neg:\n if sum_neg > 1:\n raise ValueError(f\"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}\")\n # remove prefix\n order_str = order_str[1:]\n elif sum_neg > 0:\n raise ValueError(f\"Environment variable '{env}' may not mix negated an non-negated items: {order_str}\")\n\n # Split and lower case\n orders = order_str.lower().split(',')\n\n # to inform callee about non-overlapping elements\n unknown_order = []\n\n # if negated, we have to remove from the order\n if neg:\n allow_order = base_order.copy()\n\n for order in orders:\n if not order:\n continue\n\n if order not in base_order:\n unknown_order.append(order)\n continue\n\n if order in allow_order:\n allow_order.remove(order)\n\n else:\n allow_order = []\n\n for order in orders:\n if not order:\n continue\n\n if order not in base_order:\n unknown_order.append(order)\n continue\n\n if order not in allow_order:\n allow_order.append(order)\n\n return allow_order, unknown_order\n\n\ndef get_info(name, notfound_action=0):\n \"\"\"\n notfound_action:\n 0 - do nothing\n 1 - display warning message\n 2 - raise error\n \"\"\"\n cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead\n 'atlas_threads': atlas_threads_info, # ditto\n 'atlas_blas': atlas_blas_info,\n 'atlas_blas_threads': atlas_blas_threads_info,\n 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead\n 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto\n 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead\n 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto\n 'atlas_3_10_blas': atlas_3_10_blas_info,\n 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,\n 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead\n 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto\n 'flame': flame_info, # use lapack_opt instead\n 'mkl': mkl_info,\n # openblas which may or may not have embedded lapack\n 'openblas': openblas_info, # use blas_opt instead\n # openblas with embedded lapack\n 'openblas_lapack': openblas_lapack_info, # use blas_opt instead\n 'openblas_clapack': openblas_clapack_info, # use 
blas_opt instead\n 'blis': blis_info, # use blas_opt instead\n 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead\n 'blas_mkl': blas_mkl_info, # use blas_opt instead\n 'accelerate': accelerate_info, # use blas_opt instead\n 'openblas64_': openblas64__info,\n 'openblas64__lapack': openblas64__lapack_info,\n 'openblas_ilp64': openblas_ilp64_info,\n 'openblas_ilp64_lapack': openblas_ilp64_lapack_info,\n 'x11': x11_info,\n 'fft_opt': fft_opt_info,\n 'fftw': fftw_info,\n 'fftw2': fftw2_info,\n 'fftw3': fftw3_info,\n 'dfftw': dfftw_info,\n 'sfftw': sfftw_info,\n 'fftw_threads': fftw_threads_info,\n 'dfftw_threads': dfftw_threads_info,\n 'sfftw_threads': sfftw_threads_info,\n 'djbfft': djbfft_info,\n 'blas': blas_info, # use blas_opt instead\n 'lapack': lapack_info, # use lapack_opt instead\n 'lapack_src': lapack_src_info,\n 'blas_src': blas_src_info,\n 'numpy': numpy_info,\n 'f2py': f2py_info,\n 'Numeric': Numeric_info,\n 'numeric': Numeric_info,\n 'numarray': numarray_info,\n 'numerix': numerix_info,\n 'lapack_opt': lapack_opt_info,\n 'lapack_ilp64_opt': lapack_ilp64_opt_info,\n 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,\n 'lapack64__opt': lapack64__opt_info,\n 'blas_opt': blas_opt_info,\n 'blas_ilp64_opt': blas_ilp64_opt_info,\n 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,\n 'blas64__opt': blas64__opt_info,\n 'boost_python': boost_python_info,\n 'agg2': agg2_info,\n 'wx': wx_info,\n 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,\n 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,\n 'gdk_pixbuf_2': gdk_pixbuf_2_info,\n 'gdk-pixbuf-2.0': gdk_pixbuf_2_info,\n 'gdk': gdk_info,\n 'gdk_2': gdk_2_info,\n 'gdk-2.0': gdk_2_info,\n 'gdk_x11_2': gdk_x11_2_info,\n 'gdk-x11-2.0': gdk_x11_2_info,\n 'gtkp_x11_2': gtkp_x11_2_info,\n 'gtk+-x11-2.0': gtkp_x11_2_info,\n 'gtkp_2': gtkp_2_info,\n 'gtk+-2.0': gtkp_2_info,\n 'xft': xft_info,\n 'freetype2': freetype2_info,\n 'umfpack': umfpack_info,\n 'amd': amd_info,\n }.get(name.lower(), system_info)\n return cl().get_info(notfound_action)\n\n\nclass NotFoundError(DistutilsError):\n \"\"\"Some third-party program or library is not found.\"\"\"\n\n\nclass AliasedOptionError(DistutilsError):\n \"\"\"\n Aliases entries in config files should not be existing.\n In section '{section}' we found multiple appearances of options {options}.\"\"\"\n\n\nclass AtlasNotFoundError(NotFoundError):\n \"\"\"\n Atlas (http://github.com/math-atlas/math-atlas) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [atlas]) or by setting\n the ATLAS environment variable.\"\"\"\n\n\nclass FlameNotFoundError(NotFoundError):\n \"\"\"\n FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [flame]).\"\"\"\n\n\nclass LapackNotFoundError(NotFoundError):\n \"\"\"\n Lapack (http://www.netlib.org/lapack/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [lapack]) or by setting\n the LAPACK environment variable.\"\"\"\n\n\nclass LapackSrcNotFoundError(LapackNotFoundError):\n \"\"\"\n Lapack (http://www.netlib.org/lapack/) sources not found.\n Directories to search for the sources can be specified in the\n numpy/distutils/site.cfg file (section [lapack_src]) or by setting\n the LAPACK_SRC environment variable.\"\"\"\n\n\nclass LapackILP64NotFoundError(NotFoundError):\n \"\"\"\n 64-bit Lapack libraries not found.\n 
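The search order can be changed with the NPY_LAPACK_ILP64_ORDER\n environment variable.\n 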
Known libraries in numpy/distutils/site.cfg file are:\n openblas64_, openblas_ilp64\n \"\"\"\n\nclass BlasOptNotFoundError(NotFoundError):\n \"\"\"\n Optimized (vendor) Blas libraries are not found.\n Falls back to netlib Blas library which has worse performance.\n A better performance should be easily gained by switching\n Blas library.\"\"\"\n\nclass BlasNotFoundError(NotFoundError):\n \"\"\"\n Blas (http://www.netlib.org/blas/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [blas]) or by setting\n the BLAS environment variable.\"\"\"\n\nclass BlasILP64NotFoundError(NotFoundError):\n \"\"\"\n 64-bit Blas libraries not found.\n Known libraries in numpy/distutils/site.cfg file are:\n openblas64_, openblas_ilp64\n \"\"\"\n\nclass BlasSrcNotFoundError(BlasNotFoundError):\n \"\"\"\n Blas (http://www.netlib.org/blas/) sources not found.\n Directories to search for the sources can be specified in the\n numpy/distutils/site.cfg file (section [blas_src]) or by setting\n the BLAS_SRC environment variable.\"\"\"\n\n\nclass FFTWNotFoundError(NotFoundError):\n \"\"\"\n FFTW (http://www.fftw.org/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [fftw]) or by setting\n the FFTW environment variable.\"\"\"\n\n\nclass DJBFFTNotFoundError(NotFoundError):\n \"\"\"\n DJBFFT (https://cr.yp.to/djbfft.html) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [djbfft]) or by setting\n the DJBFFT environment variable.\"\"\"\n\n\nclass NumericNotFoundError(NotFoundError):\n \"\"\"\n Numeric (https://www.numpy.org/) module not found.\n Get it from above location, install it, and retry setup.py.\"\"\"\n\n\nclass X11NotFoundError(NotFoundError):\n \"\"\"X11 libraries not found.\"\"\"\n\n\nclass UmfpackNotFoundError(NotFoundError):\n \"\"\"\n UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/)\n not found. Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [umfpack]) or by setting\n the UMFPACK environment variable.\"\"\"\n\n\nclass system_info:\n\n \"\"\" get_info() is the only public method. Don't use others.\n \"\"\"\n dir_env_var = None\n # XXX: search_static_first is disabled by default, may disappear in\n # future unless it is proved to be useful.\n search_static_first = 0\n # The base-class section name is a random word \"ALL\" and is not really\n # intended for general use. It cannot be None nor can it be DEFAULT as\n # these break the ConfigParser. 
See gh-15338\n section = 'ALL'\n saved_results = {}\n\n notfounderror = NotFoundError\n\n def __init__(self,\n default_lib_dirs=default_lib_dirs,\n default_include_dirs=default_include_dirs,\n ):\n self.__class__.info = {}\n self.local_prefixes = []\n defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),\n 'include_dirs': os.pathsep.join(default_include_dirs),\n 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),\n 'rpath': '',\n 'src_dirs': os.pathsep.join(default_src_dirs),\n 'search_static_first': str(self.search_static_first),\n 'extra_compile_args': '', 'extra_link_args': ''}\n self.cp = ConfigParser(defaults)\n self.files = []\n self.files.extend(get_standard_file('.numpy-site.cfg'))\n self.files.extend(get_standard_file('site.cfg'))\n self.parse_config_files()\n\n if self.section is not None:\n self.search_static_first = self.cp.getboolean(\n self.section, 'search_static_first')\n assert isinstance(self.search_static_first, int)\n\n def parse_config_files(self):\n self.cp.read(self.files)\n if not self.cp.has_section(self.section):\n if self.section is not None:\n self.cp.add_section(self.section)\n\n def calc_libraries_info(self):\n libs = self.get_libraries()\n dirs = self.get_lib_dirs()\n # The extensions use runtime_library_dirs\n r_dirs = self.get_runtime_lib_dirs()\n # Intrinsic distutils use rpath, we simply append both entries\n # as though they were one entry\n r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))\n info = {}\n for lib in libs:\n i = self.check_libs(dirs, [lib])\n if i is not None:\n dict_append(info, **i)\n else:\n log.info('Library %s was not found. Ignoring' % (lib))\n\n if r_dirs:\n i = self.check_libs(r_dirs, [lib])\n if i is not None:\n # Swap library keywords found to runtime_library_dirs\n # the libraries are insisting on the user having defined\n # them using the library_dirs, and not necessarily by\n # runtime_library_dirs\n del i['libraries']\n i['runtime_library_dirs'] = i.pop('library_dirs')\n dict_append(info, **i)\n else:\n log.info('Runtime library %s was not found. 
Ignoring' % (lib))\n\n return info\n\n def set_info(self, **info):\n if info:\n lib_info = self.calc_libraries_info()\n dict_append(info, **lib_info)\n # Update extra information\n extra_info = self.calc_extra_info()\n dict_append(info, **extra_info)\n self.saved_results[self.__class__.__name__] = info\n\n def get_option_single(self, *options):\n \"\"\" Ensure that only one of `options` are found in the section\n\n Parameters\n ----------\n *options : list of str\n a list of options to be found in the section (``self.section``)\n\n Returns\n -------\n str :\n the option that is uniquely found in the section\n\n Raises\n ------\n AliasedOptionError :\n in case more than one of the options are found\n \"\"\"\n found = [self.cp.has_option(self.section, opt) for opt in options]\n if sum(found) == 1:\n return options[found.index(True)]\n elif sum(found) == 0:\n # nothing is found anyways\n return options[0]\n\n # Else we have more than 1 key found\n if AliasedOptionError.__doc__ is None:\n raise AliasedOptionError()\n raise AliasedOptionError(AliasedOptionError.__doc__.format(\n section=self.section, options='[{}]'.format(', '.join(options))))\n\n\n def has_info(self):\n return self.__class__.__name__ in self.saved_results\n\n def calc_extra_info(self):\n \"\"\" Updates the information in the current information with\n respect to these flags:\n extra_compile_args\n extra_link_args\n \"\"\"\n info = {}\n for key in ['extra_compile_args', 'extra_link_args']:\n # Get values\n opt = self.cp.get(self.section, key)\n opt = _shell_utils.NativeParser.split(opt)\n if opt:\n tmp = {key: opt}\n dict_append(info, **tmp)\n return info\n\n def get_info(self, notfound_action=0):\n \"\"\" Return a dictionary with items that are compatible\n with numpy.distutils.setup keyword arguments.\n \"\"\"\n flag = 0\n if not self.has_info():\n flag = 1\n log.info(self.__class__.__name__ + ':')\n if hasattr(self, 'calc_info'):\n self.calc_info()\n if notfound_action:\n if not self.has_info():\n if notfound_action == 1:\n warnings.warn(self.notfounderror.__doc__, stacklevel=2)\n elif notfound_action == 2:\n raise self.notfounderror(self.notfounderror.__doc__)\n else:\n raise ValueError(repr(notfound_action))\n\n if not self.has_info():\n log.info(' NOT AVAILABLE')\n self.set_info()\n else:\n log.info(' FOUND:')\n\n res = self.saved_results.get(self.__class__.__name__)\n if log.get_threshold() <= log.INFO and flag:\n for k, v in res.items():\n v = str(v)\n if k in ['sources', 'libraries'] and len(v) > 270:\n v = v[:120] + '...\\n...\\n...' 
+ v[-120:]\n log.info(' %s = %s', k, v)\n log.info('')\n\n return copy.deepcopy(res)\n\n def get_paths(self, section, key):\n dirs = self.cp.get(section, key).split(os.pathsep)\n env_var = self.dir_env_var\n if env_var:\n if is_sequence(env_var):\n e0 = env_var[-1]\n for e in env_var:\n if e in os.environ:\n e0 = e\n break\n if not env_var[0] == e0:\n log.info('Setting %s=%s' % (env_var[0], e0))\n env_var = e0\n if env_var and env_var in os.environ:\n d = os.environ[env_var]\n if d == 'None':\n log.info('Disabled %s: %s',\n self.__class__.__name__, '(%s is None)'\n % (env_var,))\n return []\n if os.path.isfile(d):\n dirs = [os.path.dirname(d)] + dirs\n l = getattr(self, '_lib_names', [])\n if len(l) == 1:\n b = os.path.basename(d)\n b = os.path.splitext(b)[0]\n if b[:3] == 'lib':\n log.info('Replacing _lib_names[0]==%r with %r' \\\n % (self._lib_names[0], b[3:]))\n self._lib_names[0] = b[3:]\n else:\n ds = d.split(os.pathsep)\n ds2 = []\n for d in ds:\n if os.path.isdir(d):\n ds2.append(d)\n for dd in ['include', 'lib']:\n d1 = os.path.join(d, dd)\n if os.path.isdir(d1):\n ds2.append(d1)\n dirs = ds2 + dirs\n default_dirs = self.cp.get(self.section, key).split(os.pathsep)\n dirs.extend(default_dirs)\n ret = []\n for d in dirs:\n if len(d) > 0 and not os.path.isdir(d):\n warnings.warn('Specified path %s is invalid.' % d, stacklevel=2)\n continue\n\n if d not in ret:\n ret.append(d)\n\n log.debug('( %s = %s )', key, ':'.join(ret))\n return ret\n\n def get_lib_dirs(self, key='library_dirs'):\n return self.get_paths(self.section, key)\n\n def get_runtime_lib_dirs(self, key='runtime_library_dirs'):\n path = self.get_paths(self.section, key)\n if path == ['']:\n path = []\n return path\n\n def get_include_dirs(self, key='include_dirs'):\n return self.get_paths(self.section, key)\n\n def get_src_dirs(self, key='src_dirs'):\n return self.get_paths(self.section, key)\n\n def get_libs(self, key, default):\n try:\n libs = self.cp.get(self.section, key)\n except NoOptionError:\n if not default:\n return []\n if is_string(default):\n return [default]\n return default\n return [b for b in [a.strip() for a in libs.split(',')] if b]\n\n def get_libraries(self, key='libraries'):\n if hasattr(self, '_lib_names'):\n return self.get_libs(key, default=self._lib_names)\n else:\n return self.get_libs(key, '')\n\n def library_extensions(self):\n c = customized_ccompiler()\n static_exts = []\n if c.compiler_type != 'msvc':\n # MSVC doesn't understand binutils\n static_exts.append('.a')\n if sys.platform == 'win32':\n static_exts.append('.lib') # .lib is used by MSVC and others\n if self.search_static_first:\n exts = static_exts + [so_ext]\n else:\n exts = [so_ext] + static_exts\n if sys.platform == 'cygwin':\n exts.append('.dll.a')\n if sys.platform == 'darwin':\n exts.append('.dylib')\n return exts\n\n def check_libs(self, lib_dirs, libs, opt_libs=[]):\n \"\"\"If static or shared libraries are available then return\n their info dictionary.\n\n Checks for all libraries as shared libraries first, then\n static (or vice versa if self.search_static_first is True).\n \"\"\"\n exts = self.library_extensions()\n info = None\n for ext in exts:\n info = self._check_libs(lib_dirs, libs, opt_libs, [ext])\n if info is not None:\n break\n if not info:\n log.info(' libraries %s not found in %s', ','.join(libs),\n lib_dirs)\n return info\n\n def check_libs2(self, lib_dirs, libs, opt_libs=[]):\n \"\"\"If static or shared libraries are available then return\n their info dictionary.\n\n Checks each library for shared or static.\n 
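Unlike ``check_libs``, each library may therefore be matched with a\n different extension (e.g. one shared, another static).\n 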
\"\"\"\n exts = self.library_extensions()\n info = self._check_libs(lib_dirs, libs, opt_libs, exts)\n if not info:\n log.info(' libraries %s not found in %s', ','.join(libs),\n lib_dirs)\n\n return info\n\n def _find_lib(self, lib_dir, lib, exts):\n assert is_string(lib_dir)\n # under windows first try without 'lib' prefix\n if sys.platform == 'win32':\n lib_prefixes = ['', 'lib']\n else:\n lib_prefixes = ['lib']\n # for each library name, see if we can find a file for it.\n for ext in exts:\n for prefix in lib_prefixes:\n p = self.combine_paths(lib_dir, prefix + lib + ext)\n if p:\n break\n if p:\n assert len(p) == 1\n # ??? splitext on p[0] would do this for cygwin\n # doesn't seem correct\n if ext == '.dll.a':\n lib += '.dll'\n if ext == '.lib':\n lib = prefix + lib\n return lib\n\n return False\n\n def _find_libs(self, lib_dirs, libs, exts):\n # make sure we preserve the order of libs, as it can be important\n found_dirs, found_libs = [], []\n for lib in libs:\n for lib_dir in lib_dirs:\n found_lib = self._find_lib(lib_dir, lib, exts)\n if found_lib:\n found_libs.append(found_lib)\n if lib_dir not in found_dirs:\n found_dirs.append(lib_dir)\n break\n return found_dirs, found_libs\n\n def _check_libs(self, lib_dirs, libs, opt_libs, exts):\n \"\"\"Find mandatory and optional libs in expected paths.\n\n Missing optional libraries are silently forgotten.\n \"\"\"\n if not is_sequence(lib_dirs):\n lib_dirs = [lib_dirs]\n # First, try to find the mandatory libraries\n found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)\n if len(found_libs) > 0 and len(found_libs) == len(libs):\n # Now, check for optional libraries\n opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)\n found_libs.extend(opt_found_libs)\n for lib_dir in opt_found_dirs:\n if lib_dir not in found_dirs:\n found_dirs.append(lib_dir)\n info = {'libraries': found_libs, 'library_dirs': found_dirs}\n return info\n else:\n return None\n\n def combine_paths(self, *args):\n \"\"\"Return a list of existing paths composed by all combinations\n of items from the arguments.\n \"\"\"\n return combine_paths(*args)\n\n\nclass fft_opt_info(system_info):\n\n def calc_info(self):\n info = {}\n fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')\n djbfft_info = get_info('djbfft')\n if fftw_info:\n dict_append(info, **fftw_info)\n if djbfft_info:\n dict_append(info, **djbfft_info)\n self.set_info(**info)\n return\n\n\nclass fftw_info(system_info):\n #variables to override\n section = 'fftw'\n dir_env_var = 'FFTW'\n notfounderror = FFTWNotFoundError\n ver_info = [{'name':'fftw3',\n 'libs':['fftw3'],\n 'includes':['fftw3.h'],\n 'macros':[('SCIPY_FFTW3_H', None)]},\n {'name':'fftw2',\n 'libs':['rfftw', 'fftw'],\n 'includes':['fftw.h', 'rfftw.h'],\n 'macros':[('SCIPY_FFTW_H', None)]}]\n\n def calc_ver_info(self, ver_param):\n \"\"\"Returns True on successful version detection, else False\"\"\"\n lib_dirs = self.get_lib_dirs()\n incl_dirs = self.get_include_dirs()\n\n opt = self.get_option_single(self.section + '_libs', 'libraries')\n libs = self.get_libs(opt, ver_param['libs'])\n info = self.check_libs(lib_dirs, libs)\n if info is not None:\n flag = 0\n for d in incl_dirs:\n if len(self.combine_paths(d, ver_param['includes'])) \\\n == len(ver_param['includes']):\n dict_append(info, include_dirs=[d])\n flag = 1\n break\n if flag:\n dict_append(info, define_macros=ver_param['macros'])\n else:\n info = None\n if info is not None:\n self.set_info(**info)\n return True\n else:\n log.info(' %s not found' 
% (ver_param['name']))\n return False\n\n def calc_info(self):\n for i in self.ver_info:\n if self.calc_ver_info(i):\n break\n\n\nclass fftw2_info(fftw_info):\n #variables to override\n section = 'fftw'\n dir_env_var = 'FFTW'\n notfounderror = FFTWNotFoundError\n ver_info = [{'name':'fftw2',\n 'libs':['rfftw', 'fftw'],\n 'includes':['fftw.h', 'rfftw.h'],\n 'macros':[('SCIPY_FFTW_H', None)]}\n ]\n\n\nclass fftw3_info(fftw_info):\n #variables to override\n section = 'fftw3'\n dir_env_var = 'FFTW3'\n notfounderror = FFTWNotFoundError\n ver_info = [{'name':'fftw3',\n 'libs':['fftw3'],\n 'includes':['fftw3.h'],\n 'macros':[('SCIPY_FFTW3_H', None)]},\n ]\n\n\nclass dfftw_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'dfftw',\n 'libs':['drfftw', 'dfftw'],\n 'includes':['dfftw.h', 'drfftw.h'],\n 'macros':[('SCIPY_DFFTW_H', None)]}]\n\n\nclass sfftw_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'sfftw',\n 'libs':['srfftw', 'sfftw'],\n 'includes':['sfftw.h', 'srfftw.h'],\n 'macros':[('SCIPY_SFFTW_H', None)]}]\n\n\nclass fftw_threads_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'fftw threads',\n 'libs':['rfftw_threads', 'fftw_threads'],\n 'includes':['fftw_threads.h', 'rfftw_threads.h'],\n 'macros':[('SCIPY_FFTW_THREADS_H', None)]}]\n\n\nclass dfftw_threads_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'dfftw threads',\n 'libs':['drfftw_threads', 'dfftw_threads'],\n 'includes':['dfftw_threads.h', 'drfftw_threads.h'],\n 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]\n\n\nclass sfftw_threads_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'sfftw threads',\n 'libs':['srfftw_threads', 'sfftw_threads'],\n 'includes':['sfftw_threads.h', 'srfftw_threads.h'],\n 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]\n\n\nclass djbfft_info(system_info):\n section = 'djbfft'\n dir_env_var = 'DJBFFT'\n notfounderror = DJBFFTNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend(self.combine_paths(d, ['djbfft']) + [d])\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n incl_dirs = self.get_include_dirs()\n info = None\n for d in lib_dirs:\n p = self.combine_paths(d, ['djbfft.a'])\n if p:\n info = {'extra_objects': p}\n break\n p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])\n if p:\n info = {'libraries': ['djbfft'], 'library_dirs': [d]}\n break\n if info is None:\n return\n for d in incl_dirs:\n if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:\n dict_append(info, include_dirs=[d],\n define_macros=[('SCIPY_DJBFFT_H', None)])\n self.set_info(**info)\n return\n return\n\n\nclass mkl_info(system_info):\n section = 'mkl'\n dir_env_var = 'MKLROOT'\n _lib_mkl = ['mkl_rt']\n\n def get_mkl_rootdir(self):\n mklroot = os.environ.get('MKLROOT', None)\n if mklroot is not None:\n return mklroot\n paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)\n ld_so_conf = '/etc/ld.so.conf'\n if os.path.isfile(ld_so_conf):\n with open(ld_so_conf, 'r') as f:\n for d in f:\n d = d.strip()\n if d:\n paths.append(d)\n intel_mkl_dirs = []\n for path in paths:\n path_atoms = path.split(os.sep)\n for m in path_atoms:\n if m.startswith('mkl'):\n d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])\n intel_mkl_dirs.append(d)\n break\n for d in paths:\n dirs = glob(os.path.join(d, 'mkl', 
'*'))\n dirs += glob(os.path.join(d, 'mkl*'))\n for sub_dir in dirs:\n if os.path.isdir(os.path.join(sub_dir, 'lib')):\n return sub_dir\n return None\n\n def __init__(self):\n mklroot = self.get_mkl_rootdir()\n if mklroot is None:\n system_info.__init__(self)\n else:\n from .cpuinfo import cpu\n if cpu.is_Itanium():\n plt = '64'\n elif cpu.is_Intel() and cpu.is_64bit():\n plt = 'intel64'\n else:\n plt = '32'\n system_info.__init__(\n self,\n default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],\n default_include_dirs=[os.path.join(mklroot, 'include')])\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n incl_dirs = self.get_include_dirs()\n opt = self.get_option_single('mkl_libs', 'libraries')\n mkl_libs = self.get_libs(opt, self._lib_mkl)\n info = self.check_libs2(lib_dirs, mkl_libs)\n if info is None:\n return\n dict_append(info,\n define_macros=[('SCIPY_MKL_H', None),\n ('HAVE_CBLAS', None)],\n include_dirs=incl_dirs)\n if sys.platform == 'win32':\n pass # win32 has no pthread library\n else:\n dict_append(info, libraries=['pthread'])\n self.set_info(**info)\n\n\nclass lapack_mkl_info(mkl_info):\n pass\n\n\nclass blas_mkl_info(mkl_info):\n pass\n\n\nclass atlas_info(system_info):\n section = 'atlas'\n dir_env_var = 'ATLAS'\n _lib_names = ['f77blas', 'cblas']\n if sys.platform[:7] == 'freebsd':\n _lib_atlas = ['atlas_r']\n _lib_lapack = ['alapack_r']\n else:\n _lib_atlas = ['atlas']\n _lib_lapack = ['lapack']\n\n notfounderror = AtlasNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',\n 'sse', '3dnow', 'sse2']) + [d])\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n info = {}\n opt = self.get_option_single('atlas_libs', 'libraries')\n atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)\n lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)\n atlas = None\n lapack = None\n atlas_1 = None\n for d in lib_dirs:\n atlas = self.check_libs2(d, atlas_libs, [])\n if atlas is not None:\n lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])\n lapack = self.check_libs2(lib_dirs2, lapack_libs, [])\n if lapack is not None:\n break\n if atlas:\n atlas_1 = atlas\n log.info(self.__class__)\n if atlas is None:\n atlas = atlas_1\n if atlas is None:\n return\n include_dirs = self.get_include_dirs()\n h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])\n h = h[0]\n if h:\n h = os.path.dirname(h)\n dict_append(info, include_dirs=[h])\n info['language'] = 'c'\n if lapack is not None:\n dict_append(info, **lapack)\n dict_append(info, **atlas)\n elif 'lapack_atlas' in atlas['libraries']:\n dict_append(info, **atlas)\n dict_append(info,\n define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])\n self.set_info(**info)\n return\n else:\n dict_append(info, **atlas)\n dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])\n message = textwrap.dedent(\"\"\"\n *********************************************************************\n Could not find lapack library within the ATLAS installation.\n *********************************************************************\n \"\"\")\n warnings.warn(message, stacklevel=2)\n self.set_info(**info)\n return\n\n # Check if lapack library is complete, only warn if it is not.\n lapack_dir = lapack['library_dirs'][0]\n lapack_name = lapack['libraries'][0]\n lapack_lib = None\n lib_prefixes = ['lib']\n if sys.platform == 
'win32':\n lib_prefixes.append('')\n for e in self.library_extensions():\n for prefix in lib_prefixes:\n fn = os.path.join(lapack_dir, prefix + lapack_name + e)\n if os.path.exists(fn):\n lapack_lib = fn\n break\n if lapack_lib:\n break\n if lapack_lib is not None:\n sz = os.stat(lapack_lib)[6]\n if sz <= 4000 * 1024:\n message = textwrap.dedent(\"\"\"\n *********************************************************************\n Lapack library (from ATLAS) is probably incomplete:\n size of %s is %sk (expected >4000k)\n\n Follow the instructions in the KNOWN PROBLEMS section of the file\n numpy/INSTALL.txt.\n *********************************************************************\n \"\"\") % (lapack_lib, sz / 1024)\n warnings.warn(message, stacklevel=2)\n else:\n info['language'] = 'f77'\n\n atlas_version, atlas_extra_info = get_atlas_version(**atlas)\n dict_append(info, **atlas_extra_info)\n\n self.set_info(**info)\n\n\nclass atlas_blas_info(atlas_info):\n _lib_names = ['f77blas', 'cblas']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n info = {}\n opt = self.get_option_single('atlas_libs', 'libraries')\n atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)\n atlas = self.check_libs2(lib_dirs, atlas_libs, [])\n if atlas is None:\n return\n include_dirs = self.get_include_dirs()\n h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])\n h = h[0]\n if h:\n h = os.path.dirname(h)\n dict_append(info, include_dirs=[h])\n info['language'] = 'c'\n info['define_macros'] = [('HAVE_CBLAS', None)]\n\n atlas_version, atlas_extra_info = get_atlas_version(**atlas)\n dict_append(atlas, **atlas_extra_info)\n\n dict_append(info, **atlas)\n\n self.set_info(**info)\n return\n\n\nclass atlas_threads_info(atlas_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['ptf77blas', 'ptcblas']\n\n\nclass atlas_blas_threads_info(atlas_blas_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['ptf77blas', 'ptcblas']\n\n\nclass lapack_atlas_info(atlas_info):\n _lib_names = ['lapack_atlas'] + atlas_info._lib_names\n\n\nclass lapack_atlas_threads_info(atlas_threads_info):\n _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names\n\n\nclass atlas_3_10_info(atlas_info):\n _lib_names = ['satlas']\n _lib_atlas = _lib_names\n _lib_lapack = _lib_names\n\n\nclass atlas_3_10_blas_info(atlas_3_10_info):\n _lib_names = ['satlas']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n info = {}\n opt = self.get_option_single('atlas_lib', 'libraries')\n atlas_libs = self.get_libs(opt, self._lib_names)\n atlas = self.check_libs2(lib_dirs, atlas_libs, [])\n if atlas is None:\n return\n include_dirs = self.get_include_dirs()\n h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])\n h = h[0]\n if h:\n h = os.path.dirname(h)\n dict_append(info, include_dirs=[h])\n info['language'] = 'c'\n info['define_macros'] = [('HAVE_CBLAS', None)]\n\n atlas_version, atlas_extra_info = get_atlas_version(**atlas)\n dict_append(atlas, **atlas_extra_info)\n\n dict_append(info, **atlas)\n\n self.set_info(**info)\n return\n\n\nclass atlas_3_10_threads_info(atlas_3_10_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['tatlas']\n _lib_atlas = _lib_names\n _lib_lapack = _lib_names\n\n\nclass atlas_3_10_blas_threads_info(atlas_3_10_blas_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['tatlas']\n\n\nclass lapack_atlas_3_10_info(atlas_3_10_info):\n pass\n\n\nclass lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):\n pass\n\n\nclass lapack_info(system_info):\n 
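# Plain LAPACK lookup (a library simply named 'lapack'). The search\n # honors the [lapack] section of site.cfg and the LAPACK environment\n # variable; illustrative site.cfg values:\n #\n # [lapack]\n # library_dirs = /opt/lapack/lib\n # lapack_libs = lapack\n 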
section = 'lapack'\n dir_env_var = 'LAPACK'\n _lib_names = ['lapack']\n notfounderror = LapackNotFoundError\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n opt = self.get_option_single('lapack_libs', 'libraries')\n lapack_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, lapack_libs, [])\n if info is None:\n return\n info['language'] = 'f77'\n self.set_info(**info)\n\n\nclass lapack_src_info(system_info):\n # LAPACK_SRC is deprecated, please do not use this!\n # Build or install a BLAS library via your package manager or from\n # source separately.\n section = 'lapack_src'\n dir_env_var = 'LAPACK_SRC'\n notfounderror = LapackSrcNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'dgesv.f')):\n src_dir = d\n break\n if not src_dir:\n #XXX: Get sources from netlib. May be ask first.\n return\n # The following is extracted from LAPACK-3.0/SRC/Makefile.\n # Added missing names from lapack-lite-3.1.1/SRC/Makefile\n # while keeping removed names for Lapack-3.0 compatibility.\n allaux = '''\n ilaenv ieeeck lsame lsamen xerbla\n iparmq\n ''' # *.f\n laux = '''\n bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1\n laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2\n lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre\n larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4\n lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1\n lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf\n stebz stedc steqr sterf\n\n larra larrc larrd larr larrk larrj larrr laneg laisnan isnan\n lazq3 lazq4\n ''' # [s|d]*.f\n lasrc = '''\n gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak\n gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv\n gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2\n geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd\n gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal\n gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd\n ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein\n hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0\n lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb\n lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp\n laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv\n lartv larz larzb larzt laswp lasyf latbs latdf latps latrd\n latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv\n pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2\n potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri\n pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs\n spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv\n sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2\n tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs\n trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs\n tzrqf tzrzf\n\n lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5\n ''' # [s|c|d|z]*.f\n sd_lasrc = '''\n laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l\n org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr\n orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3\n ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx\n sbtrd spev 
spevd spevx spgst spgv spgvd spgvx sptrd stev stevd\n stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd\n sygvx sytd2 sytrd\n ''' # [s|d]*.f\n cz_lasrc = '''\n bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev\n heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv\n hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd\n hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf\n hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7\n laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe\n laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv\n spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq\n ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2\n unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr\n ''' # [c|z]*.f\n #######\n sclaux = laux + ' econd ' # s*.f\n dzlaux = laux + ' secnd ' # d*.f\n slasrc = lasrc + sd_lasrc # s*.f\n dlasrc = lasrc + sd_lasrc # d*.f\n clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f\n zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f\n oclasrc = ' icmax1 scsum1 ' # *.f\n ozlasrc = ' izmax1 dzsum1 ' # *.f\n sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \\\n + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \\\n + ['c%s.f' % f for f in (clasrc).split()] \\\n + ['z%s.f' % f for f in (zlasrc).split()] \\\n + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]\n sources = [os.path.join(src_dir, f) for f in sources]\n # Lapack 3.1:\n src_dir2 = os.path.join(src_dir, '..', 'INSTALL')\n sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']\n # Lapack 3.2.1:\n sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']\n sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']\n sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']\n # Should we check here actual existence of source files?\n # Yes, the file listing is different between 3.0 and 3.1\n # versions.\n sources = [f for f in sources if os.path.isfile(f)]\n info = {'sources': sources, 'language': 'f77'}\n self.set_info(**info)\n\natlas_version_c_text = r'''\n/* This file is generated from numpy/distutils/system_info.py */\nvoid ATL_buildinfo(void);\nint main(void) {\n ATL_buildinfo();\n return 0;\n}\n'''\n\n_cached_atlas_version = {}\n\n\ndef get_atlas_version(**config):\n libraries = config.get('libraries', [])\n library_dirs = config.get('library_dirs', [])\n key = (tuple(libraries), tuple(library_dirs))\n if key in _cached_atlas_version:\n return _cached_atlas_version[key]\n c = cmd_config(Distribution())\n atlas_version = None\n info = {}\n try:\n s, o = c.get_output(atlas_version_c_text,\n libraries=libraries, library_dirs=library_dirs,\n )\n if s and re.search(r'undefined reference to `_gfortran', o, re.M):\n s, o = c.get_output(atlas_version_c_text,\n libraries=libraries + ['gfortran'],\n library_dirs=library_dirs,\n )\n if not s:\n warnings.warn(textwrap.dedent(\"\"\"\n *****************************************************\n Linkage with ATLAS requires gfortran. 
Use\n\n python setup.py config_fc --fcompiler=gnu95 ...\n\n when building extension libraries that use ATLAS.\n Make sure that -lgfortran is used for C++ extensions.\n *****************************************************\n \"\"\"), stacklevel=2)\n dict_append(info, language='f90',\n define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])\n except Exception: # failed to get version from file -- maybe on Windows\n # look at directory name\n for o in library_dirs:\n m = re.search(r'ATLAS_(?P<version>\\d+[.]\\d+[.]\\d+)_', o)\n if m:\n atlas_version = m.group('version')\n if atlas_version is not None:\n break\n\n # final choice --- look at ATLAS_VERSION environment\n # variable\n if atlas_version is None:\n atlas_version = os.environ.get('ATLAS_VERSION', None)\n if atlas_version:\n dict_append(info, define_macros=[(\n 'ATLAS_INFO', _c_string_literal(atlas_version))\n ])\n else:\n dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])\n return atlas_version or '?.?.?', info\n\n if not s:\n m = re.search(r'ATLAS version (?P<version>\\d+[.]\\d+[.]\\d+)', o)\n if m:\n atlas_version = m.group('version')\n if atlas_version is None:\n if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):\n atlas_version = '3.2.1_pre3.3.6'\n else:\n log.info('Status: %d', s)\n log.info('Output: %s', o)\n\n elif atlas_version == '3.2.1_pre3.3.6':\n dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])\n else:\n dict_append(info, define_macros=[(\n 'ATLAS_INFO', _c_string_literal(atlas_version))\n ])\n result = _cached_atlas_version[key] = atlas_version, info\n return result\n\n\nclass lapack_opt_info(system_info):\n notfounderror = LapackNotFoundError\n\n # List of all known LAPACK libraries, in the default order\n lapack_order = ['mkl', 'openblas', 'flame',\n 'accelerate', 'atlas', 'lapack']\n order_env_var_name = 'NPY_LAPACK_ORDER'\n\n def _calc_info_mkl(self):\n info = get_info('lapack_mkl')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_openblas(self):\n info = get_info('openblas_lapack')\n if info:\n self.set_info(**info)\n return True\n info = get_info('openblas_clapack')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_flame(self):\n info = get_info('flame')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_atlas(self):\n info = get_info('atlas_3_10_threads')\n if not info:\n info = get_info('atlas_3_10')\n if not info:\n info = get_info('atlas_threads')\n if not info:\n info = get_info('atlas')\n if info:\n # Figure out if ATLAS has lapack...\n # If not we need the lapack library, but not BLAS!\n l = info.get('define_macros', [])\n if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \\\n or ('ATLAS_WITHOUT_LAPACK', None) in l:\n # Get LAPACK (with possible warnings)\n # If not found we don't accept anything\n # since we can't use ATLAS with LAPACK!\n lapack_info = self._get_info_lapack()\n if not lapack_info:\n return False\n dict_append(info, **lapack_info)\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_accelerate(self):\n info = get_info('accelerate')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _get_info_blas(self):\n # Default to get the optimized BLAS implementation\n info = get_info('blas_opt')\n if not info:\n warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)\n info_src = get_info('blas_src')\n if not info_src:\n warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)\n return {}\n dict_append(info, libraries=[('fblas_src', info_src)])\n 
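# (The ('fblas_src', info_src) entry above asks numpy.distutils to build\n # the reference BLAS from the sources found by 'blas_src' and link the\n # extensions against it.)\n 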
return info\n\n def _get_info_lapack(self):\n info = get_info('lapack')\n if not info:\n warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3)\n info_src = get_info('lapack_src')\n if not info_src:\n warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3)\n return {}\n dict_append(info, libraries=[('flapack_src', info_src)])\n return info\n\n def _calc_info_lapack(self):\n info = self._get_info_lapack()\n if info:\n info_blas = self._get_info_blas()\n dict_append(info, **info_blas)\n dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_from_envvar(self):\n info = {}\n info['language'] = 'f77'\n info['libraries'] = []\n info['include_dirs'] = []\n info['define_macros'] = []\n info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split()\n self.set_info(**info)\n return True\n\n def _calc_info(self, name):\n return getattr(self, '_calc_info_{}'.format(name))()\n\n def calc_info(self):\n lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name)\n if len(unknown_order) > 0:\n raise ValueError(\"lapack_opt_info user defined \"\n \"LAPACK order has unacceptable \"\n \"values: {}\".format(unknown_order))\n\n if 'NPY_LAPACK_LIBS' in os.environ:\n # Bypass autodetection, set language to F77 and use env var linker\n # flags directly\n self._calc_info_from_envvar()\n return\n\n for lapack in lapack_order:\n if self._calc_info(lapack):\n return\n\n if 'lapack' not in lapack_order:\n # Since the user may request *not* to use any library, we still need\n # to raise warnings to signal missing packages!\n warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2)\n warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)\n\n\nclass _ilp64_opt_info_mixin:\n symbol_suffix = None\n symbol_prefix = None\n\n def _check_info(self, info):\n macros = dict(info.get('define_macros', []))\n prefix = macros.get('BLAS_SYMBOL_PREFIX', '')\n suffix = macros.get('BLAS_SYMBOL_SUFFIX', '')\n\n if self.symbol_prefix not in (None, prefix):\n return False\n\n if self.symbol_suffix not in (None, suffix):\n return False\n\n return bool(info)\n\n\nclass lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin):\n notfounderror = LapackILP64NotFoundError\n lapack_order = ['openblas64_', 'openblas_ilp64']\n order_env_var_name = 'NPY_LAPACK_ILP64_ORDER'\n\n def _calc_info(self, name):\n info = get_info(name + '_lapack')\n if self._check_info(info):\n self.set_info(**info)\n return True\n return False\n\n\nclass lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info):\n # Same as lapack_ilp64_opt_info, but fix symbol names\n symbol_prefix = ''\n symbol_suffix = ''\n\n\nclass lapack64__opt_info(lapack_ilp64_opt_info):\n symbol_prefix = ''\n symbol_suffix = '64_'\n\n\nclass blas_opt_info(system_info):\n notfounderror = BlasNotFoundError\n # List of all known BLAS libraries, in the default order\n\n blas_order = ['mkl', 'blis', 'openblas',\n 'accelerate', 'atlas', 'blas']\n order_env_var_name = 'NPY_BLAS_ORDER'\n\n def _calc_info_mkl(self):\n info = get_info('blas_mkl')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_blis(self):\n info = get_info('blis')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_openblas(self):\n info = get_info('openblas')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_atlas(self):\n info = get_info('atlas_3_10_blas_threads')\n if not info:\n info = 
get_info('atlas_3_10_blas')\n if not info:\n info = get_info('atlas_blas_threads')\n if not info:\n info = get_info('atlas_blas')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_accelerate(self):\n info = get_info('accelerate')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_blas(self):\n # Warn about a non-optimized BLAS library\n warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3)\n info = {}\n dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])\n\n blas = get_info('blas')\n if blas:\n dict_append(info, **blas)\n else:\n # Not even BLAS was found!\n warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)\n\n blas_src = get_info('blas_src')\n if not blas_src:\n warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)\n return False\n dict_append(info, libraries=[('fblas_src', blas_src)])\n\n self.set_info(**info)\n return True\n\n def _calc_info_from_envvar(self):\n info = {}\n info['language'] = 'f77'\n info['libraries'] = []\n info['include_dirs'] = []\n info['define_macros'] = []\n info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split()\n if 'NPY_CBLAS_LIBS' in os.environ:\n info['define_macros'].append(('HAVE_CBLAS', None))\n info['extra_link_args'].extend(\n os.environ['NPY_CBLAS_LIBS'].split())\n self.set_info(**info)\n return True\n\n def _calc_info(self, name):\n return getattr(self, '_calc_info_{}'.format(name))()\n\n def calc_info(self):\n blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name)\n if len(unknown_order) > 0:\n raise ValueError(\"blas_opt_info user defined BLAS order has unacceptable values: {}\".format(unknown_order))\n\n if 'NPY_BLAS_LIBS' in os.environ:\n # Bypass autodetection, set language to F77 and use env var linker\n # flags directly\n self._calc_info_from_envvar()\n return\n\n for blas in blas_order:\n if self._calc_info(blas):\n return\n\n if 'blas' not in blas_order:\n # Since the user may request *not* to use any library, we still need\n # to raise warnings to signal missing packages!\n warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2)\n warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)\n\n\nclass blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin):\n notfounderror = BlasILP64NotFoundError\n blas_order = ['openblas64_', 'openblas_ilp64']\n order_env_var_name = 'NPY_BLAS_ILP64_ORDER'\n\n def _calc_info(self, name):\n info = get_info(name)\n if self._check_info(info):\n self.set_info(**info)\n return True\n return False\n\n\nclass blas_ilp64_plain_opt_info(blas_ilp64_opt_info):\n symbol_prefix = ''\n symbol_suffix = ''\n\n\nclass blas64__opt_info(blas_ilp64_opt_info):\n symbol_prefix = ''\n symbol_suffix = '64_'\n\n\nclass cblas_info(system_info):\n section = 'cblas'\n dir_env_var = 'CBLAS'\n # No default as it's used only in blas_info\n _lib_names = []\n notfounderror = BlasNotFoundError\n\n\nclass blas_info(system_info):\n section = 'blas'\n dir_env_var = 'BLAS'\n _lib_names = ['blas']\n notfounderror = BlasNotFoundError\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n opt = self.get_option_single('blas_libs', 'libraries')\n blas_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, blas_libs, [])\n if info is None:\n return\n else:\n info['include_dirs'] = self.get_include_dirs()\n if platform.system() == 'Windows':\n # The check for windows is needed because get_cblas_libs uses the\n # same compiler that was used to compile Python and msvc is\n # often 
not installed when mingw is being used. This rough\n # treatment is not desirable, but windows is tricky.\n info['language'] = 'f77' # XXX: is it generally true?\n # If cblas is given as an option, use those\n cblas_info_obj = cblas_info()\n cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')\n cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)\n if cblas_libs:\n info['libraries'] = cblas_libs + blas_libs\n info['define_macros'] = [('HAVE_CBLAS', None)]\n else:\n lib = self.get_cblas_libs(info)\n if lib is not None:\n info['language'] = 'c'\n info['libraries'] = lib\n info['define_macros'] = [('HAVE_CBLAS', None)]\n self.set_info(**info)\n\n def get_cblas_libs(self, info):\n \"\"\" Check whether we can link with CBLAS interface\n\n This method will search through several combinations of libraries\n to check whether CBLAS is present:\n\n 1. Libraries in ``info['libraries']``, as is\n 2. As 1. but also explicitly adding ``'cblas'`` as a library\n 3. As 1. but also explicitly adding ``'blas'`` as a library\n 4. Check only library ``'cblas'``\n 5. Check only library ``'blas'``\n\n Parameters\n ----------\n info : dict\n system information dictionary for compilation and linking\n\n Returns\n -------\n libraries : list of str or None\n a list of libraries that enables the use of CBLAS interface.\n Returns None if not found or a compilation error occurs.\n\n Since 1.17 returns a list.\n \"\"\"\n # primitive cblas check by looking for the header and trying to link\n # cblas or blas\n c = customized_ccompiler()\n tmpdir = tempfile.mkdtemp()\n s = textwrap.dedent(\"\"\"\\\n #include <cblas.h>\n int main(int argc, const char *argv[])\n {\n double a[4] = {1,2,3,4};\n double b[4] = {5,6,7,8};\n return cblas_ddot(4, a, 1, b, 1) > 10;\n }\"\"\")\n src = os.path.join(tmpdir, 'source.c')\n try:\n with open(src, 'wt') as f:\n f.write(s)\n\n try:\n # check we can compile (find headers)\n obj = c.compile([src], output_dir=tmpdir,\n include_dirs=self.get_include_dirs())\n except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):\n return None\n\n # check we can link (find library)\n # some systems have separate cblas and blas libs.\n for libs in [info['libraries'], ['cblas'] + info['libraries'],\n ['blas'] + info['libraries'], ['cblas'], ['blas']]:\n try:\n c.link_executable(obj, os.path.join(tmpdir, \"a.out\"),\n libraries=libs,\n library_dirs=info['library_dirs'],\n extra_postargs=info.get('extra_link_args', []))\n return libs\n except distutils.ccompiler.LinkError:\n pass\n finally:\n shutil.rmtree(tmpdir)\n return None\n\n\nclass openblas_info(blas_info):\n section = 'openblas'\n dir_env_var = 'OPENBLAS'\n _lib_names = ['openblas']\n _require_symbols = []\n notfounderror = BlasNotFoundError\n\n @property\n def symbol_prefix(self):\n try:\n return self.cp.get(self.section, 'symbol_prefix')\n except NoOptionError:\n return ''\n\n @property\n def symbol_suffix(self):\n try:\n return self.cp.get(self.section, 'symbol_suffix')\n except NoOptionError:\n return ''\n\n def _calc_info(self):\n c = customized_ccompiler()\n\n lib_dirs = self.get_lib_dirs()\n\n # Prefer to use libraries over openblas_libs\n opt = self.get_option_single('openblas_libs', 'libraries')\n openblas_libs = self.get_libs(opt, self._lib_names)\n\n info = self.check_libs(lib_dirs, openblas_libs, [])\n\n if c.compiler_type == \"msvc\" and info is None:\n from numpy.distutils.fcompiler import new_fcompiler\n f = new_fcompiler(c_compiler=c)\n if f and f.compiler_type == 'gnu95':\n # Try gfortran-compatible 
library files\n info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)\n # Skip lapack check, we'd need build_ext to do it\n skip_symbol_check = True\n elif info:\n skip_symbol_check = False\n info['language'] = 'c'\n\n if info is None:\n return None\n\n # Add extra info for OpenBLAS\n extra_info = self.calc_extra_info()\n dict_append(info, **extra_info)\n\n if not (skip_symbol_check or self.check_symbols(info)):\n return None\n\n info['define_macros'] = [('HAVE_CBLAS', None)]\n if self.symbol_prefix:\n info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)]\n if self.symbol_suffix:\n info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)]\n\n return info\n\n def calc_info(self):\n info = self._calc_info()\n if info is not None:\n self.set_info(**info)\n\n def check_msvc_gfortran_libs(self, library_dirs, libraries):\n # First, find the full path to each library directory\n library_paths = []\n for library in libraries:\n for library_dir in library_dirs:\n # MinGW static ext will be .a\n fullpath = os.path.join(library_dir, library + '.a')\n if os.path.isfile(fullpath):\n library_paths.append(fullpath)\n break\n else:\n return None\n\n # Generate numpy.distutils virtual static library file\n basename = self.__class__.__name__\n tmpdir = os.path.join(os.getcwd(), 'build', basename)\n if not os.path.isdir(tmpdir):\n os.makedirs(tmpdir)\n\n info = {'library_dirs': [tmpdir],\n 'libraries': [basename],\n 'language': 'f77'}\n\n fake_lib_file = os.path.join(tmpdir, basename + '.fobjects')\n fake_clib_file = os.path.join(tmpdir, basename + '.cobjects')\n with open(fake_lib_file, 'w') as f:\n f.write(\"\\n\".join(library_paths))\n with open(fake_clib_file, 'w') as f:\n pass\n\n return info\n\n def check_symbols(self, info):\n res = False\n c = customized_ccompiler()\n\n tmpdir = tempfile.mkdtemp()\n\n prototypes = \"\\n\".join(\"void %s%s%s();\" % (self.symbol_prefix,\n symbol_name,\n self.symbol_suffix)\n for symbol_name in self._require_symbols)\n calls = \"\\n\".join(\"%s%s%s();\" % (self.symbol_prefix,\n symbol_name,\n self.symbol_suffix)\n for symbol_name in self._require_symbols)\n s = textwrap.dedent(\"\"\"\\\n %(prototypes)s\n int main(int argc, const char *argv[])\n {\n %(calls)s\n return 0;\n }\"\"\") % dict(prototypes=prototypes, calls=calls)\n src = os.path.join(tmpdir, 'source.c')\n out = os.path.join(tmpdir, 'a.out')\n # Add the additional \"extra\" arguments\n try:\n extra_args = info['extra_link_args']\n except Exception:\n extra_args = []\n try:\n with open(src, 'wt') as f:\n f.write(s)\n obj = c.compile([src], output_dir=tmpdir)\n try:\n c.link_executable(obj, out, libraries=info['libraries'],\n library_dirs=info['library_dirs'],\n extra_postargs=extra_args)\n res = True\n except distutils.ccompiler.LinkError:\n res = False\n finally:\n shutil.rmtree(tmpdir)\n return res\n\nclass openblas_lapack_info(openblas_info):\n section = 'openblas'\n dir_env_var = 'OPENBLAS'\n _lib_names = ['openblas']\n _require_symbols = ['zungqr_']\n notfounderror = BlasNotFoundError\n\nclass openblas_clapack_info(openblas_lapack_info):\n _lib_names = ['openblas', 'lapack']\n\nclass openblas_ilp64_info(openblas_info):\n section = 'openblas_ilp64'\n dir_env_var = 'OPENBLAS_ILP64'\n _lib_names = ['openblas64']\n _require_symbols = ['dgemm_', 'cblas_dgemm']\n notfounderror = BlasILP64NotFoundError\n\n def _calc_info(self):\n info = super()._calc_info()\n if info is not None:\n info['define_macros'] += [('HAVE_BLAS_ILP64', None)]\n return info\n\nclass 
openblas_ilp64_lapack_info(openblas_ilp64_info):\n _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr']\n\n def _calc_info(self):\n info = super()._calc_info()\n if info:\n info['define_macros'] += [('HAVE_LAPACKE', None)]\n return info\n\nclass openblas64__info(openblas_ilp64_info):\n # ILP64 Openblas, with default symbol suffix\n section = 'openblas64_'\n dir_env_var = 'OPENBLAS64_'\n _lib_names = ['openblas64_']\n symbol_suffix = '64_'\n symbol_prefix = ''\n\nclass openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info):\n pass\n\nclass blis_info(blas_info):\n section = 'blis'\n dir_env_var = 'BLIS'\n _lib_names = ['blis']\n notfounderror = BlasNotFoundError\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n opt = self.get_option_single('blis_libs', 'libraries')\n blis_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs2(lib_dirs, blis_libs, [])\n if info is None:\n return\n\n # Add include dirs\n incl_dirs = self.get_include_dirs()\n dict_append(info,\n language='c',\n define_macros=[('HAVE_CBLAS', None)],\n include_dirs=incl_dirs)\n self.set_info(**info)\n\n\nclass flame_info(system_info):\n \"\"\" Usage of libflame for LAPACK operations\n\n This requires libflame to be compiled with lapack wrappers:\n\n ./configure --enable-lapack2flame ...\n\n Be aware that libflame 5.1.0 has some missing names in the shared library, so\n if you have problems, try the static flame library.\n \"\"\"\n section = 'flame'\n _lib_names = ['flame']\n notfounderror = FlameNotFoundError\n\n def check_embedded_lapack(self, info):\n \"\"\" libflame does not necessarily have a wrapper for fortran LAPACK, we need to check \"\"\"\n c = customized_ccompiler()\n\n tmpdir = tempfile.mkdtemp()\n s = textwrap.dedent(\"\"\"\\\n void zungqr_();\n int main(int argc, const char *argv[])\n {\n zungqr_();\n return 0;\n }\"\"\")\n src = os.path.join(tmpdir, 'source.c')\n out = os.path.join(tmpdir, 'a.out')\n # Add the additional \"extra\" arguments\n extra_args = info.get('extra_link_args', [])\n try:\n with open(src, 'wt') as f:\n f.write(s)\n obj = c.compile([src], output_dir=tmpdir)\n try:\n c.link_executable(obj, out, libraries=info['libraries'],\n library_dirs=info['library_dirs'],\n extra_postargs=extra_args)\n return True\n except distutils.ccompiler.LinkError:\n return False\n finally:\n shutil.rmtree(tmpdir)\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n flame_libs = self.get_libs('libraries', self._lib_names)\n\n info = self.check_libs2(lib_dirs, flame_libs, [])\n if info is None:\n return\n\n # Add the extra flag args to info\n extra_info = self.calc_extra_info()\n dict_append(info, **extra_info)\n\n if self.check_embedded_lapack(info):\n # check if the user has supplied all information required\n self.set_info(**info)\n else:\n # Try and get the BLAS lib to see if we can get it to work\n blas_info = get_info('blas_opt')\n if not blas_info:\n # since we already failed once, this ain't going to work either\n return\n\n # Now we need to merge the two dictionaries\n for key in blas_info:\n if isinstance(blas_info[key], list):\n info[key] = info.get(key, []) + blas_info[key]\n elif isinstance(blas_info[key], tuple):\n info[key] = info.get(key, ()) + blas_info[key]\n else:\n info[key] = info.get(key, '') + blas_info[key]\n\n # Now check again\n if self.check_embedded_lapack(info):\n self.set_info(**info)\n\n\nclass accelerate_info(system_info):\n section = 'accelerate'\n _lib_names = ['accelerate', 'veclib']\n notfounderror = 
BlasNotFoundError\n\n def calc_info(self):\n # Make possible to enable/disable from config file/env var\n libraries = os.environ.get('ACCELERATE')\n if libraries:\n libraries = [libraries]\n else:\n libraries = self.get_libs('libraries', self._lib_names)\n libraries = [lib.strip().lower() for lib in libraries]\n\n if (sys.platform == 'darwin' and\n not os.getenv('_PYTHON_HOST_PLATFORM', None)):\n # Use the system BLAS from Accelerate or vecLib under OSX\n args = []\n link_args = []\n if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \\\n 'x86_64' in get_platform() or \\\n 'i386' in platform.platform():\n intel = 1\n else:\n intel = 0\n if (os.path.exists('/System/Library/Frameworks'\n '/Accelerate.framework/') and\n 'accelerate' in libraries):\n if intel:\n args.extend(['-msse3'])\n args.extend([\n '-I/System/Library/Frameworks/vecLib.framework/Headers'])\n link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])\n elif (os.path.exists('/System/Library/Frameworks'\n '/vecLib.framework/') and\n 'veclib' in libraries):\n if intel:\n args.extend(['-msse3'])\n args.extend([\n '-I/System/Library/Frameworks/vecLib.framework/Headers'])\n link_args.extend(['-Wl,-framework', '-Wl,vecLib'])\n\n if args:\n self.set_info(extra_compile_args=args,\n extra_link_args=link_args,\n define_macros=[('NO_ATLAS_INFO', 3),\n ('HAVE_CBLAS', None)])\n\n return\n\nclass blas_src_info(system_info):\n # BLAS_SRC is deprecated, please do not use this!\n # Build or install a BLAS library via your package manager or from\n # source separately.\n section = 'blas_src'\n dir_env_var = 'BLAS_SRC'\n notfounderror = BlasSrcNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['blas']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'daxpy.f')):\n src_dir = d\n break\n if not src_dir:\n #XXX: Get sources from netlib. 
May be ask first.\n return\n blas1 = '''\n caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot\n dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2\n srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg\n dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax\n snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap\n scabs1\n '''\n blas2 = '''\n cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv\n chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv\n dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv\n sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger\n stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc\n zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2\n ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv\n '''\n blas3 = '''\n cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k\n dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm\n ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm\n '''\n sources = [os.path.join(src_dir, f + '.f') \\\n for f in (blas1 + blas2 + blas3).split()]\n #XXX: should we check here actual existence of source files?\n sources = [f for f in sources if os.path.isfile(f)]\n info = {'sources': sources, 'language': 'f77'}\n self.set_info(**info)\n\n\nclass x11_info(system_info):\n section = 'x11'\n notfounderror = X11NotFoundError\n _lib_names = ['X11']\n\n def __init__(self):\n system_info.__init__(self,\n default_lib_dirs=default_x11_lib_dirs,\n default_include_dirs=default_x11_include_dirs)\n\n def calc_info(self):\n if sys.platform in ['win32']:\n return\n lib_dirs = self.get_lib_dirs()\n include_dirs = self.get_include_dirs()\n opt = self.get_option_single('x11_libs', 'libraries')\n x11_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, x11_libs, [])\n if info is None:\n return\n inc_dir = None\n for d in include_dirs:\n if self.combine_paths(d, 'X11/X.h'):\n inc_dir = d\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir])\n self.set_info(**info)\n\n\nclass _numpy_info(system_info):\n section = 'Numeric'\n modulename = 'Numeric'\n notfounderror = NumericNotFoundError\n\n def __init__(self):\n include_dirs = []\n try:\n module = __import__(self.modulename)\n prefix = []\n for name in module.__file__.split(os.sep):\n if name == 'lib':\n break\n prefix.append(name)\n\n # Ask numpy for its own include path before attempting\n # anything else\n try:\n include_dirs.append(getattr(module, 'get_include')())\n except AttributeError:\n pass\n\n include_dirs.append(sysconfig.get_path('include'))\n except ImportError:\n pass\n py_incl_dir = sysconfig.get_path('include')\n include_dirs.append(py_incl_dir)\n py_pincl_dir = sysconfig.get_path('platinclude')\n if py_pincl_dir not in include_dirs:\n include_dirs.append(py_pincl_dir)\n for d in default_include_dirs:\n d = os.path.join(d, os.path.basename(py_incl_dir))\n if d not in include_dirs:\n include_dirs.append(d)\n system_info.__init__(self,\n default_lib_dirs=[],\n default_include_dirs=include_dirs)\n\n def calc_info(self):\n try:\n module = __import__(self.modulename)\n except ImportError:\n return\n info = {}\n macros = []\n for v in ['__version__', 'version']:\n vrs = getattr(module, v, None)\n if vrs is None:\n continue\n macros = [(self.modulename.upper() + '_VERSION',\n _c_string_literal(vrs)),\n (self.modulename.upper(), None)]\n break\n dict_append(info, define_macros=macros)\n include_dirs = self.get_include_dirs()\n inc_dir = None\n for d in 
include_dirs:\n if self.combine_paths(d,\n os.path.join(self.modulename,\n 'arrayobject.h')):\n inc_dir = d\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir])\n if info:\n self.set_info(**info)\n return\n\n\nclass numarray_info(_numpy_info):\n section = 'numarray'\n modulename = 'numarray'\n\n\nclass Numeric_info(_numpy_info):\n section = 'Numeric'\n modulename = 'Numeric'\n\n\nclass numpy_info(_numpy_info):\n section = 'numpy'\n modulename = 'numpy'\n\n\nclass numerix_info(system_info):\n section = 'numerix'\n\n def calc_info(self):\n which = None, None\n if os.getenv(\"NUMERIX\"):\n which = os.getenv(\"NUMERIX\"), \"environment var\"\n # If all the above fail, default to numpy.\n if which[0] is None:\n which = \"numpy\", \"defaulted\"\n try:\n import numpy # noqa: F401\n which = \"numpy\", \"defaulted\"\n except ImportError as e:\n msg1 = str(e)\n try:\n import Numeric # noqa: F401\n which = \"numeric\", \"defaulted\"\n except ImportError as e:\n msg2 = str(e)\n try:\n import numarray # noqa: F401\n which = \"numarray\", \"defaulted\"\n except ImportError as e:\n msg3 = str(e)\n log.info(msg1)\n log.info(msg2)\n log.info(msg3)\n which = which[0].strip().lower(), which[1]\n if which[0] not in [\"numeric\", \"numarray\", \"numpy\"]:\n raise ValueError(\"numerix selector must be either 'Numeric' \"\n \"or 'numarray' or 'numpy' but the value obtained\"\n \" from the %s was '%s'.\" % (which[1], which[0]))\n os.environ['NUMERIX'] = which[0]\n self.set_info(**get_info(which[0]))\n\n\nclass f2py_info(system_info):\n def calc_info(self):\n try:\n import numpy.f2py as f2py\n except ImportError:\n return\n f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')\n self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],\n include_dirs=[f2py_dir])\n return\n\n\nclass boost_python_info(system_info):\n section = 'boost_python'\n dir_env_var = 'BOOST'\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['boost*']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',\n 'module.cpp')):\n src_dir = d\n break\n if not src_dir:\n return\n py_incl_dirs = [sysconfig.get_path('include')]\n py_pincl_dir = sysconfig.get_path('platinclude')\n if py_pincl_dir not in py_incl_dirs:\n py_incl_dirs.append(py_pincl_dir)\n srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')\n bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))\n bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))\n info = {'libraries': [('boost_python_src',\n {'include_dirs': [src_dir] + py_incl_dirs,\n 'sources':bpl_srcs}\n )],\n 'include_dirs': [src_dir],\n }\n if info:\n self.set_info(**info)\n return\n\n\nclass agg2_info(system_info):\n section = 'agg2'\n dir_env_var = 'AGG2'\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['agg2*']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):\n src_dir = d\n break\n if not src_dir:\n return\n if sys.platform == 'win32':\n agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',\n 'win32', 'agg_win32_bmp.cpp'))\n else:\n 
agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))\n agg2_srcs += [os.path.join(src_dir, 'src', 'platform',\n 'X11',\n 'agg_platform_support.cpp')]\n\n info = {'libraries':\n [('agg2_src',\n {'sources': agg2_srcs,\n 'include_dirs': [os.path.join(src_dir, 'include')],\n }\n )],\n 'include_dirs': [os.path.join(src_dir, 'include')],\n }\n if info:\n self.set_info(**info)\n return\n\n\nclass _pkg_config_info(system_info):\n section = None\n config_env_var = 'PKG_CONFIG'\n default_config_exe = 'pkg-config'\n append_config_exe = ''\n version_macro_name = None\n release_macro_name = None\n version_flag = '--modversion'\n cflags_flag = '--cflags'\n\n def get_config_exe(self):\n if self.config_env_var in os.environ:\n return os.environ[self.config_env_var]\n return self.default_config_exe\n\n def get_config_output(self, config_exe, option):\n cmd = config_exe + ' ' + self.append_config_exe + ' ' + option\n try:\n o = subprocess.check_output(cmd)\n except (OSError, subprocess.CalledProcessError):\n pass\n else:\n o = filepath_from_subprocess_output(o)\n return o\n\n def calc_info(self):\n config_exe = find_executable(self.get_config_exe())\n if not config_exe:\n log.warn('File not found: %s. Cannot determine %s info.' \\\n % (config_exe, self.section))\n return\n info = {}\n macros = []\n libraries = []\n library_dirs = []\n include_dirs = []\n extra_link_args = []\n extra_compile_args = []\n version = self.get_config_output(config_exe, self.version_flag)\n if version:\n macros.append((self.__class__.__name__.split('.')[-1].upper(),\n _c_string_literal(version)))\n if self.version_macro_name:\n macros.append((self.version_macro_name + '_%s'\n % (version.replace('.', '_')), None))\n if self.release_macro_name:\n release = self.get_config_output(config_exe, '--release')\n if release:\n macros.append((self.release_macro_name + '_%s'\n % (release.replace('.', '_')), None))\n opts = self.get_config_output(config_exe, '--libs')\n if opts:\n for opt in opts.split():\n if opt[:2] == '-l':\n libraries.append(opt[2:])\n elif opt[:2] == '-L':\n library_dirs.append(opt[2:])\n else:\n extra_link_args.append(opt)\n opts = self.get_config_output(config_exe, self.cflags_flag)\n if opts:\n for opt in opts.split():\n if opt[:2] == '-I':\n include_dirs.append(opt[2:])\n elif opt[:2] == '-D':\n if '=' in opt:\n n, v = opt[2:].split('=')\n macros.append((n, v))\n else:\n macros.append((opt[2:], None))\n else:\n extra_compile_args.append(opt)\n if macros:\n dict_append(info, define_macros=macros)\n if libraries:\n dict_append(info, libraries=libraries)\n if library_dirs:\n dict_append(info, library_dirs=library_dirs)\n if include_dirs:\n dict_append(info, include_dirs=include_dirs)\n if extra_link_args:\n dict_append(info, extra_link_args=extra_link_args)\n if extra_compile_args:\n dict_append(info, extra_compile_args=extra_compile_args)\n if info:\n self.set_info(**info)\n return\n\n\nclass wx_info(_pkg_config_info):\n section = 'wx'\n config_env_var = 'WX_CONFIG'\n default_config_exe = 'wx-config'\n append_config_exe = ''\n version_macro_name = 'WX_VERSION'\n release_macro_name = 'WX_RELEASE'\n version_flag = '--version'\n cflags_flag = '--cxxflags'\n\n\nclass gdk_pixbuf_xlib_2_info(_pkg_config_info):\n section = 'gdk_pixbuf_xlib_2'\n append_config_exe = 'gdk-pixbuf-xlib-2.0'\n version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'\n\n\nclass gdk_pixbuf_2_info(_pkg_config_info):\n section = 'gdk_pixbuf_2'\n append_config_exe = 'gdk-pixbuf-2.0'\n version_macro_name = 'GDK_PIXBUF_VERSION'\n\n\nclass 
gdk_x11_2_info(_pkg_config_info):\n section = 'gdk_x11_2'\n append_config_exe = 'gdk-x11-2.0'\n version_macro_name = 'GDK_X11_VERSION'\n\n\nclass gdk_2_info(_pkg_config_info):\n section = 'gdk_2'\n append_config_exe = 'gdk-2.0'\n version_macro_name = 'GDK_VERSION'\n\n\nclass gdk_info(_pkg_config_info):\n section = 'gdk'\n append_config_exe = 'gdk'\n version_macro_name = 'GDK_VERSION'\n\n\nclass gtkp_x11_2_info(_pkg_config_info):\n section = 'gtkp_x11_2'\n append_config_exe = 'gtk+-x11-2.0'\n version_macro_name = 'GTK_X11_VERSION'\n\n\nclass gtkp_2_info(_pkg_config_info):\n section = 'gtkp_2'\n append_config_exe = 'gtk+-2.0'\n version_macro_name = 'GTK_VERSION'\n\n\nclass xft_info(_pkg_config_info):\n section = 'xft'\n append_config_exe = 'xft'\n version_macro_name = 'XFT_VERSION'\n\n\nclass freetype2_info(_pkg_config_info):\n section = 'freetype2'\n append_config_exe = 'freetype2'\n version_macro_name = 'FREETYPE2_VERSION'\n\n\nclass amd_info(system_info):\n section = 'amd'\n dir_env_var = 'AMD'\n _lib_names = ['amd']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n opt = self.get_option_single('amd_libs', 'libraries')\n amd_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, amd_libs, [])\n if info is None:\n return\n\n include_dirs = self.get_include_dirs()\n\n inc_dir = None\n for d in include_dirs:\n p = self.combine_paths(d, 'amd.h')\n if p:\n inc_dir = os.path.dirname(p[0])\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir],\n define_macros=[('SCIPY_AMD_H', None)],\n swig_opts=['-I' + inc_dir])\n\n self.set_info(**info)\n return\n\n\nclass umfpack_info(system_info):\n section = 'umfpack'\n dir_env_var = 'UMFPACK'\n notfounderror = UmfpackNotFoundError\n _lib_names = ['umfpack']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n opt = self.get_option_single('umfpack_libs', 'libraries')\n umfpack_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, umfpack_libs, [])\n if info is None:\n return\n\n include_dirs = self.get_include_dirs()\n\n inc_dir = None\n for d in include_dirs:\n p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')\n if p:\n inc_dir = os.path.dirname(p[0])\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir],\n define_macros=[('SCIPY_UMFPACK_H', None)],\n swig_opts=['-I' + inc_dir])\n\n dict_append(info, **get_info('amd'))\n\n self.set_info(**info)\n return\n\n\ndef combine_paths(*args, **kws):\n \"\"\" Return a list of existing paths composed by all combinations of\n items from arguments.\n \"\"\"\n r = []\n for a in args:\n if not a:\n continue\n if is_string(a):\n a = [a]\n r.append(a)\n args = r\n if not args:\n return []\n if len(args) == 1:\n result = reduce(lambda a, b: a + b, map(glob, args[0]), [])\n elif len(args) == 2:\n result = []\n for a0 in args[0]:\n for a1 in args[1]:\n result.extend(glob(os.path.join(a0, a1)))\n else:\n result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))\n log.debug('(paths: %s)', ','.join(result))\n return result\n\nlanguage_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}\ninv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}\n\n\ndef dict_append(d, **kws):\n languages = []\n for k, v in kws.items():\n if k == 'language':\n languages.append(v)\n continue\n if k in d:\n if k in ['library_dirs', 'include_dirs',\n 'extra_compile_args', 'extra_link_args',\n 'runtime_library_dirs', 'define_macros']:\n [d[k].append(vv) for vv in v if vv not in d[k]]\n else:\n d[k].extend(v)\n else:\n d[k] = v\n 
if languages:\n l = inv_language_map[max([language_map.get(l, 0) for l in languages])]\n d['language'] = l\n return\n\n\ndef parseCmdLine(argv=(None,)):\n import optparse\n parser = optparse.OptionParser(\"usage: %prog [-v] [info objs]\")\n parser.add_option('-v', '--verbose', action='store_true', dest='verbose',\n default=False,\n help='be verbose and print more messages')\n\n opts, args = parser.parse_args(args=argv[1:])\n return opts, args\n\n\ndef show_all(argv=None):\n import inspect\n if argv is None:\n argv = sys.argv\n opts, args = parseCmdLine(argv)\n if opts.verbose:\n log.set_threshold(log.DEBUG)\n else:\n log.set_threshold(log.INFO)\n show_only = []\n for n in args:\n if n[-5:] != '_info':\n n = n + '_info'\n show_only.append(n)\n show_all = not show_only\n _gdict_ = globals().copy()\n for name, c in _gdict_.items():\n if not inspect.isclass(c):\n continue\n if not issubclass(c, system_info) or c is system_info:\n continue\n if not show_all:\n if name not in show_only:\n continue\n del show_only[show_only.index(name)]\n conf = c()\n conf.verbosity = 2\n # we don't need the result, but we want\n # the side effect of printing diagnostics\n conf.get_info()\n if show_only:\n log.info('Info classes not defined: %s', ','.join(show_only))\n\nif __name__ == \"__main__\":\n show_all()\n"
] | [
[
"numpy.distutils._shell_utils.NativeParser.split",
"numpy.distutils.log.set_threshold",
"numpy.distutils.misc_util.is_string",
"numpy.distutils.misc_util.get_shared_lib_extension",
"numpy.distutils.misc_util.is_sequence",
"numpy.distutils.log.warn",
"numpy.distutils.exec_command.filepath_from_subprocess_output",
"numpy.distutils.customized_ccompiler",
"numpy.distutils.log.get_threshold",
"numpy.distutils.log.info",
"numpy.distutils.fcompiler.new_fcompiler"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.24",
"1.22",
"1.23"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NVIDIA/Torch-TensorRT | [
"1a22204fecec690bc3c2a318dab4f57b98c57f05",
"1a22204fecec690bc3c2a318dab4f57b98c57f05",
"1a22204fecec690bc3c2a318dab4f57b98c57f05",
"1a22204fecec690bc3c2a318dab4f57b98c57f05"
] | [
"py/torch_tensorrt/fx/test/converters/acc_op/test_eq.py",
"py/torch_tensorrt/fx/test/converters/acc_op/test_embedding.py",
"py/torch_tensorrt/fx/input_tensor_spec.py",
"py/torch_tensorrt/fx/test/converters/acc_op/test_clamp.py"
] | [
"import torch\nimport torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops\nfrom parameterized import parameterized\nfrom torch.testing._internal.common_utils import run_tests\nfrom torch_tensorrt.fx.tools.common_fx2trt import AccTestCase\n\n\nclass TestEqConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d\", torch.randn(3, 4), torch.randn(3, 4)),\n (\"rand_3d\", torch.randn(3, 4, 5), torch.randn(3, 4, 5)),\n (\"rand_4d\", torch.randn(3, 4, 5, 6), torch.randn(3, 4, 5, 6)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_bool\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.int),\n ),\n (\n \"rand_4d_float_bool_dim\",\n torch.randn(3, 4, 5, 6).to(torch.float),\n torch.randn(3, 1, 1, 6).to(torch.bool),\n ),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def forward(self, x, y):\n mask = torch.eq(x, y)\n return x.masked_fill(mask, 5)\n\n inputs = [\n input,\n other,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestEqMethodConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d\", torch.randn(3, 4), torch.randn(3, 4)),\n (\"rand_3d\", torch.randn(3, 4, 5), torch.randn(3, 4, 5)),\n (\"rand_4d\", torch.randn(3, 4, 5, 6), torch.randn(3, 4, 5, 6)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_bool\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.int),\n ),\n (\n \"rand_4d_float_bool_dim\",\n torch.randn(3, 4, 5, 6).to(torch.float),\n torch.randn(3, 1, 1, 6).to(torch.bool),\n ),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def forward(self, x, y):\n mask = x.eq(y)\n return x.masked_fill(mask, 5)\n\n inputs = [\n input,\n other,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestEqOperatorConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d\", torch.randn(3, 4), torch.randn(3, 4)),\n (\"rand_3d\", torch.randn(3, 4, 5), torch.randn(3, 4, 5)),\n (\"rand_4d\", torch.randn(3, 4, 5, 6), torch.randn(3, 4, 5, 6)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_bool\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.int),\n ),\n (\n \"rand_4d_float_bool_dim\",\n torch.randn(3, 4, 5, 6).to(torch.float),\n torch.randn(3, 1, 1, 6).to(torch.bool),\n ),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def forward(self, x, y):\n mask = x == y\n return x.masked_fill(mask, 5)\n\n inputs = [\n input,\n other,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestEqOperatorSimpleConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d_float_bool\", torch.randn(3, 4), torch.randn(3, 4).to(torch.bool)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.randn(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_bool_bool\",\n torch.randn(3, 4).to(torch.bool),\n torch.randn(3, 
4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.randn(3, 4).to(torch.int),\n ),\n (\n \"rand_2d_float_single_bool\",\n torch.randn(3, 4),\n torch.tensor(0).to(torch.bool),\n ),\n (\n \"rand_2d_int_single_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.tensor(0).to(torch.bool),\n ),\n (\n \"rand_2d_bool_single_bool\",\n torch.randn(3, 4).to(torch.bool),\n torch.tensor(0).to(torch.bool),\n ),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def forward(self, x, y):\n return x == y\n\n inputs = [\n input,\n other,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestEqOperatorConstantConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d_float_bool\", torch.randn(3, 4), torch.randn(3, 4).to(torch.bool)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.randn(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_bool_bool\",\n torch.randn(3, 4).to(torch.bool),\n torch.randn(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.randn(3, 4).to(torch.int),\n ),\n (\"rand_2d_float_single_bool\", torch.randn(3, 4), False),\n (\"rand_2d_int_single_bool\", torch.randn(3, 4).to(torch.int), False),\n (\"rand_2d_bool_single_bool\", torch.randn(3, 4).to(torch.bool), False),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.other = other\n\n def forward(self, x):\n return x == self.other\n\n inputs = [\n input,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestConstInputConverter(AccTestCase):\n def test_eq(self):\n class Eq(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return x.shape[0] == 4\n\n input = torch.randn(3, 4)\n inputs = [\n input,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nif __name__ == \"__main__\":\n run_tests()\n",
"import unittest\n\nimport torch\n\nimport torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops\nfrom parameterized import param, parameterized\nfrom torch.testing._internal.common_utils import run_tests\nfrom torch_tensorrt.fx.tools.common_fx2trt import AccTestCase\n\n\[email protected](\n \"Current implementation is limited. All implementations in hf use int64. T113156424\"\n)\nclass TestEmbeddingConverter(AccTestCase):\n @parameterized.expand(\n [\n param(\n test_name=\"1d_indices\",\n indices_tensor=torch.tensor([3, 1, 2]),\n weights_tensor=torch.randn(5, 10),\n ),\n param(\n test_name=\"2d_indices\",\n indices_tensor=torch.tensor([[3, 1, 2], [4, 1, 3]]),\n weights_tensor=torch.randn(5, 10),\n ),\n param(\n test_name=\"3d_indices\",\n indices_tensor=torch.tensor([[[0, 1], [2, 3]], [[3, 4], [4, 0]]]),\n weights_tensor=torch.randn(5, 10),\n ),\n ]\n )\n def test_embedding(\n self,\n test_name,\n indices_tensor,\n weights_tensor,\n padding_idx=None,\n max_norm=None,\n norm_type=2.0,\n scale_grad_by_freq=False,\n sparse=False,\n ):\n class TestEmbedding(torch.nn.Module):\n def forward(self, indices, weights):\n return torch.nn.functional.embedding(\n input=indices,\n weight=weights,\n padding_idx=padding_idx,\n max_norm=max_norm,\n norm_type=norm_type,\n scale_grad_by_freq=scale_grad_by_freq,\n sparse=sparse,\n )\n\n self.run_test(\n TestEmbedding(),\n inputs=[indices_tensor.int(), weights_tensor.float()],\n expected_ops={acc_ops.embedding},\n test_implicit_batch_dim=False,\n test_explicit_batch_dim=True,\n )\n\n\nif __name__ == \"__main__\":\n run_tests()\n",
"from typing import Iterable, List, NamedTuple, Sequence, Tuple\n\nimport torch\n\nfrom .types import Shape, ShapeRange\nfrom .utils import get_dynamic_dims\n\n\nclass InputTensorSpec(NamedTuple):\n \"\"\"\n This class contains the information of a input tensor.\n\n shape: shape of the tensor.\n\n dtype: dtyep of the tensor.\n\n device: device of the tensor. This is only used to generate inputs to the given model\n in order to run shape prop. For TensorRT engine, inputs have to be on cuda device.\n\n shape_ranges: If dynamic shape is needed (shape has dimensions of -1), then this field\n has to be provided (default is empty list). Every shape_range is a tuple of three\n tuples ((min_input_shape), (optimized_input_shape), (max_input_shape)). Each shape_range\n is used to populate a TensorRT optimization profile.\n e.g. If the input shape varies from (1, 224) to (100, 224) and we want to optimize\n for (25, 224) because it's the most common input shape, then we set shape_ranges to\n ((1, 224), (25, 225), (100, 224)).\n\n has_batch_dim: Whether the shape includes batch dimension. Batch dimension has to be provided\n if the engine want to run with dynamic shape.\n \"\"\"\n\n shape: Shape\n dtype: torch.dtype\n device: torch.device = torch.device(\"cpu\")\n shape_ranges: List[ShapeRange] = []\n has_batch_dim: bool = True\n\n @classmethod\n def from_tensor(cls, tensor: torch.Tensor) -> \"InputTensorSpec\":\n \"\"\"\n Produce an InputTenosrSpec named tuple which contains the\n information of the given PyTorch tensor.\n\n Args:\n tensor (torch.Tensor): A PyTorch tensor.\n\n Returns:\n An InputTensorSpec named tuple.\n \"\"\"\n return cls(tensor.shape, tensor.dtype, tensor.device)\n\n @classmethod\n def from_tensors(cls, tensors: Sequence[torch.Tensor]) -> List[\"InputTensorSpec\"]:\n \"\"\"\n Produce a list of InputTenosrSpec named tuples which contain\n the information of all the given PyTorch tensors.\n\n Args:\n tensors (Iterable[torch.Tensor]): A list of PyTorch tensors.\n\n Returns:\n A list of InputTensorSpec named tuples.\n \"\"\"\n assert isinstance(tensors, (list, tuple))\n return [cls.from_tensor(t) for t in tensors]\n\n @classmethod\n def from_tensors_with_dynamic_batch_size(\n cls,\n tensors: Sequence[torch.Tensor],\n batch_size_range: Tuple[int, int, int],\n opt_profile_replica: int = 1,\n ) -> List[\"InputTensorSpec\"]:\n \"\"\"\n Produce a list of InputTenosrSpec named tuples which would contain\n the information of all the given PyTorch tensors. The produced input\n tensor specs will treat all tensors' first dimension as batch dimension\n and mark them as dynmaic.\n\n Args:\n tensors (Sequence[torch.Tensor]): A list of PyTorch tensors.\n batch_size_range (Tuple[int, int, int]): The first integer indicates\n the smallest batch size allowed. The second integer indiceates\n the batch size that we'll optimize for. 
The third integer indicates\n the largest batch size allowed.\n\n Returns:\n A list of InputTensorSpec named tuples with dynamic ranges.\n \"\"\"\n input_specs = []\n batch_size = tensors[0].size(0)\n\n for i, tensor in enumerate(tensors):\n assert batch_size == tensor.size(\n 0\n ), f\"The {i}th tensor (shape: {tensor.shape}) doesn't have the correct batch size: {batch_size}.\"\n shape = list(tensor.shape)\n shape[0] = -1\n shape_ranges: List[ShapeRange] = [tuple(tuple([bs] + shape[1:]) for bs in batch_size_range)] * opt_profile_replica # type: ignore[list-item]\n input_specs.append(\n cls(tuple(shape), tensor.dtype, tensor.device, shape_ranges)\n )\n\n return input_specs\n\n def to_random_tensor(self):\n shape = tuple(self.shape)\n if len(get_dynamic_dims(shape)):\n shape = tuple(self.shape_ranges[0][1])\n elif not self.has_batch_dim:\n shape = (1,) + tuple(shape)\n\n return torch.randn(shape).to(dtype=self.dtype, device=self.device)\n\n @staticmethod\n def create_inputs_from_specs(input_specs: Iterable[\"InputTensorSpec\"]):\n inputs = []\n\n for spec in input_specs:\n inputs.append(spec.to_random_tensor())\n\n return inputs\n",
"import torch\nimport torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops\nfrom parameterized import param, parameterized\nfrom torch.testing._internal.common_utils import run_tests\nfrom torch_tensorrt.fx.tools.common_fx2trt import AccTestCase\n\n\nclass TestClampConverter(AccTestCase):\n @parameterized.expand(\n [\n param(\"default\", min=-1, max=0),\n param(\"min\", min=0.5),\n param(\"max\", max=0.5),\n param(\"minBiggerThanMax\", min=1, max=0),\n ]\n )\n def test_clamp(\n self,\n test_name,\n min=None,\n max=None,\n ):\n class TestModule(torch.nn.Module):\n def forward(self, x):\n return torch.clamp(x, min, max)\n\n inputs = [torch.randn(3, 4)]\n self.run_test(TestModule(), inputs, expected_ops={acc_ops.clamp})\n\n\nif __name__ == \"__main__\":\n run_tests()\n"
] | [
[
"torch.zeros",
"torch.eq",
"torch.randn",
"torch.tensor",
"torch.testing._internal.common_utils.run_tests"
],
[
"torch.nn.functional.embedding",
"torch.randn",
"torch.testing._internal.common_utils.run_tests",
"torch.tensor"
],
[
"torch.device",
"torch.randn"
],
[
"torch.randn",
"torch.clamp",
"torch.testing._internal.common_utils.run_tests"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hbrunie/PeleLM | [
"8b8c07aa1770c07e087f8976b6e16a71de68f751"
] | [
"Exec/RegTests/FlameSheet/pprocConvOrder.py"
] | [
"#!/usr/bin/env python3\n\n# Template post-processing script for PeleLM convergence analysis\n# Must be used after multirun.py script\n# Input are limited by the regression framework.\n\n# Usage:\n# ./pprocConvOrder.py --pproc_exec prog.exe --test_name DummyTest\n\n# Input:\n# * --pproc_exec: the processing executable path\n# * --test_name: a TESTNAME that will looked for during the postprocessing\n\n# \"Internal\" user input \n# * pproc_type:\n# - pproc_type == \"fcompare\". fcompare is used to get the error from the initial solution (== analytical solution) \n# - pproc_type == \"diffsamedomain\". Analytical solution is not known and errors are computed from the next finer grid \n# * vars : a list of the variables of interest (no check is done on whether it exists in plt ...)\n# * resolution : a list of the resolutions to post-process (should be consistent with multirun.py, if used)\n\n# Output:\n# * Convergence_${TESTNAME}.png file with the log-log plot of the error vs. resolution.\n# * ConvTable_${TESTNAME}.tex file with the convergence rate formatted in an LaTeX table.\n# * Convergence_${TESTNAME}.dat plain text file with the convergence rate.\n\n# Head's up : \n# - The script will get a copy of the post-processing program (if not already there) in the testing folder. The name of this folder is assumed to be the TESTNAME. \n# - The plt files naming convention is: ${TESTNAME}_plt_${resolution}_*****. It is used to get the first and last solution of a test at a given resolution.\n# - Errors are parsed from the screen output of the standard fcompare/diffsamedomain. Beware of any change of these programs. \n\nimport sys\nimport os\nimport fnmatch\nimport shutil\nimport argparse\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nUSAGE = \"\"\"\n Template post-processing script for PeleLM convergence analysis\n\"\"\"\n\ndef pproc(args):\n\n # User data\n vars=[\"y_velocity\", \"density\", \"temp\", \"Y(O2)\", \"Y(CH4)\", \"Y(CO2)\", \"Y(CO)\", \"Y(H2O)\" ]\n resolution = [64,128,256,512] \n pproc_type = \"diffsamedomain\"\n\n # Get a local copy of post-processing executable\n run_dir = os.getcwd()\n if ( not os.path.isfile(os.path.basename(args.pproc_exe)) ):\n shutil.copy(args.pproc_exe, run_dir)\n\n # Check the test name: current folder name is default\n if ( args.test_name == \"None\" ):\n args.test_name = run_dir.split(\"/\")[-1]\n\n # Run the postprocessing\n if ( pproc_type == \"fcompare\" ): # running fcompare since analytical solution is known\n errors = np.empty([len(resolution),len(vars)+1])\n pltfile=[]\n for res in range(len(resolution)):\n case = resolution[res]\n errors[res,0] = case\n\n # Get the fcompare inputs: first and last solution of current case\n # TODO: the analytical solution might not be plt****_00000 ...\n for f in os.listdir(run_dir):\n if ( not fnmatch.fnmatch(f, '*old*')):\n if (f.startswith(\"{}_plt_{}_\".format(args.test_name,case))):\n pltfile.append(f)\n pltfile.sort()\n outfile = \"error_{}.analysis.out\".format(case)\n os.system(\"./{} -n 2 {} {} > {}\".format(os.path.basename(args.pproc_exe), pltfile[0], pltfile[-1], outfile))\n pltfile.clear()\n \n # Extract errors on each variable\n with open(outfile) as fp:\n for i, line in enumerate(fp):\n if (i >= 5):\n var = line.split()[0]\n for v in range(len(vars)):\n if ( var == vars[v] ):\n errors[res,v+1] = line.split()[1]\n os.system(\"rm {}\".format(outfile))\n elif ( pproc_type == \"diffsamedomain\" ): # running diffsamedomain. 
No analytical sol ...\n errors = np.empty([len(resolution)-1,len(vars)+1])\n pltfile=[]\n pltfilenext=[]\n for res in range(len(resolution)-1):\n case = resolution[res]\n nextcase = resolution[res+1]\n errors[res,0] = case\n\n # Get the diffsamedomain inputs: last solutions of current \n # and next finer cases. These run should have been runned to the same final time\n for f in os.listdir(run_dir):\n if ( not fnmatch.fnmatch(f, '*old*')):\n if (f.startswith(\"{}_plt_{}_\".format(args.test_name,case))):\n pltfile.append(f)\n if (f.startswith(\"{}_plt_{}_\".format(args.test_name,nextcase))):\n pltfilenext.append(f)\n pltfile.sort()\n pltfilenext.sort()\n outfile = \"error_{}.analysis.out\".format(case)\n os.system(\"./{} infile1={} reffile={} > {}\".format(os.path.basename(args.pproc_exe), pltfile[-1], pltfilenext[-1], outfile))\n pltfile.clear()\n pltfilenext.clear()\n\n # Extract errors on each variable\n with open(outfile) as fp:\n for i, line in enumerate(fp):\n if (i >= 5):\n var = line.split(\":\")[0]\n for v in range(len(vars)):\n if ( var.split(\" \")[0] == vars[v] ):\n errors[res,v+1] = line.split(\":\")[1]\n os.system(\"rm {}\".format(outfile))\n else:\n print(\"Wrong pproc_type: {}. should be either fcompare or diffsamedomain\".format(pproc_type))\n return\n\n\n print(errors)\n # Plot data\n plotdata(errors, args.test_name, vars)\n writetex(errors, args.test_name, vars)\n writeRegTestFile(errors, args.test_name, vars)\n\ndef plotdata(data, test_name, vars):\n # Evaluate 2nd order slope\n snd_order = data[:,1]*1.05\n for i in range(1,len(data[:,1])):\n snd_order[i] = snd_order[i-1]/np.exp(2.0*np.log(2.0))\n for i in range(0, len(vars)): \n plt.plot(data[:,0], data[:,i+1], label=\"{}\".format(vars[i]))\n plt.plot(data[:,0], snd_order[:],linestyle='--',color='k', label='2nd-order')\n plt.xlabel(\"Resolution\")\n plt.ylabel(\"Error L2norm\")\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.grid(which='both',color='k', linestyle=':', linewidth=1)\n plt.legend(bbox_to_anchor=(0.9, 0.9), loc=1, borderaxespad=0.)\n plt.savefig(\"Convergence_{}.png\".format(test_name))\n\ndef writetex(data, test_name, vars):\n # Evaluate order\n conv_order = np.empty([len(data[:,0])-1,len(vars)])\n for v in range(len(vars)):\n for i in range(len(conv_order[:,0])):\n conv_order[i,v] = np.log(data[i,v+1]/data[i+1,v+1])/np.log(2.0)\n fout = open(\"ConvTable_{}.tex\".format(test_name), \"w\") \n fout.write(\"\\\\begin{table}[ht!]\\n\")\n fout.write(\"\\centering\\n\")\n fout.write(\"\\\\begin{tabular}{l|\")\n for i in range(len(conv_order[:,0])):\n fout.write(\"c \")\n fout.write(\"}\\n\") \n fout.write(\"\\hline\\n\")\n fout.write(\"Variable \")\n for i in range(len(conv_order[:,0])):\n fout.write(\"& {}/{} \".format(data[i+1,0],data[i,0]))\n fout.write(\"\\\\\\\\\\n\\hline\\hline\\n\")\n for v in range(len(vars)):\n fout.write(\"{} \".format(vars[v].replace(\"_\",\"\\_\")))\n for i in range(len(conv_order[:,0])):\n fout.write(\"& {:.3f} \".format(conv_order[i,v]))\n fout.write(\"\\\\\\\\\\n\")\n fout.write(\"\\end{tabular}\\n\")\n fout.write(\"\\caption{PeleLM convergence order}\\n\")\n fout.write(\"\\label{table:conv}\\n\")\n fout.write(\"\\end{table}\\n\")\n fout.close()\n\ndef writeRegTestFile(data, test_name, vars):\n # Evaluate order\n conv_order = np.empty([len(data[:,0])-1,len(vars)])\n for v in range(len(vars)):\n for i in range(len(conv_order[:,0])):\n conv_order[i,v] = np.log(data[i,v+1]/data[i+1,v+1])/np.log(2.0)\n fout = open(\"Convergence_{}.dat\".format(test_name), \"w\") \n fout.write(\" 
Variables \")\n for i in range(len(conv_order[:,0])):\n fout.write(\" {}/{} \".format(data[i+1,0],data[i,0]))\n fout.write(\"\\n\")\n for v in range(len(vars)):\n fout.write(\"{} \".format(vars[v]))\n for i in range(len(conv_order[:,0])):\n fout.write(\" {:.3f} \".format(conv_order[i,v]))\n fout.write(\"\\n\")\n fout.close()\n\ndef parse_args(arg_string=None):\n parser = argparse.ArgumentParser(description=USAGE)\n\n parser.add_argument(\"--test_name\", type=str, default=\"None\", metavar=\"test-name\",\n help=\"name of the test. Default = current folder name\")\n\n parser.add_argument(\"--pproc_exe\", type=str, default=\"None\", metavar=\"pproc.exe\",\n help=\"path to the executable required for the analysis.\")\n\n if not arg_string is None:\n args, unknown = parser.parse_known_args(arg_string)\n else:\n args, unknown = parser.parse_known_args()\n\n return args \n\nif __name__ == \"__main__\":\n arg_string_prepend = [\"--pproc_exe\"]+sys.argv[1:]\n args = parse_args(arg_string=arg_string_prepend)\n pproc(args)\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.log",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Dou-Yu-xuan/deep-learning-visal | [
"da1ebc527d44c8c5a524e757a1d784ba37ec2d5c",
"da1ebc527d44c8c5a524e757a1d784ba37ec2d5c",
"da1ebc527d44c8c5a524e757a1d784ba37ec2d5c",
"da1ebc527d44c8c5a524e757a1d784ba37ec2d5c",
"da1ebc527d44c8c5a524e757a1d784ba37ec2d5c"
] | [
"models/ObjectDetection/FoveaBox.py",
"models/Attention/NonLocalBlock.py",
"models/ClassicNetwork/blocks/non_local_dot_product.py",
"models/ClassicNetwork/csp_densenet.py",
"models/SemanticSegmentation/FisheyeMODNet.py"
] | [
"import torch\nimport torch.nn as nn\nimport torchvision\n\ndef Conv3x3ReLU(in_channels,out_channels):\n return nn.Sequential(\n nn.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=3,stride=1,padding=1),\n nn.ReLU6(inplace=True)\n )\n\ndef locLayer(in_channels,out_channels):\n return nn.Sequential(\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),\n )\n\ndef confLayer(in_channels,out_channels):\n return nn.Sequential(\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),\n )\n\nclass FoveaBox(nn.Module):\n def __init__(self, num_classes=80):\n super(FoveaBox, self).__init__()\n self.num_classes = num_classes\n resnet = torchvision.models.resnet50()\n layers = list(resnet.children())\n\n self.layer1 = nn.Sequential(*layers[:5])\n self.layer2 = nn.Sequential(*layers[5])\n self.layer3 = nn.Sequential(*layers[6])\n self.layer4 = nn.Sequential(*layers[7])\n\n self.lateral5 = nn.Conv2d(in_channels=2048, out_channels=256, kernel_size=1)\n self.lateral4 = nn.Conv2d(in_channels=1024, out_channels=256, kernel_size=1)\n self.lateral3 = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1)\n\n self.upsample4 = nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=4, stride=2, padding=1)\n self.upsample3 = nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=4, stride=2, padding=1)\n\n self.downsample6 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1)\n self.downsample6_relu = nn.ReLU6(inplace=True)\n self.downsample5 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1)\n\n self.loc_layer3 = locLayer(in_channels=256,out_channels=4)\n self.conf_layer3 = confLayer(in_channels=256,out_channels=self.num_classes)\n\n self.loc_layer4 = locLayer(in_channels=256, out_channels=4)\n self.conf_layer4 = confLayer(in_channels=256, out_channels=self.num_classes)\n\n self.loc_layer5 = locLayer(in_channels=256, out_channels=4)\n self.conf_layer5 = confLayer(in_channels=256, out_channels=self.num_classes)\n\n self.loc_layer6 = locLayer(in_channels=256, out_channels=4)\n self.conf_layer6 = confLayer(in_channels=256, out_channels=self.num_classes)\n\n self.loc_layer7 = locLayer(in_channels=256, out_channels=4)\n self.conf_layer7 = confLayer(in_channels=256, out_channels=self.num_classes)\n\n self.init_params()\n\n def init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n x = self.layer1(x)\n c3 =x = self.layer2(x)\n c4 =x = self.layer3(x)\n c5 = x = self.layer4(x)\n\n p5 = self.lateral5(c5)\n p4 = self.upsample4(p5) + self.lateral4(c4)\n p3 = self.upsample3(p4) + self.lateral3(c3)\n\n p6 = self.downsample5(p5)\n p7 = self.downsample6_relu(self.downsample6(p6))\n\n loc3 = 
self.loc_layer3(p3)\n conf3 = self.conf_layer3(p3)\n\n loc4 = self.loc_layer4(p4)\n conf4 = self.conf_layer4(p4)\n\n loc5 = self.loc_layer5(p5)\n conf5 = self.conf_layer5(p5)\n\n loc6 = self.loc_layer6(p6)\n conf6 = self.conf_layer6(p6)\n\n loc7 = self.loc_layer7(p7)\n conf7 = self.conf_layer7(p7)\n\n locs = torch.cat([loc3.permute(0, 2, 3, 1).contiguous().view(loc3.size(0), -1),\n loc4.permute(0, 2, 3, 1).contiguous().view(loc4.size(0), -1),\n loc5.permute(0, 2, 3, 1).contiguous().view(loc5.size(0), -1),\n loc6.permute(0, 2, 3, 1).contiguous().view(loc6.size(0), -1),\n loc7.permute(0, 2, 3, 1).contiguous().view(loc7.size(0), -1)],dim=1)\n\n confs = torch.cat([conf3.permute(0, 2, 3, 1).contiguous().view(conf3.size(0), -1),\n conf4.permute(0, 2, 3, 1).contiguous().view(conf4.size(0), -1),\n conf5.permute(0, 2, 3, 1).contiguous().view(conf5.size(0), -1),\n conf6.permute(0, 2, 3, 1).contiguous().view(conf6.size(0), -1),\n conf7.permute(0, 2, 3, 1).contiguous().view(conf7.size(0), -1),], dim=1)\n\n out = (locs, confs)\n return out\n\nif __name__ == '__main__':\n model = FoveaBox()\n print(model)\n\n input = torch.randn(1, 3, 800, 800)\n out = model(input)\n print(out[0].shape)\n print(out[1].shape)\n",
"import torch\nimport torch.nn as nn\nimport torchvision\n\n\nclass NonLocalBlock(nn.Module):\n def __init__(self, channel):\n super(NonLocalBlock, self).__init__()\n self.inter_channel = channel // 2\n self.conv_phi = nn.Conv2d(in_channels=channel, out_channels=self.inter_channel, kernel_size=1, stride=1,padding=0, bias=False)\n self.conv_theta = nn.Conv2d(in_channels=channel, out_channels=self.inter_channel, kernel_size=1, stride=1, padding=0, bias=False)\n self.conv_g = nn.Conv2d(in_channels=channel, out_channels=self.inter_channel, kernel_size=1, stride=1, padding=0, bias=False)\n self.softmax = nn.Softmax(dim=1)\n self.conv_mask = nn.Conv2d(in_channels=self.inter_channel, out_channels=channel, kernel_size=1, stride=1, padding=0, bias=False)\n\n def forward(self, x):\n # [N, C, H , W]\n b, c, h, w = x.size()\n # [N, C/2, H * W]\n x_phi = self.conv_phi(x).view(b, c, -1)\n # [N, H * W, C/2]\n x_theta = self.conv_theta(x).view(b, c, -1).permute(0, 2, 1).contiguous()\n x_g = self.conv_g(x).view(b, c, -1).permute(0, 2, 1).contiguous()\n # [N, H * W, H * W]\n mul_theta_phi = torch.matmul(x_theta, x_phi)\n mul_theta_phi = self.softmax(mul_theta_phi)\n # [N, H * W, C/2]\n mul_theta_phi_g = torch.matmul(mul_theta_phi, x_g)\n # [N, C/2, H, W]\n mul_theta_phi_g = mul_theta_phi_g.permute(0,2,1).contiguous().view(b,self.inter_channel, h, w)\n # [N, C, H , W]\n mask = self.conv_mask(mul_theta_phi_g)\n out = mask + x\n return out\n\n\nif __name__=='__main__':\n model = NonLocalBlock(channel=16)\n print(model)\n\n input = torch.randn(1, 16, 64, 64)\n out = model(input)\n print(out.shape)",
"import torch\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\n\r\n\r\nclass _NonLocalBlockND(nn.Module):\r\n def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):\r\n super(_NonLocalBlockND, self).__init__()\r\n\r\n assert dimension in [1, 2, 3]\r\n\r\n self.dimension = dimension\r\n self.sub_sample = sub_sample\r\n\r\n self.in_channels = in_channels\r\n self.inter_channels = inter_channels\r\n\r\n if self.inter_channels is None:\r\n self.inter_channels = in_channels // 2\r\n if self.inter_channels == 0:\r\n self.inter_channels = 1\r\n\r\n if dimension == 3:\r\n conv_nd = nn.Conv3d\r\n max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))\r\n bn = nn.BatchNorm3d\r\n elif dimension == 2:\r\n conv_nd = nn.Conv2d\r\n max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))\r\n bn = nn.BatchNorm2d\r\n else:\r\n conv_nd = nn.Conv1d\r\n max_pool_layer = nn.MaxPool1d(kernel_size=(2))\r\n bn = nn.BatchNorm1d\r\n\r\n self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\r\n kernel_size=1, stride=1, padding=0)\r\n\r\n if bn_layer:\r\n self.W = nn.Sequential(\r\n conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\r\n kernel_size=1, stride=1, padding=0),\r\n bn(self.in_channels)\r\n )\r\n nn.init.constant_(self.W[1].weight, 0)\r\n nn.init.constant_(self.W[1].bias, 0)\r\n else:\r\n self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\r\n kernel_size=1, stride=1, padding=0)\r\n nn.init.constant_(self.W.weight, 0)\r\n nn.init.constant_(self.W.bias, 0)\r\n\r\n self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\r\n kernel_size=1, stride=1, padding=0)\r\n\r\n self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\r\n kernel_size=1, stride=1, padding=0)\r\n\r\n if sub_sample:\r\n self.g = nn.Sequential(self.g, max_pool_layer)\r\n self.phi = nn.Sequential(self.phi, max_pool_layer)\r\n\r\n def forward(self, x, return_nl_map=False):\r\n \"\"\"\r\n :param x: (b, c, t, h, w)\r\n :param return_nl_map: if True return z, nl_map, else only return z.\r\n :return:\r\n \"\"\"\r\n\r\n batch_size = x.size(0)\r\n\r\n g_x = self.g(x).view(batch_size, self.inter_channels, -1)\r\n g_x = g_x.permute(0, 2, 1)\r\n\r\n theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)\r\n theta_x = theta_x.permute(0, 2, 1)\r\n phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)\r\n f = torch.matmul(theta_x, phi_x)\r\n N = f.size(-1)\r\n f_div_C = f / N\r\n\r\n y = torch.matmul(f_div_C, g_x)\r\n y = y.permute(0, 2, 1).contiguous()\r\n y = y.view(batch_size, self.inter_channels, *x.size()[2:])\r\n W_y = self.W(y)\r\n z = W_y + x\r\n\r\n if return_nl_map:\r\n return z, f_div_C\r\n return z\r\n\r\n\r\nclass NONLocalBlock1D(_NonLocalBlockND):\r\n def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):\r\n super(NONLocalBlock1D, self).__init__(in_channels,\r\n inter_channels=inter_channels,\r\n dimension=1, sub_sample=sub_sample,\r\n bn_layer=bn_layer)\r\n\r\n\r\nclass NONLocalBlock2D(_NonLocalBlockND):\r\n def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):\r\n super(NONLocalBlock2D, self).__init__(in_channels,\r\n inter_channels=inter_channels,\r\n dimension=2, sub_sample=sub_sample,\r\n bn_layer=bn_layer)\r\n\r\n\r\nclass NONLocalBlock3D(_NonLocalBlockND):\r\n def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):\r\n super(NONLocalBlock3D, 
self).__init__(in_channels,\r\n inter_channels=inter_channels,\r\n dimension=3, sub_sample=sub_sample,\r\n bn_layer=bn_layer)\r\n\r\n\r\nif __name__ == '__main__':\r\n import torch\r\n\r\n for (sub_sample_, bn_layer_) in [(True, True), (False, False), (True, False), (False, True)]:\r\n img = torch.zeros(2, 3, 20)\r\n net = NONLocalBlock1D(3, sub_sample=sub_sample_, bn_layer=bn_layer_)\r\n out = net(img)\r\n print(out.size())\r\n\r\n img = torch.zeros(2, 3, 20, 20)\r\n net = NONLocalBlock2D(3, sub_sample=sub_sample_, bn_layer=bn_layer_)\r\n out = net(img)\r\n print(out.size())\r\n\r\n img = torch.randn(2, 3, 8, 20, 20)\r\n net = NONLocalBlock3D(3, sub_sample=sub_sample_, bn_layer=bn_layer_)\r\n out = net(img)\r\n print(out.size())",
"# -*- coding: UTF-8 -*-\n\"\"\"\nAn unofficial implementation of DenseNet with pytorch\n@Cai Yichao 2020_09_15\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchsummary import summary\n\nfrom models.blocks.conv_bn import BN_Conv2d\nfrom models.blocks.dense_block import DenseBlock, CSP_DenseBlock\n\n\nclass DenseNet(nn.Module):\n\n def __init__(self, layers: object, k, theta, num_classes, part_ratio=0) -> object:\n super(DenseNet, self).__init__()\n # params\n self.layers = layers\n self.k = k\n self.theta = theta\n self.Block = DenseBlock if part_ratio == 0 else CSP_DenseBlock # 通过part_tatio参数控制block type\n # layers\n self.conv = BN_Conv2d(3, 2 * k, 7, 2, 3)\n self.blocks, patches = self.__make_blocks(2 * k)\n self.fc = nn.Linear(patches, num_classes)\n\n def __make_transition(self, in_chls):\n out_chls = int(self.theta * in_chls)\n return nn.Sequential(\n BN_Conv2d(in_chls, out_chls, 1, 1, 0),\n nn.AvgPool2d(2)\n ), out_chls\n\n def __make_blocks(self, k0):\n \"\"\"\n make block-transition structures\n :param k0:\n :return:\n \"\"\"\n layers_list = []\n patches = 0\n for i in range(len(self.layers)):\n layers_list.append(self.Block(k0, self.layers[i], self.k))\n patches = k0 + self.layers[i] * self.k # output feature patches from Dense Block\n if i != len(self.layers) - 1:\n transition, k0 = self.__make_transition(patches)\n layers_list.append(transition)\n return nn.Sequential(*layers_list), patches\n\n def forward(self, x):\n out = self.conv(x)\n out = F.max_pool2d(out, 3, 2, 1)\n # print(out.shape)\n out = self.blocks(out)\n # print(out.shape)\n out = F.avg_pool2d(out, 7)\n # print(out.shape)\n out = out.view(out.size(0), -1)\n # out = F.softmax(self.fc(out))\n out = self.fc(out)\n return out\n\n\ndef densenet_121(num_classes=1000):\n return DenseNet([6, 12, 24, 16], k=32, theta=0.5, num_classes=num_classes)\n\n\ndef densenet_169(num_classes=1000):\n return DenseNet([6, 12, 32, 32], k=32, theta=0.5, num_classes=num_classes)\n\n\ndef densenet_201(num_classes=1000):\n return DenseNet([6, 12, 48, 32], k=32, theta=0.5, num_classes=num_classes)\n\n\ndef densenet_264(num_classes=1000):\n return DenseNet([6, 12, 64, 48], k=32, theta=0.5, num_classes=num_classes)\n\n\ndef csp_densenet_121(num_classes=1000):\n return DenseNet([6, 12, 24, 16], k=32, theta=0.5, num_classes=num_classes, part_ratio=0.5)\n\n\ndef csp_densenet_169(num_classes=1000):\n return DenseNet([6, 12, 32, 32], k=32, theta=0.5, num_classes=num_classes, part_ratio=0.5)\n\n\ndef csp_densenet_201(num_classes=1000):\n return DenseNet([6, 12, 48, 32], k=32, theta=0.5, num_classes=num_classes, part_ratio=0.5)\n\n\ndef csp_densenet_264(num_classes=1000):\n return DenseNet([6, 12, 64, 48], k=32, theta=0.5, num_classes=num_classes, part_ratio=0.5)\n\n\n# def test():\n# net = densenet_264()\n# summary(net, (3, 224, 224))\n# x = torch.randn((2, 3, 224, 224))\n# y = net(x)\n# print(y.shape)\n#\n#\n# test()\n",
"import torch\nimport torch.nn as nn\n\ndef Conv3x3BNReLU(in_channels,out_channels,stride,groups):\n return nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=1,groups=groups),\n nn.BatchNorm2d(out_channels),\n nn.ReLU6(inplace=True)\n )\n\ndef Conv1x1BNReLU(in_channels,out_channels,groups):\n return nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1,groups=groups),\n nn.BatchNorm2d(out_channels),\n nn.ReLU6(inplace=True)\n )\n\ndef Conv1x1BN(in_channels,out_channels,groups):\n return nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1,groups=groups),\n nn.BatchNorm2d(out_channels)\n )\n\nclass ChannelShuffle(nn.Module):\n def __init__(self, groups):\n super(ChannelShuffle, self).__init__()\n self.groups = groups\n\n def forward(self, x):\n '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''\n N, C, H, W = x.size()\n g = self.groups\n return x.view(N, g, int(C / g), H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W)\n\n\nclass ShuffleNetUnits(nn.Module):\n def __init__(self, in_channels, out_channels, stride, groups):\n super(ShuffleNetUnits, self).__init__()\n self.stride = stride\n out_channels = out_channels - in_channels if self.stride>1 else out_channels\n mid_channels = out_channels // 4\n\n self.bottleneck = nn.Sequential(\n Conv1x1BNReLU(in_channels, mid_channels,groups),\n ChannelShuffle(groups),\n Conv3x3BNReLU(mid_channels, mid_channels, stride,groups),\n Conv1x1BN(mid_channels, out_channels,groups)\n )\n if self.stride>1:\n self.shortcut = nn.MaxPool2d(kernel_size=3,stride=2,padding=1)\n\n self.relu = nn.ReLU6(inplace=True)\n\n def forward(self, x):\n out = self.bottleneck(x)\n out = torch.cat([self.shortcut(x), out], dim=1) if self.stride > 1 else (out + x)\n return self.relu(out)\n\nclass FisheyeMODNet(nn.Module):\n def __init__(self, groups=1, num_classes=2):\n super(FisheyeMODNet, self).__init__()\n layers = [4, 8, 4]\n\n self.stage1a = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=24, kernel_size=3,stride=2, padding=1),\n nn.MaxPool2d(kernel_size=2,stride=2),\n )\n self.stage2a = self._make_layer(24, 120, groups, layers[0])\n\n self.stage1b = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=24, kernel_size=3, stride=2, padding=1),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n self.stage2b = self._make_layer(24, 120, groups, layers[0])\n\n self.stage3 = self._make_layer(240, 480, groups, layers[1])\n self.stage4 = self._make_layer(480, 960, groups, layers[2])\n\n self.adapt_conv3 = nn.Conv2d(960, num_classes, kernel_size=1)\n self.adapt_conv2 = nn.Conv2d(480, num_classes, kernel_size=1)\n self.adapt_conv1 = nn.Conv2d(240, num_classes, kernel_size=1)\n\n self.up_sampling3 = nn.ConvTranspose2d(in_channels=num_classes, out_channels=num_classes, kernel_size=4, stride=2, padding=1)\n self.up_sampling2 = nn.ConvTranspose2d(in_channels=num_classes, out_channels=num_classes, kernel_size=4, stride=2, padding=1)\n self.up_sampling1 = nn.ConvTranspose2d(in_channels=num_classes, out_channels=num_classes, kernel_size=16, stride=8, padding=4)\n\n self.softmax = nn.Softmax(dim=1)\n\n self.init_params()\n\n def _make_layer(self, in_channels, out_channels, groups, block_num):\n layers = []\n layers.append(ShuffleNetUnits(in_channels=in_channels, out_channels=out_channels, stride=2, groups=groups))\n for idx in range(1, block_num):\n 
layers.append(ShuffleNetUnits(in_channels=out_channels, out_channels=out_channels, stride=1, groups=groups))\n return nn.Sequential(*layers)\n\n def init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.Linear):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x, y):\n x = self.stage2a(self.stage1a(x))\n y = self.stage2b(self.stage1b(y))\n feature1 = torch.cat([x, y], dim=1)\n feature2 = self.stage3(feature1)\n feature3 = self.stage4(feature2)\n\n out3 = self.up_sampling3(self.adapt_conv3(feature3))\n out2 = self.up_sampling2(self.adapt_conv2(feature2) + out3)\n out1 = self.up_sampling1(self.adapt_conv1(feature1) + out2)\n\n out = self.softmax(out1)\n return out\n\n\nif __name__ == '__main__':\n model = FisheyeMODNet()\n\n input1 = torch.randn(1, 3, 640, 640)\n input2 = torch.randn(1, 3, 640, 640)\n\n out = model(input1, input2)\n print(out.shape)"
] | [
[
"torch.nn.Sequential",
"torch.nn.ReLU6",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.init.kaiming_normal_"
],
[
"torch.randn",
"torch.nn.Softmax",
"torch.nn.Conv2d",
"torch.matmul"
],
[
"torch.nn.Sequential",
"torch.zeros",
"torch.nn.init.constant_",
"torch.randn",
"torch.matmul",
"torch.nn.MaxPool3d",
"torch.nn.MaxPool2d",
"torch.nn.MaxPool1d"
],
[
"torch.nn.Sequential",
"torch.nn.functional.avg_pool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.functional.max_pool2d"
],
[
"torch.nn.Softmax",
"torch.nn.Sequential",
"torch.nn.ReLU6",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Flodip/WaterMonitor | [
"5f7d8d6f266d35e7d4dd655e6e47933abb28c697"
] | [
"pimonitor.py"
] | [
"from sense_hat import SenseHat\nimport psycopg2\nimport numpy as np\nimport time\n\nsense = SenseHat()\nsense.set_imu_config(True, False, False) # compass, not gyro, not accel\n\ndatabase = \"watermonitor\"\n\ntry:\n try:\n conn = psycopg2.connect(\n user=\"pi\",\n password=\"piwater\",\n host=\"127.0.0.1\",\n port=\"5432\",\n database=database\n )\n except Exception:\n message = \"Error db conn\"\n raise\n\n while True:\n # time.sleep(0.02) # already a lag of 0.02s without sleep\n xyz = sense.get_compass_raw() # get values in microteslas\n\n # get timestamp in ms\n timestamp = int(round(time.time() * 1000))\n # get norm of compass xyz values\n value = np.linalg.norm([xyz[\"x\"], xyz[\"y\"], xyz[\"z\"]])\n try:\n curs = conn.cursor()\n print(str(timestamp) + \", \" + str(value))\n curs.execute(\"INSERT INTO water_consumption (timestamp, value) VALUES(%s, %s);\", (timestamp, value))\n conn.commit()\n curs.close()\n except Exception:\n message = \"Error cursor db\"\n raise\nexcept Exception as e:\n print(message + str(e))\n"
] | [
[
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
csbrasnett/lipid-md | [
"22ac04a01277da7e64e58ba10a1e7a9791393fcc"
] | [
"QIIDcurvature.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\nauthor: Chris Brasnett, University of Bristol, [email protected]\n\n\"\"\"\n\nimport numpy as np\nfrom QIIDderivative import derivative\n\ndef nominator(F_x, F_y, F_z, F_xx, F_xy, F_yy, F_yz, F_zz, F_xz):\n m = np.array([[F_xx, F_xy, F_xz, F_x],\n [F_xy, F_yy, F_yz, F_y],\n [F_xz, F_yz, F_zz, F_z],\n [F_x, F_y, F_z, 0]])\n \n d = np.linalg.det(m)\n \n return d\n\ndef denominator(F_x,F_y, F_z):\n \n g = np.array([F_x,F_y,F_z])\n \n mag_g = np.linalg.norm(g)\n \n return mag_g**4\n\ndef main(x, y, z, lamb):\n vals = derivative(x, y, z, lamb)\n \n n = nominator(vals[0],vals[1],vals[2],vals[3],vals[4],vals[5],vals[6],vals[7],vals[8])\n d = denominator(vals[0],vals[1],vals[2])\n K = -(n/d)\n \n return K"
] | [
[
"numpy.linalg.det",
"numpy.array",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rungjoo/KoreaBERT_description | [
"ad35b14ac8fb65593c0fe987680c2759e47478ab",
"ad35b14ac8fb65593c0fe987680c2759e47478ab"
] | [
"run_squad_debug.py",
"run_squad_korea.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run BERT on SQuAD 1.1 and SQuAD 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport math\nimport os\nimport random\nimport modeling\nimport optimization\nimport tokenization\nimport six\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"bert_config_file\", \"./pretrained_model/cased_L-12_H-768_A-12/bert_config.json\",\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"vocab_file\", \"./pretrained_model/cased_L-12_H-768_A-12/vocab.txt\",\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", \"/squad/squad_base/\",\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\nflags.DEFINE_string(\"train_file\", \"./squad/train-v1.1.json\",\n \"SQuAD json for training. E.g., train-v1.1.json\")\n\nflags.DEFINE_string(\n \"predict_file\", \"./squad/dev-v1.1.json\",\n \"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", \"./tfrecord/pretraining_output/model.ckpt-20\",\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 384,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_integer(\n \"doc_stride\", 128,\n \"When splitting up a long document into chunks, how much stride to \"\n \"take between chunks.\")\n\nflags.DEFINE_integer(\n \"max_query_length\", 64,\n \"The maximum number of tokens for the question. Questions longer than \"\n \"this will be truncated to this length.\")\n\nflags.DEFINE_bool(\"do_train\", True, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_predict\", True, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 12, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8,\n \"Total batch size for predictions.\")\n\nflags.DEFINE_float(\"learning_rate\", 3e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 2.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\n \"n_best_size\", 20,\n \"The total number of n-best predictions to generate in the \"\n \"nbest_predictions.json output file.\")\n\nflags.DEFINE_integer(\n \"max_answer_length\", 30,\n \"The maximum length of an answer that can be generated. This is needed \"\n \"because the start and end predictions are not conditioned on one another.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\nflags.DEFINE_bool(\n \"verbose_logging\", False,\n \"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\")\n\nflags.DEFINE_bool(\n \"version_2_with_negative\", False,\n \"If true, the SQuAD examples contain some that do not have an answer.\")\n\nflags.DEFINE_float(\n \"null_score_diff_threshold\", 0.0,\n \"If null_score - best_non_null is greater than the threshold predict null.\")\n\n\nclass SquadExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (\n tokenization.printable_text(self.question_text))\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n start_position=None,\n end_position=None,\n is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n 
self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n\ndef read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n break_count = 0 # 수정\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n break_count += 1\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\"Could not find answer: '%s' vs. 
'%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n if break_count == 8:\n break\n if break_count == 8:\n break\n if break_count == 8:\n break\n return examples\n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. 
Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls/squad/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls/squad/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, 
end_logits)\n\n\ndef model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) / 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = 
tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\ndef write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n\ndef get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.python_io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = 
create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\ndef validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n validate_flags_or_throw(bert_config)\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = read_squad_examples(\n input_file=FLAGS.train_file, is_training=True)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n # Pre-shuffle the input to avoid having to make a very large shuffle\n # buffer in in the `input_fn`.\n rng = random.Random(12345)\n rng.shuffle(train_examples)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if 
FLAGS.do_train:\n # We write to a temporary file to avoid storing very large constant tensors\n # in memory.\n train_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"train.tf_record\"),\n is_training=True)\n convert_examples_to_features(\n examples=train_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=True,\n output_fn=train_writer.process_feature)\n train_writer.close()\n\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num orig examples = %d\", len(train_examples))\n tf.logging.info(\" Num split examples = %d\", train_writer.num_features)\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n del train_examples\n\n train_input_fn = input_fn_builder(\n input_file=train_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_predict:\n eval_examples = read_squad_examples(\n input_file=FLAGS.predict_file, is_training=False)\n\n eval_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"eval.tf_record\"),\n is_training=False)\n eval_features = []\n\n def append_feature(feature):\n eval_features.append(feature)\n eval_writer.process_feature(feature)\n\n convert_examples_to_features(\n examples=eval_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=False,\n output_fn=append_feature)\n eval_writer.close()\n\n tf.logging.info(\"***** Running predictions *****\")\n tf.logging.info(\" Num orig examples = %d\", len(eval_examples))\n tf.logging.info(\" Num split examples = %d\", len(eval_features))\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n all_results = []\n\n predict_input_fn = input_fn_builder(\n input_file=eval_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n\n # If running eval on the TPU, you will need to specify the number of\n # steps.\n all_results = []\n for result in estimator.predict(\n predict_input_fn, yield_single_examples=True):\n if len(all_results) % 1000 == 0:\n tf.logging.info(\"Processing example: %d\" % (len(all_results)))\n unique_id = int(result[\"unique_ids\"])\n start_logits = [float(x) for x in result[\"start_logits\"].flat]\n end_logits = [float(x) for x in result[\"end_logits\"].flat]\n all_results.append(\n RawResult(\n unique_id=unique_id,\n start_logits=start_logits,\n end_logits=end_logits))\n\n output_prediction_file = os.path.join(FLAGS.output_dir, \"predictions.json\")\n output_nbest_file = os.path.join(FLAGS.output_dir, \"nbest_predictions.json\")\n output_null_log_odds_file = os.path.join(FLAGS.output_dir, \"null_odds.json\")\n\n write_predictions(eval_examples, eval_features, all_results,\n FLAGS.n_best_size, FLAGS.max_answer_length,\n FLAGS.do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n",
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run BERT on SQuAD 1.1 and SQuAD 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport math\nimport os\nimport random\nimport modeling\nimport optimization\nimport tokenization\nimport six\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"bert_config_file\", \"./pretrained_model/mecab_model/bert_config_mecab_base.json\",\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"vocab_file\", \"./pretrained_model/mecab_model/mecab_vocab_128000.txt\",\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", \"./korquad/pretrained_mecab/\",\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\nflags.DEFINE_string(\"train_file\", \"./korquad/KorQuAD_v1.0_train.json\",\n \"SQuAD json for training. E.g., train-v1.1.json\")\n\nflags.DEFINE_string(\n \"predict_file\", \"./korquad/KorQuAD_v1.0_dev.json\",\n \"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", \"./pretrained_model/mecab_model/model.ckpt\",\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 384,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_integer(\n \"doc_stride\", 128,\n \"When splitting up a long document into chunks, how much stride to \"\n \"take between chunks.\")\n\nflags.DEFINE_integer(\n \"max_query_length\", 64,\n \"The maximum number of tokens for the question. Questions longer than \"\n \"this will be truncated to this length.\")\n\nflags.DEFINE_bool(\"do_train\", True, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_predict\", True, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 12, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8,\n \"Total batch size for predictions.\")\n\nflags.DEFINE_float(\"learning_rate\", 3e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 2.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\n \"n_best_size\", 20,\n \"The total number of n-best predictions to generate in the \"\n \"nbest_predictions.json output file.\")\n\nflags.DEFINE_integer(\n \"max_answer_length\", 30,\n \"The maximum length of an answer that can be generated. This is needed \"\n \"because the start and end predictions are not conditioned on one another.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\nflags.DEFINE_bool(\n \"verbose_logging\", False,\n \"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\")\n\nflags.DEFINE_bool(\n \"version_2_with_negative\", False,\n \"If true, the SQuAD examples contain some that do not have an answer.\")\n\nflags.DEFINE_float(\n \"null_score_diff_threshold\", 0.0,\n \"If null_score - best_non_null is greater than the threshold predict null.\")\n\n\nclass SquadExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (\n tokenization.printable_text(self.question_text))\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n start_position=None,\n end_position=None,\n is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n 
self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n\ndef read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n # 수정\n tokenizer = tokenization.mecabTokenizer(\n vocab_file=FLAGS.vocab_file) \n examples = []\n break_count = 0 # 수정\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = tokenizer.tokenize(paragraph_text)\n words = paragraph_text.split(' ')\n\n token_position = []\n char_to_word_offset = []\n word = ''\n token_position = 0\n word_position = 0\n for k in range(len(doc_tokens)-1):\n while words[word_position] == '':\n word_position += 1\n char_to_word_offset.append(token_position)\n token = doc_tokens[k].replace('##', '')\n for _ in range(len(token)): char_to_word_offset.append(token_position)\n word += token\n if word == words[word_position]: ## 띄어쓰기가 있다는 것\n char_to_word_offset.append(token_position)\n word_position += 1\n word = ''\n token_position += 1\n token = doc_tokens[-1].replace('##', '')\n for _ in range(len(token)): char_to_word_offset.append(token_position)\n\n # if break_count == 53660:\n # print('ok')\n\n assert len(char_to_word_offset) == len(paragraph_text)\n\n for qa in paragraph[\"qas\"]:\n break_count += 1\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_length = len(orig_answer_text)\n answer_offset = answer[\"answer_start\"] \n \n # if break_count == 53660:\n # print('ok')\n # print(break_count)\n # print(doc_tokens, char_to_word_offset)\n\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length - 1] \n\n x1 = doc_tokens[start_position]\n x2 = doc_tokens[end_position]\n \n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example) \n \n # if break_count == 8:\n # break\n # if break_count == 8:\n # break\n # if break_count == 8:\n # break\n return examples \n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n # 수정\n count = 0 \n for (example_index, example) in enumerate(examples):\n count += 1\n if count == 43:\n print(count)\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = 
tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n \n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. 
Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls/squad/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls/squad/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, 
end_logits)\n\n\ndef model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) / 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = 
tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\ndef write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n\ndef get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.python_io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = 
create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\ndef validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n validate_flags_or_throw(bert_config)\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n ## 수정\n tokenizer = tokenization.mecabTokenizer(vocab_file=FLAGS.vocab_file) \n # tokenizer = tokenization.FullTokenizer(\n # vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n ## 수정\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = read_squad_examples(\n input_file=FLAGS.train_file, is_training=True)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n # Pre-shuffle the input to avoid having to make a very large shuffle\n # buffer in in the `input_fn`.\n rng = random.Random(12345)\n rng.shuffle(train_examples)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n 
train_batch_size=FLAGS.train_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n # We write to a temporary file to avoid storing very large constant tensors\n # in memory.\n train_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"train.tf_record\"),\n is_training=True)\n convert_examples_to_features(\n examples=train_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=True,\n output_fn=train_writer.process_feature)\n train_writer.close()\n\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num orig examples = %d\", len(train_examples))\n tf.logging.info(\" Num split examples = %d\", train_writer.num_features)\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n del train_examples\n\n train_input_fn = input_fn_builder(\n input_file=train_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_predict:\n eval_examples = read_squad_examples(\n input_file=FLAGS.predict_file, is_training=False)\n\n eval_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"eval.tf_record\"),\n is_training=False)\n eval_features = []\n\n def append_feature(feature):\n eval_features.append(feature)\n eval_writer.process_feature(feature)\n\n convert_examples_to_features(\n examples=eval_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=False,\n output_fn=append_feature)\n eval_writer.close()\n\n tf.logging.info(\"***** Running predictions *****\")\n tf.logging.info(\" Num orig examples = %d\", len(eval_examples))\n tf.logging.info(\" Num split examples = %d\", len(eval_features))\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n all_results = []\n\n predict_input_fn = input_fn_builder(\n input_file=eval_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n\n # If running eval on the TPU, you will need to specify the number of\n # steps.\n all_results = []\n for result in estimator.predict(\n predict_input_fn, yield_single_examples=True):\n if len(all_results) % 1000 == 0:\n tf.logging.info(\"Processing example: %d\" % (len(all_results)))\n unique_id = int(result[\"unique_ids\"])\n start_logits = [float(x) for x in result[\"start_logits\"].flat]\n end_logits = [float(x) for x in result[\"end_logits\"].flat]\n all_results.append(\n RawResult(\n unique_id=unique_id,\n start_logits=start_logits,\n end_logits=end_logits))\n\n output_prediction_file = os.path.join(FLAGS.output_dir, \"predictions.json\")\n output_nbest_file = os.path.join(FLAGS.output_dir, \"nbest_predictions.json\")\n output_null_log_odds_file = os.path.join(FLAGS.output_dir, \"null_odds.json\")\n\n write_predictions(eval_examples, eval_features, all_results,\n FLAGS.n_best_size, FLAGS.max_answer_length,\n FLAGS.do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n"
] | [
[
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.logging.warning",
"tensorflow.FixedLenFeature",
"tensorflow.nn.log_softmax",
"tensorflow.gfile.GFile",
"tensorflow.reduce_sum",
"tensorflow.train.init_from_checkpoint",
"tensorflow.gfile.MakeDirs",
"tensorflow.to_int32",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.data.TFRecordDataset",
"tensorflow.truncated_normal_initializer",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.parse_single_example",
"tensorflow.app.run",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.gfile.Open",
"tensorflow.zeros_initializer",
"tensorflow.logging.info",
"tensorflow.one_hot",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.train.Features",
"tensorflow.nn.bias_add",
"tensorflow.train.Scaffold",
"tensorflow.transpose",
"tensorflow.flags.DEFINE_string",
"tensorflow.reshape"
],
[
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.FixedLenFeature",
"tensorflow.nn.log_softmax",
"tensorflow.gfile.GFile",
"tensorflow.reduce_sum",
"tensorflow.train.init_from_checkpoint",
"tensorflow.gfile.MakeDirs",
"tensorflow.to_int32",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.data.TFRecordDataset",
"tensorflow.truncated_normal_initializer",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.parse_single_example",
"tensorflow.app.run",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.gfile.Open",
"tensorflow.zeros_initializer",
"tensorflow.logging.info",
"tensorflow.one_hot",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.train.Features",
"tensorflow.nn.bias_add",
"tensorflow.train.Scaffold",
"tensorflow.transpose",
"tensorflow.flags.DEFINE_string",
"tensorflow.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
elusenji/transformers | [
"b18dfd95e1f60ae65a959a7b255fc06522170d1b",
"b18dfd95e1f60ae65a959a7b255fc06522170d1b",
"b18dfd95e1f60ae65a959a7b255fc06522170d1b",
"b18dfd95e1f60ae65a959a7b255fc06522170d1b",
"af14c61973effd8b8077ac61b3f24bdd4a632f25"
] | [
"tests/openai/test_modeling_tf_openai.py",
"src/transformers/models/flaubert/modeling_tf_flaubert.py",
"tests/electra/test_modeling_tf_electra.py",
"tests/dpr/test_modeling_tf_dpr.py",
"examples/pytorch/question-answering/run_qa_no_trainer.py"
] | [
"# coding=utf-8\n# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\n\nfrom transformers import OpenAIGPTConfig, is_tf_available\nfrom transformers.testing_utils import require_tf, slow\n\nfrom ..test_configuration_common import ConfigTester\nfrom ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask\n\n\nif is_tf_available():\n import tensorflow as tf\n\n from transformers.models.openai.modeling_tf_openai import (\n TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,\n TFOpenAIGPTDoubleHeadsModel,\n TFOpenAIGPTForSequenceClassification,\n TFOpenAIGPTLMHeadModel,\n TFOpenAIGPTModel,\n )\n\n\nclass TFOpenAIGPTModelTester:\n def __init__(\n self,\n parent,\n ):\n self.parent = parent\n self.batch_size = 13\n self.seq_length = 7\n self.is_training = True\n self.use_token_type_ids = True\n self.use_input_mask = True\n self.use_labels = True\n self.use_mc_token_ids = True\n self.vocab_size = 99\n self.hidden_size = 32\n self.num_hidden_layers = 5\n self.num_attention_heads = 4\n self.intermediate_size = 37\n self.hidden_act = \"gelu\"\n self.hidden_dropout_prob = 0.1\n self.attention_probs_dropout_prob = 0.1\n self.max_position_embeddings = 512\n self.type_vocab_size = 16\n self.type_sequence_label_size = 2\n self.initializer_range = 0.02\n self.num_labels = 3\n self.num_choices = 4\n self.scope = None\n self.pad_token_id = self.vocab_size - 1\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n mc_token_ids = None\n if self.use_mc_token_ids:\n mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = OpenAIGPTConfig(\n vocab_size=self.vocab_size,\n n_embd=self.hidden_size,\n n_layer=self.num_hidden_layers,\n n_head=self.num_attention_heads,\n # intermediate_size=self.intermediate_size,\n # hidden_act=self.hidden_act,\n # hidden_dropout_prob=self.hidden_dropout_prob,\n # attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n n_positions=self.max_position_embeddings,\n # type_vocab_size=self.type_vocab_size,\n # initializer_range=self.initializer_range,\n pad_token_id=self.pad_token_id,\n )\n\n head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)\n\n return (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n 
choice_labels,\n )\n\n def create_and_check_openai_gpt_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = TFOpenAIGPTModel(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n\n inputs = [input_ids, input_mask]\n result = model(inputs)\n\n result = model(input_ids)\n\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n\n def create_and_check_openai_gpt_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = TFOpenAIGPTLMHeadModel(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_openai_gpt_double_head(\n self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args\n ):\n model = TFOpenAIGPTDoubleHeadsModel(config=config)\n\n multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))\n multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))\n multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))\n\n inputs = {\n \"input_ids\": multiple_choice_inputs_ids,\n \"mc_token_ids\": mc_token_ids,\n \"attention_mask\": multiple_choice_input_mask,\n \"token_type_ids\": multiple_choice_token_type_ids,\n }\n result = model(inputs)\n self.parent.assertEqual(\n result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)\n )\n self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))\n\n def create_and_check_openai_gpt_for_sequence_classification(\n self, config, input_ids, input_mask, head_mask, token_type_ids, *args\n ):\n config.num_labels = self.num_labels\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n inputs = {\n \"input_ids\": input_ids,\n \"attention_mask\": input_mask,\n \"token_type_ids\": token_type_ids,\n \"labels\": sequence_labels,\n }\n model = TFOpenAIGPTForSequenceClassification(config)\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n\n (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n\n inputs_dict = {\"input_ids\": input_ids, \"token_type_ids\": token_type_ids, \"attention_mask\": input_mask}\n return config, inputs_dict\n\n\n@require_tf\nclass TFOpenAIGPTModelTest(TFModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification)\n if is_tf_available()\n else ()\n )\n all_generative_model_classes = (\n (TFOpenAIGPTLMHeadModel,) if is_tf_available() else ()\n ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly\n test_head_masking = False\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFOpenAIGPTModelTester(self)\n self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def 
test_openai_gpt_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)\n\n def test_openai_gpt_lm_head(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_lm_head(*config_and_inputs)\n\n def test_openai_gpt_double_head(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs)\n\n def test_model_common_attributes(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)\n\n if model_class in self.all_generative_model_classes:\n x = model.get_output_embeddings()\n assert isinstance(x, tf.keras.layers.Layer)\n name = model.get_bias()\n assert name is None\n else:\n x = model.get_output_embeddings()\n assert x is None\n name = model.get_bias()\n assert name is None\n\n def test_openai_gpt_sequence_classification_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFOpenAIGPTModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_tf\nclass TFOPENAIGPTModelLanguageGenerationTest(unittest.TestCase):\n @slow\n def test_lm_generate_openai_gpt(self):\n model = TFOpenAIGPTLMHeadModel.from_pretrained(\"openai-gpt\")\n input_ids = tf.convert_to_tensor([[481, 4735, 544]], dtype=tf.int32) # the president is\n expected_output_ids = [\n 481,\n 4735,\n 544,\n 246,\n 963,\n 870,\n 762,\n 239,\n 244,\n 40477,\n 244,\n 249,\n 719,\n 881,\n 487,\n 544,\n 240,\n 244,\n 603,\n 481,\n ] # the president is a very good man. \" \\n \" i\\'m sure he is, \" said the\n\n output_ids = model.generate(input_ids, do_sample=False)\n self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)\n",
"# coding=utf-8\n# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n TF 2.0 Flaubert model.\n\"\"\"\n\nimport itertools\nimport random\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\nfrom ...modeling_tf_outputs import TFBaseModelOutput\nfrom ...modeling_tf_utils import (\n TFPreTrainedModel,\n TFSharedEmbeddings,\n get_initializer,\n keras_serializable,\n unpack_inputs,\n)\nfrom ...tf_utils import shape_list\nfrom ...utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n)\nfrom ..xlm.modeling_tf_xlm import (\n TFXLMForMultipleChoice,\n TFXLMForQuestionAnsweringSimple,\n TFXLMForSequenceClassification,\n TFXLMForTokenClassification,\n)\nfrom .configuration_flaubert import FlaubertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"flaubert/flaubert_base_cased\"\n_CONFIG_FOR_DOC = \"FlaubertConfig\"\n_TOKENIZER_FOR_DOC = \"FlaubertTokenizer\"\n\nTF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n # See all Flaubert models at https://huggingface.co/models?filter=flaubert\n]\n\nFLAUBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. 
Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n <Tip>\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the\n first positional argument :\n\n - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n </Tip>\n\n Parameters:\n config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nFLAUBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`FlaubertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - `1` for tokens that are **not masked**,\n - `0` for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n langs (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are\n languages ids which can be obtained from the language names by using two conversion mappings provided in\n the configuration of the model (only provided for multilingual models). More precisely, the *language name\n to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the\n *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).\n\n See usage examples detailed in the [multilingual documentation](../multilingual).\n token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - `0` corresponds to a *sentence A* token,\n - `1` corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n lengths (`tf.Tensor` or `Numpy array` of shape `(batch_size,)`, *optional*):\n Length of each sentence that can be used to avoid performing attention on padding token indices. You can\n also use *attention_mask* for the same result (see above), kept here for compatibility Indices selected in\n `[0, ..., input_ids.size(-1)]`:\n cache (`Dict[str, tf.Tensor]`, *optional*):\n Dictionary string to `tf.FloatTensor` that contains precomputed hidden states (key and values in the\n attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential\n decoding.\n\n The dictionary object will be modified in-place during the forward pass to add newly computed\n hidden-states.\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - `1` indicates the head is **not masked**,\n - `0` indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\ndef get_masks(slen, lengths, causal, padding_mask=None):\n \"\"\"\n Generate hidden states mask, and optionally an attention mask.\n \"\"\"\n bs = shape_list(lengths)[0]\n if padding_mask is not None:\n mask = padding_mask\n else:\n # assert lengths.max().item() <= slen\n alen = tf.range(slen)\n mask = tf.math.less(alen, tf.expand_dims(lengths, axis=1))\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n if causal:\n attn_mask = tf.less_equal(\n tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))\n )\n else:\n attn_mask = mask\n\n # sanity check\n # assert shape_list(mask) == [bs, slen]\n if tf.executing_eagerly():\n tf.debugging.assert_equal(shape_list(mask), [bs, slen])\n assert causal is False or shape_list(attn_mask) == [bs, slen, slen]\n\n return mask, attn_mask\n\n\nclass TFFlaubertPreTrainedModel(TFPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = FlaubertConfig\n base_model_prefix = \"transformer\"\n\n @property\n def dummy_inputs(self):\n # Sometimes XLM has language embeddings so don't forget to build them as well if needed\n inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])\n attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])\n if self.config.use_lang_emb and self.config.n_langs > 1:\n return {\n \"input_ids\": inputs_list,\n \"attention_mask\": attns_list,\n \"langs\": tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]]),\n }\n else:\n return {\"input_ids\": inputs_list, \"attention_mask\": attns_list}\n\n\n@add_start_docstrings(\n \"The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertModel(TFFlaubertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFBaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n langs: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,\n cache: Optional[Dict[str, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[tf.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFBaseModelOutput]:\n outputs = self.transformer(\n input_ids=input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n 
position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n return outputs\n\n # Copied from transformers.models.distilbert.modeling_tf_distilbert.TFDistilBertModel.serving_output\n def serving_output(self, output):\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)\n\n\n# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMMultiHeadAttention with XLM->Flaubert\nclass TFFlaubertMultiHeadAttention(tf.keras.layers.Layer):\n NEW_ID = itertools.count()\n\n def __init__(self, n_heads, dim, config, **kwargs):\n super().__init__(**kwargs)\n self.layer_id = next(TFFlaubertMultiHeadAttention.NEW_ID)\n self.dim = dim\n self.n_heads = n_heads\n self.output_attentions = config.output_attentions\n assert self.dim % self.n_heads == 0\n\n self.q_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"q_lin\")\n self.k_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"k_lin\")\n self.v_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"v_lin\")\n self.out_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"out_lin\")\n self.dropout = tf.keras.layers.Dropout(config.attention_dropout)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n raise NotImplementedError\n\n def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n bs, qlen, dim = shape_list(input)\n\n if kv is None:\n klen = qlen if cache is None else cache[\"slen\"] + qlen\n else:\n klen = shape_list(kv)[1]\n\n # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'\n dim_per_head = self.dim // self.n_heads\n mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen)\n\n def shape(x):\n \"\"\"projection\"\"\"\n return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))\n\n def unshape(x):\n \"\"\"compute context\"\"\"\n return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))\n\n q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n\n if kv is None:\n k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n elif cache is None or self.layer_id not in cache:\n k = v = kv\n k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if cache is not None:\n if self.layer_id in cache:\n if kv is None:\n k_, v_ = cache[self.layer_id]\n k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head)\n v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = cache[self.layer_id]\n\n cache[self.layer_id] = (k, v)\n\n f_dim_per_head = tf.cast(dim_per_head, dtype=q.dtype)\n q = tf.multiply(q, 
tf.math.rsqrt(f_dim_per_head)) # (bs, n_heads, qlen, dim_per_head)\n k = tf.cast(k, dtype=q.dtype)\n scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen)\n mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)\n # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)\n mask = tf.cast(mask, dtype=scores.dtype)\n scores = scores - 1e30 * (1.0 - mask)\n weights = tf.nn.softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)\n weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n outputs = (self.out_lin(context),)\n\n if output_attentions:\n outputs = outputs + (weights,)\n\n return outputs\n\n\n# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMTransformerFFN\nclass TFFlaubertTransformerFFN(tf.keras.layers.Layer):\n def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs):\n super().__init__(**kwargs)\n\n self.lin1 = tf.keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name=\"lin1\")\n self.lin2 = tf.keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name=\"lin2\")\n self.act = get_tf_activation(\"gelu\") if config.gelu_activation else get_tf_activation(\"relu\")\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n\n def call(self, input, training=False):\n x = self.lin1(input)\n x = self.act(x)\n x = self.lin2(x)\n x = self.dropout(x, training=training)\n\n return x\n\n\n@keras_serializable\nclass TFFlaubertMainLayer(tf.keras.layers.Layer):\n config_class = FlaubertConfig\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n self.n_heads = config.n_heads\n self.n_langs = config.n_langs\n self.dim = config.emb_dim\n self.hidden_dim = self.dim * 4\n self.n_words = config.n_words\n self.pad_index = config.pad_index\n self.causal = config.causal\n self.n_layers = config.n_layers\n self.use_lang_emb = config.use_lang_emb\n self.layerdrop = getattr(config, \"layerdrop\", 0.0)\n self.pre_norm = getattr(config, \"pre_norm\", False)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.return_dict = config.use_return_dict\n self.max_position_embeddings = config.max_position_embeddings\n self.embed_init_std = config.embed_init_std\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.embeddings = TFSharedEmbeddings(\n self.n_words, self.dim, initializer_range=config.embed_init_std, name=\"embeddings\"\n )\n self.layer_norm_emb = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"layer_norm_emb\")\n self.attentions = []\n self.layer_norm1 = []\n self.ffns = []\n self.layer_norm2 = []\n\n for i in range(self.n_layers):\n self.attentions.append(\n TFFlaubertMultiHeadAttention(self.n_heads, self.dim, config=config, name=f\"attentions_._{i}\")\n )\n self.layer_norm1.append(\n tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f\"layer_norm1_._{i}\")\n )\n # if self.is_decoder:\n # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))\n self.ffns.append(\n TFFlaubertTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=f\"ffns_._{i}\")\n )\n 
self.layer_norm2.append(\n tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f\"layer_norm2_._{i}\")\n )\n\n def build(self, input_shape):\n with tf.name_scope(\"position_embeddings\"):\n self.position_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.max_position_embeddings, self.dim],\n initializer=get_initializer(self.embed_init_std),\n )\n\n if self.n_langs > 1 and self.use_lang_emb:\n with tf.name_scope(\"lang_embeddings\"):\n self.lang_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.n_langs, self.dim],\n initializer=get_initializer(self.embed_init_std),\n )\n\n super().build(input_shape)\n\n def get_input_embeddings(self):\n return self.embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.weight = value\n self.embeddings.vocab_size = shape_list(value)[0]\n\n @unpack_inputs\n def call(\n self,\n input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n langs: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,\n cache: Optional[Dict[str, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[tf.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFBaseModelOutput]:\n # removed: src_enc=None, src_len=None\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n bs, slen = shape_list(input_ids)\n elif inputs_embeds is not None:\n bs, slen = shape_list(inputs_embeds)[:2]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if lengths is None:\n if input_ids is not None:\n lengths = tf.reduce_sum(\n tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=input_ids.dtype), axis=1\n )\n else:\n lengths = tf.convert_to_tensor([slen] * bs)\n # mask = input_ids != self.pad_index\n\n # check inputs\n # assert shape_list(lengths)[0] == bs\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(lengths)[0], bs\n ), f\"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched\"\n # assert lengths.max().item() <= slen\n # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0\n # assert (src_enc is None) == (src_len is None)\n # if src_enc is not None:\n # assert self.is_decoder\n # assert src_enc.size(0) == bs\n\n # generate masks\n mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)\n # if self.is_decoder and src_enc is not None:\n # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]\n\n # position_ids\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(slen), axis=0)\n position_ids = tf.tile(position_ids, (bs, 1))\n\n if tf.executing_eagerly():\n # assert shape_list(position_ids) == [bs, slen] # (slen, bs)\n tf.debugging.assert_equal(\n shape_list(position_ids), [bs, slen]\n ), f\"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched\"\n # position_ids = position_ids.transpose(0, 1)\n\n # langs\n if langs is not None and 
tf.executing_eagerly():\n # assert shape_list(langs) == [bs, slen] # (slen, bs)\n tf.debugging.assert_equal(\n shape_list(langs), [bs, slen]\n ), f\"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched\"\n # langs = langs.transpose(0, 1)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]\n if head_mask is not None:\n raise NotImplementedError\n else:\n head_mask = [None] * self.n_layers\n\n # do not recompute cached elements\n if cache is not None and input_ids is not None:\n _slen = slen - cache[\"slen\"]\n input_ids = input_ids[:, -_slen:]\n position_ids = position_ids[:, -_slen:]\n if langs is not None:\n langs = langs[:, -_slen:]\n mask = mask[:, -_slen:]\n attn_mask = attn_mask[:, -_slen:]\n\n # embeddings\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids)\n\n tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids)\n\n if langs is not None and self.use_lang_emb:\n tensor = tensor + tf.gather(self.lang_embeddings, langs)\n if token_type_ids is not None:\n tensor = tensor + self.embeddings(token_type_ids)\n\n tensor = self.layer_norm_emb(tensor)\n tensor = self.dropout(tensor, training=training)\n mask = tf.cast(mask, dtype=tensor.dtype)\n tensor = tensor * tf.expand_dims(mask, axis=-1)\n\n # hidden_states and attentions cannot be None in graph mode.\n hidden_states = () if output_hidden_states else None\n attentions = () if output_attentions else None\n\n # transformer layers\n for i in range(self.n_layers):\n # LayerDrop\n dropout_probability = random.uniform(0, 1)\n\n if training and (dropout_probability < self.layerdrop):\n continue\n\n if output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # self attention\n if not self.pre_norm:\n attn_outputs = self.attentions[i](\n tensor,\n attn_mask,\n None,\n cache,\n head_mask[i],\n output_attentions,\n training=training,\n )\n attn = attn_outputs[0]\n\n if output_attentions:\n attentions = attentions + (attn_outputs[1],)\n\n attn = self.dropout(attn, training=training)\n tensor = tensor + attn\n tensor = self.layer_norm1[i](tensor)\n else:\n tensor_normalized = self.layer_norm1[i](tensor)\n attn_outputs = self.attentions[i](\n tensor_normalized,\n attn_mask,\n None,\n cache,\n head_mask[i],\n output_attentions,\n training=training,\n )\n attn = attn_outputs[0]\n\n if output_attentions:\n attentions = attentions + (attn_outputs[1],)\n\n attn = self.dropout(attn, training=training)\n tensor = tensor + attn\n\n # encoder attention (for decoder only)\n # if self.is_decoder and src_enc is not None:\n # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)\n # attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)\n # tensor = tensor + attn\n # tensor = self.layer_norm15[i](tensor)\n\n # FFN\n if not self.pre_norm:\n tensor = tensor + self.ffns[i](tensor)\n tensor = self.layer_norm2[i](tensor)\n else:\n tensor_normalized = self.layer_norm2[i](tensor)\n tensor = tensor + self.ffns[i](tensor_normalized)\n\n tensor = tensor * tf.expand_dims(mask, axis=-1)\n\n # Add last hidden state\n if output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # update cache length\n if cache is not None:\n cache[\"slen\"] += tensor.size(1)\n\n # move back sequence length to dimension 0\n # 
tensor = tensor.transpose(0, 1)\n\n if not return_dict:\n return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)\n\n return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)\n\n\n# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMPredLayer\nclass TFFlaubertPredLayer(tf.keras.layers.Layer):\n \"\"\"\n Prediction layer (cross_entropy or adaptive_softmax).\n \"\"\"\n\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n\n self.asm = config.asm\n self.n_words = config.n_words\n self.pad_index = config.pad_index\n\n if config.asm is False:\n self.input_embeddings = input_embeddings\n else:\n raise NotImplementedError\n # self.proj = nn.AdaptiveLogSoftmaxWithLoss(\n # in_features=dim,\n # n_classes=config.n_words,\n # cutoffs=config.asm_cutoffs,\n # div_value=config.asm_div_value,\n # head_bias=True, # default is False\n # )\n\n def build(self, input_shape):\n # The output weights are the same as the input embeddings, but there is an output-only bias for each token.\n self.bias = self.add_weight(shape=(self.n_words,), initializer=\"zeros\", trainable=True, name=\"bias\")\n\n super().build(input_shape)\n\n def get_output_embeddings(self):\n return self.input_embeddings\n\n def set_output_embeddings(self, value):\n self.input_embeddings.weight = value\n self.input_embeddings.vocab_size = shape_list(value)[0]\n\n def get_bias(self):\n return {\"bias\": self.bias}\n\n def set_bias(self, value):\n self.bias = value[\"bias\"]\n self.vocab_size = shape_list(value[\"bias\"])[0]\n\n def call(self, hidden_states):\n hidden_states = self.input_embeddings(hidden_states, mode=\"linear\")\n hidden_states = hidden_states + self.bias\n\n return hidden_states\n\n\n@dataclass\nclass TFFlaubertWithLMHeadModelOutput(ModelOutput):\n \"\"\"\n Base class for [`TFFlaubertWithLMHeadModel`] outputs.\n\n Args:\n logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n logits: tf.Tensor = None\n hidden_states: Optional[Tuple[tf.Tensor]] = None\n attentions: Optional[Tuple[tf.Tensor]] = None\n\n\n@add_start_docstrings(\n \"\"\"\n The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertWithLMHeadModel(TFFlaubertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n self.pred_layer = TFFlaubertPredLayer(config, self.transformer.embeddings, name=\"pred_layer_._proj\")\n\n 
def get_lm_head(self):\n return self.pred_layer\n\n def get_prefix_bias_name(self):\n warnings.warn(\"The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.\", FutureWarning)\n return self.name + \"/\" + self.pred_layer.name\n\n def prepare_inputs_for_generation(self, inputs, **kwargs):\n mask_token_id = self.config.mask_token_id\n lang_id = self.config.lang_id\n\n effective_batch_size = inputs.shape[0]\n mask_token = tf.fill((effective_batch_size, 1), 1) * mask_token_id\n inputs = tf.concat([inputs, mask_token], axis=1)\n\n if lang_id is not None:\n langs = tf.ones_like(inputs) * lang_id\n else:\n langs = None\n return {\"input_ids\": inputs, \"langs\": langs}\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFFlaubertWithLMHeadModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n langs: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,\n cache: Optional[Dict[str, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[tf.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFFlaubertWithLMHeadModelOutput]:\n\n transformer_outputs = self.transformer(\n input_ids=input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n output = transformer_outputs[0]\n outputs = self.pred_layer(output)\n\n if not return_dict:\n return (outputs,) + transformer_outputs[1:]\n\n return TFFlaubertWithLMHeadModelOutput(\n logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions\n )\n\n def serving_output(self, output):\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFFlaubertWithLMHeadModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n e.g. 
for GLUE tasks.\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertForSequenceClassification(TFXLMForSequenceClassification):\n config_class = FlaubertConfig\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n\n\n@add_start_docstrings(\n \"\"\"\n Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertForQuestionAnsweringSimple(TFXLMForQuestionAnsweringSimple):\n config_class = FlaubertConfig\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n\n\n@add_start_docstrings(\n \"\"\"\n Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertForTokenClassification(TFXLMForTokenClassification):\n config_class = FlaubertConfig\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n\n\n@add_start_docstrings(\n \"\"\"\n Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertForMultipleChoice(TFXLMForMultipleChoice):\n config_class = FlaubertConfig\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n",
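A brief usage sketch for the model classes defined above (illustrative, not part of the library file); it assumes the "flaubert/flaubert_base_cased" checkpoint named in _CHECKPOINT_FOR_DOC is available for download.

from transformers import FlaubertTokenizer, TFFlaubertModel

# Sketch only: a forward pass through the bare Flaubert encoder.
tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = TFFlaubertModel.from_pretrained("flaubert/flaubert_base_cased")

inputs = tokenizer("Le chat mange une pomme.", return_tensors="tf")
# Dict input; a single input_ids tensor or a list of tensors also works (see FLAUBERT_START_DOCSTRING).
outputs = model(inputs)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, emb_dim)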
"# coding=utf-8\n# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\n\nfrom transformers import ElectraConfig, is_tf_available\nfrom transformers.testing_utils import require_tf, slow\n\nfrom ..test_configuration_common import ConfigTester\nfrom ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask\n\n\nif is_tf_available():\n import tensorflow as tf\n\n from transformers.models.electra.modeling_tf_electra import (\n TFElectraForMaskedLM,\n TFElectraForMultipleChoice,\n TFElectraForPreTraining,\n TFElectraForQuestionAnswering,\n TFElectraForSequenceClassification,\n TFElectraForTokenClassification,\n TFElectraModel,\n )\n\n\nclass TFElectraModelTester:\n def __init__(\n self,\n parent,\n ):\n self.parent = parent\n self.batch_size = 13\n self.seq_length = 7\n self.is_training = True\n self.use_input_mask = True\n self.use_token_type_ids = True\n self.use_labels = True\n self.vocab_size = 99\n self.hidden_size = 32\n self.num_hidden_layers = 5\n self.num_attention_heads = 4\n self.intermediate_size = 37\n self.hidden_act = \"gelu\"\n self.hidden_dropout_prob = 0.1\n self.attention_probs_dropout_prob = 0.1\n self.max_position_embeddings = 512\n self.type_vocab_size = 16\n self.type_sequence_label_size = 2\n self.initializer_range = 0.02\n self.num_labels = 3\n self.num_choices = 4\n self.scope = None\n self.embedding_size = 128\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = ElectraConfig(\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n max_position_embeddings=self.max_position_embeddings,\n type_vocab_size=self.type_vocab_size,\n initializer_range=self.initializer_range,\n )\n\n return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n\n def prepare_config_and_inputs_for_decoder(self):\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = self.prepare_config_and_inputs()\n\n config.is_decoder = True\n 
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])\n encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)\n\n return (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n\n def create_and_check_model(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = TFElectraModel(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n\n inputs = [input_ids, input_mask]\n result = model(inputs)\n\n result = model(input_ids)\n\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n\n def create_and_check_causal_lm_base_model(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.is_decoder = True\n\n model = TFElectraModel(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n\n inputs = [input_ids, input_mask]\n result = model(inputs)\n\n result = model(input_ids)\n\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n\n def create_and_check_model_as_decoder(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n ):\n config.add_cross_attention = True\n\n model = TFElectraModel(config=config)\n inputs = {\n \"input_ids\": input_ids,\n \"attention_mask\": input_mask,\n \"token_type_ids\": token_type_ids,\n \"encoder_hidden_states\": encoder_hidden_states,\n \"encoder_attention_mask\": encoder_attention_mask,\n }\n result = model(inputs)\n\n inputs = [input_ids, input_mask]\n result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)\n\n # Also check the case where encoder outputs are not passed\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)\n\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n\n def create_and_check_causal_lm_base_model_past(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ):\n config.is_decoder = True\n\n model = TFElectraModel(config=config)\n\n # first forward pass\n outputs = model(input_ids, use_cache=True)\n outputs_use_cache_conf = model(input_ids)\n outputs_no_past = model(input_ids, use_cache=False)\n\n self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))\n self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)\n\n past_key_values = outputs.past_key_values\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # append to next input_ids and attn_mask\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n\n output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0]\n output_from_past = model(\n next_tokens, past_key_values=past_key_values, output_hidden_states=True\n ).hidden_states[0]\n\n # select random slice\n random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))\n output_from_no_past_slice = 
output_from_no_past[:, -1, random_slice_idx]\n output_from_past_slice = output_from_past[:, 0, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)\n\n def create_and_check_causal_lm_base_model_past_with_attn_mask(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ):\n config.is_decoder = True\n\n model = TFElectraModel(config=config)\n\n # create attention mask\n half_seq_length = self.seq_length // 2\n attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)\n attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)\n attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)\n\n # first forward pass\n outputs = model(input_ids, attention_mask=attn_mask, use_cache=True)\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n past_key_values = outputs.past_key_values\n\n # change a random masked slice from input_ids\n random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1\n random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)\n vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)\n condition = tf.transpose(\n tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))\n )\n input_ids = tf.where(condition, random_other_next_tokens, input_ids)\n\n # append to next input_ids and\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n attn_mask = tf.concat(\n [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)],\n axis=1,\n )\n\n output_from_no_past = model(\n next_input_ids,\n attention_mask=attn_mask,\n output_hidden_states=True,\n ).hidden_states[0]\n output_from_past = model(\n next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True\n ).hidden_states[0]\n\n # select random slice\n random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]\n output_from_past_slice = output_from_past[:, 0, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)\n\n def create_and_check_causal_lm_base_model_past_large_inputs(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ):\n config.is_decoder = True\n\n model = TFElectraModel(config=config)\n\n input_ids = input_ids[:1, :]\n input_mask = input_mask[:1, :]\n self.batch_size = 1\n\n # first forward pass\n outputs = model(input_ids, attention_mask=input_mask, use_cache=True)\n past_key_values = outputs.past_key_values\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)\n next_attn_mask = ids_tensor((self.batch_size, 3), 2)\n\n # append to next input_ids and\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1)\n\n output_from_no_past = model(\n next_input_ids,\n attention_mask=next_attention_mask,\n output_hidden_states=True,\n ).hidden_states[0]\n output_from_past = model(\n next_tokens,\n attention_mask=next_attention_mask,\n past_key_values=past_key_values,\n 
output_hidden_states=True,\n ).hidden_states[0]\n\n self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])\n\n # select random slice\n random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))\n output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]\n output_from_past_slice = output_from_past[:, :, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)\n\n def create_and_check_decoder_model_past_large_inputs(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n ):\n config.add_cross_attention = True\n\n model = TFElectraModel(config=config)\n\n input_ids = input_ids[:1, :]\n input_mask = input_mask[:1, :]\n encoder_hidden_states = encoder_hidden_states[:1, :, :]\n encoder_attention_mask = encoder_attention_mask[:1, :]\n self.batch_size = 1\n\n # first forward pass\n outputs = model(\n input_ids,\n attention_mask=input_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=True,\n )\n past_key_values = outputs.past_key_values\n\n # create hypothetical next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)\n next_attn_mask = ids_tensor((self.batch_size, 3), 2)\n\n # append to next input_ids and\n next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)\n next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1)\n\n output_from_no_past = model(\n next_input_ids,\n attention_mask=next_attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_hidden_states=True,\n ).hidden_states[0]\n output_from_past = model(\n next_tokens,\n attention_mask=next_attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n output_hidden_states=True,\n ).hidden_states[0]\n\n self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])\n\n # select random slice\n random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))\n output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]\n output_from_past_slice = output_from_past[:, :, random_slice_idx]\n\n # test that outputs are equal for slice\n tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)\n\n def create_and_check_for_masked_lm(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = TFElectraForMaskedLM(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_for_pretraining(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = TFElectraForPreTraining(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))\n\n def create_and_check_for_sequence_classification(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n 
):\n config.num_labels = self.num_labels\n model = TFElectraForSequenceClassification(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))\n\n def create_and_check_for_multiple_choice(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_choices = self.num_choices\n model = TFElectraForMultipleChoice(config=config)\n multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))\n multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))\n multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))\n inputs = {\n \"input_ids\": multiple_choice_inputs_ids,\n \"attention_mask\": multiple_choice_input_mask,\n \"token_type_ids\": multiple_choice_token_type_ids,\n }\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))\n\n def create_and_check_for_question_answering(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = TFElectraForQuestionAnswering(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))\n self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))\n\n def create_and_check_for_token_classification(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_labels = self.num_labels\n model = TFElectraForTokenClassification(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n inputs_dict = {\"input_ids\": input_ids, \"token_type_ids\": token_type_ids, \"attention_mask\": input_mask}\n return config, inputs_dict\n\n\n@require_tf\nclass TFElectraModelTest(TFModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (\n TFElectraModel,\n TFElectraForMaskedLM,\n TFElectraForPreTraining,\n TFElectraForTokenClassification,\n TFElectraForMultipleChoice,\n TFElectraForSequenceClassification,\n TFElectraForQuestionAnswering,\n )\n if is_tf_available()\n else ()\n )\n test_head_masking = False\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFElectraModelTester(self)\n self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n \"\"\"Test the base model\"\"\"\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_causal_lm_base_model(self):\n \"\"\"Test the base model of the causal LM model\n\n is_deocder=True, no cross_attention, no encoder outputs\n \"\"\"\n config_and_inputs = 
self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_causal_lm_base_model(*config_and_inputs)\n\n def test_model_as_decoder(self):\n \"\"\"Test the base model as a decoder (of an encoder-decoder architecture)\n\n is_deocder=True + cross_attention + pass encoder outputs\n \"\"\"\n config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()\n self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)\n\n def test_causal_lm_base_model_past(self):\n \"\"\"Test causal LM base model with `past_key_values`\"\"\"\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_causal_lm_base_model_past(*config_and_inputs)\n\n def test_causal_lm_base_model_past_with_attn_mask(self):\n \"\"\"Test the causal LM base model with `past_key_values` and `attention_mask`\"\"\"\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_causal_lm_base_model_past_with_attn_mask(*config_and_inputs)\n\n def test_causal_lm_base_model_past_with_large_inputs(self):\n \"\"\"Test the causal LM base model with `past_key_values` and a longer decoder sequence length\"\"\"\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_causal_lm_base_model_past_large_inputs(*config_and_inputs)\n\n def test_decoder_model_past_with_large_inputs(self):\n \"\"\"Similar to `test_causal_lm_base_model_past_with_large_inputs` but with cross-attention\"\"\"\n config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()\n self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)\n\n def test_for_masked_lm(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)\n\n def test_for_pretraining(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_pretraining(*config_and_inputs)\n\n def test_for_question_answering(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_question_answering(*config_and_inputs)\n\n def test_for_sequence_classification(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)\n\n def test_for_multiple_choice(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)\n\n def test_for_token_classification(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_token_classification(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n # for model_name in TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n for model_name in [\"google/electra-small-discriminator\"]:\n model = TFElectraModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_tf\nclass TFElectraModelIntegrationTest(unittest.TestCase):\n @slow\n def test_inference_masked_lm(self):\n model = TFElectraForPreTraining.from_pretrained(\"lysandre/tiny-electra-random\")\n input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])\n output = model(input_ids)[0]\n\n expected_shape = [1, 6]\n self.assertEqual(output.shape, expected_shape)\n\n print(output[:, :3])\n\n expected_slice = tf.constant([[-0.24651965, 0.8835437, 1.823782]])\n 
tf.debugging.assert_near(output[:, :3], expected_slice, atol=1e-4)\n",
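A standalone sketch mirroring TFElectraModelIntegrationTest above (illustrative, not part of the test file); it assumes the "lysandre/tiny-electra-random" checkpoint used in the test can be downloaded.

import tensorflow as tf

from transformers import TFElectraForPreTraining

# Sketch only: the ELECTRA discriminator head returns one replaced-token logit per input position.
model = TFElectraForPreTraining.from_pretrained("lysandre/tiny-electra-random")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
logits = model(input_ids)[0]
print(logits.shape)  # (1, 6)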
"# coding=utf-8\n# Copyright 2020 Huggingface\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom transformers import is_tf_available\nfrom transformers.testing_utils import require_tf, slow\n\nfrom ..test_configuration_common import ConfigTester\nfrom ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask\n\n\nif is_tf_available():\n import numpy\n import tensorflow as tf\n\n from transformers import (\n TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,\n TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,\n TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,\n BertConfig,\n DPRConfig,\n TFDPRContextEncoder,\n TFDPRQuestionEncoder,\n TFDPRReader,\n )\n\n\nclass TFDPRModelTester:\n def __init__(\n self,\n parent,\n batch_size=13,\n seq_length=7,\n is_training=True,\n use_input_mask=True,\n use_token_type_ids=True,\n use_labels=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n type_sequence_label_size=2,\n initializer_range=0.02,\n num_labels=3,\n num_choices=4,\n scope=None,\n projection_dim=0,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_input_mask = use_input_mask\n self.use_token_type_ids = use_token_type_ids\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.num_choices = num_choices\n self.scope = scope\n self.projection_dim = projection_dim\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n # follow test_modeling_tf_ctrl.py\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = BertConfig(\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n 
num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n max_position_embeddings=self.max_position_embeddings,\n type_vocab_size=self.type_vocab_size,\n is_decoder=False,\n initializer_range=self.initializer_range,\n )\n config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())\n\n return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n\n def create_and_check_dpr_context_encoder(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = TFDPRContextEncoder(config=config)\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)\n result = model(input_ids, token_type_ids=token_type_ids)\n result = model(input_ids)\n self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))\n\n def create_and_check_dpr_question_encoder(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = TFDPRQuestionEncoder(config=config)\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)\n result = model(input_ids, token_type_ids=token_type_ids)\n result = model(input_ids)\n self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))\n\n def create_and_check_dpr_reader(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = TFDPRReader(config=config)\n result = model(input_ids, attention_mask=input_mask)\n\n self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))\n self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))\n self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n inputs_dict = {\"input_ids\": input_ids}\n return config, inputs_dict\n\n\n@require_tf\nclass TFDPRModelTest(TFModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (\n TFDPRContextEncoder,\n TFDPRQuestionEncoder,\n TFDPRReader,\n )\n if is_tf_available()\n else ()\n )\n\n test_resize_embeddings = False\n test_missing_keys = False\n test_pruning = False\n test_head_masking = False\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFDPRModelTester(self)\n self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_dpr_context_encoder_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)\n\n def test_dpr_question_encoder_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)\n\n def test_dpr_reader_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_dpr_reader(*config_and_inputs)\n\n @slow\n def 
test_model_from_pretrained(self):\n for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFDPRContextEncoder.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFDPRContextEncoder.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFDPRQuestionEncoder.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFDPRReader.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_tf\nclass TFDPRModelIntegrationTest(unittest.TestCase):\n @slow\n def test_inference_no_head(self):\n model = TFDPRQuestionEncoder.from_pretrained(\"facebook/dpr-question_encoder-single-nq-base\")\n\n input_ids = tf.constant(\n [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]\n ) # [CLS] hello, is my dog cute? [SEP]\n output = model(input_ids)[0] # embedding shape = (1, 768)\n # compare the actual values for a slice.\n expected_slice = tf.constant(\n [\n [\n 0.03236253,\n 0.12753335,\n 0.16818509,\n 0.00279786,\n 0.3896933,\n 0.24264945,\n 0.2178971,\n -0.02335227,\n -0.08481959,\n -0.14324117,\n ]\n ]\n )\n self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))\n",
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning a 🤗 Transformers model for question answering using 🤗 Accelerate.\n\"\"\"\n# You can also adapt this script on your own question answering task. Pointers for this are left as comments.\n\nimport argparse\nimport logging\nimport math\nimport os\nimport random\nfrom pathlib import Path\n\nimport datasets\nimport numpy as np\nimport torch\nfrom datasets import load_dataset, load_metric\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\n\nimport transformers\nfrom accelerate import Accelerator\nfrom huggingface_hub import Repository\nfrom transformers import (\n CONFIG_MAPPING,\n MODEL_MAPPING,\n AdamW,\n AutoConfig,\n AutoModelForQuestionAnswering,\n AutoTokenizer,\n DataCollatorWithPadding,\n EvalPrediction,\n SchedulerType,\n default_data_collator,\n get_scheduler,\n set_seed,\n)\nfrom transformers.utils import check_min_version, get_full_repo_name\nfrom transformers.utils.versions import require_version\nfrom utils_qa import postprocess_qa_predictions\n\n\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\ncheck_min_version(\"4.19.0.dev0\")\n\nrequire_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/question-answering/requirements.txt\")\n\nlogger = logging.getLogger(__name__)\n# You should update this to your particular problem to have better documentation of `model_type`\nMODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a Question Answering task\")\n parser.add_argument(\n \"--dataset_name\",\n type=str,\n default=None,\n help=\"The name of the dataset to use (via the datasets library).\",\n )\n parser.add_argument(\n \"--dataset_config_name\",\n type=str,\n default=None,\n help=\"The configuration name of the dataset to use (via the datasets library).\",\n )\n parser.add_argument(\n \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\n )\n parser.add_argument(\n \"--preprocessing_num_workers\", type=int, default=4, help=\"A csv or a json file containing the training data.\"\n )\n parser.add_argument(\"--do_predict\", action=\"store_true\", help=\"To do prediction on the question answering model\")\n parser.add_argument(\n \"--validation_file\", type=str, default=None, help=\"A csv or a json file containing the validation data.\"\n )\n parser.add_argument(\n \"--test_file\", type=str, default=None, help=\"A csv or a json file containing the Prediction data.\"\n )\n parser.add_argument(\n \"--max_seq_length\",\n type=int,\n default=384,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated,\"\n \" sequences shorter will be padded if `--pad_to_max_lengh` is passed.\",\n )\n parser.add_argument(\n \"--pad_to_max_length\",\n action=\"store_true\",\n help=\"If passed, pad all samples to `max_seq_length`. Otherwise, dynamic padding is used.\",\n )\n parser.add_argument(\n \"--model_name_or_path\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n required=True,\n )\n parser.add_argument(\n \"--config_name\",\n type=str,\n default=None,\n help=\"Pretrained config name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n type=str,\n default=None,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--use_slow_tokenizer\",\n action=\"store_true\",\n help=\"If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).\",\n )\n parser.add_argument(\n \"--per_device_train_batch_size\",\n type=int,\n default=8,\n help=\"Batch size (per device) for the training dataloader.\",\n )\n parser.add_argument(\n \"--per_device_eval_batch_size\",\n type=int,\n default=8,\n help=\"Batch size (per device) for the evaluation dataloader.\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=5e-5,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n )\n parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\n parser.add_argument(\n \"--max_train_steps\",\n type=int,\n default=None,\n help=\"Total number of training steps to perform. If provided, overrides num_train_epochs.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\n \"--lr_scheduler_type\",\n type=SchedulerType,\n default=\"linear\",\n help=\"The scheduler type to use.\",\n choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n )\n parser.add_argument(\n \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n )\n parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n parser.add_argument(\n \"--doc_stride\",\n type=int,\n default=128,\n help=\"When splitting up a long document into chunks how much stride to take between chunks.\",\n )\n parser.add_argument(\n \"--n_best_size\",\n type=int,\n default=20,\n help=\"The total number of n-best predictions to generate when looking for an answer.\",\n )\n parser.add_argument(\n \"--null_score_diff_threshold\",\n type=float,\n default=0.0,\n help=\"The threshold used to select the null answer: if the best answer has a score that is less than \"\n \"the score of the null answer minus this threshold, the null answer is selected for this example. 
\"\n \"Only useful when `version_2_with_negative=True`.\",\n )\n parser.add_argument(\n \"--version_2_with_negative\",\n type=bool,\n default=False,\n help=\"If true, some of the examples do not have an answer.\",\n )\n parser.add_argument(\n \"--max_answer_length\",\n type=int,\n default=30,\n help=\"The maximum length of an answer that can be generated. This is needed because the start \"\n \"and end predictions are not conditioned on one another.\",\n )\n parser.add_argument(\n \"--max_train_samples\",\n type=int,\n default=None,\n help=\"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\",\n )\n parser.add_argument(\n \"--max_eval_samples\",\n type=int,\n default=None,\n help=\"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\",\n )\n parser.add_argument(\n \"--overwrite_cache\", type=bool, default=False, help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\n \"--max_predict_samples\",\n type=int,\n default=None,\n help=\"For debugging purposes or quicker training, truncate the number of prediction examples to this\",\n )\n parser.add_argument(\n \"--model_type\",\n type=str,\n default=None,\n help=\"Model type to use if training from scratch.\",\n choices=MODEL_TYPES,\n )\n parser.add_argument(\"--push_to_hub\", action=\"store_true\", help=\"Whether or not to push the model to the Hub.\")\n parser.add_argument(\n \"--hub_model_id\", type=str, help=\"The name of the repository to keep in sync with the local `output_dir`.\"\n )\n parser.add_argument(\"--hub_token\", type=str, help=\"The token to use to push to the Model Hub.\")\n parser.add_argument(\n \"--checkpointing_steps\",\n type=str,\n default=None,\n help=\"Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.\",\n )\n parser.add_argument(\n \"--resume_from_checkpoint\",\n type=str,\n default=None,\n help=\"If the training should continue from a checkpoint folder.\",\n )\n parser.add_argument(\n \"--with_tracking\",\n required=False,\n help=\"Whether to load in all available experiment trackers from the environment and use them for logging.\",\n )\n args = parser.parse_args()\n\n # Sanity checks\n if (\n args.dataset_name is None\n and args.train_file is None\n and args.validation_file is None\n and args.test_file is None\n ):\n raise ValueError(\"Need either a dataset name or a training/validation/test file.\")\n else:\n if args.train_file is not None:\n extension = args.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if args.validation_file is not None:\n extension = args.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n if args.test_file is not None:\n extension = args.test_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`test_file` should be a csv or a json file.\"\n\n if args.push_to_hub:\n assert args.output_dir is not None, \"Need an `output_dir` to create a repo when `--push_to_hub` is passed.\"\n\n return args\n\n\ndef main():\n args = parse_args()\n\n # Initialize the accelerator. 
We will let the accelerator handle device placement for us in this example.\n # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment\n accelerator = Accelerator(log_with=\"all\") if args.with_tracking else Accelerator()\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n logger.info(accelerator.state)\n\n # Setup logging, we only want one process per machine to log things on the screen.\n # accelerator.is_local_main_process is only True for one process per machine.\n logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)\n if accelerator.is_local_main_process:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n set_seed(args.seed)\n\n # Handle the repository creation\n if accelerator.is_main_process:\n if args.push_to_hub:\n if args.hub_model_id is None:\n repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)\n else:\n repo_name = args.hub_model_id\n repo = Repository(args.output_dir, clone_from=repo_name)\n elif args.output_dir is not None:\n os.makedirs(args.output_dir, exist_ok=True)\n accelerator.wait_for_everyone()\n\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. 
You can easily tweak this behavior (see below).\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)\n else:\n data_files = {}\n if args.train_file is not None:\n data_files[\"train\"] = args.train_file\n if args.validation_file is not None:\n data_files[\"validation\"] = args.validation_file\n if args.test_file is not None:\n data_files[\"test\"] = args.test_file\n extension = args.train_file.split(\".\")[-1]\n raw_datasets = load_dataset(extension, data_files=data_files, field=\"data\")\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model and tokenizer\n #\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n\n if args.config_name:\n config = AutoConfig.from_pretrained(args.config_name)\n elif args.model_name_or_path:\n config = AutoConfig.from_pretrained(args.model_name_or_path)\n else:\n config = CONFIG_MAPPING[args.model_type]()\n logger.warning(\"You are instantiating a new config instance from scratch.\")\n\n if args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)\n elif args.model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)\n else:\n raise ValueError(\n \"You are instantiating a new tokenizer from scratch. This is not supported by this script.\"\n \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\n )\n\n if args.model_name_or_path:\n model = AutoModelForQuestionAnswering.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n )\n else:\n logger.info(\"Training new model from scratch\")\n model = AutoModelForQuestionAnswering.from_config(config)\n\n # Preprocessing the datasets.\n # Preprocessing is slighlty different for training and evaluation.\n\n column_names = raw_datasets[\"train\"].column_names\n\n question_column_name = \"question\" if \"question\" in column_names else column_names[0]\n context_column_name = \"context\" if \"context\" in column_names else column_names[1]\n answer_column_name = \"answers\" if \"answers\" in column_names else column_names[2]\n\n # Padding side determines if we do (question|context) or (context|question).\n pad_on_right = tokenizer.padding_side == \"right\"\n\n if args.max_seq_length > tokenizer.model_max_length:\n logger.warning(\n f\"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the\"\n f\"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.\"\n )\n\n max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)\n\n # Training preprocessing\n def prepare_train_features(examples):\n # Some of the questions have lots of whitespace on the left, which is not useful and will make the\n # truncation of the context fail (the tokenized question will take a lots of space). 
So we remove that\n # left whitespace\n examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]\n\n # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results\n # in one example possible giving several features when a context is long, each of those features having a\n # context that overlaps a bit the context of the previous feature.\n tokenized_examples = tokenizer(\n examples[question_column_name if pad_on_right else context_column_name],\n examples[context_column_name if pad_on_right else question_column_name],\n truncation=\"only_second\" if pad_on_right else \"only_first\",\n max_length=max_seq_length,\n stride=args.doc_stride,\n return_overflowing_tokens=True,\n return_offsets_mapping=True,\n padding=\"max_length\" if args.pad_to_max_length else False,\n )\n\n # Since one example might give us several features if it has a long context, we need a map from a feature to\n # its corresponding example. This key gives us just that.\n sample_mapping = tokenized_examples.pop(\"overflow_to_sample_mapping\")\n # The offset mappings will give us a map from token to character position in the original context. This will\n # help us compute the start_positions and end_positions.\n offset_mapping = tokenized_examples.pop(\"offset_mapping\")\n\n # Let's label those examples!\n tokenized_examples[\"start_positions\"] = []\n tokenized_examples[\"end_positions\"] = []\n\n for i, offsets in enumerate(offset_mapping):\n # We will label impossible answers with the index of the CLS token.\n input_ids = tokenized_examples[\"input_ids\"][i]\n cls_index = input_ids.index(tokenizer.cls_token_id)\n\n # Grab the sequence corresponding to that example (to know what is the context and what is the question).\n sequence_ids = tokenized_examples.sequence_ids(i)\n\n # One example can give several spans, this is the index of the example containing this span of text.\n sample_index = sample_mapping[i]\n answers = examples[answer_column_name][sample_index]\n # If no answers are given, set the cls_index as answer.\n if len(answers[\"answer_start\"]) == 0:\n tokenized_examples[\"start_positions\"].append(cls_index)\n tokenized_examples[\"end_positions\"].append(cls_index)\n else:\n # Start/end character index of the answer in the text.\n start_char = answers[\"answer_start\"][0]\n end_char = start_char + len(answers[\"text\"][0])\n\n # Start token index of the current span in the text.\n token_start_index = 0\n while sequence_ids[token_start_index] != (1 if pad_on_right else 0):\n token_start_index += 1\n\n # End token index of the current span in the text.\n token_end_index = len(input_ids) - 1\n while sequence_ids[token_end_index] != (1 if pad_on_right else 0):\n token_end_index -= 1\n\n # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).\n if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):\n tokenized_examples[\"start_positions\"].append(cls_index)\n tokenized_examples[\"end_positions\"].append(cls_index)\n else:\n # Otherwise move the token_start_index and token_end_index to the two ends of the answer.\n # Note: we could go after the last offset if the answer is the last word (edge case).\n while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:\n token_start_index += 1\n tokenized_examples[\"start_positions\"].append(token_start_index - 1)\n while offsets[token_end_index][1] >= end_char:\n token_end_index -= 
1\n tokenized_examples[\"end_positions\"].append(token_end_index + 1)\n\n return tokenized_examples\n\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = raw_datasets[\"train\"]\n if args.max_train_samples is not None:\n # We will select sample from whole data if agument is specified\n train_dataset = train_dataset.select(range(args.max_train_samples))\n\n # Create train feature from dataset\n with accelerator.main_process_first():\n train_dataset = train_dataset.map(\n prepare_train_features,\n batched=True,\n num_proc=args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not args.overwrite_cache,\n desc=\"Running tokenizer on train dataset\",\n )\n if args.max_train_samples is not None:\n # Number of samples might increase during Feature Creation, We select only specified max samples\n train_dataset = train_dataset.select(range(args.max_train_samples))\n\n # Validation preprocessing\n def prepare_validation_features(examples):\n # Some of the questions have lots of whitespace on the left, which is not useful and will make the\n # truncation of the context fail (the tokenized question will take a lots of space). So we remove that\n # left whitespace\n examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]\n\n # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results\n # in one example possible giving several features when a context is long, each of those features having a\n # context that overlaps a bit the context of the previous feature.\n tokenized_examples = tokenizer(\n examples[question_column_name if pad_on_right else context_column_name],\n examples[context_column_name if pad_on_right else question_column_name],\n truncation=\"only_second\" if pad_on_right else \"only_first\",\n max_length=max_seq_length,\n stride=args.doc_stride,\n return_overflowing_tokens=True,\n return_offsets_mapping=True,\n padding=\"max_length\" if args.pad_to_max_length else False,\n )\n\n # Since one example might give us several features if it has a long context, we need a map from a feature to\n # its corresponding example. 
This key gives us just that.\n sample_mapping = tokenized_examples.pop(\"overflow_to_sample_mapping\")\n\n # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the\n # corresponding example_id and we will store the offset mappings.\n tokenized_examples[\"example_id\"] = []\n\n for i in range(len(tokenized_examples[\"input_ids\"])):\n # Grab the sequence corresponding to that example (to know what is the context and what is the question).\n sequence_ids = tokenized_examples.sequence_ids(i)\n context_index = 1 if pad_on_right else 0\n\n # One example can give several spans, this is the index of the example containing this span of text.\n sample_index = sample_mapping[i]\n tokenized_examples[\"example_id\"].append(examples[\"id\"][sample_index])\n\n # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token\n # position is part of the context or not.\n tokenized_examples[\"offset_mapping\"][i] = [\n (o if sequence_ids[k] == context_index else None)\n for k, o in enumerate(tokenized_examples[\"offset_mapping\"][i])\n ]\n\n return tokenized_examples\n\n if \"validation\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_examples = raw_datasets[\"validation\"]\n if args.max_eval_samples is not None:\n # We will select sample from whole data\n eval_examples = eval_examples.select(range(args.max_eval_samples))\n # Validation Feature Creation\n with accelerator.main_process_first():\n eval_dataset = eval_examples.map(\n prepare_validation_features,\n batched=True,\n num_proc=args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not args.overwrite_cache,\n desc=\"Running tokenizer on validation dataset\",\n )\n\n if args.max_eval_samples is not None:\n # During Feature creation dataset samples might increase, we will select required samples again\n eval_dataset = eval_dataset.select(range(args.max_eval_samples))\n\n if args.do_predict:\n if \"test\" not in raw_datasets:\n raise ValueError(\"--do_predict requires a test dataset\")\n predict_examples = raw_datasets[\"test\"]\n if args.max_predict_samples is not None:\n # We will select sample from whole data\n predict_examples = predict_examples.select(range(args.max_predict_samples))\n # Predict Feature Creation\n with accelerator.main_process_first():\n predict_dataset = predict_examples.map(\n prepare_validation_features,\n batched=True,\n num_proc=args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not args.overwrite_cache,\n desc=\"Running tokenizer on prediction dataset\",\n )\n if args.max_predict_samples is not None:\n # During Feature creation dataset samples might increase, we will select required samples again\n predict_dataset = predict_dataset.select(range(args.max_predict_samples))\n\n # Log a few random samples from the training set:\n for index in random.sample(range(len(train_dataset)), 3):\n logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n # DataLoaders creation:\n if args.pad_to_max_length:\n # If padding was already done ot max length, we use the default data collator that will just convert everything\n # to tensors.\n data_collator = default_data_collator\n else:\n # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of\n # the samples passed). 
When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple\n # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\n data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))\n\n train_dataloader = DataLoader(\n train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size\n )\n\n eval_dataset_for_model = eval_dataset.remove_columns([\"example_id\", \"offset_mapping\"])\n eval_dataloader = DataLoader(\n eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size\n )\n\n if args.do_predict:\n predict_dataset_for_model = predict_dataset.remove_columns([\"example_id\", \"offset_mapping\"])\n predict_dataloader = DataLoader(\n predict_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size\n )\n\n # Post-processing:\n def post_processing_function(examples, features, predictions, stage=\"eval\"):\n # Post-processing: we match the start logits and end logits to answers in the original context.\n predictions = postprocess_qa_predictions(\n examples=examples,\n features=features,\n predictions=predictions,\n version_2_with_negative=args.version_2_with_negative,\n n_best_size=args.n_best_size,\n max_answer_length=args.max_answer_length,\n null_score_diff_threshold=args.null_score_diff_threshold,\n output_dir=args.output_dir,\n prefix=stage,\n )\n # Format the result to the format the metric expects.\n if args.version_2_with_negative:\n formatted_predictions = [\n {\"id\": k, \"prediction_text\": v, \"no_answer_probability\": 0.0} for k, v in predictions.items()\n ]\n else:\n formatted_predictions = [{\"id\": k, \"prediction_text\": v} for k, v in predictions.items()]\n\n references = [{\"id\": ex[\"id\"], \"answers\": ex[answer_column_name]} for ex in examples]\n return EvalPrediction(predictions=formatted_predictions, label_ids=references)\n\n metric = load_metric(\"squad_v2\" if args.version_2_with_negative else \"squad\")\n\n # Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor\n def create_and_fill_np_array(start_or_end_logits, dataset, max_len):\n \"\"\"\n Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor\n\n Args:\n start_or_end_logits(:obj:`tensor`):\n This is the output predictions of the model. We can only enter either start or end logits.\n eval_dataset: Evaluation dataset\n max_len(:obj:`int`):\n The maximum length of the output tensor. 
( See the model.eval() part for more details )\n \"\"\"\n\n step = 0\n # create a numpy array and fill it with -100.\n logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64)\n # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather\n for i, output_logit in enumerate(start_or_end_logits): # populate columns\n # We have to fill it such that we have to take the whole tensor and replace it on the newly created array\n # And after every iteration we have to change the step\n\n batch_size = output_logit.shape[0]\n cols = output_logit.shape[1]\n\n if step + batch_size < len(dataset):\n logits_concat[step : step + batch_size, :cols] = output_logit\n else:\n logits_concat[step:, :cols] = output_logit[: len(dataset) - step]\n\n step += batch_size\n\n return logits_concat\n\n # Optimizer\n # Split weights in two groups, one with weight decay and the other not.\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n\n # Scheduler and math around the number of training steps.\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n if args.max_train_steps is None:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n else:\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n lr_scheduler = get_scheduler(\n name=args.lr_scheduler_type,\n optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps,\n num_training_steps=args.max_train_steps,\n )\n\n # Prepare everything with our `accelerator`.\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n\n # Figure out how many steps we should save the Accelerator states\n if hasattr(args.checkpointing_steps, \"isdigit\"):\n checkpointing_steps = args.checkpointing_steps\n if args.checkpointing_steps.isdigit():\n checkpointing_steps = int(args.checkpointing_steps)\n else:\n checkpointing_steps = None\n\n # We need to initialize the trackers we use, and also store our configuration\n if args.with_tracking:\n accelerator.init_trackers(\"clm_no_trainer\", args)\n\n # Train!\n total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n\n # Only show the progress bar once on each machine.\n progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n completed_steps = 0\n\n # Potentially load in the weights and states from a previous save\n if args.resume_from_checkpoint:\n if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != \"\":\n accelerator.print(f\"Resumed from checkpoint: {args.resume_from_checkpoint}\")\n accelerator.load_state(args.resume_from_checkpoint)\n resume_step = None\n path = args.resume_from_checkpoint\n else:\n # Get the most recent checkpoint\n dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]\n dirs.sort(key=os.path.getctime)\n path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last\n if \"epoch\" in path:\n args.num_train_epochs -= int(path.replace(\"epoch_\", \"\"))\n else:\n resume_step = int(path.replace(\"step_\", \"\"))\n args.num_train_epochs -= resume_step // len(train_dataloader)\n resume_step = (args.num_train_epochs * len(train_dataloader)) - resume_step\n\n for epoch in range(args.num_train_epochs):\n model.train()\n if args.with_tracking:\n total_loss = 0\n for step, batch in enumerate(train_dataloader):\n # We need to skip steps until we reach the resumed step\n if args.resume_from_checkpoint and epoch == 0 and step < resume_step:\n continue\n outputs = model(**batch)\n loss = outputs.loss\n # We keep track of the loss at each epoch\n if args.with_tracking:\n total_loss += loss.detach().float()\n loss = loss / args.gradient_accumulation_steps\n accelerator.backward(loss)\n if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n completed_steps += 1\n\n if isinstance(checkpointing_steps, int):\n if completed_steps % checkpointing_steps == 0:\n accelerator.save_state(f\"step_{completed_steps}\")\n\n if completed_steps >= args.max_train_steps:\n break\n\n if args.push_to_hub and epoch < args.num_train_epochs - 1:\n accelerator.wait_for_everyone()\n unwrapped_model = accelerator.unwrap_model(model)\n unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)\n if accelerator.is_main_process:\n tokenizer.save_pretrained(args.output_dir)\n repo.push_to_hub(\n commit_message=f\"Training in progress epoch {epoch}\", blocking=False, auto_lfs_prune=True\n )\n\n # Evaluation\n logger.info(\"***** Running Evaluation *****\")\n logger.info(f\" Num examples = {len(eval_dataset)}\")\n logger.info(f\" Batch size = {args.per_device_eval_batch_size}\")\n\n all_start_logits = []\n all_end_logits = []\n for step, batch in enumerate(eval_dataloader):\n with torch.no_grad():\n outputs = model(**batch)\n start_logits = outputs.start_logits\n end_logits = outputs.end_logits\n\n if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered\n start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)\n end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)\n\n all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())\n all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())\n\n max_len = max([x.shape[1] for x in all_start_logits]) # Get the 
max_length of the tensor\n\n # concatenate the numpy array\n start_logits_concat = create_and_fill_np_array(all_start_logits, eval_dataset, max_len)\n end_logits_concat = create_and_fill_np_array(all_end_logits, eval_dataset, max_len)\n\n # delete the list of numpy arrays\n del all_start_logits\n del all_end_logits\n\n outputs_numpy = (start_logits_concat, end_logits_concat)\n prediction = post_processing_function(eval_examples, eval_dataset, outputs_numpy)\n eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)\n logger.info(f\"Evaluation metrics: {eval_metric}\")\n\n # Prediction\n if args.do_predict:\n logger.info(\"***** Running Prediction *****\")\n logger.info(f\" Num examples = {len(predict_dataset)}\")\n logger.info(f\" Batch size = {args.per_device_eval_batch_size}\")\n\n all_start_logits = []\n all_end_logits = []\n for step, batch in enumerate(predict_dataloader):\n with torch.no_grad():\n outputs = model(**batch)\n start_logits = outputs.start_logits\n end_logits = outputs.end_logits\n\n if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered\n start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)\n end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)\n\n all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())\n all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())\n\n max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor\n # concatenate the numpy array\n start_logits_concat = create_and_fill_np_array(all_start_logits, predict_dataset, max_len)\n end_logits_concat = create_and_fill_np_array(all_end_logits, predict_dataset, max_len)\n\n # delete the list of numpy arrays\n del all_start_logits\n del all_end_logits\n\n outputs_numpy = (start_logits_concat, end_logits_concat)\n prediction = post_processing_function(predict_examples, predict_dataset, outputs_numpy)\n predict_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)\n logger.info(f\"Predict metrics: {predict_metric}\")\n\n if args.with_tracking:\n log = {\n \"squad_v2\" if args.version_2_with_negative else \"squad\": eval_metric,\n \"train_loss\": total_loss,\n \"epoch\": epoch,\n }\n if args.do_predict:\n log[\"squad_v2_predict\" if args.version_2_with_negative else \"squad_predict\"] = predict_metric\n\n accelerator.log(log, step=completed_steps)\n\n if args.checkpointing_steps == \"epoch\":\n accelerator.save_state(f\"epoch_{epoch}\")\n\n if args.output_dir is not None:\n accelerator.wait_for_everyone()\n unwrapped_model = accelerator.unwrap_model(model)\n unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)\n if accelerator.is_main_process:\n tokenizer.save_pretrained(args.output_dir)\n if args.push_to_hub:\n repo.push_to_hub(commit_message=\"End of training\", auto_lfs_prune=True)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.expand_dims"
],
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.cast",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.tile",
"tensorflow.matmul",
"tensorflow.fill",
"tensorflow.executing_eagerly",
"tensorflow.math.rsqrt",
"tensorflow.not_equal",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.keras.layers.Dropout"
],
[
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.zeros",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.debugging.assert_near",
"tensorflow.where"
],
[
"tensorflow.constant"
],
[
"torch.no_grad",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.4",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nihalsid/texture_fields | [
"dcd091a5f40fe433dbc47f2055d1cd2d3d2a1b87",
"dcd091a5f40fe433dbc47f2055d1cd2d3d2a1b87"
] | [
"scripts/sample_mesh.py",
"mesh2tex/texnet/generation.py"
] | [
"import argparse\nimport trimesh\nimport numpy as np\nimport os\nimport glob\nimport sys\nfrom multiprocessing import Pool\nfrom functools import partial\n# TODO: do this better\nsys.path.append('..')\n\nparser = argparse.ArgumentParser('Sample a watertight mesh.')\nparser.add_argument('in_folder', type=str,\n help='Path to input watertight meshes.')\nparser.add_argument('--ext', type=str, default=\"off\",\n help='Mesh extension')\nparser.add_argument('--n_proc', type=int, default=0,\n help='Number of processes to use.')\n\nparser.add_argument('--resize', action='store_true',\n help='When active, resizes the mesh to bounding box.')\n\nparser.add_argument('--rotate_xz', type=float, default=0.,\n help='Angle to rotate around y axis.')\n\nparser.add_argument('--bbox_padding', type=float, default=0.,\n help='Padding for bounding box')\nparser.add_argument('--bbox_in_folder', type=str,\n help='Path to other input folder to extract'\n 'bounding boxes.')\n\nparser.add_argument('--pointcloud_folder', type=str,\n help='Output path for point cloud.')\nparser.add_argument('--pointcloud_size', type=int, default=100000,\n help='Size of point cloud.')\n\nparser.add_argument('--voxels_folder', type=str,\n help='Output path for voxelization.')\nparser.add_argument('--voxels_res', type=int, default=32,\n help='Resolution for voxelization.')\n\nparser.add_argument('--points_folder', type=str,\n help='Output path for points.')\nparser.add_argument('--points_size', type=int, default=100000,\n help='Size of points.')\nparser.add_argument('--points_uniform_ratio', type=float, default=1.,\n help='Ratio of points to sample uniformly'\n 'in bounding box.')\nparser.add_argument('--points_sigma', type=float, default=0.01,\n help='Standard deviation of gaussian noise added to points'\n 'samples on the surfaces.')\nparser.add_argument('--points_padding', type=float, default=0.1,\n help='Additional padding applied to the uniformly'\n 'sampled points on both sides (in total).')\n\nparser.add_argument('--mesh_folder', type=str,\n help='Output path for mesh.')\n\nparser.add_argument('--overwrite', action='store_true',\n help='Whether to overwrite output.')\nparser.add_argument('--float16', action='store_true',\n help='Whether to use half precision.')\nparser.add_argument('--packbits', action='store_true',\n help='Whether to save truth values as bit array.')\nparser.add_argument('--fixed_bbox', action='store_true',\n help='96x96x96 bbox')\n\n \ndef main(args):\n input_files = glob.glob(os.path.join(args.in_folder, \"*\"))\n if args.n_proc != 0:\n with Pool(args.n_proc) as p:\n p.map(partial(process_path, args=args), input_files)\n else:\n for p in input_files:\n process_path(p, args)\n \n\ndef process_path(in_path, args):\n modelname = os.path.basename(in_path)\n in_path = os.path.join(in_path, \"model_c.obj\")\n mesh = trimesh.load(in_path, process=False)\n\n # Determine bounding box\n if not args.resize:\n # Standard bounding boux\n loc = np.zeros(3)\n scale = 1.\n else:\n if args.bbox_in_folder is not None:\n in_path_tmp = os.path.join(args.bbox_in_folder, modelname + '.off')\n mesh_tmp = trimesh.load(in_path_tmp, process=False)\n bbox = mesh_tmp.bounding_box.bounds\n elif args.fixed_bbox:\n bbox = np.array([[0, 0, 0], [96, 96, 96]], dtype=np.float32)\n else:\n bbox = mesh.bounding_box.bounds\n\n # Compute location and scale\n loc = (bbox[0] + bbox[1]) / 2\n scale = (bbox[1] - bbox[0]).max() / (1 - args.bbox_padding)\n\n # Transform input mesh\n mesh.apply_translation(-loc)\n mesh.apply_scale(1 / scale)\n\n if 
args.rotate_xz != 0:\n angle = args.rotate_xz / 180 * np.pi\n R = trimesh.transformations.rotation_matrix(angle, [0, 1, 0])\n mesh.apply_transform(R)\n\n # Expert various modalities\n if args.pointcloud_folder is not None:\n export_pointcloud(mesh, modelname, loc, scale, args)\n\n if args.voxels_folder is not None:\n export_voxels(mesh, modelname, loc, scale, args)\n\n if args.points_folder is not None:\n export_points(mesh, modelname, loc, scale, args)\n\n if args.mesh_folder is not None:\n export_mesh(mesh, modelname, loc, scale, args)\n\n\ndef export_pointcloud(mesh, modelname, loc, scale, args):\n filename = os.path.join(args.pointcloud_folder,\n modelname, 'pointcloud.npz')\n if not args.overwrite and os.path.exists(filename):\n print('Pointcloud already exist: %s' % filename)\n return\n elif not os.path.exists(os.path.join(args.pointcloud_folder, modelname)):\n print(f\"folder for {modelname} doesnt exist.. skipping\")\n return\n\n points, face_idx = mesh.sample(args.pointcloud_size, return_index=True)\n normals = mesh.face_normals[face_idx]\n\n # Compress\n if args.float16:\n dtype = np.float16\n else:\n dtype = np.float32\n\n points = points.astype(dtype)\n normals = normals.astype(dtype)\n\n print('Writing pointcloud: %s' % filename)\n np.savez(filename, points=points, normals=normals, loc=loc, scale=scale)\n\n\ndef export_mesh(mesh, modelname, loc, scale, args):\n filename = os.path.join(args.mesh_folder, modelname + '.off') \n if not args.overwrite and os.path.exists(filename):\n print('Mesh already exist: %s' % filename)\n return\n print('Writing mesh: %s' % filename)\n mesh.export(filename)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n main(args)\n",
"import torch\nimport numpy as np\nimport os\nfrom trimesh.util import array_to_string\nfrom mesh2tex import geometry\nfrom torchvision.utils import save_image\nfrom torch.nn.functional import interpolate\n\n#TODO comment the generation functions\n\n\nclass Generator3D(object):\n def __init__(self, model, device=None):\n\n self.model = model\n self.device = device\n\n def save_mesh(self, mesh, out_file, digits=10):\n '''\n Saving meshes to OFF file\n\n '''\n digits = int(digits)\n # prepend a 3 (face count) to each face\n if mesh.visual.face_colors is None:\n faces_stacked = np.column_stack((\n np.ones(len(mesh.faces)) * 3,\n mesh.faces)).astype(np.int64)\n else:\n assert(mesh.visual.face_colors.shape[0] == mesh.faces.shape[0])\n faces_stacked = np.column_stack((\n np.ones(len(mesh.faces)) * 3,\n mesh.faces, mesh.visual.face_colors[:, :3])).astype(np.int64)\n export = 'OFF\\n'\n # the header is vertex count, face count, edge number\n export += str(len(mesh.vertices)) + ' ' + str(len(mesh.faces)) + ' 0\\n'\n export += array_to_string(\n mesh.vertices, col_delim=' ', row_delim='\\n', digits=digits) + '\\n'\n export += array_to_string(faces_stacked, col_delim=' ', row_delim='\\n')\n\n with open(out_file, 'w') as f:\n f.write(export)\n\n return mesh\n\n def generate_images_4eval_condi(self, batch, out_dir, model_names):\n '''\n Generate textures in the conditional setting (given image)\n\n '''\n\n # Extract depth, gt, camera info, shape pc and condition\n depth = batch['2d.depth'].to(self.device)\n img_real = batch['2d.img'].to(self.device)\n cam_K = batch['2d.camera_mat'].to(self.device)\n cam_W = batch['2d.world_mat'].to(self.device)\n mesh_repr = geometry.get_representation(batch, self.device)\n mesh_points = mesh_repr['points']\n mesh_normals = mesh_repr['normals']\n condition = batch['condition'].to(self.device)\n\n # Determine constants and check\n batch_size = depth.size(0)\n num_views = depth.size(1)\n\n # Define Output folders\n out_dir_real = out_dir + \"/real/\"\n out_dir_fake = out_dir + \"/fake/\"\n out_dir_condition = out_dir + \"/condition/\"\n if not os.path.exists(out_dir_real):\n os.makedirs(out_dir_real)\n if not os.path.exists(out_dir_fake):\n os.makedirs(out_dir_fake)\n if not os.path.exists(out_dir_condition):\n os.makedirs(out_dir_condition)\n\n # Batch loop\n for j in range(batch_size):\n \n # Expand shape info to tensors\n # for all views of the same objects\n geom_repr = {\n 'points': mesh_points[j][:num_views].expand(\n num_views, mesh_points.size(1),\n mesh_points.size(2)),\n 'normals': mesh_normals[j][:num_views].expand(\n num_views, mesh_normals.size(1),\n mesh_normals.size(2)),\n }\n\n depth_ = depth[j][:num_views]\n img_real_ = img_real[j][:num_views]\n condition_ = condition[j][:num_views].expand(\n num_views, condition.size(1),\n condition.size(2), condition.size(3))\n cam_K_ = cam_K[j][:num_views]\n cam_W_ = cam_W[j][:num_views]\n\n # Generate images and save\n self.model.eval()\n with torch.no_grad():\n img_fake = self.model(depth_, cam_K_, cam_W_,\n geom_repr, condition_)\n\n save_image(\n condition[j].cpu(),\n os.path.join(out_dir_condition,\n '%s.png' % (model_names[j])))\n\n for v in range(num_views):\n save_image(\n img_real_[v],\n os.path.join(out_dir_real,\n '%s%03d.png' % (model_names[j], v)))\n save_image(\n img_fake[v].cpu(),\n os.path.join(out_dir_fake,\n '%s%03d.png' % (model_names[j], v)))\n\n def generate_images_4eval_condi_hd(self, batch, out_dir, model_names):\n '''\n Generate textures in hd images given condition\n\n '''\n\n # Extract depth, 
gt, camera info, shape pc and condition\n depth = batch['2d.depth']\n img_real = batch['2d.img']\n cam_K = batch['2d.camera_mat']\n cam_W = batch['2d.world_mat']\n mesh_repr = geometry.get_representation(batch, self.device)\n mesh_points = mesh_repr['points']\n mesh_normals = mesh_repr['normals'] \n condition = batch['condition']\n\n # Determine constants and check\n batch_size = depth.size(0)\n num_views = depth.size(1)\n\n # Define Output folders\n out_dir_real = out_dir + \"/real/\"\n out_dir_fake = out_dir + \"/fake/\"\n out_dir_condition = out_dir + \"/condition/\"\n if not os.path.exists(out_dir_real):\n os.makedirs(out_dir_real)\n if not os.path.exists(out_dir_fake):\n os.makedirs(out_dir_fake)\n if not os.path.exists(out_dir_condition):\n os.makedirs(out_dir_condition)\n \n # Loop through batch and views, because of memory requirement\n viewbatchsize = 1\n viewbatchnum = int(num_views / viewbatchsize)\n for j in range(batch_size):\n for vidx in range(viewbatchnum):\n lower = vidx * viewbatchsize\n upper = (vidx + 1) * viewbatchsize\n\n # Expand shape info to tensors\n # for all views of the same objects\n geom_repr = {\n 'points': mesh_points[j][:4].expand(\n viewbatchsize, mesh_points.size(1),\n mesh_points.size(2)),\n 'normals': mesh_normals[j][:4].expand(\n viewbatchsize, mesh_normals.size(1),\n mesh_normals.size(2)),\n }\n\n depth_ = depth[j][lower:upper].to(self.device)\n img_real_ = img_real[j][lower:upper]\n if len(condition.size()) == 1:\n condition_ = condition[j:j+1].expand(\n viewbatchsize)\n else:\n condition_ = condition[j:j+1][:4].expand(\n viewbatchsize, condition.size(1),\n condition.size(2), condition.size(3)).to(self.device)\n cam_K_ = cam_K[j][lower:upper].to(self.device)\n cam_W_ = cam_W[j][lower:upper].to(self.device)\n\n # Generate images and save\n self.model.eval()\n with torch.no_grad():\n img_fake = self.model(depth_, cam_K_, cam_W_,\n geom_repr, condition_)\n if len(condition.size()) != 1:\n save_image(\n condition[j].cpu(),\n os.path.join(out_dir_condition,\n '%s.png' % (model_names[j])))\n\n for v in range(viewbatchsize):\n save_image(\n img_real_[v],\n os.path.join(\n out_dir_real,\n '%s%03d.png' % (model_names[j],\n vidx * viewbatchsize + v)))\n save_image(\n img_fake[v].cpu(),\n os.path.join(\n out_dir_fake,\n '%s%03d.png' % (model_names[j],\n vidx * viewbatchsize + v)))\n\n def generate_images_4eval_vae(self, batch, out_dir, model_names):\n '''\n Generate texture using the VAE\n\n '''\n # Extract depth, gt, camera info, shape pc and condition\n depth = batch['2d.depth'].to(self.device)\n img_real = batch['2d.img'].to(self.device)\n cam_K = batch['2d.camera_mat'].to(self.device)\n cam_W = batch['2d.world_mat'].to(self.device)\n mesh_repr = geometry.get_representation(batch, self.device)\n mesh_points = mesh_repr['points']\n mesh_normals = mesh_repr['normals']\n\n # Determine constants and check\n batch_size = depth.size(0)\n num_views = depth.size(1)\n if depth.size(1) >= 10:\n num_views = 10\n\n # Define Output folders\n out_dir_real = out_dir + \"/real/\"\n out_dir_fake = out_dir + \"/fake/\"\n if not os.path.exists(out_dir_real):\n os.makedirs(out_dir_real)\n if not os.path.exists(out_dir_fake):\n os.makedirs(out_dir_fake)\n\n # batch loop\n for j in range(batch_size):\n geom_repr = {\n 'points': mesh_points[j][:num_views].expand(\n num_views, mesh_points.size(1), mesh_points.size(2)),\n 'normals': mesh_normals[j][:num_views].expand(\n num_views, mesh_normals.size(1), mesh_normals.size(2)),\n }\n depth_ = depth[j][:num_views]\n img_real_ = 
img_real[j][:num_views]\n cam_K_ = cam_K[j][:num_views]\n cam_W_ = cam_W[j][:num_views]\n\n # Sample latent code\n z_ = np.random.normal(0, 1, 512)\n inter = torch.from_numpy(z_).float().to(self.device)\n z = inter.expand(num_views, 512)\n\n # Generate images and save\n self.model.eval()\n with torch.no_grad():\n img_fake = self.model(depth_, cam_K_, cam_W_,\n geom_repr, z=z, sample=False)\n\n for v in range(num_views):\n save_image(\n img_real_[v],\n os.path.join(out_dir_real, '%s%03d.png'\n % (model_names[j], v)))\n save_image(\n img_fake[v].cpu(),\n os.path.join(out_dir_fake, '%s%03d.png'\n % (model_names[j], v)))\n\n def generate_images_4eval_vae_interpol(self, batch, out_dir, model_names):\n '''\n Interpolates between latent encoding \n of first and second element of batch \n\n '''\n # Extract depth, gt, camera info, shape pc and condition\n depth = batch['2d.depth'].to(self.device)\n img_real = batch['2d.img'].to(self.device)\n cam_K = batch['2d.camera_mat'].to(self.device)\n cam_W = batch['2d.world_mat'].to(self.device)\n mesh_repr = geometry.get_representation(batch, self.device)\n mesh_points = mesh_repr['points']\n mesh_normals = mesh_repr['normals']\n\n # Determine constants and check\n batch_size = depth.size(0)\n num_views = depth.size(1)\n if depth.size(1) >= 10:\n num_views = 10\n\n # Define Output folders\n out_dir_real = out_dir + \"/real/\"\n out_dir_fake = out_dir + \"/fake/\"\n if not os.path.exists(out_dir_real):\n os.makedirs(out_dir_real)\n if not os.path.exists(out_dir_fake):\n os.makedirs(out_dir_fake)\n\n # Derive latent texture code as starting point of interpolation\n geom_repr = {\n 'points': mesh_points[:1],\n 'normals': mesh_normals[:1],\n }\n self.model.eval()\n shape_encoding = self.model.encode_geometry(geom_repr)\n image_input = img_real[0][:1]\n img = interpolate(image_input, size=[128, 128])\n latent_input = self.model.infer_z_transfer(img, shape_encoding)\n\n # Derive latent texture code as end point of interpolation\n geom_repr2 = {\n 'points': mesh_points[1:2],\n 'normals': mesh_normals[1:2],\n }\n shape_encoding2 = self.model.encode_geometry(geom_repr2)\n image_input2 = img_real[1][:1]\n img2 = interpolate(image_input2, size=[128, 128])\n latent_input2 = self.model.infer_z_transfer(img2, shape_encoding2)\n\n # Derive stepsize\n steps = 20\n step = (latent_input2-latent_input)/steps\n\n # batch loop\n for j in range(1, batch_size):\n \n geom_repr = {\n 'points': mesh_points[j][:num_views].expand(\n num_views, mesh_points.size(1), mesh_points.size(2)),\n 'normals': mesh_normals[j][:num_views].expand(\n num_views, mesh_normals.size(1), mesh_normals.size(2)),\n }\n\n depth_ = depth[j][:num_views]\n img_real_ = img_real[j][:num_views]\n cam_K_ = cam_K[j][:num_views]\n cam_W_ = cam_W[j][:num_views]\n \n self.model.eval()\n # steps loop\n for num in range(steps):\n inter = latent_input + step*num\n z = inter.expand(num_views, 512)\n with torch.no_grad():\n img_fake = self.model(depth_, cam_K_, cam_W_,\n geom_repr, z=z, sample=False)\n for v in range(1):\n save_image(\n img_real_[v],\n os.path.join(\n out_dir_real, '%s%03d_%03d.png'\n % (model_names[j], v, num)))\n save_image(\n img_fake[v].cpu(),\n os.path.join(\n out_dir_fake, '%s%03d_%03d.png'\n % (model_names[j], v, num)))\n\n def generate_images_4eval_gan(self, batch, out_dir, model_names):\n '''\n Generate Texture using a GAN\n\n '''\n # Extract depth, gt, camera info, shape pc and condition\n depth = batch['2d.depth'].to(self.device)\n img_real = batch['2d.img'].to(self.device)\n cam_K = 
batch['2d.camera_mat'].to(self.device)\n cam_W = batch['2d.world_mat'].to(self.device)\n mesh_repr = geometry.get_representation(batch, self.device)\n mesh_points = mesh_repr['points']\n mesh_normals = mesh_repr['normals']\n\n # Determine constants and check\n batch_size = depth.size(0)\n num_views = depth.size(1)\n if depth.size(1) >= 10:\n num_views = 10\n\n # Define Output folders\n out_dir_real = out_dir + \"/real/\"\n out_dir_fake = out_dir + \"/fake/\"\n out_dir_condition = out_dir + \"/condition/\"\n if not os.path.exists(out_dir_real):\n os.makedirs(out_dir_real)\n if not os.path.exists(out_dir_fake):\n os.makedirs(out_dir_fake)\n if not os.path.exists(out_dir_condition):\n os.makedirs(out_dir_condition)\n\n # batch loop\n for j in range(batch_size):\n \n geom_repr = {\n 'points': mesh_points[j][:num_views].expand(\n num_views, mesh_points.size(1),\n mesh_points.size(2)),\n 'normals': mesh_normals[j][:num_views].expand(\n num_views, mesh_normals.size(1),\n mesh_normals.size(2)),\n }\n\n depth_ = depth[j][:num_views]\n img_real_ = img_real[j][:num_views]\n cam_K_ = cam_K[j][:num_views]\n cam_W_ = cam_W[j][:num_views]\n\n self.model.eval()\n with torch.no_grad():\n img_fake = self.model(depth_, cam_K_, cam_W_,\n geom_repr, sample=False)\n for v in range(num_views):\n save_image(\n img_real_[v],\n os.path.join(\n out_dir_real, '%s%03d.png' % (model_names[j], v)))\n save_image(\n img_fake[v].cpu(),\n os.path.join(\n out_dir_fake, '%s%03d.png' % (model_names[j], v)))\n\n\ndef make_3d_grid(bb_min, bb_max, shape):\n '''\n Outputs gird points of a 3d grid\n\n '''\n size = shape[0] * shape[1] * shape[2]\n\n pxs = torch.linspace(bb_min[0], bb_max[0], shape[0])\n pys = torch.linspace(bb_min[1], bb_max[1], shape[1])\n pzs = torch.linspace(bb_min[2], bb_max[2], shape[2])\n\n pxs = pxs.view(-1, 1, 1).expand(*shape).contiguous().view(size)\n pys = pys.view(1, -1, 1).expand(*shape).contiguous().view(size)\n pzs = pzs.view(1, 1, -1).expand(*shape).contiguous().view(size)\n p = torch.stack([pxs, pys, pzs], dim=1)\n\n return p\n"
] | [
[
"numpy.array",
"numpy.savez",
"numpy.zeros"
],
[
"torch.linspace",
"torch.from_numpy",
"numpy.random.normal",
"torch.no_grad",
"torch.nn.functional.interpolate",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chineseocr/table-detect | [
"92488f30ffaf486d29791aab63802beeb1eaca32"
] | [
"table_line.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 9 23:11:51 2020\ntable line detect\n@author: chineseocr\n\"\"\"\n\nfrom tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\nfrom tensorflow.keras.layers import LeakyReLU\nfrom tensorflow.keras.models import Model\n\n\ndef table_net(input_shape=(512, 512, 3), num_classes=1):\n inputs = Input(shape=input_shape)\n # 512\n use_bias = False\n down0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(inputs)\n down0a = BatchNormalization()(down0a)\n down0a = LeakyReLU(alpha=0.1)(down0a)\n down0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(down0a)\n down0a = BatchNormalization()(down0a)\n down0a = LeakyReLU(alpha=0.1)(down0a)\n down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)\n # 256\n\n down0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down0a_pool)\n down0 = BatchNormalization()(down0)\n\n down0 = LeakyReLU(alpha=0.1)(down0)\n down0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down0)\n down0 = BatchNormalization()(down0)\n down0 = LeakyReLU(alpha=0.1)(down0)\n down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)\n # 128\n\n down1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down0_pool)\n down1 = BatchNormalization()(down1)\n down1 = LeakyReLU(alpha=0.1)(down1)\n down1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down1)\n down1 = BatchNormalization()(down1)\n down1 = LeakyReLU(alpha=0.1)(down1)\n down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)\n # 64\n\n down2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(down1_pool)\n down2 = BatchNormalization()(down2)\n down2 = LeakyReLU(alpha=0.1)(down2)\n down2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(down2)\n down2 = BatchNormalization()(down2)\n down2 = LeakyReLU(alpha=0.1)(down2)\n down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)\n # 32\n\n down3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(down2_pool)\n down3 = BatchNormalization()(down3)\n down3 = LeakyReLU(alpha=0.1)(down3)\n down3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(down3)\n down3 = BatchNormalization()(down3)\n down3 = LeakyReLU(alpha=0.1)(down3)\n down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)\n # 16\n\n down4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(down3_pool)\n down4 = BatchNormalization()(down4)\n down4 = LeakyReLU(alpha=0.1)(down4)\n down4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(down4)\n down4 = BatchNormalization()(down4)\n down4 = LeakyReLU(alpha=0.1)(down4)\n down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)\n # 8\n\n center = Conv2D(1024, (3, 3), padding='same', use_bias=use_bias)(down4_pool)\n center = BatchNormalization()(center)\n center = LeakyReLU(alpha=0.1)(center)\n center = Conv2D(1024, (3, 3), padding='same', use_bias=use_bias)(center)\n center = BatchNormalization()(center)\n center = LeakyReLU(alpha=0.1)(center)\n # center\n\n up4 = UpSampling2D((2, 2))(center)\n up4 = concatenate([down4, up4], axis=3)\n up4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(up4)\n up4 = BatchNormalization()(up4)\n up4 = LeakyReLU(alpha=0.1)(up4)\n up4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(up4)\n up4 = BatchNormalization()(up4)\n up4 = LeakyReLU(alpha=0.1)(up4)\n up4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(up4)\n up4 = BatchNormalization()(up4)\n up4 = LeakyReLU(alpha=0.1)(up4)\n # 16\n\n up3 = UpSampling2D((2, 2))(up4)\n up3 
= concatenate([down3, up3], axis=3)\n up3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(up3)\n up3 = BatchNormalization()(up3)\n up3 = LeakyReLU(alpha=0.1)(up3)\n up3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(up3)\n up3 = BatchNormalization()(up3)\n up3 = LeakyReLU(alpha=0.1)(up3)\n up3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(up3)\n up3 = BatchNormalization()(up3)\n up3 = LeakyReLU(alpha=0.1)(up3)\n # 32\n\n up2 = UpSampling2D((2, 2))(up3)\n up2 = concatenate([down2, up2], axis=3)\n up2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(up2)\n up2 = BatchNormalization()(up2)\n up2 = LeakyReLU(alpha=0.1)(up2)\n up2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(up2)\n up2 = BatchNormalization()(up2)\n up2 = LeakyReLU(alpha=0.1)(up2)\n up2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(up2)\n up2 = BatchNormalization()(up2)\n up2 = LeakyReLU(alpha=0.1)(up2)\n # 64\n\n up1 = UpSampling2D((2, 2))(up2)\n up1 = concatenate([down1, up1], axis=3)\n up1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up1)\n up1 = BatchNormalization()(up1)\n up1 = LeakyReLU(alpha=0.1)(up1)\n up1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up1)\n up1 = BatchNormalization()(up1)\n up1 = LeakyReLU(alpha=0.1)(up1)\n up1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up1)\n up1 = BatchNormalization()(up1)\n up1 = LeakyReLU(alpha=0.1)(up1)\n # 128\n\n up0 = UpSampling2D((2, 2))(up1)\n up0 = concatenate([down0, up0], axis=3)\n up0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up0)\n up0 = BatchNormalization()(up0)\n up0 = LeakyReLU(alpha=0.1)(up0)\n up0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up0)\n up0 = BatchNormalization()(up0)\n up0 = LeakyReLU(alpha=0.1)(up0)\n up0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up0)\n up0 = BatchNormalization()(up0)\n up0 = LeakyReLU(alpha=0.1)(up0)\n # 256\n\n up0a = UpSampling2D((2, 2))(up0)\n up0a = concatenate([down0a, up0a], axis=3)\n up0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0a)\n up0a = BatchNormalization()(up0a)\n up0a = LeakyReLU(alpha=0.1)(up0a)\n up0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0a)\n up0a = BatchNormalization()(up0a)\n up0a = LeakyReLU(alpha=0.1)(up0a)\n up0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0a)\n up0a = BatchNormalization()(up0a)\n up0a = LeakyReLU(alpha=0.1)(up0a)\n # 512\n\n classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up0a)\n\n model = Model(inputs=inputs, outputs=classify)\n\n return model\n\n\nfrom config import tableModeLinePath\nfrom utils import letterbox_image, get_table_line, adjust_lines, line_to_line\nimport numpy as np\nimport cv2\n\nmodel = table_net((None, None, 3), 2)\nmodel.load_weights(tableModeLinePath)\n\n\ndef table_line(img, size=(512, 512), hprob=0.5, vprob=0.5, row=50, col=30, alph=15):\n sizew, sizeh = size\n inputBlob, fx, fy = letterbox_image(img[..., ::-1], (sizew, sizeh))\n pred = model.predict(np.array([np.array(inputBlob) / 255.0]))\n pred = pred[0]\n vpred = pred[..., 1] > vprob ##竖线\n hpred = pred[..., 0] > hprob ##横线\n vpred = vpred.astype(int)\n hpred = hpred.astype(int)\n colboxes = get_table_line(vpred, axis=1, lineW=col)\n rowboxes = get_table_line(hpred, axis=0, lineW=row)\n ccolbox = []\n crowlbox = []\n if len(rowboxes) > 0:\n rowboxes = np.array(rowboxes)\n rowboxes[:, [0, 2]] = rowboxes[:, [0, 2]] / fx\n rowboxes[:, [1, 3]] = rowboxes[:, [1, 3]] / fy\n xmin = rowboxes[:, [0, 2]].min()\n xmax = rowboxes[:, [0, 
2]].max()\n ymin = rowboxes[:, [1, 3]].min()\n ymax = rowboxes[:, [1, 3]].max()\n ccolbox = [[xmin, ymin, xmin, ymax], [xmax, ymin, xmax, ymax]]\n rowboxes = rowboxes.tolist()\n\n if len(colboxes) > 0:\n colboxes = np.array(colboxes)\n colboxes[:, [0, 2]] = colboxes[:, [0, 2]] / fx\n colboxes[:, [1, 3]] = colboxes[:, [1, 3]] / fy\n\n xmin = colboxes[:, [0, 2]].min()\n xmax = colboxes[:, [0, 2]].max()\n ymin = colboxes[:, [1, 3]].min()\n ymax = colboxes[:, [1, 3]].max()\n colboxes = colboxes.tolist()\n crowlbox = [[xmin, ymin, xmax, ymin], [xmin, ymax, xmax, ymax]]\n\n rowboxes += crowlbox\n colboxes += ccolbox\n\n rboxes_row_, rboxes_col_ = adjust_lines(rowboxes, colboxes, alph=alph)\n rowboxes += rboxes_row_\n colboxes += rboxes_col_\n nrow = len(rowboxes)\n ncol = len(colboxes)\n for i in range(nrow):\n for j in range(ncol):\n rowboxes[i] = line_to_line(rowboxes[i], colboxes[j], 10)\n colboxes[j] = line_to_line(colboxes[j], rowboxes[i], 10)\n\n return rowboxes, colboxes\n\n\nif __name__ == '__main__':\n import time\n\n p = 'img/table-detect.jpg'\n from utils import draw_lines\n\n img = cv2.imread(p)\n t = time.time()\n rowboxes, colboxes = table_line(img[..., ::-1], size=(512, 512), hprob=0.5, vprob=0.5)\n img = draw_lines(img, rowboxes + colboxes, color=(255, 0, 0), lineW=2)\n\n print(time.time() - t, len(rowboxes), len(colboxes))\n cv2.imwrite('img/table-line.png', img)\n"
] | [
[
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.layers.BatchNormalization",
"numpy.array",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
OmerMughal31/RetinaNet_modified | [
"207ec4fba35ef390af42fa0266ae95b86ecb9b08"
] | [
"keras_retinanet/bin/train.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport warnings\n\nimport keras\nimport keras.preprocessing.image\nimport tensorflow as tf\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\", \"..\"))\n import keras_retinanet.bin # noqa: F401\n\n __package__ = \"keras_retinanet.bin\"\n\n# Change these to absolute imports if you copy this script outside the keras_retinanet package.\nfrom .. import layers # noqa: F401\nfrom .. import losses\nfrom .. import models\nfrom ..callbacks import RedirectModel\nfrom ..callbacks.eval import Evaluate\nfrom ..models.retinanet import retinanet_bbox\nfrom ..preprocessing.csv_generator import CSVGenerator\nfrom ..utils.anchors import make_shapes_callback\nfrom ..utils.config import read_config_file, parse_anchor_parameters\nfrom ..utils.gpu import setup_gpu\nfrom ..utils.image import random_visual_effect_generator\nfrom ..utils.keras_version import check_keras_version\nfrom ..utils.model import freeze as freeze_model\nfrom ..utils.tf_version import check_tf_version\nfrom ..utils.transform import random_transform_generator\n\n\ndef makedirs(path):\n # Intended behavior: try to create the directory,\n # pass if the directory exists already, fails otherwise.\n # Meant for Python 2.7/3.n compatibility.\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef model_with_weights(model, weights, skip_mismatch):\n \"\"\" Load weights for model.\n\n Args\n model : The model to load weights for.\n weights : The weights to load.\n skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.\n \"\"\"\n if weights is not None:\n model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)\n return model\n\n\ndef create_models(\n backbone_retinanet,\n num_classes,\n weights,\n multi_gpu=0,\n freeze_backbone=False,\n lr=1e-5,\n config=None,\n):\n \"\"\" Creates three models (model, training_model, prediction_model).\n\n Args\n backbone_retinanet : A function to call to create a retinanet model with a given backbone.\n num_classes : The number of classes to train.\n weights : The weights to load into the model.\n multi_gpu : The number of GPUs to use for training.\n freeze_backbone : If True, disables learning for the backbone.\n config : Config parameters, None indicates the default configuration.\n\n Returns\n model : The base model. This is also the model that is saved in snapshots.\n training_model : The training model. 
If multi_gpu=0, this is identical to model.\n prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).\n \"\"\"\n\n modifier = freeze_model if freeze_backbone else None\n\n # load anchor parameters, or pass None (so that defaults will be used)\n anchor_params = None\n num_anchors = None\n if config and \"anchor_parameters\" in config:\n anchor_params = parse_anchor_parameters(config)\n num_anchors = anchor_params.num_anchors()\n\n # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.\n # optionally wrap in a parallel model\n if multi_gpu > 1:\n from keras.utils import multi_gpu_model\n\n with tf.device(\"/cpu:0\"):\n model = model_with_weights(\n backbone_retinanet(\n num_classes, num_anchors=num_anchors, modifier=modifier\n ),\n weights=weights,\n skip_mismatch=True,\n )\n training_model = multi_gpu_model(model, gpus=multi_gpu)\n else:\n model = model_with_weights(\n backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier),\n weights=weights,\n skip_mismatch=True,\n )\n training_model = model\n\n # make prediction model\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n\n # compile model\n training_model.compile(\n loss={\"regression\": losses.smooth_l1(), \"classification\": losses.focal()},\n optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001),\n metrics=[\"accuracy\"],\n )\n\n return model, training_model, prediction_model\n\n\ndef create_callbacks(\n model, training_model, prediction_model, validation_generator, args\n):\n \"\"\" Creates the callbacks to use during training.\n\n Args\n model: The base model.\n training_model: The model that is used for training.\n prediction_model: The model that should be used for validation.\n validation_generator: The generator for creating validation data.\n args: parseargs args object.\n\n Returns:\n A list of callbacks used for training.\n \"\"\"\n callbacks = []\n\n tensorboard_callback = None\n\n if args.tensorboard_dir:\n makedirs(args.tensorboard_dir)\n tensorboard_callback = keras.callbacks.TensorBoard(\n log_dir=args.tensorboard_dir,\n histogram_freq=0,\n batch_size=args.batch_size,\n write_graph=True,\n write_grads=False,\n write_images=False,\n embeddings_freq=0,\n embeddings_layer_names=None,\n embeddings_metadata=None,\n )\n\n if args.evaluation and validation_generator:\n evaluation = Evaluate(\n validation_generator,\n tensorboard=tensorboard_callback,\n weighted_average=args.weighted_average,\n )\n evaluation = RedirectModel(evaluation, prediction_model)\n callbacks.append(evaluation)\n\n # save the model\n if args.snapshots:\n # ensure directory created first; otherwise h5py will error after epoch.\n makedirs(args.snapshot_path)\n checkpoint = keras.callbacks.ModelCheckpoint(\n os.path.join(\n args.snapshot_path,\n \"{backbone}_{dataset_type}_{{epoch:02d}}.h5\".format(\n backbone=args.backbone, dataset_type=args.dataset_type\n ),\n ),\n verbose=1,\n # save_best_only=True,\n # monitor=\"mAP\",\n # mode='max'\n )\n checkpoint = RedirectModel(checkpoint, model)\n callbacks.append(checkpoint)\n\n callbacks.append(\n keras.callbacks.ReduceLROnPlateau(\n monitor=\"loss\",\n factor=0.1,\n patience=2,\n verbose=1,\n mode=\"auto\",\n min_delta=0.0001,\n cooldown=0,\n min_lr=0,\n )\n )\n\n if args.tensorboard_dir:\n callbacks.append(tensorboard_callback)\n\n return callbacks\n\n\ndef create_generators(args, preprocess_image):\n \"\"\" Create generators for 
training and validation.\n\n Args\n args : parseargs object containing configuration for generators.\n preprocess_image : Function that preprocesses an image for the network.\n \"\"\"\n common_args = {\n \"batch_size\": args.batch_size,\n \"config\": args.config,\n \"image_min_side\": args.image_min_side,\n \"image_max_side\": args.image_max_side,\n \"no_resize\": args.no_resize,\n \"preprocess_image\": preprocess_image,\n }\n\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n visual_effect_generator = random_visual_effect_generator(\n contrast_range=(0.9, 1.1),\n brightness_range=(-0.1, 0.1),\n hue_range=(-0.05, 0.05),\n saturation_range=(0.95, 1.05),\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n visual_effect_generator = None\n\n if args.dataset_type == \"csv\":\n train_generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n if args.val_annotations:\n validation_generator = CSVGenerator(\n args.val_annotations, args.classes, shuffle_groups=False, **common_args\n )\n else:\n validation_generator = None\n else:\n raise ValueError(\"Invalid data type received: {}\".format(args.dataset_type))\n\n return train_generator, validation_generator\n\n\ndef check_args(parsed_args):\n \"\"\" Function to check for inherent contradictions within parsed arguments.\n For example, batch_size < num_gpus\n Intended to raise errors prior to backend initialisation.\n\n Args\n parsed_args: parser.parse_args()\n\n Returns\n parsed_args\n \"\"\"\n\n if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:\n raise ValueError(\n \"Batch size ({}) must be equal to or higher than the number of GPUs ({})\".format(\n parsed_args.batch_size, parsed_args.multi_gpu\n )\n )\n\n if parsed_args.multi_gpu > 1 and parsed_args.snapshot:\n raise ValueError(\n \"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.\".format(\n parsed_args.multi_gpu, parsed_args.snapshot\n )\n )\n\n if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:\n raise ValueError(\n \"Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.\"\n )\n\n if \"resnet\" not in parsed_args.backbone:\n warnings.warn(\n \"Using experimental backbone {}. 
Only resnet50 has been properly tested.\".format(\n parsed_args.backbone\n )\n )\n\n return parsed_args\n\n\ndef parse_args(args):\n \"\"\" Parse the arguments.\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Simple training script for training a RetinaNet network.\"\n )\n subparsers = parser.add_subparsers(\n help=\"Arguments for specific dataset types.\", dest=\"dataset_type\"\n )\n subparsers.required = True\n\n csv_parser = subparsers.add_parser(\"csv\")\n csv_parser.add_argument(\n \"annotations\", help=\"Path to CSV file containing annotations for training.\"\n )\n csv_parser.add_argument(\n \"classes\", help=\"Path to a CSV file containing class label mapping.\"\n )\n csv_parser.add_argument(\n \"--val-annotations\",\n help=\"Path to CSV file containing annotations for validation (optional).\",\n )\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"--snapshot\", help=\"Resume training from a snapshot.\")\n group.add_argument(\n \"--imagenet-weights\",\n help=\"Initialize the model with pretrained imagenet weights. This is the default behaviour.\",\n action=\"store_const\",\n const=True,\n default=True,\n )\n group.add_argument(\n \"--weights\", help=\"Initialize the model with weights from a file.\"\n )\n group.add_argument(\n \"--no-weights\",\n help=\"Don't initialize the model with any weights.\",\n dest=\"imagenet_weights\",\n action=\"store_const\",\n const=False,\n )\n parser.add_argument(\n \"--backbone\",\n help=\"Backbone model used by retinanet.\",\n default=\"resnet50\",\n type=str,\n )\n parser.add_argument(\n \"--batch-size\", help=\"Size of the batches.\", default=1, type=int\n )\n parser.add_argument(\n \"--gpu\", help=\"Id of the GPU to use (as reported by nvidia-smi).\", type=int\n )\n parser.add_argument(\n \"--multi-gpu\",\n help=\"Number of GPUs to use for parallel processing.\",\n type=int,\n default=0,\n )\n parser.add_argument(\n \"--multi-gpu-force\",\n help=\"Extra flag needed to enable (experimental) multi-gpu support.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--initial-epoch\",\n help=\"Epoch from which to begin the train, useful if resuming from snapshot.\",\n type=int,\n default=0,\n )\n parser.add_argument(\n \"--epochs\", help=\"Number of epochs to train.\", type=int, default=50\n )\n parser.add_argument(\n \"--steps\", help=\"Number of steps per epoch.\", type=int, default=10000\n )\n parser.add_argument(\"--lr\", help=\"Learning rate.\", type=float, default=1e-5)\n parser.add_argument(\n \"--snapshot-path\",\n help=\"Path to store snapshots of models during training (defaults to './snapshots')\",\n default=\"./snapshots\",\n )\n parser.add_argument(\n \"--tensorboard-dir\", help=\"Log directory for Tensorboard output\", default=\"\"\n ) # default='./logs') => https://github.com/tensorflow/tensorflow/pull/34870\n parser.add_argument(\n \"--no-snapshots\",\n help=\"Disable saving snapshots.\",\n dest=\"snapshots\",\n action=\"store_false\",\n )\n parser.add_argument(\n \"--no-evaluation\",\n help=\"Disable per epoch evaluation.\",\n dest=\"evaluation\",\n action=\"store_false\",\n )\n parser.add_argument(\n \"--freeze-backbone\",\n help=\"Freeze training of backbone layers.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--random-transform\",\n help=\"Randomly transform image and annotations.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--image-min-side\",\n help=\"Rescale the image so the smallest side is min_side.\",\n type=int,\n default=800,\n )\n parser.add_argument(\n 
\"--image-max-side\",\n help=\"Rescale the image if the largest side is larger than max_side.\",\n type=int,\n default=1333,\n )\n parser.add_argument(\n \"--no-resize\", help=\"Don\" \"t rescale the image.\", action=\"store_true\"\n )\n parser.add_argument(\n \"--config\", help=\"Path to a configuration parameters .ini file.\"\n )\n parser.add_argument(\n \"--weighted-average\",\n help=\"Compute the mAP using the weighted average of precisions among classes.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--compute-val-loss\",\n help=\"Compute validation loss during training\",\n dest=\"compute_val_loss\",\n action=\"store_true\",\n )\n\n # Fit generator arguments\n parser.add_argument(\n \"--multiprocessing\",\n help=\"Use multiprocessing in fit_generator.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--workers\", help=\"Number of generator workers.\", type=int, default=1\n )\n parser.add_argument(\n \"--max-queue-size\",\n help=\"Queue length for multiprocessing workers in fit_generator.\",\n type=int,\n default=10,\n )\n\n return check_args(parser.parse_args(args))\n\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n # create object that stores backbone information\n backbone = models.backbone(args.backbone)\n\n # make sure keras and tensorflow are the minimum required version\n check_keras_version()\n check_tf_version()\n\n # optionally choose specific GPU\n if args.gpu is not None:\n setup_gpu(args.gpu)\n\n # optionally load config parameters\n if args.config:\n args.config = read_config_file(args.config)\n\n # create the generators\n train_generator, validation_generator = create_generators(\n args, backbone.preprocess_image\n )\n\n # create the model\n if args.snapshot is not None:\n print(\"Loading model, this may take a second...\")\n model = models.load_model(args.snapshot, backbone_name=args.backbone)\n training_model = model\n anchor_params = None\n if args.config and \"anchor_parameters\" in args.config:\n anchor_params = parse_anchor_parameters(args.config)\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n else:\n weights = args.weights\n # default to imagenet if nothing else is specified\n if weights is None and args.imagenet_weights:\n weights = backbone.download_imagenet()\n\n print(\"Creating model, this may take a second...\")\n model, training_model, prediction_model = create_models(\n backbone_retinanet=backbone.retinanet,\n num_classes=train_generator.num_classes(),\n weights=weights,\n multi_gpu=args.multi_gpu,\n freeze_backbone=args.freeze_backbone,\n lr=args.lr,\n config=args.config,\n )\n\n # print model summary\n print(model.summary())\n\n # this lets the generator compute backbone layer shapes using the actual backbone model\n if \"vgg\" in args.backbone or \"densenet\" in args.backbone:\n train_generator.compute_shapes = make_shapes_callback(model)\n if validation_generator:\n validation_generator.compute_shapes = train_generator.compute_shapes\n\n # create the callbacks\n callbacks = create_callbacks(\n model, training_model, prediction_model, validation_generator, args,\n )\n\n if not args.compute_val_loss:\n validation_generator = None\n\n # start training\n return training_model.fit_generator(\n generator=train_generator,\n steps_per_epoch=args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks,\n workers=args.workers,\n use_multiprocessing=args.multiprocessing,\n max_queue_size=args.max_queue_size,\n 
validation_data=validation_generator,\n initial_epoch=args.initial_epoch,\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"tensorflow.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
Jie-Yuan/Torchappy | [
"e722db1085fa2ff8e0267f7e6745875531c00f8b"
] | [
"models/lr.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tqdm import tqdm, tqdm_notebook\nfrom ml_metrics import auc\nfrom sklearn.datasets import make_classification\n\n\nclass LogsticRegression(nn.Module):\n def __init__(self, in_dim, n_class):\n super().__init__()\n self.fc1 = nn.Linear(in_dim, in_dim // 2)\n self.fc2 = nn.Linear(in_dim // 2, n_class)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n # return x\n return F.softmax(x, 1)\n\n\nepochs = 5\nbatch_size = 128\nX, y = make_classification(1000000)\nt_X, t_y = map(torch.FloatTensor, (X, y))\n\nnet = LogsticRegression(20, 2)\nloss_func = torch.nn.modules.loss.CrossEntropyLoss()\noptimizer = torch.optim.Adam(net.parameters())\n\nbar_epochs = tqdm_notebook(range(epochs))\nfor e in bar_epochs:\n bar_epochs.set_description(f\"Epoch {e}:\")\n t = tqdm_notebook(range(0, t_X.size(0), batch_size))\n for b in t: # for each training step\n # train your data...\n b_X = t_X[b:b + batch_size]\n b_y = t_y[b:b + batch_size]\n output = net(b_X) # rnn output\n loss = loss_func(\n output,\n b_y.long().view(-1)) # cross entropy loss and y is not one-hotted\n optimizer.zero_grad() # clear gradients for this training step\n loss.backward() # backpropagation, compute gradients\n optimizer.step()\n if b % 10000 == 0:\n t.set_description(\n f\"Epoch {e}:\"\n f\"Loss: {loss.data.numpy():.5f} | \"\n f\"Auc: {auc(b_y.numpy(), output.data.numpy()[:, 1]):.5}\")\n\n_net = net.eval()\nauc(y, _net(t_X).data.numpy()[:, -1])\n"
] | [
[
"torch.nn.Linear",
"torch.nn.functional.softmax",
"sklearn.datasets.make_classification",
"torch.nn.modules.loss.CrossEntropyLoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jebediah/libwave | [
"c04998c964f0dc7d414783c6e8cf989a2716ad54"
] | [
"wave_utils/scripts/plot_matrix.py"
] | [
"import sys\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\nif __name__ == \"__main__\":\n file = open(sys.argv[1], \"r\")\n X = np.loadtxt(file)\n X = np.matrix(X)\n print(X.shape)\n\n fig, ax = plt.subplots()\n cax = ax.matshow(X)\n ax.set_xticks(range(0, X.shape[1]))\n ax.set_yticks(range(0, X.shape[0]))\n fig.colorbar(cax)\n plt.show()\n"
] | [
[
"numpy.matrix",
"matplotlib.pylab.show",
"matplotlib.pylab.subplots",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jinzhuoran/CogKGE | [
"b0e819a1d34cf61a7d70c33808da3377b73c8fd6",
"70d851d6489600c1e90eb25b0388a3ceba2f078c",
"b0e819a1d34cf61a7d70c33808da3377b73c8fd6",
"b0e819a1d34cf61a7d70c33808da3377b73c8fd6"
] | [
"cogkge/modules/gnn/helper.py",
"cogkge/modules/gnn/gat.py",
"examples/eventkg240k/example_eventkg240k_transh.py",
"tests/test_fb15k_run_transe.py"
] | [
"import numpy as np, sys, os, random, pdb, json, uuid, time, argparse\nfrom pprint import pprint\nimport logging, logging.config\nfrom collections import defaultdict as ddict\n# from ordered_set import OrderedSet\n\n# PyTorch related imports\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.init import xavier_normal_\nfrom torch.utils.data import DataLoader\nfrom torch.nn import Parameter\n# from torch_scatter import scatter_add\nfrom .util_scatter import scatter_add\n\ntry:\n from torch import irfft\n from torch import rfft\nexcept ImportError:\n from torch.fft import irfft2\n from torch.fft import rfft2\n\n\n def rfft(x, d):\n t = rfft2(x, dim=(-d))\n return torch.stack((t.real, t.imag), -1)\n\n\n def irfft(x, d, signal_sizes):\n return irfft2(torch.complex(x[:, :, 0], x[:, :, 1]), s=signal_sizes, dim=(-d))\n\nnp.set_printoptions(precision=4)\n\n\ndef set_gpu(gpus):\n \"\"\"\n\tSets the GPU to be used for the run\n\n\tParameters\n\t----------\n\tgpus: List of GPUs to be used for the run\n\t\n\tReturns\n\t-------\n\t\t\n\t\"\"\"\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus\n\n\ndef get_logger(name, log_dir, config_dir):\n \"\"\"\n\tCreates a logger object\n\n\tParameters\n\t----------\n\tname: Name of the logger file\n\tlog_dir: Directory where logger file needs to be stored\n\tconfig_dir: Directory from where log_config.json needs to be read\n\t\n\tReturns\n\t-------\n\tA logger object which writes to both file and stdout\n\t\t\n\t\"\"\"\n config_dict = json.load(open(config_dir + 'log_config.json'))\n config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(name)\n\n std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logging.Formatter(std_out_format))\n logger.addHandler(consoleHandler)\n\n return logger\n\n\ndef get_combined_results(left_results, right_results):\n results = {}\n count = float(left_results['count'])\n\n results['left_mr'] = round(left_results['mr'] / count, 5)\n results['left_mrr'] = round(left_results['mrr'] / count, 5)\n results['right_mr'] = round(right_results['mr'] / count, 5)\n results['right_mrr'] = round(right_results['mrr'] / count, 5)\n results['mr'] = round((left_results['mr'] + right_results['mr']) / (2 * count), 5)\n results['mrr'] = round((left_results['mrr'] + right_results['mrr']) / (2 * count), 5)\n\n for k in range(10):\n results['left_hits@{}'.format(k + 1)] = round(left_results['hits@{}'.format(k + 1)] / count, 5)\n results['right_hits@{}'.format(k + 1)] = round(right_results['hits@{}'.format(k + 1)] / count, 5)\n results['hits@{}'.format(k + 1)] = round(\n (left_results['hits@{}'.format(k + 1)] + right_results['hits@{}'.format(k + 1)]) / (2 * count), 5)\n return results\n\n\ndef get_param(shape):\n param = Parameter(torch.Tensor(*shape));\n xavier_normal_(param.data)\n return param\n\n\ndef com_mult(a, b):\n r1, i1 = a[..., 0], a[..., 1]\n r2, i2 = b[..., 0], b[..., 1]\n return torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim=-1)\n\n\ndef conj(a):\n a[..., 1] = -a[..., 1]\n return a\n\n\ndef cconv(a, b):\n return irfft(com_mult(rfft(a, 1), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))\n\n\ndef ccorr(a, b):\n return irfft(com_mult(conj(rfft(a, 1)), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))\n\n\ndef construct_adj(train_dataset, relation_dict_len):\n edge_index, edge_type = [], []\n 
if train_dataset.data.shape[1] == 3: # score_based\n for sub, rel, obj in train_dataset.data:\n edge_index.append((sub, obj))\n edge_type.append(rel)\n\n for sub, rel, obj in train_dataset.data:\n edge_index.append((obj, sub))\n edge_type.append(rel + relation_dict_len)\n else: # classification-based\n label = train_dataset.label_data\n for j,(sub, rel) in enumerate(train_dataset.data):\n for elem in torch.nonzero(label[j]):\n e2_idx = elem.item()\n edge_index.append((sub,e2_idx))\n edge_type.append(rel)\n\n for j,(sub, rel) in enumerate(train_dataset.data):\n for elem in torch.nonzero(label[j]):\n e2_idx = elem.item()\n edge_index.append((e2_idx,sub))\n edge_type.append(rel + relation_dict_len)\n\n return edge_index,edge_type",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass GraphAttentionLayer(nn.Module):\n def __init__(self, in_features, out_features, dropout, alpha, concat=True):\n super(GraphAttentionLayer, self).__init__()\n self.dropout = dropout\n self.in_features = in_features\n self.out_features = out_features\n self.alpha = alpha\n self.concat = concat\n self.W = nn.Linear(in_features, out_features, bias=False)\n # self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))\n nn.init.xavier_uniform_(self.W.weight, gain=1.414)\n # self.a = nn.Parameter(torch.zeros(size=(2*out_features, 1)))\n self.a1 = nn.Parameter(torch.zeros(size=(out_features, 1)))\n self.a2 = nn.Parameter(torch.zeros(size=(out_features, 1)))\n nn.init.xavier_uniform_(self.a1.data, gain=1.414)\n nn.init.xavier_uniform_(self.a2.data, gain=1.414)\n self.leakyrelu = nn.LeakyReLU(self.alpha)\n\n def forward(self, input, adj):\n h = self.W(input)\n # [batch_size, N, out_features]\n batch_size, N, _ = h.size()\n middle_result1 = torch.matmul(h, self.a1).expand(-1, -1, N)\n middle_result2 = torch.matmul(h, self.a2).expand(-1, -1, N).transpose(1, 2)\n e = self.leakyrelu(middle_result1 + middle_result2)\n attention = e.masked_fill(adj == 0, -1e9)\n attention = F.softmax(attention, dim=2)\n attention = F.dropout(attention, self.dropout, training=self.training)\n h_prime = torch.matmul(attention, h)\n if self.concat:\n return F.elu(h_prime)\n else:\n return h_prime\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n\n\nclass GAT(nn.Module):\n def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, layer):\n super(GAT, self).__init__()\n self.dropout = dropout\n self.layer = layer\n if self.layer == 1:\n self.attentions = [GraphAttentionLayer(nfeat, nclass, dropout=dropout, alpha=alpha, concat=True) for _ in\n range(nheads)]\n else:\n self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in\n range(nheads)]\n self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n def forward(self, vector, adj):\n x = vector.unsqueeze(0)\n x = F.dropout(x, self.dropout, training=self.training)\n if self.layer == 1:\n x = torch.stack([att(x, adj) for att in self.attentions], dim=2)\n x = x.sum(2)\n x = F.dropout(x, self.dropout, training=self.training)\n return F.log_softmax(x, dim=2)\n else:\n x = torch.cat([att(x, adj) for att in self.attentions], dim=2)\n x = F.dropout(x, self.dropout, training=self.training)\n x = F.elu(self.out_att(x, adj))\n return F.log_softmax(x, dim=2)\n",
"import sys\nimport torch\nfrom pathlib import Path\nfrom torch.utils.data import RandomSampler\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0].parents[0].parents[0] # CogKGE root directory\nif str(ROOT) not in sys.path:\n sys.path.append(str(ROOT)) # add CogKGE root directory to PATH\nfrom cogkge import *\n\ndevice = init_cogkge(device_id=\"1\", seed=0)\n\nloader = EVENTKG240KLoader(dataset_path=\"../../dataset\", download=True)\ntrain_data, valid_data, test_data = loader.load_all_data()\nnode_lut, relation_lut, time_lut = loader.load_all_lut()\n\nprocessor = EVENTKG240KProcessor(node_lut, relation_lut, time_lut, reprocess=True,mode=\"normal\")\ntrain_dataset = processor.process(train_data)\nvalid_dataset = processor.process(valid_data)\ntest_dataset = processor.process(test_data)\nnode_lut, relation_lut, time_lut = processor.process_lut()\n\ntrain_sampler = RandomSampler(train_dataset)\nvalid_sampler = RandomSampler(valid_dataset)\ntest_sampler = RandomSampler(test_dataset)\n\nmodel = TransH(entity_dict_len=len(node_lut),\n relation_dict_len=len(relation_lut),\n embedding_dim=50,\n p_norm=1,\n penalty_weight=0.1)\n\nloss = MarginLoss(margin=1.0)\n\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0)\n\nmetric = Link_Prediction(node_lut=node_lut,\n relation_lut=relation_lut,\n link_prediction_raw=True,\n link_prediction_filt=False,\n batch_size=1000000,\n reverse=False)\n\nlr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, mode='min', patience=3, threshold_mode='abs', threshold=5,\n factor=0.5, min_lr=1e-9, verbose=True\n)\n\nnegative_sampler = UnifNegativeSampler(triples=train_dataset,\n entity_dict_len=len(node_lut),\n relation_dict_len=len(relation_lut),\n node_lut=node_lut)\n\ntrainer = Trainer(\n train_dataset=train_dataset,\n valid_dataset=valid_dataset,\n train_sampler=train_sampler,\n valid_sampler=valid_sampler,\n test_dataset=test_dataset,\n test_sampler=test_sampler,\n model=model,\n loss=loss,\n optimizer=optimizer,\n negative_sampler=negative_sampler,\n device=device,\n output_path=\"../../dataset\",\n lookuptable_E=node_lut,\n lookuptable_R=relation_lut,\n metric=metric,\n lr_scheduler=lr_scheduler,\n trainer_batch_size=100000,\n total_epoch=3000,\n apex=True,\n dataloaderX=True,\n num_workers=1,\n pin_memory=True,\n use_tensorboard_epoch=100,\n use_matplotlib_epoch=100,\n use_savemodel_epoch=100,\n use_metric_epoch=100\n)\ntrainer.train()\n\n\n",
"import torch\nfrom torch.utils.data import RandomSampler\nfrom pathlib import Path\nimport sys\n\nFILE = Path(__file__).resolve()\nROOT = FILE.parents[0].parents[0] # CogKGE root directory\nif str(ROOT) not in sys.path:\n sys.path.append(str(ROOT)) # add CogKGE root directory to PATH\n\n\nfrom cogkge import *\ndevice=init_cogkge(device_id=\"9\",seed=1)\n\nloader =FB15KLoader(dataset_path=\"../dataset\",download=True)\ntrain_data, valid_data, test_data = loader.load_all_data()\nnode_lut, relation_lut= loader.load_all_lut()\n\nprocessor = FB15KProcessor(node_lut, relation_lut,reprocess=True,mode=\"normal\")\ntrain_dataset = processor.process(train_data)\nvalid_dataset = processor.process(valid_data)\ntest_dataset = processor.process(test_data)\nnode_lut,relation_lut=processor.process_lut()\n\ntrain_sampler = RandomSampler(train_dataset)\nvalid_sampler = RandomSampler(valid_dataset)\ntest_sampler = RandomSampler(test_dataset)\n\nmodel = TransE(entity_dict_len=len(node_lut),\n relation_dict_len=len(relation_lut),\n embedding_dim=50,\n p_norm=1)\n\nloss = MarginLoss(margin=1.0)\n\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=0)\n\nmetric = Link_Prediction(node_lut=node_lut,\n relation_lut=relation_lut,\n link_prediction_raw=True,\n link_prediction_filt=False,\n batch_size=1000000,\n reverse=False)\n\nlr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, mode='min', patience=3, threshold_mode='abs', threshold=5,\n factor=0.5, min_lr=1e-9, verbose=True\n)\n\nnegative_sampler = UnifNegativeSampler(triples=train_dataset,\n entity_dict_len=len(node_lut),\n relation_dict_len=len(relation_lut),\n node_lut=node_lut)\n\ntrainer = Trainer(\n train_dataset=train_dataset,\n valid_dataset=valid_dataset,\n test_dataset=test_dataset,\n train_sampler=train_sampler,\n valid_sampler=valid_sampler,\n test_sampler=test_sampler,\n model=model,\n loss=loss,\n optimizer=optimizer,\n negative_sampler=negative_sampler,\n device=device,\n output_path=\"../dataset\",\n lookuptable_E=node_lut,\n lookuptable_R=relation_lut,\n metric=metric,\n trainer_batch_size=2000000,\n total_epoch=500,\n lr_scheduler=lr_scheduler,\n apex=True,\n dataloaderX=True,\n num_workers=4,\n pin_memory=True,\n use_tensorboard_epoch=100,\n use_matplotlib_epoch=100,\n use_savemodel_epoch=100,\n use_metric_epoch=100\n)\ntrainer.train()\n\n\n"
] | [
[
"torch.Tensor",
"numpy.set_printoptions",
"torch.nn.init.xavier_normal_",
"torch.rfft",
"torch.nonzero",
"torch.fft.rfft2",
"torch.stack",
"torch.complex"
],
[
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout",
"torch.zeros",
"torch.nn.Linear",
"torch.matmul",
"torch.nn.LeakyReLU",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.elu"
],
[
"torch.utils.data.RandomSampler",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
],
[
"torch.utils.data.RandomSampler",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chenxiaoyu523/FEAT3D | [
"ba45ba7c26628a7cc0070b010f4f33893cdac926"
] | [
"train_matchnet.py"
] | [
"import argparse\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom feat.dataloader.samplers import CategoriesSampler\nfrom feat.models.matchnet import MatchNet \nfrom feat.utils import pprint, set_gpu, ensure_path, Averager, Timer, count_acc, euclidean_metric, compute_confidence_interval\nfrom tensorboardX import SummaryWriter\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--max_epoch', type=int, default=200)\n parser.add_argument('--way', type=int, default=5) \n parser.add_argument('--shot', type=int, default=1)\n parser.add_argument('--query', type=int, default=15)\n parser.add_argument('--lr', type=float, default=0.0001)\n parser.add_argument('--lr_mul', type=float, default=1) # lr is the basic learning rate, while lr * lr_mul is the lr for other parts\n parser.add_argument('--step_size', type=int, default=10)\n parser.add_argument('--gamma', type=float, default=0.2) \n parser.add_argument('--temperature', type=float, default=1)\n parser.add_argument('--use_bilstm', type=bool, default=False)\n parser.add_argument('--model_type', type=str, default='ConvNet', choices=['ConvNet', 'ResNet'])\n parser.add_argument('--dataset', type=str, default='MiniImageNet', choices=['MiniImageNet', 'CUB', 'TieredImageNet']) \n # MiniImageNet, ConvNet, './saves/initialization/miniimagenet/con-pre.pth'\n # MiniImageNet, ResNet, './saves/initialization/miniimagenet/res-pre.pth'\n # CUB, ConvNet, './saves/initialization/cub/con-pre.pth' \n parser.add_argument('--init_weights', type=str, default=None) \n parser.add_argument('--gpu', default='0')\n args = parser.parse_args()\n pprint(vars(args))\n\n set_gpu(args.gpu)\n save_path1 = '-'.join([args.dataset, args.model_type, 'MatchNet'])\n save_path2 = '_'.join([str(args.shot), str(args.query), str(args.way), \n str(args.step_size), str(args.gamma), str(args.lr), str(args.temperature)])\n if args.use_bilstm:\n save_path2 = save_path2 + '_' + str(args.lr_mul) + '_BiLSTM'\n args.save_path = osp.join(save_path1, save_path2) \n ensure_path(save_path1, remove=False)\n ensure_path(args.save_path) \n\n if args.dataset == 'MiniImageNet':\n # Handle MiniImageNet\n from feat.dataloader.mini_imagenet import MiniImageNet as Dataset\n elif args.dataset == 'CUB':\n from feat.dataloader.cub import CUB as Dataset\n elif args.dataset == 'TieredImageNet':\n from feat.dataloader.tiered_imagenet import tieredImageNet as Dataset \n else:\n raise ValueError('Non-supported Dataset.')\n \n trainset = Dataset('train', args)\n train_sampler = CategoriesSampler(trainset.label, 100, args.way, args.shot + args.query)\n train_loader = DataLoader(dataset=trainset, batch_sampler=train_sampler, num_workers=0, pin_memory=True)\n\n valset = Dataset('val', args)\n val_sampler = CategoriesSampler(valset.label, 500, args.way, args.shot + args.query)\n val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler, num_workers=0, pin_memory=True)\n \n model = MatchNet(args)\n if args.model_type == 'ConvNet':\n if args.use_bilstm:\n optimizer = torch.optim.Adam([{'params': model.encoder.parameters()},\n {'params': model.bilstm.parameters(), 'lr': args.lr * args.lr_mul}], lr=args.lr) \n else:\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n elif args.model_type == 'ResNet':\n if args.use_bilstm:\n optimizer = torch.optim.SGD([{'params': model.encoder.parameters()},\n {'params': model.bilstm.parameters(), 'lr': args.lr * args.lr_mul}], lr=args.lr, 
momentum=0.9, nesterov=True, weight_decay=0.0005) \n else: \n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, nesterov=True, weight_decay=0.0005) \n else:\n raise ValueError('No Such Encoder')\n \n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) \n \n # load pre-trained model (no FC weights)\n model_dict = model.state_dict()\n if args.init_weights is not None:\n pretrained_dict = torch.load(args.init_weights)['params']\n # remove weights for FC\n pretrained_dict = {'encoder.'+k: v for k, v in pretrained_dict.items()}\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n print(pretrained_dict.keys())\n model_dict.update(pretrained_dict) \n model.load_state_dict(model_dict) \n \n if torch.cuda.is_available():\n torch.backends.cudnn.benchmark = True\n model = model.cuda()\n \n def save_model(name):\n torch.save(dict(params=model.state_dict()), osp.join(args.save_path, name + '.pth'))\n \n trlog = {}\n trlog['args'] = vars(args)\n trlog['train_loss'] = []\n trlog['val_loss'] = []\n trlog['train_acc'] = []\n trlog['val_acc'] = []\n trlog['max_acc'] = 0.0\n trlog['max_acc_epoch'] = 0\n\n timer = Timer()\n global_count = 0\n writer = SummaryWriter(logdir=args.save_path)\n \n label = torch.arange(args.way).repeat(args.query)\n if torch.cuda.is_available():\n label = label.type(torch.cuda.LongTensor)\n else:\n label = label.type(torch.LongTensor)\n \n label_support = torch.arange(args.way).repeat(args.shot)\n label_support = label_support.type(torch.LongTensor)\n # transform to one-hot form\n label_support_onehot = torch.zeros(args.way * args.shot, args.way)\n label_support_onehot.scatter_(1, label_support.unsqueeze(1), 1) \n if torch.cuda.is_available():\n label_support_onehot = label_support_onehot.cuda() # KN x N\n \n for epoch in range(1, args.max_epoch + 1):\n lr_scheduler.step()\n model.train()\n tl = Averager()\n ta = Averager()\n \n for i, batch in enumerate(train_loader, 1):\n global_count = global_count + 1\n if torch.cuda.is_available():\n data, _ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n p = args.shot * args.way\n data_shot, data_query = data[:p], data[p:]\n\n logits = model(data_shot, data_query) # KqN x KN x 1\n # use logits to weights all labels, KN x N\n prediction = torch.sum(torch.mul(logits, label_support_onehot.unsqueeze(0)), 1) # KqN x N\n # compute loss\n loss = F.cross_entropy(prediction, label)\n acc = count_acc(prediction, label)\n writer.add_scalar('data/loss', float(loss), global_count)\n writer.add_scalar('data/acc', float(acc), global_count)\n print('epoch {}, train {}/{}, loss={:.4f} acc={:.4f}'\n .format(epoch, i, len(train_loader), loss.item(), acc))\n\n tl.add(loss.item())\n ta.add(acc)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n tl = tl.item()\n ta = ta.item()\n\n model.eval()\n\n vl = Averager()\n va = Averager()\n\n label = torch.arange(args.way).repeat(args.query)\n if torch.cuda.is_available():\n label = label.type(torch.cuda.LongTensor)\n else:\n label = label.type(torch.LongTensor)\n \n print('best epoch {}, best val acc={:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc']))\n with torch.no_grad():\n for i, batch in enumerate(val_loader, 1):\n if torch.cuda.is_available():\n data, _ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n p = args.shot * args.way\n data_shot, data_query = data[:p], data[p:]\n \n logits = model(data_shot, data_query) # KqN x KN x 1\n # use logits to weights all labels, KN x N\n 
prediction = torch.sum(torch.mul(logits, label_support_onehot.unsqueeze(0)), 1) # KqN x N\n # compute loss\n loss = F.cross_entropy(prediction, label)\n acc = count_acc(prediction, label)\n vl.add(loss.item())\n va.add(acc)\n\n vl = vl.item()\n va = va.item()\n writer.add_scalar('data/val_loss', float(vl), epoch)\n writer.add_scalar('data/val_acc', float(va), epoch) \n print('epoch {}, val, loss={:.4f} acc={:.4f}'.format(epoch, vl, va))\n\n if va > trlog['max_acc']:\n trlog['max_acc'] = va\n trlog['max_acc_epoch'] = epoch\n save_model('max_acc')\n\n trlog['train_loss'].append(tl)\n trlog['train_acc'].append(ta)\n trlog['val_loss'].append(vl)\n trlog['val_acc'].append(va)\n\n torch.save(trlog, osp.join(args.save_path, 'trlog'))\n\n save_model('epoch-last')\n\n print('ETA:{}/{}'.format(timer.measure(), timer.measure(epoch / args.max_epoch)))\n writer.close()\n\n # Test Phase\n trlog = torch.load(osp.join(args.save_path, 'trlog'))\n test_set = Dataset('test', args)\n sampler = CategoriesSampler(test_set.label, 10000, args.way, args.shot + args.query)\n loader = DataLoader(test_set, batch_sampler=sampler, num_workers=0, pin_memory=True)\n test_acc_record = np.zeros((10000,))\n\n model.load_state_dict(torch.load(osp.join(args.save_path, 'max_acc' + '.pth'))['params'])\n model.eval()\n\n ave_acc = Averager()\n \n with torch.no_grad():\n for i, batch in enumerate(loader, 1):\n if torch.cuda.is_available():\n data, _ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n k = args.way * args.shot\n data_shot, data_query = data[:k], data[k:]\n logits = model(data_shot, data_query) # KqN x KN x 1\n # use logits to weights all labels, KN x N\n prediction = torch.sum(torch.mul(logits, label_support_onehot.unsqueeze(0)), 1) # KqN x N\n acc = count_acc(prediction, label)\n ave_acc.add(acc)\n test_acc_record[i-1] = acc\n print('batch {}: {:.2f}({:.2f})'.format(i, ave_acc.item() * 100, acc * 100))\n \n m, pm = compute_confidence_interval(test_acc_record)\n print('Val Best Acc {:.4f}, Test Acc {:.4f}'.format(trlog['max_acc'], ave_acc.item()))\n print('Test Acc {:.4f} + {:.4f}'.format(m, pm))\n"
] | [
[
"torch.zeros",
"torch.load",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.arange",
"numpy.zeros",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AutodidactaMx/cocid_python | [
"11628f465ff362807a692c79ede26bf30dd8e26a",
"11628f465ff362807a692c79ede26bf30dd8e26a",
"11628f465ff362807a692c79ede26bf30dd8e26a"
] | [
"Modulo_3/Semana 4/matplotlib/practica4.py",
"Modulo_3/Semana 4/matplotlib/practica2.py",
"Modulo_5/practica_sabado/Operaciones.py"
] | [
"import tkinter as tk\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n\ndata = {\n 'Basquet': 11,\n 'Futbol': 222,\n 'Natacion': 121,\n 'Esqui': 321,\n 'Tenis': 44\n }\nclave = data.keys()\nvalor = data.values()\n\nventana= tk.Tk() \n \nfigura = plt.Figure(figsize=(6,5), dpi=100)\nlienzo_figura = FigureCanvasTkAgg(figura, ventana)\nlienzo_figura.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH)\n\nax1 = figura.add_subplot()\nax1.set_title('Alumnos')\nax1.plot(clave, valor)\nax1.set_ylabel('Cantidad alumnos')\nax1.set_xlabel('Materias')\n\ntoolbar =NavigationToolbar2Tk(lienzo_figura, ventana)\ntoolbar.update()\ntoolbar.pack(side=tk.BOTTOM, fill=tk.Y)\nventana.mainloop()",
"import tkinter as tk\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n\ndata = {\n '0-9': 5000,\n '10-19': 2000,\n '20-29': 30000,\n '30-39': 43490,\n '40-49': 39898\n }\nclave = data.keys()\nvalor = data.values()\n\nventana= tk.Tk() \n \nfigura = plt.Figure(figsize=(6,5), dpi=100)\nlienzo_figura = FigureCanvasTkAgg(figura, ventana)\nlienzo_figura.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH)\n\nax1 = figura.add_subplot()\nax1.set_title('Habitantes')\nax1.barh(list(clave), list(valor))\nax1.set_ylabel('Rango de edad')\nax1.set_xlabel('Cantidad')\n\ntoolbar =NavigationToolbar2Tk(lienzo_figura, ventana)\ntoolbar.update()\ntoolbar.pack(side=tk.BOTTOM, fill=tk.Y)\nventana.mainloop()",
"import numpy as np\na = np.array([0,10, 10, 30, 110, 20], dtype=np.int32) \nprint(\"Suma\")\nprint(a * 2)\nprint(\"division\")\nprint(a / 2) \n\nprint(\"Condicionales\") \nprint(a[(a > 5) & (a < 100)])\n\nprint(\"Acumulamiento\")\nprint(a.cumsum())\n\na = np.array([[1,10, 10, 30, 110, 20],[1,10, 10, 30, 110, 20]], dtype=np.int32) \n\nprint(\"Explicacion\")\nprint(np.where([True, False, True], [1,2,3], [4, 5, 6]))\n\nprint(\"Where\")\nprint(np.where((a > 5) & (a < 11), a, a+1000))\n\n\n(unique, counts) = np.unique(a, return_counts=True)\nprint(f'Únicos: {unique}')\nprint(f'Frecuencia: {counts}')"
] | [
[
"matplotlib.backends.backend_tkagg.NavigationToolbar2Tk",
"matplotlib.pyplot.Figure",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
],
[
"matplotlib.backends.backend_tkagg.NavigationToolbar2Tk",
"matplotlib.pyplot.Figure",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
],
[
"numpy.array",
"numpy.where",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |