repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
fepegar/resseg-ijcars | [
"963e5548fb02c777038ef550c969149377071cfc"
] | [
"datasets.py"
] | [
"import hashlib\nfrom pathlib import Path\n\nimport torch\nimport pandas as pd\nimport torchio as tio\nfrom tqdm import tqdm\nfrom resector import RandomResection\nfrom sklearn.model_selection import KFold\n\nfrom utils import sglob, get_stem\n\n\nclass DataModule:\n def __init__(\n self,\n datasets_dir,\n train_batch_size,\n num_workers,\n ):\n self.train_batch_size = train_batch_size\n self.num_workers = num_workers\n self.datasets_dir = Path(datasets_dir).expanduser()\n\n def get_train_loader(self, dataset):\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.train_batch_size,\n num_workers=self.num_workers,\n pin_memory=True,\n shuffle=True,\n )\n\n def get_val_loader(self, dataset):\n return torch.utils.data.DataLoader(\n dataset,\n batch_size=self.train_batch_size,\n num_workers=self.num_workers,\n pin_memory=True,\n shuffle=False,\n )\n\n def get_train_transform(self, resect=True):\n return get_train_transform(self.landmarks_path, resection_params=self.resection_params)\n\n def print_lengths(self, test=True):\n f = print if self.log is None else self.log.info\n f(f'{len(self.train_dataset):4} training instances')\n f(f'{len(self.train_loader):4} training batches')\n f(f'{len(self.val_dataset):4} validation instances')\n f(f'{len(self.val_loader):4} validation batches')\n if not test:\n return\n f(f'{len(self.test_dataset):4} test instances')\n f(f'{len(self.test_loader):4} test batches')\n\n def get_public_subjects(self):\n public_dataset_names = (\n 'IXI',\n 'ADNI1_15T',\n 'ADNI1_3T',\n 'ADNI2',\n 'OASIS_download',\n )\n all_subjects = []\n for name in public_dataset_names:\n subjects = get_subjects_list_from_dir(self.datasets_dir / name)\n all_subjects.extend(subjects)\n return all_subjects\n\n\nclass DataModulePublic(DataModule):\n def __init__(\n self,\n datasets_dir,\n real_dataset_dir,\n resection_params,\n train_batch_size,\n num_workers,\n pseudo_dir=None,\n split_ratio=0.9,\n split_seed=42,\n debug_ratio=0.02,\n log=None,\n debug=False,\n augment=True,\n verbose=False,\n cache_validation_set=True,\n histogram_standardization=True,\n ):\n super().__init__(datasets_dir, train_batch_size, num_workers)\n self.resection_params = resection_params\n\n # Precomputed from 90% of the public training data\n if histogram_standardization:\n self.landmarks_path = Path(__file__).parent / 'landmarks' / 'histogram_landmarks_default.npy'\n else:\n self.landmarks_path = None\n\n public_subjects = self.get_public_subjects()\n train_public, val_public = self.split_subjects(public_subjects, split_ratio, split_seed)\n\n train_transform = self.get_train_transform() if augment else self.get_val_transform()\n self.train_dataset = tio.SubjectsDataset(train_public, transform=train_transform)\n self.val_dataset = tio.SubjectsDataset(val_public, transform=train_transform)\n if cache_validation_set:\n self.val_dataset = cache(self.val_dataset, resection_params, augment=augment)\n test_transform = get_test_transform(self.landmarks_path)\n self.test_dataset = get_real_resection_dataset(real_dataset_dir, transform=test_transform)\n if debug:\n self.train_dataset = reduce_dataset(self.train_dataset, debug_ratio)\n self.val_dataset = reduce_dataset(self.val_dataset, debug_ratio)\n self.test_dataset = reduce_dataset(self.test_dataset, debug_ratio)\n\n self.train_loader = self.get_train_loader(self.train_dataset)\n self.val_loader = self.get_val_loader(self.val_dataset)\n self.test_loader = self.get_val_loader(self.test_dataset)\n\n self.log = log\n\n if verbose:\n self.print_lengths()\n\n 
@staticmethod\n def split_subjects(subjects, ratio, seed):\n len_subjects = len(subjects)\n len_training = int(len_subjects * ratio)\n len_validation = len_subjects - len_training\n lengths = len_training, len_validation\n with torch.random.fork_rng([]):\n torch.manual_seed(seed)\n train, val = torch.utils.data.random_split(subjects, lengths)\n return train, val\n\n def get_val_transform(self):\n return tio.Compose((get_simulation_transform(self.resection_params), get_test_transform(self.landmarks_path)))\n\n\nclass DataModuleCV(DataModule):\n def __init__(\n self,\n fold,\n num_folds,\n datasets_dir,\n dataset_name,\n train_batch_size,\n num_workers,\n use_public_landmarks=False,\n pseudo_dirname=None,\n split_seed=42,\n log=None,\n verbose=True,\n ):\n super().__init__(datasets_dir, train_batch_size, num_workers)\n self.resection_params = None\n real_dataset_dir = self.datasets_dir / 'real' / dataset_name\n real_subjects = get_real_resection_subjects(real_dataset_dir)\n train_subjects, val_subjects = self.split_subjects(real_subjects, fold, num_folds, split_seed)\n self.train_dataset = tio.SubjectsDataset(train_subjects)\n if use_public_landmarks:\n self.landmarks_path = get_landmarks_path()\n else:\n self.landmarks_path = get_landmarks_path(dataset=self.train_dataset)\n train_transform = self.get_train_transform(resect=False)\n self.train_dataset.set_transform(train_transform)\n test_transform = get_test_transform(self.landmarks_path)\n self.val_dataset = tio.SubjectsDataset(val_subjects, transform=test_transform)\n\n if pseudo_dirname is not None:\n pseudo_dir = self.datasets_dir / 'real' / pseudo_dirname\n pseudo_dataset = get_real_resection_dataset(pseudo_dir, transform=train_transform)\n self.train_dataset = torch.utils.data.ConcatDataset((self.train_dataset, pseudo_dataset))\n\n self.train_loader = self.get_train_loader(self.train_dataset)\n self.val_loader = self.test_loader = self.get_val_loader(self.val_dataset)\n\n self.log = log\n if verbose:\n self.print_lengths(test=False)\n\n @staticmethod\n def split_subjects(real_subjects, fold, num_folds, split_seed):\n kf = KFold(n_splits=num_folds, shuffle=True, random_state=split_seed)\n folds = list(kf.split(real_subjects))\n train_indices, val_indices = folds[fold]\n train_subjects = [real_subjects[i] for i in train_indices]\n val_subjects = [real_subjects[i] for i in val_indices]\n return train_subjects, val_subjects\n\n\ndef get_train_transform(landmarks_path, resection_params=None):\n spatial_transform = tio.Compose((\n tio.OneOf({\n tio.RandomAffine(): 0.9,\n tio.RandomElasticDeformation(): 0.1,\n }),\n tio.RandomFlip(),\n ))\n resolution_transform = tio.OneOf((\n tio.RandomAnisotropy(),\n tio.RandomBlur(),\n ),\n p=0.75,\n )\n transforms = []\n if resection_params is not None:\n transforms.append(get_simulation_transform(resection_params))\n if landmarks_path is not None:\n transforms.append(tio.HistogramStandardization({'image': landmarks_path}))\n transforms.extend([\n # tio.RandomGamma(p=0.2),\n resolution_transform,\n tio.RandomGhosting(p=0.2),\n tio.RandomSpike(p=0.2),\n tio.RandomMotion(p=0.2),\n tio.RandomBiasField(p=0.5),\n tio.ZNormalization(masking_method=tio.ZNormalization.mean),\n tio.RandomNoise(p=0.75), # always after ZNorm and after blur!\n spatial_transform,\n get_tight_crop(),\n ])\n return tio.Compose(transforms)\n\n\ndef get_subjects_list_from_dir(dataset_dir):\n dataset_dir = Path(dataset_dir)\n mni_dir = dataset_dir / 'mni'\n resection_dir = dataset_dir / 'resection'\n noise_paths = sglob(resection_dir, 
'*noise*')\n subjects_list = []\n for noise_path in noise_paths:\n stem = noise_path.stem.split('_noise')[0]\n image_path = mni_dir / f'{stem}_on_mni.nii.gz'\n gml_path = resection_dir / f'{stem}_gray_matter_left_seg.nii.gz'\n gmr_path = resection_dir / f'{stem}_gray_matter_right_seg.nii.gz'\n rl_path = resection_dir / f'{stem}_resectable_left_seg.nii.gz'\n rr_path = resection_dir / f'{stem}_resectable_right_seg.nii.gz'\n subject = tio.Subject(\n image=tio.ScalarImage(image_path),\n resection_noise=tio.ScalarImage(noise_path),\n resection_gray_matter_left=tio.LabelMap(gml_path),\n resection_gray_matter_right=tio.LabelMap(gmr_path),\n resection_resectable_left=tio.LabelMap(rl_path),\n resection_resectable_right=tio.LabelMap(rr_path),\n )\n subjects_list.append(subject)\n return subjects_list\n\n\ndef get_landmarks_path(dataset=None):\n landmarks_dir = Path(__file__).parent / 'landmarks'\n landmarks_dir.mkdir(exist_ok=True)\n if dataset is None: # get precomputed landmarks from public data\n landmarks_path = landmarks_dir / 'histogram_landmarks_default.npy'\n else:\n filename = f'histogram_landmarks_{get_stems_hash(dataset)}.npy'\n landmarks_path = landmarks_dir / filename\n if not landmarks_path.is_file():\n from torchio.transforms import train_histogram\n images_paths = [subject.image.path for subject in dataset.subjects]\n print('Training histogram landmarks:', landmarks_path)\n train_histogram(images_paths, output_path=landmarks_path)\n return landmarks_path\n\n\ndef get_stems_hash(dataset):\n # https://stackoverflow.com/a/27522708/3956024\n stems_string = ','.join(get_stem(subject.image.path) for subject in dataset.subjects)\n return hashlib.md5(stems_string.encode()).hexdigest()\n\n\ndef get_tight_crop():\n # Crop from (193, 229, 193) to (176, 216, 160)\n crop = tio.Crop((9, 8, 7, 6, 17, 16))\n return crop\n\n\ndef get_real_resection_subjects(dataset_dir):\n dataset_dir = Path(dataset_dir)\n image_dir = dataset_dir / 'image'\n label_dir = dataset_dir / 'label'\n image_paths = sglob(image_dir)\n label_paths = sglob(label_dir)\n assert len(image_paths) == len(label_paths)\n subjects = []\n for image_path, label_path in zip(image_paths, label_paths):\n subject = tio.Subject(\n image=tio.ScalarImage(image_path),\n label=tio.LabelMap(label_path),\n )\n subjects.append(subject)\n return subjects\n\n\ndef get_real_resection_dataset(dataset_dir, transform=None):\n subjects = get_real_resection_subjects(dataset_dir)\n return tio.SubjectsDataset(subjects, transform=transform)\n\n\ndef reduce_dataset(dataset, ratio):\n n = int(len(dataset) * ratio)\n return torch.utils.data.Subset(dataset, list(range(n)))\n\n\ndef cache(dataset, resection_params, augment=True, caches_dir='/tmp/val_set_cache', num_workers=12):\n caches_dir = Path(caches_dir)\n wm_lesion_p = resection_params['wm_lesion_p']\n clot_p = resection_params['clot_p']\n shape = resection_params['shape']\n texture = resection_params['texture']\n augment_string = '_no_augmentation' if not augment else ''\n dir_name = f'wm_{wm_lesion_p}_clot_{clot_p}_{shape}_{texture}{augment_string}'\n cache_dir = caches_dir / dir_name\n image_dir = cache_dir / 'image'\n label_dir = cache_dir / 'label'\n if not cache_dir.is_dir():\n print('Caching validation set')\n image_dir.mkdir(parents=True)\n label_dir.mkdir(parents=True)\n loader = torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n collate_fn=lambda x: x[0],\n )\n for subject in tqdm(loader):\n image_path = image_dir / subject.image.path.name\n label_path = label_dir / 
subject.image.path.name # label has no path because it was created not loaded\n subject.image.save(image_path)\n subject.label.save(label_path)\n\n subjects = []\n for im_path, label_path in zip(sglob(image_dir), sglob(label_dir)):\n subject = tio.Subject(\n image=tio.ScalarImage(im_path),\n label=tio.LabelMap(label_path),\n )\n subjects.append(subject)\n return tio.SubjectsDataset(subjects)\n\n\ndef get_test_transform(landmarks_path):\n transforms = []\n if landmarks_path is not None:\n transforms.append(tio.HistogramStandardization({'image': landmarks_path}))\n transforms.extend([\n tio.ZNormalization(masking_method=tio.ZNormalization.mean),\n get_tight_crop(),\n ])\n return tio.Compose(transforms)\n\n\ndef get_simulation_transform(resection_params):\n transform = RandomResection(\n volumes_range=(844, 83757), # percentiles 1 and 99 of volumes in labeled EPISURG\n wm_lesion_p=resection_params['wm_lesion_p'],\n clot_p=resection_params['clot_p'],\n shape=resection_params['shape'],\n texture=resection_params['texture'],\n )\n return transform\n\n\ndef get_pseudo_loader(\n threshold,\n percentile,\n metric,\n summary_path,\n dataset_name,\n num_workers,\n batch_size=2,\n remove_zero_volume=False,\n ):\n subjects = []\n subject_ids = get_certain_subjects(\n threshold,\n percentile,\n metric,\n summary_path,\n remove_zero_volume=remove_zero_volume,\n )\n dataset_dir = Path('/home/fernando/datasets/real/') / dataset_name\n assert dataset_dir.is_dir()\n image_dir = dataset_dir / 'image'\n label_dir = dataset_dir / 'label'\n for subject_id in subject_ids:\n image_path = list(image_dir.glob(f'{subject_id}_*'))[0]\n label_path = list(label_dir.glob(f'{subject_id}_*'))[0]\n subject = tio.Subject(\n image=tio.ScalarImage(image_path),\n label=tio.LabelMap(label_path),\n )\n subjects.append(subject)\n transform = get_train_transform(get_landmarks_path())\n dataset = tio.SubjectsDataset(subjects, transform=transform)\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n pin_memory=True,\n shuffle=True,\n num_workers=num_workers,\n )\n return loader\n\n\ndef get_certain_subjects(\n threshold,\n percentile,\n metric,\n summary_path,\n remove_zero_volume=False,\n ):\n df = pd.read_csv(summary_path, index_col=0, dtype={'Subject': str})\n if remove_zero_volume:\n df = df[df.Volume > 0]\n column = df[metric]\n assert not (threshold is None and percentile is None)\n assert not (threshold is not None and percentile is not None)\n if percentile is not None:\n df = df[column < column.quantile(percentile / 100)]\n elif threshold is not None:\n df = df[column < threshold]\n return df.Subject.values\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.utils.data.ConcatDataset",
"pandas.read_csv",
"torch.manual_seed",
"torch.random.fork_rng",
"torch.utils.data.random_split",
"sklearn.model_selection.KFold"
]
] |
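The `apis` column of this record lists the data-handling calls extracted from `datasets.py`: seeded splitting (`torch.random.fork_rng`, `torch.manual_seed`, `torch.utils.data.random_split`, `sklearn.model_selection.KFold`) and loading (`torch.utils.data.DataLoader`, `torch.utils.data.ConcatDataset`, `pandas.read_csv`). As a hedged illustration only, not part of the dataset record itself, the sketch below shows the same reproducible-split pattern used by the `DataModule` classes above, with a plain Python list standing in for the `torchio` subjects; the item list, split ratio, and batch size are placeholder assumptions.

```python
import torch
from sklearn.model_selection import KFold


def split_by_ratio(items, ratio=0.9, seed=42):
    # Reproducible ratio split: fork the global RNG state, seed it locally,
    # and let random_split draw the train/validation partition.
    n_train = int(len(items) * ratio)
    lengths = (n_train, len(items) - n_train)
    with torch.random.fork_rng([]):
        torch.manual_seed(seed)
        return torch.utils.data.random_split(items, lengths)


def split_by_fold(items, fold, num_folds=5, seed=42):
    # Reproducible cross-validation split: KFold yields index arrays per fold.
    kf = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
    train_idx, val_idx = list(kf.split(items))[fold]
    return [items[i] for i in train_idx], [items[i] for i in val_idx]


if __name__ == '__main__':
    items = list(range(100))  # placeholder for a list of subjects
    train, val = split_by_ratio(items)
    cv_train, cv_val = split_by_fold(items, fold=0)
    loader = torch.utils.data.DataLoader(train, batch_size=4,
                                         shuffle=True, pin_memory=True)
    print(len(train), len(val), len(cv_train), len(cv_val), len(loader))
```

Forking and re-seeding the RNG keeps the split deterministic without disturbing the global random state, which is the same rationale behind the `split_subjects` helpers in the record above.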
rxjx/autogluon | [
"648c19b8b76a6d663a2a8b42b9f3463e60c63e2c"
] | [
"text/src/autogluon/text/text_prediction/mx/models.py"
] | [
"import numpy as np\nimport scipy.special\nimport os\nimport math\nimport logging\nimport pandas as pd\nimport warnings\nimport time\nimport json\nimport pickle\nimport functools\nimport tqdm\nfrom typing import Tuple\n\nfrom autogluon.core.scheduler.scheduler_factory import scheduler_factory\nfrom autogluon.core.utils import set_logger_verbosity\nfrom sklearn.preprocessing import LabelEncoder\nimport mxnet as mx\nfrom mxnet.util import use_np\nfrom mxnet.lr_scheduler import PolyScheduler, CosineScheduler\nfrom mxnet.gluon.data import DataLoader\nfrom autogluon_contrib_nlp.models import get_backbone\nfrom autogluon_contrib_nlp.lr_scheduler import InverseSquareRootScheduler\nfrom autogluon_contrib_nlp.utils.config import CfgNode\nfrom autogluon_contrib_nlp.utils.misc import grouper, \\\n count_parameters, repeat, get_mxnet_available_ctx\nfrom autogluon_contrib_nlp.utils.parameter import move_to_ctx, clip_grad_global_norm\n\nfrom autogluon.core import args, space\nfrom autogluon.core.utils import in_ipynb, verbosity2loglevel\nfrom autogluon.core.utils.utils import get_cpu_count, get_gpu_count\nfrom autogluon.core.utils.loaders import load_pkl, load_pd\nfrom autogluon.core.task.base import compile_scheduler_options_v2\nfrom autogluon.core.task.base.base_task import schedulers\nfrom autogluon.core.metrics import get_metric, Scorer\nfrom autogluon.core.utils.multiprocessing_utils import force_forkserver\nfrom autogluon.core.dataset import TabularDataset\nfrom autogluon.core.decorator import sample_config\nfrom autogluon.core.constants import BINARY, MULTICLASS, REGRESSION\nfrom autogluon.core.scheduler.reporter import FakeReporter\n\nfrom .modules import MultiModalWithPretrainedTextNN\nfrom .preprocessing import MultiModalTextFeatureProcessor, base_preprocess_cfg,\\\n MultiModalTextBatchify, get_stats_string, auto_shrink_max_length, get_cls_sep_id\nfrom .utils import average_checkpoints, set_seed\nfrom .. import constants as _C\nfrom ..utils import logging_config\nfrom ..presets import ag_text_presets\nfrom ... 
import version\n\nlogger = logging.getLogger(__name__) # return logger\n\n\n@use_np\ndef get_optimizer(cfg, updates_per_epoch):\n \"\"\"\n\n Parameters\n ----------\n cfg\n Configuration\n updates_per_epoch\n The number of updates per training epoch\n\n Returns\n -------\n optimizer\n The optimizer\n optimizer_params\n Optimization parameters\n max_update\n Maximum update\n \"\"\"\n max_update = max(int(np.ceil(updates_per_epoch * cfg.num_train_epochs)), 3)\n warmup_steps = int(np.ceil(updates_per_epoch * cfg.num_train_epochs * cfg.warmup_portion))\n if cfg.lr_scheduler == 'triangular':\n lr_scheduler = PolyScheduler(max_update=max_update,\n base_lr=cfg.lr,\n warmup_begin_lr=cfg.begin_lr,\n pwr=1,\n final_lr=cfg.final_lr,\n warmup_steps=warmup_steps,\n warmup_mode='linear')\n elif cfg.lr_scheduler == 'inv_sqrt':\n lr_scheduler = InverseSquareRootScheduler(warmup_steps=warmup_steps,\n base_lr=cfg.lr,\n warmup_init_lr=cfg.begin_lr)\n elif cfg.lr_scheduler == 'constant':\n lr_scheduler = None\n elif cfg.lr_scheduler == 'cosine':\n lr_scheduler = CosineScheduler(max_update=max_update,\n base_lr=cfg.lr,\n final_lr=cfg.final_lr,\n warmup_steps=warmup_steps,\n warmup_begin_lr=cfg.begin_lr)\n else:\n raise ValueError('Unsupported lr_scheduler=\"{}\"'\n .format(cfg.lr_scheduler))\n optimizer_params = {'learning_rate': cfg.lr,\n 'wd': cfg.wd,\n 'lr_scheduler': lr_scheduler}\n optimizer = cfg.optimizer\n additional_params = {key: value for key, value in cfg.optimizer_params}\n optimizer_params.update(additional_params)\n return optimizer, optimizer_params, max_update\n\n\n@use_np\ndef apply_layerwise_decay(model, layerwise_decay, backbone_name, not_included=None):\n \"\"\"Apply the layer-wise gradient decay\n\n .. math::\n lr = lr * layerwise_decay^(max_depth - layer_depth)\n\n Parameters:\n ----------\n model\n The backbone model\n layerwise_decay: int\n layer-wise decay power\n not_included: list of str\n A list or parameter names that not included in the layer-wise decay\n \"\"\"\n if not_included is None:\n not_included = []\n # consider the task specific fine-tuning layer as the last layer, following with pooler\n # In addition, the embedding parameters have the smaller learning rate based on this setting.\n if 'albert' in backbone_name:\n # Skip if it is the ALBERT model.\n return\n if 'electra' in backbone_name:\n # For ELECTRA, it's called all_encoder_layers\n all_layers = model.encoder.all_encoder_layers\n else:\n # For other models, it's called all_layers\n all_layers = model.encoder.all_layers\n max_depth = len(all_layers) + 2\n for key, value in model.collect_params().items():\n if 'scores' in key:\n value.lr_mult = layerwise_decay ** 0\n if 'pool' in key:\n value.lr_mult = layerwise_decay ** 1\n if 'embed' in key:\n value.lr_mult = layerwise_decay ** max_depth\n\n for (layer_depth, layer) in enumerate(all_layers):\n layer_params = layer.collect_params()\n for key, value in layer_params.items():\n for pn in not_included:\n if pn in key:\n continue\n value.lr_mult = layerwise_decay ** (max_depth - (layer_depth + 1))\n\n\n@use_np\ndef freeze_layers(model, backbone_name, num_trainable_layers):\n if 'albert' in backbone_name:\n # Skip if it is the ALBERT model.\n return\n if 'electra' in backbone_name:\n # For ELECTRA, it's called all_encoder_layers\n all_layers = model.encoder.all_encoder_layers\n else:\n # For other models, it's called all_layers\n all_layers = model.encoder.all_layers\n if num_trainable_layers < 0:\n return\n assert num_trainable_layers <= len(all_layers)\n for i in 
range(len(all_layers) - num_trainable_layers):\n for p in all_layers[i].collect_params().values():\n p.grad_req = 'null'\n return\n\n\ndef base_optimization_config():\n \"\"\"The basic optimization phase\"\"\"\n cfg = CfgNode()\n cfg.lr_scheduler = 'triangular'\n cfg.optimizer = 'adamw'\n cfg.early_stopping_patience = 20 # Stop if we cannot find a better checkpoint\n cfg.optimizer_params = [('beta1', 0.9),\n ('beta2', 0.999),\n ('epsilon', 1e-6),\n ('correct_bias', False)]\n cfg.begin_lr = 0.0\n cfg.batch_size = 128\n cfg.nbest = 1 # Keep the top K performed models\n cfg.per_device_batch_size = 16 # Per-device batch-size\n cfg.auto_per_device_batch_size = True # Whether to automatically determine the runnable\n # per-device batch_size.\n cfg.val_batch_size_mult = 2 # By default, we 2X the batch size for validation\n cfg.lr = 1E-4\n cfg.final_lr = 0.0\n cfg.num_train_epochs = 10\n cfg.warmup_portion = 0.1\n cfg.layerwise_lr_decay = 0.8 # The layer_wise decay\n cfg.wd = 0.01 # Weight Decay\n cfg.max_grad_norm = 1.0 # Maximum Gradient Norm\n # The validation frequency = validation frequency * num_updates_in_an_epoch\n cfg.valid_frequency = 0.2\n # Logging frequency = log frequency * num_updates_in_an_epoch\n cfg.log_frequency = 0.05\n return cfg\n\n\ndef base_model_config():\n cfg = CfgNode()\n cfg.backbone = CfgNode()\n cfg.backbone.name = 'google_electra_base'\n cfg.network = MultiModalWithPretrainedTextNN.get_cfg()\n cfg.num_trainable_layers = -1 # Use a negative number to indicate that all layers are trainable.\n cfg.insert_sep = True # Whether to insert sep tokens between columns\n cfg.train_stochastic_chunk = False # Whether to sample a stochastic chunk from the training text\n cfg.test_stochastic_chunk = False # Whether to use stochastic chunk in testing\n cfg.use_avg_nbest = False # Whether to average the top performed models and use that as the final model.\n # This will usually give us better performance.\n cfg._disable_update = False # This is a hack for trying to disable the update. Should not be used usually\n cfg.inference_num_repeat = 1 # Whether to turn on randomness and repeat the inference for multiple times.\n return cfg\n\n\ndef base_misc_config():\n cfg = CfgNode()\n cfg.seed = 123\n cfg.exp_dir = './autonlp'\n return cfg\n\n\ndef base_cfg():\n cfg = CfgNode()\n cfg.version = 1\n cfg.optimization = base_optimization_config()\n cfg.preprocessing = base_preprocess_cfg()\n cfg.model = base_model_config()\n cfg.misc = base_misc_config()\n cfg.freeze()\n return cfg\n\n\n@use_np\ndef _classification_regression_predict(net, dataloader, problem_type, label_scaler,\n has_label=True, extract_embedding=False,\n num_repeat=1):\n \"\"\"\n\n Parameters\n ----------\n net\n The network\n dataloader\n The dataloader\n problem_type\n Types of the labels\n label_scaler\n Label scaler. 
We will reverse the centering process for regression problem\n has_label\n Whether label is used\n extract_embedding\n Whether to extract the embedding\n num_repeat\n The number of repeats to get the prediction.\n If it is larger than 1, we will average the predictions.\n If it is a regression problem, we will directly average the outputs.\n If it is a classification problem, we will average the logits\n\n Returns\n -------\n predictions\n The predictions\n \"\"\"\n import warnings\n # Filter mxnet warnings\n warnings.filterwarnings('ignore', module='mxnet')\n\n predictions = [[] for _ in range(num_repeat)]\n use_logits = num_repeat > 1 and (problem_type == MULTICLASS or problem_type == BINARY)\\\n and not extract_embedding\n if use_logits:\n logits = [[] for _ in range(num_repeat)]\n ctx_l = net.collect_params().list_ctx()\n for i in range(num_repeat):\n for sample_l in grouper(dataloader, len(ctx_l)):\n iter_pred_l = []\n if use_logits:\n iter_logits_l = []\n for sample, ctx in zip(sample_l, ctx_l):\n if sample is None:\n continue\n if has_label:\n batch_feature, batch_label = sample\n else:\n batch_feature = sample\n batch_feature = move_to_ctx(batch_feature, ctx)\n if extract_embedding:\n _, embeddings = net(batch_feature)\n iter_pred_l.append(embeddings)\n else:\n pred = net(batch_feature)\n if problem_type == MULTICLASS or problem_type == BINARY:\n if num_repeat > 1:\n iter_logits_l.append(pred)\n pred = mx.npx.softmax(pred, axis=-1)\n iter_pred_l.append(pred)\n for pred in iter_pred_l:\n predictions[i].append(pred.asnumpy())\n if use_logits:\n for ele in iter_logits_l:\n logits[i].append(ele.asnumpy())\n predictions[i] = np.concatenate(predictions[i], axis=0)\n if problem_type == REGRESSION and not extract_embedding:\n predictions[i] = label_scaler.inverse_transform(predictions[i])[:, 0]\n if use_logits:\n logits[i] = np.concatenate(logits[i], axis=0)\n if num_repeat == 1:\n return predictions[0]\n else:\n if use_logits:\n logits = np.stack(logits, axis=0).mean(axis=0)\n return scipy.special.softmax(logits, axis=-1)\n else:\n return np.stack(predictions, axis=0).mean(axis=0)\n\n\ndef calculate_metric(scorer, ground_truth, predictions, problem_type):\n if problem_type == BINARY and scorer.name == 'roc_auc':\n # For ROC_AUC, we need to feed in the probability of positive class to the scorer.\n return scorer._sign * scorer(ground_truth, predictions[:, 1])\n else:\n return scorer._sign * scorer(ground_truth, predictions)\n\n\n@use_np\ndef train_function(args, reporter, train_df_path, tuning_df_path,\n time_limit, time_start, base_config,\n problem_type, column_types,\n feature_columns, label_column,\n log_metrics, eval_metric, ngpus_per_trial,\n console_log, seed=None, verbosity=2):\n \"\"\"\n\n Parameters\n ----------\n args\n The arguments\n reporter\n Reporter of the HPO scheduler.\n If it is set to None, we won't use the reporter and will just run a single trial.\n train_df_path\n Path of the training dataframe\n tuning_df_path\n Path of the tuning dataframe\n time_limit\n The time limit of calling this function\n time_start\n The starting timestamp of the experiment\n base_config\n Basic configuration\n problem_type\n Type of the problem.\n column_types\n Type of columns\n feature_columns\n The feature columns\n label_column\n Label column\n log_metrics\n Metrics for logging\n eval_metric\n The stopping metric\n ngpus_per_trial\n The number of GPUs to use per each trial\n console_log\n Whether to log it to console\n seed\n The random seed\n verbosity\n The verbosity\n\n \"\"\"\n 
import warnings\n warnings.filterwarnings('ignore', module='mxnet')\n warnings.filterwarnings('ignore', module='sklearn')\n set_seed(seed)\n is_fake_reporter = isinstance(reporter, FakeReporter)\n if time_limit is not None:\n start_train_tick = time.time()\n time_left = time_limit - (start_train_tick - time_start)\n if time_left <= 0:\n if not is_fake_reporter:\n reporter.terminate()\n return\n if is_fake_reporter:\n search_space = args.rand\n task_id = 0\n else:\n search_space = args['search_space']\n task_id = args.task_id\n # Get the log metric scorers\n if isinstance(log_metrics, str):\n log_metrics = [log_metrics]\n # Load the training and tuning data from the parquet file\n train_data = pd.read_pickle(train_df_path)\n tuning_data = pd.read_pickle(tuning_df_path)\n log_metric_scorers = [get_metric(ele) for ele in log_metrics]\n eval_metric_scorer = get_metric(eval_metric)\n greater_is_better = eval_metric_scorer.greater_is_better\n cfg = base_config.clone()\n specified_values = []\n for key in search_space.keys():\n specified_values.append(key)\n specified_values.append(search_space[key])\n cfg.merge_from_list(specified_values)\n exp_dir = cfg.misc.exp_dir\n exp_dir = os.path.join(exp_dir, 'task{}'.format(task_id))\n os.makedirs(exp_dir, exist_ok=True)\n cfg.defrost()\n cfg.misc.exp_dir = exp_dir\n cfg.freeze()\n logger = logging.getLogger()\n set_logger_verbosity(verbosity, logger)\n logging_config(folder=exp_dir, name='training', logger=logger, console=console_log,\n level=logging.DEBUG,\n console_level=verbosity2loglevel(verbosity))\n logger.log(10, cfg)\n\n # Load backbone model\n backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \\\n = get_backbone(cfg.model.backbone.name)\n if 'roberta' in cfg.model.backbone.name:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)\n else:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg)\n # Build Preprocessor + Preprocess the training dataset + Inference problem type\n # TODO Dynamically cache the preprocessor that has been fitted.\n if problem_type == MULTICLASS or problem_type == BINARY:\n label_generator = LabelEncoder()\n label_generator.fit(pd.concat([train_data[label_column], tuning_data[label_column]]))\n else:\n label_generator = None\n preprocessor = MultiModalTextFeatureProcessor(column_types=column_types,\n label_column=label_column,\n tokenizer_name=cfg.model.backbone.name,\n label_generator=label_generator,\n cfg=cfg.preprocessing)\n logger.info('Fitting and transforming the train data...')\n train_dataset = preprocessor.fit_transform(train_data[feature_columns],\n train_data[label_column])\n with open(os.path.join(exp_dir, 'preprocessor.pkl'), 'wb') as of:\n pickle.dump(preprocessor, of)\n logger.info(f'Done! 
Preprocessor saved to {os.path.join(exp_dir, \"preprocessor.pkl\")}')\n logger.log(10, 'Train Data')\n logger.log(10, get_stats_string(preprocessor, train_dataset, is_train=True))\n logger.info('Process dev set...')\n tuning_dataset = preprocessor.transform(tuning_data[feature_columns],\n tuning_data[label_column])\n logger.info('Done!')\n # Auto Max Length\n if cfg.preprocessing.text.auto_max_length:\n max_length = auto_shrink_max_length(\n train_dataset,\n insert_sep=cfg.model.insert_sep,\n num_text_features=len(preprocessor.text_feature_names),\n auto_max_length_quantile=cfg.preprocessing.text.auto_max_length_quantile,\n round_to=cfg.preprocessing.text.auto_max_length_round_to,\n max_length=cfg.preprocessing.text.max_length)\n else:\n max_length = cfg.preprocessing.text.max_length\n train_stochastic_chunk = cfg.model.train_stochastic_chunk\n test_stochastic_chunk = cfg.model.test_stochastic_chunk\n inference_num_repeat = cfg.model.inference_num_repeat\n if max_length < cfg.preprocessing.text.max_length:\n inference_num_repeat = 1\n cfg.defrost()\n cfg.preprocessing.text.max_length = max_length\n cfg.model.inference_num_repeat = inference_num_repeat\n cfg.freeze()\n with open(os.path.join(exp_dir, 'cfg.yml'), 'w') as f:\n f.write(str(cfg))\n logger.info(f'Max length for chunking text: {max_length}, '\n f'Stochastic chunk: Train-{train_stochastic_chunk}/Test-{test_stochastic_chunk}, '\n f'Test #repeat: {inference_num_repeat}.')\n cls_id, sep_id = get_cls_sep_id(tokenizer)\n train_batchify_fn = MultiModalTextBatchify(\n num_text_inputs=len(preprocessor.text_feature_names),\n num_categorical_inputs=len(preprocessor.categorical_feature_names),\n num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,\n cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,\n mode='train', stochastic_chunk=train_stochastic_chunk,\n insert_sep=cfg.model.insert_sep)\n test_batchify_fn = MultiModalTextBatchify(\n num_text_inputs=len(preprocessor.text_feature_names),\n num_categorical_inputs=len(preprocessor.categorical_feature_names),\n num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,\n cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,\n mode='test', stochastic_chunk=test_stochastic_chunk,\n insert_sep=cfg.model.insert_sep)\n\n # Get the ground-truth dev labels\n gt_dev_labels = np.array([ele[-1] for ele in tuning_dataset])\n if problem_type == REGRESSION:\n gt_dev_labels = preprocessor.label_scaler.inverse_transform(np.expand_dims(gt_dev_labels,\n axis=-1))[:, 0]\n ctx_l = get_mxnet_available_ctx()\n if ngpus_per_trial == 0:\n ctx_l = [mx.cpu()]\n else:\n ctx_l = ctx_l[:ngpus_per_trial]\n base_batch_size = cfg.optimization.per_device_batch_size\n num_accumulated = int(np.ceil(cfg.optimization.batch_size / (base_batch_size * len(ctx_l))))\n inference_base_batch_size = base_batch_size * cfg.optimization.val_batch_size_mult\n train_dataloader = DataLoader(train_dataset,\n batch_size=base_batch_size,\n shuffle=True,\n batchify_fn=train_batchify_fn)\n dev_dataloader = DataLoader(tuning_dataset,\n batch_size=inference_base_batch_size,\n shuffle=False,\n batchify_fn=test_batchify_fn)\n if problem_type == REGRESSION:\n out_shape = 1\n elif problem_type == MULTICLASS:\n out_shape = len(label_generator.classes_)\n elif problem_type == BINARY:\n assert len(label_generator.classes_) == 2\n out_shape = 2\n else:\n raise NotImplementedError\n net = MultiModalWithPretrainedTextNN(\n text_backbone=text_backbone,\n num_text_features=1,\n 
num_categorical_features=len(preprocessor.categorical_feature_names),\n num_numerical_features=len(preprocessor.numerical_feature_names) > 0,\n numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0 else len(preprocessor.numerical_feature_names),\n num_categories=preprocessor.categorical_num_categories,\n get_embedding=False,\n cfg=cfg.model.network,\n out_shape=out_shape)\n net.initialize_with_pretrained_backbone(backbone_params_path, ctx=ctx_l)\n net.hybridize()\n num_total_params, num_total_fixed_params = count_parameters(net.collect_params())\n logger.info('#Total Params/Fixed Params={}/{}'.format(num_total_params,\n num_total_fixed_params))\n # Initialize the optimizer\n updates_per_epoch = int(np.ceil(len(train_dataloader) / (num_accumulated * len(ctx_l))))\n optimizer, optimizer_params, max_update \\\n = get_optimizer(cfg.optimization,\n updates_per_epoch=updates_per_epoch)\n valid_interval = int(math.ceil(cfg.optimization.valid_frequency * updates_per_epoch))\n train_log_interval = int(math.ceil(cfg.optimization.log_frequency * updates_per_epoch))\n\n if 0 < cfg.optimization.layerwise_lr_decay < 1:\n apply_layerwise_decay(net.text_backbone,\n cfg.optimization.layerwise_lr_decay,\n backbone_name=cfg.model.backbone.name)\n freeze_layers(net.text_backbone,\n backbone_name=cfg.model.backbone.name,\n num_trainable_layers=cfg.model.num_trainable_layers)\n\n # Do not apply weight decay to all the LayerNorm and bias\n for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():\n v.wd_mult = 0.0\n params = [p for p in net.collect_params().values() if p.grad_req != 'null']\n trainer = mx.gluon.Trainer(params,\n optimizer, optimizer_params,\n update_on_kvstore=False)\n # Set grad_req if gradient accumulation is required\n if num_accumulated > 1:\n logger.log(15, 'Using gradient accumulation.'\n ' Global batch size = {}'.format(cfg.optimization.batch_size))\n for p in params:\n p.grad_req = 'add'\n net.collect_params().zero_grad()\n train_loop_dataloader = grouper(repeat(train_dataloader), len(ctx_l))\n log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]\n log_num_samples_l = [0 for _ in ctx_l]\n logging_start_tick = time.time()\n nbest = cfg.optimization.nbest\n best_performance_score = [] # Stores the best performing checkpoints\n best_performance_update_idx = [] # Stores the update index that reached the best validation performance\n best_score = None\n mx.npx.waitall()\n no_better_rounds = 0\n report_idx = 0\n start_tick = time.time()\n if time_limit is not None:\n time_limit -= start_tick - time_start\n if time_limit <= 0:\n if not is_fake_reporter:\n reporter.terminate()\n return\n best_report_items = None\n report_local_jsonl_f = open(os.path.join(exp_dir, 'results_local.jsonl'), 'w')\n logger.info(f'Local training results will be saved to '\n f'{os.path.join(exp_dir, \"results_local.jsonl\")}.')\n for update_idx in range(max_update):\n for accum_idx in range(num_accumulated):\n sample_l = next(train_loop_dataloader)\n loss_l = []\n for i, (sample, ctx) in enumerate(zip(sample_l, ctx_l)):\n feature_batch, label_batch = sample\n feature_batch = move_to_ctx(feature_batch, ctx)\n label_batch = move_to_ctx(label_batch, ctx)\n with mx.autograd.record():\n pred = net(feature_batch)\n if problem_type == MULTICLASS or problem_type == BINARY:\n logits = mx.npx.log_softmax(pred, axis=-1)\n loss = - mx.npx.pick(logits,\n mx.np.expand_dims(label_batch, axis=-1))\n elif problem_type == REGRESSION:\n loss = mx.np.square(pred - 
mx.np.expand_dims(label_batch, axis=-1))\n loss_l.append(loss.mean() / len(ctx_l) / num_accumulated)\n log_loss_l[i] += loss_l[i] * len(ctx_l) * loss.shape[0] * num_accumulated\n log_num_samples_l[i] += loss.shape[0]\n for loss in loss_l:\n loss.backward()\n # Begin to update\n trainer.allreduce_grads()\n total_norm, ratio, is_finite = clip_grad_global_norm(params, cfg.optimization.max_grad_norm)\n if not cfg.model._disable_update:\n trainer.update(1.0, ignore_stale_grad=True)\n\n # Clear after update\n if num_accumulated > 1:\n net.collect_params().zero_grad()\n if (update_idx + 1) % train_log_interval == 0:\n log_loss = sum([ele.as_in_ctx(ctx_l[0]) for ele in log_loss_l]).asnumpy()\n log_num_samples = sum(log_num_samples_l)\n logger.log(15,\n '[Iter {}/{}, Epoch {}] train loss={:0.2e}, gnorm={:0.2e}, lr={:0.2e}, #samples processed={},'\n ' #sample per second={:.2f}. ETA={:.2f}min'\n .format(update_idx + 1, max_update,\n int(update_idx / updates_per_epoch),\n log_loss / log_num_samples, total_norm, trainer.learning_rate,\n log_num_samples,\n log_num_samples / (time.time() - logging_start_tick),\n (time.time() - start_tick) / (update_idx + 1)\n * (max_update - update_idx - 1) / 60))\n logging_start_tick = time.time()\n log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]\n log_num_samples_l = [0 for _ in ctx_l]\n if (update_idx + 1) % valid_interval == 0 or (update_idx + 1) == max_update:\n valid_start_tick = time.time()\n dev_predictions = \\\n _classification_regression_predict(net,\n dataloader=dev_dataloader,\n problem_type=problem_type,\n label_scaler=preprocessor.label_scaler,\n has_label=False,\n num_repeat=inference_num_repeat)\n log_scores = [calculate_metric(scorer, gt_dev_labels,\n dev_predictions,\n problem_type)\n for scorer in log_metric_scorers]\n dev_score = calculate_metric(eval_metric_scorer, gt_dev_labels,\n dev_predictions,\n problem_type)\n valid_time_spent = time.time() - valid_start_tick\n find_better = False\n find_topn_better = False\n if len(best_performance_score) < nbest:\n best_performance_score.append(dev_score)\n best_performance_update_idx.append(update_idx + 1)\n net.save_parameters(\n os.path.join(exp_dir,\n f'nbest_model{len(best_performance_score) - 1}.params'))\n find_topn_better = True\n if best_score is None or greater_is_better and dev_score >= best_score\\\n or (not greater_is_better and dev_score <= best_score):\n find_better = True\n net.save_parameters(os.path.join(exp_dir, f'best_model.params'))\n best_score = dev_score\n else:\n # First try to update the top-K\n if greater_is_better:\n if dev_score >= min(best_performance_score):\n find_topn_better = True\n replace_idx = np.argmin(best_performance_score)\n best_performance_score[replace_idx] = dev_score\n best_performance_update_idx[replace_idx] = update_idx + 1\n net.save_parameters(\n os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))\n if dev_score >= best_score:\n find_better = True\n net.save_parameters(os.path.join(exp_dir, f'best_model.params'))\n best_score = dev_score\n\n else:\n if dev_score <= max(best_performance_score):\n find_topn_better = True\n replace_idx = np.argmax(best_performance_score)\n best_performance_score[replace_idx] = dev_score\n best_performance_update_idx[replace_idx] = update_idx + 1\n net.save_parameters(\n os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))\n if dev_score <= best_score:\n find_better = True\n net.save_parameters(os.path.join(exp_dir, f'best_model.params'))\n best_score = dev_score\n if not find_better:\n 
no_better_rounds += 1\n else:\n no_better_rounds = 0\n mx.npx.waitall()\n loss_string = ', '.join(['{}={:0.4e}'.format(metric.name, score)\n for score, metric in zip(log_scores, log_metric_scorers)])\n logger.log(25, '[Iter {}/{}, Epoch {}] valid {}, time spent={:.3f}s,'\n ' total time spent={:.2f}min. Find new best={}, Find new top-{}={}'.format(\n update_idx + 1, max_update, int(update_idx / updates_per_epoch),\n loss_string, valid_time_spent, (time.time() - start_tick) / 60,\n find_better, nbest, find_topn_better))\n if reporter is not None:\n report_items = [('iteration', update_idx + 1),\n ('report_idx', report_idx + 1),\n ('epoch', int(update_idx / updates_per_epoch))] + \\\n [(metric.name, score)\n for score, metric in zip(log_scores, log_metric_scorers)] + \\\n [('find_better', find_better),\n ('find_new_topn', find_topn_better),\n ('nbest_stat', json.dumps([best_performance_score,\n best_performance_update_idx])),\n ('elapsed_time', int(time.time() - start_tick))]\n if eval_metric_scorer._sign < 0:\n report_items.append(('reward_attr', -dev_score))\n else:\n report_items.append(('reward_attr', dev_score))\n report_items.append(('eval_metric', eval_metric_scorer.name))\n report_items.append(('exp_dir', exp_dir))\n if find_better:\n best_report_items = report_items\n reporter(**dict(report_items))\n report_local_jsonl_f.write(json.dumps(dict(report_items)) + '\\n')\n report_local_jsonl_f.flush()\n report_idx += 1\n if no_better_rounds >= cfg.optimization.early_stopping_patience:\n logger.info('Early stopping patience reached!')\n break\n total_time_spent = time.time() - start_tick\n if time_limit is not None and total_time_spent > time_limit:\n break\n # Average checkpoints\n best_report_items_dict = dict(best_report_items)\n best_report_items_dict['report_idx'] = report_idx + 1\n reporter(**best_report_items_dict)\n report_local_jsonl_f.write(json.dumps(best_report_items_dict) + '\\n')\n report_local_jsonl_f.close()\n\n\ndef get_recommended_resource(nthreads_per_trial=None,\n ngpus_per_trial=None) -> Tuple[int, int]:\n \"\"\"Get the recommended resources.\n\n Internally, we will try to use GPU whenever it's possible. 
That means, we will use\n a single GPU for finetuning.\n\n Parameters\n ----------\n nthreads_per_trial\n The number of threads per trial provided by the user.\n ngpus_per_trial\n The number of GPUs per trial provided by the user.\n\n Returns\n -------\n nthreads_per_trial\n The recommended resource.\n ngpus_per_trial\n \"\"\"\n if nthreads_per_trial is None and ngpus_per_trial is None:\n nthreads_per_trial = get_cpu_count()\n ngpus_per_trial = 1\n elif nthreads_per_trial is not None and ngpus_per_trial is None:\n ngpus_per_trial = 1\n elif nthreads_per_trial is None and ngpus_per_trial is not None:\n if ngpus_per_trial != 0:\n num_parallel_jobs = get_gpu_count() // ngpus_per_trial\n nthreads_per_trial = max(get_cpu_count() // num_parallel_jobs, 1)\n else:\n nthreads_per_trial = get_cpu_count()\n nthreads_per_trial = min(nthreads_per_trial, get_cpu_count())\n ngpus_per_trial = min(ngpus_per_trial, get_gpu_count())\n assert nthreads_per_trial > 0 and ngpus_per_trial >= 0,\\\n 'Invalid number of threads and number of GPUs.'\n return nthreads_per_trial, ngpus_per_trial\n\n\n@use_np\nclass MultiModalTextModel:\n \"\"\"Learner of the multimodal text data.\n\n It will be called if the user call `fit()` in TextPredictor.\n\n It is used for making predictions on new data and viewing information about\n models trained during `fit()`.\n \"\"\"\n\n def __init__(self, column_types,\n feature_columns,\n label_columns,\n problem_type,\n eval_metric,\n log_metrics,\n output_directory=None):\n \"\"\"Creates model object.\n\n Parameters\n ----------\n column_types\n The column types.\n feature_columns\n Name of the feature columns\n label_columns\n Name of the label columns.\n problem_type\n Type of the problem\n eval_metric\n The evaluation metric\n log_metrics\n The metrics for logging\n output_directory\n The output directory to save the model\n logger\n The logger\n \"\"\"\n super(MultiModalTextModel, self).__init__()\n self._base_config = base_cfg()\n self._base_config.defrost()\n if output_directory is not None:\n self._output_directory = self._base_config.misc.exp_dir = output_directory\n self._base_config.misc.exp_dir = os.path.abspath(self._base_config.misc.exp_dir)\n self._base_config.freeze()\n self._output_directory = self._base_config.misc.exp_dir\n self._column_types = column_types\n self._eval_metric = eval_metric\n self._log_metrics = log_metrics\n\n self._label_columns = label_columns\n self._feature_columns = feature_columns\n self._problem_type = problem_type\n\n # Need to be set in the train call\n self._net = None # Network for training and inference\n self._embed_net = None # Network for extract the embedding\n self._config = None\n self._results = None\n self._preprocessor = None\n\n @property\n def results(self):\n return self._results\n\n @property\n def preprocessor(self):\n return self._preprocessor\n\n @property\n def output_directory(self):\n \"\"\" Get the output directory. The trained model and the training logs\n will be saved to this folder \"\"\"\n return self._output_directory\n\n @property\n def label_columns(self):\n \"\"\"Name of the label columns\"\"\"\n return self._label_columns\n\n @property\n def problem_type(self):\n \"\"\"Types of the problem\"\"\"\n return self._problem_type\n\n @property\n def feature_columns(self):\n \"\"\"Name of the features\"\"\"\n return self._feature_columns\n\n @property\n def base_config(self):\n \"\"\"The basic configuration. 
Internally, we will fill values in the base config by values\n in the search space.\"\"\"\n return self._base_config\n\n @property\n def results(self):\n \"\"\"Results of the final model\"\"\"\n return self._results\n\n @property\n def config(self):\n \"\"\"The configuration of the final trained model.\"\"\"\n return self._config\n\n @property\n def net(self):\n return self._net\n\n def train(self, train_data, tuning_data,\n num_cpus=None,\n num_gpus=None,\n time_limit=None,\n tune_kwargs=None,\n search_space=None,\n plot_results=False,\n console_log=True,\n seed=None,\n verbosity=2):\n \"\"\"The train function.\n\n Parameters\n ----------\n train_data\n The training data\n tuning_data\n The tuning data\n num_cpus\n Number of CPUs for each trial\n num_gpus\n Number of GPUs for each trial\n time_limit\n The time limits\n tune_kwargs\n Parameters of the HPO algorithms. For example, the scheduling\n algorithm, scheduling backend, HPO algorithm.\n search_space\n The search space options\n plot_results\n Whether to plot results or not\n console_log\n Whether to log into the console\n seed\n The seed\n verbosity\n Verbosity\n \"\"\"\n set_seed(seed)\n set_logger_verbosity(verbosity, logger)\n start_tick = time.time()\n assert len(self._label_columns) == 1, 'Currently, we only support single label.'\n # TODO(sxjscience) Try to support S3\n os.makedirs(self._output_directory, exist_ok=True)\n if search_space is None:\n search_space = \\\n ag_text_presets.create('default')['models']['MultimodalTextModel']['search_space']\n search_space_reg = args(search_space=space.Dict(**search_space))\n # Scheduler and searcher for HPO\n if tune_kwargs is None:\n tune_kwargs = ag_text_presets.create('default')['tune_kwargs']\n scheduler_options = tune_kwargs['scheduler_options']\n num_cpus, num_gpus = get_recommended_resource(num_cpus, num_gpus)\n if num_gpus == 0:\n if 'AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU' in os.environ:\n use_warning = int(os.environ['AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU'])\n else:\n use_warning = False\n if use_warning:\n warnings.warn('No GPU is detected in the machine and we will recommend you to '\n 'use TextPredictor on a GPU-enabled instance. Currently, '\n 'training on CPU is slow.')\n else:\n raise RuntimeError('No GPU is detected in the machine and we will '\n 'not proceed to run TextPredictor because they will train '\n 'too slowly with only CPU. You may try to set `ngpus_per_trial` '\n 'to a number larger than 0 when calling `.fit()`. '\n 'Also, you can set the environment variable '\n '\"AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU=1\" to force the model to '\n 'use CPU for training.')\n logger.info(f\"The GluonNLP V0 backend is used. \"\n f\"We will use {num_cpus} cpus and \"\n f\"{num_gpus} gpus to train each trial.\")\n if scheduler_options is None:\n scheduler_options = dict()\n if plot_results is None:\n if in_ipynb():\n plot_results = True\n else:\n plot_results = False\n scheduler_options = compile_scheduler_options_v2(\n scheduler_options=scheduler_options,\n scheduler=tune_kwargs['search_strategy'],\n search_strategy=tune_kwargs['searcher'],\n search_options=tune_kwargs['search_options'],\n nthreads_per_trial=num_cpus,\n ngpus_per_trial=num_gpus,\n checkpoint=os.path.join(self._output_directory, 'checkpoint.ag'),\n num_trials=tune_kwargs['num_trials'],\n time_out=time_limit,\n resume=False,\n visualizer=scheduler_options.get('visualizer'),\n time_attr='report_idx',\n reward_attr='reward_attr',\n dist_ip_addrs=scheduler_options.get('dist_ip_addrs'))\n # Create a temporary cache file. 
The internal train function will load the\n # temporary cache.\n os.makedirs(os.path.join(self._output_directory, 'data_cache'), exist_ok=True)\n train_df_path = os.path.join(self._output_directory, 'data_cache',\n 'cache_train_dataframe.pd.pkl')\n tuning_df_path = os.path.join(self._output_directory, 'data_cache',\n 'cache_tuning_dataframe.pd.pkl')\n train_data.to_pickle(train_df_path)\n tuning_data.to_pickle(tuning_df_path)\n train_fn = search_space_reg(functools.partial(train_function,\n train_df_path=train_df_path,\n time_limit=time_limit,\n time_start=start_tick,\n tuning_df_path=tuning_df_path,\n base_config=self.base_config,\n problem_type=self.problem_type,\n column_types=self._column_types,\n feature_columns=self._feature_columns,\n label_column=self._label_columns[0],\n log_metrics=self._log_metrics,\n eval_metric=self._eval_metric,\n ngpus_per_trial=scheduler_options['resource']['num_gpus'],\n console_log=console_log,\n verbosity=verbosity))\n no_job_finished_err_msg =\\\n 'No training job has been completed! '\\\n 'There are two possibilities: '\\\n '1) The time_limit is too small, '\\\n 'or 2) There are some internal errors in AutoGluon. '\\\n 'For the first case, you can increase the time_limit or set it to '\\\n 'None, e.g., setting \"predictor.fit(..., time_limit=None). To '\\\n 'further investigate the root cause, you can also try to set the '\\\n '\"verbosity=3\" and try again, i.e., predictor.set_verbosity(3).'\n if scheduler_options['num_trials'] == 1:\n train_fn(train_fn.args['search_space'],\n train_fn.args['_default_config'])\n best_model_saved_dir_path = os.path.join(self._output_directory, 'task0')\n cfg_path = os.path.join(self._output_directory, 'task0', 'cfg.yml')\n\n # Check whether the job has finished\n if not os.path.exists(cfg_path)\\\n or not os.path.exists(os.path.join(self._output_directory,\n 'task0', 'best_model.params')):\n raise RuntimeError(no_job_finished_err_msg)\n cfg = self.base_config.clone_merge(cfg_path)\n local_results = pd.read_json(os.path.join(self._output_directory, 'task0',\n 'results_local.jsonl'), lines=True)\n if plot_results:\n plot_training_curves = os.path.join(self._output_directory,\n 'plot_training_curves.png')\n import matplotlib.pyplot as plt\n plt.ylabel(self._eval_metric)\n plt.xlabel('report_idx')\n plt.title(\"Performance vs Training-Time\")\n plt.plot(local_results['report_idx'].iloc[:-1],\n local_results[local_results['eval_metric'][0]].iloc[:-1], label=f'task0')\n plt.legend(loc='best')\n plt.savefig(plot_training_curves)\n plt.show()\n self._results = local_results\n else:\n if tune_kwargs['search_strategy'] != 'local':\n # Force forkserver if it's not using the local sequential HPO\n force_forkserver()\n scheduler_cls, scheduler_params = scheduler_factory(scheduler_options)\n # Create scheduler, run HPO experiment\n scheduler = scheduler_cls(train_fn, **scheduler_options)\n scheduler.run()\n scheduler.join_jobs()\n if len(scheduler.config_history) == 0:\n raise RuntimeError(no_job_finished_err_msg)\n best_config = scheduler.get_best_config()\n logger.info('Results=', scheduler.searcher._results)\n logger.info('Best_config={}'.format(best_config))\n best_task_id = scheduler.get_best_task_id()\n best_model_saved_dir_path = os.path.join(self._output_directory,\n 'task{}'.format(best_task_id))\n best_cfg_path = os.path.join(best_model_saved_dir_path, 'cfg.yml')\n cfg = self.base_config.clone_merge(best_cfg_path)\n if plot_results:\n plot_training_curves = os.path.join(self._output_directory,\n 
'plot_training_curves.png')\n scheduler.get_training_curves(filename=plot_training_curves,\n plot=plot_results,\n use_legend=True)\n self._results = dict()\n self._results.update(best_reward=scheduler.get_best_reward(),\n best_config=scheduler.get_best_config(),\n total_time=time.time() - start_tick,\n metadata=scheduler.metadata,\n training_history=scheduler.training_history,\n config_history=scheduler.config_history,\n reward_attr=scheduler._reward_attr,\n config=cfg)\n # Consider to move this to a separate predictor\n self._config = cfg\n # Average parameters\n # TODO(sxjscience) Clean up the temporary spaces used to store the intermediate checkpoints.\n if cfg.model.use_avg_nbest:\n nbest_path_l = []\n for best_id in range(cfg.optimization.nbest):\n nbest_path = os.path.join(best_model_saved_dir_path, f'nbest_model{best_id}.params')\n if os.path.exists(nbest_path):\n nbest_path_l.append(nbest_path)\n avg_nbest_path = os.path.join(best_model_saved_dir_path, 'nbest_model_avg.params')\n average_checkpoints(nbest_path_l, avg_nbest_path)\n with open(os.path.join(best_model_saved_dir_path, 'preprocessor.pkl'), 'rb') as in_f:\n self._preprocessor = pickle.load(in_f)\n backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \\\n = get_backbone(cfg.model.backbone.name)\n if 'roberta' in cfg.model.backbone.name:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)\n else:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg)\n if self._problem_type == REGRESSION:\n out_shape = 1\n elif self._problem_type == MULTICLASS:\n out_shape = len(self._preprocessor.label_generator.classes_)\n elif self._problem_type == BINARY:\n assert len(self._preprocessor.label_generator.classes_) == 2\n out_shape = 2\n else:\n raise NotImplementedError\n net = MultiModalWithPretrainedTextNN(\n text_backbone=text_backbone,\n num_text_features=1,\n num_categorical_features=len(self._preprocessor.categorical_feature_names),\n num_numerical_features=len(self._preprocessor.numerical_feature_names) > 0,\n numerical_input_units=None if len(self._preprocessor.numerical_feature_names) == 0 else len(\n self._preprocessor.numerical_feature_names),\n num_categories=self._preprocessor.categorical_num_categories,\n get_embedding=False,\n cfg=cfg.model.network,\n out_shape=out_shape)\n net.hybridize()\n if cfg.model.use_avg_nbest:\n net.load_parameters(avg_nbest_path, ctx=mx.cpu())\n else:\n net.load_parameters(os.path.join(best_model_saved_dir_path, 'best_model.params'),\n ctx=mx.cpu())\n self._net = net\n mx.npx.waitall()\n\n def evaluate(self, data, metrics=None, stochastic_chunk=None, num_repeat=None):\n \"\"\" Report the predictive performance evaluated for a given dataset.\n\n Parameters\n ----------\n data : str or :class:`TabularDataset` or `pandas.DataFrame`\n This Dataset must also contain the label-column with the same column-name as specified during `fit()`.\n If str is passed, `valid_data` will be loaded using the str value as the file path.\n metrics : str or List[str] or None\n Name of metric or a list of names of metrics to report.\n If it is not given, we will return the score of the stored eval_metric.\n stochastic_chunk\n Whether to use stochastic chunk\n num_repeat\n The number of repeats\n\n Returns\n -------\n ret : single number or a dict of metric --> metric scores\n Output\n \"\"\"\n if isinstance(metrics, str):\n metrics = [metrics]\n elif metrics is None:\n metrics = [self._eval_metric]\n assert self.net is not None\n # We will always use all resources 
that are available for evaluation\n ctx_l = get_mxnet_available_ctx()\n self.net.collect_params().reset_ctx(ctx_l)\n\n if not isinstance(data, pd.DataFrame):\n if isinstance(data, (list, dict)):\n data = pd.DataFrame(data)\n elif isinstance(data, str):\n data = load_pd.load(data)\n else:\n raise NotImplementedError(f'The format of data is not understood. '\n f'We have type(data)=\"{type(data)}\"')\n data = data[self._feature_columns + self._label_columns]\n if self._problem_type == MULTICLASS or self._problem_type == BINARY:\n ground_truth = self.preprocessor.label_generator.transform(\n data[self._label_columns[0]])\n predictions = self.predict_proba(data,\n stochastic_chunk=stochastic_chunk,\n num_repeat=num_repeat)\n else:\n ground_truth = pd.to_numeric(data[self._label_columns[0]]).to_numpy().astype(np.float32)\n predictions = self.predict(data,\n stochastic_chunk=stochastic_chunk,\n num_repeat=num_repeat)\n metric_scores = [calculate_metric(get_metric(metric),\n ground_truth, predictions, self._problem_type)\n for metric in metrics]\n\n # Once the inference is completed, we will cache all parameters back\n # to CPU to avoid memory overflow.\n self.net.collect_params().reset_ctx(mx.cpu())\n if len(metric_scores) == 1:\n return metric_scores[0]\n else:\n return {metric: score for metric, score in zip(metrics, metric_scores)}\n\n def _internal_predict(self, data, get_original_labels=True, get_probabilities=False,\n stochastic_chunk=None, num_repeat=None):\n assert self.net is not None\n assert self.config is not None\n # We will always use all resources that are available for evaluation\n ctx_l = get_mxnet_available_ctx()\n self.net.collect_params().reset_ctx(ctx_l)\n\n if not isinstance(data, pd.DataFrame):\n if isinstance(data, (list, dict)):\n data = pd.DataFrame(data)\n elif isinstance(data, str):\n data = load_pd.load(data)\n else:\n raise NotImplementedError(f'The format of data is not understood. 
'\n f'We have type(data)=\"{type(data)}\"')\n dataset = self.preprocessor.transform(data[self._feature_columns])\n inference_batch_size = self.config.optimization.per_device_batch_size \\\n * self.config.optimization.val_batch_size_mult\n cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)\n if stochastic_chunk is None:\n stochastic_chunk = self.config.model.test_stochastic_chunk\n batchify_fn = MultiModalTextBatchify(\n num_text_inputs=len(self.preprocessor.text_feature_names),\n num_categorical_inputs=len(self.preprocessor.categorical_feature_names),\n num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,\n cls_token_id=cls_id, sep_token_id=sep_id,\n max_length=self.config.preprocessing.text.max_length,\n mode='test',\n stochastic_chunk=stochastic_chunk,\n insert_sep=self.config.model.insert_sep)\n dataloader = DataLoader(dataset,\n batch_size=inference_batch_size,\n shuffle=False,\n batchify_fn=batchify_fn)\n if num_repeat is None:\n num_repeat = self.config.model.inference_num_repeat\n test_predictions = _classification_regression_predict(\n self._net,\n dataloader=dataloader,\n problem_type=self._problem_type,\n label_scaler=self.preprocessor.label_scaler,\n has_label=False,\n num_repeat=num_repeat)\n\n # Once the inference is completed, we will cache all parameters back\n # to CPU to avoid memory overflow.\n self.net.collect_params().reset_ctx(mx.cpu())\n if self._problem_type == MULTICLASS or self._problem_type == BINARY:\n if get_probabilities:\n return test_predictions\n else:\n test_predictions = test_predictions.argmax(axis=-1)\n if get_original_labels:\n test_predictions = np.array(\n self.preprocessor.label_generator.inverse_transform(test_predictions))\n return test_predictions\n\n @property\n def class_labels(self):\n \"\"\"The original name of the class labels.\n\n For example, the tabular data may contain classes equal to\n \"entailment\", \"contradiction\", \"neutral\". Internally, these will be converted to\n 0, 1, 2, ...\n\n This function returns the original names of these raw labels.\n\n Returns\n -------\n ret\n List that contain the class names. It will be None if it's not a classification problem.\n \"\"\"\n if self.problem_type == MULTICLASS or self.problem_type == BINARY:\n return self._preprocessor.label_generator.classes_\n else:\n warnings.warn('Accessing class names for a non-classification problem. Return None.')\n return None\n\n def predict_proba(self, test_data, stochastic_chunk=None, num_repeat=None):\n \"\"\"Predict class probabilities instead of class labels (for classification tasks).\n\n Parameters\n ----------\n test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str\n The test data to get predictions for. 
Can be DataFrame/Dataset or a file that can\n be loaded into DataFrame/Dataset.\n stochastic_chunk : bool\n Whether to enable stochastic chunk\n num_repeat : int or None\n The number of repeats for running the inference model.\n\n Returns\n -------\n probabilities : array\n The predicted class probabilities for each sample.\n Shape of this array is (#Samples, num_class).\n Here, the i-th number means the probability of belonging to the i-th class.\n You can access the class names by calling `self.class_names`.\n \"\"\"\n assert self.problem_type == MULTICLASS or self.problem_type == BINARY\n return self._internal_predict(test_data,\n get_original_labels=False,\n get_probabilities=True,\n stochastic_chunk=stochastic_chunk,\n num_repeat=num_repeat)\n\n def predict(self, test_data, get_original_labels=True, stochastic_chunk=None, num_repeat=None):\n \"\"\"Make predictions on new data.\n\n Parameters\n ----------\n test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str\n The test data to get predictions for. Can be DataFrame/Dataset or a file that can be loaded into DataFrame/Dataset.\n get_original_labels : bool, default = True\n Whether or not predictions should be formatted in terms of the original labels.\n For example, the labels might be \"entailment\" or \"not_entailment\" and predictions could either be of this form (if `True`) or integer-indices corresponding to these classes (if `False`).\n stochastic_chunk : bool or None, default = None\n Whether to turn on stochastic chunk\n num_repeat : int or None\n The number of repeats\n\n Returns\n -------\n predictions : array\n The predictions for each sample. Shape of this array is (#Samples,).\n \"\"\"\n return self._internal_predict(test_data,\n get_original_labels=get_original_labels,\n get_probabilities=False,\n stochastic_chunk=stochastic_chunk,\n num_repeat=num_repeat)\n\n def save(self, dir_path):\n \"\"\"Save this model to disk.\n\n Parameters\n ----------\n dir_path : str\n Directory where the model should be saved.\n \"\"\"\n os.makedirs(dir_path, exist_ok=True)\n self.net.save_parameters(os.path.join(dir_path, 'net.params'))\n with open(os.path.join(dir_path, 'cfg.yml'), 'w') as of:\n of.write(self.config.dump())\n # Save preprocessor\n with open(os.path.join(dir_path, 'preprocessor.pkl'), 'wb') as of:\n pickle.dump(self.preprocessor, of)\n if not isinstance(self._eval_metric, str):\n eval_metric = self._eval_metric.name\n else:\n eval_metric = self._eval_metric\n log_metrics = []\n for metric in self._log_metrics:\n if not isinstance(metric, str):\n log_metrics.append(metric.name)\n else:\n log_metrics.append(metric)\n # Save additional assets about the parsed dataset information\n with open(os.path.join(dir_path, 'assets.json'), 'w') as of:\n json.dump(\n {\n 'problem_type': self._problem_type,\n 'label_columns': self._label_columns,\n 'eval_metric': eval_metric,\n 'log_metrics': log_metrics,\n 'feature_columns': self._feature_columns,\n 'column_types': self._column_types,\n 'version': version.__version__,\n }, of, ensure_ascii=True)\n\n @classmethod\n def load(cls, dir_path: str):\n \"\"\"Load a model object previously produced by `fit()` from disk and return this object.\n It is highly recommended the predictor be loaded with the exact AutoGluon version\n it was fit with.\n\n Parameters\n ----------\n dir_path\n Path to directory where this model was previously saved.\n\n Returns\n -------\n model\n A `BertForTextPredictionBasic` object that can be used for making predictions on new data.\n \"\"\"\n cfg = 
base_cfg().clone_merge(os.path.join(dir_path, 'cfg.yml'))\n with open(os.path.join(dir_path, 'preprocessor.pkl'), 'rb') as in_f:\n preprocessor = pickle.load(in_f)\n with open(os.path.join(dir_path, 'assets.json'), 'r') as f:\n assets = json.load(f)\n label_columns = assets['label_columns']\n feature_columns = assets['feature_columns']\n eval_metric = assets['eval_metric']\n log_metrics = assets['log_metrics']\n problem_type = assets['problem_type']\n column_types = assets['column_types']\n # TODO(sxjscience) Post 0.1. In general, we will need to support compatible version check\n version = assets['version']\n backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \\\n = get_backbone(cfg.model.backbone.name)\n if 'roberta' in cfg.model.backbone.name:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)\n else:\n text_backbone = backbone_model_cls.from_cfg(backbone_cfg)\n if problem_type == REGRESSION:\n out_shape = 1\n elif problem_type == MULTICLASS:\n out_shape = len(preprocessor.label_generator.classes_)\n elif problem_type == BINARY:\n assert len(preprocessor.label_generator.classes_) == 2\n out_shape = 2\n else:\n raise NotImplementedError\n net = MultiModalWithPretrainedTextNN(\n text_backbone=text_backbone,\n num_text_features=1,\n num_categorical_features=len(preprocessor.categorical_feature_names),\n num_numerical_features=len(preprocessor.numerical_feature_names) > 0,\n numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0\n else len(preprocessor.numerical_feature_names),\n num_categories=preprocessor.categorical_num_categories,\n get_embedding=False,\n cfg=cfg.model.network,\n out_shape=out_shape)\n net.hybridize()\n ctx_l = mx.cpu()\n net.load_parameters(os.path.join(dir_path, 'net.params'), ctx=ctx_l)\n model = cls(column_types=column_types,\n label_columns=label_columns,\n feature_columns=feature_columns,\n problem_type=problem_type,\n eval_metric=eval_metric,\n log_metrics=log_metrics)\n model._net = net\n model._config = cfg\n model._preprocessor = preprocessor\n return model\n\n def extract_embedding(self, data, stochastic_chunk=None, num_repeat=None):\n \"\"\"Extract the embedding from the pretrained model.\n\n Parameters\n ----------\n data\n Data that can be parsed to pandas dataframe\n stochastic_chunk\n Whether to use stochastic chunk\n num_repeat\n The number of repeats\n\n Returns\n -------\n embeddings\n The output embeddings will have shape\n (#samples, embedding_dim)\n \"\"\"\n if not isinstance(data, pd.DataFrame):\n if isinstance(data, (list, dict)):\n data = pd.DataFrame(data)\n elif isinstance(data, str):\n data = load_pd.load(data)\n else:\n raise NotImplementedError(f'The format of data is not understood. 
'\n f'We have type(data)=\"{type(data)}\"')\n dataset = self.preprocessor.transform(data[self.feature_columns])\n inference_batch_size = self.config.optimization.per_device_batch_size \\\n * self.config.optimization.val_batch_size_mult\n cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)\n if stochastic_chunk is None:\n stochastic_chunk = self.config.model.test_stochastic_chunk\n batchify_fn = MultiModalTextBatchify(\n num_text_inputs=len(self.preprocessor.text_feature_names),\n num_categorical_inputs=len(self.preprocessor.categorical_feature_names),\n num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,\n cls_token_id=cls_id, sep_token_id=sep_id,\n max_length=self.config.preprocessing.text.max_length,\n mode='test',\n stochastic_chunk=stochastic_chunk,\n insert_sep=self.config.model.insert_sep)\n dataloader = DataLoader(dataset,\n batch_size=inference_batch_size,\n shuffle=False,\n batchify_fn=batchify_fn)\n if self._embed_net is None:\n embed_net = MultiModalWithPretrainedTextNN(\n text_backbone=self.net.text_backbone,\n num_text_features=1,\n num_categorical_features=len(self.preprocessor.categorical_feature_names),\n num_numerical_features=len(self.preprocessor.numerical_feature_names) > 0,\n numerical_input_units=None if len(self.preprocessor.numerical_feature_names) == 0\n else len(self.preprocessor.numerical_feature_names),\n num_categories=self.preprocessor.categorical_num_categories,\n get_embedding=True,\n cfg=self.config.model.network,\n out_shape=self.net.out_shape,\n params=self.net.collect_params(),\n prefix='embed_net_')\n embed_net.hybridize()\n self._embed_net = embed_net\n\n if num_repeat is None:\n num_repeat = self.config.model.inference_num_repeat\n ctx_l = get_mxnet_available_ctx()\n self._embed_net.collect_params().reset_ctx(ctx_l)\n embeddings = _classification_regression_predict(self._embed_net,\n dataloader=dataloader,\n problem_type=self._problem_type,\n label_scaler=self.preprocessor.label_scaler,\n has_label=False,\n extract_embedding=True,\n num_repeat=num_repeat)\n self._embed_net.collect_params().reset_ctx(mx.cpu())\n return embeddings\n"
] | [
[
"matplotlib.pyplot.ylabel",
"sklearn.preprocessing.LabelEncoder",
"numpy.stack",
"matplotlib.pyplot.plot",
"pandas.to_numeric",
"matplotlib.pyplot.savefig",
"numpy.argmin",
"matplotlib.pyplot.title",
"numpy.expand_dims",
"pandas.read_pickle",
"numpy.ceil",
"numpy.argmax",
"pandas.concat",
"matplotlib.pyplot.legend",
"pandas.DataFrame",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.concatenate",
"matplotlib.pyplot.xlabel"
]
] |
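The predictor listing above normalises its `data` argument the same way in `evaluate`, `_internal_predict`, and `extract_embedding`: a DataFrame passes through, a list or dict is wrapped in one, and a string is treated as a file path. Below is a minimal standalone sketch of that coercion step; the helper name `to_dataframe` is ours, and AutoGluon's `load_pd.load` from the listing is replaced by `pd.read_csv` as an assumption.
import pandas as pd

def to_dataframe(data):
    # Mirrors the input handling shown in evaluate()/_internal_predict() above.
    if isinstance(data, pd.DataFrame):
        return data
    if isinstance(data, (list, dict)):
        return pd.DataFrame(data)
    if isinstance(data, str):
        # The listing calls load_pd.load(data); read_csv is an assumption here.
        return pd.read_csv(data)
    raise NotImplementedError(f'The format of data is not understood. '
                              f'We have type(data)="{type(data)}"')

print(to_dataframe({'text': ['good movie', 'bad movie'], 'label': [1, 0]}))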
HDembinski/aghast | [
"ed97e9abc870e729d300622253aa7e9c870f77ec"
] | [
"python/tests/test_getitem.py"
] | [
"#!/usr/bin/env python\n\n# BSD 3-Clause License; see https://github.com/scikit-hep/aghast/blob/master/LICENSE\n\nimport sys\nimport unittest\n\nimport numpy\n\nfrom aghast import *\n\n\nclass Test(unittest.TestCase):\n def runTest(self):\n pass\n\n def test_getitem_twodim(self):\n a = Histogram(\n [Axis(IntegerBinning(0, 3)), Axis(IntegerBinning(0, 2))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array(\n [\n [10, 100, 1000],\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n )\n )\n ),\n )\n a.checkvalid()\n assert a.axis[0].binning.toCategoryBinning().categories == [\"0\", \"1\", \"2\", \"3\"]\n assert a.axis[1].binning.toCategoryBinning().categories == [\"0\", \"1\", \"2\"]\n assert a.counts.counts.array.tolist() == [\n [10, 100, 1000],\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n\n assert a.counts[None, None] == sum(\n [10, 100, 1000, 20, 200, 2000, 30, 300, 3000, 40, 400, 4000]\n )\n assert a.counts[None, :].tolist() == [100, 1000, 10000]\n assert a.counts[None].tolist() == [100, 1000, 10000]\n assert a.counts[:, None].tolist() == [1110, 2220, 3330, 4440]\n assert a.counts[None, 1] == 1000\n assert a.counts[1, None] == 2220\n assert a.counts[None, 1:].tolist() == [1000, 10000]\n assert a.counts[1:, None].tolist() == [2220, 3330, 4440]\n assert a.counts[None, [2, 1, 1, 0]].tolist() == [10000, 1000, 1000, 100]\n assert a.counts[[3, 2, 2, 0], None].tolist() == [4440, 3330, 3330, 1110]\n assert a.counts[None, [True, False, True]].tolist() == [100, 10000]\n assert a.counts[[False, True, True, False], None].tolist() == [2220, 3330]\n\n assert a.counts[:, :].tolist() == [\n [10, 100, 1000],\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n assert a.counts[:].tolist() == [\n [10, 100, 1000],\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n assert a.counts[1:, :].tolist() == [\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n assert a.counts[1:].tolist() == [\n [20, 200, 2000],\n [30, 300, 3000],\n [40, 400, 4000],\n ]\n assert a.counts[:, 1:].tolist() == [\n [100, 1000],\n [200, 2000],\n [300, 3000],\n [400, 4000],\n ]\n assert a.counts[2:, 1:].tolist() == [[300, 3000], [400, 4000]]\n assert a.counts[:, 1].tolist() == [100, 200, 300, 400]\n assert a.counts[1, :].tolist() == [20, 200, 2000]\n assert a.counts[1].tolist() == [20, 200, 2000]\n assert a.counts[2:, 1].tolist() == [300, 400]\n assert a.counts[1, 2:].tolist() == [2000]\n assert a.counts[:, [2, 0]].tolist() == [\n [1000, 10],\n [2000, 20],\n [3000, 30],\n [4000, 40],\n ]\n assert a.counts[[2, 0], :].tolist() == [[30, 300, 3000], [10, 100, 1000]]\n assert a.counts[1:, [2, 0]].tolist() == [[2000, 20], [3000, 30], [4000, 40]]\n assert a.counts[[2, 0], 1:].tolist() == [[300, 3000], [100, 1000]]\n assert a.counts[:, [True, False, True]].tolist() == [\n [10, 1000],\n [20, 2000],\n [30, 3000],\n [40, 4000],\n ]\n assert a.counts[[False, True, True, False], :].tolist() == [\n [20, 200, 2000],\n [30, 300, 3000],\n ]\n assert a.counts[1:, [True, False, True]].tolist() == [\n [20, 2000],\n [30, 3000],\n [40, 4000],\n ]\n assert a.counts[[False, True, True, False], 1:].tolist() == [\n [200, 2000],\n [300, 3000],\n ]\n\n assert a.counts[1, 2] == 2000\n assert a.counts[1, [2, 2, 0]].tolist() == [2000, 2000, 20]\n assert a.counts[[2, 2, 0], 1].tolist() == [300, 300, 100]\n assert a.counts[1, [True, False, True]].tolist() == [20, 2000]\n assert a.counts[[False, True, True, False], 1].tolist() == [200, 300]\n\n assert a.counts[[1, 2], [2, 0]].tolist() == 
[[2000, 20], [3000, 30]]\n assert a.counts[[False, True, True, False], [2, 0]].tolist() == [\n [2000, 20],\n [3000, 30],\n ]\n assert a.counts[[False, True, True, False], [True, False, True]].tolist() == [\n [20, 2000],\n [30, 3000],\n ]\n\n assert a.counts[[2, 0], [2, 2, 0]].tolist() == [\n [3000, 3000, 30],\n [1000, 1000, 10],\n ]\n assert a.counts[[2, 0], [True, False, True]].tolist() == [\n [30, 3000],\n [10, 1000],\n ]\n assert a.counts[[True, False, True, False], [True, False, True]].tolist() == [\n [10, 1000],\n [30, 3000],\n ]\n\n def test_getitem_IntegerBinning(self):\n a = Histogram(\n [Axis(IntegerBinning(-5, 5))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(11, dtype=int))\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n ]\n assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n assert a.counts[None] == 55\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5] == 5\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[numpy.array([7, 4, 7, 5, -1])].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False, True]\n )\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n a = Histogram(\n [Axis(IntegerBinning(-5, 5, loc_overflow=IntegerBinning.above1))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"[6, +inf)\",\n ]\n assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]\n\n assert a.counts[None] == 55 + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n a = Histogram(\n [Axis(IntegerBinning(-5, 5, loc_overflow=IntegerBinning.below1))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[6, +inf)\",\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n ]\n assert a.counts.counts.array.tolist() == [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n assert a.counts[None] == 55 + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]\n assert a.counts[\n [True, 
False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n a = Histogram(\n [\n Axis(\n IntegerBinning(\n -5,\n 5,\n loc_underflow=IntegerBinning.below2,\n loc_overflow=IntegerBinning.below1,\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n [123, 999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"(-inf, -6]\",\n \"[6, +inf)\",\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n ]\n assert a.counts.counts.array.tolist() == [\n 123,\n 999,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n ]\n\n assert a.counts[None] == 55 + 123 + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]\n assert a.counts[-numpy.inf : 5].tolist() == [123, 0, 1, 2, 3, 4]\n assert a.counts[-numpy.inf : numpy.inf].tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 999,\n ]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]\n assert a.counts[-numpy.inf :].tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n ]\n assert a.counts[5] == 5\n assert a.counts[-numpy.inf] == 123\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -numpy.inf, -1]].tolist() == [\n 7,\n 4,\n 7,\n 999,\n 5,\n 123,\n 10,\n ]\n assert a.counts[\n [7, -numpy.inf, 4, 7, numpy.inf, 5, -numpy.inf, -1]\n ].tolist() == [7, 123, 4, 7, 999, 5, 123, 10]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n a = Histogram(\n [\n Axis(\n IntegerBinning(\n -5,\n 5,\n loc_underflow=IntegerBinning.above1,\n loc_overflow=IntegerBinning.below1,\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 123]\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[6, +inf)\",\n \"-5\",\n \"-4\",\n \"-3\",\n \"-2\",\n \"-1\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"(-inf, -6]\",\n ]\n assert a.counts.counts.array.tolist() == [\n 999,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 123,\n ]\n\n assert a.counts[None] == 55 + 123 + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]\n assert a.counts[-numpy.inf : 5].tolist() == [123, 0, 1, 2, 3, 4]\n assert a.counts[-numpy.inf : numpy.inf].tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 999,\n ]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]\n assert a.counts[-numpy.inf :].tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n ]\n assert a.counts[5] == 5\n assert a.counts[-numpy.inf] == 123\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]\n assert a.counts[[7, 4, 7, numpy.inf, 5, -numpy.inf, -1]].tolist() == [\n 7,\n 4,\n 7,\n 999,\n 5,\n 123,\n 10,\n ]\n assert a.counts[\n [7, -numpy.inf, 
4, 7, numpy.inf, 5, -numpy.inf, -1]\n ].tolist() == [7, 123, 4, 7, 999, 5, 123, 10]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False, True]\n ].tolist() == [0, 2, 4, 6, 8, 10]\n\n def test_getitem_RegularBinning(self):\n a = Histogram(\n [Axis(RegularBinning(10, RealInterval(-5, 5)))],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(numpy.arange(10, dtype=int))\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n ]\n assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5] == 5\n assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 9]\n assert a.counts[numpy.array([7, 4, 7, 5, -1])].tolist() == [7, 4, 7, 5, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n\n a = Histogram(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n overflow=RealOverflow(loc_overflow=RealOverflow.above1),\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999], dtype=int)\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n \"[5, +inf]\",\n ]\n assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]\n assert a.counts[5] == 5\n assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.inf, -1]].tolist() == [\n 7,\n 999,\n 4,\n 7,\n 5,\n 999,\n 9,\n ]\n assert a.counts[\n numpy.array([7, numpy.inf, 4, 7, 5, numpy.inf, -1])\n ].tolist() == [7, 999, 4, 7, 5, 999, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n\n a = Histogram(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n overflow=RealOverflow(loc_overflow=RealOverflow.below1),\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array([999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int)\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[5, +inf]\",\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n ]\n assert a.counts.counts.array.tolist() == [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
999]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.inf, -1]].tolist() == [\n 7,\n 999,\n 4,\n 7,\n 5,\n 999,\n 9,\n ]\n assert a.counts[\n numpy.array([7, numpy.inf, 4, 7, 5, numpy.inf, -1])\n ].tolist() == [7, 999, 4, 7, 5, 999, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n\n a = Histogram(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n overflow=RealOverflow(\n loc_overflow=RealOverflow.below2,\n loc_nanflow=RealOverflow.below1,\n ),\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array([999, 123, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int)\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"[5, +inf]\",\n \"{nan}\",\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n ]\n assert a.counts.counts.array.tolist() == [\n 999,\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n ]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999 + 123\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[numpy.nan] == 123\n assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.nan, -1]].tolist() == [\n 7,\n 999,\n 4,\n 7,\n 5,\n 123,\n 9,\n ]\n if sys.version_info[0] >= 3:\n exec(\n \"assert a.counts[[numpy.inf, ..., numpy.nan]].tolist() == [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 123]\"\n )\n assert a.counts[\n numpy.array([7, numpy.inf, 4, 7, 5, numpy.nan, -1])\n ].tolist() == [7, 999, 4, 7, 5, 123, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n\n a = Histogram(\n [\n Axis(\n RegularBinning(\n 10,\n RealInterval(-5, 5),\n overflow=RealOverflow(\n loc_overflow=RealOverflow.above1,\n loc_nanflow=RealOverflow.below1,\n ),\n )\n )\n ],\n UnweightedCounts(\n InterpretedInlineBuffer.fromarray(\n numpy.array([123, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999], dtype=int)\n )\n ),\n )\n assert a.axis[0].binning.toCategoryBinning().categories == [\n \"{nan}\",\n \"[-5, -4)\",\n \"[-4, -3)\",\n \"[-3, -2)\",\n \"[-2, -1)\",\n \"[-1, 0)\",\n \"[0, 1)\",\n \"[1, 2)\",\n \"[2, 3)\",\n \"[3, 4)\",\n \"[4, 5)\",\n \"[5, +inf]\",\n ]\n assert a.counts.counts.array.tolist() == [\n 123,\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n 7,\n 8,\n 9,\n 999,\n ]\n\n assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999 + 123\n assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]\n assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]\n assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]\n assert a.counts[5] == 5\n assert a.counts[numpy.inf] == 999\n assert a.counts[numpy.nan] == 123\n 
assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.nan, -1]].tolist() == [\n 7,\n 999,\n 4,\n 7,\n 5,\n 123,\n 9,\n ]\n assert a.counts[\n numpy.array([7, numpy.inf, 4, 7, 5, numpy.nan, -1])\n ].tolist() == [7, 999, 4, 7, 5, 123, 9]\n assert a.counts[\n [True, False, True, False, True, False, True, False, True, False]\n ].tolist() == [0, 2, 4, 6, 8]\n assert a.counts[\n numpy.array(\n [True, False, True, False, True, False, True, False, True, False]\n )\n ].tolist() == [0, 2, 4, 6, 8]\n"
] | [
[
"numpy.array",
"numpy.arange"
]
] |
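The aghast tests above exercise multi-dimensional indexing on `Histogram.counts`, where `None` sums over an axis and slices, integer lists, and boolean masks select bins. A minimal sketch distilled from the first test case; constructors, index forms, and expected values are taken directly from the assertions in the listing.
import numpy
from aghast import (Histogram, Axis, IntegerBinning,
                    UnweightedCounts, InterpretedInlineBuffer)

a = Histogram(
    [Axis(IntegerBinning(0, 3)), Axis(IntegerBinning(0, 2))],
    UnweightedCounts(InterpretedInlineBuffer.fromarray(
        numpy.array([[10, 100, 1000],
                     [20, 200, 2000],
                     [30, 300, 3000],
                     [40, 400, 4000]]))),
)
a.checkvalid()

print(a.counts[None, None])           # 11100: None sums over an axis, so this is the grand total
print(a.counts[:, None].tolist())     # [1110, 2220, 3330, 4440]: per-row sums over axis 1
print(a.counts[1, 2])                 # 2000: a single bin
print(a.counts[1:, [2, 0]].tolist())  # [[2000, 20], [3000, 30], [4000, 40]]: slice plus fancy index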
Andy-math/optimizer | [
"a65f5ee54a0ae4e02aefb008d47c2d551d071ef0"
] | [
"optimizer/_internals/common/linneq.py"
] | [
"# -*- coding: utf-8 -*-\n\n\nfrom typing import Optional, Tuple\n\nimport numpy\n\nfrom overloads import bind_checker, dyn_typing\nfrom overloads.shortcuts import assertNoInfNaN, assertNoNaN\nfrom overloads.typedefs import ndarray\n\n\ndef noCheck(_: bool) -> None:\n pass\n\n\ndef constraint_check(\n constraints: Tuple[ndarray, ndarray, ndarray, ndarray],\n *,\n theta: Optional[ndarray] = None\n) -> None:\n A, b, lb, ub = constraints\n if theta is not None:\n assertNoInfNaN(theta)\n assertNoInfNaN(A)\n assertNoInfNaN(b)\n assertNoNaN(lb)\n assertNoNaN(ub)\n\n\ndef _input_checker(\n parameters: Tuple[ndarray, Tuple[ndarray, ndarray, ndarray, ndarray]]\n) -> None:\n theta, constraints = parameters\n constraint_check(constraints, theta=theta)\n\n\nn = dyn_typing.SizeVar()\nnConstraint = dyn_typing.SizeVar()\n\n\n@dyn_typing.dyn_check_2(\n input=(\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.Tuple(\n (\n dyn_typing.NDArray(numpy.float64, (nConstraint, n)),\n dyn_typing.NDArray(numpy.float64, (nConstraint,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n )\n ),\n ),\n output=dyn_typing.Bool(),\n)\n@bind_checker.bind_checker_2(input=_input_checker, output=noCheck)\ndef check(\n theta: ndarray, constraints: Tuple[ndarray, ndarray, ndarray, ndarray]\n) -> bool:\n A, b, lb, ub = constraints\n \"\"\"检查参数theta是否满足约束[A @ theta <= b],空约束返回True\"\"\"\n result = bool(\n numpy.all(lb <= theta) and numpy.all(theta <= ub) and numpy.all(A @ theta <= b)\n )\n return result\n\n\nn = dyn_typing.SizeVar()\nnConstraint = dyn_typing.SizeVar()\n\n\n@dyn_typing.dyn_check_2(\n input=(\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.Tuple(\n (\n dyn_typing.NDArray(numpy.float64, (nConstraint, n)),\n dyn_typing.NDArray(numpy.float64, (nConstraint,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n )\n ),\n ),\n output=dyn_typing.Tuple(\n (\n dyn_typing.NDArray(numpy.float64, (n,)),\n dyn_typing.NDArray(numpy.float64, (n,)),\n )\n ),\n)\n@bind_checker.bind_checker_2(\n input=_input_checker, output=bind_checker.make_checker_2(assertNoNaN, assertNoNaN)\n)\ndef margin(\n theta: ndarray, constraints: Tuple[ndarray, ndarray, ndarray, ndarray]\n) -> Tuple[ndarray, ndarray]:\n \"\"\"\n 返回theta距离线性约束边界的间距下界和上界(h_lb, h_ub)\n h: 步长, lb: 下界, ub: 上界\n theta超出边界时 AssertionError\n \"\"\"\n assert check(theta, constraints)\n A, b, lb, ub = constraints\n if b.shape[0] == 0:\n h_lb = numpy.full(theta.shape, -numpy.inf)\n h_ub = numpy.full(theta.shape, numpy.inf)\n else:\n \"\"\"\n A @ (theta+h*(arange(n) == i)) == b\n => A @ h*(arange(n) == i) == b - A @ theta\n => h*A[:, i] == b - A @ theta (*must positive as valid point)\n => h == (b - A @ theta)/A[:, i]\n \"\"\"\n residual: ndarray = b - A @ theta # (nConst, )\n residual.shape = (A.shape[0], 1) # (nConst, 1)\n h: ndarray = residual / A # (nConst, n)\n \"\"\"\n lb: 所有负数里面取最大\n ub: 所有正数里面取最小\n 系数A为0,则约束与theta(i)无关\n \"\"\"\n h_lb = h.copy()\n h_ub = h.copy()\n h_lb[A >= 0] = -numpy.inf\n h_ub[A <= 0] = numpy.inf\n h_lb = h_lb.max(axis=0) # type: ignore\n h_ub = h_ub.min(axis=0) # type: ignore\n \"\"\"\n [lb/ub]补丁\n theta+h == [lb/ub]\n => h = [lb/ub]-theta\n \"\"\"\n h_lb = numpy.maximum(h_lb, lb - theta)\n h_ub = numpy.minimum(h_ub, ub - theta)\n return h_lb, h_ub\n"
] | [
[
"numpy.maximum",
"numpy.full",
"numpy.all",
"numpy.minimum"
]
] |
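A minimal usage sketch for the `check` and `margin` helpers listed above. The import path is an assumption read off the file_path column of this row (`optimizer/_internals/common/linneq.py`); the constraint data is illustrative. Both functions expect float64 arrays of shapes (nConstraint, n), (nConstraint,), (n,), (n,) for (A, b, lb, ub), as enforced by the dyn_typing decorators.
import numpy
from optimizer._internals.common.linneq import check, margin

# One linear constraint x0 + x1 <= 1, plus box bounds -2 <= x_i <= 2.
A = numpy.array([[1.0, 1.0]])
b = numpy.array([1.0])
lb = numpy.full(2, -2.0)
ub = numpy.full(2, 2.0)
constraints = (A, b, lb, ub)

theta = numpy.array([0.2, 0.3])
assert check(theta, constraints)          # theta is feasible
h_lb, h_ub = margin(theta, constraints)   # per-coordinate step range that keeps theta feasible
print(h_lb, h_ub)                         # e.g. [-2.2 -2.3] [0.5 0.5]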
cowirihy/pymc3 | [
"f0b95773047af12f3c0ded04d707f02ddc4d4f6b"
] | [
"pymc3/tests/test_distributions_random.py"
] | [
"# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport numpy as np\nimport numpy.testing as npt\nimport scipy.stats as st\nfrom scipy.special import expit\nfrom scipy import linalg\nimport numpy.random as nr\nimport theano\n\nimport pymc3 as pm\nfrom pymc3.distributions.dist_math import clipped_beta_rvs\nfrom pymc3.distributions.distribution import (draw_values,\n _DrawValuesContext,\n _DrawValuesContextBlocker)\nfrom .helpers import SeededTest\nfrom .test_distributions import (\n build_model, Domain, product, R, Rplus, Rplusbig, Runif, Rplusdunif,\n Unit, Nat, NatSmall, I, Simplex, Vector, PdMatrix,\n PdMatrixChol, PdMatrixCholUpper, RealMatrix, RandomPdMatrix\n)\n\n\ndef pymc3_random(dist, paramdomains, ref_rand, valuedomain=Domain([0]),\n size=10000, alpha=0.05, fails=10, extra_args=None,\n model_args=None):\n if model_args is None:\n model_args = {}\n model = build_model(dist, valuedomain, paramdomains, extra_args)\n domains = paramdomains.copy()\n for pt in product(domains, n_samples=100):\n pt = pm.Point(pt, model=model)\n pt.update(model_args)\n p = alpha\n # Allow KS test to fail (i.e., the samples be different)\n # a certain number of times. Crude, but necessary.\n f = fails\n while p <= alpha and f > 0:\n s0 = model.named_vars['value'].random(size=size, point=pt)\n s1 = ref_rand(size=size, **pt)\n _, p = st.ks_2samp(np.atleast_1d(s0).flatten(),\n np.atleast_1d(s1).flatten())\n f -= 1\n assert p > alpha, str(pt)\n\n\ndef pymc3_random_discrete(dist, paramdomains,\n valuedomain=Domain([0]), ref_rand=None,\n size=100000, alpha=0.05, fails=20):\n model = build_model(dist, valuedomain, paramdomains)\n domains = paramdomains.copy()\n for pt in product(domains, n_samples=100):\n pt = pm.Point(pt, model=model)\n p = alpha\n # Allow Chisq test to fail (i.e., the samples be different)\n # a certain number of times.\n f = fails\n while p <= alpha and f > 0:\n o = model.named_vars['value'].random(size=size, point=pt)\n e = ref_rand(size=size, **pt)\n o = np.atleast_1d(o).flatten()\n e = np.atleast_1d(e).flatten()\n observed = dict(zip(*np.unique(o, return_counts=True)))\n expected = dict(zip(*np.unique(e, return_counts=True)))\n for e in expected.keys():\n expected[e] = (observed.get(e, 0), expected[e])\n k = np.array([v for v in expected.values()])\n if np.all(k[:, 0] == k[:, 1]):\n p = 1.\n else:\n _, p = st.chisquare(k[:, 0], k[:, 1])\n f -= 1\n assert p > alpha, str(pt)\n\n\nclass TestDrawValues(SeededTest):\n def test_draw_scalar_parameters(self):\n with pm.Model():\n y = pm.Normal('y1', mu=0., sigma=1.)\n mu, tau = draw_values([y.distribution.mu, y.distribution.tau])\n npt.assert_almost_equal(mu, 0)\n npt.assert_almost_equal(tau, 1)\n\n def test_draw_dependencies(self):\n with pm.Model():\n x = pm.Normal('x', mu=0., sigma=1.)\n exp_x = pm.Deterministic('exp_x', pm.math.exp(x))\n\n x, exp_x = draw_values([x, exp_x])\n npt.assert_almost_equal(np.exp(x), exp_x)\n\n def test_draw_order(self):\n with pm.Model():\n x = pm.Normal('x', 
mu=0., sigma=1.)\n exp_x = pm.Deterministic('exp_x', pm.math.exp(x))\n\n # Need to draw x before drawing log_x\n exp_x, x = draw_values([exp_x, x])\n npt.assert_almost_equal(np.exp(x), exp_x)\n\n def test_draw_point_replacement(self):\n with pm.Model():\n mu = pm.Normal('mu', mu=0., tau=1e-3)\n sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)\n y = pm.Normal('y', mu=mu, sigma=sigma)\n mu2, tau2 = draw_values([y.distribution.mu, y.distribution.tau],\n point={'mu': 5., 'sigma': 2.})\n npt.assert_almost_equal(mu2, 5)\n npt.assert_almost_equal(tau2, 1 / 2.**2)\n\n def test_random_sample_returns_nd_array(self):\n with pm.Model():\n mu = pm.Normal('mu', mu=0., tau=1e-3)\n sigma = pm.Gamma('sigma', alpha=1., beta=1., transform=None)\n y = pm.Normal('y', mu=mu, sigma=sigma)\n mu, tau = draw_values([y.distribution.mu, y.distribution.tau])\n assert isinstance(mu, np.ndarray)\n assert isinstance(tau, np.ndarray)\n\n\nclass TestDrawValuesContext:\n def test_normal_context(self):\n with _DrawValuesContext() as context0:\n assert context0.parent is None\n context0.drawn_vars['root_test'] = 1\n with _DrawValuesContext() as context1:\n assert id(context1.drawn_vars) == id(context0.drawn_vars)\n assert context1.parent == context0\n with _DrawValuesContext() as context2:\n assert id(context2.drawn_vars) == id(context0.drawn_vars)\n assert context2.parent == context1\n context2.drawn_vars['leaf_test'] = 2\n assert context1.drawn_vars['leaf_test'] == 2\n context1.drawn_vars['root_test'] = 3\n assert context0.drawn_vars['root_test'] == 3\n assert context0.drawn_vars['leaf_test'] == 2\n\n def test_blocking_context(self):\n with _DrawValuesContext() as context0:\n assert context0.parent is None\n context0.drawn_vars['root_test'] = 1\n with _DrawValuesContext() as context1:\n assert id(context1.drawn_vars) == id(context0.drawn_vars)\n assert context1.parent == context0\n with _DrawValuesContextBlocker() as blocker:\n assert id(blocker.drawn_vars) != id(context0.drawn_vars)\n assert blocker.parent is None\n blocker.drawn_vars['root_test'] = 2\n with _DrawValuesContext() as context2:\n assert id(context2.drawn_vars) == id(blocker.drawn_vars)\n assert context2.parent == blocker\n context2.drawn_vars['root_test'] = 3\n context2.drawn_vars['leaf_test'] = 4\n assert blocker.drawn_vars['root_test'] == 3\n assert 'leaf_test' not in context1.drawn_vars\n assert context0.drawn_vars['root_test'] == 1\n\n\nclass BaseTestCases:\n class BaseTestCase(SeededTest):\n shape = 5\n\n def setup_method(self, *args, **kwargs):\n super().setup_method(*args, **kwargs)\n self.model = pm.Model()\n\n def get_random_variable(self, shape, with_vector_params=False, name=None):\n if with_vector_params:\n params = {key: value * np.ones(self.shape, dtype=np.dtype(type(value))) for\n key, value in self.params.items()}\n else:\n params = self.params\n if name is None:\n name = self.distribution.__name__\n with self.model:\n if shape is None:\n return self.distribution(name, transform=None, **params)\n else:\n try:\n return self.distribution(name, shape=shape, transform=None, **params)\n except TypeError:\n if np.sum(np.atleast_1d(shape)) == 0:\n pytest.skip(\"Timeseries must have positive shape\")\n raise\n\n @staticmethod\n def sample_random_variable(random_variable, size):\n try:\n return random_variable.random(size=size)\n except AttributeError:\n return random_variable.distribution.random(size=size)\n\n @pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)\n def test_scalar_parameter_shape(self, size):\n rv = 
self.get_random_variable(None)\n if size is None:\n expected = 1,\n else:\n expected = np.atleast_1d(size).tolist()\n actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape\n assert tuple(expected) == actual\n\n @pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)\n def test_scalar_shape(self, size):\n shape = 10\n rv = self.get_random_variable(shape)\n\n if size is None:\n expected = []\n else:\n expected = np.atleast_1d(size).tolist()\n expected.append(shape)\n actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape\n assert tuple(expected) == actual\n\n @pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)\n def test_parameters_1d_shape(self, size):\n rv = self.get_random_variable(self.shape, with_vector_params=True)\n if size is None:\n expected = []\n else:\n expected = np.atleast_1d(size).tolist()\n expected.append(self.shape)\n actual = self.sample_random_variable(rv, size).shape\n assert tuple(expected) == actual\n\n @pytest.mark.parametrize('size', [None, 5, (4, 5)], ids=str)\n def test_broadcast_shape(self, size):\n broadcast_shape = (2 * self.shape, self.shape)\n rv = self.get_random_variable(broadcast_shape, with_vector_params=True)\n if size is None:\n expected = []\n else:\n expected = np.atleast_1d(size).tolist()\n expected.extend(broadcast_shape)\n actual = np.atleast_1d(self.sample_random_variable(rv, size)).shape\n assert tuple(expected) == actual\n\n @pytest.mark.parametrize('shape', [(), (1,), (1, 1), (1, 2), (10, 10, 1), (10, 10, 2)], ids=str)\n def test_different_shapes_and_sample_sizes(self, shape):\n prefix = self.distribution.__name__\n\n rv = self.get_random_variable(shape, name=f'{prefix}_{shape}')\n for size in (None, 1, 5, (4, 5)):\n if size is None:\n s = []\n else:\n try:\n s = list(size)\n except TypeError:\n s = [size]\n if s == [1]:\n s = []\n if shape not in ((), (1,)):\n s.extend(shape)\n e = tuple(s)\n a = self.sample_random_variable(rv, size).shape\n assert e == a\n\n\nclass TestGaussianRandomWalk(BaseTestCases.BaseTestCase):\n distribution = pm.GaussianRandomWalk\n params = {'mu': 1., 'sigma': 1.}\n\n @pytest.mark.xfail(reason=\"Supporting this makes a nasty API\")\n def test_broadcast_shape(self):\n super().test_broadcast_shape()\n\nclass TestNormal(BaseTestCases.BaseTestCase):\n distribution = pm.Normal\n params = {'mu': 0., 'tau': 1.}\n\nclass TestTruncatedNormal(BaseTestCases.BaseTestCase):\n distribution = pm.TruncatedNormal\n params = {'mu': 0., 'tau': 1., 'lower': -0.5, 'upper': 0.5}\n\nclass TestTruncatedNormalLower(BaseTestCases.BaseTestCase):\n distribution = pm.TruncatedNormal\n params = {'mu': 0., 'tau': 1., 'lower': -0.5}\n\nclass TestTruncatedNormalUpper(BaseTestCases.BaseTestCase):\n distribution = pm.TruncatedNormal\n params = {'mu': 0., 'tau': 1., 'upper': 0.5}\n\nclass TestSkewNormal(BaseTestCases.BaseTestCase):\n distribution = pm.SkewNormal\n params = {'mu': 0., 'sigma': 1., 'alpha': 5.}\n\n\nclass TestHalfNormal(BaseTestCases.BaseTestCase):\n distribution = pm.HalfNormal\n params = {'tau': 1.}\n\n\nclass TestUniform(BaseTestCases.BaseTestCase):\n distribution = pm.Uniform\n params = {'lower': 0., 'upper': 1.}\n\n\nclass TestTriangular(BaseTestCases.BaseTestCase):\n distribution = pm.Triangular\n params = {'c': 0.5, 'lower': 0., 'upper': 1.}\n\n\nclass TestWald(BaseTestCases.BaseTestCase):\n distribution = pm.Wald\n params = {'mu': 1., 'lam': 1., 'alpha': 0.}\n\n\nclass TestBeta(BaseTestCases.BaseTestCase):\n distribution = pm.Beta\n params = {'alpha': 1., 'beta': 1.}\n\n\nclass 
TestKumaraswamy(BaseTestCases.BaseTestCase):\n distribution = pm.Kumaraswamy\n params = {'a': 1., 'b': 1.}\n\n\nclass TestExponential(BaseTestCases.BaseTestCase):\n distribution = pm.Exponential\n params = {'lam': 1.}\n\n\nclass TestLaplace(BaseTestCases.BaseTestCase):\n distribution = pm.Laplace\n params = {'mu': 1., 'b': 1.}\n\n\nclass TestLognormal(BaseTestCases.BaseTestCase):\n distribution = pm.Lognormal\n params = {'mu': 1., 'tau': 1.}\n\n\nclass TestStudentT(BaseTestCases.BaseTestCase):\n distribution = pm.StudentT\n params = {'nu': 5., 'mu': 0., 'lam': 1.}\n\n\nclass TestPareto(BaseTestCases.BaseTestCase):\n distribution = pm.Pareto\n params = {'alpha': 0.5, 'm': 1.}\n\n\nclass TestCauchy(BaseTestCases.BaseTestCase):\n distribution = pm.Cauchy\n params = {'alpha': 1., 'beta': 1.}\n\n\nclass TestHalfCauchy(BaseTestCases.BaseTestCase):\n distribution = pm.HalfCauchy\n params = {'beta': 1.}\n\n\nclass TestGamma(BaseTestCases.BaseTestCase):\n distribution = pm.Gamma\n params = {'alpha': 1., 'beta': 1.}\n\n\nclass TestInverseGamma(BaseTestCases.BaseTestCase):\n distribution = pm.InverseGamma\n params = {'alpha': 0.5, 'beta': 0.5}\n\n\nclass TestChiSquared(BaseTestCases.BaseTestCase):\n distribution = pm.ChiSquared\n params = {'nu': 2.}\n\n\nclass TestWeibull(BaseTestCases.BaseTestCase):\n distribution = pm.Weibull\n params = {'alpha': 1., 'beta': 1.}\n\n\nclass TestExGaussian(BaseTestCases.BaseTestCase):\n distribution = pm.ExGaussian\n params = {'mu': 0., 'sigma': 1., 'nu': 1.}\n\n\nclass TestVonMises(BaseTestCases.BaseTestCase):\n distribution = pm.VonMises\n params = {'mu': 0., 'kappa': 1.}\n\n\nclass TestGumbel(BaseTestCases.BaseTestCase):\n distribution = pm.Gumbel\n params = {'mu': 0., 'beta': 1.}\n\n\nclass TestLogistic(BaseTestCases.BaseTestCase):\n distribution = pm.Logistic\n params = {'mu': 0., 's': 1.}\n\n\nclass TestLogitNormal(BaseTestCases.BaseTestCase):\n distribution = pm.LogitNormal\n params = {'mu': 0., 'sigma': 1.}\n\n\nclass TestBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.Binomial\n params = {'n': 5, 'p': 0.5}\n\n\nclass TestBetaBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.BetaBinomial\n params = {'n': 5, 'alpha': 1., 'beta': 1.}\n\n\nclass TestBernoulli(BaseTestCases.BaseTestCase):\n distribution = pm.Bernoulli\n params = {'p': 0.5}\n\n\nclass TestDiscreteWeibull(BaseTestCases.BaseTestCase):\n distribution = pm.DiscreteWeibull\n params = {'q': 0.25, 'beta': 2.}\n\n\nclass TestPoisson(BaseTestCases.BaseTestCase):\n distribution = pm.Poisson\n params = {'mu': 1.}\n\n\nclass TestNegativeBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.NegativeBinomial\n params = {'mu': 1., 'alpha': 1.}\n\n\nclass TestConstant(BaseTestCases.BaseTestCase):\n distribution = pm.Constant\n params = {'c': 3}\n\n\nclass TestZeroInflatedPoisson(BaseTestCases.BaseTestCase):\n distribution = pm.ZeroInflatedPoisson\n params = {'theta': 1., 'psi': 0.3}\n\n\nclass TestZeroInflatedNegativeBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.ZeroInflatedNegativeBinomial\n params = {'mu': 1., 'alpha': 1., 'psi': 0.3}\n\nclass TestZeroInflatedBinomial(BaseTestCases.BaseTestCase):\n distribution = pm.ZeroInflatedBinomial\n params = {'n': 10, 'p': 0.6, 'psi': 0.3}\n\nclass TestDiscreteUniform(BaseTestCases.BaseTestCase):\n distribution = pm.DiscreteUniform\n params = {'lower': 0., 'upper': 10.}\n\n\nclass TestGeometric(BaseTestCases.BaseTestCase):\n distribution = pm.Geometric\n params = {'p': 0.5}\n\n \nclass TestMoyal(BaseTestCases.BaseTestCase):\n distribution 
= pm.Moyal\n params = {'mu': 0., 'sigma': 1.}\n\n \nclass TestCategorical(BaseTestCases.BaseTestCase):\n distribution = pm.Categorical\n params = {'p': np.ones(BaseTestCases.BaseTestCase.shape)}\n\n def get_random_variable(self, shape, with_vector_params=False, **kwargs): # don't transform categories\n return super().get_random_variable(shape, with_vector_params=False, **kwargs)\n\n def test_probability_vector_shape(self):\n \"\"\"Check that if a 2d array of probabilities are passed to categorical correct shape is returned\"\"\"\n p = np.ones((10, 5))\n assert pm.Categorical.dist(p=p).random().shape == (10,)\n assert pm.Categorical.dist(p=p).random(size=4).shape == (4, 10)\n p = np.ones((3, 7, 5))\n assert pm.Categorical.dist(p=p).random().shape == (3, 7)\n assert pm.Categorical.dist(p=p).random(size=4).shape == (4, 3, 7)\n\n\nclass TestScalarParameterSamples(SeededTest):\n def test_bounded(self):\n # A bit crude...\n BoundedNormal = pm.Bound(pm.Normal, upper=0)\n\n def ref_rand(size, tau):\n return -st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)\n pymc3_random(BoundedNormal, {'tau': Rplus}, ref_rand=ref_rand)\n\n def test_uniform(self):\n def ref_rand(size, lower, upper):\n return st.uniform.rvs(size=size, loc=lower, scale=upper - lower)\n pymc3_random(pm.Uniform, {'lower': -Rplus, 'upper': Rplus}, ref_rand=ref_rand)\n\n def test_normal(self):\n def ref_rand(size, mu, sigma):\n return st.norm.rvs(size=size, loc=mu, scale=sigma)\n pymc3_random(pm.Normal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)\n\n def test_truncated_normal(self):\n def ref_rand(size, mu, sigma, lower, upper):\n return st.truncnorm.rvs((lower - mu) / sigma, (upper - mu) / sigma, size=size, loc=mu, scale=sigma)\n pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'lower': -Rplusbig, 'upper': Rplusbig},\n ref_rand=ref_rand)\n\n def test_truncated_normal_lower(self):\n def ref_rand(size, mu, sigma, lower):\n return st.truncnorm.rvs((lower - mu) / sigma, np.inf, size=size, loc=mu, scale=sigma)\n pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'lower': -Rplusbig},\n ref_rand=ref_rand)\n\n def test_truncated_normal_upper(self):\n def ref_rand(size, mu, sigma, upper):\n return st.truncnorm.rvs(-np.inf, (upper - mu) / sigma, size=size, loc=mu, scale=sigma)\n pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'upper': Rplusbig},\n ref_rand=ref_rand)\n\n def test_skew_normal(self):\n def ref_rand(size, alpha, mu, sigma):\n return st.skewnorm.rvs(size=size, a=alpha, loc=mu, scale=sigma)\n pymc3_random(pm.SkewNormal, {'mu': R, 'sigma': Rplus, 'alpha': R}, ref_rand=ref_rand)\n\n def test_half_normal(self):\n def ref_rand(size, tau):\n return st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)\n pymc3_random(pm.HalfNormal, {'tau': Rplus}, ref_rand=ref_rand)\n\n def test_wald(self):\n # Cannot do anything too exciting as scipy wald is a\n # location-scale model of the *standard* wald with mu=1 and lam=1\n def ref_rand(size, mu, lam, alpha):\n return st.wald.rvs(size=size, loc=alpha)\n pymc3_random(pm.Wald,\n {'mu': Domain([1., 1., 1.]), 'lam': Domain(\n [1., 1., 1.]), 'alpha': Rplus},\n ref_rand=ref_rand)\n\n def test_beta(self):\n def ref_rand(size, alpha, beta):\n return clipped_beta_rvs(a=alpha, b=beta, size=size)\n pymc3_random(pm.Beta, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)\n\n def test_exponential(self):\n def ref_rand(size, lam):\n return nr.exponential(scale=1. 
/ lam, size=size)\n pymc3_random(pm.Exponential, {'lam': Rplus}, ref_rand=ref_rand)\n\n def test_laplace(self):\n def ref_rand(size, mu, b):\n return st.laplace.rvs(mu, b, size=size)\n pymc3_random(pm.Laplace, {'mu': R, 'b': Rplus}, ref_rand=ref_rand)\n\n def test_lognormal(self):\n def ref_rand(size, mu, tau):\n return np.exp(mu + (tau ** -0.5) * st.norm.rvs(loc=0., scale=1., size=size))\n pymc3_random(pm.Lognormal, {'mu': R, 'tau': Rplusbig}, ref_rand=ref_rand)\n\n def test_student_t(self):\n def ref_rand(size, nu, mu, lam):\n return st.t.rvs(nu, mu, lam**-.5, size=size)\n pymc3_random(pm.StudentT, {'nu': Rplus, 'mu': R, 'lam': Rplus}, ref_rand=ref_rand)\n\n def test_cauchy(self):\n def ref_rand(size, alpha, beta):\n return st.cauchy.rvs(alpha, beta, size=size)\n pymc3_random(pm.Cauchy, {'alpha': R, 'beta': Rplusbig}, ref_rand=ref_rand)\n\n def test_half_cauchy(self):\n def ref_rand(size, beta):\n return st.halfcauchy.rvs(scale=beta, size=size)\n pymc3_random(pm.HalfCauchy, {'beta': Rplusbig}, ref_rand=ref_rand)\n\n def test_gamma_alpha_beta(self):\n def ref_rand(size, alpha, beta):\n return st.gamma.rvs(alpha, scale=1. / beta, size=size)\n pymc3_random(pm.Gamma, {'alpha': Rplusbig, 'beta': Rplusbig}, ref_rand=ref_rand)\n\n def test_gamma_mu_sigma(self):\n def ref_rand(size, mu, sigma):\n return st.gamma.rvs(mu**2 / sigma**2, scale=sigma ** 2 / mu, size=size)\n pymc3_random(pm.Gamma, {'mu': Rplusbig, 'sigma': Rplusbig}, ref_rand=ref_rand)\n\n def test_inverse_gamma(self):\n def ref_rand(size, alpha, beta):\n return st.invgamma.rvs(a=alpha, scale=beta, size=size)\n pymc3_random(pm.InverseGamma, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)\n\n def test_pareto(self):\n def ref_rand(size, alpha, m):\n return st.pareto.rvs(alpha, scale=m, size=size)\n pymc3_random(pm.Pareto, {'alpha': Rplusbig, 'm': Rplusbig}, ref_rand=ref_rand)\n\n def test_ex_gaussian(self):\n def ref_rand(size, mu, sigma, nu):\n return nr.normal(mu, sigma, size=size) + nr.exponential(scale=nu, size=size)\n pymc3_random(pm.ExGaussian, {'mu': R, 'sigma': Rplus, 'nu': Rplus}, ref_rand=ref_rand)\n\n def test_vonmises(self):\n def ref_rand(size, mu, kappa):\n return st.vonmises.rvs(size=size, loc=mu, kappa=kappa)\n pymc3_random(pm.VonMises, {'mu': R, 'kappa': Rplus}, ref_rand=ref_rand)\n\n def test_triangular(self):\n def ref_rand(size, lower, upper, c):\n scale = upper - lower\n c_ = (c - lower) / scale\n return st.triang.rvs(size=size, loc=lower, scale=scale, c=c_)\n pymc3_random(pm.Triangular, {'lower': Runif, 'upper': Runif + 3, 'c': Runif + 1}, ref_rand=ref_rand)\n\n def test_flat(self):\n with pm.Model():\n f = pm.Flat('f')\n with pytest.raises(ValueError):\n f.random(1)\n\n def test_half_flat(self):\n with pm.Model():\n f = pm.HalfFlat('f')\n with pytest.raises(ValueError):\n f.random(1)\n\n def test_binomial(self):\n pymc3_random_discrete(pm.Binomial, {'n': Nat, 'p': Unit}, ref_rand=st.binom.rvs)\n\n def test_beta_binomial(self):\n pymc3_random_discrete(pm.BetaBinomial, {'n': Nat, 'alpha': Rplus, 'beta': Rplus},\n ref_rand=self._beta_bin)\n\n def _beta_bin(self, n, alpha, beta, size=None):\n return st.binom.rvs(n, st.beta.rvs(a=alpha, b=beta, size=size))\n\n def test_bernoulli(self):\n pymc3_random_discrete(pm.Bernoulli, {'p': Unit},\n ref_rand=lambda size, p=None: st.bernoulli.rvs(p, size=size))\n\n def test_poisson(self):\n pymc3_random_discrete(pm.Poisson, {'mu': Rplusbig}, size=500, ref_rand=st.poisson.rvs)\n\n def test_negative_binomial(self):\n def ref_rand(size, alpha, mu):\n return st.nbinom.rvs(alpha, 
alpha / (mu + alpha), size=size)\n pymc3_random_discrete(pm.NegativeBinomial, {'mu': Rplusbig, 'alpha': Rplusbig},\n size=100, fails=50, ref_rand=ref_rand)\n\n def test_geometric(self):\n pymc3_random_discrete(pm.Geometric, {'p': Unit}, size=500, fails=50, ref_rand=nr.geometric)\n\n def test_discrete_uniform(self):\n def ref_rand(size, lower, upper):\n return st.randint.rvs(lower, upper + 1, size=size)\n pymc3_random_discrete(pm.DiscreteUniform, {'lower': -NatSmall, 'upper': NatSmall},\n ref_rand=ref_rand)\n\n def test_discrete_weibull(self):\n def ref_rand(size, q, beta):\n u = np.random.uniform(size=size)\n\n return np.ceil(np.power(np.log(1 - u) / np.log(q), 1. / beta)) - 1\n\n pymc3_random_discrete(pm.DiscreteWeibull, {'q': Unit, 'beta': Rplusdunif},\n ref_rand=ref_rand)\n\n @pytest.mark.parametrize('s', [2, 3, 4])\n def test_categorical_random(self, s):\n def ref_rand(size, p):\n return nr.choice(np.arange(p.shape[0]), p=p, size=size)\n pymc3_random_discrete(pm.Categorical, {'p': Simplex(s)}, ref_rand=ref_rand)\n\n def test_constant_dist(self):\n def ref_rand(size, c):\n return c * np.ones(size, dtype=int)\n pymc3_random_discrete(pm.Constant, {'c': I}, ref_rand=ref_rand)\n\n def test_mv_normal(self):\n def ref_rand(size, mu, cov):\n return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)\n\n def ref_rand_tau(size, mu, tau):\n return ref_rand(size, mu, linalg.inv(tau))\n\n def ref_rand_chol(size, mu, chol):\n return ref_rand(size, mu, np.dot(chol, chol.T))\n\n def ref_rand_uchol(size, mu, chol):\n return ref_rand(size, mu, np.dot(chol.T, chol))\n\n for n in [2, 3]:\n pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'cov': PdMatrix(n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)\n pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'tau': PdMatrix(n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_tau)\n pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'chol': PdMatrixChol(n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_chol)\n pymc3_random(\n pm.MvNormal,\n {'mu': Vector(R, n), 'chol': PdMatrixCholUpper(n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_uchol,\n extra_args={'lower': False}\n )\n\n def test_matrix_normal(self):\n def ref_rand(size, mu, rowcov, colcov):\n return st.matrix_normal.rvs(mean=mu, rowcov=rowcov, colcov=colcov, size=size)\n\n # def ref_rand_tau(size, mu, tau):\n # return ref_rand(size, mu, linalg.inv(tau))\n\n def ref_rand_chol(size, mu, rowchol, colchol):\n return ref_rand(size, mu, rowcov=np.dot(rowchol, rowchol.T),\n colcov=np.dot(colchol, colchol.T))\n\n def ref_rand_uchol(size, mu, rowchol, colchol):\n return ref_rand(size, mu, rowcov=np.dot(rowchol.T, rowchol),\n colcov=np.dot(colchol.T, colchol))\n\n for n in [2, 3]:\n pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowcov': PdMatrix(n), 'colcov': PdMatrix(n)},\n size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand)\n # pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'tau': PdMatrix(n)},\n # size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_tau)\n pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowchol': PdMatrixChol(n), 'colchol': PdMatrixChol(n)},\n size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_chol)\n # pymc3_random(\n # pm.MvNormal,\n # {'mu': RealMatrix(n, n), 'rowchol': PdMatrixCholUpper(n), 'colchol': PdMatrixCholUpper(n)},\n # size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_uchol,\n # extra_args={'lower': False}\n # )\n\n def test_kronecker_normal(self):\n def ref_rand(size, mu, covs, sigma):\n cov = 
pm.math.kronecker(covs[0], covs[1]).eval()\n cov += sigma**2 * np.identity(cov.shape[0])\n return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)\n\n def ref_rand_chol(size, mu, chols, sigma):\n covs = [np.dot(chol, chol.T) for chol in chols]\n return ref_rand(size, mu, covs, sigma)\n\n def ref_rand_evd(size, mu, evds, sigma):\n covs = []\n for eigs, Q in evds:\n covs.append(np.dot(Q, np.dot(np.diag(eigs), Q.T)))\n return ref_rand(size, mu, covs, sigma)\n\n sizes = [2, 3]\n sigmas = [0, 1]\n for n, sigma in zip(sizes, sigmas):\n N = n**2\n covs = [RandomPdMatrix(n), RandomPdMatrix(n)]\n chols = list(map(np.linalg.cholesky, covs))\n evds = list(map(np.linalg.eigh, covs))\n dom = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)\n mu = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)\n\n std_args = {'mu': mu}\n cov_args = {'covs': covs}\n chol_args = {'chols': chols}\n evd_args = {'evds': evds}\n if sigma is not None and sigma != 0:\n std_args['sigma'] = Domain([sigma], edges=(None, None))\n else:\n for args in [cov_args, chol_args, evd_args]:\n args['sigma'] = sigma\n\n pymc3_random(\n pm.KroneckerNormal, std_args, valuedomain=dom,\n ref_rand=ref_rand, extra_args=cov_args, model_args=cov_args)\n pymc3_random(\n pm.KroneckerNormal, std_args, valuedomain=dom,\n ref_rand=ref_rand_chol, extra_args=chol_args,\n model_args=chol_args)\n pymc3_random(\n pm.KroneckerNormal, std_args, valuedomain=dom,\n ref_rand=ref_rand_evd, extra_args=evd_args,\n model_args=evd_args)\n\n def test_mv_t(self):\n def ref_rand(size, nu, Sigma, mu):\n normal = st.multivariate_normal.rvs(cov=Sigma, size=size).T\n chi2 = st.chi2.rvs(df=nu, size=size)\n return mu + np.sqrt(nu) * (normal / chi2).T\n for n in [2, 3]:\n pymc3_random(pm.MvStudentT,\n {'nu': Domain([5, 10, 25, 50]), 'Sigma': PdMatrix(\n n), 'mu': Vector(R, n)},\n size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)\n\n def test_dirichlet(self):\n def ref_rand(size, a):\n return st.dirichlet.rvs(a, size=size)\n for n in [2, 3]:\n pymc3_random(pm.Dirichlet, {'a': Vector(Rplus, n)},\n valuedomain=Simplex(n), size=100, ref_rand=ref_rand)\n\n def test_multinomial(self):\n def ref_rand(size, p, n):\n return nr.multinomial(pvals=p, n=n, size=size)\n for n in [2, 3]:\n pymc3_random_discrete(pm.Multinomial, {'p': Simplex(n), 'n': Nat},\n valuedomain=Vector(Nat, n), size=100, ref_rand=ref_rand)\n\n def test_gumbel(self):\n def ref_rand(size, mu, beta):\n return st.gumbel_r.rvs(loc=mu, scale=beta, size=size)\n pymc3_random(pm.Gumbel, {'mu': R, 'beta': Rplus}, ref_rand=ref_rand)\n\n def test_logistic(self):\n def ref_rand(size, mu, s):\n return st.logistic.rvs(loc=mu, scale=s, size=size)\n pymc3_random(pm.Logistic, {'mu': R, 's': Rplus}, ref_rand=ref_rand)\n\n def test_logitnormal(self):\n def ref_rand(size, mu, sigma):\n return expit(st.norm.rvs(loc=mu, scale=sigma, size=size))\n pymc3_random(pm.LogitNormal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)\n\n def test_moyal(self):\n def ref_rand(size, mu, sigma):\n return st.moyal.rvs(loc=mu, scale=sigma, size=size)\n pymc3_random(pm.Moyal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)\n\n \n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_interpolated(self):\n for mu in R.vals:\n for sigma in Rplus.vals:\n #pylint: disable=cell-var-from-loop\n def ref_rand(size):\n return st.norm.rvs(loc=mu, scale=sigma, size=size)\n\n class TestedInterpolated (pm.Interpolated):\n\n def __init__(self, **kwargs):\n x_points = np.linspace(mu - 5 * 
sigma, mu + 5 * sigma, 100)\n pdf_points = st.norm.pdf(x_points, loc=mu, scale=sigma)\n super().__init__(\n x_points=x_points,\n pdf_points=pdf_points,\n **kwargs\n )\n\n pymc3_random(TestedInterpolated, {}, ref_rand=ref_rand)\n\n @pytest.mark.skip('Wishart random sampling not implemented.\\n'\n 'See https://github.com/pymc-devs/pymc3/issues/538')\n def test_wishart(self):\n # Wishart non current recommended for use:\n # https://github.com/pymc-devs/pymc3/issues/538\n # for n in [2, 3]:\n # pymc3_random_discrete(Wisvaluedomainhart,\n # {'n': Domain([2, 3, 4, 2000]) , 'V': PdMatrix(n) },\n # valuedomain=PdMatrix(n),\n # ref_rand=lambda n=None, V=None, size=None: \\\n # st.wishart(V, df=n, size=size))\n pass\n\n def test_lkj(self):\n for n in [2, 10, 50]:\n #pylint: disable=cell-var-from-loop\n shape = n*(n-1)//2\n\n def ref_rand(size, eta):\n beta = eta - 1 + n/2\n return (st.beta.rvs(size=(size, shape), a=beta, b=beta)-.5)*2\n\n class TestedLKJCorr (pm.LKJCorr):\n\n def __init__(self, **kwargs):\n kwargs.pop('shape', None)\n super().__init__(n=n, **kwargs)\n\n pymc3_random(TestedLKJCorr,\n {'eta': Domain([1., 10., 100.])},\n size=10000//n,\n ref_rand=ref_rand)\n\n def test_normalmixture(self):\n def ref_rand(size, w, mu, sigma):\n component = np.random.choice(w.size, size=size, p=w)\n return np.random.normal(mu[component], sigma[component], size=size)\n\n pymc3_random(pm.NormalMixture, {'w': Simplex(2),\n 'mu': Domain([[.05, 2.5], [-5., 1.]], edges=(None, None)),\n 'sigma': Domain([[1, 1], [1.5, 2.]], edges=(None, None))},\n extra_args={'comp_shape': 2},\n size=1000,\n ref_rand=ref_rand)\n pymc3_random(pm.NormalMixture, {'w': Simplex(3),\n 'mu': Domain([[-5., 1., 2.5]], edges=(None, None)),\n 'sigma': Domain([[1.5, 2., 3.]], edges=(None, None))},\n extra_args={'comp_shape': 3},\n size=1000,\n ref_rand=ref_rand)\n\n\ndef test_mixture_random_shape():\n # test the shape broadcasting in mixture random\n y = np.concatenate([nr.poisson(5, size=10),\n nr.poisson(9, size=10)])\n with pm.Model() as m:\n comp0 = pm.Poisson.dist(mu=np.ones(2))\n w0 = pm.Dirichlet('w0', a=np.ones(2), shape=(2,))\n like0 = pm.Mixture('like0',\n w=w0,\n comp_dists=comp0,\n observed=y)\n\n comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),\n shape=(20, 2))\n w1 = pm.Dirichlet('w1', a=np.ones(2), shape=(2,))\n like1 = pm.Mixture('like1',\n w=w1,\n comp_dists=comp1,\n observed=y)\n\n comp2 = pm.Poisson.dist(mu=np.ones(2))\n w2 = pm.Dirichlet('w2',\n a=np.ones(2),\n shape=(20, 2))\n like2 = pm.Mixture('like2',\n w=w2,\n comp_dists=comp2,\n observed=y)\n\n comp3 = pm.Poisson.dist(mu=np.ones(2),\n shape=(20, 2))\n w3 = pm.Dirichlet('w3',\n a=np.ones(2),\n shape=(20, 2))\n like3 = pm.Mixture('like3',\n w=w3,\n comp_dists=comp3,\n observed=y)\n\n rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],\n point=m.test_point,\n size=100)\n assert rand0.shape == (100, 20)\n assert rand1.shape == (100, 20)\n assert rand2.shape == (100, 20)\n assert rand3.shape == (100, 20)\n\n with m:\n ppc = pm.sample_posterior_predictive([m.test_point], samples=200)\n assert ppc['like0'].shape == (200, 20)\n assert ppc['like1'].shape == (200, 20)\n assert ppc['like2'].shape == (200, 20)\n assert ppc['like3'].shape == (200, 20)\n\[email protected]\ndef test_mixture_random_shape_fast():\n # test the shape broadcasting in mixture random\n y = np.concatenate([nr.poisson(5, size=10),\n nr.poisson(9, size=10)])\n with pm.Model() as m:\n comp0 = pm.Poisson.dist(mu=np.ones(2))\n w0 = pm.Dirichlet('w0', a=np.ones(2), shape=(2,))\n like0 = 
pm.Mixture('like0',\n w=w0,\n comp_dists=comp0,\n observed=y)\n\n comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),\n shape=(20, 2))\n w1 = pm.Dirichlet('w1', a=np.ones(2), shape=(2,))\n like1 = pm.Mixture('like1',\n w=w1,\n comp_dists=comp1,\n observed=y)\n\n comp2 = pm.Poisson.dist(mu=np.ones(2))\n w2 = pm.Dirichlet('w2',\n a=np.ones(2),\n shape=(20, 2))\n like2 = pm.Mixture('like2',\n w=w2,\n comp_dists=comp2,\n observed=y)\n\n comp3 = pm.Poisson.dist(mu=np.ones(2),\n shape=(20, 2))\n w3 = pm.Dirichlet('w3',\n a=np.ones(2),\n shape=(20, 2))\n like3 = pm.Mixture('like3',\n w=w3,\n comp_dists=comp3,\n observed=y)\n\n rand0, rand1, rand2, rand3 = draw_values([like0, like1, like2, like3],\n point=m.test_point,\n size=100)\n assert rand0.shape == (100, 20)\n assert rand1.shape == (100, 20)\n assert rand2.shape == (100, 20)\n assert rand3.shape == (100, 20)\n\n # I *think* that the mixture means that this is not going to work,\n # but I could be wrong. [2019/08/22:rpg]\n with m:\n ppc = pm.fast_sample_posterior_predictive([m.test_point], samples=200)\n assert ppc['like0'].shape == (200, 20)\n assert ppc['like1'].shape == (200, 20)\n assert ppc['like2'].shape == (200, 20)\n assert ppc['like3'].shape == (200, 20)\n\n\n\nclass TestDensityDist():\n @pytest.mark.parametrize(\"shape\", [(), (3,), (3, 2)], ids=str)\n def test_density_dist_with_random_sampleable(self, shape):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1, shape=shape)\n obs = pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100, *shape),\n shape=shape,\n random=normal_dist.random)\n trace = pm.sample(100)\n\n samples = 500\n size = 100\n ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model, size=size)\n assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape\n\n # ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=size)\n # assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape\n\n\n @pytest.mark.parametrize(\"shape\", [(), (3,), (3, 2)], ids=str)\n def test_density_dist_with_random_sampleable_failure(self, shape):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1, shape=shape)\n pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100, *shape),\n shape=shape,\n random=normal_dist.random,\n wrap_random_with_dist_shape=False\n )\n trace = pm.sample(100)\n\n samples = 500\n with pytest.raises(RuntimeError):\n pm.sample_posterior_predictive(trace, samples=samples, model=model, size=100)\n\n with pytest.raises((TypeError, RuntimeError)):\n pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=100)\n\n\n @pytest.mark.parametrize(\"shape\", [(), (3,), (3, 2)], ids=str)\n def test_density_dist_with_random_sampleable_hidden_error(self, shape):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1, shape=shape)\n obs = pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100, *shape),\n shape=shape,\n random=normal_dist.random,\n wrap_random_with_dist_shape=False,\n check_shape_in_random=False\n )\n trace = pm.sample(100)\n\n samples = 500\n ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model)\n assert len(ppc['density_dist']) == samples\n assert ((samples,) + obs.distribution.shape) != ppc['density_dist'].shape\n\n ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, 
model=model)\n assert len(ppc['density_dist']) == samples\n assert ((samples,) + obs.distribution.shape) != ppc['density_dist'].shape\n\n\n def test_density_dist_with_random_sampleable_handcrafted_success(self):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1)\n rvs = pm.Normal.dist(mu, 1, shape=100).random\n obs = pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100),\n random=rvs,\n wrap_random_with_dist_shape=False\n )\n trace = pm.sample(100)\n\n samples = 500\n size = 100\n ppc = pm.sample_posterior_predictive(trace, samples=samples, model=model, size=size)\n assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape\n\n @pytest.mark.xfail\n def test_density_dist_with_random_sampleable_handcrafted_success_fast(self):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1)\n rvs = pm.Normal.dist(mu, 1, shape=100).random\n obs = pm.DensityDist(\n 'density_dist',\n normal_dist.logp,\n observed=np.random.randn(100),\n random=rvs,\n wrap_random_with_dist_shape=False\n )\n trace = pm.sample(100)\n\n samples = 500\n size = 100\n\n ppc = pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=size)\n assert ppc['density_dist'].shape == (samples, size) + obs.distribution.shape\n\n\n def test_density_dist_without_random_not_sampleable(self):\n with pm.Model() as model:\n mu = pm.Normal('mu', 0, 1)\n normal_dist = pm.Normal.dist(mu, 1)\n pm.DensityDist('density_dist', normal_dist.logp, observed=np.random.randn(100))\n trace = pm.sample(100)\n\n samples = 500\n with pytest.raises(ValueError):\n pm.sample_posterior_predictive(trace, samples=samples, model=model, size=100)\n\n with pytest.raises((TypeError, ValueError)):\n pm.fast_sample_posterior_predictive(trace, samples=samples, model=model, size=100)\n\n\nclass TestNestedRandom(SeededTest):\n def build_model(self, distribution, shape, nested_rvs_info):\n with pm.Model() as model:\n nested_rvs = {}\n for rv_name, info in nested_rvs_info.items():\n try:\n value, nested_shape = info\n loc = 0.\n except ValueError:\n value, nested_shape, loc = info\n if value is None:\n nested_rvs[rv_name] = pm.Uniform(\n rv_name,\n 0 + loc,\n 1 + loc,\n shape=nested_shape,\n )\n else:\n nested_rvs[rv_name] = value * np.ones(nested_shape)\n rv = distribution(\n \"target\",\n shape=shape,\n **nested_rvs,\n )\n return model, rv, nested_rvs\n\n def sample_prior(\n self,\n distribution,\n shape,\n nested_rvs_info,\n prior_samples\n ):\n model, rv, nested_rvs = self.build_model(\n distribution,\n shape,\n nested_rvs_info,\n )\n with model:\n return pm.sample_prior_predictive(prior_samples)\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"mu\", \"alpha\"],\n [\n [10, (3,), (None, tuple()), (None, (3,))],\n [10, (3,), (None, (3,)), (None, tuple())],\n [10, (4, 3,), (None, (3,)), (None, (3,))],\n [10, (4, 3,), (None, (3,)), (None, (4, 3))],\n ],\n ids=str,\n )\n def test_NegativeBinomial(\n self,\n prior_samples,\n shape,\n mu,\n alpha,\n ):\n prior = self.sample_prior(\n distribution=pm.NegativeBinomial,\n shape=shape,\n nested_rvs_info=dict(mu=mu, alpha=alpha),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"psi\", \"mu\", \"alpha\"],\n [\n [10, (3,), (0.5, tuple()), (None, tuple()), (None, (3,))],\n [10, (3,), (0.5, (3,)), (None, tuple()), (None, (3,))],\n [10, (3,), (0.5, tuple()), (None, 
(3,)), (None, tuple())],\n [10, (3,), (0.5, (3,)), (None, (3,)), (None, tuple())],\n [10, (4, 3,), (0.5, (3,)), (None, (3,)), (None, (3,))],\n [10, (4, 3,), (0.5, (3,)), (None, (3,)), (None, (4, 3))],\n ],\n ids=str,\n )\n def test_ZeroInflatedNegativeBinomial(\n self,\n prior_samples,\n shape,\n psi,\n mu,\n alpha,\n ):\n prior = self.sample_prior(\n distribution=pm.ZeroInflatedNegativeBinomial,\n shape=shape,\n nested_rvs_info=dict(psi=psi, mu=mu, alpha=alpha),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"nu\", \"sigma\"],\n [\n [10, (3,), (None, tuple()), (None, (3,))],\n [10, (3,), (None, tuple()), (None, (3,))],\n [10, (3,), (None, (3,)), (None, tuple())],\n [10, (3,), (None, (3,)), (None, tuple())],\n [10, (4, 3,), (None, (3,)), (None, (3,))],\n [10, (4, 3,), (None, (3,)), (None, (4, 3))],\n ],\n ids=str,\n )\n def test_Rice(\n self,\n prior_samples,\n shape,\n nu,\n sigma,\n ):\n prior = self.sample_prior(\n distribution=pm.Rice,\n shape=shape,\n nested_rvs_info=dict(nu=nu, sigma=sigma),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"mu\", \"sigma\", \"lower\", \"upper\"],\n [\n [10, (3,), (None, tuple()), (1., tuple()), (None, tuple(), -1), (None, (3,))],\n [10, (3,), (None, tuple()), (1., tuple()), (None, tuple(), -1), (None, (3,))],\n [10, (3,), (None, tuple()), (1., tuple()), (None, (3,), -1), (None, tuple())],\n [10, (3,), (None, tuple()), (1., tuple()), (None, (3,), -1), (None, tuple())],\n [10, (4, 3,), (None, (3,)), (1., tuple()), (None, (3,), -1), (None, (3,))],\n [10, (4, 3,), (None, (3,)), (1., tuple()), (None, (3,), -1), (None, (4, 3))],\n [10, (3,), (0., tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))],\n [10, (3,), (0., tuple()), (None, tuple()), (None, tuple(), -1), (None, (3,))],\n [10, (3,), (0., tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())],\n [10, (3,), (0., tuple()), (None, tuple()), (None, (3,), -1), (None, tuple())],\n [10, (4, 3,), (0., tuple()), (None, (3,)), (None, (3,), -1), (None, (3,))],\n [10, (4, 3,), (0., tuple()), (None, (3,)), (None, (3,), -1), (None, (4, 3))],\n ],\n ids=str,\n )\n def test_TruncatedNormal(\n self,\n prior_samples,\n shape,\n mu,\n sigma,\n lower,\n upper,\n ):\n prior = self.sample_prior(\n distribution=pm.TruncatedNormal,\n shape=shape,\n nested_rvs_info=dict(mu=mu, sigma=sigma, lower=lower, upper=upper),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n\n\n @pytest.mark.parametrize(\n [\"prior_samples\", \"shape\", \"c\", \"lower\", \"upper\"],\n [\n [10, (3,), (None, tuple()), (-1., (3,)), (2, tuple())],\n [10, (3,), (None, tuple()), (-1., tuple()), (None, tuple(), 1)],\n [10, (3,), (None, (3,)), (-1., tuple()), (None, tuple(), 1)],\n [10, (4, 3,), (None, (3,)), (-1., tuple()), (None, (3,), 1)],\n [10, (4, 3,), (None, (3,)), (None, tuple(), -1), (None, (3,), 1)],\n ],\n ids=str,\n )\n def test_Triangular(\n self,\n prior_samples,\n shape,\n c,\n lower,\n upper,\n ):\n prior = self.sample_prior(\n distribution=pm.Triangular,\n shape=shape,\n nested_rvs_info=dict(c=c, lower=lower, upper=upper),\n prior_samples=prior_samples,\n )\n assert prior[\"target\"].shape == (prior_samples,) + shape\n"
] | [
[
"scipy.stats.matrix_normal.rvs",
"numpy.ones",
"scipy.stats.laplace.rvs",
"scipy.stats.gumbel_r.rvs",
"numpy.diag",
"scipy.stats.gamma.rvs",
"numpy.random.poisson",
"numpy.log",
"scipy.stats.logistic.rvs",
"numpy.testing.assert_almost_equal",
"scipy.stats.beta.rvs",
"scipy.stats.moyal.rvs",
"numpy.random.choice",
"scipy.stats.nbinom.rvs",
"scipy.stats.randint.rvs",
"scipy.stats.triang.rvs",
"scipy.stats.invgamma.rvs",
"numpy.random.multinomial",
"numpy.identity",
"numpy.unique",
"numpy.linspace",
"numpy.random.uniform",
"numpy.sqrt",
"scipy.stats.cauchy.rvs",
"scipy.stats.pareto.rvs",
"scipy.stats.multivariate_normal.rvs",
"scipy.stats.chisquare",
"scipy.stats.norm.rvs",
"scipy.stats.dirichlet.rvs",
"scipy.linalg.inv",
"numpy.arange",
"numpy.all",
"scipy.stats.chi2.rvs",
"scipy.stats.truncnorm.rvs",
"scipy.stats.vonmises.rvs",
"scipy.stats.halfcauchy.rvs",
"scipy.stats.bernoulli.rvs",
"scipy.stats.uniform.rvs",
"scipy.stats.halfnorm.rvs",
"numpy.random.randn",
"scipy.stats.norm.pdf",
"numpy.exp",
"numpy.atleast_1d",
"numpy.random.exponential",
"scipy.stats.t.rvs",
"numpy.random.normal",
"numpy.dot",
"scipy.stats.skewnorm.rvs",
"scipy.stats.wald.rvs"
]
] |
natsutan/cocytus | [
"53840021eb5a84ab197d96fa37e8b43b0b255566"
] | [
"tools/cqt_diff/cqt_diff_vgg16.py"
] | [
"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn\nimport sys\n\nkeras_dir = '../../example/vgg16/keras/output/'\ncqt_dir = '../../example/vgg16/c_fix/output/'\nqp_file = '../../examplevgg16/c_fix/weight/'\n\nfix16mode = True\n\ndef layer_dump(i, q, fnum = 3):\n \"\"\"\n 引数で指定されたレイヤーの、Keras出力と、コキュートス出力を\n 比較して、画像に落とす。比較するフィルターは先頭から、fnum\n まで。\n 出力はoutputディレクトリーに行われる。\n :param i:int レイヤー番号\n :param q:int 出力データのQ位置\n :param fnum:int 画像化するフィルター数\n :return:\n \"\"\"\n\n for f in range(fnum):\n plt.figure()\n graph_name = 'l%02d_%d' % (i, f)\n kname = os.path.join(keras_dir+'l%02d_%d.npy' % (i, f))\n cname = os.path.join(cqt_dir+'l%02d_%d.npy' % (i, f))\n k_data_1 = np.load(kname)\n c_data_1 = np.load(cname)\n k_data = k_data_1.flatten()\n c_data = c_data_1.flatten()\n\n\n if fix16mode:\n c_data = c_data.astype(np.float32) / (2 ** q)\n\n x = np.arange(len(k_data))\n plt.plot(x, k_data, color='b', label='Keras')\n plt.plot(x, c_data, color='r', label='Cocytus')\n plt.title(graph_name)\n plt.legend()\n\n img_fname = os.path.join('output', graph_name+'.png')\n print('save %s' % img_fname)\n plt.savefig(img_fname)\n\n plt.figure()\n plt.plot(x, k_data - c_data, color='g', label='diff')\n plt.title(graph_name+'diff')\n plt.legend()\n img_fname = os.path.join('output', graph_name + '_diff.png')\n plt.savefig(img_fname)\n\n\ndef read_qpfile(odir):\n \"\"\"qpファイルを読み込み、入力、出力、重みのQ位置をリストにして返す\"\"\"\n iqs = []\n wqs = []\n oqs = []\n fname = os.path.join(odir, 'qp.txt')\n\n for i, l in enumerate(open(fname).readlines()):\n if i < 1:\n continue\n words = l.split(',')\n iqs.append(int(words[0]))\n oqs.append(int(words[1]))\n wqs.append(int(words[2]))\n\n return iqs, oqs, wqs\n\n\n#iqs, oqs, wqs = read_qpfile(qp_file)\n\n#for i in range(31):\n# layer_dump(i, oqs[i])\nlayer_dump(15, 3)\n\nprint('finish')"
] | [
[
"numpy.load",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot"
]
] |
utahnlp/therapist-observer | [
"31eaf9a5c82c6d0f9a62427ac5df030d81547472"
] | [
"tensorflow/classes/bilm/model.py"
] | [
"\nimport numpy as np\nimport tensorflow as tf\nimport h5py\nimport json\nimport re\n\nfrom .data import UnicodeCharsVocabulary, Batcher\n\nDTYPE = 'float32'\nDTYPE_INT = 'int64'\n\n\nclass BidirectionalLanguageModel(object):\n def __init__(\n self,\n options_file,\n weight_file,\n use_character_inputs=True,\n embedding_weight_file=None,\n max_batch_size=128,\n ):\n '''\n Creates the language model computational graph and loads weights\n\n Two options for input type:\n (1) To use character inputs (paired with Batcher)\n pass use_character_inputs=True, and ids_placeholder\n of shape (None, None, max_characters_per_token)\n to __call__\n (2) To use token ids as input (paired with TokenBatcher),\n pass use_character_inputs=False and ids_placeholder\n of shape (None, None) to __call__.\n In this case, embedding_weight_file is also required input\n\n options_file: location of the json formatted file with\n LM hyperparameters\n weight_file: location of the hdf5 file with LM weights\n use_character_inputs: if True, then use character ids as input,\n otherwise use token ids\n max_batch_size: the maximum allowable batch size \n '''\n with open(options_file, 'r') as fin:\n options = json.load(fin)\n\n if not use_character_inputs:\n if embedding_weight_file is None:\n raise ValueError(\n \"embedding_weight_file is required input with \"\n \"not use_character_inputs\"\n )\n\n self._options = options\n self._weight_file = weight_file\n self._embedding_weight_file = embedding_weight_file\n self._use_character_inputs = use_character_inputs\n self._max_batch_size = max_batch_size\n\n self._ops = {}\n self._graphs = {}\n\n def __call__(self, ids_placeholder):\n '''\n Given the input character ids (or token ids), returns a dictionary\n with tensorflow ops:\n\n {'lm_embeddings': embedding_op,\n 'lengths': sequence_lengths_op,\n 'mask': op to compute mask}\n\n embedding_op computes the LM embeddings and is shape\n (None, 3, None, 1024)\n lengths_op computes the sequence lengths and is shape (None, )\n mask computes the sequence mask and is shape (None, None)\n\n ids_placeholder: a tf.placeholder of type int32.\n If use_character_inputs=True, it is shape\n (None, None, max_characters_per_token) and holds the input\n character ids for a batch\n If use_character_input=False, it is shape (None, None) and\n holds the input token ids for a batch\n '''\n if ids_placeholder in self._ops:\n # have already created ops for this placeholder, just return them\n ret = self._ops[ids_placeholder]\n\n else:\n # need to create the graph\n if len(self._ops) == 0:\n # first time creating the graph, don't reuse variables\n lm_graph = BidirectionalLanguageModelGraph(\n self._options,\n self._weight_file,\n ids_placeholder,\n embedding_weight_file=self._embedding_weight_file,\n use_character_inputs=self._use_character_inputs,\n max_batch_size=self._max_batch_size)\n else:\n with tf.variable_scope('', reuse=True):\n lm_graph = BidirectionalLanguageModelGraph(\n self._options,\n self._weight_file,\n ids_placeholder,\n embedding_weight_file=self._embedding_weight_file,\n use_character_inputs=self._use_character_inputs,\n max_batch_size=self._max_batch_size)\n\n ops = self._build_ops(lm_graph)\n self._ops[ids_placeholder] = ops\n self._graphs[ids_placeholder] = lm_graph\n ret = ops\n\n return ret\n\n def _build_ops(self, lm_graph):\n with tf.control_dependencies([lm_graph.update_state_op]):\n # get the LM embeddings\n token_embeddings = lm_graph.embedding\n layers = [\n tf.concat([token_embeddings, token_embeddings], axis=2)\n ]\n\n 
n_lm_layers = len(lm_graph.lstm_outputs['forward'])\n for i in range(n_lm_layers):\n layers.append(\n tf.concat(\n [lm_graph.lstm_outputs['forward'][i],\n lm_graph.lstm_outputs['backward'][i]],\n axis=-1\n )\n )\n\n # The layers include the BOS/EOS tokens. Remove them\n sequence_length_wo_bos_eos = lm_graph.sequence_lengths - 2\n layers_without_bos_eos = []\n for layer in layers:\n layer_wo_bos_eos = layer[:, 1:, :]\n layer_wo_bos_eos = tf.reverse_sequence(\n layer_wo_bos_eos, \n lm_graph.sequence_lengths - 1,\n seq_axis=1,\n batch_axis=0,\n )\n layer_wo_bos_eos = layer_wo_bos_eos[:, 1:, :]\n layer_wo_bos_eos = tf.reverse_sequence(\n layer_wo_bos_eos,\n sequence_length_wo_bos_eos,\n seq_axis=1,\n batch_axis=0,\n )\n layers_without_bos_eos.append(layer_wo_bos_eos)\n\n # concatenate the layers\n lm_embeddings = tf.concat(\n [tf.expand_dims(t, axis=1) for t in layers_without_bos_eos],\n axis=1\n )\n\n # get the mask op without bos/eos.\n # tf doesn't support reversing boolean tensors, so cast\n # to int then back\n mask_wo_bos_eos = tf.cast(lm_graph.mask[:, 1:], 'int32')\n mask_wo_bos_eos = tf.reverse_sequence(\n mask_wo_bos_eos,\n lm_graph.sequence_lengths - 1,\n seq_axis=1,\n batch_axis=0,\n )\n mask_wo_bos_eos = mask_wo_bos_eos[:, 1:]\n mask_wo_bos_eos = tf.reverse_sequence(\n mask_wo_bos_eos,\n sequence_length_wo_bos_eos,\n seq_axis=1,\n batch_axis=0,\n )\n mask_wo_bos_eos = tf.cast(mask_wo_bos_eos, 'bool')\n\n return {\n 'lm_embeddings': lm_embeddings, \n 'lengths': sequence_length_wo_bos_eos,\n 'token_embeddings': lm_graph.embedding,\n 'mask': mask_wo_bos_eos,\n }\n\n\ndef _pretrained_initializer(varname, weight_file, embedding_weight_file=None):\n '''\n We'll stub out all the initializers in the pretrained LM with\n a function that loads the weights from the file\n '''\n weight_name_map = {}\n for i in range(2):\n for j in range(8): # if we decide to add more layers\n root = 'RNN_{}/RNN/MultiRNNCell/Cell{}'.format(i, j)\n weight_name_map[root + '/rnn/lstm_cell/kernel'] = \\\n root + '/LSTMCell/W_0'\n weight_name_map[root + '/rnn/lstm_cell/bias'] = \\\n root + '/LSTMCell/B'\n weight_name_map[root + '/rnn/lstm_cell/projection/kernel'] = \\\n root + '/LSTMCell/W_P_0'\n\n # convert the graph name to that in the checkpoint\n varname_in_file = varname[5:]\n if varname_in_file.startswith('RNN'):\n varname_in_file = weight_name_map[varname_in_file]\n\n if varname_in_file == 'embedding':\n with h5py.File(embedding_weight_file, 'r') as fin:\n # Have added a special 0 index for padding not present\n # in the original model.\n embed_weights = fin[varname_in_file][...]\n weights = np.zeros(\n (embed_weights.shape[0] + 1, embed_weights.shape[1]),\n dtype=DTYPE\n )\n weights[1:, :] = embed_weights\n else:\n with h5py.File(weight_file, 'r') as fin:\n if varname_in_file == 'char_embed':\n # Have added a special 0 index for padding not present\n # in the original model.\n char_embed_weights = fin[varname_in_file][...]\n weights = np.zeros(\n (char_embed_weights.shape[0] + 1,\n char_embed_weights.shape[1]),\n dtype=DTYPE\n )\n weights[1:, :] = char_embed_weights\n else:\n weights = fin[varname_in_file][...]\n\n # Tensorflow initializers are callables that accept a shape parameter\n # and some optional kwargs\n def ret(shape, **kwargs):\n if list(shape) != list(weights.shape):\n raise ValueError(\n \"Invalid shape initializing {0}, got {1}, expected {2}\".format(\n varname_in_file, shape, weights.shape)\n )\n return weights\n\n return ret\n\n\nclass BidirectionalLanguageModelGraph(object):\n '''\n Creates 
the computational graph and holds the ops necessary for runnint\n a bidirectional language model\n '''\n def __init__(self, options, weight_file, ids_placeholder,\n use_character_inputs=True, embedding_weight_file=None,\n max_batch_size=128):\n\n self.options = options\n self._max_batch_size = max_batch_size\n self.ids_placeholder = ids_placeholder\n self.use_character_inputs = use_character_inputs\n\n # this custom_getter will make all variables not trainable and\n # override the default initializer\n def custom_getter(getter, name, *args, **kwargs):\n kwargs['trainable'] = False\n kwargs['initializer'] = _pretrained_initializer(\n name, weight_file, embedding_weight_file\n )\n return getter(name, *args, **kwargs)\n\n if embedding_weight_file is not None:\n # get the vocab size\n with h5py.File(embedding_weight_file, 'r') as fin:\n # +1 for padding\n self._n_tokens_vocab = fin['embedding'].shape[0] + 1\n else:\n self._n_tokens_vocab = None\n\n with tf.variable_scope('bilm', custom_getter=custom_getter):\n self._build()\n\n def _build(self):\n if self.use_character_inputs:\n self._build_word_char_embeddings()\n else:\n self._build_word_embeddings()\n self._build_lstms()\n\n def _build_word_char_embeddings(self):\n '''\n options contains key 'char_cnn': {\n\n 'n_characters': 262,\n\n # includes the start / end characters\n 'max_characters_per_token': 50,\n\n 'filters': [\n [1, 32],\n [2, 32],\n [3, 64],\n [4, 128],\n [5, 256],\n [6, 512],\n [7, 512]\n ],\n 'activation': 'tanh',\n\n # for the character embedding\n 'embedding': {'dim': 16}\n\n # for highway layers\n # if omitted, then no highway layers\n 'n_highway': 2,\n }\n '''\n projection_dim = self.options['lstm']['projection_dim']\n\n cnn_options = self.options['char_cnn']\n filters = cnn_options['filters']\n n_filters = sum(f[1] for f in filters)\n max_chars = cnn_options['max_characters_per_token']\n char_embed_dim = cnn_options['embedding']['dim']\n n_chars = cnn_options['n_characters']\n if n_chars != 262:\n raise InvalidNumberOfCharacters(\n \"Set n_characters=262 after training see the README.md\"\n )\n if cnn_options['activation'] == 'tanh':\n activation = tf.nn.tanh\n elif cnn_options['activation'] == 'relu':\n activation = tf.nn.relu\n\n # the character embeddings\n with tf.device(\"/cpu:0\"):\n self.embedding_weights = tf.get_variable(\n \"char_embed\", [n_chars, char_embed_dim],\n dtype=DTYPE,\n initializer=tf.random_uniform_initializer(-1.0, 1.0)\n )\n # shape (batch_size, unroll_steps, max_chars, embed_dim)\n self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights,\n self.ids_placeholder)\n\n # the convolutions\n def make_convolutions(inp):\n with tf.variable_scope('CNN') as scope:\n convolutions = []\n for i, (width, num) in enumerate(filters):\n if cnn_options['activation'] == 'relu':\n # He initialization for ReLU activation\n # with char embeddings init between -1 and 1\n #w_init = tf.random_normal_initializer(\n # mean=0.0,\n # stddev=np.sqrt(2.0 / (width * char_embed_dim))\n #)\n\n # Kim et al 2015, +/- 0.05\n w_init = tf.random_uniform_initializer(\n minval=-0.05, maxval=0.05)\n elif cnn_options['activation'] == 'tanh':\n # glorot init\n w_init = tf.random_normal_initializer(\n mean=0.0,\n stddev=np.sqrt(1.0 / (width * char_embed_dim))\n )\n w = tf.get_variable(\n \"W_cnn_%s\" % i,\n [1, width, char_embed_dim, num],\n initializer=w_init,\n dtype=DTYPE)\n b = tf.get_variable(\n \"b_cnn_%s\" % i, [num], dtype=DTYPE,\n initializer=tf.constant_initializer(0.0))\n\n conv = tf.nn.conv2d(\n inp, w,\n strides=[1, 
1, 1, 1],\n padding=\"VALID\") + b\n # now max pool\n conv = tf.nn.max_pool(\n conv, [1, 1, max_chars-width+1, 1],\n [1, 1, 1, 1], 'VALID')\n\n # activation\n conv = activation(conv)\n conv = tf.squeeze(conv, squeeze_dims=[2])\n\n convolutions.append(conv)\n\n return tf.concat(convolutions, 2)\n\n embedding = make_convolutions(self.char_embedding)\n\n # for highway and projection layers\n n_highway = cnn_options.get('n_highway')\n use_highway = n_highway is not None and n_highway > 0\n use_proj = n_filters != projection_dim\n\n if use_highway or use_proj:\n # reshape from (batch_size, n_tokens, dim) to (-1, dim)\n batch_size_n_tokens = tf.shape(embedding)[0:2]\n embedding = tf.reshape(embedding, [-1, n_filters])\n\n # set up weights for projection\n if use_proj:\n assert n_filters > projection_dim\n with tf.variable_scope('CNN_proj') as scope:\n W_proj_cnn = tf.get_variable(\n \"W_proj\", [n_filters, projection_dim],\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=np.sqrt(1.0 / n_filters)),\n dtype=DTYPE)\n b_proj_cnn = tf.get_variable(\n \"b_proj\", [projection_dim],\n initializer=tf.constant_initializer(0.0),\n dtype=DTYPE)\n\n # apply highways layers\n def high(x, ww_carry, bb_carry, ww_tr, bb_tr):\n carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry)\n transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr)\n return carry_gate * transform_gate + (1.0 - carry_gate) * x\n\n if use_highway:\n highway_dim = n_filters\n\n for i in range(n_highway):\n with tf.variable_scope('CNN_high_%s' % i) as scope:\n W_carry = tf.get_variable(\n 'W_carry', [highway_dim, highway_dim],\n # glorit init\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),\n dtype=DTYPE)\n b_carry = tf.get_variable(\n 'b_carry', [highway_dim],\n initializer=tf.constant_initializer(-2.0),\n dtype=DTYPE)\n W_transform = tf.get_variable(\n 'W_transform', [highway_dim, highway_dim],\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),\n dtype=DTYPE)\n b_transform = tf.get_variable(\n 'b_transform', [highway_dim],\n initializer=tf.constant_initializer(0.0),\n dtype=DTYPE)\n\n embedding = high(embedding, W_carry, b_carry,\n W_transform, b_transform)\n\n # finally project down if needed\n if use_proj:\n embedding = tf.matmul(embedding, W_proj_cnn) + b_proj_cnn\n\n # reshape back to (batch_size, tokens, dim)\n if use_highway or use_proj:\n shp = tf.concat([batch_size_n_tokens, [projection_dim]], axis=0)\n embedding = tf.reshape(embedding, shp)\n\n # at last assign attributes for remainder of the model\n self.embedding = embedding\n\n\n def _build_word_embeddings(self):\n projection_dim = self.options['lstm']['projection_dim']\n\n # the word embeddings\n with tf.device(\"/cpu:0\"):\n self.embedding_weights = tf.get_variable(\n \"embedding\", [self._n_tokens_vocab, projection_dim],\n dtype=DTYPE,\n )\n self.embedding = tf.nn.embedding_lookup(self.embedding_weights,\n self.ids_placeholder)\n\n\n def _build_lstms(self):\n # now the LSTMs\n # these will collect the initial states for the forward\n # (and reverse LSTMs if we are doing bidirectional)\n\n # parse the options\n lstm_dim = self.options['lstm']['dim']\n projection_dim = self.options['lstm']['projection_dim']\n n_lstm_layers = self.options['lstm'].get('n_layers', 1)\n cell_clip = self.options['lstm'].get('cell_clip')\n proj_clip = self.options['lstm'].get('proj_clip')\n use_skip_connections = self.options['lstm']['use_skip_connections']\n if use_skip_connections:\n 
print(\"USING SKIP CONNECTIONS\")\n else:\n print(\"NOT USING SKIP CONNECTIONS\")\n\n # the sequence lengths from input mask\n if self.use_character_inputs:\n mask = tf.reduce_any(self.ids_placeholder > 0, axis=2)\n else:\n mask = self.ids_placeholder > 0\n sequence_lengths = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)\n batch_size = tf.shape(sequence_lengths)[0]\n\n # for each direction, we'll store tensors for each layer\n self.lstm_outputs = {'forward': [], 'backward': []}\n self.lstm_state_sizes = {'forward': [], 'backward': []}\n self.lstm_init_states = {'forward': [], 'backward': []}\n self.lstm_final_states = {'forward': [], 'backward': []}\n\n update_ops = []\n for direction in ['forward', 'backward']:\n if direction == 'forward':\n layer_input = self.embedding\n else:\n layer_input = tf.reverse_sequence(\n self.embedding,\n sequence_lengths,\n seq_axis=1,\n batch_axis=0\n )\n\n for i in range(n_lstm_layers):\n if projection_dim < lstm_dim:\n # are projecting down output\n lstm_cell = tf.nn.rnn_cell.LSTMCell(\n lstm_dim, num_proj=projection_dim,\n cell_clip=cell_clip, proj_clip=proj_clip)\n else:\n lstm_cell = tf.nn.rnn_cell.LSTMCell(\n lstm_dim,\n cell_clip=cell_clip, proj_clip=proj_clip)\n\n if use_skip_connections:\n # ResidualWrapper adds inputs to outputs\n if i == 0:\n # don't add skip connection from token embedding to\n # 1st layer output\n pass\n else:\n # add a skip connection\n lstm_cell = tf.nn.rnn_cell.ResidualWrapper(lstm_cell)\n\n # collect the input state, run the dynamic rnn, collect\n # the output\n state_size = lstm_cell.state_size\n # the LSTMs are stateful. To support multiple batch sizes,\n # we'll allocate size for states up to max_batch_size,\n # then use the first batch_size entries for each batch\n init_states = [\n tf.Variable(\n tf.zeros([self._max_batch_size, dim]),\n trainable=False\n )\n for dim in lstm_cell.state_size\n ]\n batch_init_states = [\n state[:batch_size, :] for state in init_states\n ]\n\n if direction == 'forward':\n i_direction = 0\n else:\n i_direction = 1\n variable_scope_name = 'RNN_{0}/RNN/MultiRNNCell/Cell{1}'.format(\n i_direction, i)\n with tf.variable_scope(variable_scope_name):\n layer_output, final_state = tf.nn.dynamic_rnn(\n lstm_cell,\n layer_input,\n sequence_length=sequence_lengths,\n initial_state=tf.nn.rnn_cell.LSTMStateTuple(\n *batch_init_states),\n )\n\n self.lstm_state_sizes[direction].append(lstm_cell.state_size)\n self.lstm_init_states[direction].append(init_states)\n self.lstm_final_states[direction].append(final_state)\n if direction == 'forward':\n self.lstm_outputs[direction].append(layer_output)\n else:\n self.lstm_outputs[direction].append(\n tf.reverse_sequence(\n layer_output,\n sequence_lengths,\n seq_axis=1,\n batch_axis=0\n )\n )\n\n with tf.control_dependencies([layer_output]):\n # update the initial states\n for i in range(2):\n new_state = tf.concat(\n [final_state[i][:batch_size, :],\n init_states[i][batch_size:, :]], axis=0)\n state_update_op = tf.assign(init_states[i], new_state)\n update_ops.append(state_update_op)\n \n layer_input = layer_output\n\n self.mask = mask\n self.sequence_lengths = sequence_lengths\n self.update_state_op = tf.group(*update_ops)\n\n\ndef dump_token_embeddings(vocab_file, options_file, weight_file, outfile):\n '''\n Given an input vocabulary file, dump all the token embeddings to the\n outfile. 
The result can be used as the embedding_weight_file when\n constructing a BidirectionalLanguageModel.\n '''\n with open(options_file, 'r') as fin:\n options = json.load(fin)\n max_word_length = options['char_cnn']['max_characters_per_token']\n\n vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)\n batcher = Batcher(vocab_file, max_word_length)\n\n ids_placeholder = tf.placeholder('int32',\n shape=(None, None, max_word_length)\n )\n model = BidirectionalLanguageModel(options_file, weight_file)\n embedding_op = model(ids_placeholder)['token_embeddings']\n\n n_tokens = vocab.size\n embed_dim = int(embedding_op.shape[2])\n\n embeddings = np.zeros((n_tokens, embed_dim), dtype=DTYPE)\n\n config = tf.ConfigProto(allow_soft_placement=True)\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n for k in range(n_tokens):\n token = vocab.id_to_word(k)\n char_ids = batcher.batch_sentences([[token]])[0, 1, :].reshape(\n 1, 1, -1)\n embeddings[k, :] = sess.run(\n embedding_op, feed_dict={ids_placeholder: char_ids}\n )\n\n with h5py.File(outfile, 'w') as fout:\n ds = fout.create_dataset(\n 'embedding', embeddings.shape, dtype='float32', data=embeddings\n )\n\ndef dump_bilm_embeddings(vocab_file, dataset_file, options_file,\n weight_file, outfile):\n with open(options_file, 'r') as fin:\n options = json.load(fin)\n max_word_length = options['char_cnn']['max_characters_per_token']\n\n vocab = UnicodeCharsVocabulary(vocab_file, max_word_length)\n batcher = Batcher(vocab_file, max_word_length)\n\n ids_placeholder = tf.placeholder('int32',\n shape=(None, None, max_word_length)\n )\n model = BidirectionalLanguageModel(options_file, weight_file)\n ops = model(ids_placeholder)\n\n config = tf.ConfigProto(allow_soft_placement=True)\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n sentence_id = 0\n with open(dataset_file, 'r') as fin, h5py.File(outfile, 'w') as fout:\n for line in fin:\n sentence = line.strip().split()\n char_ids = batcher.batch_sentences([sentence])\n embeddings = sess.run(\n ops['lm_embeddings'], feed_dict={ids_placeholder: char_ids}\n )\n ds = fout.create_dataset(\n '{}'.format(sentence_id),\n embeddings.shape[1:], dtype='float32',\n data=embeddings[0, :, :, :]\n )\n\n sentence_id += 1\n\n"
] | [
[
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.squeeze",
"tensorflow.concat",
"tensorflow.nn.rnn_cell.ResidualWrapper",
"tensorflow.nn.rnn_cell.LSTMStateTuple",
"tensorflow.nn.max_pool",
"tensorflow.global_variables_initializer",
"tensorflow.device",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.random_uniform_initializer",
"tensorflow.constant_initializer",
"tensorflow.shape",
"numpy.zeros",
"tensorflow.expand_dims",
"tensorflow.reverse_sequence",
"tensorflow.cast",
"tensorflow.assign",
"tensorflow.Session",
"tensorflow.reduce_any",
"tensorflow.group",
"tensorflow.ConfigProto",
"tensorflow.nn.embedding_lookup",
"tensorflow.control_dependencies",
"tensorflow.placeholder",
"tensorflow.zeros",
"tensorflow.nn.conv2d",
"numpy.sqrt",
"tensorflow.get_variable"
]
] |
winstonolson/isofit_imgspec | [
"b6a56ba1abade7e08f14aa9264e6984a77e40a79"
] | [
"isofit/radiative_transfer/look_up_tables.py"
] | [
"#! /usr/bin/env python3\n#\n# Copyright 2018 California Institute of Technology\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ISOFIT: Imaging Spectrometer Optimal FITting\n# Author: David R Thompson, [email protected]\n#\n\nimport os\nimport numpy as np\nimport logging\nimport ray\nfrom collections import OrderedDict\nimport subprocess\nimport time\nimport atexit\n\nfrom isofit.core import common\nfrom isofit.configs import Config\nfrom isofit.configs.sections.radiative_transfer_config import RadiativeTransferEngineConfig\nfrom isofit.configs.sections.statevector_config import StateVectorElementConfig\nfrom isofit.configs.sections.implementation_config import ImplementationConfig\n\n\n### Functions ###\n\[email protected]\ndef spawn_rt(cmd, local_dir=None):\n \"\"\"Run a CLI command.\"\"\"\n\n print(cmd)\n\n # Add a very slight timing offset to prevent all subprocesses\n # starting simultaneously\n time.sleep(float(np.random.random(1))*2)\n\n subprocess.call(cmd, shell=True, cwd=local_dir)\n\n### Classes ###\n\nclass FileExistsError(Exception):\n \"\"\"FileExistsError with a message.\"\"\"\n\n def __init__(self, message):\n super(FileExistsError, self).__init__(message)\n\n\nclass TabularRT:\n \"\"\"A model of photon transport including the atmosphere.\"\"\"\n\n def __init__(self, engine_config: RadiativeTransferEngineConfig, full_config: Config):\n\n self.implementation_config: ImplementationConfig = full_config.implementation\n self.wl, self.fwhm = common.load_wavelen(full_config.forward_model.instrument.wavelength_file)\n if engine_config.wavelength_range is not None:\n valid_wl = np.logical_and(self.wl >= engine_config.wavelength_range[0],\n self.wl <= engine_config.wavelength_range[1])\n self.wl = self.wl[valid_wl]\n self.fwhm = self.fwhm[valid_wl]\n\n self.n_chan = len(self.wl)\n\n self.auto_rebuild = full_config.implementation.rte_auto_rebuild\n self.configure_and_exit = full_config.implementation.rte_configure_and_exit\n\n # We use a sorted dictionary here so that filenames for lookup\n # table (LUT) grid points are always constructed the same way, with\n # consistent dimesion ordering). 
Every state vector element has\n # a lookup table dimension, but some lookup table dimensions\n # (like geometry parameters) may not be in the state vector.\n # TODO: enforce a requirement that makes all SV elements be inside the LUT\n full_lut_grid = full_config.forward_model.radiative_transfer.lut_grid\n # selectively get lut components that are in this particular RTE\n self.lut_grid_config = OrderedDict()\n if engine_config.lut_names is not None:\n lut_names = engine_config.lut_names\n else:\n lut_names = full_config.forward_model.radiative_transfer.lut_grid.keys()\n\n for key, value in full_lut_grid.items():\n if key in lut_names:\n self.lut_grid_config[key] = value\n\n # selectively get statevector components that are in this particular RTE\n full_sv_names = full_config.forward_model.radiative_transfer.statevector.get_element_names()\n self.statevector_names = full_sv_names\n\n self.lut_dir = engine_config.lut_path\n self.n_point = len(self.lut_grid_config)\n self.n_state = len(self.statevector_names)\n\n self.luts = {}\n\n # Retrieved variables. We establish scaling, bounds, and\n # initial guesses for each state vector element. The state\n # vector elements are all free parameters in the RT lookup table,\n # and they all have associated dimensions in the LUT grid.\n self.bounds, self.scale, self.init = [], [], []\n self.prior_mean, self.prior_sigma = [], []\n for key in self.statevector_names:\n element: StateVectorElementConfig = full_config.forward_model.radiative_transfer.statevector.get_single_element_by_name(\n key)\n self.bounds.append(element.bounds)\n self.scale.append(element.scale)\n self.init.append(element.init)\n self.prior_sigma.append(element.prior_sigma)\n self.prior_mean.append(element.prior_mean)\n self.bounds = np.array(self.bounds)\n self.scale = np.array(self.scale)\n self.init = np.array(self.init)\n self.prior_mean = np.array(self.prior_mean)\n self.prior_sigma = np.array(self.prior_sigma)\n\n self.lut_dims = []\n self.lut_grids = []\n self.lut_names = []\n self.lut_interp_types = []\n for key, grid_values in self.lut_grid_config.items():\n\n # do some quick checks on the values\n if len(grid_values) == 1:\n err = 'Only 1 value in LUT grid {}. ' +\\\n '1-d LUT grids cannot be interpreted.'.format(key)\n raise ValueError(err)\n if grid_values != sorted(grid_values):\n logging.error('Lookup table grid needs ascending order')\n raise ValueError('Lookup table grid needs ascending order')\n\n # Store the values\n self.lut_grids.append(grid_values)\n self.lut_dims.append(len(grid_values))\n self.lut_names.append(key)\n\n # Store in an indication of the type of value each key is\n # (normal - n, degree - d, radian - r)\n if key in self.angular_lut_keys_radians:\n self.lut_interp_types.append('r')\n elif key in self.angular_lut_keys_degrees:\n self.lut_interp_types.append('d')\n else:\n self.lut_interp_types.append('n')\n\n # Cast as array for faster reference later\n self.lut_interp_types = np.array(self.lut_interp_types)\n\n # \"points\" contains all combinations of grid points\n # We will have one filename prefix per point\n self.points = common.combos(self.lut_grids)\n self.files = self.get_lut_filenames()\n\n def build_lut(self, rebuild=False):\n \"\"\"Each LUT is associated with a source directory. 
We build a lookup \n table by: \n (1) defining the LUT dimensions, state vector names, and the \n grid of values; \n (2) running the radiative transfer solver if needed, with each \n run defining a different point in the LUT; and \n (3) loading the LUTs, one per key atmospheric coefficient vector,\n into memory as VectorInterpolator objects.\"\"\"\n\n # Build the list of radiative transfer run commands. This\n # rebuild_cmd() function will be overriden by the child class to\n # perform setup activities unique to each RTM.\n rebuild_cmds = []\n for point, fn in zip(self.points, self.files):\n try:\n cmd = self.rebuild_cmd(point, fn)\n rebuild_cmds.append(cmd)\n except FileExistsError:\n pass\n\n if self.configure_and_exit:\n raise SystemExit\n # sys.exit(0)\n\n elif len(rebuild_cmds) > 0 and self.auto_rebuild:\n logging.info(\"Rebuilding radiative transfer look up table\")\n\n # check to make sure lut directory is there, create if not\n if os.path.isdir(self.lut_dir) is False:\n os.mkdir(self.lut_dir)\n\n # Make the LUT calls (in parallel if specified)\n results = ray.get([spawn_rt.remote(rebuild_cmd, self.lut_dir) for rebuild_cmd in rebuild_cmds])\n\n\n def get_lut_filenames(self):\n files = []\n for point in self.points:\n outf = '_'.join(['%s-%6.4f' % (n, x)\n for n, x in zip(self.lut_names, point)])\n files.append(outf)\n return files\n\n def summarize(self, x_RT, geom):\n \"\"\"Summary of state vector.\"\"\"\n\n if len(x_RT) < 1:\n return ''\n return 'Atmosphere: '+' '.join(['%s: %5.3f' % (si, xi) for si, xi in\n zip(self.statevector_names, x_RT)])\n"
] | [
[
"numpy.array",
"numpy.logical_and",
"numpy.random.random"
]
] |
B-tronics/KinemAutomation | [
"853e9ad2c9e702e1830571152393172960c0d055"
] | [
"poseestimation/poseestimation.py"
] | [
"import cv2\nimport numpy as np\nimport argparse\nimport csv\nimport os\nimport glob\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--csv\", help=\"Path to the CSV file holding the 2D data for the video.\")\nap.add_argument(\"-v\", \"--video\", help=\"Path to the video file.\")\nargs = vars(ap.parse_args())\n\ndir_name = args[\"csv\"]\ncsv_list = [os.path.basename(x) for x in glob.glob(dir_name+\"*.csv\")]\ncsv_list.sort()\n\noutput_path = os.getcwd() + \"/data/Knot_Tying/\"\ntry:\n os.makedirs(output_path)\nexcept FileExistsError as e:\n pass\n\ndir_name = args[\"video\"]\nvideo_list = [os.path.basename(x) for x in glob.glob(dir_name+\"*.avi\")]\nvideo_list.sort()\n\nfor i, csvs_file in enumerate(csv_list):\n\n video_path = args[\"video\"] + video_list[i]\n cap = cv2.VideoCapture(video_path)\n frame = cap.read()[1]\n frameSize = frame.shape\n cap.release()\n\n rows = []\n \n result_file = output_path + csvs_file\n csv_file_path = args[\"csv\"] + csvs_file\n\n with open(csv_file_path, \"r\") as f:\n csvReader = csv.reader(f)\n \n for i, row in enumerate(csvReader):\n rows.append(list(row))\n\n modelPoints = np.array([\n (0.0, 0.0, 0.0), # Origo\n (2.0, 0.0, 2.8), # Left from Origo \n (10.83, 0.5, 0.5), # RightAbove from Origo\n (10.83, -0.5, 0.5), # RightBelow from Origo\n (0.0, -3.16, 0.5), # Below Origo\n (0.0, 3.16, 0.5) # Above Orgio\n ])\n\n focalLength = frameSize[1]\n center = (frameSize[1]/2, frameSize[0]/2)\n cameraMatrix = np.array([\n [focalLength, 0, center[0]],\n [0, focalLength, center[1]],\n [0,0,1]\n ], dtype=\"double\")\n\n distCoeffs = np.zeros((4,1))\n\n with open(result_file, 'w') as r:\n rwriter = csv.writer(r)\n for row in rows:\n imagePoints = np.array([\n (float(row[0]), float(row[1])), # Origo\n (float(row[2]), float(row[3])), # Left from Origo\n (float(row[4]), float(row[5])), # RightAbove from Origo\n (float(row[6]), float(row[7])), # RightBelow from Origo\n (float(row[8]), float(row[9])), # Below Origo\n (float(row[10]), float(row[11])) # Above Origo\n ])\n\n (success, rotationVector, translationVector) = cv2.solvePnP(\n modelPoints, \n imagePoints,\n cameraMatrix,\n distCoeffs,\n flags=cv2.SOLVEPNP_ITERATIVE)\n\n data = [translationVector[0][0], translationVector[1][0], translationVector[2][0]]\n rwriter.writerow(data)"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
ThomasLecat/ray | [
"eb025ea8cb27583e8ef6287f5654f23d1ab270ef",
"eb025ea8cb27583e8ef6287f5654f23d1ab270ef"
] | [
"python/ray/util/sgd/tests/test_torch.py",
"python/ray/tune/integration/torch.py"
] | [
"from unittest.mock import patch\nimport numpy as np\nimport os\nimport pytest\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.utils.data import DataLoader\n\nimport ray\nfrom ray import tune\nfrom ray.util.sgd.torch import TorchTrainer\nfrom ray.util.sgd.torch.training_operator import (\n get_test_operator, get_test_metrics_operator, TrainingOperator)\nfrom ray.util.sgd.torch.constants import SCHEDULER_STEP\nfrom ray.util.sgd.utils import (check_for_failure, NUM_SAMPLES, BATCH_COUNT,\n BATCH_SIZE)\n\nfrom ray.util.sgd.data.examples import mlp_identity\nfrom ray.util.sgd.torch.examples.train_example import (\n model_creator, optimizer_creator, data_creator, LinearDataset)\n\n\[email protected]\ndef ray_start_2_cpus():\n address_info = ray.init(num_cpus=2)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n # Ensure that tests don't ALL fail\n if dist.is_initialized():\n dist.destroy_process_group()\n\n\[email protected]\ndef ray_start_4_cpus():\n address_info = ray.init(num_cpus=4)\n yield address_info\n # The code after the yield will run as teardown code.\n ray.shutdown()\n # Ensure that tests don't ALL fail\n if dist.is_initialized():\n dist.destroy_process_group()\n\n\nOperator = TrainingOperator.from_creators(\n model_creator, optimizer_creator, data_creator, loss_creator=nn.MSELoss)\n\n\ndef test_single_step(ray_start_2_cpus): # noqa: F811\n trainer = TorchTrainer(training_operator_cls=Operator, num_workers=1)\n metrics = trainer.train(num_steps=1)\n assert metrics[BATCH_COUNT] == 1\n\n val_metrics = trainer.validate(num_steps=1)\n assert val_metrics[BATCH_COUNT] == 1\n trainer.shutdown()\n\n\ndef test_dead_trainer(ray_start_2_cpus): # noqa: F811\n TestOperator = get_test_operator(Operator)\n trainer = TorchTrainer(training_operator_cls=TestOperator, num_workers=2)\n trainer.train(num_steps=1)\n trainer.shutdown()\n with pytest.raises(RuntimeError):\n trainer.train()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_train(ray_start_2_cpus, num_workers): # noqa: F811\n trainer = TorchTrainer(\n training_operator_cls=Operator, num_workers=num_workers)\n for i in range(3):\n train_loss1 = trainer.train()[\"train_loss\"]\n validation_loss1 = trainer.validate()[\"val_loss\"]\n\n for i in range(3):\n train_loss2 = trainer.train()[\"train_loss\"]\n validation_loss2 = trainer.validate()[\"val_loss\"]\n\n assert train_loss2 <= train_loss1, (train_loss2, train_loss1)\n assert validation_loss2 <= validation_loss1, (validation_loss2,\n validation_loss1)\n trainer.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_multi_model(ray_start_2_cpus, num_workers):\n def train(*, model=None, criterion=None, optimizer=None, iterator=None):\n model.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(iterator):\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n return {\n \"accuracy\": correct / total,\n \"train_loss\": train_loss / (batch_idx + 1)\n }\n\n def train_epoch(self, iterator, info):\n result = {}\n data = list(iterator)\n for i, (model, optimizer) in enumerate(\n zip(self.models, self.optimizers)):\n result[f\"model_{i}\"] = train(\n model=model,\n 
criterion=self.criterion,\n optimizer=optimizer,\n iterator=iter(data))\n return result\n\n class MultiModelOperator(TrainingOperator):\n def setup(self, config):\n models = nn.Linear(1, 1), nn.Linear(1, 1)\n opts = [\n torch.optim.SGD(model.parameters(), lr=0.0001)\n for model in models\n ]\n loss = nn.MSELoss()\n train_dataloader, val_dataloader = data_creator(config)\n self.models, self.optimizers, self.criterion = self.register(\n models=models, optimizers=opts, criterion=loss)\n self.register_data(\n train_loader=train_dataloader,\n validation_loader=val_dataloader)\n\n TestOperator = get_test_operator(MultiModelOperator)\n\n trainer1 = TorchTrainer(\n config={\"custom_func\": train_epoch},\n training_operator_cls=TestOperator,\n num_workers=num_workers)\n trainer1.train()\n state = trainer1.state_dict()\n\n models1 = trainer1.get_model()\n\n trainer1.shutdown()\n\n trainer2 = TorchTrainer(\n config={\"custom_func\": train_epoch},\n training_operator_cls=TestOperator,\n num_workers=num_workers)\n trainer2.load_state_dict(state)\n\n models2 = trainer2.get_model()\n\n for model_1, model_2 in zip(models1, models2):\n\n model1_state_dict = model_1.state_dict()\n model2_state_dict = model_2.state_dict()\n\n assert set(model1_state_dict.keys()) == set(model2_state_dict.keys())\n\n for k in model1_state_dict:\n assert torch.equal(model1_state_dict[k], model2_state_dict[k])\n\n trainer2.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_multi_model_matrix(ray_start_2_cpus, num_workers): # noqa: F811\n def train_epoch(self, iterator, info):\n if self.config.get(\"models\", 1) > 1:\n assert len(self.models) == self.config[\"models\"], self.config\n\n if self.config.get(\"optimizers\", 1) > 1:\n assert len(\n self.optimizers) == self.config[\"optimizers\"], self.config\n\n if self.config.get(\"schedulers\", 1) > 1:\n assert len(\n self.schedulers) == self.config[\"schedulers\"], self.config\n return {\"done\": 1}\n\n def multi_model_creator(config):\n models = []\n for i in range(config.get(\"models\", 1)):\n models += [nn.Linear(1, 1)]\n return models[0] if len(models) == 1 else models\n\n def multi_optimizer_creator(models, config):\n optimizers = []\n main_model = models[0] if type(models) is list else models\n for i in range(config.get(\"optimizers\", 1)):\n optimizers += [torch.optim.SGD(main_model.parameters(), lr=0.0001)]\n return optimizers[0] if len(optimizers) == 1 else optimizers\n\n def multi_scheduler_creator(optimizer, config):\n schedulers = []\n main_opt = optimizer[0] if type(optimizer) is list else optimizer\n for i in range(config.get(\"schedulers\", 1)):\n schedulers += [\n torch.optim.lr_scheduler.StepLR(\n main_opt, step_size=30, gamma=0.1)\n ]\n return schedulers[0] if len(schedulers) == 1 else schedulers\n\n class MultiModelOperator(TrainingOperator):\n def setup(self, config):\n models = multi_model_creator(config)\n optimizers = multi_optimizer_creator(models, config)\n schedulers = multi_scheduler_creator(optimizers, config)\n train_loader, val_loader = data_creator(config)\n loss = nn.MSELoss()\n\n self.models, self.optimizers, self.criterion, self.schedulers = \\\n self.register(models=models, optimizers=optimizers,\n schedulers=schedulers,\n criterion=loss)\n self.register_data(\n train_loader=train_loader, validation_loader=val_loader)\n\n TestOperator = get_test_operator(MultiModelOperator)\n\n for model_count in range(1, 3):\n for optimizer_count in range(1, 3):\n for scheduler_count in range(1, 3):\n trainer = 
TorchTrainer(\n scheduler_step_freq=\"epoch\",\n training_operator_cls=TestOperator,\n num_workers=num_workers,\n config={\n \"models\": model_count,\n \"optimizers\": optimizer_count,\n \"schedulers\": scheduler_count,\n \"custom_func\": train_epoch\n })\n trainer.train()\n trainer.shutdown()\n\n\[email protected](\"scheduler_freq\", [\"epoch\", \"batch\", \"manual\", None])\ndef test_scheduler_freq(ray_start_2_cpus, scheduler_freq): # noqa: F811\n def train_epoch(self, iterator, info):\n assert info[SCHEDULER_STEP] == scheduler_freq\n return {\"done\": 1}\n\n def scheduler_creator(optimizer, config):\n return torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=30, gamma=0.1)\n\n class TestTrainingOperator(TrainingOperator):\n def setup(self, config):\n model = model_creator(config)\n optimizer = optimizer_creator(model, config)\n train_loader, val_loader = data_creator(config)\n scheduler = scheduler_creator(optimizer, config)\n loss = nn.MSELoss()\n\n self.model, self.optimizer, self.criterion, self.scheduler = \\\n self.register(\n models=model, optimizers=optimizer,\n criterion=loss, schedulers=scheduler)\n self.register_data(\n train_loader=train_loader, validation_loader=val_loader)\n\n if scheduler_freq is None:\n with pytest.raises(ValueError):\n trainer = TorchTrainer(\n config={\"custom_func\": train_epoch},\n training_operator_cls=TestTrainingOperator,\n scheduler_step_freq=scheduler_freq)\n else:\n trainer = TorchTrainer(\n config={\"custom_func\": train_epoch},\n training_operator_cls=TestTrainingOperator,\n scheduler_step_freq=scheduler_freq)\n\n for i in range(3):\n trainer.train()\n trainer.shutdown()\n\n\ndef test_profiling(ray_start_2_cpus): # noqa: F811\n trainer = TorchTrainer(training_operator_cls=Operator)\n\n stats = trainer.train(profile=True)\n assert \"profile\" in stats\n stats = trainer.validate(profile=True)\n assert \"profile\" in stats\n trainer.shutdown()\n\n\ndef test_dataset(ray_start_4_cpus):\n \"\"\"\n This test tries training the mlp_identity example. 
We check the accuracy of\n the model as an all inclusive way of ensuring that we are properly sharding\n and iterating over the entire dataset (instead of repeating the first set\n of points for example).\n \"\"\"\n\n model_creator = mlp_identity.model_creator\n optimizer_creator = mlp_identity.optimizer_creator\n dataset_creator = mlp_identity.dataset_creator\n\n DatasetOperator = TrainingOperator.from_creators(\n model_creator=model_creator,\n optimizer_creator=optimizer_creator,\n loss_creator=nn.MSELoss)\n\n trainer = TorchTrainer(\n training_operator_cls=DatasetOperator,\n num_workers=2,\n )\n\n dataset = dataset_creator()\n for i in range(5):\n trainer.train(dataset=dataset, num_steps=100)\n\n input = mlp_identity.to_mat(0.5)\n prediction = float(trainer.get_model()(input)[0][0])\n assert 0.4 <= prediction <= 0.6\n trainer.shutdown()\n\n\ndef test_split_batch(ray_start_2_cpus):\n if not dist.is_available():\n return\n\n def data_creator(config):\n \"\"\"Returns training dataloader, validation dataloader.\"\"\"\n train_dataset = LinearDataset(2, 5, size=config[\"data_size\"])\n return DataLoader(\n train_dataset,\n batch_size=config[BATCH_SIZE],\n )\n\n data_size = 600\n batch_size = 21\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n data_creator,\n loss_creator=lambda config: nn.MSELoss())\n trainer = TorchTrainer(\n training_operator_cls=TestOperator,\n num_workers=2,\n config={\n BATCH_SIZE: batch_size,\n \"data_size\": data_size,\n })\n stats = trainer.train()\n assert trainer.config[BATCH_SIZE] == (batch_size - 1)\n assert stats[NUM_SAMPLES] == 600\n assert stats[BATCH_COUNT] == (data_size // 20)\n trainer.shutdown()\n\n\ndef test_reduce_result(ray_start_2_cpus):\n if not dist.is_available():\n return\n\n def data_creator(config):\n \"\"\"Returns training dataloader, validation dataloader.\"\"\"\n train_dataset = LinearDataset(2, 5, size=config[\"data_size\"])\n test_dataset = LinearDataset(2, 5, size=config[\"data_size\"])\n return DataLoader(\n train_dataset, batch_size=1), DataLoader(\n test_dataset, batch_size=1)\n\n data_size = 600\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n data_creator,\n loss_creator=lambda config: nn.MSELoss())\n trainer = TorchTrainer(\n training_operator_cls=TestOperator,\n num_workers=2,\n config={\"data_size\": data_size})\n list_stats = trainer.train(reduce_results=False, profile=True)\n assert len(list_stats) == 2\n assert [stats[NUM_SAMPLES] == data_size for stats in list_stats]\n assert [stats[BATCH_COUNT] == (data_size // 2) for stats in list_stats]\n list_stats = trainer.validate(reduce_results=False, profile=True)\n assert len(list_stats) == 2\n assert [stats[NUM_SAMPLES] == data_size for stats in list_stats]\n assert [stats[BATCH_COUNT] == (data_size // 2) for stats in list_stats]\n trainer.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_metrics(ray_start_2_cpus, num_workers):\n data_size, val_size = 600, 500\n batch_size = 4\n\n num_train_steps = int(data_size / batch_size)\n num_val_steps = int(val_size / batch_size)\n\n train_scores = [1] + ([0] * num_train_steps)\n val_scores = [1] + ([0] * num_val_steps)\n\n TestOperator = get_test_metrics_operator(Operator)\n trainer = TorchTrainer(\n training_operator_cls=TestOperator,\n num_workers=num_workers,\n config={\n \"scores\": train_scores,\n \"val_scores\": val_scores,\n \"key\": \"score\",\n \"batch_size\": batch_size,\n \"data_size\": data_size,\n 
\"val_size\": val_size\n })\n\n stats = trainer.train(num_steps=num_train_steps)\n # Test that we output mean and last of custom metrics in an epoch\n assert \"score\" in stats\n assert stats[\"last_score\"] == 0\n\n assert stats[NUM_SAMPLES] == num_train_steps * batch_size\n expected_score = num_workers * (sum(train_scores) /\n (num_train_steps * batch_size))\n assert np.allclose(stats[\"score\"], expected_score)\n\n val_stats = trainer.validate()\n # Test that we output mean and last of custom metrics in validation\n assert val_stats[\"last_score\"] == 0\n expected_score = (sum(val_scores) /\n (num_val_steps * batch_size)) * num_workers\n assert np.allclose(val_stats[\"score\"], expected_score)\n assert val_stats[BATCH_COUNT] == np.ceil(num_val_steps / num_workers)\n assert val_stats[NUM_SAMPLES] == num_val_steps * batch_size\n assert val_stats[NUM_SAMPLES] == val_size\n\n trainer.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_metrics_nan(ray_start_2_cpus, num_workers):\n data_size, val_size = 100, 100\n batch_size = 10\n\n num_train_steps = int(data_size / batch_size)\n num_val_steps = int(val_size / batch_size)\n\n train_scores = [np.nan] + ([0] * num_train_steps)\n val_scores = [np.nan] + ([0] * num_val_steps)\n TestOperator = get_test_metrics_operator(Operator)\n trainer = TorchTrainer(\n training_operator_cls=TestOperator,\n num_workers=num_workers,\n config={\n \"scores\": train_scores,\n \"val_scores\": val_scores,\n \"key\": \"score\",\n \"batch_size\": batch_size,\n \"data_size\": data_size,\n \"val_size\": val_size\n })\n\n stats = trainer.train(num_steps=num_train_steps)\n assert \"score\" in stats\n assert stats[\"last_score\"] == 0\n assert np.isnan(stats[\"score\"])\n\n stats = trainer.validate()\n assert \"score\" in stats\n assert stats[\"last_score\"] == 0\n assert np.isnan(stats[\"score\"])\n trainer.shutdown()\n\n\ndef test_scheduler_validate(ray_start_2_cpus): # noqa: F811\n from torch.optim.lr_scheduler import ReduceLROnPlateau\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n data_creator,\n scheduler_creator=lambda optimizer, cfg: ReduceLROnPlateau(optimizer),\n loss_creator=lambda config: nn.MSELoss())\n TestOperator = get_test_operator(TestOperator)\n trainer = TorchTrainer(\n scheduler_step_freq=\"manual\", training_operator_cls=TestOperator)\n trainer.update_scheduler(0.5)\n trainer.update_scheduler(0.5)\n assert all(\n trainer.apply_all_operators(\n lambda op: op._schedulers[0].last_epoch == 2))\n trainer.shutdown()\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef test_tune_train(ray_start_2_cpus, num_workers): # noqa: F811\n TorchTrainable = TorchTrainer.as_trainable(\n **{\n \"training_operator_cls\": Operator,\n \"num_workers\": num_workers,\n \"use_gpu\": False,\n \"backend\": \"gloo\",\n \"config\": {\n \"batch_size\": 512,\n \"lr\": 0.001\n }\n })\n\n analysis = tune.run(\n TorchTrainable,\n num_samples=2,\n stop={\"training_iteration\": 2},\n verbose=1)\n\n # checks loss decreasing for every trials\n for path, df in analysis.trial_dataframes.items():\n mean_train_loss1 = df.loc[0, \"train_loss\"]\n mean_train_loss2 = df.loc[1, \"train_loss\"]\n mean_val_loss1 = df.loc[0, \"val_loss\"]\n mean_val_loss2 = df.loc[1, \"val_loss\"]\n\n assert mean_train_loss2 <= mean_train_loss1\n assert mean_val_loss2 <= mean_val_loss1\n\n\[email protected](\"num_workers\", [1, 2] if dist.is_available() else [1])\ndef 
test_save_and_restore(ray_start_2_cpus, num_workers,\n tmp_path): # noqa: F811\n trainer1 = TorchTrainer(\n training_operator_cls=Operator, num_workers=num_workers)\n trainer1.train()\n checkpoint_path = os.path.join(tmp_path, \"checkpoint\")\n trainer1.save(checkpoint_path)\n\n model1 = trainer1.get_model()\n\n trainer1.shutdown()\n\n trainer2 = TorchTrainer(\n training_operator_cls=Operator, num_workers=num_workers)\n trainer2.load(checkpoint_path)\n\n model2 = trainer2.get_model()\n\n model1_state_dict = model1.state_dict()\n model2_state_dict = model2.state_dict()\n\n assert set(model1_state_dict.keys()) == set(model2_state_dict.keys())\n\n for k in model1_state_dict:\n assert torch.equal(model1_state_dict[k], model2_state_dict[k])\n trainer2.shutdown()\n\n\ndef test_wrap_ddp(ray_start_2_cpus, tmp_path): # noqa: F811\n if not dist.is_available():\n return\n trainer1 = TorchTrainer(\n training_operator_cls=Operator, wrap_ddp=False, num_workers=2)\n trainer1.train()\n checkpoint_path = os.path.join(tmp_path, \"checkpoint\")\n trainer1.save(checkpoint_path)\n\n model1 = trainer1.get_model()\n assert not hasattr(trainer1.local_worker.training_operator.model, \"module\")\n assert hasattr(trainer1.local_worker.training_operator, \"device_ids\")\n trainer1.shutdown()\n\n trainer2 = TorchTrainer(\n training_operator_cls=Operator, wrap_ddp=False, num_workers=2)\n trainer2.load(checkpoint_path)\n\n model2 = trainer2.get_model()\n\n model1_state_dict = model1.state_dict()\n model2_state_dict = model2.state_dict()\n\n assert set(model1_state_dict.keys()) == set(model2_state_dict.keys())\n\n for k in model1_state_dict:\n assert torch.equal(model1_state_dict[k], model2_state_dict[k])\n trainer2.shutdown()\n\n\ndef gen_step_with_fail(num_fails):\n def step_with_fail(self,\n num_steps=None,\n profile=False,\n info=None,\n dataset=None):\n params = dict(num_steps=num_steps, profile=profile, info=info)\n remote_worker_stats = [\n w.train_epoch.remote(**params) for w in self.remote_workers\n ]\n\n if self._num_failures < num_fails:\n time.sleep(1) # Make the batch will fail correctly.\n ray.kill(self.remote_workers[0])\n\n try:\n local_worker_stats = self.local_worker.train_epoch(**params)\n except RuntimeError:\n return False, None\n\n success = check_for_failure(remote_worker_stats)\n if success:\n return success, [local_worker_stats] + ray.get(remote_worker_stats)\n\n return success, None\n\n return step_with_fail\n\n\ndef test_fail_with_recover(ray_start_2_cpus): # noqa: F811\n if not dist.is_available():\n return\n\n def single_loader(config):\n dataset = LinearDataset(2, 5, size=1000000)\n return DataLoader(dataset, batch_size=config.get(\"batch_size\", 32))\n\n step_with_fail = gen_step_with_fail(3)\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n single_loader,\n loss_creator=lambda config: nn.MSELoss())\n with patch.object(TorchTrainer, \"_train_epoch\", step_with_fail):\n trainer1 = TorchTrainer(\n training_operator_cls=TestOperator,\n config={\"batch_size\": 100000},\n num_workers=2)\n\n with pytest.raises(RuntimeError):\n trainer1.train(max_retries=1)\n\n trainer1.shutdown(force=True)\n\n\ndef test_resize(ray_start_2_cpus): # noqa: F811\n if not dist.is_available():\n return\n\n def single_loader(config):\n dataset = LinearDataset(2, 5, size=1000000)\n return DataLoader(dataset, batch_size=config.get(\"batch_size\", 32))\n\n step_with_fail = gen_step_with_fail(1)\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n 
optimizer_creator,\n single_loader,\n loss_creator=lambda config: nn.MSELoss())\n with patch.object(TorchTrainer, \"_train_epoch\", step_with_fail):\n trainer1 = TorchTrainer(\n training_operator_cls=TestOperator,\n config={\"batch_size\": 100000},\n num_workers=2)\n\n @ray.remote\n def try_test():\n import time\n time.sleep(100)\n\n try_test.remote()\n trainer1.train(max_retries=1)\n assert len(trainer1.remote_workers) == 1\n\n trainer1.shutdown()\n\n\ndef test_fail_twice(ray_start_2_cpus): # noqa: F811\n if not dist.is_available():\n return\n\n def single_loader(config):\n dataset = LinearDataset(2, 5, size=1000000)\n return DataLoader(dataset, batch_size=config.get(\"batch_size\", 32))\n\n step_with_fail = gen_step_with_fail(2)\n\n TestOperator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n single_loader,\n loss_creator=lambda config: nn.MSELoss())\n\n with patch.object(TorchTrainer, \"_train_epoch\", step_with_fail):\n trainer1 = TorchTrainer(\n training_operator_cls=TestOperator,\n config={\"batch_size\": 100000},\n num_workers=2)\n\n # MAX RETRIES SHOULD BE ON BY DEFAULT\n trainer1.train()\n trainer1.shutdown()\n\n\ndef test_multi_input_model(ray_start_2_cpus):\n def model_creator(config):\n class MultiInputModel(nn.Module):\n def __init__(self):\n super(MultiInputModel, self).__init__()\n self._fc1 = torch.nn.Linear(1, 1)\n self._fc2 = torch.nn.Linear(1, 1)\n\n def forward(self, x, y):\n return self._fc1(x) + self._fc2(y)\n\n return MultiInputModel()\n\n def data_creator(config):\n class LinearDataset(torch.utils.data.Dataset):\n def __init__(self, a, b, size=1000):\n x = np.random.randn(size)\n y = np.random.randn(size)\n self.x = torch.tensor(x, dtype=torch.float32)\n self.y = torch.tensor(y, dtype=torch.float32)\n self.z = torch.tensor(a * (x + y) + 2 * b, dtype=torch.float32)\n\n def __getitem__(self, index):\n return (self.x[index, None], self.y[index, None],\n self.z[index, None])\n\n def __len__(self):\n return len(self.x)\n\n train_dataset = LinearDataset(3, 4)\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=config.get(\"batch_size\", 32),\n )\n return train_loader, None\n\n Operator = TrainingOperator.from_creators(\n model_creator,\n optimizer_creator,\n data_creator,\n loss_creator=lambda config: nn.MSELoss())\n\n trainer = TorchTrainer(training_operator_cls=Operator, num_workers=1)\n\n metrics = trainer.train(num_steps=1)\n assert metrics[BATCH_COUNT] == 1\n\n trainer.shutdown()\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n\n sys.exit(pytest.main([\"-v\", \"-x\", __file__]))\n",
"# Original Code here:\n# https://github.com/pytorch/examples/blob/master/mnist/main.py\nfrom contextlib import contextmanager\nimport os\nimport logging\nimport shutil\nimport tempfile\nimport torch\nfrom datetime import timedelta\n\nimport ray\nfrom ray import tune\nfrom ray.tune.result import RESULT_DUPLICATE\nfrom ray.tune.logger import NoopLogger\nfrom ray.tune.function_runner import wrap_function\nfrom ray.tune.resources import Resources\nfrom ray.tune.trainable import TrainableUtil\nfrom ray.tune.utils import detect_checkpoint_function\nfrom ray.util.sgd.torch.utils import setup_process_group, setup_address\nfrom ray.util.sgd.torch.constants import NCCL_TIMEOUT_S\n\nlogger = logging.getLogger(__name__)\n\n_distributed_enabled = False\n\n\ndef is_distributed_trainable():\n \"\"\"Returns True if executing within a DistributedTrainable.\"\"\"\n return _distributed_enabled\n\n\ndef enable_distributed_trainable():\n global _distributed_enabled\n _distributed_enabled = True\n\n\ndef logger_creator(log_config, logdir, rank):\n worker_dir = os.path.join(logdir, \"worker_{}\".format(rank))\n os.makedirs(worker_dir, exist_ok=True)\n return NoopLogger(log_config, worker_dir)\n\n\nclass _TorchTrainable(tune.Trainable):\n \"\"\"Base class for distributed training on Tune.\n\n A wrapper class is needed to actually create a working\n version of this trainable.\n \"\"\"\n _function = None\n _num_workers = None\n _use_gpu = None\n _num_cpus_per_worker = None\n\n __slots__ = [\"workers\", \"_finished\"]\n\n @classmethod\n def default_process_group_parameters(self):\n return dict(timeout=timedelta(NCCL_TIMEOUT_S), backend=\"gloo\")\n\n @classmethod\n def get_remote_worker_options(self):\n num_gpus = 1 if self._use_gpu else 0\n num_cpus = int(self._num_cpus_per_worker or 1)\n return dict(num_cpus=num_cpus, num_gpus=num_gpus)\n\n def setup(self, config):\n self._finished = False\n num_workers = self._num_workers\n logdir = self.logdir\n assert self._function\n\n func_trainable = wrap_function(self.__class__._function)\n\n remote_trainable = ray.remote(func_trainable)\n remote_trainable = remote_trainable.options(\n **self.get_remote_worker_options())\n\n address = setup_address()\n self.workers = [\n remote_trainable.remote(\n config=config,\n logger_creator=lambda cfg: logger_creator(cfg, logdir, rank))\n for rank in range(num_workers)\n ]\n\n pgroup_params = self.default_process_group_parameters()\n from functools import partial\n setup_on_worker = partial(\n setup_process_group,\n url=address,\n world_size=num_workers,\n **pgroup_params)\n ray.get([\n w.execute.remote(lambda _: setup_on_worker(world_rank=rank))\n for rank, w in enumerate(self.workers)\n ])\n\n ray.get([\n w.execute.remote(lambda _: enable_distributed_trainable())\n for rank, w in enumerate(self.workers)\n ])\n\n def step(self):\n if self._finished:\n raise RuntimeError(\"Training has already finished.\")\n result = ray.get([w.step.remote() for w in self.workers])[0]\n if RESULT_DUPLICATE in result:\n self._finished = True\n return result\n\n def save_checkpoint(self, checkpoint_dir):\n # TODO: optimize if colocated\n save_obj = ray.get(self.workers[0].save_to_object.remote())\n checkpoint_path = TrainableUtil.create_from_pickle(\n save_obj, checkpoint_dir)\n return checkpoint_path\n\n def load_checkpoint(self, checkpoint_dir):\n checkpoint_obj = TrainableUtil.checkpoint_to_object(checkpoint_dir)\n return ray.get(\n w.restore_from_object.remote(checkpoint_obj) for w in self.workers)\n\n def stop(self):\n ray.get([worker.stop.remote() 
for worker in self.workers])\n\n\ndef DistributedTrainableCreator(func,\n use_gpu=False,\n num_workers=1,\n num_cpus_per_worker=1,\n backend=\"gloo\",\n timeout_s=NCCL_TIMEOUT_S):\n \"\"\"Creates a class that executes distributed training.\n\n Similar to running `torch.distributed.launch`.\n\n Note that you typically should not instantiate the object\n created.\n\n Args:\n func (callable): This function is a Tune trainable function.\n This function must have 2 args in the signature, and the\n latter arg must contain `checkpoint_dir`. For example:\n `func(config, checkpoint_dir=None)`.\n use_gpu (bool): Sets resource allocation for workers to 1 GPU\n if true. Also automatically sets CUDA_VISIBLE_DEVICES\n for each training worker.\n num_workers (int): Number of training workers to include in\n world.\n num_cpus_per_worker (int): Number of CPU resources to reserve\n per training worker.\n backend (str): One of \"gloo\", \"nccl\".\n timeout_s (float): Seconds before the torch process group\n times out. Useful when machines are unreliable. Defaults\n to 60 seconds.\n\n Returns:\n A trainable class object that can be passed to Tune. Resources\n are automatically set within the object, so users do\n not need to set `resources_per_trainable`.\n\n Example:\n\n .. code-block:: python\n\n trainable_cls = DistributedTrainableCreator(\n train_func, num_workers=2)\n analysis = tune.run(trainable_cls)\n \"\"\"\n detect_checkpoint_function(func, abort=True)\n\n class WrappedDistributedTorchTrainable(_TorchTrainable):\n _function = func\n _num_workers = num_workers\n _use_gpu = use_gpu\n _num_cpus_per_worker = num_cpus_per_worker\n\n @classmethod\n def default_process_group_parameters(self):\n return dict(timeout=timedelta(timeout_s), backend=backend)\n\n @classmethod\n def default_resource_request(cls, config):\n num_workers_ = int(config.get(\"num_workers\", num_workers))\n num_cpus = int(\n config.get(\"num_cpus_per_worker\", num_cpus_per_worker))\n use_gpu_ = config.get(\"use_gpu\", use_gpu)\n\n return Resources(\n cpu=0,\n gpu=0,\n extra_cpu=num_cpus * num_workers_,\n extra_gpu=num_workers_ if use_gpu_ else 0)\n\n return WrappedDistributedTorchTrainable\n\n\n@contextmanager\ndef distributed_checkpoint_dir(step, disable=False):\n \"\"\"ContextManager for creating a distributed checkpoint.\n\n Only checkpoints a file on the \"main\" training actor, avoiding\n redundant work.\n\n Args:\n step (int): Used to label the checkpoint\n disable (bool): Disable for prototyping.\n\n Yields:\n path (str): A path to a directory. This path will be used\n again when invoking the training_function.\n Example:\n\n .. code-block:: python\n\n def train_func(config, checkpoint_dir):\n if checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n model_state_dict = torch.load(path)\n\n if epoch % 3 == 0:\n with distributed_checkpoint_dir(step=epoch) as checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n torch.save(model.state_dict(), path)\n \"\"\"\n\n if torch.distributed.get_rank() == 0 and not disable:\n with tune.checkpoint_dir(step=step) as checkpoint_dir:\n yield checkpoint_dir\n else:\n path = tempfile.mkdtemp()\n yield path\n shutil.rmtree(path)\n\n\ndef _train_check_global(config, checkpoint_dir=None):\n \"\"\"For testing only. 
Putting this here because Ray has problems\n serializing within the test file.\"\"\"\n assert is_distributed_trainable()\n import time\n time.sleep(0.1)\n tune.report(is_distributed=True)\n\n\ndef _train_simple(config, checkpoint_dir=None):\n \"\"\"For testing only. Putting this here because Ray has problems\n serializing within the test file.\"\"\"\n import torch.nn as nn\n from torch.nn.parallel import DistributedDataParallel\n import torch.optim as optim\n # N is batch size; D_in is input dimension;\n # H is hidden dimension; D_out is output dimension.\n N, D_in, H, D_out = 8, 5, 5, 5\n\n # Create random Tensors to hold inputs and outputs\n x = torch.randn(N, D_in)\n y = torch.randn(N, D_out)\n loss_fn = nn.MSELoss()\n\n # Use the nn package to define our model and loss function.\n model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n )\n optimizer = optim.SGD(model.parameters(), lr=0.1)\n\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"checkpoint\")) as f:\n model_state, optimizer_state = torch.load(f)\n\n model.load_state_dict(model_state)\n optimizer.load_state_dict(optimizer_state)\n\n model = DistributedDataParallel(model)\n\n for epoch in range(config.get(\"epochs\", 10)):\n optimizer.zero_grad()\n output = model(x)\n loss = loss_fn(output, y)\n loss.backward()\n optimizer.step()\n\n if epoch % 3 == 0:\n if config.get(\"enable_checkpoint\", True):\n with distributed_checkpoint_dir(step=epoch) as checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n torch.save((model.state_dict(), optimizer.state_dict()),\n path)\n tune.report(mean_loss=loss.item())\n"
] | [
[
"numpy.allclose",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"numpy.ceil",
"torch.nn.MSELoss",
"torch.distributed.is_available",
"numpy.random.randn",
"torch.equal",
"torch.tensor",
"torch.distributed.is_initialized",
"numpy.isnan",
"torch.optim.lr_scheduler.StepLR",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.distributed.destroy_process_group"
],
[
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.distributed.get_rank",
"torch.load",
"torch.randn",
"torch.nn.parallel.DistributedDataParallel",
"torch.nn.ReLU"
]
] |
yangxu0110/yysScript | [
"079101f57fb1a64b871924c988760d9e74063a71"
] | [
"yys/YuHunModule.py"
] | [
"# -*- coding: utf-8 -*-\nimport datetime\nimport logging\nimport os\nimport random\nimport time\nfrom tkinter import END\n\nimport cv2\nimport numpy\nimport numpy as np\nimport pyautogui\nfrom PIL import ImageGrab\nfrom matplotlib import pyplot as plt\n\npyautogui.FAILSAFE = False\nlogging.basicConfig(format=\"%(asctime)s :%(levelname)s:%(message)s\", datefmt=\"%d-%M-%Y %H:%M:%S\", level=logging.DEBUG)\n# 初始化SIFT探测器\nSIFT = cv2.xfeatures2d.SIFT_create()\n\n\ndef ComputeScreenShot(screenShot):\n \"\"\"\n 由于屏幕分辨率高,计算耗时,这里优化一下\n :return:\n \"\"\"\n kp2, des2 = SIFT.detectAndCompute(screenShot, None)\n return kp2, des2\n\n\ndef GetLocation(target, kp2, des2):\n \"\"\"\n 获取目标图像在截图中的位置\n :param target:\n :param screenShot:\n :return: 返回坐标(x,y) 与opencv坐标系对应\n \"\"\"\n MIN_MATCH_COUNT = 10\n img1 = target # cv2.cvtColor(target,cv2.COLOR_BGR2GRAY)# 查询图片\n # img2 = screenShot\n # img2 = cv2.cvtColor(screenShot, cv2.COLOR_BGR2GRAY) # 训练图片\n # img2 = cv2.resize(img2, dsize=None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST)\n # 用SIFT找到关键点和描述符\n\n kp1, des1 = SIFT.detectAndCompute(img1, None)\n\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)\n search_params = dict(checks=50)\n\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matches = flann.knnMatch(des1, des2, k=2)\n good = []\n for m, n in matches:\n if m.distance < 0.7 * n.distance:\n good.append(m)\n if len(good) > MIN_MATCH_COUNT:\n src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n matchesMask = mask.ravel().tolist()\n h, w = img1.shape\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n if M is not None:\n dst = cv2.perspectiveTransform(pts, M)\n arr = np.int32(dst) #\n midPosArr = arr[0] + (arr[2] - arr[0]) // 2\n midPos = (midPosArr[0][0], midPosArr[0][1])\n # show=cv2.circle(img2,midPos,30,(255,255,255),thickness=5)\n # cv2.imshow('s',show)\n # cv2.waitKey()\n # cv2.destroyAllWindows()\n return midPos\n else:\n return None\n else:\n return None\n\n\ndef CheatPos(originPos, factor=5):\n \"\"\"\n 对原始点击坐标进行随机偏移,防止封号\n :param originPos:原始坐标\n :return:\n \"\"\"\n x, y = random.randint(-factor, factor), random.randint(-factor, factor)\n newPos = (originPos[0] + x, originPos[1] + y)\n return newPos\n\n\ndef Click(targetPosition):\n \"\"\"\n 点击屏幕上的某个点\n :param targetPosition:\n :return:\n \"\"\"\n if targetPosition is None:\n print('未检测到目标')\n else:\n\n pyautogui.moveTo(targetPosition, duration=0.20)\n pyautogui.click()\n time.sleep(random.randint(500, 1000) / 1000)\n\n # time.sleep(random.randint(100, 150) / 1000)\n\n\ndef loadImgs():\n \"\"\"\n 加载所有需要检测的目标图像\n :return:\n \"\"\"\n obj = {}\n path = os.getcwd() + '/img'\n file_list = os.listdir(path)\n\n for file in file_list:\n name = file.split('.')[0]\n file_path = path + '/' + file\n a = cv2.imread(file_path, 0)\n obj[name] = a\n\n return obj\n\n\ndef GetScreenShot():\n \"\"\"\n 获取屏幕截图\n :return:\n \"\"\"\n screen = ImageGrab.grab()\n # screen.save('screen.jpg')\n # screen = cv2.imread('screen.jpg')\n screen = cv2.cvtColor(numpy.asarray(screen), cv2.COLOR_RGB2BGR)\n logging.info('截屏成功')\n return screen\n\n\nclass YuHun():\n def __init__(self):\n self._flag = False\n self.NeedCloseGame=False\n self.NeedCloseSystem=False\n\n def Run(self, LogUI, NeedCloseGame, NeedCloseSystem):\n imgs = loadImgs()\n LogUI.insert(END,\n 
time.strftime('%Y-%m-%d %H:%M:%S ',\n time.localtime(time.time())) + '开始挑战\\n')\n Count = 1\n while self._flag is not True:\n logging.debug('开始挑战')\n screen = GetScreenShot()\n WindowShape = screen.shape\n result = []\n\n # 为了优化速度,把计算屏幕截图的特征提取出来,避免重复运算\n kp2, des2 = ComputeScreenShot(screen)\n for i in ['tili60', 'tili80', 'auto', 'jieshou2', 'jieshou1', 'end1', 'end2', 'reject', 'queding',\n 'tiaozhan']:\n obj = imgs[i]\n # begin = time.clock()\n pos = GetLocation(obj, kp2, des2)\n # logging.debug('检测结算目标图像')\n # print(time.clock()-begin)\n if pos is not None:\n if i == 'tili60' or i == 'tili80':\n print('window.py', NeedCloseSystem)\n if self.NeedCloseSystem:\n print('log')\n os.system('shutdown -s -t 60')\n return\n if not self.NeedCloseGame:\n # 需要手动关闭游戏\n LogUI.insert(END,\n time.strftime('%Y-%m-%d %H:%M:%S ',\n time.localtime(time.time())) + '体力用完,需要手动关闭加成或游戏\\n')\n return\n # 结束进程\n hasProcess = True\n while hasProcess:\n if 'onmyoji' in os.popen('tasklist /FI \"IMAGENAME eq onmyoji.exe\"').read():\n os.system('TASKKILL /F /IM onmyoji.exe')\n hasProcess = True\n else:\n hasProcess = False\n # 线程结束返回\n return\n elif i == 'end1':\n time.sleep(random.randint(300, 800) / 1000)\n pos = CheatPos(pos, 50)\n elif i == 'end2':\n newPos = (pos[0] + 80, pos[1] + 80)\n pos = CheatPos(newPos, 5)\n elif i == 'tiaozhan':\n LogUI.insert(END,\n time.strftime('%Y-%m-%d %H:%M:%S ',\n time.localtime(time.time())) + '第' + str(Count) + '轮开始\\n')\n Count += 1\n elif i == 'reject':\n pos = CheatPos(pos, 3)\n else:\n pos = CheatPos(pos, 10)\n result.append(pos)\n\n LogUI.see(END)\n else:\n result.append(None)\n # 开始检查结果\n for i in result:\n if i is not None:\n print(WindowShape[1] * 0.06)\n print(WindowShape[0] * 0.96)\n if i[0] < WindowShape[1] * 0.06 or i[1] > WindowShape[0] * 0.96:\n continue\n else:\n Click(i)\n if len(LogUI.get('1.0', 'end-1c')) > 6000:\n LogUI.delete(1.0, END) # 使用 delete\n LogUI.insert(END, ' 清空日志\\n')\n LogUI.see(END)\n\n def Terminate(self):\n self._flag = True\n\n\n# def YuHunTwoWindow(LogUI, NeedCloseGame, NeedCloseSystem):\n# \"\"\"\n# 自动御魂,双开模式\n# \"\"\"\n# imgs = loadImgs()\n# LogUI.insert(END,\n# time.strftime('%Y-%m-%d %H:%M:%S ',\n# time.localtime(time.time())) + '开始挑战\\n')\n# Count = 1\n# while True:\n#\n# logging.debug('开始挑战')\n# screen = GetScreenShot()\n# WindowShape = screen.shape\n# result = []\n#\n# # 为了优化速度,把计算屏幕截图的特征提取出来,避免重复运算\n# kp2, des2 = ComputeScreenShot(screen)\n# for i in ['tili60', 'tili80', 'auto', 'jieshou2', 'jieshou1', 'end1', 'end2', 'reject', 'queding', 'tiaozhan']:\n# obj = imgs[i]\n# # begin = time.clock()\n# pos = GetLocation(obj, kp2, des2)\n# # logging.debug('检测结算目标图像')\n# # print(time.clock()-begin)\n# if pos is not None:\n# if i == 'tili60' or i == 'tili80':\n# print('window.py', NeedCloseSystem)\n# if NeedCloseSystem:\n# print('log')\n# os.system('shutdown -s -t 60')\n# return\n# if not NeedCloseGame:\n# # print('体力用完,需要手动关闭加成或游戏')\n# LogUI.insert(END,\n# time.strftime('%Y-%m-%d %H:%M:%S ',\n# time.localtime(time.time())) + '体力用完,需要手动关闭加成或游戏\\n')\n# return\n# # 结束进程\n# hasProcess = True\n# while hasProcess:\n# if 'onmyoji' in os.popen('tasklist /FI \"IMAGENAME eq onmyoji.exe\"').read():\n# os.system('TASKKILL /F /IM onmyoji.exe')\n# hasProcess = True\n# else:\n# hasProcess = False\n# # 线程结束返回\n# return\n# elif i == 'end1':\n# time.sleep(random.randint(300, 800) / 1000)\n# pos = CheatPos(pos, 50)\n# elif i == 'end2':\n# newPos = (pos[0] + 80, pos[1] + 80)\n# pos = CheatPos(newPos, 5)\n# elif i == 'tiaozhan':\n# LogUI.insert(END,\n# 
time.strftime('%Y-%m-%d %H:%M:%S ',\n# time.localtime(time.time())) + '第' + str(Count) + '轮开始\\n')\n# Count += 1\n# elif i == 'reject':\n# pos = CheatPos(pos, 3)\n# else:\n# pos = CheatPos(pos, 10)\n# result.append(pos)\n#\n# LogUI.see(END)\n# else:\n# result.append(None)\n# # 开始检查结果\n# for i in result:\n# if i is not None:\n# print(WindowShape[1] * 0.06)\n# print(WindowShape[0] * 0.96)\n# if i[0] < WindowShape[1] * 0.06 or i[1] > WindowShape[0] * 0.96:\n# continue\n# else:\n# Click(i)\n# if len(LogUI.get('1.0', 'end-1c')) > 6000:\n# LogUI.delete(1.0, END) # 使用 delete\n# LogUI.insert(END, ' 清空日志\\n')\n# LogUI.see(END)\n\n\nif __name__ == '__main__':\n pass\n"
] | [
[
"numpy.int32",
"numpy.float32",
"numpy.asarray"
]
] |
KeDengMS/CNTK | [
"fce86cd9581e7ba746d1ec75bbd67dd35d35d11c"
] | [
"bindings/python/examples/test/SLUHandsOn_test.py"
] | [
"# Copyright (c) Microsoft. All rights reserved.\n\n# Licensed under the MIT license. See LICENSE.md file in the project root\n# for full license information.\n# ==============================================================================\n\n# TODO: This does not work yet, need to figure out the right pattern.\n\nimport numpy as np\nfrom cntk import DeviceDescriptor\n\n# this emulates a \"main\" function for SLUHandsOn\nfrom examples.SLUHandsOn.SLUHandsOn import *\nfrom examples.SLUHandsOn.SLUHandsOn import _Infer # TODO: remove\ndef slu_hands_on():\n reader = create_reader(data_dir + \"/atis.train.ctf\")\n model = create_model(_inf=_Infer(shape=input_dim, axis=[Axis.default_batch_axis(), Axis.default_dynamic_axis()]))\n loss, metric = train(reader, model, max_epochs=1)\n return metric, loss # note: strange order\n\nTOLERANCE_ABSOLUTE = 1E-1\n\ndef test_seq_classification_error(device_id):\n from cntk.utils import cntk_device\n DeviceDescriptor.set_default_device(cntk_device(device_id))\n\n evaluation_avg, loss_avg = slu_hands_on()\n\n expected_avg = [0.15570838301766451, 0.7846451368305728]\n assert np.allclose([evaluation_avg, loss_avg], expected_avg, atol=TOLERANCE_ABSOLUTE)\n\nif __name__=='__main__':\n test_seq_classification_error(0)\n"
] | [
[
"numpy.allclose"
]
] |
backwardn/imagededup | [
"38ce34c35187ec33bd996d833293f8ee95ff8202"
] | [
"imagededup/utils/data_generator.py"
] | [
"from pathlib import PurePath\nfrom typing import Tuple, List, Callable\n\nimport numpy as np\nfrom tensorflow.keras.utils import Sequence\n\nfrom imagededup.utils.image_utils import load_image\n\n\nclass DataGenerator(Sequence):\n \"\"\"Class inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator.\n\n Attributes:\n image_dir: Path of image directory.\n batch_size: Number of images per batch.\n basenet_preprocess: Basenet specific preprocessing function.\n target_size: Dimensions that images get resized into when loaded.\n \"\"\"\n\n def __init__(\n self,\n image_dir: PurePath,\n batch_size: int,\n basenet_preprocess: Callable,\n target_size: Tuple[int, int],\n ) -> None:\n \"\"\"Init DataGenerator object.\n \"\"\"\n self.image_dir = image_dir\n self.batch_size = batch_size\n self.basenet_preprocess = basenet_preprocess\n self.target_size = target_size\n self.counter = 0\n\n self._get_image_files()\n self.on_epoch_end()\n\n def _get_image_files(self) -> None:\n self.invalid_image_idx = []\n self.image_files = sorted(\n [\n i.absolute()\n for i in self.image_dir.glob('*')\n if not i.name.startswith('.')]\n ) # ignore hidden files\n\n def on_epoch_end(self) -> None:\n \"\"\"Method called at the end of every epoch.\n \"\"\"\n self.indexes = np.arange(len(self.image_files))\n self.valid_image_files = [\n j for i, j in enumerate(self.image_files) if i not in self.invalid_image_idx\n ]\n\n def __len__(self) -> int:\n \"\"\"Number of batches in the Sequence.\"\"\"\n return int(np.ceil(len(self.image_files) / self.batch_size))\n\n def __getitem__(self, index: int) -> Tuple[np.array, np.array]:\n \"\"\"Get batch at position `index`.\n \"\"\"\n batch_indexes = self.indexes[\n index * self.batch_size : (index + 1) * self.batch_size\n ]\n batch_samples = [self.image_files[i] for i in batch_indexes]\n X = self._data_generator(batch_samples)\n return X\n\n def _data_generator(\n self, image_files: List[PurePath]\n ) -> Tuple[np.array, np.array]:\n \"\"\"Generate data from samples in specified batch.\"\"\"\n # initialize images and labels tensors for faster processing\n X = np.empty((len(image_files), *self.target_size, 3))\n\n invalid_image_idx = []\n for i, image_file in enumerate(image_files):\n # load and randomly augment image\n img = load_image(\n image_file=image_file, target_size=self.target_size, grayscale=False\n )\n\n if img is not None:\n X[i, :] = img\n\n else:\n invalid_image_idx.append(i)\n self.invalid_image_idx.append(self.counter)\n\n self.counter += 1\n\n if invalid_image_idx:\n X = np.delete(X, invalid_image_idx, axis=0)\n\n # apply basenet specific preprocessing\n # input is 4D numpy array of RGB values within [0, 255]\n X = self.basenet_preprocess(X)\n\n return X\n"
] | [
[
"numpy.delete"
]
] |
dendisuhubdy/flow_synthesizer | [
"7df51b574765c7834ebdda8a8936b2c0d363a93a"
] | [
"code/semantic.py"
] | [
"#!/usr/bin/env python3\n\n#%% Plotting\nimport matplotlib\nmatplotlib.use('agg')\nimport os\nimport time\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n# Internal imports\nfrom utils.data import load_dataset, meta_pairs\nfrom models.basic import GatedMLP, GatedCNN, construct_encoder_decoder, construct_flow, construct_regressor\nfrom evaluate import evaluate_model\n\n# Define arguments\nparser = argparse.ArgumentParser()\n# Data arguments\nparser.add_argument('--path', type=str, default='', help='')\nparser.add_argument('--test_sounds', type=str, default='', help='')\nparser.add_argument('--output', type=str, default='outputs', help='')\nparser.add_argument('--dataset', type=str, default='32par', help='')\nparser.add_argument('--data', type=str, default='mel', help='')\nparser.add_argument('--train_type', type=str, default='fixed', help='')\nparser.add_argument('--nbworkers', type=int, default=0, help='')\n# Model arguments\nparser.add_argument('--model', type=str, default='cnn', help='')\nparser.add_argument('--loss', type=str, default='mse', help='')\nparser.add_argument('--rec_loss', type=str, default='mse', help='')\nparser.add_argument('--n_classes', type=int, default=32, help='')\nparser.add_argument('--n_hidden', type=int, default=1024, help='')\nparser.add_argument('--n_layers', type=int, default=4, help='')\n# Optimization arguments\nparser.add_argument('--batch_size', type=int, default=128, help='')\nparser.add_argument('--epochs', type=int, default=200, help='')\nparser.add_argument('--eval', type=int, default=100, help='')\nparser.add_argument('--lr', type=float, default=2e-4, help='')\n# Semantic arguments\nparser.add_argument('--semantic_dim', type=int, default=-1, help='')\nparser.add_argument('--semantic_type', type=str, default='cnn', help='')\n# CUDA arguments\nparser.add_argument('--device', type=str, default='cpu', help='Device for CUDA')\nargs = parser.parse_args()\n# Track start time (for HPC)\nstart_time = time.time()\n# In case we are CPU\nargs.synthesize = False\n# Parameter checking\nif (len(args.path) == 0):\n args.path = (args.device == 'cpu') and '/Users/esling/Datasets/diva_dataset' or '/fast-2/datasets/diva_dataset/'\n args.test_sounds = (args.device == 'cpu') and '/Users/esling/Datasets/synth_testing' or '/fast-2/datasets/flow_synthesizer/synth_testing'\n args.vocal_sounds = '/fast-2/datasets/flow_synthesizer/vocal_testing'\n #args.output = (args.device == 'cpu') and 'outputs' or '/fast-1/philippe/flow_results'\nif (args.device != 'cpu'):\n args.synthesize = True\n # Enable CuDNN optimization\n torch.backends.cudnn.benchmark=True\n# Handling cuda\nargs.cuda = not args.device == 'cpu' and torch.cuda.is_available()\nargs.device = torch.device(args.device if torch.cuda.is_available() else 'cpu')\nprint('Optimization will be on ' + str(args.device) + '.')\n\n\"\"\"\n###################\nBasic definitions\n################### \n\"\"\"\nprint('[Loading dataset]')\nref_split = args.path + '/reference_split_' + args.dataset+ \"_\" +args.data + '.npz'\nif (args.train_type == 'random' or (not os.path.exists(ref_split))):\n train_loader, valid_loader, test_loader, args = load_dataset(args)\n # Take fixed batch\n fixed_data, fixed_params, fixed_meta, fixed_audio = next(iter(test_loader))\n fixed_data, fixed_params, fixed_meta, fixed_audio = fixed_data.to(args.device), fixed_params.to(args.device), fixed_meta, fixed_audio\n fixed_batch = (fixed_data, fixed_params, fixed_meta, fixed_audio)\n if (args.train_type == 
'fixed'):\n np.savez(ref_split, [train_loader, valid_loader, test_loader])\nelse:\n data = np.load(ref_split)['arr_0']\n train_loader, valid_loader, test_loader = data[0], data[1], data[2]\n fixed_data, fixed_params, fixed_meta, fixed_audio = next(iter(test_loader))\n fixed_data, fixed_params, fixed_meta, fixed_audio = fixed_data.to(args.device), fixed_params.to(args.device), fixed_meta, fixed_audio\n fixed_batch = (fixed_data, fixed_params, fixed_meta, fixed_audio)\n args.output_size = train_loader.dataset.output_size\n args.input_size = train_loader.dataset.input_size\n\n#%%\n\"\"\"\n###################\nSemantic data analysis\n################### \n\"\"\"\nfavs = [0, 1, 3, 4]\nprint('Favorite dims : ')\nprint(meta_pairs[favs[0] + 1])\nprint(meta_pairs[favs[1] + 1])\nprint(meta_pairs[favs[2] + 1])\nprint(meta_pairs[favs[3] + 1])\nprint('Analyzing basic tags properties')\nprint(meta_pairs)\n# Create basic structures for stats\nfull_params = []\nfull_meta = []\nnb_tags = torch.zeros(len(favs), 3).float()\nbatch_seen = torch.zeros(len(favs), 3).float()\n# Checking up basic features\nfor (loader, l_name) in [(train_loader, 'train'), (valid_loader, 'valid'), (test_loader, 'test')]:\n print(['Analyzing ' + l_name])\n for (_, params, meta, _) in loader:\n nb_tags += meta[:, favs].sum(dim=0).float()\n batch_seen += meta[:, favs].sum(dim=0).float().clamp(0, 1)\n full_params.append(params)\n full_meta.append(meta[:, favs])\n print('Number per batch')\n print(nb_tags / len(loader))\n print(batch_seen / len(loader))\nfull_params = torch.cat(full_params, dim=0)\nfull_meta = torch.cat(full_meta, dim=0)\n#%%\n# Now analyze global tags properties\nfull_var = full_params.std(dim=0)\nfor f in range(len(favs)):\n print('Full variance :')\n print(full_var)\n for c in range(3):\n print(meta_pairs[favs[f] + 1][c])\n # Find ids of that class\n ids = (full_meta[:, f, c] == 1)\n cur_var = (full_params[ids].std(dim=0))\n print(torch.abs(cur_var - full_var)/full_var)\n\n#%% Now try to optimize our favs (based on parameters)\n\"\"\"\nfor f in range(len(favs)):\n print('About to classify this from parameters')\n print(meta_pairs[favs[f] + 1])\n cur_tag = favs[f]\n # Create baseline classifier on parameters\n model = GatedMLP(full_params.shape[1], 2, hidden_size = 128, n_layers = 5, type_mod='normal').float().to(args.device)\n # Optimizer model\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n # Learning rate scheduler\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20, verbose=True, threshold=1e-7)\n # Create loss\n loss = nn.CrossEntropyLoss(reduction='none').float().to(args.device)\n # Vector of final losses\n losses = torch.zeros(args.epochs, 3)\n # Train the model\n for i in range(args.epochs):\n print('Epoch ' + str(i))\n # Go through the 3 sets\n for (loader, l_name, l_i) in [(train_loader, 'train', 0), (valid_loader, 'valid', 1), (test_loader, 'test', 2)]:\n if (l_name == 'train'):\n model.train()\n else:\n model.eval()\n full_loss = 0\n for (_, y, meta, _) in loader:\n meta = meta[:, favs[f]].squeeze(1)\n target = meta[:, 1].long().to(args.device)\n y, meta = y.to(args.device).float(), meta.to(args.device).float()\n # Separate examples\n loss_mask = 1 - meta[:, 2]\n observed_examples = loss_mask.eq(1)\n unknown_examples = loss_mask.eq(0)\n optimizer.zero_grad()\n out = torch.softmax(model(y), dim=1)\n b_loss = (loss_mask * loss(out, target)).sum()\n if (l_name == 'train'):\n b_loss.backward()\n optimizer.step()\n full_loss += b_loss.item()\n full_loss 
/= len(loader)\n losses[i, l_i] = full_loss\n if (l_name == 'valid'):\n scheduler.step(full_loss)\n print(losses[i, :])\n # Now save reference results\n torch.save(losses, args.output + '/models/classify_params_' + meta_pairs[0][favs[f]] + '.results')\n\"\"\"\n \n#%% Now try to optimize our favs (based on audio)\nargs.kernel = 5\nargs.dilation = 3\nfor f in range(len(favs)):\n print('About to classify this from audio')\n print(meta_pairs[favs[f] + 1])\n cur_tag = favs[f]\n # Create baseline classifier on parameters\n model = GatedCNN(args.input_size, 2, n_layers = 3, type_mod='normal', args=args).float().to(args.device)\n # Optimizer model\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n # Learning rate scheduler\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=20, verbose=True, threshold=1e-7)\n # Create loss\n loss = nn.CrossEntropyLoss(reduction='none').float().to(args.device)\n # Vector of final losses\n losses = torch.zeros(args.epochs, 3)\n # Train the model\n for i in range(args.epochs):\n print('Epoch ' + str(i))\n # Go through the 3 sets\n for (loader, l_name, l_i) in [(train_loader, 'train', 0)]:\n model.train()\n full_loss = 0\n for (x, _, meta, _) in loader:\n meta = meta[:, favs[f]].squeeze(1)\n target = meta[:, 1].long().to(args.device)\n x, meta = x.float().to(args.device), meta.float().to(args.device)\n # Separate examples\n loss_mask = 1 - meta[:, 2]\n observed_examples = loss_mask.eq(1)\n unknown_examples = loss_mask.eq(0)\n optimizer.zero_grad()\n out = torch.softmax(model(x), dim=1)\n b_loss = (loss_mask * loss(out, target)).sum()\n b_loss.backward()\n optimizer.step()\n full_loss += b_loss\n full_loss /= len(loader)\n losses[i, l_i] = full_loss\n if (l_name == 'valid'):\n scheduler.step(full_loss)\n for (loader, l_name, l_i) in [(valid_loader, 'valid', 1), (test_loader, 'test', 2)]:\n model.eval()\n full_loss = 0\n with torch.no_grad():\n for (x, _, meta, _) in loader:\n meta = meta[:, favs[f]].squeeze(1)\n target = meta[:, 1].long().to(args.device)\n x, meta = x.float().to(args.device), meta.float().to(args.device)\n # Separate examples\n loss_mask = 1 - meta[:, 2]\n observed_examples = loss_mask.eq(1)\n unknown_examples = loss_mask.eq(0)\n optimizer.zero_grad()\n out = torch.softmax(model(x), dim=1)\n b_loss = (loss_mask * loss(out, target)).sum()\n if (l_name == 'train'):\n b_loss.backward()\n optimizer.step()\n full_loss += b_loss\n full_loss /= len(loader)\n losses[i, l_i] = full_loss\n if (l_name == 'valid'):\n scheduler.step(full_loss)\n print(losses[i, :])\n # Now save reference results\n torch.save(losses, args.output + '/models/classify_audios_' + meta_pairs[0][favs[f]] + '.results')\n "
] | [
[
"numpy.load",
"numpy.savez",
"torch.save",
"torch.zeros",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"torch.abs",
"matplotlib.use",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.cat"
]
] |
JesseTG/Liar | [
"a952ebc99fe1907e0f40ec4b40a725c75e25ac01"
] | [
"liar/public/views.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Public section, including homepage and signup.\"\"\"\n\nfrom collections import Counter, defaultdict\nimport operator\nimport re\nimport itertools\nimport math\n\nfrom flask import Blueprint, flash, redirect, render_template, request, url_for\nfrom flask import current_app\n\nfrom nltk.corpus import stopwords\nimport nltk\n\nfrom liar.utils import flash_errors\nfrom liar.extensions import cache, mongo\nfrom .. import queries\n\nimport scipy\nimport pandas as pd\nfrom sklearn import manifold\nfrom scipy.interpolate import interp1d\nfrom scipy.spatial.distance import squareform, pdist\n\nfrom numpy import amax\n\nfrom colour import Color\n\n\ndef take(n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(islice(iterable, n))\n\n\ngag_list=[\"EX\",\"RP\",\"TO\",\"VB\",\"WP\",\"PRP\",\"DT\",\"VBP\",\"IN\",\"POS\",\".\",\"CD\",\"``\"]\n\ndef split_sentence(text):\n sentence=nltk.word_tokenize(text)\n tagged = nltk.pos_tag(sentence)\n tagged=[tag for tag in tagged if tag[1] not in gag_list]\n pass_list=[tag[0] for tag in tagged]\n return pass_list\n\n\n\n\ndef gen_dict(statement_text):\n words=[split_sentence(sentence) for sentence in statement_text]\n word_dict=defaultdict(int)\n for word_list in words:\n temp_dict=dict(Counter(word_list))\n word_dict={**word_dict,**temp_dict}\n return word_dict\n\nblueprint = Blueprint('public', __name__, static_folder='../static')\n\nCOLORS = tuple(map(Color, (\"#661a00\", \"#E71F28\", \"#EE9022\", \"#FFD503\", \"#C3D52D\", \"#83BF44\")))\ninterval = tuple(i/(len(COLORS) - 1) for i in range(len(COLORS)))\nred = interp1d(interval, [c.red for c in COLORS])\ngreen = interp1d(interval, [c.green for c in COLORS])\nblue = interp1d(interval, [c.blue for c in COLORS])\n\n\ndef gradient(i):\n return Color(rgb=(red(i), green(i), blue(i)))\n\[email protected](timeout=300)\ndef compute_points(combos):\n subjects = tuple(sorted(tuple(queries.subjects())))\n\n length = len(subjects)\n matrix = scipy.zeros((length, length))\n\n for c in combos:\n _id = c['_id']\n count = c['count']\n i_index = subjects.index(_id[0])\n j_index = subjects.index(_id[1])\n matrix[i_index, j_index] = count\n matrix[j_index, i_index] = count\n\n most = matrix.max()\n\n mds = manifold.MDS(n_components=2, n_init=10, max_iter=1000, eps=1e-9, dissimilarity=\"precomputed\", n_jobs=-1)\n return scipy.array(mds.fit_transform(most - matrix))\n\n\ndef viewbox(points):\n am = amax(points)\n margin = am * 0.05\n return \"{0} {1} {2} {3}\".format(-am - margin, -am - margin, am*2 + margin, am*2 + margin)\n\ndef build_data(points):\n nodes = tuple(queries.nodes())\n\n assert len(nodes) == len(points)\n # The MDS should provide one 2D point for each topic...\n\n for i in range(len(nodes)):\n node = nodes[i]\n point = points[i]\n node['x'] = point[0]\n node['y'] = point[1]\n node['radius'] = math.sqrt(node['numberOfRulings'])\n\n return { n['_id'] : n for n in nodes}\n\n\n#######################Word cloud#####################\ndef word_cloud():\n statements=mongo.db.statements\n statement_text=statements_df['statement'].tolist()\n wordcount=defaultdict(int)\n word_dict=gen_dict(statement_text)\n word_dict=dict(sorted(word_dict.items(), key=operator.itemgetter(1), reverse=True))\n return word_cloud\n#####################################################\n\ndef compute_edges(nodes, combos):\n def make_edge(combo):\n return {\n 'a': nodes[combo['_id'][0]],\n 'b': nodes[combo['_id'][1]],\n 'count': combo['count']\n }\n\n def allow_edge(edge):\n a = 
edge['a']\n b = edge['b']\n count = edge['count']\n\n return (count / a['numberOfRulings'] >= 0.05) or (count / b['numberOfRulings'] >= 0.05)\n\n return tuple(e for e in map(make_edge, combos))\n\n\[email protected]('/', methods=['GET'])\n#@cache.cached(timeout=10)\ndef home():\n combos = tuple(queries.combos())\n points = compute_points(combos)\n nodes = build_data(points)\n edges = compute_edges(nodes, combos)\n v = viewbox(points)\n\n \"\"\"Home page.\"\"\"\n return render_template('public/home.html', nodes=nodes, edges=edges, viewbox=v, gradient=gradient, colors=COLORS)\n\n\[email protected]('/about/')\ndef about():\n \"\"\"About page.\"\"\"\n return render_template('public/about.html')\n"
] | [
[
"sklearn.manifold.MDS",
"numpy.amax",
"scipy.zeros",
"scipy.interpolate.interp1d"
]
] |
Jet132/keras-tuner | [
"be682573c6f6be1e3f3e6dcac786a34ccac19d3b"
] | [
"keras_tuner/engine/base_tuner.py"
] | [
"# Copyright 2019 The KerasTuner Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"Tuner base class.\"\n\n\nimport copy\nimport os\nimport warnings\n\nimport tensorflow as tf\n\nfrom keras_tuner import utils\nfrom keras_tuner.distribute import oracle_chief\nfrom keras_tuner.distribute import oracle_client\nfrom keras_tuner.distribute import utils as dist_utils\nfrom keras_tuner.engine import hypermodel as hm_module\nfrom keras_tuner.engine import oracle as oracle_module\nfrom keras_tuner.engine import stateful\nfrom keras_tuner.engine import trial as trial_module\nfrom keras_tuner.engine import tuner_utils\n\n\nclass BaseTuner(stateful.Stateful):\n \"\"\"Tuner base class.\n\n `BaseTuner` is the base class for all Tuners, which manages the search\n loop, Oracle, logging, saving, etc. Tuners for non-Keras models can be\n created by subclassing `BaseTuner`.\n\n Args:\n oracle: Instance of Oracle class.\n hypermodel: Instance of `HyperModel` class (or callable that takes\n hyperparameters and returns a `Model` instance). It is optional\n when `Tuner.run_trial()` is overriden and does not use\n `self.hypermodel`.\n directory: A string, the relative path to the working directory.\n project_name: A string, the name to use as prefix for files saved by\n this Tuner.\n logger: Optional instance of `kerastuner.Logger` class for\n streaming logs for monitoring.\n overwrite: Boolean, defaults to `False`. If `False`, reloads an\n existing project of the same name if one is found. Otherwise,\n overwrites the project.\n\n Attributes:\n remaining_trials: Number of trials remaining, `None` if `max_trials` is\n not set. This is useful when resuming a previously stopped search.\n \"\"\"\n\n def __init__(\n self,\n oracle,\n hypermodel=None,\n directory=None,\n project_name=None,\n logger=None,\n overwrite=False,\n ):\n # Ops and metadata\n self.directory = directory or \".\"\n self.project_name = project_name or \"untitled_project\"\n if overwrite and tf.io.gfile.exists(self.project_dir):\n tf.io.gfile.rmtree(self.project_dir)\n\n if not isinstance(oracle, oracle_module.Oracle):\n raise ValueError(\n \"Expected `oracle` argument to be an instance of `Oracle`. 
\"\n f\"Received: oracle={oracle} (of type ({type(oracle)}).\"\n )\n self.oracle = oracle\n self.oracle._set_project_dir(\n self.directory, self.project_name, overwrite=overwrite\n )\n\n # Run in distributed mode.\n if dist_utils.is_chief_oracle():\n # Blocks forever.\n oracle_chief.start_server(self.oracle)\n elif dist_utils.has_chief_oracle():\n # Proxies requests to the chief oracle.\n self.oracle = oracle_client.OracleClient(self.oracle)\n\n # To support tuning distribution.\n self.tuner_id = os.environ.get(\"KERASTUNER_TUNER_ID\", \"tuner0\")\n\n self.hypermodel = hm_module.get_hypermodel(hypermodel)\n\n # Logs etc\n self.logger = logger\n self._display = tuner_utils.Display(oracle=self.oracle)\n\n self._populate_initial_space()\n\n if not overwrite and tf.io.gfile.exists(self._get_tuner_fname()):\n tf.get_logger().info(\n \"Reloading Tuner from {}\".format(self._get_tuner_fname())\n )\n self.reload()\n\n def _populate_initial_space(self):\n \"\"\"Populate initial search space for oracle.\n\n Keep this function as a subroutine for AutoKeras to override. The space\n may not be ready at the initialization of the tuner, but after seeing\n the training data.\n\n Build hypermodel multiple times to find all conditional hps. It\n generates hp values based on the not activated `conditional_scopes`\n found in the builds.\n \"\"\"\n if self.hypermodel is None:\n return\n\n hp = self.oracle.get_space()\n\n # Lists of stacks of conditions used during `explore_space()`.\n scopes_never_active = []\n scopes_once_active = []\n\n while True:\n self.hypermodel.build(hp)\n\n # Update the recored scopes.\n for conditions in hp.active_scopes:\n if conditions not in scopes_once_active:\n scopes_once_active.append(copy.deepcopy(conditions))\n if conditions in scopes_never_active:\n scopes_never_active.remove(conditions)\n\n for conditions in hp.inactive_scopes:\n if conditions not in scopes_once_active:\n scopes_never_active.append(copy.deepcopy(conditions))\n\n # All conditional scopes are activated.\n if len(scopes_never_active) == 0:\n break\n\n # Generate new values to activate new conditions.\n conditions = scopes_never_active[0]\n for condition in conditions:\n hp.values[condition.name] = condition.values[0]\n\n self.oracle.update_space(hp)\n\n def search(self, *fit_args, **fit_kwargs):\n \"\"\"Performs a search for best hyperparameter configuations.\n\n Args:\n *fit_args: Positional arguments that should be passed to\n `run_trial`, for example the training and validation data.\n **fit_kwargs: Keyword arguments that should be passed to\n `run_trial`, for example the training and validation data.\n \"\"\"\n if \"verbose\" in fit_kwargs:\n self._display.verbose = fit_kwargs.get(\"verbose\")\n self.on_search_begin()\n while True:\n trial = self.oracle.create_trial(self.tuner_id)\n if trial.status == trial_module.TrialStatus.STOPPED:\n # Oracle triggered exit.\n tf.get_logger().info(\"Oracle triggered exit\")\n break\n if trial.status == trial_module.TrialStatus.IDLE:\n # Oracle is calculating, resend request.\n continue\n\n self.on_trial_begin(trial)\n results = self.run_trial(trial, *fit_args, **fit_kwargs)\n # `results` is None indicates user updated oracle in `run_trial()`.\n if results is None:\n warnings.warn(\n \"`Tuner.run_trial()` returned None. It should return one of \"\n \"float, dict, keras.callbacks.History, or a list of one \"\n \"of these types. 
The use case of calling \"\n \"`Tuner.oracle.update_trial()` in `Tuner.run_trial()` is \"\n \"deprecated, and will be removed in the future.\",\n DeprecationWarning,\n stacklevel=2,\n )\n else:\n self.oracle.update_trial(\n trial.trial_id,\n # Convert to dictionary before calling `update_trial()`\n # to pass it from gRPC.\n tuner_utils.convert_to_metrics_dict(\n results, self.oracle.objective, \"Tuner.run_trial()\"\n ),\n )\n self.on_trial_end(trial)\n self.on_search_end()\n\n def run_trial(self, trial, *fit_args, **fit_kwargs):\n \"\"\"Evaluates a set of hyperparameter values.\"\"\"\n raise NotImplementedError\n\n def save_model(self, trial_id, model, step=0):\n \"\"\"Saves a Model for a given trial.\n\n Args:\n trial_id: The ID of the `Trial` corresponding to this Model.\n model: The trained model.\n step: Integer, for models that report intermediate results to the\n `Oracle`, the step the saved file correspond to. For example, for\n Keras models this is the number of epochs trained.\n \"\"\"\n raise NotImplementedError\n\n def load_model(self, trial):\n \"\"\"Loads a Model from a given trial.\n\n For models that report intermediate results to the `Oracle`, generally\n `load_model` should load the best reported `step` by relying of\n `trial.best_step`.\n\n Args:\n trial: A `Trial` instance, the `Trial` corresponding to the model\n to load.\n \"\"\"\n raise NotImplementedError\n\n def on_trial_begin(self, trial):\n \"\"\"Called at the beginning of a trial.\n\n Args:\n trial: A `Trial` instance.\n \"\"\"\n if self.logger:\n self.logger.register_trial(trial.trial_id, trial.get_state())\n self._display.on_trial_begin(self.oracle.get_trial(trial.trial_id))\n\n def on_trial_end(self, trial):\n \"\"\"Called at the end of a trial.\n\n Args:\n trial: A `Trial` instance.\n \"\"\"\n # Send status to Logger\n if self.logger:\n self.logger.report_trial_state(trial.trial_id, trial.get_state())\n\n self.oracle.end_trial(trial.trial_id, trial_module.TrialStatus.COMPLETED)\n self.oracle.update_space(trial.hyperparameters)\n # Display needs the updated trial scored by the Oracle.\n self._display.on_trial_end(self.oracle.get_trial(trial.trial_id))\n self.save()\n\n def on_search_begin(self):\n \"\"\"Called at the beginning of the `search` method.\"\"\"\n if self.logger:\n self.logger.register_tuner(self.get_state())\n\n def on_search_end(self):\n \"\"\"Called at the end of the `search` method.\"\"\"\n if self.logger:\n self.logger.exit()\n\n def get_best_models(self, num_models=1):\n \"\"\"Returns the best model(s), as determined by the objective.\n\n This method is for querying the models trained during the search.\n For best performance, it is recommended to retrain your Model on the\n full dataset using the best hyperparameters found during `search`,\n which can be obtained using `tuner.get_best_hyperparameters()`.\n\n Args:\n num_models: Optional number of best models to return.\n Defaults to 1.\n\n Returns:\n List of trained models sorted from the best to the worst.\n \"\"\"\n best_trials = self.oracle.get_best_trials(num_models)\n models = [self.load_model(trial) for trial in best_trials]\n return models\n\n def get_best_hyperparameters(self, num_trials=1):\n \"\"\"Returns the best hyperparameters, as determined by the objective.\n\n This method can be used to reinstantiate the (untrained) best model\n found during the search process.\n\n Example:\n\n ```python\n best_hp = tuner.get_best_hyperparameters()[0]\n model = tuner.hypermodel.build(best_hp)\n ```\n\n Args:\n num_trials: Optional number of 
`HyperParameters` objects to return.\n\n Returns:\n List of `HyperParameter` objects sorted from the best to the worst.\n \"\"\"\n return [t.hyperparameters for t in self.oracle.get_best_trials(num_trials)]\n\n def search_space_summary(self, extended=False):\n \"\"\"Print search space summary.\n\n The methods prints a summary of the hyperparameters in the search\n space, which can be called before calling the `search` method.\n\n Args:\n extended: Optional boolean, whether to display an extended summary.\n Defaults to False.\n \"\"\"\n print(\"Search space summary\")\n hp = self.oracle.get_space()\n print(\"Default search space size: %d\" % len(hp.space))\n for p in hp.space:\n config = p.get_config()\n name = config.pop(\"name\")\n print(\"%s (%s)\" % (name, p.__class__.__name__))\n print(config)\n\n def results_summary(self, num_trials=10):\n \"\"\"Display tuning results summary.\n\n The method prints a summary of the search results including the\n hyperparameter values and evaluation results for each trial.\n\n Args:\n num_trials: Optional number of trials to display. Defaults to 10.\n \"\"\"\n print(\"Results summary\")\n print(\"Results in %s\" % self.project_dir)\n print(\"Showing %d best trials\" % num_trials)\n print(\"{}\".format(self.oracle.objective))\n\n best_trials = self.oracle.get_best_trials(num_trials)\n for trial in best_trials:\n trial.summary()\n\n @property\n def remaining_trials(self):\n \"\"\"Returns the number of trials remaining.\n\n Will return `None` if `max_trials` is not set. This is useful when\n resuming a previously stopped search.\n \"\"\"\n return self.oracle.remaining_trials()\n\n def get_state(self):\n return {}\n\n def set_state(self, state):\n pass\n\n def save(self):\n \"\"\"Saves this object to its project directory.\"\"\"\n if not dist_utils.has_chief_oracle():\n self.oracle.save()\n super(BaseTuner, self).save(self._get_tuner_fname())\n\n def reload(self):\n \"\"\"Reloads this object from its project directory.\"\"\"\n if not dist_utils.has_chief_oracle():\n self.oracle.reload()\n super(BaseTuner, self).reload(self._get_tuner_fname())\n\n @property\n def project_dir(self):\n dirname = os.path.join(str(self.directory), self.project_name)\n utils.create_directory(dirname)\n return dirname\n\n def get_trial_dir(self, trial_id):\n dirname = os.path.join(str(self.project_dir), \"trial_\" + str(trial_id))\n utils.create_directory(dirname)\n return dirname\n\n def _get_tuner_fname(self):\n return os.path.join(str(self.project_dir), str(self.tuner_id) + \".json\")\n"
] | [
[
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.rmtree",
"tensorflow.get_logger"
]
] |
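The `BaseTuner` file above notes that tuners for non-Keras workflows can be built by subclassing it and overriding `run_trial`, which may return a plain metric value. A minimal sketch of that pattern, assuming the same keras-tuner version as the dumped file; the `RandomSearchOracle` construction, the objective name `score`, and the placeholder scoring formula are illustrative assumptions, not code from the repository:

```python
from keras_tuner.engine.base_tuner import BaseTuner
from keras_tuner.engine.oracle import Objective
from keras_tuner.tuners.randomsearch import RandomSearchOracle


class ScalarTuner(BaseTuner):
    """Toy non-Keras tuner: no hypermodel, `run_trial` just returns a float."""

    def run_trial(self, trial, *args, **kwargs):
        hp = trial.hyperparameters
        c = hp.Float("c", 1e-3, 1e3, sampling="log")
        # A float is one of the return types `search()` accepts from run_trial.
        return 1.0 / (1.0 + abs(c - 1.0))


oracle = RandomSearchOracle(objective=Objective("score", "max"), max_trials=5)
tuner = ScalarTuner(oracle=oracle, project_name="scalar_demo", overwrite=True)
tuner.search()
print(tuner.get_best_hyperparameters()[0].values)
```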
choderalab/gin | [
"9082431d8b664699a898c1e2fa490a18737d6e2d"
] | [
"lime/scripts/qc_datasets/ht_off_opt.py"
] | [
"# =============================================================================\n# imports\n# =============================================================================\nimport os\nimport sys\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\ntf.autograph.set_verbosity(3)\nfrom sklearn import metrics\nimport gin\nimport lime\nimport pandas as pd\nimport numpy as np\n# import qcportal as ptl\n# client = ptl.FractalClient()\n\n\nTRANSLATION = {\n 6: 0,\n 7: 1,\n 8: 2,\n 16: 3,\n 15: 4,\n 9: 5,\n 17: 6,\n 35: 7,\n 53: 8,\n 1: 9\n}\n\n\n# ds_qc = client.get_collection(\"OptimizationDataset\", \"OpenFF Full Optimization Benchmark 1\")\n# ds_name = tf.data.Dataset.from_tensor_slices(list(ds_qc.data.records))\n\ndef data_generator():\n for record_name in list(ds_qc.data.records):\n r = ds_qc.get_record(record_name, specification='default')\n if r is not None:\n traj = r.get_trajectory()\n if traj is not None:\n for snapshot in traj:\n energy = tf.convert_to_tensor(\n snapshot.properties.scf_total_energy,\n dtype=tf.float32)\n\n mol = snapshot.get_molecule()\n\n atoms = tf.convert_to_tensor(\n [TRANSLATION[atomic_number] for atomic_number in mol.atomic_numbers],\n dtype=tf.int64)\n\n adjacency_map = tf.tensor_scatter_nd_update(\n tf.zeros(\n (\n tf.shape(atoms, tf.int64)[0],\n tf.shape(atoms, tf.int64)[0]\n ),\n dtype=tf.float32),\n tf.convert_to_tensor(\n np.array(mol.connectivity)[:, :2],\n dtype=tf.int64),\n tf.convert_to_tensor(\n np.array(mol.connectivity)[:, 2],\n dtype=tf.float32))\n\n features = gin.probabilistic.featurization.featurize_atoms(\n atoms, adjacency_map)\n\n xyz = tf.convert_to_tensor(\n mol.geometry,\n dtype=tf.float32)\n\n jacobian = tf.convert_to_tensor(\n snapshot.return_result,\n dtype=tf.float32)\n\n atoms = tf.concat(\n [\n features,\n xyz,\n jacobian\n ],\n axis=1)\n\n yield(atoms, adjacency_map, energy)\n\n\ndef data_loader(idx):\n atoms_path = 'data/atoms/' + str(idx.numpy()) + '.npy'\n adjacency_map_path = 'data/adjacency_map/' + str(idx.numpy()) + '.npy'\n energy_path = 'data/energy/' + str(idx.numpy()) + '.npy'\n\n atoms = tf.convert_to_tensor(\n np.load(atoms_path))\n\n adjacency_map = tf.convert_to_tensor(\n np.load(adjacency_map_path))\n\n energy = tf.convert_to_tensor(\n np.load(energy_path))\n\n return atoms, adjacency_map, energy\n\n\n'''\nds = tf.data.Dataset.from_generator(\n data_generator,\n (tf.float32, tf.float32, tf.float32))\n'''\n\nds_path = tf.data.Dataset.from_tensor_slices(list(range(5000)))\n\nds = ds_path.map(\n lambda idx: tf.py_function(\n data_loader,\n [idx],\n [tf.float32, tf.float32, tf.float32]))\n\n\nds = ds.shuffle(100000, seed=2666)\n\n\nds = gin.probabilistic.gn.GraphNet.batch(\n ds, 128, feature_dimension=18, atom_dtype=tf.float32).cache(\n str(os.getcwd()) + '/temp')\n\nn_batches = int(gin.probabilistic.gn.GraphNet.get_number_batches(ds))\nn_te = n_batches // 10\n\nds_te = ds.take(n_te)\nds_vl = ds.skip(n_te).take(n_te)\nds_tr = ds.skip(2 * n_te)\n\nconfig_space = {\n 'D_V': [16, 32, 64, 128, 256],\n 'D_E': [16, 32, 64, 128, 256],\n 'D_A': [16, 32, 64, 128, 256],\n 'D_T': [16, 32, 64, 128, 256],\n 'D_U': [16, 32, 64, 128, 256],\n\n\n 'phi_e_0': [32, 64, 128],\n 'phi_e_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'phi_e_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'phi_v_0': [32, 64, 128],\n 'phi_v_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'phi_v_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'phi_a_0': [32, 64, 128],\n 'phi_a_a_0': ['elu', 'relu', 
'tanh', 'sigmoid'],\n 'phi_a_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'phi_t_0': [32, 64, 128],\n 'phi_t_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'phi_t_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'phi_u_0': [32, 64, 128],\n 'phi_u_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'phi_u_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'f_e_0': [32, 64, 128],\n 'f_e_a_0': ['elu', 'relu', 'tanh', 'sigmoid'],\n 'f_e_a_1': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'f_r': [32, 64, 128],\n 'f_r_a': ['elu', 'relu', 'tanh', 'sigmoid'],\n\n 'learning_rate': [1e-5, 1e-4, 1e-3]\n\n}\n\n# @tf.function\ndef flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map, coordinates, atom_in_mol,\n bond_in_mol, angle_in_mol, torsion_in_mol, attr_in_mol):\n\n\n per_mol_mask = tf.stop_gradient(tf.matmul(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32),\n name='per_mol_mask_0'),\n tf.transpose(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32),\n name='per_mol_mask_1'))))\n\n bond_idxs, angle_idxs, torsion_idxs = gin.probabilistic.gn_hyper\\\n .get_geometric_idxs(atoms, adjacency_map)\n\n is_bond = tf.stop_gradient(tf.greater(\n adjacency_map,\n tf.constant(0, dtype=tf.float32)))\n\n distance_matrix = gin.deterministic.md.get_distance_matrix(\n coordinates)\n\n bond_distances = tf.boolean_mask(\n distance_matrix,\n is_bond,\n name='bond_mask')\n\n angle_angles = gin.deterministic.md.get_angles_cos(\n coordinates,\n angle_idxs)\n\n torsion_dihedrals = gin.deterministic.md.get_dihedrals_cos(\n coordinates,\n torsion_idxs)\n\n y_e_0, y_e_1 = tf.split(y_e, 2, 1)\n y_e_0 = tf.squeeze(y_e_0)\n y_e_1 = tf.squeeze(y_e_1)\n u_bond = tf.math.multiply(\n y_e_1,\n tf.math.pow(\n tf.math.subtract(\n bond_distances,\n tf.pow(\n y_e_0,\n tf.constant(2, dtype=tf.float32))),\n tf.constant(2, dtype=tf.float32)))\n\n\n y_a_0, y_a_1 = tf.split(y_a, 2, 1)\n y_a_0 = tf.squeeze(y_a_0)\n y_a_1 = tf.squeeze(y_a_1)\n u_angle = tf.math.multiply(\n y_a_1,\n tf.math.pow(\n tf.math.subtract(\n angle_angles,\n tf.tanh(\n y_a_1)),\n tf.constant(2, dtype=tf.float32)))\n\n y_t_0, y_t_1 = tf.split(y_t, 2, 1)\n y_t_0 = tf.squeeze(y_t_0)\n y_t_1 = tf.squeeze(y_t_1)\n u_dihedral = tf.math.multiply(\n y_t_1,\n tf.math.pow(\n tf.math.subtract(\n torsion_dihedrals,\n tf.tanh(\n y_t_0)),\n tf.constant(2, dtype=tf.float32)))\n\n u_pair_mask = tf.linalg.band_part(\n tf.nn.relu(\n tf.subtract(\n tf.subtract(\n per_mol_mask,\n adjacency_map),\n tf.eye(\n tf.shape(per_mol_mask)[0]))),\n 0, -1)\n\n _distance_matrix = tf.where(\n tf.greater(\n u_pair_mask,\n tf.constant(0, dtype=tf.float32)),\n distance_matrix,\n tf.ones_like(distance_matrix))\n\n _distance_matrix_inverse = tf.multiply(\n u_pair_mask,\n tf.pow(\n tf.math.add(\n _distance_matrix,\n tf.constant(1e-2, dtype=tf.float32)),\n tf.constant(-1, dtype=tf.float32)))\n\n y_pair_0, y_pair_1, y_pair_2 = tf.split(y_pair, 3, 2)\n y_pair_0 = tf.squeeze(y_pair_0)\n y_pair_1 = tf.squeeze(y_pair_1)\n y_pair_2 = tf.squeeze(y_pair_2)\n\n u_pair = tf.reduce_sum(\n [\n tf.multiply(\n y_pair_0,\n tf.pow(\n _distance_matrix_inverse,\n tf.constant(2, dtype=tf.float32))),\n tf.multiply(\n y_pair_1,\n tf.pow(\n _distance_matrix_inverse,\n tf.constant(6, dtype=tf.float32))),\n tf.multiply(\n y_pair_2,\n tf.pow(\n _distance_matrix_inverse,\n tf.constant(12, dtype=tf.float32)))\n ],\n axis=0)\n\n u_bond_tot = tf.matmul(\n tf.transpose(\n tf.where(\n bond_in_mol,\n tf.ones_like(bond_in_mol, 
dtype=tf.float32),\n tf.zeros_like(bond_in_mol, dtype=tf.float32))),\n tf.expand_dims(\n u_bond,\n axis=1))\n\n u_angle_tot = tf.matmul(\n tf.transpose(\n tf.where(\n angle_in_mol,\n tf.ones_like(angle_in_mol, dtype=tf.float32),\n tf.zeros_like(angle_in_mol, dtype=tf.float32))),\n tf.expand_dims(\n u_angle,\n axis=1))\n\n u_dihedral_tot = tf.matmul(\n tf.transpose(\n tf.where(\n torsion_in_mol,\n tf.ones_like(torsion_in_mol, dtype=tf.float32),\n tf.zeros_like(torsion_in_mol, dtype=tf.float32))),\n tf.expand_dims(\n u_dihedral,\n axis=1))\n\n u_pair_tot = tf.boolean_mask(\n tf.matmul(\n tf.transpose(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32))),\n tf.reduce_sum(\n u_pair,\n axis=1,\n keepdims=True)),\n attr_in_mol)\n\n u_tot = tf.squeeze(\n u_bond_tot + u_angle_tot + u_dihedral_tot + u_pair_tot)\n\n return u_tot\n\ndef init(point):\n global gn\n global optimizer\n\n class f_v(tf.keras.Model):\n \"\"\" Featurization of nodes.\n Here we simply featurize atoms using one-hot encoding.\n\n \"\"\"\n def __init__(self, units=point['D_V']):\n super(f_v, self).__init__()\n self.d = tf.keras.layers.Dense(units)\n\n # @tf.function\n def call(self, x):\n return self.d(x)\n\n class f_r(tf.keras.Model):\n \"\"\" Readout function\n \"\"\"\n def __init__(self, units=point['f_r'], f_r_a=point['f_r_a']):\n super(f_r, self).__init__()\n self.d_k = tf.keras.layers.Dense(units, activation='relu')\n self.d_q = tf.keras.layers.Dense(units, activation='relu')\n self.d_pair_0 = tf.keras.layers.Dense(units, activation='relu')\n self.d_pair_1 = tf.keras.layers.Dense(3,\n kernel_initializer='random_uniform',\n activity_regularizer=tf.keras.regularizers.l2(1e-5))\n\n self.d_e_1 = tf.keras.layers.Dense(2,\n kernel_initializer='random_uniform')\n\n self.d_e_0 = tf.keras.layers.Dense(units, activation='relu')\n\n self.d_a_1 = tf.keras.layers.Dense(2,\n kernel_initializer='random_uniform')\n self.d_a_0 = tf.keras.layers.Dense(units, activation='relu')\n\n self.d_t_1 = tf.keras.layers.Dense(2,\n kernel_initializer='random_uniform')\n self.d_t_0 = tf.keras.layers.Dense(units, activation='relu')\n\n self.d_e0_0 = lime.nets.for_gn.ConcatenateThenFullyConnect((units,\n 'relu', units, 'relu'))\n\n self.d_e0_1 = tf.keras.layers.Dense(1)\n\n self.units = units\n self.d_v = point['D_V']\n self.d_e = point['D_E']\n self.d_a = point['D_A']\n self.d_t = point['D_T']\n self.d_u = point['D_U']\n\n # @tf.function\n def call(self, h_v, h_e, h_a, h_t, h_u,\n h_v_history, h_e_history, h_a_history,\n h_t_history, h_u_history,\n atom_in_mol, bond_in_mol, angle_in_mol, torsion_in_mol,\n adjacency_map, coordinates):\n\n\n h_e_history.set_shape([None, 6, self.d_e])\n h_u_history.set_shape([None, 6, self.d_u])\n h_v_history.set_shape([None, 6, self.d_v])\n\n h_e_bar_history = tf.reduce_sum( # (n_mols, t, d_e)\n tf.multiply(\n tf.tile(\n tf.expand_dims(\n tf.expand_dims(\n tf.where( # (n_bonds, n_mols)\n tf.boolean_mask(\n bond_in_mol,\n tf.reduce_any(\n bond_in_mol,\n axis=1),\n axis=0),\n tf.ones_like(\n tf.boolean_mask(\n bond_in_mol,\n tf.reduce_any(\n bond_in_mol,\n axis=1),\n axis=0),\n dtype=tf.float32),\n tf.zeros_like(\n tf.boolean_mask(\n bond_in_mol,\n tf.reduce_any(\n bond_in_mol,\n axis=1),\n axis=0),\n dtype=tf.float32)),\n 2),\n 3),\n [\n 1,\n 1,\n tf.shape(h_e_history)[1],\n tf.shape(h_e)[1]\n ]),\n tf.tile( # (n_bonds, n_mols, t, d_e)\n tf.expand_dims(\n h_e_history, # (n_bonds, t, d_e)\n 1),\n [1, tf.shape(bond_in_mol)[1], 1, 1])),\n axis=0)\n\n 
h_v_bar_history = tf.reduce_sum( # (n_mols, t, d_e)\n tf.multiply(\n tf.tile(\n tf.expand_dims(\n tf.expand_dims(\n tf.where( # (n_atoms, n_mols)\n atom_in_mol,\n tf.ones_like(\n atom_in_mol,\n dtype=tf.float32),\n tf.zeros_like(\n atom_in_mol,\n dtype=tf.float32)),\n 2),\n 3),\n [1, 1, tf.shape(h_v_history)[1], tf.shape(h_v)[1]]),\n tf.tile( # (n_atoms, n_mols, t, d_e)\n tf.expand_dims(\n h_v_history, # (n_atoms, t, d_e)\n 1),\n [1, tf.shape(atom_in_mol)[1], 1, 1])),\n axis=0)\n\n e0 = tf.squeeze(self.d_e0_1(self.d_e0_0(\n tf.reshape(\n h_v_bar_history,\n [-1, 6 * self.d_v]),\n tf.reshape(\n h_e_bar_history,\n [-1, 6 * self.d_e]),\n tf.reshape(\n h_u_history,\n [-1, 6 * self.d_u]))))\n\n adjacency_map_full = tf.math.add(\n tf.transpose(\n adjacency_map),\n adjacency_map)\n\n per_mol_mask = tf.matmul(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32)),\n tf.transpose(\n tf.where(\n atom_in_mol,\n tf.ones_like(atom_in_mol, dtype=tf.float32),\n tf.zeros_like(atom_in_mol, dtype=tf.float32))))\n\n # get distance matrix\n distance = gin.deterministic.md.get_distance_matrix(coordinates)\n\n distance = tf.expand_dims(\n distance,\n 2)\n\n n_atoms = tf.shape(distance, tf.int64)[0]\n\n # (n_atoms, n_atoms, units)\n k = tf.multiply(\n tf.tile(\n tf.expand_dims(\n per_mol_mask,\n 2),\n [1, 1, self.units]),\n tf.tile(\n tf.expand_dims(\n self.d_k(h_v),\n 1),\n [1, n_atoms, 1]))\n\n # (n_atoms, n_atoms, units)\n q = tf.multiply(\n tf.tile(\n tf.expand_dims(\n per_mol_mask,\n 2),\n [1, 1, self.units]),\n tf.tile(\n tf.expand_dims(\n self.d_q(h_v),\n 0),\n [n_atoms, 1, 1]))\n\n h_pair = tf.concat(\n [\n k,\n q,\n ],\n axis=2)\n\n h_pair = tf.math.multiply(\n tf.tile(\n tf.expand_dims(\n tf.math.multiply(\n tf.math.subtract(\n per_mol_mask,\n tf.eye(\n tf.shape(per_mol_mask)[0])),\n tf.where(\n tf.equal(\n adjacency_map_full,\n tf.constant(0, dtype=tf.float32)),\n tf.ones_like(adjacency_map),\n tf.zeros_like(adjacency_map))),\n 2),\n [1, 1, 3]),\n self.d_pair_1(self.d_pair_0(h_pair)))\n\n y_pair = h_pair\n\n y_a = self.d_a_1(\n self.d_a_0(\n tf.reshape(\n h_a_history,\n [\n tf.shape(h_a_history)[0],\n 6 * self.d_a\n ])))\n\n y_e = self.d_e_1(\n self.d_e_0(\n tf.reshape(\n h_e_history,\n [\n tf.shape(h_e_history)[0],\n 6 * self.d_e\n ])))\n\n\n y_t = self.d_t_1(\n self.d_t_0(\n tf.reshape(\n h_t_history,\n [\n tf.shape(h_t_history)[0],\n 6 * self.d_t\n ])))\n\n return e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol\n\n\n gn = gin.probabilistic.gn_hyper.HyperGraphNet(\n f_e=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['f_e_0'], 'elu', point['D_E'], 'tanh')),\n f_a=tf.keras.layers.Dense(point['D_A'], activation='tanh'),\n f_t=tf.keras.layers.Dense(point['D_T'], activation='tanh'),\n f_v=f_v(),\n f_u=(lambda atoms, adjacency_map, batched_attr_in_mol: \\\n tf.tile(\n tf.zeros((1, point['D_U'])),\n [\n tf.math.count_nonzero(batched_attr_in_mol),\n 1\n ]\n )),\n phi_e=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_e_0'], point['phi_e_a_0'], point['D_E'],\n point['phi_e_a_1'])),\n phi_u=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_u_0'], point['phi_u_a_0'], point['D_U'],\n point['phi_u_a_1'])),\n phi_v=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_v_0'], point['phi_v_a_0'], point['D_V'],\n point['phi_v_a_1'])),\n phi_a=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_a_0'], point['phi_a_a_0'], point['D_A'],\n point['phi_a_a_1'])),\n 
phi_t=lime.nets.for_gn.ConcatenateThenFullyConnect(\n (point['phi_t_0'], point['phi_t_a_0'], point['D_T'],\n point['phi_t_a_1'])),\n f_r=f_r(),\n repeat=5)\n\n optimizer = tf.keras.optimizers.Adam(1e-4)\n\ndef obj_fn(point):\n point = dict(zip(config_space.keys(), point))\n init(point)\n\n for dummy_idx in range(10):\n for atoms_, adjacency_map, atom_in_mol, bond_in_mol, u, attr_in_mol in ds_tr:\n atoms = atoms_[:, :12]\n coordinates = tf.Variable(atoms_[:, 12:15])\n jacobian = atoms_[:, 15:]\n with tf.GradientTape() as tape:\n e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol = gn(\n atoms, adjacency_map, coordinates, atom_in_mol, attr_in_mol)\n\n\n with tf.GradientTape() as tape1:\n\n u_hat = flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map,\n coordinates, atom_in_mol, bond_in_mol, angle_in_mol,\n torsion_in_mol, attr_in_mol)\n\n jacobian_hat = tape1.gradient(u_hat, coordinates)\n\n jacobian_hat = tf.boolean_mask(\n jacobian_hat,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n jacobian = tf.boolean_mask(\n jacobian,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n u = tf.boolean_mask(\n u,\n attr_in_mol)\n\n loss = tf.math.add(\n tf.reduce_sum(\n tf.keras.losses.MSE(\n tf.math.log(\n tf.norm(\n jacobian,\n axis=1)),\n tf.math.log(\n tf.norm(\n jacobian_hat,\n axis=1)))),\n tf.reduce_sum(\n tf.losses.cosine_similarity(\n jacobian,\n jacobian_hat,\n axis=1)))\n\n\n variables = gn.variables\n grad = tape.gradient(loss, variables)\n\n # if not tf.reduce_any([tf.reduce_any(tf.math.is_nan(_grad)) for _grad in grad]).numpy():\n\n optimizer.apply_gradients(\n zip(grad, variables))\n\n del loss\n del coordinates\n del tape\n del tape1\n\n y_true_tr = -1. * tf.ones([1, ], dtype=tf.float32)\n y_pred_tr = -1. * tf.ones([1, ], dtype=tf.float32)\n\n y_true_vl = -1. * tf.ones([1, ], dtype=tf.float32)\n y_pred_vl = -1. * tf.ones([1, ], dtype=tf.float32)\n\n y_true_te = -1. * tf.ones([1, ], dtype=tf.float32)\n y_pred_te = -1. 
* tf.ones([1, ], dtype=tf.float32)\n\n for atoms_, adjacency_map, atom_in_mol, bond_in_mol, u, attr_in_mol in ds_tr:\n atoms = atoms_[:, :12]\n coordinates = tf.Variable(atoms_[:, 12:15])\n jacobian = atoms_[:, 15:]\n with tf.GradientTape() as tape:\n e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol = gn(\n atoms, adjacency_map, coordinates, atom_in_mol, attr_in_mol)\n\n\n with tf.GradientTape() as tape1:\n\n u_hat = flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map,\n coordinates, atom_in_mol, bond_in_mol, angle_in_mol,\n torsion_in_mol, attr_in_mol)\n\n jacobian_hat = tape1.gradient(u_hat, coordinates)\n\n jacobian_hat = tf.boolean_mask(\n jacobian_hat,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n jacobian = tf.boolean_mask(\n jacobian,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n y_true_tr = tf.concat([y_true_tr, tf.reshape(jacobian, [-1])], axis=0)\n y_pred_tr = tf.concat([y_pred_tr, tf.reshape(jacobian_hat, [-1])], axis=0)\n\n for atoms_, adjacency_map, atom_in_mol, bond_in_mol, u, attr_in_mol in ds_te:\n atoms = atoms_[:, :12]\n coordinates = tf.Variable(atoms_[:, 12:15])\n jacobian = atoms_[:, 15:]\n with tf.GradientTape() as tape:\n e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol = gn(\n atoms, adjacency_map, coordinates, atom_in_mol, attr_in_mol)\n\n\n with tf.GradientTape() as tape1:\n\n u_hat = flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map,\n coordinates, atom_in_mol, bond_in_mol, angle_in_mol,\n torsion_in_mol, attr_in_mol)\n\n jacobian_hat = tape1.gradient(u_hat, coordinates)\n\n jacobian_hat = tf.boolean_mask(\n jacobian_hat,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n jacobian = tf.boolean_mask(\n jacobian,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n y_true_te = tf.concat([y_true_te, tf.reshape(jacobian, [-1])], axis=0)\n y_pred_te = tf.concat([y_pred_te, tf.reshape(jacobian_hat, [-1])], axis=0)\n\n for atoms_, adjacency_map, atom_in_mol, bond_in_mol, u, attr_in_mol in ds_vl:\n atoms = atoms_[:, :12]\n coordinates = tf.Variable(atoms_[:, 12:15])\n jacobian = atoms_[:, 15:]\n with tf.GradientTape() as tape:\n e0, y_e, y_a, y_t, y_pair, bond_in_mol, angle_in_mol, torsion_in_mol = gn(\n atoms, adjacency_map, coordinates, atom_in_mol, attr_in_mol)\n\n\n with tf.GradientTape() as tape1:\n\n u_hat = flow(y_e, y_a, y_t, y_pair, atoms, adjacency_map,\n coordinates, atom_in_mol, bond_in_mol, angle_in_mol,\n torsion_in_mol, attr_in_mol)\n\n jacobian_hat = tape1.gradient(u_hat, coordinates)\n\n jacobian_hat = tf.boolean_mask(\n jacobian_hat,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n jacobian = tf.boolean_mask(\n jacobian,\n tf.reduce_any(\n atom_in_mol,\n axis=1))\n\n y_true_vl = tf.concat([y_true_vl, tf.reshape(jacobian, [-1])], axis=0)\n y_pred_vl = tf.concat([y_pred_vl, tf.reshape(jacobian_hat, [-1])], axis=0)\n\n try:\n r2_tr = metrics.r2_score(y_true_tr[1:].numpy(), y_pred_tr[1:].numpy())\n rmse_tr = metrics.mean_squared_error(y_true_tr[1:].numpy(), y_pred_tr[1:].numpy())\n\n r2_vl = metrics.r2_score(y_true_vl[1:].numpy(), y_pred_vl[1:].numpy())\n rmse_vl = metrics.mean_squared_error(y_true_vl[1:].numpy(), y_pred_vl[1:].numpy())\n\n r2_te = metrics.r2_score(y_true_te[1:].numpy(), y_pred_te[1:].numpy())\n rmse_te = metrics.mean_squared_error(y_true_te[1:].numpy(), y_pred_te[1:].numpy())\n\n\n np.save('y_true_tr', y_true_tr[1:].numpy())\n np.save('y_pred_tr', y_pred_tr[1:].numpy())\n np.save('y_true_te', y_true_te[1:].numpy())\n np.save('y_pred_te', y_pred_te[1:].numpy())\n np.save('y_true_vl', y_true_vl[1:].numpy())\n 
np.save('y_pred_vl', y_pred_vl[1:].numpy())\n\n print(tf.stack([y_true_tr, y_pred_tr], axis=1))\n\n print(point, flush=True)\n print(r2_tr, flush=True)\n print(rmse_tr, flush=True)\n print(r2_vl, flush=True)\n print(rmse_vl, flush=True)\n print(r2_te, flush=True)\n print(rmse_te, flush=True)\n\n gn.save_weights('gn.h5')\n\n return rmse_vl\n\n except:\n print('nan')\n return None\n\nlime.optimize.dummy.optimize(obj_fn, config_space.values(), 1000)\n"
] | [
[
"tensorflow.keras.optimizers.Adam",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.squeeze",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.Variable",
"tensorflow.reduce_sum",
"tensorflow.split",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.autograph.set_verbosity",
"numpy.load",
"tensorflow.norm",
"tensorflow.stack",
"tensorflow.shape",
"tensorflow.subtract",
"tensorflow.ones_like",
"tensorflow.tanh",
"tensorflow.expand_dims",
"tensorflow.zeros_like",
"tensorflow.keras.regularizers.l2",
"tensorflow.boolean_mask",
"tensorflow.math.count_nonzero",
"tensorflow.reduce_any",
"tensorflow.losses.cosine_similarity",
"tensorflow.zeros",
"tensorflow.py_function",
"numpy.array"
]
] |
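The `obj_fn` loop in the script above nests two `tf.GradientTape`s: the inner tape differentiates the predicted energy with respect to atomic coordinates to obtain predicted forces, and the outer tape differentiates the force-matching loss with respect to the model weights. A stripped-down sketch of that pattern, with a toy dense network standing in for the graph net; the model, tensor shapes, and random reference gradients are assumptions for illustration only:

```python
import tensorflow as tf

# Toy stand-in for the graph net: maps coordinates to a scalar energy.
model = tf.keras.Sequential(
    [tf.keras.layers.Dense(16, activation="tanh"), tf.keras.layers.Dense(1)]
)
optimizer = tf.keras.optimizers.Adam(1e-4)

coordinates = tf.Variable(tf.random.normal([8, 3]))   # one 8-atom "molecule"
reference_jacobian = tf.random.normal([8, 3])          # stand-in for QM gradients

with tf.GradientTape() as tape:
    with tf.GradientTape() as tape1:
        u_hat = tf.reduce_sum(model(coordinates))       # predicted energy
    # Inner tape: d(energy)/d(coordinates), i.e. the predicted force field.
    jacobian_hat = tape1.gradient(u_hat, coordinates)
    # Outer tape records the loss; only the cosine term from the script is kept here.
    loss = tf.reduce_sum(
        tf.losses.cosine_similarity(reference_jacobian, jacobian_hat, axis=1)
    )

grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
```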
jacksonwalters/wordchef | [
"3edc2f8d5cbbaa064245ebaae923da68cae6556f"
] | [
"gen_vocab_vecs.py"
] | [
"import spacy, numpy, random, pickle, pandas, sys\nimport sklearn.neighbors as nbs\nfrom spacy.lookups import load_lookups\n\n\nMIN_PROB = -18\n\n#load NLP tool spaCy\nprint(\"Loading spaCy...\")\nnlp=spacy.load(\"en_core_web_lg\")\nprint(\"spaCy loaded.\")\n\n#load lexeme probability table\nlookups = load_lookups(\"en\", [\"lexeme_prob\"])\nnlp.vocab.lookups.add_table(\"lexeme_prob\", lookups.get_table(\"lexeme_prob\"))\n\n#get plaintext words as list from spacy vocab. ensure they have wordvector, are lowercase, and aren't too rare\nprint(\"Total number of words in spaCy vocab=\",len(nlp.vocab.strings))\nprint(\"Getting words...\")\nwords = [word for word in nlp.vocab.strings if nlp.vocab.has_vector(word) and word.islower() and nlp.vocab[word].prob >= MIN_PROB]\nprint(\"Retrieved \",len(words),\"lowercase words with vectors and prob >=.\",MIN_PROB)\n\n#get wordvectors for all words as numpy array\nprint(\"Total number of wordvectors=\",len(nlp.vocab.vectors))\nprint(\"Getting wordvectors...\")\nwordvecs = numpy.array([nlp.vocab.get_vector(word) for word in words])\nprint(\"Retrieved=\",len(wordvecs),\"wordvectors.\")\n\n#ensure the list of words corresponds to the list of wordvectors\nassert len(words) == len(wordvecs)\nspot_check = random.choice(range(0,len(words)))\nassert numpy.array_equal(nlp(words[spot_check]).vector,wordvecs[spot_check])\nprint(\"Spot check passed.\")\n\n#pickle the entire vocab\n#pickle.HIGHEST_PROTOCOL depends on Python version\nwith open('vocab.pkl', 'wb') as f:\n\t\tpickle.dump(words,f,protocol=4)\nprint(\"Dumped vocab words to pickle file vocab.pkl\")\n\n#place all wordvectors in balltree, and pickle entire tree\ntree = nbs.BallTree(wordvecs)\nwith open('balltree.pkl', 'wb') as f:\n\t\tpickle.dump(tree,f,protocol=4)\nprint(\"Dumped wordvector BallTree to pickle file balltree.pkl\")\n\n#create word:vector dict and pickle it\ndict = dict(zip(words,wordvecs))\nwith open('dict.pkl', 'wb') as f:\n\t\tpickle.dump(dict,f,protocol=4)\nprint(\"Dumped word2vec dictionary in dict.pkl\")\n"
] | [
[
"sklearn.neighbors.BallTree"
]
] |
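The script above only writes the artifacts (`vocab.pkl`, `balltree.pkl`, `dict.pkl`); the natural consumption pattern is a nearest-neighbour lookup against the pickled `BallTree`. A minimal sketch, assuming the three pickle files exist in the working directory and that the example query word survived the probability filter:

```python
import pickle

with open("vocab.pkl", "rb") as f:
    words = pickle.load(f)
with open("balltree.pkl", "rb") as f:
    tree = pickle.load(f)
with open("dict.pkl", "rb") as f:
    word2vec = pickle.load(f)

query = word2vec["apple"].reshape(1, -1)   # 300-d en_core_web_lg vector
dist, ind = tree.query(query, k=5)         # BallTree.query returns (distances, indices)
print([words[i] for i in ind[0]])          # nearest words in vector space
```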
naik-aakash/pymatgen | [
"394e0d71bf1d1025fcf75498cbb16aa3f41ce78c"
] | [
"pymatgen/analysis/interfaces/coherent_interfaces.py"
] | [
"# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\"\"\"\nThis module provides classes to store, generate, and manipulate material interfaces.\n\"\"\"\n\nfrom itertools import product\nfrom typing import Iterator, Optional, Tuple, Union\n\nimport numpy as np\nfrom scipy.linalg import polar\n\nfrom pymatgen.analysis.elasticity.strain import Deformation\nfrom pymatgen.analysis.interfaces.zsl import ZSLGenerator, fast_norm\nfrom pymatgen.core import Structure\nfrom pymatgen.core.interface import Interface, label_termination\nfrom pymatgen.core.surface import SlabGenerator\n\nVector3D = Tuple[float, float, float]\nMatrix3D = Tuple[Vector3D, Vector3D, Vector3D]\nMatrix2D = Tuple[Vector3D, Vector3D]\n\n\nclass CoherentInterfaceBuilder:\n \"\"\"\n This class constructs the coherent interfaces between two crystalline slabs\n Coherency is defined by matching lattices not sub-planes.\n \"\"\"\n\n def __init__(\n self,\n substrate_structure: Structure,\n film_structure: Structure,\n film_miller: Tuple[int, int, int],\n substrate_miller: Tuple[int, int, int],\n zslgen: Optional[ZSLGenerator] = None,\n ):\n \"\"\"\n Args:\n substrate_structure: structure of substrate\n film_structure: structure of film\n film_miller: miller index of the film layer\n substrate_miller: miller index for the substrate layer\n zslgen: BiDirectionalZSL if you want custom lattice matching tolerances for coherency\n \"\"\"\n\n # Bulk structures\n self.substrate_structure = substrate_structure\n self.film_structure = film_structure\n self.film_miller = film_miller\n self.substrate_miller = substrate_miller\n self.zslgen = zslgen or ZSLGenerator(bidirectional=True)\n\n self._find_matches()\n self._find_terminations()\n\n def _find_matches(self) -> None:\n \"\"\"\n Finds and stores the ZSL matches\n \"\"\"\n self.zsl_matches = []\n\n film_sg = SlabGenerator(\n self.film_structure,\n self.film_miller,\n min_slab_size=1,\n min_vacuum_size=3,\n in_unit_planes=True,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n sub_sg = SlabGenerator(\n self.substrate_structure,\n self.substrate_miller,\n min_slab_size=1,\n min_vacuum_size=3,\n in_unit_planes=True,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n film_slab = film_sg.get_slab(shift=0)\n sub_slab = sub_sg.get_slab(shift=0)\n\n film_vectors = film_slab.lattice.matrix\n substrate_vectors = sub_slab.lattice.matrix\n\n # Generate all possible interface matches\n self.zsl_matches = list(self.zslgen(film_vectors[:2], substrate_vectors[:2], lowest=False))\n\n for match in self.zsl_matches:\n xform = get_2d_transform(film_vectors, match.film_vectors)\n strain, rot = polar(xform)\n assert np.allclose(\n strain, np.round(strain)\n ), \"Film lattice vectors changed during ZSL match, check your ZSL Generator parameters\"\n\n xform = get_2d_transform(substrate_vectors, match.substrate_vectors)\n strain, rot = polar(xform)\n assert np.allclose(\n strain, strain.astype(int)\n ), \"Substrate lattice vectors changed during ZSL match, check your ZSL Generator parameters\"\n\n def _find_terminations(self):\n \"\"\"\n Finds all terminations\n \"\"\"\n\n film_sg = SlabGenerator(\n self.film_structure,\n self.film_miller,\n min_slab_size=1,\n min_vacuum_size=3,\n in_unit_planes=True,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n sub_sg = 
SlabGenerator(\n self.substrate_structure,\n self.substrate_miller,\n min_slab_size=1,\n min_vacuum_size=3,\n in_unit_planes=True,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n film_slabs = film_sg.get_slabs()\n sub_slabs = sub_sg.get_slabs()\n\n film_shits = [s.shift for s in film_slabs]\n film_terminations = [label_termination(s) for s in film_slabs]\n\n sub_shifts = [s.shift for s in sub_slabs]\n sub_terminations = [label_termination(s) for s in sub_slabs]\n\n self._terminations = {\n (film_label, sub_label): (film_shift, sub_shift)\n for (film_label, film_shift), (sub_label, sub_shift) in product(\n zip(film_terminations, film_shits), zip(sub_terminations, sub_shifts)\n )\n }\n self.terminations = list(self._terminations.keys())\n\n def get_interfaces(\n self,\n termination: Tuple[str, str],\n gap: float = 2.0,\n vacuum_over_film: float = 20.0,\n film_thickness: Union[float, int] = 1,\n substrate_thickness: Union[float, int] = 1,\n in_layers: bool = True,\n ) -> Iterator[Interface]:\n \"\"\"\n Generates interface structures given the film and substrate structure\n as well as the desired terminations\n\n\n Args:\n terminations: termination from self.termination list\n gap: gap between film and substrate\n vacuum_over_film: vacuum over the top of the film\n film_thickness: the film thickness\n substrate_thickness: substrate thickness\n in_layers: set the thickness in layer units\n \"\"\"\n film_sg = SlabGenerator(\n self.film_structure,\n self.film_miller,\n min_slab_size=film_thickness,\n min_vacuum_size=3,\n in_unit_planes=in_layers,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n sub_sg = SlabGenerator(\n self.substrate_structure,\n self.substrate_miller,\n min_slab_size=substrate_thickness,\n min_vacuum_size=3,\n in_unit_planes=in_layers,\n center_slab=True,\n primitive=True,\n reorient_lattice=False, # This is necessary to not screw up the lattice\n )\n\n film_shift, sub_shift = self._terminations[termination]\n\n film_slab = film_sg.get_slab(shift=film_shift)\n sub_slab = sub_sg.get_slab(shift=sub_shift)\n\n for match in self.zsl_matches:\n # Build film superlattice\n super_film_transform = np.round(\n from_2d_to_3d(get_2d_transform(film_slab.lattice.matrix[:2], match.film_sl_vectors))\n ).astype(int)\n film_sl_slab = film_slab.copy()\n film_sl_slab.make_supercell(super_film_transform)\n assert np.allclose(\n film_sl_slab.lattice.matrix[2], film_slab.lattice.matrix[2]\n ), \"2D transformation affected C-axis for Film transformation\"\n assert np.allclose(\n film_sl_slab.lattice.matrix[:2], match.film_sl_vectors\n ), \"Transformation didn't make proper supercell for film\"\n\n # Build substrate superlattice\n super_sub_transform = np.round(\n from_2d_to_3d(get_2d_transform(sub_slab.lattice.matrix[:2], match.substrate_sl_vectors))\n ).astype(int)\n sub_sl_slab = sub_slab.copy()\n sub_sl_slab.make_supercell(super_sub_transform)\n assert np.allclose(\n sub_sl_slab.lattice.matrix[2], sub_slab.lattice.matrix[2]\n ), \"2D transformation affected C-axis for Film transformation\"\n assert np.allclose(\n sub_sl_slab.lattice.matrix[:2], match.substrate_sl_vectors\n ), \"Transformation didn't make proper supercell for substrate\"\n\n # Add extra info\n match_dict = match.as_dict()\n interface_properties = {k: match_dict[k] for k in match_dict.keys() if not k.startswith(\"@\")}\n\n dfm = Deformation(match.match_transformation)\n\n strain = 
dfm.green_lagrange_strain\n interface_properties[\"strain\"] = strain\n interface_properties[\"von_mises_strain\"] = strain.von_mises_strain\n interface_properties[\"termination\"] = termination\n interface_properties[\"film_thickness\"] = film_thickness\n interface_properties[\"substrate_thickness\"] = substrate_thickness\n\n yield (\n Interface.from_slabs(\n substrate_slab=sub_sl_slab,\n film_slab=film_sl_slab,\n gap=gap,\n vacuum_over_film=vacuum_over_film,\n interface_properties=interface_properties,\n )\n )\n\n\ndef get_rot_3d_for_2d(film_matrix, sub_matrix) -> np.ndarray:\n \"\"\"\n Finds a trasnformation matrix that will rotate and strain the film to the subtrate while preserving the c-axis\n \"\"\"\n film_matrix = np.array(film_matrix)\n film_matrix = film_matrix.tolist()[:2]\n film_matrix.append(np.cross(film_matrix[0], film_matrix[1]))\n\n # Generate 3D lattice vectors for substrate super lattice\n # Out of plane substrate super lattice has to be same length as\n # Film out of plane vector to ensure no extra deformation in that\n # direction\n sub_matrix = np.array(sub_matrix)\n sub_matrix = sub_matrix.tolist()[:2]\n temp_sub = np.cross(sub_matrix[0], sub_matrix[1])\n temp_sub = temp_sub / fast_norm(temp_sub)\n temp_sub = temp_sub * fast_norm(film_matrix[2])\n sub_matrix.append(temp_sub)\n\n transform_matrix = np.transpose(np.linalg.solve(film_matrix, sub_matrix))\n\n rot, _ = polar(transform_matrix)\n\n return rot\n\n\ndef get_2d_transform(start: np.ndarray, end: np.ndarray) -> np.ndarray:\n \"\"\"\n Gets a 2d transformation matrix\n that converts start to end\n \"\"\"\n return np.dot(end, np.linalg.pinv(start))\n\n\ndef from_2d_to_3d(mat: np.ndarray) -> np.ndarray:\n \"\"\"Converts a 2D matrix to a 3D matrix\"\"\"\n new_mat = np.diag([1.0, 1.0, 1.0])\n new_mat[:2, :2] = mat\n return new_mat\n"
] | [
[
"numpy.allclose",
"numpy.linalg.solve",
"numpy.diag",
"numpy.cross",
"numpy.array",
"numpy.linalg.pinv",
"numpy.round",
"scipy.linalg.polar"
]
] |
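For context, a short sketch of how the `CoherentInterfaceBuilder` defined above is typically driven: construct it from bulk structures, enumerate terminations, then iterate the generated interfaces. The POSCAR file names, Miller indices, and thicknesses are placeholders, not values from this repository:

```python
from pymatgen.analysis.interfaces.coherent_interfaces import CoherentInterfaceBuilder
from pymatgen.core import Structure

substrate = Structure.from_file("POSCAR_substrate")
film = Structure.from_file("POSCAR_film")

builder = CoherentInterfaceBuilder(
    substrate_structure=substrate,
    film_structure=film,
    film_miller=(1, 1, 1),
    substrate_miller=(0, 0, 1),
)

print(builder.terminations)            # available (film, substrate) termination labels
termination = builder.terminations[0]
interfaces = list(
    builder.get_interfaces(
        termination,
        gap=2.0,
        film_thickness=2,
        substrate_thickness=2,
        in_layers=True,
    )
)
print(f"{len(interfaces)} coherent interfaces generated for {termination}")
```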
venom12138/active_tracking_rl | [
"813342c322f8f710fc0f9ccf2a5d0746f955144f"
] | [
"envs/gym-track2d/gym_track2d/envs/track_1v1.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nfrom gym_track2d.envs.generators import RandomMazeGenerator, RandomBlockMazeGenerator\nfrom gym_track2d.envs.navigator import Navigator, RamAgent\n\n\nclass Track1v1Env(gym.Env):\n metadata = {'render.modes': ['human', 'rgb_array']}\n\n def __init__(self,\n map_type='Block',\n pob_size=6,\n action_type='VonNeumann',\n obs_type='Partial',\n target_mode='PZR',\n live_display=True,\n render_trace=True,\n level=0):\n \"\"\"Initialize the maze. DType: list\"\"\"\n # Random seed with internal gym seeding\n self.seed()\n self.num_agents_max = self.num_agents = 2\n self.map_type = map_type\n self.level = level\n # Size of the partial observable window\n self.pob_size = pob_size\n self.render_trace = render_trace\n self.traces = []\n self.traces_relative = []\n self.action_type = action_type\n self.obs_type = obs_type\n self.target_mode = target_mode\n\n # If True, show the updated display each time render is called rather\n # than storing the frames and creating an animation at the end\n self.live_display = live_display\n\n self.state = None\n\n # Maze: 0: free space, 1: wall\n self.init_maze(self.map_type)\n\n # Action space\n tracker_action_space = self.define_action(self.action_type)\n target_action_space = self.define_action(self.action_type)\n self.action_space = [tracker_action_space, target_action_space]\n\n # Observation space\n tracker_obs_space = self.define_observation(self.obs_type)\n target_obs_space = self.define_observation(self.obs_type)\n self.observation_space = [tracker_obs_space, target_obs_space]\n\n # nav\n self.Target = []\n for i in range(self.num_agents-1):\n if 'Nav' in self.target_mode or 'RPF' in self.target_mode:\n self.Target.append(Navigator(self.action_space[i+1], self.maze_generator))\n if 'Ram' in self.target_mode:\n self.Target.append(RamAgent(self.action_space[i+1]))\n\n # Colormap: order of color is, free space, wall, agent, food, poison\n self.cmap = colors.ListedColormap(['white', 'black', 'blue', 'green', 'red', 'gray', 'yellow'])\n self.bounds = [0, 1, 2, 3, 4, 5, 6] # values for each color\n self.norm = colors.BoundaryNorm(self.bounds, self.cmap.N)\n self.C_step = 0\n\n def step(self, action):\n # Player 0: try to catch player 1\n # Player 1: try to reach the goal and avoid player 0\n old_state = self.state.copy()\n # Update current state\n rewards = np.zeros(self.num_agents)\n done = False\n action = list(action)\n # move agents\n for i in range(self.num_agents - 1):\n if 'Ram' in self.target_mode:\n action[i+1] = self.Target[i].step()\n if 'Nav' in self.target_mode or 'RPF' in self.target_mode:\n action[i+1], goal = self.Target[i].step(old_state[i + 1], self.maze_generator, None)\n\n for i in range(self.num_agents):\n self.state[i], self.C_collision[i] = self._next_state(self.state[i], int(action[i]),\n self.action_type)\n\n self.traces_relative = []\n for j in range(self.num_agents):\n self.traces_relative.append([np.array(self.init_states[i]) - np.array(self.init_states[j]) for i in\n range(self.num_agents)])\n d_all = np.array([np.linalg.norm(np.array(self.state[i]) - np.array(self.state[0])) for i in range(self.num_agents)])\n\n max_distance = float(self.pob_size)\n distance = d_all[1]\n\n r_track = 1 - 2*distance/max_distance\n r_track = max(r_track, -1) # [-1, 1]\n r_target = -r_track - self.w_p * max(distance - max_distance, 0)/max_distance\n r_target = max(r_target, -1)\n rewards[0] = r_track\n 
rewards[1] = r_target\n\n if distance <= max_distance:\n self.C_far = 0\n else:\n self.C_far += 1\n if self.C_far > 10:\n done = True\n\n self.C_reward += rewards\n self.C_step += 1\n\n # Additional info\n info = {}\n self.distance = info['distance'] = d_all[1]\n # Footprint: Record agent trajectory\n self.traces.append(self.state[1])\n obs = self._get_obs()\n info['traces'] = self.traces\n info['traces_relative'] = self.traces_relative\n if 'Nav' in self.target_mode or 'Ram' in self.target_mode:\n # 相当于只取前两个,也就是两个的obs\n obs = obs[:2]\n rewards = rewards[:2]\n # print(\"num_agents:{}\".format(self.num_agents))\n # print(\"obs:{}\".format(np.array(obs).shape))\n # print('rewards:{}'.format(rewards))\n # print(obs.shape)\n return obs, rewards, done, info\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n\n return [seed]\n\n def reset(self):\n # Reset maze\n self.init_maze(self.map_type)\n self.state = self.init_states\n # set target\n if 'Nav' in self.target_mode or 'RPF' in self.target_mode:\n for i in range(self.num_agents-1):\n self.Target[i].reset(self.init_states[i+1], self.goal_states[i+1], self.maze_generator)\n if 'Ram' in self.target_mode:\n for i in range(self.num_agents-1):\n self.Target[i].reset()\n\n # set target reward\n if self.target_mode == 'PZR':\n self.w_p = 1\n elif self.target_mode == 'Far':\n self.w_p = -0.5\n else:\n self.w_p = 0\n\n self.distance = np.sum(np.abs(np.array(self.state[0]) - np.array(self.state[1])))\n self.C_reward = np.zeros(self.num_agents)\n self.C_step = 0\n self.C_collision = np.zeros(self.num_agents)\n self.C_far = 0\n\n # Clean the list of ax_imgs, the buffer for generating videos\n self.ax_imgs = []\n # Clean the traces of the trajectory\n self.traces = [self.init_states[0]]\n self.traces_relative = [np.array(self.init_states[i]) - np.array(self.init_states[0]) for i in range(self.num_agents)]\n obs = self._get_obs()\n if 'Nav' in self.target_mode or 'Ram' in self.target_mode or 'RPF' in self.target_mode:\n obs = obs[:2]\n return obs\n\n def render(self, mode='human', close=False):\n import time\n time.sleep(0.03)\n if close:\n plt.close()\n return\n\n obs = self._get_full_obs()\n partial_obs = self._get_partial_obs(0, self.pob_size)\n\n # For rendering traces: Only for visualization, does not affect the observation data\n if self.render_trace:\n obs[list(zip(*self.traces[:-1]))] = 6\n\n # Create Figure for rendering\n if not hasattr(self, 'fig'): # initialize figure and plotting axes\n self.fig, (self.ax_full, self.ax_partial) = plt.subplots(nrows=1, ncols=2)\n self.ax_full.axis('off')\n self.ax_partial.axis('off')\n\n self.fig.show()\n if self.live_display:\n # Only create the image the first time\n if not hasattr(self, 'ax_full_img'):\n self.ax_full_img = self.ax_full.imshow(obs, cmap=self.cmap, norm=self.norm, animated=True)\n if not hasattr(self, 'ax_partial_img'):\n self.ax_partial_img = self.ax_partial.imshow(partial_obs, cmap=self.cmap, norm=self.norm, animated=True)\n # Update the image data for efficient live video\n self.ax_full_img.set_data(obs)\n self.ax_partial_img.set_data(partial_obs)\n else:\n # Create a new image each time to allow an animation to be created\n self.ax_full_img = self.ax_full.imshow(obs, cmap=self.cmap, norm=self.norm, animated=True)\n self.ax_partial_img = self.ax_partial.imshow(partial_obs, cmap=self.cmap, norm=self.norm, animated=True)\n\n plt.draw()\n\n if self.live_display:\n # Update the figure display immediately\n self.fig.canvas.draw()\n else:\n # Put in AxesImage buffer 
for video generation\n self.ax_imgs.append([self.ax_full_img, self.ax_partial_img]) # List of axes to update figure frame\n\n self.fig.set_dpi(200)\n\n return self.fig\n\n def init_maze(self, map_type):\n if map_type == 'Maze':\n if self.level > 0:\n r = self.level * 0.02\n else:\n r = .03*np.random.random()\n self.maze_generator = RandomMazeGenerator(width=80, height=80, complexity=r, density=r)\n elif map_type == 'Block':\n if self.level > 0:\n r = self.level * 0.05\n else:\n r = 0.15*np.random.random()\n self.maze_generator = RandomBlockMazeGenerator(maze_size=80, obstacle_ratio=r)\n elif map_type == 'Empty':\n self.maze_generator = RandomBlockMazeGenerator(maze_size=80, obstacle_ratio=0)\n self.maze = np.array(self.maze_generator.get_maze())\n self.maze_size = self.maze.shape\n if 'RPF' in self.target_mode:\n self.maze_generator.static_goals()\n self.goal_states = self.maze_generator.sample_goal(self.num_agents)\n self.init_states = self.maze_generator.sample_close_states(self.num_agents, 1)\n while self.goal_test(self.init_states[0]): # Goal check\n self.goal_states = self.maze_generator.sample_goal(self.num_agents)\n\n def define_action(self, action_type):\n if action_type == 'VonNeumann': # Von Neumann neighborhood\n num_actions = 4\n elif action_type == 'Moore': # Moore neighborhood\n num_actions = 8\n else:\n raise TypeError('Action type must be either \\'VonNeumann\\' or \\'Moore\\'')\n return spaces.Discrete(num_actions)\n\n def define_observation(self, obs_type):\n low_obs = 0 # Lowest integer in observation\n high_obs = 6 # Highest integer in observation\n if obs_type == 'Full':\n obs_space = spaces.Box(low=low_obs, high=high_obs,\n shape=(1, self.maze_size[0], self.maze_size[1]), dtype=np.float32)\n elif self.obs_type == 'Partial':\n obs_space = spaces.Box(low=low_obs, high=high_obs,\n shape=(1, self.pob_size*2+1, self.pob_size*2+1), dtype=np.float32)\n else:\n raise TypeError('Observation type must be either \\'full\\' or \\'partial\\'')\n return obs_space\n\n def goal_test(self, state):\n \"\"\"Return True if current state is a goal state.\"\"\"\n if type(self.goal_states[0]) == list:\n return list(state) in self.goal_states\n elif type(self.goal_states[0]) == tuple:\n return tuple(state) in self.goal_states\n\n def _next_state(self, state, action, action_type='VonNeumann'):\n \"\"\"Return the next state from a given state by taking a given action.\"\"\"\n\n # Transition table to define movement for each action\n if action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state, True\n else: # Valid move for 0, 2, 3, 4\n return new_state, False\n\n def _get_obs(self):\n if self.obs_type == 'Full':\n obs = [np.expand_dims(self._get_full_obs(), 0) for i in range(self.num_agents)]\n return np.array(obs)\n elif self.obs_type == 'Partial':\n obs = [np.expand_dims(self._get_partial_obs(i, self.pob_size), 0) for i in range(self.num_agents)]\n return np.array(obs)\n\n def _get_full_obs(self):\n \"\"\"Return a 2D array representation of maze.\"\"\"\n obs = np.array(self.maze)\n\n # Set current position\n for i in range(self.num_agents):\n if i < 2:\n color = 2+2*i\n else:\n color = 2 + np.random.randint(0, 3)\n 
obs[self.state[i][0]][self.state[i][1]] = color\n\n return obs\n\n def _get_partial_obs(self, id=0, size=1, vec=False):\n \"\"\"Get partial observable window according to Moore neighborhood\"\"\"\n # Get maze with indicated location of current position and goal positions\n maze = self._get_full_obs()\n maze[self.state[id][0]][self.state[id][1]] = 2+2*id\n pos = np.array(self.state[id])\n\n under_offset = np.min(pos - size)\n over_offset = np.min(len(maze) - (pos + size + 1))\n offset = np.min([under_offset, over_offset])\n\n if offset < 0: # Need padding\n maze = np.pad(maze, np.abs(offset), 'constant', constant_values=1)\n pos += np.abs(offset)\n maze_p = maze[pos[0] - size: pos[0] + size + 1, pos[1] - size: pos[1] + size + 1]\n if vec:\n maze_p = maze_p.reshape(self.v_len)\n return maze_p"
] | [
[
"matplotlib.pyplot.draw",
"matplotlib.colors.BoundaryNorm",
"numpy.zeros",
"matplotlib.colors.ListedColormap",
"numpy.abs",
"matplotlib.pyplot.subplots",
"numpy.random.random",
"numpy.min",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.random.randint"
]
] |
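A minimal rollout sketch for the `Track1v1Env` defined above, assuming the `gym_track2d` package from this repository is importable; the step cap, map settings, and random tracker policy are illustrative choices:

```python
import numpy as np
from gym_track2d.envs.track_1v1 import Track1v1Env

env = Track1v1Env(map_type="Block", obs_type="Partial", target_mode="Nav")
obs = env.reset()              # two partial views, one per agent: shape (2, 1, 13, 13)

episode_return = np.zeros(2)
for t in range(200):
    # One discrete action per agent; the target's action is overridden internally
    # when target_mode contains 'Nav', 'Ram', or 'RPF'.
    actions = [space.sample() for space in env.action_space]
    obs, rewards, done, info = env.step(actions)
    episode_return += rewards
    if done:
        break
print(episode_return, info["distance"])
```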
djroxx2000/transformers | [
"77770ec79883343d32051cfb6a04f64523cd8df1",
"76cadb7943c8492ec481f4f3925e9e8793a32c9d"
] | [
"src/transformers/models/roberta/modeling_roberta.py",
"src/transformers/models/deberta/modeling_deberta.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch RoBERTa model. \"\"\"\n\nimport math\n\nimport torch\nimport torch.utils.checkpoint\nfrom packaging import version\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN, gelu\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_roberta import RobertaConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"roberta-base\"\n_CONFIG_FOR_DOC = \"RobertaConfig\"\n_TOKENIZER_FOR_DOC = \"RobertaTokenizer\"\n\nROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"roberta-base\",\n \"roberta-large\",\n \"roberta-large-mnli\",\n \"distilroberta-base\",\n \"roberta-base-openai-detector\",\n \"roberta-large-openai-detector\",\n # See all RoBERTa models at https://huggingface.co/models?filter=roberta\n]\n\n\nclass RobertaEmbeddings(nn.Module):\n \"\"\"\n Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\n \"\"\"\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n if version.parse(torch.__version__) > version.parse(\"1.6.0\"):\n self.register_buffer(\n \"token_type_ids\",\n torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),\n persistent=False,\n )\n\n # End copy\n self.padding_idx = config.pad_token_id\n self.position_embeddings = 
nn.Embedding(\n config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\n )\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if position_ids is None:\n if input_ids is not None:\n # Create the position ids from the input token ids. Any padded tokens remain padded.\n position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)\n else:\n position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)\n\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs\n # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves\n # issue #5664\n if token_type_ids is None:\n if hasattr(self, \"token_type_ids\"):\n buffered_token_type_ids = self.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n \"\"\"\n We are provided embeddings directly. 
We cannot infer which are padded so just generate sequential position ids.\n\n Args:\n inputs_embeds: torch.Tensor\n\n Returns: torch.Tensor\n \"\"\"\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n\n position_ids = torch.arange(\n self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device\n )\n return position_ids.unsqueeze(0).expand(input_shape)\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta\nclass RobertaSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can 
then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput\nclass RobertaSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# 
Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta\nclass RobertaAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = RobertaSelfAttention(config)\n self.output = RobertaSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass RobertaIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput\nclass RobertaOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta\nclass RobertaLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = RobertaAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = RobertaAttention(config)\n self.intermediate = RobertaIntermediate(config)\n self.output = RobertaOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n 
output_attentions=False,\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n past_key_value=self_attn_past_key_value,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n cross_attn_past_key_value,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta\nclass RobertaEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if self.gradient_checkpointing and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with gradient 
checkpointing. Setting `use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler\nclass RobertaPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass RobertaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = RobertaConfig\n base_model_prefix = \"roberta\"\n supports_gradient_checkpointing = True\n\n # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, RobertaEncoder):\n module.gradient_checkpointing = value\n\n def update_keys_to_ignore(self, config, del_keys_to_ignore):\n \"\"\"Remove some keys from ignore list\"\"\"\n if not config.tie_word_embeddings:\n # must make a new list, or the class variable gets modified!\n self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]\n 
self._keys_to_ignore_on_load_missing = [\n k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore\n ]\n\n\nROBERTA_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nROBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaModel(RobertaPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\n Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n\n .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762\n\n \"\"\"\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = RobertaEmbeddings(config)\n self.encoder = RobertaEncoder(config)\n\n self.pooler = RobertaPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n # Copied from transformers.models.bert.modeling_bert.BertModel.forward\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. 
Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n batch_size, seq_length = input_shape\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has 
shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. \"\"\", ROBERTA_START_DOCSTRING\n)\nclass RobertaForCausalLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_save = [r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`\")\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n # The LM head weights require special treatment only when they are tied with the word embeddings\n self.update_keys_to_ignore(config, [\"lm_head.decoder.weight\"])\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n Returns:\n\n Example::\n\n >>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig\n >>> import torch\n\n >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n >>> config = RobertaConfig.from_pretrained(\"roberta-base\")\n >>> config.is_decoder = True\n >>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n 
hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n\n\n@add_start_docstrings(\"\"\"RoBERTa Model with a `language modeling` head on top. \"\"\", ROBERTA_START_DOCSTRING)\nclass RobertaForMaskedLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_save = [r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n # The LM head weights require special treatment only when they are tied with the word embeddings\n self.update_keys_to_ignore(config, [\"lm_head.decoder.weight\"])\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n mask=\"<mask>\",\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaLMHead(nn.Module):\n \"\"\"Roberta Head for masked language modeling.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n self.decoder.bias = self.bias\n\n def forward(self, features, **kwargs):\n x = self.dense(features)\n x = gelu(x)\n x = self.layer_norm(x)\n\n # project back to size of vocabulary with bias\n x = self.decoder(x)\n\n return x\n\n def _tie_weights(self):\n # To tie those two weights if they get disconnected (on TPU or when the bias is resized)\n self.bias = self.decoder.bias\n\n\n@add_start_docstrings(\n \"\"\"\n RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForSequenceClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.classifier = RobertaClassificationHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. 
Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForMultipleChoice(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.roberta = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n token_type_ids=None,\n attention_mask=None,\n labels=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n flat_inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.roberta(\n flat_input_ids,\n position_ids=flat_position_ids,\n token_type_ids=flat_token_type_ids,\n attention_mask=flat_attention_mask,\n head_mask=head_mask,\n inputs_embeds=flat_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForTokenClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. 
to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForQuestionAnswering(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\ndef create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n \"\"\"\n Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\n are ignored. This is modified from fairseq's `utils.make_positions`.\n\n Args:\n x: torch.Tensor x:\n\n Returns: torch.Tensor\n \"\"\"\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n mask = input_ids.ne(padding_idx).int()\n incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n return incremental_indices.long() + padding_idx\n",
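The RoBERTa source dumped above closes with create_position_ids_from_input_ids, which turns token ids into position ids while keeping padded slots pinned at padding_idx. Below is a minimal, self-contained sketch of that behaviour; it is not part of the dumped file, and the token ids and padding_idx=1 are illustrative assumptions matching the default roberta-base tokenizer.

import torch

# Sketch: mirrors the logic of create_position_ids_from_input_ids at the end of the RoBERTa file above.
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    mask = input_ids.ne(padding_idx).int()  # 1 for real tokens, 0 for padding
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx  # padded slots stay at padding_idx

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # assumed example ids; 1 is the pad id in roberta-base
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 5, 1, 1]])  -- non-pad tokens count up from padding_idx + 1

Because positions start at padding_idx + 1, RoBERTa checkpoints reserve the first rows of the position-embedding table, which is why roberta-base uses max_position_embeddings = 514 for a 512-token context.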
"# coding=utf-8\n# Copyright 2020 Microsoft and the Hugging Face Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch DeBERTa model. \"\"\"\n\nimport math\nfrom collections.abc import Sequence\n\nimport torch\nfrom torch import _softmax_backward_data, nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward\nfrom ...modeling_outputs import (\n BaseModelOutput,\n MaskedLMOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_deberta import DebertaConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"DebertaConfig\"\n_TOKENIZER_FOR_DOC = \"DebertaTokenizer\"\n_CHECKPOINT_FOR_DOC = \"microsoft/deberta-base\"\n\nDEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/deberta-base\",\n \"microsoft/deberta-large\",\n \"microsoft/deberta-xlarge\",\n \"microsoft/deberta-base-mnli\",\n \"microsoft/deberta-large-mnli\",\n \"microsoft/deberta-xlarge-mnli\",\n]\n\n\nclass ContextPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)\n self.dropout = StableDropout(config.pooler_dropout)\n self.config = config\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n\n context_token = hidden_states[:, 0]\n context_token = self.dropout(context_token)\n pooled_output = self.dense(context_token)\n pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)\n return pooled_output\n\n @property\n def output_dim(self):\n return self.config.hidden_size\n\n\nclass XSoftmax(torch.autograd.Function):\n \"\"\"\n Masked Softmax which is optimized for saving memory\n\n Args:\n input (:obj:`torch.tensor`): The input tensor that will apply softmax.\n mask (:obj:`torch.IntTensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\n dim (int): The dimension that will apply softmax\n\n Example::\n\n >>> import torch\n >>> from transformers.models.deberta.modeling_deberta import XSoftmax\n\n >>> # Make a tensor\n >>> x = torch.randn([4,20,100])\n\n >>> # Create a mask\n >>> mask = (x>0).int()\n\n >>> y = XSoftmax.apply(x, mask, dim=-1)\n \"\"\"\n\n @staticmethod\n def forward(self, input, mask, dim):\n self.dim = dim\n rmask = ~(mask.bool())\n\n output = input.masked_fill(rmask, float(\"-inf\"))\n output = torch.softmax(output, self.dim)\n output.masked_fill_(rmask, 0)\n self.save_for_backward(output)\n return output\n\n @staticmethod\n def backward(self, grad_output):\n (output,) = self.saved_tensors\n inputGrad = _softmax_backward_data(grad_output, output, self.dim, output)\n return inputGrad, None, None\n\n\nclass DropoutContext(object):\n def __init__(self):\n self.dropout = 0\n 
self.mask = None\n self.scale = 1\n self.reuse_mask = True\n\n\ndef get_mask(input, local_context):\n if not isinstance(local_context, DropoutContext):\n dropout = local_context\n mask = None\n else:\n dropout = local_context.dropout\n dropout *= local_context.scale\n mask = local_context.mask if local_context.reuse_mask else None\n\n if dropout > 0 and mask is None:\n mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool()\n\n if isinstance(local_context, DropoutContext):\n if local_context.mask is None:\n local_context.mask = mask\n\n return mask, dropout\n\n\nclass XDropout(torch.autograd.Function):\n \"\"\"Optimized dropout function to save computation and memory by using mask operation instead of multiplication.\"\"\"\n\n @staticmethod\n def forward(ctx, input, local_ctx):\n mask, dropout = get_mask(input, local_ctx)\n ctx.scale = 1.0 / (1 - dropout)\n if dropout > 0:\n ctx.save_for_backward(mask)\n return input.masked_fill(mask, 0) * ctx.scale\n else:\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.scale > 1:\n (mask,) = ctx.saved_tensors\n return grad_output.masked_fill(mask, 0) * ctx.scale, None\n else:\n return grad_output, None\n\n\nclass StableDropout(nn.Module):\n \"\"\"\n Optimized dropout module for stabilizing the training\n\n Args:\n drop_prob (float): the dropout probabilities\n \"\"\"\n\n def __init__(self, drop_prob):\n super().__init__()\n self.drop_prob = drop_prob\n self.count = 0\n self.context_stack = None\n\n def forward(self, x):\n \"\"\"\n Call the module\n\n Args:\n x (:obj:`torch.tensor`): The input tensor to apply dropout\n \"\"\"\n if self.training and self.drop_prob > 0:\n return XDropout.apply(x, self.get_context())\n return x\n\n def clear_context(self):\n self.count = 0\n self.context_stack = None\n\n def init_context(self, reuse_mask=True, scale=1):\n if self.context_stack is None:\n self.context_stack = []\n self.count = 0\n for c in self.context_stack:\n c.reuse_mask = reuse_mask\n c.scale = scale\n\n def get_context(self):\n if self.context_stack is not None:\n if self.count >= len(self.context_stack):\n self.context_stack.append(DropoutContext())\n ctx = self.context_stack[self.count]\n ctx.dropout = self.drop_prob\n self.count += 1\n return ctx\n else:\n return self.drop_prob\n\n\nclass DebertaLayerNorm(nn.Module):\n \"\"\"LayerNorm module in the TF style (epsilon inside the square root).\"\"\"\n\n def __init__(self, size, eps=1e-12):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(size))\n self.bias = nn.Parameter(torch.zeros(size))\n self.variance_epsilon = eps\n\n def forward(self, hidden_states):\n input_type = hidden_states.dtype\n hidden_states = hidden_states.float()\n mean = hidden_states.mean(-1, keepdim=True)\n variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)\n hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)\n hidden_states = hidden_states.to(input_type)\n y = self.weight * hidden_states + self.bias\n return y\n\n\nclass DebertaSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass 
DebertaAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = DisentangledSelfAttention(config)\n self.output = DebertaSelfOutput(config)\n self.config = config\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n self_output = self.self(\n hidden_states,\n attention_mask,\n return_att,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if return_att:\n self_output, att_matrix = self_output\n if query_states is None:\n query_states = hidden_states\n attention_output = self.output(self_output, query_states)\n\n if return_att:\n return (attention_output, att_matrix)\n else:\n return attention_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta\nclass DebertaIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass DebertaOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass DebertaLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = DebertaAttention(config)\n self.intermediate = DebertaIntermediate(config)\n self.output = DebertaOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n attention_output = self.attention(\n hidden_states,\n attention_mask,\n return_att=return_att,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if return_att:\n attention_output, att_matrix = attention_output\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n if return_att:\n return (layer_output, att_matrix)\n else:\n return layer_output\n\n\nclass DebertaEncoder(nn.Module):\n \"\"\"Modified BertEncoder with relative position bias support\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])\n self.relative_attention = getattr(config, \"relative_attention\", False)\n if self.relative_attention:\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)\n\n def get_rel_embedding(self):\n rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None\n return rel_embeddings\n\n def get_attention_mask(self, 
attention_mask):\n if attention_mask.dim() <= 2:\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)\n attention_mask = attention_mask.byte()\n elif attention_mask.dim() == 3:\n attention_mask = attention_mask.unsqueeze(1)\n\n return attention_mask\n\n def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):\n if self.relative_attention and relative_pos is None:\n q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)\n relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)\n return relative_pos\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_hidden_states=True,\n output_attentions=False,\n query_states=None,\n relative_pos=None,\n return_dict=True,\n ):\n attention_mask = self.get_attention_mask(attention_mask)\n relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)\n\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[0]\n else:\n next_kv = hidden_states\n rel_embeddings = self.get_rel_embedding()\n for i, layer_module in enumerate(self.layer):\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n hidden_states = layer_module(\n next_kv,\n attention_mask,\n output_attentions,\n query_states=query_states,\n relative_pos=relative_pos,\n rel_embeddings=rel_embeddings,\n )\n if output_attentions:\n hidden_states, att_m = hidden_states\n\n if query_states is not None:\n query_states = hidden_states\n if isinstance(hidden_states, Sequence):\n next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None\n else:\n next_kv = hidden_states\n\n if output_attentions:\n all_attentions = all_attentions + (att_m,)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\n )\n\n\ndef build_relative_position(query_size, key_size, device):\n \"\"\"\n Build relative position according to the query and key\n\n We assume the absolute position of query :math:`P_q` is range from (0, query_size) and the absolute position of key\n :math:`P_k` is range from (0, key_size), The relative positions from query to key is :math:`R_{q \\\\rightarrow k} =\n P_q - P_k`\n\n Args:\n query_size (int): the length of query\n key_size (int): the length of key\n\n Return:\n :obj:`torch.LongTensor`: A tensor with shape [1, query_size, key_size]\n\n \"\"\"\n\n q_ids = torch.arange(query_size, dtype=torch.long, device=device)\n k_ids = torch.arange(key_size, dtype=torch.long, device=device)\n rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)\n rel_pos_ids = rel_pos_ids[:query_size, :]\n rel_pos_ids = rel_pos_ids.unsqueeze(0)\n return rel_pos_ids\n\n\[email protected]\ndef c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):\n return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])\n\n\[email protected]\ndef p2c_dynamic_expand(c2p_pos, query_layer, key_layer):\n return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])\n\n\[email protected]\ndef 
pos_dynamic_expand(pos_index, p2c_att, key_layer):\n return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))\n\n\nclass DisentangledSelfAttention(nn.Module):\n \"\"\"\n Disentangled self-attention module\n\n Parameters:\n config (:obj:`str`):\n A model config class instance with the configuration to build a new model. The schema is similar to\n `BertConfig`, for more details, please refer :class:`~transformers.DebertaConfig`\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)\n self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))\n self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))\n self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []\n\n self.relative_attention = getattr(config, \"relative_attention\", False)\n self.talking_head = getattr(config, \"talking_head\", False)\n\n if self.talking_head:\n self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)\n self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)\n\n if self.relative_attention:\n self.max_relative_positions = getattr(config, \"max_relative_positions\", -1)\n if self.max_relative_positions < 1:\n self.max_relative_positions = config.max_position_embeddings\n self.pos_dropout = StableDropout(config.hidden_dropout_prob)\n\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = StableDropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=None,\n relative_pos=None,\n rel_embeddings=None,\n ):\n \"\"\"\n Call the module\n\n Args:\n hidden_states (:obj:`torch.FloatTensor`):\n Input states to the module usually the output from previous layer, it will be the Q,K and V in\n `Attention(Q,K,V)`\n\n attention_mask (:obj:`torch.ByteTensor`):\n An attention mask matrix of shape [`B`, `N`, `N`] where `B` is the batch size, `N` is the maximum\n sequence length in which element [i,j] = `1` means the `i` th token in the input can attend to the `j`\n th token.\n\n return_att (:obj:`bool`, optional):\n Whether return the attention matrix.\n\n query_states (:obj:`torch.FloatTensor`, optional):\n The `Q` state in `Attention(Q,K,V)`.\n\n relative_pos (:obj:`torch.LongTensor`):\n The relative position encoding between the tokens in the sequence. It's of shape [`B`, `N`, `N`] with\n values ranging in [`-max_relative_positions`, `max_relative_positions`].\n\n rel_embeddings (:obj:`torch.FloatTensor`):\n The embedding of relative distances. 
It's a tensor of shape [:math:`2 \\\\times\n \\\\text{max_relative_positions}`, `hidden_size`].\n\n\n \"\"\"\n if query_states is None:\n qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)\n query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)\n else:\n\n def linear(w, b, x):\n if b is not None:\n return torch.matmul(x, w.t()) + b.t()\n else:\n return torch.matmul(x, w.t()) # + b.t()\n\n ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)\n qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]\n qkvb = [None] * 3\n\n q = linear(qkvw[0], qkvb[0], query_states)\n k, v = [linear(qkvw[i], qkvb[i], hidden_states) for i in range(1, 3)]\n query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]\n\n query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])\n value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])\n\n rel_att = None\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n scale_factor = 1 + len(self.pos_att_type)\n scale = math.sqrt(query_layer.size(-1) * scale_factor)\n query_layer = query_layer / scale\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n if self.relative_attention:\n rel_embeddings = self.pos_dropout(rel_embeddings)\n rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)\n\n if rel_att is not None:\n attention_scores = attention_scores + rel_att\n\n # bxhxlxd\n if self.talking_head:\n attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n\n attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)\n attention_probs = self.dropout(attention_probs)\n if self.talking_head:\n attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (-1,)\n context_layer = context_layer.view(*new_context_layer_shape)\n if return_att:\n return (context_layer, attention_probs)\n else:\n return context_layer\n\n def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):\n if relative_pos is None:\n q = query_layer.size(-2)\n relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)\n if relative_pos.dim() == 2:\n relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)\n elif relative_pos.dim() == 3:\n relative_pos = relative_pos.unsqueeze(1)\n # bxhxqxk\n elif relative_pos.dim() != 4:\n raise ValueError(f\"Relative position ids must be of dim 2 or 3 or 4. 
{relative_pos.dim()}\")\n\n att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)\n relative_pos = relative_pos.long().to(query_layer.device)\n rel_embeddings = rel_embeddings[\n self.max_relative_positions - att_span : self.max_relative_positions + att_span, :\n ].unsqueeze(0)\n if \"c2p\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_key_layer = self.pos_proj(rel_embeddings)\n pos_key_layer = self.transpose_for_scores(pos_key_layer)\n\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_query_layer = self.pos_q_proj(rel_embeddings)\n pos_query_layer = self.transpose_for_scores(pos_query_layer)\n\n score = 0\n # content->position\n if \"c2p\" in self.pos_att_type:\n c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))\n c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)\n c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))\n score += c2p_att\n\n # position->content\n if \"p2c\" in self.pos_att_type or \"p2p\" in self.pos_att_type:\n pos_query_layer /= math.sqrt(pos_query_layer.size(-1) * scale_factor)\n if query_layer.size(-2) != key_layer.size(-2):\n r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)\n else:\n r_pos = relative_pos\n p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)\n if query_layer.size(-2) != key_layer.size(-2):\n pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)\n\n if \"p2c\" in self.pos_att_type:\n p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2))\n p2c_att = torch.gather(\n p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)\n ).transpose(-1, -2)\n if query_layer.size(-2) != key_layer.size(-2):\n p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))\n score += p2c_att\n\n return score\n\n\nclass DebertaEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n pad_token_id = getattr(config, \"pad_token_id\", 0)\n self.embedding_size = getattr(config, \"embedding_size\", config.hidden_size)\n self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)\n\n self.position_biased_input = getattr(config, \"position_biased_input\", True)\n if not self.position_biased_input:\n self.position_embeddings = None\n else:\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)\n\n if config.type_vocab_size > 0:\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)\n\n if self.embedding_size != config.hidden_size:\n self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)\n self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)\n self.dropout = StableDropout(config.hidden_dropout_prob)\n self.config = config\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n 
if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.position_embeddings is not None:\n position_embeddings = self.position_embeddings(position_ids.long())\n else:\n position_embeddings = torch.zeros_like(inputs_embeds)\n\n embeddings = inputs_embeds\n if self.position_biased_input:\n embeddings += position_embeddings\n if self.config.type_vocab_size > 0:\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings += token_type_embeddings\n\n if self.embedding_size != self.config.hidden_size:\n embeddings = self.embed_proj(embeddings)\n\n embeddings = self.LayerNorm(embeddings)\n\n if mask is not None:\n if mask.dim() != embeddings.dim():\n if mask.dim() == 4:\n mask = mask.squeeze(1).squeeze(1)\n mask = mask.unsqueeze(2)\n mask = mask.to(embeddings.dtype)\n\n embeddings = embeddings * mask\n\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass DebertaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = DebertaConfig\n base_model_prefix = \"deberta\"\n _keys_to_ignore_on_load_missing = [\"position_ids\"]\n _keys_to_ignore_on_load_unexpected = [\"position_embeddings\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n\nDEBERTA_START_DOCSTRING = r\"\"\"\n The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention\n <https://arxiv.org/abs/2006.03654>`_ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build on top of\n BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two\n improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.```\n\n\n Parameters:\n config (:class:`~transformers.DebertaConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nDEBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.DebertaTokenizer`. See\n :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? 
<../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaModel(DebertaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = DebertaEmbeddings(config)\n self.encoder = DebertaEncoder(config)\n self.z_steps = 0\n self.config = config\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.word_embeddings = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n raise NotImplementedError(\"The prune function is not implemented in DeBERTa model.\")\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n mask=attention_mask,\n inputs_embeds=inputs_embeds,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask,\n output_hidden_states=True,\n output_attentions=output_attentions,\n return_dict=return_dict,\n )\n encoded_layers = encoder_outputs[1]\n\n if self.z_steps > 1:\n hidden_states = encoded_layers[-2]\n layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]\n query_states = encoded_layers[-1]\n rel_embeddings = self.encoder.get_rel_embedding()\n attention_mask = self.encoder.get_attention_mask(attention_mask)\n rel_pos = self.encoder.get_rel_pos(embedding_output)\n for layer in layers[1:]:\n query_states = layer(\n hidden_states,\n attention_mask,\n return_att=False,\n query_states=query_states,\n relative_pos=rel_pos,\n rel_embeddings=rel_embeddings,\n )\n encoded_layers.append(query_states)\n\n sequence_output = encoded_layers[-1]\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]\n\n return BaseModelOutput(\n last_hidden_state=sequence_output,\n hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\"\"\"DeBERTa Model with a `language modeling` head on top. 
\"\"\", DEBERTA_START_DOCSTRING)\nclass DebertaForMaskedLM(DebertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.deberta = DebertaModel(config)\n self.cls = DebertaOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta\nclass DebertaPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta\nclass DebertaLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = DebertaPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, 
bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\n# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta\nclass DebertaOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = DebertaLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaForSequenceClassification(DebertaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n num_labels = getattr(config, \"num_labels\", 2)\n self.num_labels = num_labels\n\n self.deberta = DebertaModel(config)\n self.pooler = ContextPooler(config)\n output_dim = self.pooler.output_dim\n\n self.classifier = nn.Linear(output_dim, num_labels)\n drop_out = getattr(config, \"cls_dropout\", None)\n drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out\n self.dropout = StableDropout(drop_out)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.deberta.get_input_embeddings()\n\n def set_input_embeddings(self, new_embeddings):\n self.deberta.set_input_embeddings(new_embeddings)\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n encoder_layer = outputs[0]\n pooled_output = self.pooler(encoder_layer)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # regression task\n loss_fn = nn.MSELoss()\n logits = logits.view(-1).to(labels.dtype)\n loss = loss_fn(logits, labels.view(-1))\n elif labels.dim() == 1 or labels.size(-1) == 1:\n label_index = (labels >= 0).nonzero()\n labels = labels.long()\n if label_index.size(0) > 0:\n labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))\n labels = torch.gather(labels, 0, label_index.view(-1))\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))\n else:\n loss = torch.tensor(0).to(logits)\n else:\n log_softmax = nn.LogSoftmax(-1)\n loss = -((log_softmax(logits) * labels).sum(-1)).mean()\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n else:\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaForTokenClassification(DebertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n DEBERTA_START_DOCSTRING,\n)\nclass DebertaForQuestionAnswering(DebertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.deberta = DebertaModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.deberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] | [
[
"torch.ones",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.nn.Softmax",
"torch.tensor",
"torch.nn.Embedding",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.cumsum",
"torch.nn.LayerNorm",
"torch.arange",
"torch.tanh",
"torch.nn.BCEWithLogitsLoss",
"torch.zeros",
"torch.einsum",
"torch.cat",
"torch.nn.Dropout",
"torch.matmul"
],
[
"torch.ones",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.nn.Dropout",
"torch.clamp",
"torch.empty_like",
"torch.zeros_like",
"torch.sqrt",
"torch.nn.Embedding",
"torch.nn.CrossEntropyLoss",
"torch.nn.LogSoftmax",
"torch.tensor",
"torch.arange",
"torch._softmax_backward_data",
"torch.nn.LayerNorm",
"torch.zeros",
"torch.softmax",
"torch.matmul"
]
] |
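Aside (not part of the dataset row above): the `modeling_deberta.py` source stored in that row centres on disentangled attention, whose relative-position bias is built by the `build_relative_position` helper and then clamped into the embedding-index range with `torch.clamp`. The sketch below is a minimal, self-contained illustration of that indexing step; the function mirrors the helper visible in the row, and the `att_span = 2` setting is only an example value (the model uses `min(max(q, k), max_relative_positions)`).

import torch

def build_relative_position(query_size, key_size, device="cpu"):
    # rel_pos[q, k] = q - k, mirroring the helper in the stored modeling file
    q_ids = torch.arange(query_size, dtype=torch.long, device=device)
    k_ids = torch.arange(key_size, dtype=torch.long, device=device)
    return (q_ids[:, None] - k_ids[None, :]).unsqueeze(0)  # shape (1, query_size, key_size)

rel_pos = build_relative_position(4, 4)
att_span = 2  # illustrative value only
c2p_index = torch.clamp(rel_pos + att_span, 0, att_span * 2 - 1)
print(rel_pos[0])    # raw query-key offsets in [-(key_size - 1), query_size - 1]
print(c2p_index[0])  # clamped indices into the 2 * att_span relative-embedding rows

The clamped tensor is what the content-to-position ("c2p") term gathers from the relative-position embedding scores before adding them to the standard attention logits.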
ranigb/Set-Tree | [
"fa3971f9a8ef98dbfd0f6de654efcde3006a197b"
] | [
"settree/set_rf.py"
] | [
"import numbers\nfrom warnings import catch_warnings, simplefilter, warn\nimport threading\n\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nfrom scipy.sparse import issparse\nfrom scipy.sparse import hstack as sparse_hstack\nfrom joblib import Parallel, delayed\n\nfrom sklearn.base import ClassifierMixin, RegressorMixin, MultiOutputMixin\nfrom sklearn.metrics import r2_score\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.tree import (DecisionTreeClassifier, DecisionTreeRegressor,\n ExtraTreeClassifier, ExtraTreeRegressor)\nfrom sklearn.tree._tree import DTYPE, DOUBLE\nfrom sklearn.utils import check_random_state, check_array, compute_sample_weight\nfrom sklearn.exceptions import DataConversionWarning\nfrom sklearn.ensemble._base import BaseEnsemble, _partition_estimators\nfrom sklearn.utils.fixes import _joblib_parallel_args\nfrom sklearn.utils.multiclass import check_classification_targets\nfrom sklearn.utils.validation import check_is_fitted, _check_sample_weight\nfrom sklearn.utils.validation import _deprecate_positional_args\n\nfrom settree.set_tree import SetTree\nfrom settree.set_data import OPERATIONS\n\n__all__ = [\"SetRandomForestClassifier\",\n \"SetRandomForestRegressor\"]\n\nMAX_INT = np.iinfo(np.int32).max\n\n\ndef _get_n_samples_bootstrap(n_samples, max_samples):\n \"\"\"\n Get the number of samples in a bootstrap sample.\n Parameters\n ----------\n n_samples : int\n Number of samples in the dataset.\n max_samples : int or float\n The maximum number of samples to draw from the total available:\n - if float, this indicates a fraction of the total and should be\n the interval `(0, 1)`;\n - if int, this indicates the exact number of samples;\n - if None, this indicates the total number of samples.\n Returns\n -------\n n_samples_bootstrap : int\n The total number of samples to draw for the bootstrap sample.\n \"\"\"\n if max_samples is None:\n return n_samples\n\n if isinstance(max_samples, numbers.Integral):\n if not (1 <= max_samples <= n_samples):\n msg = \"`max_samples` must be in range 1 to {} but got value {}\"\n raise ValueError(msg.format(n_samples, max_samples))\n return max_samples\n\n if isinstance(max_samples, numbers.Real):\n if not (0 < max_samples < 1):\n msg = \"`max_samples` must be in range (0, 1) but got value {}\"\n raise ValueError(msg.format(max_samples))\n return int(round(n_samples * max_samples))\n\n msg = \"`max_samples` should be int or float, but got type '{}'\"\n raise TypeError(msg.format(type(max_samples)))\n\n\ndef _generate_sample_indices(random_state, n_samples, n_samples_bootstrap):\n \"\"\"\n Private function used to _parallel_build_trees function.\"\"\"\n\n random_instance = check_random_state(random_state)\n sample_indices = random_instance.randint(0, n_samples, n_samples_bootstrap)\n\n return sample_indices\n\n\ndef _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):\n \"\"\"\n Private function used to forest._set_oob_score function.\"\"\"\n sample_indices = _generate_sample_indices(random_state, n_samples,\n n_samples_bootstrap)\n sample_counts = np.bincount(sample_indices, minlength=n_samples)\n unsampled_mask = sample_counts == 0\n indices_range = np.arange(n_samples)\n unsampled_indices = indices_range[unsampled_mask]\n\n return unsampled_indices\n\n\ndef _parallel_build_trees(tree, forest, X_set, y, sample_weight, tree_idx, n_trees,\n verbose=0, class_weight=None,\n n_samples_bootstrap=None):\n \"\"\"\n Private function used to fit a single tree in parallel.\"\"\"\n if verbose > 
1:\n print(\"building tree %d of %d\" % (tree_idx + 1, n_trees))\n\n if forest.bootstrap:\n n_samples = X_set.shape[0]\n if sample_weight is None:\n curr_sample_weight = np.ones((n_samples,), dtype=np.float64)\n else:\n curr_sample_weight = sample_weight.copy()\n\n indices = _generate_sample_indices(tree.random_state, n_samples,\n n_samples_bootstrap)\n\n X_subset = X_set.get_subset(indices)\n y_subset = y.take(indices)\n sample_weights_subset = None if sample_weight is None else curr_sample_weight.take(indices)\n\n # todo: currently not supporting those options\n # sample_counts = np.bincount(indices, minlength=n_samples)\n # curr_sample_weight *= sample_counts\n #\n # if class_weight == 'subsample':\n # with catch_warnings():\n # simplefilter('ignore', DeprecationWarning)\n # curr_sample_weight *= compute_sample_weight('auto', y,\n # indices=indices)\n # elif class_weight == 'balanced_subsample':\n # curr_sample_weight *= compute_sample_weight('balanced', y,\n # indices=indices)\n\n tree.fit(X_subset, y_subset, sample_weights_subset)\n else:\n tree.fit(X_set, y, sample_weight)\n\n return tree\n\n\nclass BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta):\n \"\"\"\n Base class for forests of trees.\n Warning: This class should not be used directly. Use derived classes\n instead.\n \"\"\"\n\n @abstractmethod\n def __init__(self,\n base_estimator,\n n_estimators=100, *,\n estimator_params=tuple(),\n bootstrap=False,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n class_weight=None,\n max_samples=None):\n super().__init__(\n base_estimator=base_estimator,\n n_estimators=n_estimators,\n estimator_params=estimator_params)\n\n self.bootstrap = bootstrap\n self.oob_score = oob_score\n self.n_jobs = n_jobs\n self.random_state = random_state\n self.verbose = verbose\n self.warm_start = warm_start\n self.class_weight = class_weight\n self.max_samples = max_samples\n\n def apply(self, X_set):\n \"\"\"\n Apply trees in the forest to X, return leaf indices.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n X_leaves : ndarray of shape (n_samples, n_estimators)\n For each datapoint x in X and for each tree in the forest,\n return the index of the leaf x ends up in.\n \"\"\"\n #X = self._validate_X_predict(X)\n results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(prefer=\"threads\"))(\n delayed(tree.apply)(X_set)\n for tree in self.estimators_)\n\n return np.array(results).T\n\n def decision_path(self, X_set):\n # todo currently not working\n\n \"\"\"\n Return the decision path in the forest.\n .. versionadded:: 0.18\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n indicator : sparse matrix of shape (n_samples, n_nodes)\n Return a node indicator matrix where non zero elements indicates\n that the samples goes through the nodes. 
The matrix is of CSR\n format.\n n_nodes_ptr : ndarray of shape (n_estimators + 1,)\n The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]\n gives the indicator value for the i-th estimator.\n \"\"\"\n #X = self._validate_X_predict(X)\n indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(prefer='threads'))(\n delayed(tree.decision_path)(X_set)\n for tree in self.estimators_)\n\n n_nodes = [0]\n n_nodes.extend([i.shape[1] for i in indicators])\n n_nodes_ptr = np.array(n_nodes).cumsum()\n\n return sparse_hstack(indicators).tocsr(), n_nodes_ptr\n\n def fit(self, X_set, y, sample_weight=None):\n # Validate or convert input data\n\n if issparse(y):\n raise ValueError(\n \"sparse multilabel-indicator for y is not supported.\"\n )\n # X, y = self._validate_data(X, y, multi_output=True,\n # accept_sparse=\"csc\", dtype=DTYPE)\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X_set)\n\n # Remap output\n self.n_features_ = X_set.shape[1]\n\n y = np.atleast_1d(y)\n if y.ndim == 2 and y.shape[1] == 1:\n warn(\"A column-vector y was passed when a 1d array was\"\n \" expected. Please change the shape of y to \"\n \"(n_samples,), for example using ravel().\",\n DataConversionWarning, stacklevel=2)\n\n if y.ndim == 1:\n # reshape is necessary to preserve the data contiguity against vs\n # [:, np.newaxis] that does not.\n y = np.reshape(y, (-1, 1))\n\n self.n_outputs_ = y.shape[1]\n\n y, expanded_class_weight = self._validate_y_class_weight(y)\n\n # todo: the default was to cast y into float - keep it with it's current dtype\n #if getattr(y, \"dtype\", None) != DOUBLE or not y.flags.contiguous:\n # y = np.ascontiguousarray(y, dtype=DOUBLE)\n\n if expanded_class_weight is not None:\n if sample_weight is not None:\n sample_weight = sample_weight * expanded_class_weight\n else:\n sample_weight = expanded_class_weight\n\n # Get bootstrap sample size\n n_samples_bootstrap = _get_n_samples_bootstrap(\n n_samples=X_set.shape[0],\n max_samples=self.max_samples\n )\n\n # Check parameters\n self._validate_estimator()\n\n if not self.bootstrap and self.oob_score:\n raise ValueError(\"Out of bag estimation only available\"\n \" if bootstrap=True\")\n\n random_state = check_random_state(self.random_state)\n\n if not self.warm_start or not hasattr(self, \"estimators_\"):\n # Free allocated memory, if any\n self.estimators_ = []\n\n n_more_estimators = self.n_estimators - len(self.estimators_)\n\n if n_more_estimators < 0:\n raise ValueError('n_estimators=%d must be larger or equal to '\n 'len(estimators_)=%d when warm_start==True'\n % (self.n_estimators, len(self.estimators_)))\n\n elif n_more_estimators == 0:\n warn(\"Warm-start fitting without increasing n_estimators does not \"\n \"fit new trees.\")\n else:\n if self.warm_start and len(self.estimators_) > 0:\n # We draw from the random state to get the random state we\n # would have got if we hadn't used a warm_start.\n random_state.randint(MAX_INT, size=len(self.estimators_))\n\n trees = [self._make_estimator(append=False,\n random_state=random_state)\n for i in range(n_more_estimators)]\n\n # Parallel loop: we prefer the threading backend as the Cython code\n # for fitting the trees is internally releasing the Python GIL\n # making threading more efficient than multiprocessing in\n # that case. 
However, for joblib 0.12+ we respect any\n # parallel_backend contexts set at a higher level,\n # since correctness does not rely on using threads.\n trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(prefer='threads'))(\n delayed(_parallel_build_trees)(\n t, self, X_set, y, sample_weight, i, len(trees),\n verbose=self.verbose, class_weight=self.class_weight,\n n_samples_bootstrap=n_samples_bootstrap)\n for i, t in enumerate(trees))\n\n # Collect newly grown trees\n self.estimators_.extend(trees)\n\n if self.oob_score:\n self._set_oob_score(X_set, y)\n\n # Decapsulate classes_ attributes\n if hasattr(self, \"classes_\") and self.n_outputs_ == 1:\n self.n_classes_ = self.n_classes_[0]\n self.classes_ = self.classes_[0]\n\n return self\n\n @abstractmethod\n def _set_oob_score(self, X_set, y):\n \"\"\"\n Calculate out of bag predictions and score.\"\"\"\n\n def _validate_y_class_weight(self, y):\n # Default implementation\n return y, None\n\n def _validate_X_predict(self, X):\n \"\"\"\n Validate X whenever one tries to predict, apply, predict_proba.\"\"\"\n check_is_fitted(self)\n\n return self.estimators_[0]._validate_X_predict(X, check_input=True)\n\n @property\n def feature_importances_(self):\n \"\"\"\n The impurity-based feature importances.\n The higher, the more important the feature.\n The importance of a feature is computed as the (normalized)\n total reduction of the criterion brought by that feature. It is also\n known as the Gini importance.\n Warning: impurity-based feature importances can be misleading for\n high cardinality features (many unique values). See\n :func:`sklearn.inspection.permutation_importance` as an alternative.\n Returns\n -------\n feature_importances_ : ndarray of shape (n_features,)\n The values of this array sum to 1, unless all trees are single node\n trees consisting of only the root node, in which case it will be an\n array of zeros.\n \"\"\"\n check_is_fitted(self)\n\n all_importances = Parallel(n_jobs=self.n_jobs,\n **_joblib_parallel_args(prefer='threads'))(\n delayed(getattr)(tree, 'feature_importances_')\n for tree in self.estimators_ if tree.tree_.node_count > 1)\n\n if not all_importances:\n return np.zeros(self.n_features_, dtype=np.float64)\n\n all_importances = np.mean(all_importances,\n axis=0, dtype=np.float64)\n return all_importances / np.sum(all_importances)\n\n\ndef _accumulate_prediction(predict, X_set, out, lock):\n \"\"\"\n This is a utility function for joblib's Parallel.\n It can't go locally in ForestClassifier or ForestRegressor, because joblib\n complains that it cannot pickle it when placed there.\n \"\"\"\n prediction = predict(X_set)\n with lock:\n if len(out) == 1:\n out[0] += prediction\n else:\n for i in range(len(out)):\n out[i] += prediction[i]\n\n\nclass SetForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta):\n \"\"\"\n Base class for forest of trees-based classifiers.\n Warning: This class should not be used directly. 
Use derived classes\n instead.\n \"\"\"\n\n @abstractmethod\n def __init__(self,\n base_estimator,\n n_estimators=100, *,\n estimator_params=tuple(),\n bootstrap=False,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n class_weight=None,\n max_samples=None):\n super().__init__(\n base_estimator,\n n_estimators=n_estimators,\n estimator_params=estimator_params,\n bootstrap=bootstrap,\n oob_score=oob_score,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n warm_start=warm_start,\n class_weight=class_weight,\n max_samples=max_samples)\n\n def _set_oob_score(self, X_set, y):\n \"\"\"\n Compute out-of-bag score.\"\"\"\n #X = check_array(X, dtype=DTYPE, accept_sparse='csr')\n\n n_classes_ = self.n_classes_\n n_samples = y.shape[0]\n\n oob_decision_function = []\n oob_score = 0.0\n predictions = [np.zeros((n_samples, n_classes_[k]))\n for k in range(self.n_outputs_)]\n\n n_samples_bootstrap = _get_n_samples_bootstrap(\n n_samples, self.max_samples\n )\n\n for estimator in self.estimators_:\n unsampled_indices = _generate_unsampled_indices(\n estimator.random_state, n_samples, n_samples_bootstrap)\n X_subsample = X_set.get_subset(unsampled_indices)\n p_estimator = estimator.predict_proba(X_subsample)\n\n if self.n_outputs_ == 1:\n p_estimator = [p_estimator]\n\n for k in range(self.n_outputs_):\n predictions[k][unsampled_indices, :] += p_estimator[k]\n\n for k in range(self.n_outputs_):\n if (predictions[k].sum(axis=1) == 0).any():\n warn(\"Some inputs do not have OOB scores. \"\n \"This probably means too few trees were used \"\n \"to compute any reliable oob estimates.\")\n\n decision = (predictions[k] /\n predictions[k].sum(axis=1)[:, np.newaxis])\n oob_decision_function.append(decision)\n oob_score += np.mean(y[:, k] ==\n np.argmax(predictions[k], axis=1), axis=0)\n\n if self.n_outputs_ == 1:\n self.oob_decision_function_ = oob_decision_function[0]\n else:\n self.oob_decision_function_ = oob_decision_function\n\n self.oob_score_ = oob_score / self.n_outputs_\n\n def _validate_y_class_weight(self, y):\n check_classification_targets(y)\n\n y = np.copy(y)\n expanded_class_weight = None\n\n if self.class_weight is not None:\n y_original = np.copy(y)\n\n self.classes_ = []\n self.n_classes_ = []\n\n y_store_unique_indices = np.zeros(y.shape, dtype=np.int)\n for k in range(self.n_outputs_):\n classes_k, y_store_unique_indices[:, k] = \\\n np.unique(y[:, k], return_inverse=True)\n self.classes_.append(classes_k)\n self.n_classes_.append(classes_k.shape[0])\n y = y_store_unique_indices\n\n if self.class_weight is not None:\n valid_presets = ('balanced', 'balanced_subsample')\n if isinstance(self.class_weight, str):\n if self.class_weight not in valid_presets:\n raise ValueError('Valid presets for class_weight include '\n '\"balanced\" and \"balanced_subsample\".'\n 'Given \"%s\".'\n % self.class_weight)\n if self.warm_start:\n warn('class_weight presets \"balanced\" or '\n '\"balanced_subsample\" are '\n 'not recommended for warm_start if the fitted data '\n 'differs from the full dataset. In order to use '\n '\"balanced\" weights, use compute_class_weight '\n '(\"balanced\", classes, y). In place of y you can use '\n 'a large enough sample of the full training set '\n 'target to properly estimate the class frequency '\n 'distributions. 
Pass the resulting weights as the '\n 'class_weight parameter.')\n\n if (self.class_weight != 'balanced_subsample' or\n not self.bootstrap):\n if self.class_weight == \"balanced_subsample\":\n class_weight = \"balanced\"\n else:\n class_weight = self.class_weight\n expanded_class_weight = compute_sample_weight(class_weight,\n y_original)\n\n return y, expanded_class_weight\n\n def predict(self, X_set):\n \"\"\"\n Predict class for X.\n The predicted class of an input sample is a vote by the trees in\n the forest, weighted by their probability estimates. That is,\n the predicted class is the one with highest mean probability\n estimate across the trees.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n y : ndarray of shape (n_samples,) or (n_samples, n_outputs)\n The predicted classes.\n \"\"\"\n proba = self.predict_proba(X_set)\n\n if self.n_outputs_ == 1:\n return self.classes_.take(np.argmax(proba, axis=1), axis=0)\n\n else:\n n_samples = proba[0].shape[0]\n # all dtypes should be the same, so just take the first\n class_type = self.classes_[0].dtype\n predictions = np.empty((n_samples, self.n_outputs_),\n dtype=class_type)\n\n for k in range(self.n_outputs_):\n predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],\n axis=1),\n axis=0)\n\n return predictions\n\n def predict_proba(self, X_set):\n \"\"\"\n Predict class probabilities for X.\n The predicted class probabilities of an input sample are computed as\n the mean predicted class probabilities of the trees in the forest.\n The class probability of a single tree is the fraction of samples of\n the same class in a leaf.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes), or a list of n_outputs\n such arrays if n_outputs > 1.\n The class probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n \"\"\"\n check_is_fitted(self)\n # Check data\n # X = self._validate_X_predict(X)\n\n # Assign chunk of trees to jobs\n n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)\n\n # avoid storing the output of every estimator by summing them here\n all_proba = [np.zeros((X_set.shape[0], j), dtype=np.float64)\n for j in np.atleast_1d(self.n_classes_)]\n lock = threading.Lock()\n Parallel(n_jobs=n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(require=\"sharedmem\"))(\n delayed(_accumulate_prediction)(e.predict_proba, X_set, all_proba,\n lock)\n for e in self.estimators_)\n\n for proba in all_proba:\n proba /= len(self.estimators_)\n\n if len(all_proba) == 1:\n return all_proba[0]\n else:\n return all_proba\n\n def predict_log_proba(self, X_set):\n \"\"\"\n Predict class log-probabilities for X.\n The predicted class log-probabilities of an input sample is computed as\n the log of the mean predicted class probabilities of the trees in the\n forest.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. 
If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n p : ndarray of shape (n_samples, n_classes), or a list of n_outputs\n such arrays if n_outputs > 1.\n The class probabilities of the input samples. The order of the\n classes corresponds to that in the attribute :term:`classes_`.\n \"\"\"\n proba = self.predict_proba(X_set)\n\n if self.n_outputs_ == 1:\n return np.log(proba)\n\n else:\n for k in range(self.n_outputs_):\n proba[k] = np.log(proba[k])\n\n return proba\n\n\nclass SetForestRegressor(RegressorMixin, BaseForest, metaclass=ABCMeta):\n \"\"\"\n Base class for forest of trees-based regressors.\n Warning: This class should not be used directly. Use derived classes\n instead.\n \"\"\"\n\n @abstractmethod\n def __init__(self,\n base_estimator,\n n_estimators=100, *,\n estimator_params=tuple(),\n bootstrap=False,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n max_samples=None):\n super().__init__(\n base_estimator,\n n_estimators=n_estimators,\n estimator_params=estimator_params,\n bootstrap=bootstrap,\n oob_score=oob_score,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n warm_start=warm_start,\n max_samples=max_samples)\n\n def predict(self, X_set):\n \"\"\"\n Predict regression target for X.\n The predicted regression target of an input sample is computed as the\n mean predicted regression targets of the trees in the forest.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input samples. Internally, its dtype will be converted to\n ``dtype=np.float32``. If a sparse matrix is provided, it will be\n converted into a sparse ``csr_matrix``.\n Returns\n -------\n y : ndarray of shape (n_samples,) or (n_samples, n_outputs)\n The predicted values.\n \"\"\"\n check_is_fitted(self)\n # Check data\n # X = self._validate_X_predict(X)\n\n # Assign chunk of trees to jobs\n n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)\n\n # avoid storing the output of every estimator by summing them here\n if self.n_outputs_ > 1:\n y_hat = np.zeros((X_set.shape[0], self.n_outputs_), dtype=np.float64)\n else:\n y_hat = np.zeros((X_set.shape[0]), dtype=np.float64)\n\n # Parallel loop\n lock = threading.Lock()\n Parallel(n_jobs=n_jobs, verbose=self.verbose,\n **_joblib_parallel_args(require=\"sharedmem\"))(\n delayed(_accumulate_prediction)(e.predict, X_set, [y_hat], lock)\n for e in self.estimators_)\n\n y_hat /= len(self.estimators_)\n\n return y_hat\n\n def _set_oob_score(self, X_set, y):\n \"\"\"\n Compute out-of-bag scores.\"\"\"\n # X = check_array(X, dtype=DTYPE, accept_sparse='csr')\n\n n_samples = y.shape[0]\n\n predictions = np.zeros((n_samples, self.n_outputs_))\n n_predictions = np.zeros((n_samples, self.n_outputs_))\n\n n_samples_bootstrap = _get_n_samples_bootstrap(\n n_samples, self.max_samples\n )\n\n for estimator in self.estimators_:\n unsampled_indices = _generate_unsampled_indices(\n estimator.random_state, n_samples, n_samples_bootstrap)\n X_subset = X_set.get_subset(unsampled_indices)\n p_estimator = estimator.predict(X_subset)\n\n if self.n_outputs_ == 1:\n p_estimator = p_estimator[:, np.newaxis]\n\n predictions[unsampled_indices, :] += p_estimator\n n_predictions[unsampled_indices, :] += 1\n\n if (n_predictions == 0).any():\n warn(\"Some inputs do not have OOB scores. 
\"\n \"This probably means too few trees were used \"\n \"to compute any reliable oob estimates.\")\n n_predictions[n_predictions == 0] = 1\n\n predictions /= n_predictions\n self.oob_prediction_ = predictions\n\n if self.n_outputs_ == 1:\n self.oob_prediction_ = \\\n self.oob_prediction_.reshape((n_samples, ))\n\n self.oob_score_ = 0.0\n\n for k in range(self.n_outputs_):\n self.oob_score_ += r2_score(y[:, k],\n predictions[:, k])\n\n self.oob_score_ /= self.n_outputs_\n\n def _compute_partial_dependence_recursion(self, grid, target_features):\n \"\"\"Fast partial dependence computation.\n Parameters\n ----------\n grid : ndarray of shape (n_samples, n_target_features)\n The grid points on which the partial dependence should be\n evaluated.\n target_features : ndarray of shape (n_target_features)\n The set of target features for which the partial dependence\n should be evaluated.\n Returns\n -------\n averaged_predictions : ndarray of shape (n_samples,)\n The value of the partial dependence function on each grid point.\n \"\"\"\n grid = np.asarray(grid, dtype=DTYPE, order='C')\n averaged_predictions = np.zeros(shape=grid.shape[0],\n dtype=np.float64, order='C')\n\n for tree in self.estimators_:\n # Note: we don't sum in parallel because the GIL isn't released in\n # the fast method.\n tree.tree_.compute_partial_dependence(\n grid, target_features, averaged_predictions)\n # Average over the forest\n averaged_predictions /= len(self.estimators_)\n\n return averaged_predictions\n\n\nclass SetRandomForestClassifier(SetForestClassifier):\n @_deprecate_positional_args\n def __init__(self,\n n_estimators=100, *,\n criterion=\"gini\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n max_leaf_nodes=None,\n min_impurity_decrease=0.,\n min_impurity_split=None,\n splitter='xgboost',\n operations=OPERATIONS,\n use_attention_set=True,\n attention_set_limit=1,\n bootstrap=True,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n verbose=0,\n warm_start=False,\n class_weight=None,\n ccp_alpha=0.0,\n max_samples=None):\n super().__init__(\n base_estimator=SetTree(),\n n_estimators=n_estimators,\n estimator_params=tuple(SetTree().get_params()),\n bootstrap=bootstrap,\n oob_score=oob_score,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n warm_start=warm_start,\n class_weight=class_weight,\n max_samples=max_samples)\n\n self.criterion = criterion\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.max_features = max_features\n self.max_leaf_nodes = max_leaf_nodes\n self.min_impurity_decrease = min_impurity_decrease\n self.min_impurity_split = min_impurity_split\n self.operations = operations\n self.splitter = splitter\n self.use_attention_set = use_attention_set\n self.attention_set_limit = attention_set_limit\n self.classifier = True\n self.ccp_alpha = ccp_alpha\n\n\nclass SetRandomForestRegressor(SetForestRegressor):\n @_deprecate_positional_args\n def __init__(self,\n n_estimators=100, *,\n criterion=\"mse\",\n max_depth=None,\n min_samples_split=2,\n min_samples_leaf=1,\n min_weight_fraction_leaf=0.,\n max_features=\"auto\",\n max_leaf_nodes=None,\n min_impurity_decrease=0.,\n min_impurity_split=None,\n splitter='xgboost',\n operations=OPERATIONS,\n use_attention_set=True,\n attention_set_limit=1,\n bootstrap=True,\n oob_score=False,\n n_jobs=None,\n random_state=None,\n 
verbose=0,\n warm_start=False,\n ccp_alpha=0.0,\n max_samples=None):\n super().__init__(\n base_estimator=SetTree(),\n n_estimators=n_estimators,\n estimator_params=tuple(SetTree().get_params()),\n bootstrap=bootstrap,\n oob_score=oob_score,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose,\n warm_start=warm_start,\n max_samples=max_samples)\n\n self.criterion = criterion\n self.max_depth = max_depth\n self.min_samples_split = min_samples_split\n self.min_samples_leaf = min_samples_leaf\n self.min_weight_fraction_leaf = min_weight_fraction_leaf\n self.max_features = max_features\n self.max_leaf_nodes = max_leaf_nodes\n self.min_impurity_decrease = min_impurity_decrease\n self.min_impurity_split = min_impurity_split\n self.operations = operations\n self.splitter = splitter\n self.use_attention_set = use_attention_set\n self.attention_set_limit = attention_set_limit\n self.classifier = False\n self.ccp_alpha = ccp_alpha\n"
] | [
[
"sklearn.utils.validation.check_is_fitted",
"numpy.ones",
"numpy.sum",
"numpy.asarray",
"numpy.copy",
"numpy.log",
"sklearn.ensemble._base._partition_estimators",
"sklearn.utils.multiclass.check_classification_targets",
"sklearn.utils.fixes._joblib_parallel_args",
"sklearn.utils.validation._check_sample_weight",
"numpy.reshape",
"numpy.unique",
"numpy.mean",
"sklearn.utils.check_random_state",
"numpy.bincount",
"numpy.zeros",
"numpy.argmax",
"numpy.arange",
"sklearn.metrics.r2_score",
"numpy.empty",
"scipy.sparse.issparse",
"numpy.atleast_1d",
"sklearn.utils.compute_sample_weight",
"numpy.iinfo",
"numpy.array",
"scipy.sparse.hstack"
]
] |
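The record above adapts scikit-learn's forest machinery to set-valued inputs (`SetForestClassifier` / `SetForestRegressor` built on `SetTree` estimators). The sketch below isolates only the shared-memory prediction-averaging pattern that its `predict_proba` uses: each estimator adds its class probabilities into a shared buffer under a lock via joblib, and the buffer is divided by the number of estimators. Plain `DecisionTreeClassifier` estimators and the `accumulate` helper are stand-ins for illustration, not part of the original module.

```python
# Minimal sketch of the shared-memory averaging used by predict_proba above.
# DecisionTreeClassifier and the `accumulate` helper are stand-ins; the real
# module uses SetTree estimators and set-valued inputs.
import threading

import numpy as np
from joblib import Parallel, delayed
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, n_features=8, random_state=0)
estimators = [
    DecisionTreeClassifier(max_depth=3, random_state=i).fit(X, y) for i in range(10)
]


def accumulate(predict, X, out, lock):
    """Add one estimator's class probabilities into the shared buffer."""
    proba = predict(X)
    with lock:
        out[0] += proba


all_proba = [np.zeros((X.shape[0], 2), dtype=np.float64)]
lock = threading.Lock()
Parallel(n_jobs=2, require="sharedmem")(
    delayed(accumulate)(est.predict_proba, X, all_proba, lock) for est in estimators
)
averaged = all_proba[0] / len(estimators)
print(averaged[:3])  # forest-averaged class probabilities for the first rows
```

The `require="sharedmem"` hint makes joblib run the workers in threads against the same NumPy buffers, which is why the explicit lock around the in-place addition is needed.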
loevlie/ce_expansion | [
"17417b9467914dd91ee8e0325cfdc3bd19ad7f1e"
] | [
"example/ex_4_phase_diagrams/individual_size_plots.py"
] | [
"import collections\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tick\nimport numpy as np\n\ndata = os.path.join(os.path.realpath(__file__), '..', '..', '..', 'data', 'larson_et_al')\nsys.path.append(data)\nimport ce_expansion.npdb.db_inter\n\nDEFAULT_DPI = 600 # Dots per inch\nDEFAULT_POINTSIZE = 15\nDEFAULT_MARKER = \"o\" # square\n\n\nclass Result(object):\n def __init__(self, shape, size, composition, excess_energy, temp):\n self.shape = shape\n self.size = size\n self.composition = composition\n self.excess_energy = excess_energy\n self.free_energy = self.get_free_energy_mix(temp)\n\n def get_free_energy_mix(self, T):\n \"\"\"\n Calculates Excess energies plus an entropic contribution.\n\n :param excess_energy: Excess energies from DB query\n :param comp: Compositions from DB query\n :param T: Temperature\n\n :return: Free energy of mixing = excess energy (related to enthalpy of mixing) - entropy of mixing\n \"\"\"\n\n if self.composition == 1 or self.composition == 0:\n return 0\n\n # k_b T [eV] = (25.7 mEV at 298 K)\n kt = 25.7E-3 / 298 * T\n del_s = self.composition * np.log(self.composition) + (1 - self.composition) * np.log(1 - self.composition)\n del_s *= -kt\n\n free_energy = self.excess_energy - del_s\n return free_energy\n\n\nclass OrderedSet(collections.UserList):\n \"\"\"\n Wrapper around a list that allows it to operate somewhat like a set.\n \"\"\"\n\n def add(self, value):\n \"\"\"\n If the value passed in is not in the set, then adds it to the set. Otherwise, does nothing.\n\n :param value: The value to be added.\n \"\"\"\n if value in self.data:\n pass\n else:\n self.data.append(value)\n\n\ndef get_data(alloy,\n size,\n temperature):\n \"\"\"\n Gets data for phase diagram\n\n :param alloy: Alloy of interest\n :param size: Size to consider\n :param temperature: Temperature to use\n :return: results object.\n \"\"\"\n\n # Book-keeping and initialization\n shapes = [\"icosahedron\", \"cuboctahedron\", \"elongated-pentagonal-bipyramid\"]\n\n # DB Query\n results = []\n for shape in shapes:\n query = ce_expansion.npdb.db_inter.get_bimet_result(metals=alloy, shape=shape, num_atoms=size)\n for result in query:\n # Calculate composition\n composition = result.n_metal1 / result.num_atoms\n # Calculate EE\n excess_energy = result.EE\n # Add to the list of results objects\n results.append(Result(shape, size, composition, excess_energy, temperature))\n return results\n\n\ndef make_plot(results, axis, size):\n \"\"\"\n Plots some results, y'know?\n\n :param results: A list of Results objects containing the shape, composition, and free energy of mixing\n :param axis: Pyplot axis to plot to\n :param size: size\n :return: None. 
Drops the plot in the working directory.\n \"\"\"\n # Split into 3 lists, for icosahedrons, cubs, and epbs\n # Each list is of the format (composition, free energy of mixing)\n icos = []\n cubs = []\n epbs = []\n types = {\"icosahedron\": icos,\n \"cuboctahedron\": cubs,\n \"elongated-pentagonal-bipyramid\": epbs}\n\n colors = {\"icosahedron\": \"red\",\n \"cuboctahedron\": \"blue\",\n \"elongated-pentagonal-bipyramid\": \"green\"}\n for result in results:\n types[result.shape].append((result.composition, result.free_energy, colors[result.shape]))\n\n for shape in [icos, cubs, epbs]:\n x = [i[0] * 100 for i in shape]\n y = [i[1] for i in shape]\n color = shape[0][2]\n axis.plot(x, y, color)\n\n # Label size\n axis.text(0.9, 0.5, f\"N={size}\", transform=axis.transAxes, size=20)\n\n\nalloys = [\"AgCu\"]#[\"AgAu\", \"AuCu\", \"AgCu\"]\nfor alloy in alloys:\n tens_sizes = [3871, 2869, 2057, 1415, 561] # sizes where we skipped 10% increments\n all_sizes = [309, 147, 55, 13] # sizes where we looked at all possible compositions\n\n for sizes in [tens_sizes, all_sizes]:\n fig, axes = plt.subplots(nrows=5, ncols=1, sharex=True, sharey=True)\n ymin = 0\n ymax = 0\n for plot_index, size in enumerate(sizes):\n # Query\n results = get_data(alloy, size, 250)\n results.sort(key=lambda i: i.composition)\n\n # Plot\n make_plot(results, axes[abs(plot_index)], size)\n\n # plot labels\n fig.text(0.5, 0.04, \"Composition (%)\", ha=\"center\", size=20)\n fig.text(0, 0.5, \"Free Energy of Mixing (eV/atom)\", va=\"center\", rotation=\"vertical\", size=20)\n fig.text(0.5, 0.95, f\"{alloy} @ 250K\", size=25, ha=\"center\")\n\n # Tickmarks\n plt.xlim(0, 100)\n ylimits = {\"AgAu\": [-0.1, 0],\n \"AgCu\": [-0.1+0.025, 0.025],\n \"AuCu\": [-0.3, 0]}\n\n ymin = ylimits[alloy][0]\n ymax = ylimits[alloy][1]\n\n plt.ylim(ymin, ymax)\n for axis in axes:\n # Set up X tickmarks\n axis.tick_params(axis=\"x\", labelsize=15)\n axis.xaxis.set_major_locator(tick.MultipleLocator(20))\n axis.xaxis.set_major_formatter(tick.FormatStrFormatter(\"%d\"))\n axis.xaxis.set_minor_locator(tick.MultipleLocator(10))\n axis.xaxis.grid(True, which='major')\n\n # Set up Y tickmarks\n axis.tick_params(axis=\"y\", labelsize=15)\n axis.yaxis.set_major_locator(tick.MultipleLocator((ymax - ymin) / 2))\n axis.yaxis.set_major_formatter(tick.FormatStrFormatter(\"%2.2f\"))\n axis.yaxis.set_minor_locator(tick.MultipleLocator((ymax - ymin) / 4))\n\n # Save and quit\n plt.savefig(f\"{alloy},{sizes[-1]}-{sizes[0]}.png\")\n plt.close()\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"numpy.log",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ylim",
"matplotlib.ticker.MultipleLocator"
]
] |
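The plotting script in the record above derives its y-axis from `Result.get_free_energy_mix`. As a quick reference, here is that formula as a standalone sketch: the free energy of mixing is the excess energy (an enthalpy-like quantity from the database query) minus T times the ideal entropy of mixing, with k_B expressed as 25.7 meV at 298 K, exactly as in the source. The excess-energy value passed in at the end is made up purely for illustration.

```python
# Standalone sketch of Result.get_free_energy_mix above: G_mix = EE - T*dS_mix,
# with k_B expressed as 25.7 meV at 298 K. The excess-energy value in the call
# is made up for illustration.
import numpy as np


def free_energy_mix(excess_energy, composition, temperature):
    """Excess energy (enthalpy-like, eV/atom) minus the ideal entropy-of-mixing term."""
    if composition in (0, 1):
        return 0.0
    kt = 25.7e-3 / 298 * temperature  # k_B * T in eV
    entropy_term = -kt * (
        composition * np.log(composition)
        + (1 - composition) * np.log(1 - composition)
    )
    return excess_energy - entropy_term


print(free_energy_mix(excess_energy=-0.05, composition=0.4, temperature=250))
```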
aerdem4/cuml | [
"088763cda9fd5e363af092b1d05c155f256cf0d7"
] | [
"python/cuml/benchmark/datagen.py"
] | [
"# Copyright (c) 2019, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Data generators for cuML benchmarks\n\nThe main entry point for consumers is gen_data, which\nwraps the underlying data generators.\n\nNotes when writing new generators:\n\nEach generator is a function that accepts:\n * n_samples (set to 0 for 'default')\n * n_features (set to 0 for 'default')\n * random_state\n * (and optional generator-specific parameters)\n\nThe function should return a 2-tuple (X, y), where X is a Pandas\ndataframe and y is a Pandas series. If the generator does not produce\nlabels, it can return (X, None)\n\nA set of helper functions (convert_*) can convert these to alternative\nformats. Future revisions may support generating cudf dataframes or\nGPU arrays directly instead.\n\n\"\"\"\n\nimport cudf\nimport gzip\nimport functools\nimport numpy as np\nimport os\nimport pandas as pd\n\nimport cuml.datasets\nimport sklearn.model_selection\n\nfrom urllib.request import urlretrieve\nfrom cuml.common import input_utils\nfrom numba import cuda\n\n\ndef _gen_data_regression(n_samples, n_features, random_state=42):\n \"\"\"Wrapper for sklearn make_regression\"\"\"\n if n_samples == 0:\n n_samples = int(1e6)\n if n_features == 0:\n n_features = 100\n X_arr, y_arr = cuml.datasets.make_regression(\n n_samples=n_samples, n_features=n_features, random_state=random_state)\n return cudf.DataFrame(X_arr), cudf.Series(y_arr)\n\n\ndef _gen_data_blobs(n_samples, n_features, random_state=42, centers=None):\n \"\"\"Wrapper for sklearn make_blobs\"\"\"\n if n_samples == 0:\n n_samples = int(1e6)\n if n_features == 0:\n n_samples = 100\n X_arr, y_arr = cuml.datasets.make_blobs(\n n_samples=n_samples, n_features=n_features, centers=centers,\n random_state=random_state)\n print(type(X_arr), type(y_arr))\n return (\n cudf.DataFrame(X_arr.astype(np.float32)),\n cudf.Series(y_arr.astype(np.float32)),\n )\n\n\ndef _gen_data_zeros(n_samples, n_features, random_state=42):\n \"\"\"Dummy generator for use in testing - returns all 0s\"\"\"\n return (\n cudf.DataFrame(np.zeros((n_samples, n_features), dtype=np.float32)),\n cudf.Series(np.zeros(n_samples, dtype=np.float32)),\n )\n\n\ndef _gen_data_classification(\n n_samples, n_features, random_state=42, n_classes=2\n):\n \"\"\"Wrapper for sklearn make_blobs\"\"\"\n if n_samples == 0:\n n_samples = int(1e6)\n if n_features == 0:\n n_samples = 100\n\n X_arr, y_arr = cuml.datasets.make_classification(\n n_samples=n_samples, n_features=n_features, n_classes=n_classes,\n random_state=random_state)\n\n return (\n cudf.DataFrame(X_arr.astype(np.float32)),\n cudf.Series(y_arr.astype(np.float32)),\n )\n\n\ndef _gen_data_higgs(n_samples=None, n_features=None, random_state=42):\n \"\"\"Wrapper returning Higgs in Pandas format\"\"\"\n X_df, y_df = load_higgs()\n if n_samples == 0:\n n_samples = X_df.shape[0]\n if n_features == 0:\n n_features = X_df.shape[1]\n if n_features > X_df.shape[1]:\n raise ValueError(\n \"Higgs dataset has only %d features, cannot support 
%d\"\n % (X_df.shape[1], n_features)\n )\n if n_samples > X_df.shape[0]:\n raise ValueError(\n \"Higgs dataset has only %d rows, cannot support %d\"\n % (X_df.shape[0], n_samples)\n )\n return X_df.iloc[:n_samples, :n_features], y_df.iloc[:n_samples]\n\n\ndef _download_and_cache(url, compressed_filepath, decompressed_filepath):\n if not os.path.isfile(compressed_filepath):\n urlretrieve(url, compressed_filepath)\n if not os.path.isfile(decompressed_filepath):\n cf = gzip.GzipFile(compressed_filepath)\n with open(decompressed_filepath, 'wb') as df:\n df.write(cf.read())\n return decompressed_filepath\n\n\n# Default location to cache datasets\nDATASETS_DIRECTORY = '.'\n\n\ndef load_higgs():\n \"\"\"Returns the Higgs Boson dataset as an X, y tuple of dataframes.\"\"\"\n higgs_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz' # noqa\n decompressed_filepath = _download_and_cache(\n higgs_url,\n os.path.join(DATASETS_DIRECTORY, \"HIGGS.csv.gz\"),\n os.path.join(DATASETS_DIRECTORY, \"HIGGS.csv\"),\n )\n col_names = ['label'] + [\n \"col-{}\".format(i) for i in range(2, 30)\n ] # Assign column names\n dtypes_ls = [np.int32] + [\n np.float32 for _ in range(2, 30)\n ] # Assign dtypes to each column\n data_df = pd.read_csv(\n decompressed_filepath, names=col_names,\n dtype={k: v for k, v in zip(col_names, dtypes_ls)}\n )\n X_df = data_df[data_df.columns.difference(['label'])]\n y_df = data_df['label']\n return cudf.DataFrame.from_pandas(X_df), cudf.Series.from_pandas(y_df)\n\n\ndef _convert_to_numpy(data):\n \"\"\"Returns tuple data with all elements converted to numpy ndarrays\"\"\"\n if data is None:\n return None\n elif isinstance(data, tuple):\n return tuple([_convert_to_numpy(d) for d in data])\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, cudf.DataFrame):\n return data.as_matrix()\n elif isinstance(data, cudf.Series):\n return data.to_array()\n elif isinstance(data, (pd.DataFrame, pd.Series)):\n return data.to_numpy()\n else:\n raise Exception(\"Unsupported type %s\" % str(type(data)))\n\n\ndef _convert_to_cudf(data):\n if data is None:\n return None\n elif isinstance(data, tuple):\n return tuple([_convert_to_cudf(d) for d in data])\n elif isinstance(data, (cudf.DataFrame, cudf.Series)):\n return data\n elif isinstance(data, pd.DataFrame):\n return cudf.DataFrame.from_pandas(data)\n elif isinstance(data, pd.Series):\n return cudf.Series.from_pandas(data)\n else:\n raise Exception(\"Unsupported type %s\" % str(type(data)))\n\n\ndef _convert_to_pandas(data):\n if data is None:\n return None\n elif isinstance(data, tuple):\n return tuple([_convert_to_pandas(d) for d in data])\n elif isinstance(data, (pd.DataFrame, pd.Series)):\n return data\n elif isinstance(data, (cudf.DataFrame, cudf.Series)):\n return data.to_pandas()\n else:\n raise Exception(\"Unsupported type %s\" % str(type(data)))\n\n\ndef _convert_to_gpuarray(data, order='F'):\n if data is None:\n return None\n elif isinstance(data, tuple):\n return tuple([_convert_to_gpuarray(d, order=order) for d in data])\n elif isinstance(data, pd.DataFrame):\n return _convert_to_gpuarray(cudf.DataFrame.from_pandas(data),\n order=order)\n elif isinstance(data, pd.Series):\n gs = cudf.Series.from_pandas(data)\n return cuda.as_cuda_array(gs)\n else:\n return input_utils.input_to_dev_array(data, order=order)[0]\n\n\ndef _convert_to_gpuarray_c(data):\n return _convert_to_gpuarray(data, order='C')\n\n\n_data_generators = {\n 'blobs': _gen_data_blobs,\n 'zeros': _gen_data_zeros,\n 
'classification': _gen_data_classification,\n 'regression': _gen_data_regression,\n 'higgs': _gen_data_higgs,\n}\n_data_converters = {\n 'numpy': _convert_to_numpy,\n 'cudf': _convert_to_cudf,\n 'pandas': _convert_to_pandas,\n 'gpuarray': _convert_to_gpuarray,\n 'gpuarray-c': _convert_to_gpuarray_c,\n}\n\n\ndef all_datasets():\n return _data_generators\n\n\[email protected]_cache(maxsize=8)\ndef gen_data(\n dataset_name,\n dataset_format,\n n_samples=0,\n n_features=0,\n random_state=42,\n test_fraction=0.0,\n **kwargs\n):\n \"\"\"Returns a tuple of data from the specified generator.\n\n Output\n -------\n (train_features, train_labels, test_features, test_labels) tuple\n containing matrices or dataframes of the requested format.\n test_features and test_labels may be None if no splitting was done.\n\n Parameters\n ----------\n dataset_name : str\n Dataset to use. Can be a synthetic generator (blobs or regression)\n or a specified dataset (higgs currently, others coming soon)\n\n dataset_format : str\n Type of data to return. (One of cudf, numpy, pandas, gpuarray)\n\n n_samples : int\n Number of samples to include in training set (regardless of test split)\n test_fraction : float\n Fraction of the dataset to partition randomly into the test set.\n If this is 0.0, no test set will be created.\n \"\"\"\n data = _data_generators[dataset_name](\n int(n_samples / (1 - test_fraction)),\n n_features,\n random_state,\n **kwargs\n )\n if test_fraction != 0.0:\n if n_samples == 0:\n n_samples = int(data[0].shape[0] * (1 - test_fraction))\n X_train, X_test, y_train, y_test = tuple(\n sklearn.model_selection.train_test_split(\n *data, train_size=n_samples, random_state=random_state\n )\n )\n data = (X_train, y_train, X_test, y_test)\n else:\n data = (*data, None, None) # No test set\n\n data = _data_converters[dataset_format](data)\n return data\n"
] | [
[
"numpy.zeros"
]
] |
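The cuML benchmark record above centers on `gen_data`, which generates or loads a dataset, optionally splits off a test fraction, and converts the result into the requested format. A hedged usage sketch follows; it assumes a CUDA-capable machine with cuml and cudf installed, that the module is importable as `cuml.benchmark.datagen` (matching this record's file path), and the sizes are illustrative only.

```python
# Hedged usage sketch for gen_data above. Assumes a CUDA-capable machine with
# cuml and cudf installed, and that the module is importable as
# cuml.benchmark.datagen (matching this record's file path). Sizes are
# illustrative.
from cuml.benchmark.datagen import gen_data

# Synthetic regression data returned as numpy arrays, with a 20% test split.
X_train, y_train, X_test, y_test = gen_data(
    "regression",
    "numpy",
    n_samples=10_000,
    n_features=20,
    random_state=42,
    test_fraction=0.2,
)
print(X_train.shape, X_test.shape)  # (10000, 20) (2500, 20)
```

With `test_fraction=0.2`, the underlying generator is asked for `n_samples / (1 - test_fraction)` rows so that the training split still contains the requested `n_samples`.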
rist-ro/argo | [
"a10c33346803239db8a64c104db7f22ec4e05bef"
] | [
"word_embeddings/test/core/readers.py"
] | [
"import numpy as np\nimport operator, os, itertools\nfrom abc import ABC, abstractmethod\nimport numexpr as ne\nne.set_num_threads(20)\n\ndef rmtxt(s):\n if s.endswith(\".txt\"):\n s=os.path.splitext(s)[0]\n return s\n\ndef get_reader(inputfilename):\n basename=os.path.basename(inputfilename)\n reader=None\n if basename.startswith('glove'):\n reader=GloVeEmbeddingsFileReader()\n elif basename.startswith('word2vec'):\n reader=Word2vecEmbeddingsFileReader()\n else:\n raise RuntimeError('the inputfilename \\'%s\\'does not start with either glove or word2vec so I do not know how to read the word embeddings'%basename)\n \n return reader\n\ndef read_selected_words(inputname):\n with open(inputname, 'r') as fin:\n words = [line.rstrip().split()[0] for line in fin.readlines()]\n return words\n\n\nclass EmbeddingsFileReader(ABC):\n \n @abstractmethod\n def preprocess(self, fin):\n \"\"\" what to do to the newly opened text file as preprocessing \"\"\"\n pass\n \n @abstractmethod\n def tuple_from_params(self, parameters):\n pass\n\n def read_dictionary(self, inputname):\n \"\"\" read the dictionary from inputfile \"\"\"\n with open(inputname, 'r') as fin:\n self.preprocess(fin)\n words = [line.rstrip().split(' ')[0] for line in fin.readlines()]\n return self.dicts_from_wordslist(words)\n\n def read_word_counts(self, inputname):\n \"\"\" read the word counts from inputfile \"\"\"\n with open(inputname, 'r') as fin:\n self.preprocess(fin)\n counts = [int(line.rstrip().split(' ')[1]) for line in fin.readlines()]\n return counts\n\n def dicts_from_wordslist(self, words):\n dictionary_size = len(words)\n dictionary = {w: idx for idx, w in enumerate(words)}\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return (dictionary_size, dictionary, reversed_dictionary)\n \n def unpack_split_line(self, line, vecsize, onlyu):\n arr=line.rstrip().split(' ')\n word=arr[0]\n parameters = np.array(arr[1:], dtype=np.float)\n \n return list(itertools.chain([word], self.tuple_from_params(parameters, vecsize, onlyu)))\n \n def read_embeddings(self, inputname, vecsize, consideronlyfirstvec, words_set=None):\n \"\"\" read the embeddings from inputfile \"\"\"\n with open(inputname, 'r') as fin:\n self.preprocess(fin)\n words, u_embeddings, v_embeddings = self.get_embeddings_fromfile(fin, vecsize, consideronlyfirstvec, words_set)\n \n dictionary_size, dictionary, reversed_dictionary = self.dicts_from_wordslist(words)\n \n # u_biases and v_biases are not returned at the moment since we do not know what to do with them\n return (dictionary_size, dictionary, reversed_dictionary, u_embeddings, v_embeddings)\n\n\nclass GloVeEmbeddingsFileReader(EmbeddingsFileReader):\n \n def preprocess(self, fin):\n pass\n \n def tuple_from_params(self, parameters, vecsize, onlyu):\n \n l=len(parameters)\n if l!=vecsize and l!=2*vecsize+2:\n raise ValueError(\"the vecsize passed is not compatible with the observation of line lenghts in the inputfile: line length = %s\"%l)\n \n u_w = parameters[:vecsize]\n if onlyu:\n bias_u = None\n v_w = None\n bias_v = None\n else:\n bias_u = parameters[vecsize]\n v_w = parameters[vecsize+1:-1]\n bias_v = parameters[-1]\n \n return (u_w,bias_u,v_w,bias_v)\n \n def get_embeddings_fromfile(self, filestream, vecsize, consideronlyfirstvec, words_set=None):\n words, u_embeddings, u_biases, v_embeddings, v_biases = \\\n zip(*[self.unpack_split_line(line, vecsize, consideronlyfirstvec) \\\n for line in filestream.readlines()])\n if words_set:\n words, u_embeddings, u_biases, v_embeddings, 
v_biases = zip(*[(w,uw,bu,vw,bv) for w,uw,bu,vw,bv in zip(words,u_embeddings,u_biases,v_embeddings,v_biases) if w in words_set])\n u_embeddings = np.array(u_embeddings)\n u_biases = np.array(u_biases)\n v_embeddings = np.array(v_embeddings)\n v_biases = np.array(v_biases)\n return (words, u_embeddings, v_embeddings)\n \n\nclass Word2vecEmbeddingsFileReader(EmbeddingsFileReader):\n \n def preprocess(self, fin):\n \"\"\" here I need to skip the header and the first word if it is <\\s> - (what is this tag that word2vec introduces?) \"\"\"\n fin.readline()\n word = fin.readline().split(' ')[0]\n if not word=='</s>':\n fin.seek(0)\n fin.readline()\n \n def tuple_from_params(self, parameters, vecsize, onlyu):\n l=len(parameters)\n if l!=vecsize and l!=2*vecsize:\n raise ValueError(\"the vecsize passed is not compatible with the observation of line lenghts in the inputfile: line length = %s\"%l)\n \n u_w = parameters[:vecsize]\n if onlyu:\n v_w = None\n else:\n v_w = parameters[vecsize:]\n \n return (u_w,v_w)\n \n def get_embeddings_fromfile(self, filestream, vecsize, consideronlyfirstvec, words_set=None):\n words, u_embeddings, v_embeddings = zip(*[self.unpack_split_line(line, vecsize, consideronlyfirstvec) for line in filestream.readlines()])\n if words_set:\n words, u_embeddings, v_embeddings = zip(*[(w,uw,vw) for w,uw,vw in zip(words,u_embeddings,v_embeddings) if w in words_set])\n u_embeddings = np.array(u_embeddings)\n v_embeddings = np.array(v_embeddings)\n return (words, u_embeddings, v_embeddings)\n \n\ndef extract_vocabulary_from(vocabfile):\n with open(vocabfile, 'r') as fin:\n vocab_words = [line.rstrip().split(' ')[0] for line in fin.readlines()]\n vocab_words=set(vocab_words)\n\n #\n #\n # def __init__(self, dictionary, howmany=10, amonghowmany=None):\n # self.dictionary=dictionary\n # self.dictionary_size=len(dictionary)\n # self.howmany=howmany\n # self.amonghowmany=amonghowmany\n # self.x_0 = np.sqrt(np.ones(self.dictionary_size)/self.dictionary_size)\n #\n # def word_analogy_measures(self, u_a, u_b, u_d, u_embeddings, v_embeddings, space=\"euclidean\"):\n # \"\"\" which vector uc_star in u_embeddings is the one with the highest analogy_measure? \"\"\"\n # if space==\"euclidean\":\n # analogy_measure=self.analogy_measure_euclidean\n # elif space==\"sphere_in_0\":\n # analogy_measure=self.analogy_measure_on_the_sphere_in0\n # elif space==\"sphere_in_a\":\n # analogy_measure=self.analogy_measure_on_the_sphere_ina\n # elif space==\"sphere_logmap\":\n # #follows the logmaps and query the nearest one\n # analogy_measure=self.analogy_measure_on_the_sphere_logmap\n # else:\n # raise ValueError(\"Unrecognized space argument in find_closest function. space was %s\"%space)\n #\n # uc_star = sorted([(i,analogy_measure(u_a, u_b, uc, u_d, v_embeddings)) for (i,uc) in enumerate(u_embeddings[:self.amonghowmany])], key=operator.itemgetter(1))[:self.howmany]\n # return uc_star\n\n # def analogy_nearby(self, word_a, word_b, word_d, u_embeddings, v_embeddings, space=\"euclidean\"):\n # \"\"\"given three words a,b,d I want to find c such that a:b=c:d.\"\"\"\n # try:\n # a=self.dictionary[word_a]\n # b=self.dictionary[word_b]\n # d=self.dictionary[word_d]\n # except KeyError as kerr:\n # print(\"\\nKey Error: {0}\".format(kerr))\n # print(\"The word requested is not present in the dictionary.\\n\")\n # sys.exit(-1)\n #\n # u_a, u_b, u_d = u_embeddings[a], u_embeddings[b], u_embeddings[d]\n #\n # #iam is indexes and analogy_measures ordered by analogy measures. 
list of (i, measure)\n # iam = self.word_analogy_measures(u_a, u_b, u_d, u_embeddings, v_embeddings, space)\n #\n # return iam\n #\n # #DEPRECATED here just for backward compatibility test\n #\n # def analogy_nearby_sphere_closest(self, word_a, word_b, word_d, u_embeddings, v_embeddings):\n # \"\"\"given three words a,b,d I want to find c such that a:b=c:d.\"\"\"\n # try:\n # a=self.dictionary[word_a]\n # b=self.dictionary[word_b]\n # d=self.dictionary[word_d]\n # except KeyError as kerr:\n # print(\"\\nKey Error: {0}\".format(kerr))\n # print(\"The word requested is not present in the dictionary.\\n\")\n # sys.exit(-1)\n #\n # x_target = self.follow_logmap_on_the_sphere(u_embeddings[a], u_embeddings[b], u_embeddings[d], v_embeddings)\n # x_embeddings = [send_u_to_x_on_the_sphere(u, v_embeddings) for u in u_embeddings]\n # ans = self.find_closest_euclidean(x_target, x_embeddings)\n # return ans\n #\n #\n\ndef print_array(arr):\n mw = max(len(w) for w,d in arr)\n for (w,d) in arr:\n print(\" \"+\"\\t\".join((w.ljust(mw),str(d))))\n\n\ndef write_hdf(x, table_name='embeddings', outputname=\"table_test.hdf\"):\n with tables.open_file(outputname, 'w') as f:\n atom = tables.Atom.from_dtype(x.dtype)\n\n vec_size = 300\n array_c = f.create_earray(f.root, table_name, atom, (0, vec_size))\n\n chunk_size = 500\n for i in range(0, 70000, chunk_size):\n f.root.embeddings.append(x[i: i + chunk_size])\n\n\ndef read_hdf(filename, table_name='embeddings'):\n with tables.open_file(filename) as f:\n # print(f.root.embeddings)\n x = f.root[table_name][:, :]\n\n return x\n"
] | [
[
"numpy.array"
]
] |
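The readers module in the record above parses GloVe- and word2vec-style text embedding files; the key detail is the per-line layout handled by each reader's `tuple_from_params`. The self-contained sketch below mirrors the GloVe case without importing the module: after the word, a line carries either `vecsize` floats (u vector only) or `2*vecsize + 2` floats laid out as u vector, u bias, v vector, v bias. The toy line and `vecsize` are illustrative only.

```python
# Self-contained sketch of the per-line layout GloVeEmbeddingsFileReader
# expects: after the word, either `vecsize` floats (u vector only) or
# `2*vecsize + 2` floats laid out as u vector, u bias, v vector, v bias.
# The toy line and vecsize below are illustrative.
import numpy as np

vecsize = 3
line = "king 0.1 0.2 0.3 0.05 0.4 0.5 0.6 0.07\n"  # word + 2*vecsize + 2 numbers

parts = line.rstrip().split(" ")
word, params = parts[0], np.array(parts[1:], dtype=float)
assert len(params) in (vecsize, 2 * vecsize + 2)

u_w = params[:vecsize]        # input ("u") embedding
bias_u = params[vecsize]      # u bias
v_w = params[vecsize + 1:-1]  # output ("v") embedding
bias_v = params[-1]           # v bias
print(word, u_w, bias_u, v_w, bias_v)
```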
aerisweather/rioxarray | [
"1755f90ed827ea66477a235677c1c5ecd245833d"
] | [
"rioxarray/_io.py"
] | [
"\"\"\"\n\nCredits:\n\nThis file was adopted from: https://github.com/pydata/xarray # noqa\nSource file: https://github.com/pydata/xarray/blob/1d7bcbdc75b6d556c04e2c7d7a042e4379e15303/xarray/backends/rasterio_.py # noqa\n\"\"\"\n\nimport contextlib\nimport os\nimport re\nimport threading\nimport warnings\n\nimport numpy as np\nimport rasterio\nfrom packaging import version\nfrom rasterio.errors import NotGeoreferencedWarning\nfrom rasterio.vrt import WarpedVRT\nfrom xarray import Dataset, IndexVariable\nfrom xarray.backends.common import BackendArray\nfrom xarray.backends.file_manager import CachingFileManager, FileManager\nfrom xarray.backends.locks import SerializableLock\nfrom xarray.coding import times, variables\nfrom xarray.core import indexing\nfrom xarray.core.dataarray import DataArray\nfrom xarray.core.dtypes import maybe_promote\nfrom xarray.core.utils import is_scalar\nfrom xarray.core.variable import as_variable\n\nfrom rioxarray.exceptions import RioXarrayError\nfrom rioxarray.rioxarray import _generate_spatial_coords\n\n# TODO: should this be GDAL_LOCK instead?\nRASTERIO_LOCK = SerializableLock()\nNO_LOCK = contextlib.nullcontext()\n\n\nclass FileHandleLocal(threading.local):\n \"\"\"\n This contains the thread local ThreadURIManager\n \"\"\"\n\n def __init__(self): # pylint: disable=super-init-not-called\n self.thread_manager = None # Initialises in each thread\n\n\nclass ThreadURIManager:\n \"\"\"\n This handles opening & closing file handles in each thread.\n \"\"\"\n\n def __init__(\n self,\n opener,\n *args,\n mode=\"r\",\n kwargs=None,\n ):\n self._opener = opener\n self._args = args\n self._mode = mode\n self._kwargs = {} if kwargs is None else dict(kwargs)\n self._file_handle = None\n\n @property\n def file_handle(self):\n \"\"\"\n File handle returned by the opener.\n \"\"\"\n if self._file_handle is not None:\n return self._file_handle\n self._file_handle = self._opener(*self._args, mode=self._mode, **self._kwargs)\n return self._file_handle\n\n def close(self):\n \"\"\"\n Close file handle.\n \"\"\"\n if self._file_handle is not None:\n self._file_handle.close()\n self._file_handle = None\n\n def __del__(self):\n self.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, type_, value, traceback):\n self.close()\n\n\nclass URIManager(FileManager):\n \"\"\"\n The URI manager is used for lockless reading\n \"\"\"\n\n def __init__(\n self,\n opener,\n *args,\n mode=\"r\",\n kwargs=None,\n ):\n self._opener = opener\n self._args = args\n self._mode = mode\n self._kwargs = {} if kwargs is None else dict(kwargs)\n self._local = FileHandleLocal()\n\n def acquire(self, needs_lock=True):\n if self._local.thread_manager is None:\n self._local.thread_manager = ThreadURIManager(\n self._opener, *self._args, mode=self._mode, kwargs=self._kwargs\n )\n return self._local.thread_manager.file_handle\n\n @contextlib.contextmanager\n def acquire_context(self, needs_lock=True):\n try:\n yield self.acquire(needs_lock=needs_lock)\n except Exception:\n self.close(needs_lock=needs_lock)\n raise\n\n def close(self, needs_lock=True):\n if self._local.thread_manager is not None:\n self._local.thread_manager.close()\n self._local.thread_manager = None\n\n def __del__(self):\n self.close(needs_lock=False)\n\n def __getstate__(self):\n \"\"\"State for pickling.\"\"\"\n return (self._opener, self._args, self._mode, self._kwargs)\n\n def __setstate__(self, state):\n \"\"\"Restore from a pickle.\"\"\"\n opener, args, mode, kwargs = state\n self.__init__(opener, *args, mode=mode, 
kwargs=kwargs)\n\n\nclass RasterioArrayWrapper(BackendArray):\n \"\"\"A wrapper around rasterio dataset objects\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n\n def __init__(\n self,\n manager,\n lock,\n name,\n vrt_params=None,\n masked=False,\n mask_and_scale=False,\n unsigned=False,\n ):\n self.manager = manager\n self.lock = lock\n self.masked = masked or mask_and_scale\n self.mask_and_scale = mask_and_scale\n\n # cannot save riods as an attribute: this would break pickleability\n riods = manager.acquire()\n if vrt_params is not None:\n riods = WarpedVRT(riods, **vrt_params)\n self.vrt_params = vrt_params\n self._shape = (riods.count, riods.height, riods.width)\n\n self._dtype = None\n dtypes = riods.dtypes\n if not np.all(np.asarray(dtypes) == dtypes[0]):\n raise ValueError(\"All bands should have the same dtype\")\n\n dtype = _rasterio_to_numpy_dtype(dtypes)\n\n # handle unsigned case\n if mask_and_scale and unsigned and dtype.kind == \"i\":\n self._dtype = np.dtype(f\"u{dtype.itemsize}\")\n elif mask_and_scale and unsigned:\n warnings.warn(\n f\"variable {name!r} has _Unsigned attribute but is not \"\n \"of integer type. Ignoring attribute.\",\n variables.SerializationWarning,\n stacklevel=3,\n )\n self._fill_value = riods.nodata\n if self._dtype is None:\n if self.masked:\n self._dtype, self._fill_value = maybe_promote(dtype)\n else:\n self._dtype = dtype\n\n @property\n def dtype(self):\n \"\"\"\n Data type of the array\n \"\"\"\n return self._dtype\n\n @property\n def fill_value(self):\n \"\"\"\n Fill value of the array\n \"\"\"\n return self._fill_value\n\n @property\n def shape(self):\n \"\"\"\n Shape of the array\n \"\"\"\n return self._shape\n\n def _get_indexer(self, key):\n \"\"\"Get indexer for rasterio array.\n\n Parameter\n ---------\n key: tuple of int\n\n Returns\n -------\n band_key: an indexer for the 1st dimension\n window: two tuples. Each consists of (start, stop).\n squeeze_axis: axes to be squeezed\n np_ind: indexer for loaded numpy array\n\n See also\n --------\n indexing.decompose_indexer\n \"\"\"\n if len(key) != 3:\n raise RioXarrayError(\"rasterio datasets should always be 3D\")\n\n # bands cannot be windowed but they can be listed\n band_key = key[0]\n np_inds = []\n # bands (axis=0) cannot be windowed but they can be listed\n if isinstance(band_key, slice):\n start, stop, step = band_key.indices(self.shape[0])\n band_key = np.arange(start, stop, step)\n # be sure we give out a list\n band_key = (np.asarray(band_key) + 1).tolist()\n if isinstance(band_key, list): # if band_key is not a scalar\n np_inds.append(slice(None))\n\n # but other dims can only be windowed\n window = []\n squeeze_axis = []\n for iii, (ikey, size) in enumerate(zip(key[1:], self.shape[1:])):\n if isinstance(ikey, slice):\n # step is always positive. 
see indexing.decompose_indexer\n start, stop, step = ikey.indices(size)\n np_inds.append(slice(None, None, step))\n elif is_scalar(ikey):\n # windowed operations will always return an array\n # we will have to squeeze it later\n squeeze_axis.append(-(2 - iii))\n start = ikey\n stop = ikey + 1\n else:\n start, stop = np.min(ikey), np.max(ikey) + 1\n np_inds.append(ikey - start)\n window.append((start, stop))\n\n if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):\n # do outer-style indexing\n np_inds[-2:] = np.ix_(*np_inds[-2:])\n\n return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)\n\n def _getitem(self, key):\n band_key, window, squeeze_axis, np_inds = self._get_indexer(key)\n\n if not band_key or any(start == stop for (start, stop) in window):\n # no need to do IO\n shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)\n out = np.zeros(shape, dtype=self.dtype)\n else:\n with self.lock:\n riods = self.manager.acquire(needs_lock=False)\n if self.vrt_params is not None:\n riods = WarpedVRT(riods, **self.vrt_params)\n out = riods.read(band_key, window=window, masked=self.masked)\n if self.masked:\n out = np.ma.filled(out.astype(self.dtype), self.fill_value)\n if self.mask_and_scale:\n for iii, band_iii in enumerate(np.atleast_1d(band_key) - 1):\n out[iii] = (\n out[iii] * riods.scales[band_iii] + riods.offsets[band_iii]\n )\n\n if squeeze_axis:\n out = np.squeeze(out, axis=squeeze_axis)\n return out[np_inds]\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )\n\n\ndef _parse_envi(meta):\n \"\"\"Parse ENVI metadata into Python data structures.\n\n See the link for information on the ENVI header file format:\n http://www.harrisgeospatial.com/docs/enviheaderfiles.html\n\n Parameters\n ----------\n meta : dict\n Dictionary of keys and str values to parse, as returned by the rasterio\n tags(ns='ENVI') call.\n\n Returns\n -------\n parsed_meta : dict\n Dictionary containing the original keys and the parsed values\n\n \"\"\"\n\n def parsevec(value):\n return np.fromstring(value.strip(\"{}\"), dtype=\"float\", sep=\",\")\n\n def default(value):\n return value.strip(\"{}\")\n\n parse = {\"wavelength\": parsevec, \"fwhm\": parsevec}\n parsed_meta = {key: parse.get(key, default)(value) for key, value in meta.items()}\n return parsed_meta\n\n\ndef _rasterio_to_numpy_dtype(dtypes):\n \"\"\"Numpy dtype from first entry of rasterio dataset.dtypes\"\"\"\n # rasterio has some special dtype names (complex_int16 -> np.complex64)\n if dtypes[0] == \"complex_int16\":\n dtype = np.dtype(\"complex64\")\n else:\n dtype = np.dtype(dtypes[0])\n\n return dtype\n\n\ndef _to_numeric(value):\n \"\"\"\n Convert the value to a number\n \"\"\"\n try:\n value = int(value)\n except (TypeError, ValueError):\n try:\n value = float(value)\n except (TypeError, ValueError):\n pass\n return value\n\n\ndef _parse_tag(key, value):\n # NC_GLOBAL is appended to tags with netcdf driver and is not really needed\n key = key.split(\"NC_GLOBAL#\")[-1]\n if value.startswith(\"{\") and value.endswith(\"}\"):\n try:\n new_val = np.fromstring(value.strip(\"{}\"), dtype=\"float\", sep=\",\")\n # pylint: disable=len-as-condition\n value = new_val if len(new_val) else _to_numeric(value)\n except ValueError:\n value = _to_numeric(value)\n else:\n value = _to_numeric(value)\n return key, value\n\n\ndef _parse_tags(tags):\n parsed_tags = {}\n for key, value in tags.items():\n key, value = 
_parse_tag(key, value)\n parsed_tags[key] = value\n return parsed_tags\n\n\nNETCDF_DTYPE_MAP = {\n 0: object, # NC_NAT\n 1: np.byte, # NC_BYTE\n 2: np.char, # NC_CHAR\n 3: np.short, # NC_SHORT\n 4: np.int_, # NC_INT, NC_LONG\n 5: float, # NC_FLOAT\n 6: np.double, # NC_DOUBLE\n 7: np.ubyte, # NC_UBYTE\n 8: np.ushort, # NC_USHORT\n 9: np.uint, # NC_UINT\n 10: np.int64, # NC_INT64\n 11: np.uint64, # NC_UINT64\n 12: object, # NC_STRING\n}\n\n\ndef _load_netcdf_attrs(tags, data_array):\n \"\"\"\n Loads the netCDF attributes into the data array\n\n Attributes stored in this format:\n - variable_name#attr_name: attr_value\n \"\"\"\n for key, value in tags.items():\n key, value = _parse_tag(key, value)\n key_split = key.split(\"#\")\n if len(key_split) != 2:\n continue\n variable_name, attr_name = key_split\n if variable_name in data_array.coords:\n data_array.coords[variable_name].attrs.update({attr_name: value})\n\n\ndef _load_netcdf_1d_coords(tags):\n \"\"\"\n Dimension information:\n - NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)\n - NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)\n - NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)\n \"\"\"\n dim_names = tags.get(\"NETCDF_DIM_EXTRA\")\n if not dim_names:\n return {}\n dim_names = dim_names.strip(\"{}\").split(\",\")\n coords = {}\n for dim_name in dim_names:\n dim_def = tags.get(f\"NETCDF_DIM_{dim_name}_DEF\")\n if not dim_def:\n continue\n # pylint: disable=unused-variable\n dim_size, dim_dtype = dim_def.strip(\"{}\").split(\",\")\n dim_dtype = NETCDF_DTYPE_MAP.get(int(dim_dtype), object)\n dim_values = tags[f\"NETCDF_DIM_{dim_name}_VALUES\"].strip(\"{}\")\n coords[dim_name] = IndexVariable(\n dim_name, np.fromstring(dim_values, dtype=dim_dtype, sep=\",\")\n )\n return coords\n\n\ndef build_subdataset_filter(group_names, variable_names):\n \"\"\"\n Example::\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\":\n MODIS_Grid_2D:sur_refl_b01_1'\n\n Parameters\n ----------\n group_names: str or list or tuple\n Name or names of netCDF groups to filter by.\n\n variable_names: str or list or tuple\n Name or names of netCDF variables to filter by.\n\n Returns\n -------\n re.SRE_Pattern: output of re.compile()\n \"\"\"\n variable_query = r\"\\w+\"\n if variable_names is not None:\n if not isinstance(variable_names, (tuple, list)):\n variable_names = [variable_names]\n variable_names = [re.escape(variable_name) for variable_name in variable_names]\n variable_query = rf\"(?:{'|'.join(variable_names)})\"\n if group_names is not None:\n if not isinstance(group_names, (tuple, list)):\n group_names = [group_names]\n group_names = [re.escape(group_name) for group_name in group_names]\n group_query = rf\"(?:{'|'.join(group_names)})\"\n else:\n return re.compile(r\"\".join([r\".*(?:\\:/|\\:)(/+)?\", variable_query, r\"$\"]))\n return re.compile(\n r\"\".join(\n [r\".*(?:\\:/|\\:)(/+)?\", group_query, r\"[:/](/+)?\", variable_query, r\"$\"]\n )\n )\n\n\ndef _rio_transform(riods):\n \"\"\"\n Get the transform from a rasterio dataset\n reguardless of rasterio version.\n \"\"\"\n try:\n return riods.transform\n except AttributeError:\n return riods.affine # rasterio < 1.0\n\n\ndef _get_rasterio_attrs(riods):\n \"\"\"\n Get rasterio specific attributes\n \"\"\"\n # pylint: disable=too-many-branches\n # Add rasterio attributes\n attrs = _parse_tags(riods.tags(1))\n if hasattr(riods, \"nodata\") and riods.nodata is not None:\n # The nodata values for the raster bands\n attrs[\"_FillValue\"] = 
riods.nodata\n if hasattr(riods, \"scales\"):\n # The scale values for the raster bands\n if len(set(riods.scales)) > 1:\n attrs[\"scales\"] = riods.scales\n warnings.warn(\n \"Offsets differ across bands. The 'scale_factor' attribute will \"\n \"not be added. See the 'scales' attribute.\"\n )\n else:\n attrs[\"scale_factor\"] = riods.scales[0]\n if hasattr(riods, \"offsets\"):\n # The offset values for the raster bands\n if len(set(riods.offsets)) > 1:\n attrs[\"offsets\"] = riods.offsets\n warnings.warn(\n \"Offsets differ across bands. The 'add_offset' attribute will \"\n \"not be added. See the 'offsets' attribute.\"\n )\n else:\n attrs[\"add_offset\"] = riods.offsets[0]\n if hasattr(riods, \"descriptions\") and any(riods.descriptions):\n if len(set(riods.descriptions)) == 1:\n attrs[\"long_name\"] = riods.descriptions[0]\n else:\n # Descriptions for each dataset band\n attrs[\"long_name\"] = riods.descriptions\n if hasattr(riods, \"units\") and any(riods.units):\n # A list of units string for each dataset band\n if len(riods.units) == 1:\n attrs[\"units\"] = riods.units[0]\n else:\n attrs[\"units\"] = riods.units\n\n return attrs\n\n\ndef _decode_datetime_cf(data_array, decode_times, decode_timedelta):\n \"\"\"\n Decide the datetime based on CF conventions\n \"\"\"\n if decode_timedelta is None:\n decode_timedelta = decode_times\n\n for coord in data_array.coords:\n time_var = None\n if decode_times and \"since\" in data_array[coord].attrs.get(\"units\", \"\"):\n time_var = times.CFDatetimeCoder(use_cftime=True).decode(\n as_variable(data_array[coord]), name=coord\n )\n elif (\n decode_timedelta\n and data_array[coord].attrs.get(\"units\") in times.TIME_UNITS\n ):\n time_var = times.CFTimedeltaCoder().decode(\n as_variable(data_array[coord]), name=coord\n )\n if time_var is not None:\n dimensions, data, attributes, encoding = variables.unpack_for_decoding(\n time_var\n )\n data_array = data_array.assign_coords(\n {\n coord: IndexVariable(\n dims=dimensions,\n data=data,\n attrs=attributes,\n encoding=encoding,\n )\n }\n )\n return data_array\n\n\ndef _parse_driver_tags(riods, attrs, coords):\n # Parse extra metadata from tags, if supported\n parsers = {\"ENVI\": _parse_envi}\n\n driver = riods.driver\n if driver in parsers:\n meta = parsers[driver](riods.tags(ns=driver))\n\n for key, value in meta.items():\n # Add values as coordinates if they match the band count,\n # as attributes otherwise\n if isinstance(value, (list, np.ndarray)) and len(value) == riods.count:\n coords[key] = (\"band\", np.asarray(value))\n else:\n attrs[key] = value\n\n\ndef _load_subdatasets(\n riods,\n group,\n variable,\n parse_coordinates,\n chunks,\n cache,\n lock,\n masked,\n mask_and_scale,\n decode_times,\n decode_timedelta,\n **open_kwargs,\n):\n \"\"\"\n Load in rasterio subdatasets\n \"\"\"\n base_tags = _parse_tags(riods.tags())\n dim_groups = {}\n subdataset_filter = None\n if any((group, variable)):\n subdataset_filter = build_subdataset_filter(group, variable)\n for subdataset in riods.subdatasets:\n if subdataset_filter is not None and not subdataset_filter.match(subdataset):\n continue\n with rasterio.open(subdataset) as rds:\n shape = rds.shape\n rioda = open_rasterio(\n subdataset,\n parse_coordinates=shape not in dim_groups and parse_coordinates,\n chunks=chunks,\n cache=cache,\n lock=lock,\n masked=masked,\n mask_and_scale=mask_and_scale,\n default_name=subdataset.split(\":\")[-1].lstrip(\"/\").replace(\"/\", \"_\"),\n decode_times=decode_times,\n decode_timedelta=decode_timedelta,\n 
**open_kwargs,\n )\n if shape not in dim_groups:\n dim_groups[shape] = {rioda.name: rioda}\n else:\n dim_groups[shape][rioda.name] = rioda\n\n if len(dim_groups) > 1:\n dataset = [\n Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()\n ]\n elif not dim_groups:\n dataset = Dataset(attrs=base_tags)\n else:\n dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)\n return dataset\n\n\ndef _prepare_dask(result, riods, filename, chunks):\n \"\"\"\n Prepare the data for dask computations\n \"\"\"\n # pylint: disable=import-outside-toplevel\n from dask.base import tokenize\n\n # augment the token with the file modification time\n try:\n mtime = os.path.getmtime(filename)\n except OSError:\n # the filename is probably an s3 bucket rather than a regular file\n mtime = None\n\n if chunks in (True, \"auto\"):\n import dask\n from dask.array.core import normalize_chunks\n\n if version.parse(dask.__version__) < version.parse(\"0.18.0\"):\n msg = (\n \"Automatic chunking requires dask.__version__ >= 0.18.0 . \"\n f\"You currently have version {dask.__version__}\"\n )\n raise NotImplementedError(msg)\n block_shape = (1,) + riods.block_shapes[0]\n chunks = normalize_chunks(\n chunks=(1, \"auto\", \"auto\"),\n shape=(riods.count, riods.height, riods.width),\n dtype=riods.dtypes[0],\n previous_chunks=tuple((c,) for c in block_shape),\n )\n token = tokenize(filename, mtime, chunks)\n name_prefix = f\"open_rasterio-{token}\"\n return result.chunk(chunks, name_prefix=name_prefix, token=token)\n\n\ndef _handle_encoding(result, mask_and_scale, masked, da_name):\n \"\"\"\n Make sure encoding handled properly\n \"\"\"\n if \"grid_mapping\" in result.attrs:\n variables.pop_to(result.attrs, result.encoding, \"grid_mapping\", name=da_name)\n if mask_and_scale:\n if \"scale_factor\" in result.attrs:\n variables.pop_to(\n result.attrs, result.encoding, \"scale_factor\", name=da_name\n )\n if \"add_offset\" in result.attrs:\n variables.pop_to(result.attrs, result.encoding, \"add_offset\", name=da_name)\n if masked:\n if \"_FillValue\" in result.attrs:\n variables.pop_to(result.attrs, result.encoding, \"_FillValue\", name=da_name)\n if \"missing_value\" in result.attrs:\n variables.pop_to(\n result.attrs, result.encoding, \"missing_value\", name=da_name\n )\n\n\ndef open_rasterio(\n filename,\n parse_coordinates=None,\n chunks=None,\n cache=None,\n lock=None,\n masked=False,\n mask_and_scale=False,\n variable=None,\n group=None,\n default_name=None,\n decode_times=True,\n decode_timedelta=None,\n **open_kwargs,\n):\n # pylint: disable=too-many-statements,too-many-locals,too-many-branches\n \"\"\"Open a file with rasterio (experimental).\n\n This should work with any file that rasterio can open (most often:\n geoTIFF). The x and y coordinates are generated automatically from the\n file's geoinformation, shifted to the center of each pixel (see\n `\"PixelIsArea\" Raster Space\n <http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_\n for more information).\n\n Parameters\n ----------\n filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT\n Path to the file to open. Or already open rasterio dataset.\n parse_coordinates: bool, optional\n Whether to parse the x and y coordinates out of the file's\n ``transform`` attribute or not. 
The default is to automatically\n parse the coordinates only if they are rectilinear (1D).\n It can be useful to set ``parse_coordinates=False``\n if your files are very large or if you don't need the coordinates.\n chunks: int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new\n DataArray into a dask array. Chunks can also be set to\n ``True`` or ``\"auto\"`` to choose sensible chunk sizes according to\n ``dask.config.get(\"array.chunk-size\")``.\n cache: bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False.\n lock: bool or dask.utils.SerializableLock, optional\n\n If chunks is provided, this argument is used to ensure that only one\n thread per process is reading from a rasterio file object at a time.\n\n By default and when a lock instance is provided,\n a :class:`xarray.backends.CachingFileManager` is used to cache File objects.\n Since rasterio also caches some data, this will make repeated reads from the\n same object fast.\n\n When ``lock=False``, no lock is used, allowing for completely parallel reads\n from multiple threads or processes. However, a new file handle is opened on\n each request.\n\n masked: bool, optional\n If True, read the mask and set values to NaN. Defaults to False.\n mask_and_scale: bool, optional\n Lazily scale (using the `scales` and `offsets` from rasterio) and mask.\n If the _Unsigned attribute is present treat integer arrays as unsigned.\n variable: str or list or tuple, optional\n Variable name or names to use to filter loading.\n group: str or list or tuple, optional\n Group name or names to use to filter loading.\n default_name: str, optional\n The name of the data array if none exists. Default is None.\n decode_times: bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n decode_timedelta: bool, optional\n If True, decode variables and coordinates with time units in\n {“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}\n into timedelta objects. 
If False, leave them encoded as numbers.\n If None (default), assume the same value of decode_time.\n **open_kwargs: kwargs, optional\n Optional keyword arguments to pass into rasterio.open().\n\n Returns\n -------\n :obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:\n The newly created dataset(s).\n \"\"\"\n parse_coordinates = True if parse_coordinates is None else parse_coordinates\n masked = masked or mask_and_scale\n vrt_params = None\n if isinstance(filename, rasterio.io.DatasetReader):\n filename = filename.name\n elif isinstance(filename, rasterio.vrt.WarpedVRT):\n vrt = filename\n filename = vrt.src_dataset.name\n vrt_params = dict(\n src_crs=vrt.src_crs.to_string() if vrt.src_crs else None,\n crs=vrt.crs.to_string() if vrt.crs else None,\n resampling=vrt.resampling,\n tolerance=vrt.tolerance,\n src_nodata=vrt.src_nodata,\n nodata=vrt.nodata,\n width=vrt.width,\n height=vrt.height,\n src_transform=vrt.src_transform,\n transform=vrt.transform,\n dtype=vrt.working_dtype,\n warp_extras=vrt.warp_extras,\n )\n\n if lock in (True, None):\n lock = RASTERIO_LOCK\n elif lock is False:\n lock = NO_LOCK\n\n # ensure default for sharing is False\n # ref https://github.com/mapbox/rasterio/issues/1504\n open_kwargs[\"sharing\"] = open_kwargs.get(\"sharing\", False)\n\n with warnings.catch_warnings(record=True) as rio_warnings:\n if lock is not NO_LOCK:\n manager = CachingFileManager(\n rasterio.open, filename, lock=lock, mode=\"r\", kwargs=open_kwargs\n )\n else:\n manager = URIManager(rasterio.open, filename, mode=\"r\", kwargs=open_kwargs)\n riods = manager.acquire()\n captured_warnings = rio_warnings.copy()\n\n # raise the NotGeoreferencedWarning if applicable\n for rio_warning in captured_warnings:\n if not riods.subdatasets or not isinstance(\n rio_warning.message, NotGeoreferencedWarning\n ):\n warnings.warn(str(rio_warning.message), type(rio_warning.message))\n\n # open the subdatasets if they exist\n if riods.subdatasets:\n return _load_subdatasets(\n riods=riods,\n group=group,\n variable=variable,\n parse_coordinates=parse_coordinates,\n chunks=chunks,\n cache=cache,\n lock=lock,\n masked=masked,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n decode_timedelta=decode_timedelta,\n **open_kwargs,\n )\n\n if vrt_params is not None:\n riods = WarpedVRT(riods, **vrt_params)\n\n if cache is None:\n cache = chunks is None\n\n # Get bands\n if riods.count < 1:\n raise ValueError(\"Unknown dims\")\n\n # parse tags & load alternate coords\n attrs = _get_rasterio_attrs(riods=riods)\n coords = _load_netcdf_1d_coords(riods.tags())\n _parse_driver_tags(riods=riods, attrs=attrs, coords=coords)\n for coord in coords:\n if f\"NETCDF_DIM_{coord}\" in attrs:\n coord_name = coord\n attrs.pop(f\"NETCDF_DIM_{coord}\")\n break\n else:\n coord_name = \"band\"\n coords[coord_name] = np.asarray(riods.indexes)\n\n # Get geospatial coordinates\n if parse_coordinates:\n coords.update(\n _generate_spatial_coords(_rio_transform(riods), riods.width, riods.height)\n )\n\n unsigned = False\n encoding = {}\n if mask_and_scale and \"_Unsigned\" in attrs:\n unsigned = variables.pop_to(attrs, encoding, \"_Unsigned\") == \"true\"\n\n if masked:\n encoding[\"dtype\"] = str(_rasterio_to_numpy_dtype(riods.dtypes))\n\n da_name = attrs.pop(\"NETCDF_VARNAME\", default_name)\n data = indexing.LazilyOuterIndexedArray(\n RasterioArrayWrapper(\n manager,\n lock,\n name=da_name,\n vrt_params=vrt_params,\n masked=masked,\n mask_and_scale=mask_and_scale,\n unsigned=unsigned,\n )\n )\n\n # 
this lets you write arrays loaded with rasterio\n data = indexing.CopyOnWriteArray(data)\n if cache and chunks is None:\n data = indexing.MemoryCachedArray(data)\n\n result = DataArray(\n data=data, dims=(coord_name, \"y\", \"x\"), coords=coords, attrs=attrs, name=da_name\n )\n result.encoding = encoding\n\n # update attributes from NetCDF attributess\n _load_netcdf_attrs(riods.tags(), result)\n result = _decode_datetime_cf(\n result, decode_times=decode_times, decode_timedelta=decode_timedelta\n )\n\n # make sure the _FillValue is correct dtype\n if \"_FillValue\" in attrs:\n attrs[\"_FillValue\"] = result.dtype.type(attrs[\"_FillValue\"])\n\n # handle encoding\n _handle_encoding(result, mask_and_scale, masked, da_name)\n # Affine transformation matrix (always available)\n # This describes coefficients mapping pixel coordinates to CRS\n # For serialization store as tuple of 6 floats, the last row being\n # always (0, 0, 1) per definition (see\n # https://github.com/sgillies/affine)\n result.rio.write_transform(_rio_transform(riods), inplace=True)\n if hasattr(riods, \"crs\") and riods.crs:\n result.rio.write_crs(riods.crs, inplace=True)\n\n if chunks is not None:\n result = _prepare_dask(result, riods, filename, chunks)\n\n # Make the file closeable\n result.set_close(manager.close)\n result.rio._manager = manager\n # add file path to encoding\n result.encoding[\"source\"] = riods.name\n result.encoding[\"rasterio_dtype\"] = str(riods.dtypes[0])\n return result\n"
] | [
[
"numpy.ix_",
"numpy.zeros",
"numpy.squeeze",
"numpy.dtype",
"numpy.asarray",
"numpy.atleast_1d",
"numpy.arange",
"numpy.max",
"numpy.min",
"numpy.fromstring"
]
] |
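The row above lists rioxarray's `open_rasterio` reader together with the numpy calls it makes. As a quick orientation, here is a minimal usage sketch of the public `rioxarray.open_rasterio` entry point that wraps this code path; the file name `example.tif` is a placeholder, and dask must be installed for `chunks="auto"` to take effect.

```python
# Minimal usage sketch for the open_rasterio reader listed above.
# "example.tif" is a hypothetical GeoTIFF path; dask is needed for chunked reads.
import rioxarray

# Lazy, chunked read; chunks="auto" picks sensible dask chunks and
# masked=True reads the mask and sets nodata values to NaN.
da = rioxarray.open_rasterio("example.tif", chunks="auto", masked=True)

print(da.dims)             # e.g. ("band", "y", "x")
print(da.rio.crs)          # CRS parsed from the file's geoinformation
print(da.rio.transform())  # affine transform attached by the reader

da.close()  # release the underlying file handle
```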
suvarnak/GenerativeFSLCovid | [
"0bdeb4ed444c5c9d59697c71d0733fc3a100944c"
] | [
"graphs/models/concept_discriminator.py"
] | [
"\"\"\"\r\n discriminator model\r\n\"\"\"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torchvision.models as models\r\nimport json\r\nfrom easydict import EasyDict as edict\r\nfrom graphs.weights_initializer import weights_init\r\n\r\n\r\nclass EncoderModel(nn.Module):\r\n def __init__(self,config):\r\n super(EncoderModel, self).__init__()\r\n self.config = config\r\n\r\n self.num_classes = self.config.num_classes\r\n\r\n self.progress = 0.0\r\n\r\n self.encoder = nn.Sequential(\r\n nn.Conv2d(in_channels=3,out_channels=32, kernel_size=3, stride=1, padding=1), # b, 32, 224, 224\r\n nn.ReLU(True),\r\n nn.MaxPool2d(2, stride=None), # b, 32, 112, 112\r\n nn.Conv2d(in_channels=32,out_channels=64, kernel_size=3, stride=1, padding=1), # b, 64, 112, 112\r\n nn.ReLU(True),\r\n nn.MaxPool2d(2, stride=None), # b, 64, 56, 56\r\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1), # b, 128, 56, 56\r\n nn.ReLU(True),\r\n nn.MaxPool2d(2, stride=None), # b, 128, 28, 28\r\n\t\t\t\t)\r\n self.linear_layers = nn.Sequential(\t\t\r\n\t\t\t\t\t\tnn.Linear(2*self.config.image_size*self.config.image_size, out_features=128),\r\n nn.Linear(128, out_features=self.config.num_ways),\r\n\r\n )\r\n\r\n\r\n def forward(self, x): \r\n #x = self.encoder(x)\r\n #print(x.size())\r\n #self.discriminator = nn.Sequential(self.encoder, self.fc())\r\n x = self.encoder(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.linear_layers(x)\r\n #print(x.size())\r\n\r\n #x = x.view(1, -1) \r\n #x = self.fc(x)\r\n return x\r\n\r\nclass ConceptDiscriminatorModel(torch.nn.Module): #new model\r\n def __init__(self, pretrained_model):\r\n super(ConceptDiscriminatorModel, self).__init__()\r\n self.new_model = nn.Sequential(\r\n nn.Linear(in_features=512, out_features=30))\r\n self.pretrained_model = pretrained_model\r\n\r\n def forward(self, x):\r\n x = self.pretrained_model(x)\r\n return x\r\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Linear"
]
] |
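For the `concept_discriminator.py` row above, a hedged instantiation sketch follows. The config values (`image_size=224`, `num_ways=5`, `num_classes=2`) are assumptions chosen so that the flattened encoder output (128 × 28 × 28 = 100352) matches the first `nn.Linear` layer's `2 * image_size * image_size` input size; the import path simply mirrors the file path listed in the row and assumes the repo root is on `PYTHONPATH`.

```python
# Hedged usage sketch for the EncoderModel defined in the row above.
# Config values are illustrative assumptions, not taken from the source repo.
import torch
from easydict import EasyDict as edict
from graphs.models.concept_discriminator import EncoderModel  # path from the row above

config = edict({"num_classes": 2, "num_ways": 5, "image_size": 224})

model = EncoderModel(config)
x = torch.randn(4, 3, 224, 224)   # batch of 4 RGB images, 224x224
logits = model(x)
print(logits.shape)               # torch.Size([4, 5]) -> one score per "way"
```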
nhsx-mirror/SynthVAE | [
"64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651"
] | [
"opacus/privacy_analysis.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nr\"\"\"\n*Based on Google's TF Privacy:* https://github.com/tensorflow/privacy/blob/master/tensorflow_privacy/privacy/analysis/rdp_accountant.py.\n*Here, we update this code to Python 3, and optimize dependencies.*\n\nFunctionality for computing Renyi Differential Privacy (RDP) of an additive\nSampled Gaussian Mechanism (SGM).\n\nExample:\n Suppose that we have run an SGM applied to a function with L2-sensitivity of 1.\n\n Its parameters are given as a list of tuples\n ``[(q_1, sigma_1, steps_1), ..., (q_k, sigma_k, steps_k)],``\n and we wish to compute epsilon for a given target delta.\n\n The example code would be:\n\n >>> max_order = 32\n >>> orders = range(2, max_order + 1)\n >>> rdp = np.zeros_like(orders, dtype=float)\n >>> for q, sigma, steps in parameters:\n >>> rdp += privacy_analysis.compute_rdp(q, sigma, steps, orders)\n >>> epsilon, opt_order = privacy_analysis.get_privacy_spent(orders, rdp, delta)\n\n\"\"\"\n\nimport math\nfrom typing import List, Tuple, Union\n\nimport numpy as np\nfrom scipy import special\n\n\n########################\n# LOG-SPACE ARITHMETIC #\n########################\n\n\ndef _log_add(logx: float, logy: float) -> float:\n r\"\"\"Adds two numbers in the log space.\n\n Args:\n logx: First term in log space.\n logy: Second term in log space.\n\n Returns:\n Sum of numbers in log space.\n \"\"\"\n a, b = min(logx, logy), max(logx, logy)\n if a == -np.inf: # adding 0\n return b\n # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)\n return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)\n\n\ndef _log_sub(logx: float, logy: float) -> float:\n r\"\"\"Subtracts two numbers in the log space.\n\n Args:\n logx: First term in log space. Expected to be greater than the second term.\n logy: First term in log space. 
Expected to be less than the first term.\n\n Returns:\n Difference of numbers in log space.\n\n Raises:\n ValueError\n If the result is negative.\n \"\"\"\n if logx < logy:\n raise ValueError(\"The result of subtraction must be non-negative.\")\n if logy == -np.inf: # subtracting 0\n return logx\n if logx == logy:\n return -np.inf # 0 is represented as -np.inf in the log space.\n\n try:\n # Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).\n return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1\n except OverflowError:\n return logx\n\n\ndef _compute_log_a_for_int_alpha(q: float, sigma: float, alpha: int) -> float:\n r\"\"\"Computes :math:`log(A_\\alpha)` for integer ``alpha``.\n\n Notes:\n Note that\n :math:`A_\\alpha` is real valued function of ``alpha`` and ``q``,\n and that 0 < ``q`` < 1.\n\n Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.\n\n Args:\n q: Sampling rate of SGM.\n sigma: The standard deviation of the additive Gaussian noise.\n alpha: The order at which RDP is computed.\n\n Returns:\n :math:`log(A_\\alpha)` as defined in Section 3.3 of\n https://arxiv.org/pdf/1908.10530.pdf.\n \"\"\"\n\n # Initialize with 0 in the log space.\n log_a = -np.inf\n\n for i in range(alpha + 1):\n log_coef_i = (\n math.log(special.binom(alpha, i))\n + i * math.log(q)\n + (alpha - i) * math.log(1 - q)\n )\n\n s = log_coef_i + (i * i - i) / (2 * (sigma ** 2))\n log_a = _log_add(log_a, s)\n\n return float(log_a)\n\n\ndef _compute_log_a_for_frac_alpha(q: float, sigma: float, alpha: float) -> float:\n r\"\"\"Computes :math:`log(A_\\alpha)` for fractional ``alpha``.\n\n Notes:\n Note that\n :math:`A_\\alpha` is real valued function of ``alpha`` and ``q``,\n and that 0 < ``q`` < 1.\n\n Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.\n\n Args:\n q: Sampling rate of SGM.\n sigma: The standard deviation of the additive Gaussian noise.\n alpha: The order at which RDP is computed.\n\n Returns:\n :math:`log(A_\\alpha)` as defined in Section 3.3 of\n https://arxiv.org/pdf/1908.10530.pdf.\n \"\"\"\n # The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are\n # initialized to 0 in the log space:\n log_a0, log_a1 = -np.inf, -np.inf\n i = 0\n\n z0 = sigma ** 2 * math.log(1 / q - 1) + 0.5\n\n while True: # do ... 
until loop\n coef = special.binom(alpha, i)\n log_coef = math.log(abs(coef))\n j = alpha - i\n\n log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)\n log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)\n\n log_e0 = math.log(0.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))\n log_e1 = math.log(0.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))\n\n log_s0 = log_t0 + (i * i - i) / (2 * (sigma ** 2)) + log_e0\n log_s1 = log_t1 + (j * j - j) / (2 * (sigma ** 2)) + log_e1\n\n if coef > 0:\n log_a0 = _log_add(log_a0, log_s0)\n log_a1 = _log_add(log_a1, log_s1)\n else:\n log_a0 = _log_sub(log_a0, log_s0)\n log_a1 = _log_sub(log_a1, log_s1)\n\n i += 1\n if max(log_s0, log_s1) < -30:\n break\n\n return _log_add(log_a0, log_a1)\n\n\ndef _compute_log_a(q: float, sigma: float, alpha: float) -> float:\n r\"\"\"Computes :math:`log(A_\\alpha)` for any positive finite ``alpha``.\n\n Notes:\n Note that\n :math:`A_\\alpha` is real valued function of ``alpha`` and ``q``,\n and that 0 < ``q`` < 1.\n\n Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf\n for details.\n\n Args:\n q: Sampling rate of SGM.\n sigma: The standard deviation of the additive Gaussian noise.\n alpha: The order at which RDP is computed.\n\n Returns:\n :math:`log(A_\\alpha)` as defined in the paper mentioned above.\n \"\"\"\n if float(alpha).is_integer():\n return _compute_log_a_for_int_alpha(q, sigma, int(alpha))\n else:\n return _compute_log_a_for_frac_alpha(q, sigma, alpha)\n\n\ndef _log_erfc(x: float) -> float:\n r\"\"\"Computes :math:`log(erfc(x))` with high accuracy for large ``x``.\n\n Helper function used in computation of :math:`log(A_\\alpha)`\n for a fractional alpha.\n\n Args:\n x: The input to the function\n\n Returns:\n :math:`log(erfc(x))`\n \"\"\"\n return math.log(2) + special.log_ndtr(-x * 2 ** 0.5)\n\n\ndef _compute_rdp(q: float, sigma: float, alpha: float) -> float:\n r\"\"\"Computes RDP of the Sampled Gaussian Mechanism at order ``alpha``.\n\n Args:\n q: Sampling rate of SGM.\n sigma: The standard deviation of the additive Gaussian noise.\n alpha: The order at which RDP is computed.\n\n Returns:\n RDP at order ``alpha``; can be np.inf.\n \"\"\"\n if q == 0:\n return 0\n\n # no privacy\n if sigma == 0:\n return np.inf\n\n if q == 1.0:\n return alpha / (2 * sigma ** 2)\n\n if np.isinf(alpha):\n return np.inf\n\n return _compute_log_a(q, sigma, alpha) / (alpha - 1)\n\n\ndef compute_rdp(\n q: float, noise_multiplier: float, steps: int, orders: Union[List[float], float]\n) -> Union[List[float], float]:\n r\"\"\"Computes Renyi Differential Privacy (RDP) guarantees of the\n Sampled Gaussian Mechanism (SGM) iterated ``steps`` times.\n\n Args:\n q: Sampling rate of SGM.\n noise_multiplier: The ratio of the standard deviation of the\n additive Gaussian noise to the L2-sensitivity of the function\n to which it is added. 
Note that this is same as the standard\n deviation of the additive Gaussian noise when the L2-sensitivity\n of the function is 1.\n steps: The number of iterations of the mechanism.\n orders: An array (or a scalar) of RDP orders.\n\n Returns:\n The RDP guarantees at all orders; can be ``np.inf``.\n \"\"\"\n if isinstance(orders, float):\n rdp = _compute_rdp(q, noise_multiplier, orders)\n else:\n rdp = np.array([_compute_rdp(q, noise_multiplier, order) for order in orders])\n\n return rdp * steps\n\n\ndef get_privacy_spent(\n orders: Union[List[float], float], rdp: Union[List[float], float], delta: float\n) -> Tuple[float, float]:\n r\"\"\"Computes epsilon given a list of Renyi Differential Privacy (RDP) values at\n multiple RDP orders and target ``delta``.\n The computation of epslion, i.e. conversion from RDP to (eps, delta)-DP,\n is based on the theorem presented in the following work:\n Borja Balle et al. \"Hypothesis testing interpretations and Renyi differential privacy.\"\n International Conference on Artificial Intelligence and Statistics. PMLR, 2020.\n Particullary, Theorem 21 in the arXiv version https://arxiv.org/abs/1905.09982.\n Args:\n orders: An array (or a scalar) of orders (alphas).\n rdp: A list (or a scalar) of RDP guarantees.\n delta: The target delta.\n Returns:\n Pair of epsilon and optimal order alpha.\n Raises:\n ValueError\n If the lengths of ``orders`` and ``rdp`` are not equal.\n \"\"\"\n orders_vec = np.atleast_1d(orders)\n rdp_vec = np.atleast_1d(rdp)\n\n if len(orders_vec) != len(rdp_vec):\n raise ValueError(\n f\"Input lists must have the same length.\\n\"\n f\"\\torders_vec = {orders_vec}\\n\"\n f\"\\trdp_vec = {rdp_vec}\\n\"\n )\n\n eps = (\n rdp_vec\n - (np.log(delta) + np.log(orders_vec)) / (orders_vec - 1)\n + np.log((orders_vec - 1) / orders_vec)\n )\n\n # special case when there is no privacy\n if np.isnan(eps).all():\n return np.inf, np.nan\n\n idx_opt = np.nanargmin(eps) # Ignore NaNs\n return eps[idx_opt], orders_vec[idx_opt]\n"
] | [
[
"numpy.isinf",
"numpy.atleast_1d",
"numpy.nanargmin",
"numpy.log",
"scipy.special.log_ndtr",
"scipy.special.binom",
"numpy.isnan"
]
] |
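The `privacy_analysis.py` row above already sketches its intended use in the module docstring. The following runnable version of that accounting loop fills in illustrative `(q, sigma, steps)` parameters and a target `delta`, which are not taken from the source.

```python
# RDP accounting loop from the module docstring above, with illustrative values.
# Assumes the module is importable per the row's file layout (opacus/privacy_analysis.py).
import numpy as np
from opacus import privacy_analysis

# (sampling rate, noise multiplier, steps) for each phase of training -- illustrative only
parameters = [(0.01, 1.1, 1000), (0.02, 1.3, 500)]
delta = 1e-5

max_order = 32
orders = range(2, max_order + 1)
rdp = np.zeros_like(orders, dtype=float)
for q, sigma, steps in parameters:
    rdp += privacy_analysis.compute_rdp(q, sigma, steps, orders)

epsilon, opt_order = privacy_analysis.get_privacy_spent(orders, rdp, delta)
print(f"epsilon = {epsilon:.3f} at optimal order alpha = {opt_order}")
```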
linksdl/futuretec-project-self_driving_cars_projects | [
"38e8f14543132ec86a8bada8d708eefaef23fee8"
] | [
"udacity-program_self_driving_car_engineer_v1.0/part01-computer vision and deep learning/module03-deep learning/lesson02-miniflow/exercise07-backpropagation/miniflow.py"
] | [
"\"\"\"\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n@Time : 2022/3/26 16:58\n@File : miniflow.py\n\"\"\"\n\n\n\n\"\"\"\nImplement the backward method of the Sigmoid node.\n\"\"\"\nimport numpy as np\n\n\nclass Node(object):\n \"\"\"\n Base class for nodes in the network.\n\n Arguments:\n\n `inbound_nodes`: A list of nodes with edges into this node.\n \"\"\"\n def __init__(self, inbound_nodes=[]):\n \"\"\"\n Node's constructor (runs when the object is instantiated). Sets\n properties that all nodes need.\n \"\"\"\n # A list of nodes with edges into this node.\n self.inbound_nodes = inbound_nodes\n # The eventual value of this node. Set by running\n # the forward() method.\n self.value = None\n # A list of nodes that this node outputs to.\n self.outbound_nodes = []\n # New property! Keys are the inputs to this node and\n # their values are the partials of this node with\n # respect to that input.\n self.gradients = {}\n # Sets this node as an outbound node for all of\n # this node's inputs.\n for node in inbound_nodes:\n node.outbound_nodes.append(self)\n\n def forward(self):\n \"\"\"\n Every node that uses this class as a base class will\n need to define its own `forward` method.\n \"\"\"\n raise NotImplementedError\n\n def backward(self):\n \"\"\"\n Every node that uses this class as a base class will\n need to define its own `backward` method.\n \"\"\"\n raise NotImplementedError\n\n\nclass Input(Node):\n \"\"\"\n A generic input into the network.\n \"\"\"\n def __init__(self):\n # The base class constructor has to run to set all\n # the properties here.\n #\n # The most important property on an Input is value.\n # self.value is set during `topological_sort` later.\n Node.__init__(self)\n\n def forward(self):\n # Do nothing because nothing is calculated.\n pass\n\n def backward(self):\n # An Input node has no inputs so the gradient (derivative)\n # is zero.\n # The key, `self`, is reference to this object.\n self.gradients = {self: 0}\n # Weights and bias may be inputs, so you need to sum\n # the gradient from output gradients.\n for n in self.outbound_nodes:\n grad_cost = n.gradients[self]\n self.gradients[self] += grad_cost * 1\n\n\nclass Linear(Node):\n \"\"\"\n Represents a node that performs a linear transform.\n \"\"\"\n def __init__(self, X, W, b):\n # The base class (Node) constructor. Weights and bias\n # are treated like inbound nodes.\n Node.__init__(self, [X, W, b])\n\n def forward(self):\n \"\"\"\n Performs the math behind a linear transform.\n \"\"\"\n X = self.inbound_nodes[0].value\n W = self.inbound_nodes[1].value\n b = self.inbound_nodes[2].value\n self.value = np.dot(X, W) + b\n\n def backward(self):\n \"\"\"\n Calculates the gradient based on the output values.\n \"\"\"\n # Initialize a partial for each of the inbound_nodes.\n self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}\n # Cycle through the outputs. 
The gradient will change depending\n # on each output, so the gradients are summed over all outputs.\n for n in self.outbound_nodes:\n # Get the partial of the cost with respect to this node.\n grad_cost = n.gradients[self]\n # Set the partial of the loss with respect to this node's inputs.\n self.gradients[self.inbound_nodes[0]] += np.dot(grad_cost, self.inbound_nodes[1].value.T)\n # Set the partial of the loss with respect to this node's weights.\n self.gradients[self.inbound_nodes[1]] += np.dot(self.inbound_nodes[0].value.T, grad_cost)\n # Set the partial of the loss with respect to this node's bias.\n self.gradients[self.inbound_nodes[2]] += np.sum(grad_cost, axis=0, keepdims=False)\n\n\nclass Sigmoid(Node):\n \"\"\"\n Represents a node that performs the sigmoid activation function.\n \"\"\"\n def __init__(self, node):\n # The base class constructor.\n Node.__init__(self, [node])\n\n def _sigmoid(self, x):\n \"\"\"\n This method is separate from `forward` because it\n will be used with `backward` as well.\n\n `x`: A numpy array-like object.\n \"\"\"\n return 1. / (1. + np.exp(-x))\n\n def forward(self):\n \"\"\"\n Perform the sigmoid function and set the value.\n \"\"\"\n input_value = self.inbound_nodes[0].value\n self.value = self._sigmoid(input_value)\n\n def backward(self):\n \"\"\"\n Calculates the gradient using the derivative of\n the sigmoid function.\n \"\"\"\n # Initialize the gradients to 0.\n self.gradients = {n: np.zeros_like(n.value) for n in self.inbound_nodes}\n\n # Cycle through the outputs. The gradient will change depending\n # on each output, so the gradients are summed over all outputs.\n for n in self.outbound_nodes:\n # Get the partial of the cost with respect to this node.\n grad_cost = n.gradients[self]\n \"\"\"\n TODO: Your code goes here!\n Set the gradients property to the gradients with respect to each input.\n NOTE: See the Linear node and MSE node for examples.\n \"\"\"\n sigmoid = self.value\n self.gradients[self.inbound_nodes[0]] += sigmoid * (1 - sigmoid) * grad_cost\n\n\nclass MSE(Node):\n def __init__(self, y, a):\n \"\"\"\n The mean squared error cost function.\n Should be used as the last node for a network.\n \"\"\"\n # Call the base class' constructor.\n Node.__init__(self, [y, a])\n\n def forward(self):\n \"\"\"\n Calculates the mean squared error.\n \"\"\"\n # NOTE: We reshape these to avoid possible matrix/vector broadcast\n # errors.\n #\n # For example, if we subtract an array of shape (3,) from an array of shape\n # (3,1) we get an array of shape(3,3) as the result when we want\n # an array of shape (3,1) instead.\n #\n # Making both arrays (3,1) ensures the result is (3,1) and does\n # an elementwise subtraction as expected.\n y = self.inbound_nodes[0].value.reshape(-1, 1)\n a = self.inbound_nodes[1].value.reshape(-1, 1)\n\n self.m = self.inbound_nodes[0].value.shape[0]\n # Save the computed output for backward.\n self.diff = y - a\n self.value = np.mean(self.diff**2)\n\n def backward(self):\n \"\"\"\n Calculates the gradient of the cost.\n\n This is the final node of the network so outbound nodes\n are not a concern.\n \"\"\"\n self.gradients[self.inbound_nodes[0]] = (2 / self.m) * self.diff\n self.gradients[self.inbound_nodes[1]] = (-2 / self.m) * self.diff\n\n\ndef topological_sort(feed_dict):\n \"\"\"\n Sort the nodes in topological order using Kahn's Algorithm.\n\n `feed_dict`: A dictionary where the key is a `Input` Node and the value is the respective value feed to that Node.\n\n Returns a list of sorted nodes.\n \"\"\"\n\n 
input_nodes = [n for n in feed_dict.keys()]\n\n G = {}\n nodes = [n for n in input_nodes]\n while len(nodes) > 0:\n n = nodes.pop(0)\n if n not in G:\n G[n] = {'in': set(), 'out': set()}\n for m in n.outbound_nodes:\n if m not in G:\n G[m] = {'in': set(), 'out': set()}\n G[n]['out'].add(m)\n G[m]['in'].add(n)\n nodes.append(m)\n\n L = []\n S = set(input_nodes)\n while len(S) > 0:\n n = S.pop()\n\n if isinstance(n, Input):\n n.value = feed_dict[n]\n\n L.append(n)\n for m in n.outbound_nodes:\n G[n]['out'].remove(m)\n G[m]['in'].remove(n)\n # if no other incoming edges add to S\n if len(G[m]['in']) == 0:\n S.add(m)\n return L\n\n\ndef forward_and_backward(graph):\n \"\"\"\n Performs a forward pass and a backward pass through a list of sorted Nodes.\n\n Arguments:\n\n `graph`: The result of calling `topological_sort`.\n \"\"\"\n # Forward pass\n for n in graph:\n n.forward()\n\n # Backward pass\n # see: https://docs.python.org/2.3/whatsnew/section-slices.html\n for n in graph[::-1]:\n n.backward()\n"
] | [
[
"numpy.zeros_like",
"numpy.sum",
"numpy.exp",
"numpy.dot",
"numpy.mean"
]
] |
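To show how the node classes in the `miniflow.py` row above fit together, here is a hedged sketch that wires `Input -> Linear -> Sigmoid -> MSE` and runs one forward/backward pass. The concrete arrays are illustrative, and the import assumes the file is on the path as module `miniflow`.

```python
# Hedged end-to-end sketch for the miniflow nodes listed above.
# Shapes only need to satisfy np.dot(X, W) + b; values are illustrative.
import numpy as np
from miniflow import Input, Linear, Sigmoid, MSE, topological_sort, forward_and_backward

X, W, b, y = Input(), Input(), Input(), Input()
f = Linear(X, W, b)       # linear transform node
g = Sigmoid(f)            # activation node
cost = MSE(y, g)          # loss node (last node in the graph)

feed_dict = {
    X: np.array([[-1., -2.], [-1., -2.]]),
    W: np.array([[2.], [3.]]),
    b: np.array([-3.]),
    y: np.array([1., 2.]),
}

graph = topological_sort(feed_dict)   # Kahn's algorithm over the wired nodes
forward_and_backward(graph)           # forward pass, then reverse-order backward pass

# Gradients of the cost with respect to each Input node
gradients = [t.gradients[t] for t in (X, y, W, b)]
print(cost.value, gradients)
```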
mailhexu/pymatgen | [
"b80ca9f34c519757d337487c489fb655f7598cc2"
] | [
"pymatgen/electronic_structure/boltztrap.py"
] | [
"# coding: utf-8\n\nfrom __future__ import division, unicode_literals, print_function\n\nimport math\nimport os\nimport subprocess\nimport tempfile\nimport logging\n\nimport numpy as np\nfrom monty.dev import requires\nfrom monty.json import jsanitize\nfrom monty.os import cd\nfrom monty.os.path import which\nfrom scipy.constants import e, m_e\nfrom scipy.spatial import distance\n\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.units import Energy, Length\nfrom pymatgen.electronic_structure.bandstructure import \\\n BandStructureSymmLine, Kpoint\nfrom pymatgen.electronic_structure.core import Orbital\nfrom pymatgen.electronic_structure.dos import Dos, Spin, CompleteDos\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.symmetry.bandstructure import HighSymmKpath\n\n\"\"\"\nThis module provides classes to run and analyze boltztrap on pymatgen band\nstructure objects. Boltztrap is a software interpolating band structures and\ncomputing materials properties from this band structure using Boltzmann\nsemi-classical transport theory.\n\nBoltztrap has been developed by Georg Madsen.\n\nhttp://www.icams.de/content/research/software-development/boltztrap/\n\nYou need version 1.2.3 or higher\n\nReferences are::\n\n Madsen, G. K. H., and Singh, D. J. (2006).\n BoltzTraP. A code for calculating band-structure dependent quantities.\n Computer Physics Communications, 175, 67-71\n\"\"\"\n\n__author__ = \"Geoffroy Hautier, Zachary Gibbs, Francesco Ricci, Anubhav Jain\"\n__copyright__ = \"Copyright 2013, The Materials Project\"\n__version__ = \"1.1\"\n__maintainer__ = \"Geoffroy Hautier\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n__date__ = \"August 23, 2013\"\n\n\nclass BoltztrapRunner(object):\n \"\"\"\n This class is used to run Boltztrap on a band structure object.\n\n Args:\n bs:\n A band structure object\n nelec:\n the number of electrons\n dos_type:\n two options for the band structure integration: \"HISTO\"\n (histogram) or \"TETRA\" using the tetrahedon method. TETRA\n typically gives better results (especially for DOSes)\n but takes more time\n energy_grid:\n the energy steps used for the integration (eV)\n lpfac:\n the number of interpolation points in the real space. By\n default 10 gives 10 time more points in the real space than\n the number of kpoints given in reciprocal space\n run_type:\n type of boltztrap usage. by default\n - BOLTZ: (default) compute transport coefficients\n - BANDS: interpolate all bands contained in the energy range\n specified in energy_span_around_fermi variable, along specified\n k-points\n - DOS: compute total and partial dos (custom BoltzTraP code\n needed!)\n - FERMI: compute fermi surface or more correctly to\n get certain bands interpolated\n band_nb:\n indicates a band number. Used for Fermi Surface interpolation\n (run_type=\"FERMI\")\n spin:\n specific spin component (1: up, -1: down) of the band selected\n in FERMI mode (mandatory).\n cond_band:\n if a conduction band is specified in FERMI mode,\n set this variable as True\n tauref:\n reference relaxation time. Only set to a value different than\n zero if we want to model beyond the constant relaxation time.\n tauexp:\n exponent for the energy in the non-constant relaxation time\n approach\n tauen:\n reference energy for the non-constant relaxation time approach\n soc:\n results from spin-orbit coupling (soc) computations give\n typically non-polarized (no spin up or down) results but single\n electron occupations. 
If the band structure comes from a soc\n computation, you should set soc to True (default False)\n doping:\n the fixed doping levels you want to compute. Boltztrap provides\n both transport values depending on electron chemical potential\n (fermi energy) and for a series of fixed carrier\n concentrations. By default, this is set to 1e16 to 1e22 in\n increments of factors of 10.\n energy_span_around_fermi:\n usually the interpolation is not needed on the entire energy\n range but on a specific range around the fermi level.\n This energy gives this range in eV. by default it is 1.5 eV.\n If DOS or BANDS type are selected, this range is automatically\n set to cover the entire energy range.\n scissor:\n scissor to apply to the band gap (eV). This applies a scissor\n operation moving the band edges without changing the band\n shape. This is useful to correct the often underestimated band\n gap in DFT. Default is 0.0 (no scissor)\n kpt_line:\n list of fractional coordinates of kpoints as arrays or list of\n Kpoint objects for BANDS mode calculation (standard path of\n high symmetry k-points is automatically set as default)\n tmax:\n Maximum temperature (K) for calculation (default=1300)\n tgrid:\n Temperature interval for calculation (default=50)\n symprec: 1e-3 is the default in pymatgen. If the kmesh has been\n generated using a different symprec, it has to be specified\n to avoid a \"factorization error\" in BoltzTraP calculation.\n\n \"\"\"\n\n @requires(which('x_trans'),\n \"BoltztrapRunner requires the executables 'x_trans' to be in \"\n \"the path. Please download the Boltztrap at http://\"\n \"www.icams.de/content/research/software-development/boltztrap/ \"\n \"and follow the instructions in the README to compile \"\n \"Bolztrap accordingly. Then add x_trans to your path\")\n def __init__(self, bs, nelec, dos_type=\"HISTO\", energy_grid=0.005,\n lpfac=10, run_type=\"BOLTZ\", band_nb=None, tauref=0, tauexp=0,\n tauen=0, soc=False, doping=None, energy_span_around_fermi=1.5,\n scissor=0.0, kpt_line=None, spin=None, cond_band=False,\n tmax=1300, tgrid=50, symprec=1e-3):\n self.lpfac = lpfac\n self._bs = bs\n self._nelec = nelec\n self.dos_type = dos_type\n self.energy_grid = energy_grid\n self.error = []\n self.run_type = run_type\n self.band_nb = band_nb\n self.spin = spin\n self.cond_band = cond_band\n self.tauref = tauref\n self.tauexp = tauexp\n self.tauen = tauen\n self.soc = soc\n self.kpt_line = kpt_line\n if doping:\n self.doping = doping\n else:\n self.doping = []\n for d in [1e16, 1e17, 1e18, 1e19, 1e20, 1e21]:\n self.doping.extend([1 * d, 2.5 * d, 5 * d, 7.5 * d])\n self.doping.append(1e22)\n self.energy_span_around_fermi = energy_span_around_fermi\n self.scissor = scissor\n self.tmax = tmax\n self.tgrid = tgrid\n self._symprec = symprec\n if self.run_type in (\"DOS\", \"BANDS\"):\n self._auto_set_energy_range()\n\n def _auto_set_energy_range(self):\n \"\"\"\n automatically determine the energy range as min/max eigenvalue\n minus/plus the buffer_in_ev\n \"\"\"\n emins = [min([e_k[0] for e_k in self._bs.bands[Spin.up]])]\n emaxs = [max([e_k[0] for e_k in self._bs.bands[Spin.up]])]\n\n if self._bs.is_spin_polarized:\n emins.append(min([e_k[0] for e_k in\n self._bs.bands[Spin.down]]))\n\n emaxs.append(max([e_k[0] for e_k in\n self._bs.bands[Spin.down]]))\n\n min_eigenval = Energy(min(emins) - self._bs.efermi, \"eV\"). \\\n to(\"Ry\")\n max_eigenval = Energy(max(emaxs) - self._bs.efermi, \"eV\"). 
\\\n to(\"Ry\")\n\n # set energy range to buffer around min/max EV\n # buffer does not increase CPU time but will help get equal\n # energies for spin up/down for band structure\n const = Energy(2, \"eV\").to(\"Ry\")\n self._ll = min_eigenval - const\n self._hl = max_eigenval + const\n\n en_range = Energy(max((abs(self._ll), abs(self._hl))),\n \"Ry\").to(\"eV\")\n\n self.energy_span_around_fermi = en_range * 1.01\n print(\"energy_span_around_fermi = \",\n self.energy_span_around_fermi)\n\n @property\n def bs(self):\n return self._bs\n\n @property\n def nelec(self):\n return self._nelec\n\n def write_energy(self, output_file):\n with open(output_file, 'w') as f:\n f.write(\"test\\n\")\n f.write(\"{}\\n\".format(len(self._bs.kpoints)))\n\n if self.run_type == \"FERMI\":\n sign = -1.0 if self.cond_band else 1.0\n for i in range(len(self._bs.kpoints)):\n eigs = []\n eigs.append(Energy(\n self._bs.bands[Spin(self.spin)][self.band_nb][i] -\n self._bs.efermi, \"eV\").to(\"Ry\"))\n f.write(\"%12.8f %12.8f %12.8f %d\\n\"\n % (self._bs.kpoints[i].frac_coords[0],\n self._bs.kpoints[i].frac_coords[1],\n self._bs.kpoints[i].frac_coords[2],\n len(eigs)))\n for j in range(len(eigs)):\n f.write(\"%18.8f\\n\" % (sign * float(eigs[j])))\n\n else:\n for i, kpt in enumerate(self._bs.kpoints):\n eigs = []\n if self.run_type == \"DOS\":\n spin_lst = [self.spin]\n else:\n spin_lst = self._bs.bands\n\n for spin in spin_lst:\n # use 90% of bottom bands since highest eigenvalues\n # are usually incorrect\n # ask Geoffroy Hautier for more details\n nb_bands = int(math.floor(self._bs.nb_bands * 0.9))\n for j in range(nb_bands):\n eigs.append(\n Energy(self._bs.bands[Spin(spin)][j][i] -\n self._bs.efermi, \"eV\").to(\"Ry\"))\n eigs.sort()\n\n if self.run_type == \"DOS\" and self._bs.is_spin_polarized:\n eigs.insert(0, self._ll)\n eigs.append(self._hl)\n\n f.write(\"%12.8f %12.8f %12.8f %d\\n\"\n % (kpt.frac_coords[0],\n kpt.frac_coords[1],\n kpt.frac_coords[2],\n len(eigs)))\n\n for j in range(len(eigs)):\n f.write(\"%18.8f\\n\" % (float(eigs[j])))\n\n def write_struct(self, output_file):\n sym = SpacegroupAnalyzer(self._bs.structure, symprec=self._symprec)\n\n with open(output_file, 'w') as f:\n f.write(\"{} {}\\n\".format(self._bs.structure.composition.formula,\n sym.get_space_group_symbol()))\n\n f.write(\"{}\\n\".format(\"\\n\".join(\n [\" \".join([\"%.5f\" % Length(i, \"ang\").to(\"bohr\") for i in row])\n for row in self._bs.structure.lattice.matrix])))\n\n ops = sym.get_symmetry_dataset()['rotations']\n f.write(\"{}\\n\".format(len(ops)))\n\n for c in ops:\n for row in c:\n f.write(\"{}\\n\".format(\" \".join(str(i) for i in row)))\n\n def write_def(self, output_file):\n # This function is useless in std version of BoltzTraP code\n # because x_trans script overwrite BoltzTraP.def\n with open(output_file, 'w') as f:\n so = \"\"\n if self._bs.is_spin_polarized or self.soc:\n so = \"so\"\n f.write(\"5, 'boltztrap.intrans', 'old', 'formatted',0\\n\" +\n \"6,'boltztrap.outputtrans', 'unknown', \"\n \"'formatted',0\\n\" +\n \"20,'boltztrap.struct', 'old', 'formatted',0\\n\"\n + \"10,'boltztrap.energy\" + so + \"', 'old', \"\n \"'formatted',0\\n\" +\n \"48,'boltztrap.engre', 'unknown', \"\n \"'unformatted',0\\n\" +\n \"49,'boltztrap.transdos', 'unknown', \"\n \"'formatted',0\\n\" +\n \"50,'boltztrap.sigxx', 'unknown', 'formatted',\"\n \"0\\n\" +\n \"51,'boltztrap.sigxxx', 'unknown', 'formatted',\"\n \"0\\n\" +\n \"21,'boltztrap.trace', 'unknown', \"\n \"'formatted',0\\n\" +\n \"22,'boltztrap.condtens', 'unknown', \"\n 
\"'formatted',0\\n\" +\n \"24,'boltztrap.halltens', 'unknown', \"\n \"'formatted',0\\n\" +\n \"30,'boltztrap_BZ.cube', 'unknown', \"\n \"'formatted',0\\n\")\n\n def write_proj(self, output_file_proj, output_file_def):\n # This function is useless in std version of BoltzTraP code\n # because x_trans script overwrite BoltzTraP.def\n for oi, o in enumerate(Orbital):\n for site_nb in range(0, len(self._bs.structure.sites)):\n if oi < len(self._bs.projections[Spin.up][0][0]):\n with open(output_file_proj + \"_\" + str(site_nb) + \"_\" + str(\n o),\n 'w') as f:\n f.write(self._bs.structure.composition.formula + \"\\n\")\n f.write(str(len(self._bs.kpoints)) + \"\\n\")\n for i in range(len(self._bs.kpoints)):\n tmp_proj = []\n for j in range(\n int(math.floor(self._bs.nb_bands * 0.9))):\n tmp_proj.append(\n self._bs.projections[Spin(self.spin)][j][\n i][oi][site_nb])\n # TODO deal with the sorting going on at\n # the energy level!!!\n # tmp_proj.sort()\n\n if self.run_type == \"DOS\" and \\\n self._bs.is_spin_polarized:\n tmp_proj.insert(0, self._ll)\n tmp_proj.append(self._hl)\n\n f.write(\"%12.8f %12.8f %12.8f %d\\n\"\n % (self._bs.kpoints[i].frac_coords[0],\n self._bs.kpoints[i].frac_coords[1],\n self._bs.kpoints[i].frac_coords[2],\n len(tmp_proj)))\n for j in range(len(tmp_proj)):\n f.write(\"%18.8f\\n\" % float(tmp_proj[j]))\n with open(output_file_def, 'w') as f:\n so = \"\"\n if self._bs.is_spin_polarized:\n so = \"so\"\n f.write(\"5, 'boltztrap.intrans', 'old', 'formatted',0\\n\" +\n \"6,'boltztrap.outputtrans', 'unknown', \"\n \"'formatted',0\\n\" +\n \"20,'boltztrap.struct', 'old', 'formatted',0\\n\"\n + \"10,'boltztrap.energy\" + so + \"', 'old', \"\n \"'formatted',0\\n\" +\n \"48,'boltztrap.engre', 'unknown', \"\n \"'unformatted',0\\n\" +\n \"49,'boltztrap.transdos', 'unknown', \"\n \"'formatted',0\\n\" +\n \"50,'boltztrap.sigxx', 'unknown', 'formatted',\"\n \"0\\n\" +\n \"51,'boltztrap.sigxxx', 'unknown', 'formatted',\"\n \"0\\n\" +\n \"21,'boltztrap.trace', 'unknown', \"\n \"'formatted',0\\n\" +\n \"22,'boltztrap.condtens', 'unknown', \"\n \"'formatted',0\\n\" +\n \"24,'boltztrap.halltens', 'unknown', \"\n \"'formatted',0\\n\" +\n \"30,'boltztrap_BZ.cube', 'unknown', \"\n \"'formatted',0\\n\")\n i = 1000\n for oi, o in enumerate(Orbital):\n for site_nb in range(0, len(self._bs.structure.sites)):\n if oi < len(self._bs.projections[Spin.up][0][0]):\n f.write(str(i) + \",\\'\" + \"boltztrap.proj_\" + str(\n site_nb) + \"_\" + str(o.name) +\n \"\\' \\'old\\', \\'formatted\\',0\\n\")\n i += 1\n\n def write_intrans(self, output_file):\n setgap = 1 if self.scissor > 0.0001 else 0\n\n if self.run_type == \"BOLTZ\" or self.run_type == \"DOS\":\n with open(output_file, 'w') as fout:\n fout.write(\"GENE # use generic interface\\n\")\n fout.write(\n \"1 0 %d %f # iskip (not presently used) idebug \"\n \"setgap shiftgap \\n\"\n % (setgap, Energy(self.scissor, \"eV\").to(\"Ry\")))\n fout.write(\n \"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy \"\n \"span around Fermilevel, number of electrons\\n\"\n % (Energy(self.energy_grid, \"eV\").to(\"Ry\"),\n Energy(self.energy_span_around_fermi, \"eV\").to(\"Ry\"),\n self._nelec))\n fout.write(\n \"CALC # CALC (calculate expansion \"\n \"coeff), NOCALC read from file\\n\")\n fout.write(\n \"%d # lpfac, number of latt-points \"\n \"per k-point\\n\" % self.lpfac)\n fout.write(\n \"%s # run mode (only BOLTZ is \"\n \"supported)\\n\" % self.run_type)\n fout.write(\n \".15 # (efcut) energy range of \"\n \"chemical potential\\n\")\n fout.write(\n \"{} {} # 
Tmax, temperature grid\\n\". \\\n format(self.tmax, self.tgrid))\n fout.write(\n \"-1. # energyrange of bands given DOS output sig_xxx and \"\n \"dos_xxx (xxx is band number)\\n\")\n fout.write(self.dos_type + \"\\n\") # e.g., HISTO or TETRA\n fout.write(\"{} {} {} 0 0 0\\n\".format(\n self.tauref, self.tauexp, self.tauen))\n fout.write(\"{}\\n\".format(2 * len(self.doping)))\n\n for d in self.doping:\n fout.write(str(d) + \"\\n\")\n for d in self.doping:\n fout.write(str(-d) + \"\\n\")\n\n elif self.run_type == \"FERMI\":\n with open(output_file, 'w') as fout:\n fout.write(\"GENE # use generic interface\\n\")\n fout.write(\n \"1 0 0 0.0 # iskip (not presently used) idebug \"\n \"setgap shiftgap \\n\")\n fout.write(\n \"0.0 %f 0.1 %6.1f # Fermilevel (Ry),energygrid,\"\n \"energy span around Fermilevel, \"\n \"number of electrons\\n\"\n % (Energy(self.energy_grid, \"eV\").to(\"Ry\"), self._nelec))\n fout.write(\n \"CALC # CALC (calculate expansion \"\n \"coeff), NOCALC read from file\\n\")\n fout.write(\n \"%d # lpfac, number of latt-points \"\n \"per k-point\\n\" % self.lpfac)\n fout.write(\n \"FERMI # run mode (only BOLTZ is \"\n \"supported)\\n\")\n fout.write(str(1) +\n \" # actual band selected: \" +\n str(self.band_nb + 1) + \" spin: \" + str(self.spin))\n\n elif self.run_type == \"BANDS\":\n if self.kpt_line is None:\n kpath = HighSymmKpath(self._bs.structure)\n self.kpt_line = [Kpoint(k, self._bs.structure.lattice) for k\n in\n kpath.get_kpoints(coords_are_cartesian=False)[\n 0]]\n self.kpt_line = [kp.frac_coords for kp in self.kpt_line]\n elif type(self.kpt_line[0]) == Kpoint:\n self.kpt_line = [kp.frac_coords for kp in self.kpt_line]\n\n with open(output_file, 'w') as fout:\n fout.write(\"GENE # use generic interface\\n\")\n fout.write(\n \"1 0 %d %f # iskip (not presently used) idebug \"\n \"setgap shiftgap \\n\"\n % (setgap, Energy(self.scissor, \"eV\").to(\"Ry\")))\n fout.write(\n \"0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy \"\n \"span around Fermilevel, \"\n \"number of electrons\\n\"\n % (Energy(self.energy_grid, \"eV\").to(\"Ry\"),\n Energy(self.energy_span_around_fermi, \"eV\").to(\"Ry\"),\n self._nelec))\n fout.write(\n \"CALC # CALC (calculate expansion \"\n \"coeff), NOCALC read from file\\n\")\n fout.write(\n \"%d # lpfac, number of latt-points \"\n \"per k-point\\n\" % self.lpfac)\n fout.write(\n \"BANDS # run mode (only BOLTZ is \"\n \"supported)\\n\")\n fout.write(\"P \" + str(len(self.kpt_line)) + \"\\n\")\n for kp in self.kpt_line:\n fout.writelines([str(k) + \" \" for k in kp])\n fout.write('\\n')\n\n def write_input(self, output_dir):\n if self._bs.is_spin_polarized or self.soc:\n self.write_energy(os.path.join(output_dir, \"boltztrap.energyso\"))\n else:\n self.write_energy(os.path.join(output_dir, \"boltztrap.energy\"))\n\n self.write_struct(os.path.join(output_dir, \"boltztrap.struct\"))\n self.write_intrans(os.path.join(output_dir, \"boltztrap.intrans\"))\n self.write_def(os.path.join(output_dir, \"BoltzTraP.def\"))\n\n if len(self.bs.projections) != 0 and self.run_type == \"DOS\":\n self.write_proj(os.path.join(output_dir, \"boltztrap.proj\"),\n os.path.join(output_dir, \"BoltzTraP.def\"))\n\n def run(self, path_dir=None, convergence=True, write_input=True,\n clear_dir=False, max_lpfac=150, min_egrid=0.00005):\n \"\"\"\n Write inputs (optional), run BoltzTraP, and ensure\n convergence (optional)\n Args:\n path_dir (str): directory in which to run BoltzTraP\n convergence (bool): whether to check convergence and make\n corrections if needed\n 
write_input: (bool) whether to write input files before the run\n (required for convergence mode)\n clear_dir: (bool) whether to remove all files in the path_dir\n before starting\n max_lpfac: (float) maximum lpfac value to try before reducing egrid\n in convergence mode\n min_egrid: (float) minimum egrid value to try before giving up in\n convergence mode\n\n Returns:\n\n \"\"\"\n\n # TODO: consider making this a part of custodian rather than pymatgen\n # A lot of this functionality (scratch dirs, handlers, monitors)\n # is built into custodian framework\n\n if convergence and not write_input:\n raise ValueError(\"Convergence mode requires write_input to be \"\n \"true\")\n\n if self.run_type in (\"BANDS\", \"DOS\", \"FERMI\"):\n convergence = False\n if self.lpfac > max_lpfac:\n max_lpfac = self.lpfac\n\n if self.run_type == \"BANDS\" and self.bs.is_spin_polarized:\n print(\"Reminder: for run_type \" + str(\n self.run_type) + \", spin component are not separated! \"\n \"(you have a spin polarized band structure)\")\n\n if self.run_type in (\"FERMI\", \"DOS\") and self.spin is None:\n if self.bs.is_spin_polarized:\n raise BoltztrapError(\n \"Spin parameter must be specified for spin polarized \"\n \"band structures!\")\n else:\n self.spin = 1\n\n dir_bz_name = \"boltztrap\"\n if path_dir is None:\n temp_dir = tempfile.mkdtemp()\n path_dir = os.path.join(temp_dir, dir_bz_name)\n else:\n path_dir = os.path.abspath(\n os.path.join(path_dir, dir_bz_name))\n\n if not os.path.exists(path_dir):\n os.mkdir(path_dir)\n elif clear_dir:\n for c in os.listdir(path_dir):\n os.remove(os.path.join(path_dir, c))\n\n FORMAT = \"%(message)s\"\n logging.basicConfig(level=logging.INFO, format=FORMAT,\n filename=os.path.join(path_dir, \"../boltztrap.out\"))\n\n with cd(path_dir):\n lpfac_start = self.lpfac\n converged = False\n\n while self.energy_grid >= min_egrid and not converged:\n self.lpfac = lpfac_start\n\n logging.info(\"lpfac, energy_grid: {} {}\".format(self.lpfac, self.energy_grid))\n\n while self.lpfac <= max_lpfac and not converged:\n\n if write_input:\n self.write_input(path_dir)\n\n bt_exe = [\"x_trans\", \"BoltzTraP\"]\n if self._bs.is_spin_polarized or self.soc:\n bt_exe.append(\"-so\")\n\n p = subprocess.Popen(bt_exe, stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n p.wait()\n\n for c in p.communicate():\n logging.info(c.decode())\n if \"error in factorization\" in c.decode():\n raise BoltztrapError(\"error in factorization\")\n\n warning = \"\"\n\n with open(os.path.join(path_dir,\n dir_bz_name + \".outputtrans\")) as f:\n for l in f:\n if \"Option unknown\" in l:\n raise BoltztrapError(\n \"DOS mode needs a custom version of \"\n \"BoltzTraP code is needed\")\n if \"WARNING\" in l:\n warning = l\n break\n if \"Error - Fermi level was not found\" in l:\n warning = l\n break\n\n if not warning and convergence:\n # check convergence for warning\n analyzer = BoltztrapAnalyzer.from_files(path_dir)\n for doping in ['n', 'p']:\n for c in analyzer.mu_doping[doping]:\n if len(analyzer.mu_doping[doping][c]) != len(\n analyzer.doping[doping]):\n warning = \"length of mu_doping array is \" \\\n \"incorrect\"\n break\n\n if doping == 'p' and \\\n sorted(\n analyzer.mu_doping[doping][\n c], reverse=True) != \\\n analyzer.mu_doping[doping][c]:\n warning = \"sorting of mu_doping array \" \\\n \"incorrect for p-type\"\n break\n\n # ensure n-type doping sorted correctly\n if doping == 'n' and sorted(\n analyzer.mu_doping[doping][c]) != \\\n analyzer.mu_doping[doping][c]:\n 
warning = \"sorting of mu_doping array \" \\\n \"incorrect for n-type\"\n break\n\n if warning:\n self.lpfac += 10\n logging.warn(\"Warning detected: {}! Increase lpfac to \"\n \"{}\".format(warning, self.lpfac))\n\n else:\n converged = True\n\n if not converged:\n self.energy_grid /= 10\n logging.info(\"Could not converge with max lpfac; \"\n \"Decrease egrid to {}\".format(self.energy_grid))\n\n if not converged:\n raise BoltztrapError(\n \"Doping convergence not reached with lpfac=\" + str(\n self.lpfac) + \", energy_grid=\" + str(self.energy_grid))\n\n return path_dir\n\n\nclass BoltztrapError(Exception):\n \"\"\"\n Exception class for boltztrap.\n Raised when the boltztrap gives an error\n \"\"\"\n\n def __init__(self, msg):\n self.msg = msg\n logging.error(self.msg)\n\n def __str__(self):\n return \"BoltztrapError : \" + self.msg\n\n\nclass BoltztrapAnalyzer(object):\n \"\"\"\n Class used to store all the data from a boltztrap run\n \"\"\"\n\n def __init__(self, gap=None, mu_steps=None, cond=None, seebeck=None,\n kappa=None, hall=None, doping=None,\n mu_doping=None, seebeck_doping=None, cond_doping=None,\n kappa_doping=None,\n hall_doping=None, intrans=None, dos=None, dos_partial=None,\n carrier_conc=None, vol=None, warning=None,\n bz_bands=None, bz_kpoints=None, fermi_surface_data=None):\n \"\"\"\n Constructor taking directly all the data generated by Boltztrap. You\n won't probably use it directly but instead use the from_files and\n from_dict methods.\n\n Args:\n gap: The gap after interpolation in eV\n mu_steps: The steps of electron chemical potential (or Fermi\n level) in eV.\n cond: The electronic conductivity tensor divided by a constant\n relaxation time (sigma/tau) at different temperature and\n fermi levels.\n The format is {temperature: [array of 3x3 tensors at each\n fermi level in mu_steps]}. The units are 1/(Ohm*m*s).\n seebeck: The Seebeck tensor at different temperatures and fermi\n levels. The format is {temperature: [array of 3x3 tensors at\n each fermi level in mu_steps]}. The units are V/K\n kappa: The electronic thermal conductivity tensor divided by a\n constant relaxation time (kappa/tau) at different temperature\n and fermi levels. The format is {temperature: [array of 3x3\n tensors at each fermi level in mu_steps]}\n The units are W/(m*K*s)\n hall: The hall tensor at different temperature and fermi levels\n The format is {temperature: [array of 27 coefficients list at\n each fermi level in mu_steps]}\n The units are m^3/C\n doping: The different doping levels that have been given to\n Boltztrap. The format is {'p':[],'n':[]} with an array of\n doping levels. The units are cm^-3\n mu_doping: Gives the electron chemical potential (or Fermi level)\n for a given set of doping.\n Format is {'p':{temperature: [fermi levels],'n':{temperature:\n [fermi levels]}}\n the fermi level array is ordered according to the doping\n levels in doping units for doping are in cm^-3 and for Fermi\n level in eV\n seebeck_doping: The Seebeck tensor at different temperatures and\n doping levels. 
The format is {'p': {temperature: [Seebeck\n tensors]}, 'n':{temperature: [Seebeck tensors]}}\n The [Seebeck tensors] array is ordered according to the\n doping levels in doping units for doping are in cm^-3 and for\n Seebeck in V/K\n cond_doping: The electronic conductivity tensor divided by a\n constant relaxation time (sigma/tau) at different\n temperatures and doping levels\n The format is {'p':{temperature: [conductivity tensors]},\n 'n':{temperature: [conductivity tensors]}}\n The [conductivity tensors] array is ordered according to the\n doping levels in doping units for doping are in cm^-3 and for\n conductivity in 1/(Ohm*m*s)\n kappa_doping: The thermal conductivity tensor divided by a constant\n relaxation time (kappa/tau) at different temperatures and\n doping levels.\n The format is {'p':{temperature: [thermal conductivity\n tensors]},'n':{temperature: [thermal conductivity tensors]}}\n The [thermal conductivity tensors] array is ordered according\n to the doping levels in doping units for doping are in cm^-3\n and for thermal conductivity in W/(m*K*s)\n hall_doping: The Hall tensor at different temperatures and doping\n levels.\n The format is {'p':{temperature: [Hall tensors]},\n 'n':{temperature: [Hall tensors]}}\n The [Hall tensors] array is ordered according to the doping\n levels in doping and each Hall tensor is represented by a 27\n coefficients list.\n The units are m^3/C\n intrans: a dictionary of inputs e.g. {\"scissor\": 0.0}\n carrier_conc: The concentration of carriers in electron (or hole)\n per unit cell\n dos: The dos computed by Boltztrap given as a pymatgen Dos object\n dos_partial: Data for the partial DOS projected on sites and\n orbitals\n vol: Volume of the unit cell in angstrom cube (A^3)\n warning: string if BoltzTraP outputted a warning, else None\n bz_bands: Data for interpolated bands on a k-point line\n (run_type=BANDS)\n bz_kpoints: k-point in reciprocal coordinates for interpolated\n bands (run_type=BANDS)\n fermi_surface_data: energy values in a 3D grid imported from the\n output .cube file.\n \"\"\"\n self.gap = gap\n self.mu_steps = mu_steps\n self._cond = cond\n self._seebeck = seebeck\n self._kappa = kappa\n self._hall = hall\n self.warning = warning\n self.doping = doping\n self.mu_doping = mu_doping\n self._seebeck_doping = seebeck_doping\n self._cond_doping = cond_doping\n self._kappa_doping = kappa_doping\n self._hall_doping = hall_doping\n self.intrans = intrans\n self._carrier_conc = carrier_conc\n self.dos = dos\n self.vol = vol\n self._dos_partial = dos_partial\n self._bz_bands = bz_bands\n self._bz_kpoints = bz_kpoints\n self.fermi_surface_data = fermi_surface_data\n\n def get_symm_bands(self, structure, efermi, kpt_line=None,\n labels_dict=None):\n \"\"\"\n Function useful to read bands from Boltztrap output and get a\n BandStructureSymmLine object comparable with that one from a DFT\n calculation (if the same kpt_line is provided). Default kpt_line\n and labels_dict is the standard path of high symmetry k-point for\n the specified structure. They could be extracted from the\n BandStructureSymmLine object that you want to compare with. 
efermi\n variable must be specified to create the BandStructureSymmLine\n object (usually it comes from DFT or Boltztrap calc)\n \"\"\"\n try:\n if kpt_line is None:\n kpath = HighSymmKpath(structure)\n kpt_line = [Kpoint(k, structure.lattice.reciprocal_lattice) for\n k in\n kpath.get_kpoints(coords_are_cartesian=False)[0]]\n labels_dict = {l: k for k, l in zip(\n *kpath.get_kpoints(coords_are_cartesian=False)) if l}\n kpt_line = [kp.frac_coords for kp in kpt_line]\n elif type(kpt_line[0]) == Kpoint:\n kpt_line = [kp.frac_coords for kp in kpt_line]\n labels_dict = {k: labels_dict[k].frac_coords for k in\n labels_dict}\n\n idx_list = []\n # kpt_dense=np.array([kp for kp in self._bz_kpoints])\n for i, kp in enumerate(kpt_line):\n w = []\n prec = 1e-05\n while len(w) == 0:\n w = np.where(np.all(\n np.abs(kp - self._bz_kpoints) < [prec] * 3,\n axis=1))[0]\n prec *= 10\n\n # print( prec )\n idx_list.append([i, w[0]])\n\n # if len(w)>0:\n # idx_list.append([i,w[0]])\n # else:\n # w=np.where(np.all(np.abs(kp.frac_coords-self._bz_kpoints)\n # <[1e-04,1e-04,1e-04],axis=1))[0]\n # idx_list.append([i,w[0]])\n\n idx_list = np.array(idx_list)\n # print( idx_list.shape )\n\n bands_dict = {Spin.up: (self._bz_bands * Energy(1, \"Ry\").to(\n \"eV\") + efermi).T[:, idx_list[:, 1]].tolist()}\n # bz_kpoints = bz_kpoints[idx_list[:,1]].tolist()\n\n sbs = BandStructureSymmLine(kpt_line, bands_dict,\n structure.lattice.reciprocal_lattice,\n efermi,\n labels_dict=labels_dict)\n\n return sbs\n\n except:\n raise BoltztrapError(\n \"Bands are not in output of BoltzTraP.\\nBolztrapRunner must \"\n \"be run with run_type=BANDS\")\n\n @staticmethod\n def check_acc_bzt_bands(sbs_bz, sbs_ref, warn_thr=(0.03, 0.03)):\n \"\"\"\n Compare sbs_bz BandStructureSymmLine calculated with boltztrap with\n the sbs_ref BandStructureSymmLine as reference (from MP for\n instance), computing correlation and energy difference for eight bands\n around the gap (semiconductors) or fermi level (metals).\n warn_thr is a threshold to get a warning in the accuracy of Boltztap\n interpolated bands.\n Return a dictionary with these keys:\n - \"N\": the index of the band compared; inside each there are:\n - \"Corr\": correlation coefficient for the 8 compared bands\n - \"Dist\": energy distance for the 8 compared bands\n - \"branch_name\": energy distance for that branch\n - \"avg_corr\": average of correlation coefficient over the 8 bands\n - \"avg_dist\": average of energy distance over the 8 bands\n - \"nb_list\": list of indexes of the 8 compared bands\n - \"acc_thr\": list of two float corresponing to the two warning\n thresholds in input\n - \"acc_err\": list of two bools:\n True if the avg_corr > warn_thr[0], and\n True if the avg_dist > warn_thr[1]\n See also compare_sym_bands function doc\n \"\"\"\n if not sbs_ref.is_metal() and not sbs_bz.is_metal():\n vbm_idx = sbs_bz.get_vbm()['band_index'][Spin.up][-1]\n cbm_idx = sbs_bz.get_cbm()['band_index'][Spin.up][0]\n nb_list = range(vbm_idx - 3, cbm_idx + 4)\n\n else:\n bnd_around_efermi = []\n delta = 0\n spin = sbs_bz.bands.keys()[0]\n while len(bnd_around_efermi) < 8 and delta < 100:\n delta += 0.1\n bnd_around_efermi = []\n for nb in range(len(sbs_bz.bands[spin])):\n for kp in range(len(sbs_bz.bands[spin][nb])):\n if abs(sbs_bz.bands[spin][nb][\n kp] - sbs_bz.efermi) < delta:\n bnd_around_efermi.append(nb)\n break\n if len(bnd_around_efermi) < 8:\n print(\"Warning! 
check performed on \" + str(\n len(bnd_around_efermi)))\n nb_list = bnd_around_efermi\n else:\n nb_list = bnd_around_efermi[:8]\n\n # print(nb_list)\n bcheck = compare_sym_bands(sbs_bz, sbs_ref, nb_list)\n # print(bcheck)\n acc_err = [False, False]\n avg_corr = sum([item[1]['Corr'] for item in bcheck.iteritems()]) / 8\n avg_distance = sum([item[1]['Dist'] for item in bcheck.iteritems()]) / 8\n\n if avg_corr > warn_thr[0]: acc_err[0] = True\n if avg_distance > warn_thr[0]: acc_err[1] = True\n\n bcheck['avg_corr'] = avg_corr\n bcheck['avg_distance'] = avg_distance\n bcheck['acc_err'] = acc_err\n bcheck['acc_thr'] = warn_thr\n bcheck['nb_list'] = nb_list\n\n if True in acc_err:\n print(\"Warning! some bands around gap are not accurate\")\n\n return bcheck\n\n def get_seebeck(self, output='eigs', doping_levels=True):\n \"\"\"\n Gives the seebeck coefficient (microV/K) in either a\n full 3x3 tensor form, as 3 eigenvalues, or as the average value\n (trace/3.0) If doping_levels=True, the results are given at\n different p and n doping\n levels (given by self.doping), otherwise it is given as a series\n of electron chemical potential values\n\n Args:\n output (string): the type of output. 'tensor' give the full\n 3x3 tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n\n Returns:\n If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.\n The 'p' links to Seebeck at p-type doping\n and 'n' to the Seebeck at n-type doping. Otherwise, returns a\n {temp:[]} dictionary\n The result contains either the sorted three eigenvalues of\n the symmetric\n Seebeck tensor (output='eigs') or a full tensor (3x3 array) (\n output='tensor') or as an average\n (output='average').\n\n units are microV/K\n \"\"\"\n return BoltztrapAnalyzer._format_to_output(self._seebeck,\n self._seebeck_doping,\n output,\n doping_levels, 1e6)\n\n def get_conductivity(self, output='eigs', doping_levels=True,\n relaxation_time=1e-14):\n \"\"\"\n Gives the conductivity (1/Ohm*m) in either a full 3x3 tensor\n form, as 3 eigenvalues, or as the average value\n (trace/3.0) If doping_levels=True, the results are given at\n different p and n doping\n levels (given by self.doping), otherwise it is given as a series\n of electron chemical potential values\n\n Args:\n output (string): the type of output. 'tensor' give the full\n 3x3 tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n relaxation_time (float): constant relaxation time in secs\n\n Returns:\n If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.\n The 'p' links to conductivity\n at p-type doping and 'n' to the conductivity at n-type\n doping. Otherwise,\n returns a {temp:[]} dictionary. 
The result contains either\n the sorted three eigenvalues of the symmetric\n conductivity tensor (format='eigs') or a full tensor (3x3\n array) (output='tensor') or as an average\n (output='average').\n The result includes a given constant relaxation time\n\n units are 1/Ohm*m\n \"\"\"\n return BoltztrapAnalyzer._format_to_output(self._cond,\n self._cond_doping, output,\n doping_levels,\n relaxation_time)\n\n def get_power_factor(self, output='eigs', doping_levels=True,\n relaxation_time=1e-14):\n \"\"\"\n Gives the power factor (Seebeck^2 * conductivity) in units\n microW/(m*K^2) in either a full 3x3 tensor form,\n as 3 eigenvalues, or as the average value (trace/3.0) If\n doping_levels=True, the results are given at\n different p and n doping levels (given by self.doping), otherwise it\n is given as a series of\n electron chemical potential values\n\n Args:\n output (string): the type of output. 'tensor' give the full 3x3\n tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n relaxation_time (float): constant relaxation time in secs\n\n Returns:\n If doping_levels=True, a dictionnary {temp:{'p':[],'n':[]}}. The\n 'p' links to power factor\n at p-type doping and 'n' to the conductivity at n-type doping.\n Otherwise,\n returns a {temp:[]} dictionary. The result contains either the\n sorted three eigenvalues of the symmetric\n power factor tensor (format='eigs') or a full tensor (3x3 array) (\n output='tensor') or as an average\n (output='average').\n The result includes a given constant relaxation time\n\n units are microW/(m K^2)\n \"\"\"\n result = None\n result_doping = None\n if doping_levels:\n result_doping = {doping: {t: [] for t in\n self._seebeck_doping[doping]} for\n doping in self._seebeck_doping}\n\n for doping in result_doping:\n for t in result_doping[doping]:\n for i in range(len(self.doping[doping])):\n full_tensor = np.dot(self._cond_doping[doping][t][i],\n np.dot(\n self._seebeck_doping[doping][\n t][i],\n self._seebeck_doping[doping][\n t][i]))\n result_doping[doping][t].append(full_tensor)\n\n else:\n result = {t: [] for t in self._seebeck}\n for t in result:\n for i in range(len(self.mu_steps)):\n full_tensor = np.dot(self._cond[t][i],\n np.dot(self._seebeck[t][i],\n self._seebeck[t][i]))\n result[t].append(full_tensor)\n\n return BoltztrapAnalyzer._format_to_output(result, result_doping,\n output, doping_levels,\n multi=1e6 * relaxation_time)\n\n def get_thermal_conductivity(self, output='eigs', doping_levels=True,\n k_el=True, relaxation_time=1e-14):\n \"\"\"\n Gives the electronic part of the thermal conductivity in either a\n full 3x3 tensor form,\n as 3 eigenvalues, or as the average value (trace/3.0) If\n doping_levels=True, the results are given at\n different p and n doping levels (given by self.doping), otherwise it\n is given as a series of\n electron chemical potential values\n\n Args:\n output (string): the type of output. 'tensor' give the full 3x3\n tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n k_el (boolean): True for k_0-PF*T, False for k_0\n relaxation_time (float): constant relaxation time in secs\n\n Returns:\n If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. 
The\n 'p' links to thermal conductivity\n at p-type doping and 'n' to the thermal conductivity at n-type\n doping. Otherwise,\n returns a {temp:[]} dictionary. The result contains either the\n sorted three eigenvalues of the symmetric\n conductivity tensor (format='eigs') or a full tensor (3x3 array) (\n output='tensor') or as an average\n (output='average').\n The result includes a given constant relaxation time\n\n units are W/mK\n \"\"\"\n result = None\n result_doping = None\n if doping_levels:\n result_doping = {doping: {t: [] for t in\n self._seebeck_doping[doping]} for\n doping in self._seebeck_doping}\n for doping in result_doping:\n for t in result_doping[doping]:\n for i in range(len(self.doping[doping])):\n if k_el:\n pf_tensor = np.dot(self._cond_doping[doping][t][i],\n np.dot(\n self._seebeck_doping[doping][\n t][i],\n self._seebeck_doping[doping][\n t][i]))\n result_doping[doping][t].append((\n self._kappa_doping[doping][t][\n i] - pf_tensor * t))\n else:\n result_doping[doping][t].append((\n self._kappa_doping[doping][t][i]))\n else:\n result = {t: [] for t in self._seebeck}\n for t in result:\n for i in range(len(self.mu_steps)):\n if k_el:\n pf_tensor = np.dot(self._cond[t][i],\n np.dot(self._seebeck[t][i],\n self._seebeck[t][i]))\n result[t].append((self._kappa[t][i] - pf_tensor * t))\n else:\n result[t].append((self._kappa[t][i]))\n\n return BoltztrapAnalyzer._format_to_output(result, result_doping,\n output, doping_levels,\n multi=relaxation_time)\n\n def get_zt(self, output='eigs', doping_levels=True, relaxation_time=1e-14,\n kl=1.0):\n \"\"\"\n Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full\n 3x3 tensor form,\n as 3 eigenvalues, or as the average value (trace/3.0) If\n doping_levels=True, the results are given at\n different p and n doping levels (given by self.doping), otherwise it\n is given as a series of\n electron chemical potential values. We assume a constant relaxation\n time and a constant\n lattice thermal conductivity\n\n Args:\n output (string): the type of output. 'tensor' give the full 3x3\n tensor, 'eigs' its 3 eigenvalues and\n 'average' the average of the three eigenvalues\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n relaxation_time (float): constant relaxation time in secs\n k_l (float): lattice thermal cond in W/(m*K)\n\n Returns:\n If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The\n 'p' links to ZT\n at p-type doping and 'n' to the ZT at n-type doping. Otherwise,\n returns a {temp:[]} dictionary. 
The result contains either the\n sorted three eigenvalues of the symmetric\n ZT tensor (format='eigs') or a full tensor (3x3 array) (\n output='tensor') or as an average\n (output='average').\n The result includes a given constant relaxation time and lattice\n thermal conductivity\n \"\"\"\n result = None\n result_doping = None\n if doping_levels:\n result_doping = {doping: {t: [] for t in\n self._seebeck_doping[doping]} for\n doping in self._seebeck_doping}\n\n for doping in result_doping:\n for t in result_doping[doping]:\n for i in range(len(self.doping[doping])):\n pf_tensor = np.dot(self._cond_doping[doping][t][i],\n np.dot(\n self._seebeck_doping[doping][t][\n i],\n self._seebeck_doping[doping][t][\n i]))\n thermal_conduct = (self._kappa_doping[doping][t][i]\n - pf_tensor * t) * relaxation_time\n result_doping[doping][t].append(\n np.dot(pf_tensor * relaxation_time * t,\n np.linalg.inv(\n thermal_conduct + kl * np.eye(3, 3))))\n else:\n result = {t: [] for t in self._seebeck}\n for t in result:\n for i in range(len(self.mu_steps)):\n pf_tensor = np.dot(self._cond[t][i],\n np.dot(self._seebeck[t][i],\n self._seebeck[t][i]))\n thermal_conduct = (self._kappa[t][i]\n - pf_tensor * t) * relaxation_time\n result[t].append(np.dot(pf_tensor * relaxation_time * t,\n np.linalg.inv(\n thermal_conduct + kl *\n np.eye(3, 3))))\n\n return BoltztrapAnalyzer._format_to_output(result, result_doping,\n output, doping_levels)\n\n def get_average_eff_mass(self, output='eigs', doping_levels=True):\n \"\"\"\n Gives the average effective mass tensor. We call it average because\n it takes into account all the bands\n and regions in the Brillouin zone. This is different than the standard\n textbook effective mass which relates\n often to only one (parabolic) band.\n The average effective mass tensor is defined as the integrated\n average of the second derivative of E(k)\n This effective mass tensor takes into account:\n -non-parabolicity\n -multiple extrema\n -multiple bands\n\n For more information about it. See:\n\n Hautier, G., Miglio, A., Waroquiers, D., Rignanese, G., & Gonze,\n X. (2014).\n How Does Chemistry Influence Electron Effective Mass in Oxides?\n A High-Throughput Computational Analysis. Chemistry of Materials,\n 26(19), 5447–5458. doi:10.1021/cm404079a\n\n or\n\n Hautier, G., Miglio, A., Ceder, G., Rignanese, G.-M., & Gonze,\n X. (2013).\n Identification and design principles of low hole effective mass\n p-type transparent conducting oxides.\n Nature Communications, 4, 2292. 
doi:10.1038/ncomms3292\n\n Depending on the value of output, we have either the full 3x3\n effective mass tensor,\n its 3 eigenvalues or an average\n\n Args:\n output (string): 'eigs' for eigenvalues, 'tensor' for the full\n tensor and 'average' for an average (trace/3)\n doping_levels (boolean): True for the results to be given at\n different doping levels, False for results\n at different electron chemical potentials\n Returns:\n If doping_levels=True,a dictionary {'p':{temp:[]},'n':{temp:[]}}\n with an array of effective mass tensor, eigenvalues of average\n value (depending on output) for each temperature and for each\n doping level.\n The 'p' links to hole effective mass tensor and 'n' to electron\n effective mass tensor.\n \"\"\"\n result = None\n result_doping = None\n conc = self.get_carrier_concentration()\n if doping_levels:\n result_doping = {doping: {t: [] for t in self._cond_doping[doping]}\n for\n doping in self.doping}\n for doping in result_doping:\n for temp in result_doping[doping]:\n for i in range(len(self.doping[doping])):\n result_doping[doping][temp].append(np.linalg.inv(\n np.array(self._cond_doping[doping][temp][i])) * \\\n self.doping[doping][\n i] * 10 ** 6 * e ** 2 / m_e)\n else:\n result = {t: [] for t in self._seebeck}\n for temp in result:\n for i in range(len(self.mu_steps)):\n try:\n cond_inv = np.linalg.inv(np.array(self._cond[temp][i]))\n except np.linalg.LinAlgError:\n pass\n result[temp].append(cond_inv * \\\n conc[temp][i] * 10 ** 6 * e ** 2 / m_e)\n\n return BoltztrapAnalyzer._format_to_output(result, result_doping,\n output, doping_levels)\n\n def get_extreme(self, target_prop, maximize=True, min_temp=None,\n max_temp=None, min_doping=None, max_doping=None,\n isotropy_tolerance=0.05, use_average=True):\n\n \"\"\"\n This method takes in eigenvalues over a range of carriers,\n temperatures, and doping levels, and tells you what is the \"best\"\n value that can be achieved for the given target_property. Note that\n this method searches the doping dict only, not the full mu dict.\n\n Args:\n target_prop: target property, i.e. \"seebeck\", \"power factor\",\n \"conductivity\", \"kappa\", or \"zt\"\n maximize: True to maximize, False to minimize (e.g. kappa)\n min_temp: minimum temperature allowed\n max_temp: maximum temperature allowed\n min_doping: minimum doping allowed (e.g., 1E18)\n max_doping: maximum doping allowed (e.g., 1E20)\n isotropy_tolerance: tolerance for isotropic (0.05 = 5%)\n use_average: True for avg of eigenval, False for max eigenval\n\n Returns:\n A dictionary with keys {\"p\", \"n\", \"best\"} with sub-keys:\n {\"value\", \"temperature\", \"doping\", \"isotropic\"}\n\n \"\"\"\n\n def is_isotropic(x, isotropy_tolerance):\n \"\"\"\n Internal method to tell you if 3-vector \"x\" is isotropic\n\n Args:\n x: the vector to determine isotropy for\n isotropy_tolerance: tolerance, e.g. 
0.05 is 5%\n \"\"\"\n if len(x) != 3:\n raise ValueError(\"Invalid input to is_isotropic!\")\n\n st = sorted(x)\n return bool(all([st[0], st[1], st[2]]) and \\\n (abs((st[1] - st[0]) / st[1]) <= isotropy_tolerance) and \\\n (abs((st[2] - st[0])) / st[2] <= isotropy_tolerance) and \\\n (abs((st[2] - st[1]) / st[2]) <= isotropy_tolerance))\n\n if target_prop.lower() == \"seebeck\":\n d = self.get_seebeck(output=\"eigs\", doping_levels=True)\n\n elif target_prop.lower() == \"power factor\":\n d = self.get_power_factor(output=\"eigs\", doping_levels=True)\n\n elif target_prop.lower() == \"conductivity\":\n d = self.get_conductivity(output=\"eigs\", doping_levels=True)\n\n elif target_prop.lower() == \"kappa\":\n d = self.get_thermal_conductivity(output=\"eigs\",\n doping_levels=True)\n elif target_prop.lower() == \"zt\":\n d = self.get_zt(output=\"eigs\", doping_levels=True)\n\n else:\n raise ValueError(\"Target property: {} not recognized!\".\n format(target_prop))\n\n absval = True # take the absolute value of properties\n\n x_val = None\n x_temp = None\n x_doping = None\n x_isotropic = None\n output = {}\n\n min_temp = min_temp or 0\n max_temp = max_temp or float('inf')\n min_doping = min_doping or 0\n max_doping = max_doping or float('inf')\n\n for pn in ('p', 'n'):\n for t in d[pn]: # temperatures\n if min_temp <= float(t) <= max_temp:\n for didx, evs in enumerate(d[pn][t]):\n doping_lvl = self.doping[pn][didx]\n if min_doping <= doping_lvl <= max_doping:\n isotropic = is_isotropic(evs, isotropy_tolerance)\n if absval:\n evs = [abs(x) for x in evs]\n if use_average:\n val = float(sum(evs)) / len(evs)\n else:\n val = max(evs)\n if x_val is None or (val > x_val and maximize) \\\n or (val < x_val and not maximize):\n x_val = val\n x_temp = t\n x_doping = doping_lvl\n x_isotropic = isotropic\n\n output[pn] = {'value': x_val, 'temperature': x_temp,\n 'doping': x_doping, 'isotropic': x_isotropic}\n x_val = None\n\n if maximize:\n max_type = 'p' if output['p']['value'] >= \\\n output['n']['value'] else 'n'\n else:\n max_type = 'p' if output['p']['value'] <= \\\n output['n']['value'] else 'n'\n\n output['best'] = output[max_type]\n output['best']['carrier_type'] = max_type\n\n return output\n\n @staticmethod\n def _format_to_output(tensor, tensor_doping, output, doping_levels,\n multi=1.0):\n if doping_levels:\n full_tensor = tensor_doping\n result = {doping: {t: [] for t in tensor_doping[doping]} for doping\n in tensor_doping}\n for doping in full_tensor:\n for temp in full_tensor[doping]:\n for i in range(len(full_tensor[doping][temp])):\n if output in ['eig', 'eigs']:\n result[doping][temp].append(sorted(\n np.linalg.eigh(full_tensor[doping][temp][i])[\n 0] * multi))\n elif output == 'tensor':\n result[doping][temp].append(\n np.array(full_tensor[doping][temp][i]) * multi)\n elif output == 'average':\n result[doping][temp].append(\n (full_tensor[doping][temp][i][0][0] \\\n + full_tensor[doping][temp][i][1][1] \\\n + full_tensor[doping][temp][i][2][\n 2]) * multi / 3.0)\n else:\n raise ValueError(\"Unknown output format: \"\n \"{}\".format(output))\n else:\n full_tensor = tensor\n result = {t: [] for t in tensor}\n for temp in full_tensor:\n for i in range(len(tensor[temp])):\n if output in ['eig', 'eigs']:\n result[temp].append(sorted(\n np.linalg.eigh(full_tensor[temp][i])[0] * multi))\n elif output == 'tensor':\n result[temp].append(\n np.array(full_tensor[temp][i]) * multi)\n elif output == 'average':\n result[temp].append((full_tensor[temp][i][0][0]\n + full_tensor[temp][i][1][1]\n + 
full_tensor[temp][i][2][\n 2]) * multi / 3.0)\n else:\n raise ValueError(\"Unknown output format: {}\".\n format(output))\n return result\n\n def get_complete_dos(self, structure, analyzer_for_second_spin=None):\n \"\"\"\n Gives a CompleteDos object with the DOS from the interpolated\n projected band structure\n Args:\n the structure (necessary to identify sites for projection)\n analyzer_for_second_spin must be specified to have a\n CompleteDos with both Spin components\n Returns:\n a CompleteDos object\n Example of use in case of spin polarized case:\n\n BoltztrapRunner(bs=bs,nelec=10,run_type=\"DOS\",spin=1).run(path_dir='dos_up/')\n an_up=BoltztrapAnalyzer.from_files(\"dos_up/boltztrap/\",dos_spin=1)\n\n BoltztrapRunner(bs=bs,nelec=10,run_type=\"DOS\",spin=-1).run(path_dir='dos_dw/')\n an_dw=BoltztrapAnalyzer.from_files(\"dos_dw/boltztrap/\",dos_spin=-1)\n\n cdos=an_up.get_complete_dos(bs.structure,an_dw)\n\n \"\"\"\n pdoss = {}\n spin_1 = list(self.dos.densities.keys())[0]\n\n if analyzer_for_second_spin:\n if not np.all(self.dos.energies ==\n analyzer_for_second_spin.dos.energies):\n raise BoltztrapError(\n \"Dos merging error: energies of the two dos are different\")\n\n spin_2 = list(analyzer_for_second_spin.dos.densities.keys())[0]\n if spin_1 == spin_2:\n raise BoltztrapError(\n \"Dos merging error: spin component are the same\")\n\n for s in self._dos_partial:\n if structure.sites[int(s)] not in pdoss:\n pdoss[structure.sites[int(s)]] = {}\n for o in self._dos_partial[s]:\n if Orbital[o] not in pdoss[structure.sites[int(s)]]:\n pdoss[structure.sites[int(s)]][Orbital[o]] = {}\n pdoss[structure.sites[int(s)]][Orbital[o]][\n spin_1] = self._dos_partial[s][o]\n if analyzer_for_second_spin:\n pdoss[structure.sites[int(s)]][Orbital[o]][\n spin_2] = analyzer_for_second_spin._dos_partial[s][o]\n if analyzer_for_second_spin:\n tdos = Dos(self.dos.efermi, self.dos.energies,\n {spin_1: self.dos.densities[spin_1],\n spin_2: analyzer_for_second_spin.dos.densities[\n spin_2]})\n else:\n tdos = self.dos\n\n return CompleteDos(structure, total_dos=tdos, pdoss=pdoss)\n\n def get_mu_bounds(self, temp=300):\n return min(self.mu_doping['p'][temp]), max(self.mu_doping['n'][temp])\n\n def get_carrier_concentration(self):\n \"\"\"\n gives the carrier concentration (in cm^-3)\n\n Returns\n a dictionary {temp:[]} with an array of carrier concentration\n (in cm^-3) at each temperature\n The array relates to each step of electron chemical potential\n \"\"\"\n\n return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]]\n for temp in self._carrier_conc}\n\n def get_hall_carrier_concentration(self):\n \"\"\"\n gives the Hall carrier concentration (in cm^-3). 
This is the trace of\n the Hall tensor (see Boltztrap source code) Hall carrier concentration\n are not always exactly the same than carrier concentration.\n\n Returns\n a dictionary {temp:[]} with an array of Hall carrier concentration\n (in cm^-3) at each temperature The array relates to each step of\n electron chemical potential\n \"\"\"\n result = {temp: [] for temp in self._hall}\n for temp in self._hall:\n for i in self._hall[temp]:\n trace = (i[1][2][0] + i[2][0][1] + i[0][1][2]) / 3.0\n if trace != 0.0:\n result[temp].append(1e-6 / (trace * e))\n else:\n result[temp].append(0.0)\n return result\n\n @staticmethod\n def parse_outputtrans(path_dir):\n \"\"\"\n Parses .outputtrans file\n\n Args:\n path_dir: dir containing boltztrap.outputtrans\n\n Returns:\n tuple - (run_type, warning, efermi, gap, doping_levels)\n\n \"\"\"\n run_type = None\n warning = None\n efermi = None\n gap = None\n doping_levels = []\n\n with open(os.path.join(path_dir, \"boltztrap.outputtrans\"), 'r') \\\n as f:\n for line in f:\n if \"WARNING\" in line:\n warning = line\n elif \"Calc type:\" in line:\n run_type = line.split()[-1]\n elif line.startswith(\"VBM\"):\n efermi = Energy(line.split()[1], \"Ry\").to(\"eV\")\n elif line.startswith(\"Egap:\"):\n gap = Energy(float(line.split()[1]), \"Ry\").to(\"eV\")\n elif line.startswith(\"Doping level number\"):\n doping_levels.append(float(line.split()[6]))\n\n return run_type, warning, efermi, gap, doping_levels\n\n @staticmethod\n def parse_transdos(path_dir, efermi, dos_spin=1, trim_dos=False):\n\n \"\"\"\n Parses .transdos (total DOS) and .transdos_x_y (partial DOS) files\n Args:\n path_dir: (str) dir containing DOS files\n efermi: (float) Fermi energy\n dos_spin: (int) -1 for spin down, +1 for spin up\n trim_dos: (bool) whether to post-process / trim DOS\n\n Returns:\n tuple - (DOS, dict of partial DOS)\n \"\"\"\n\n data_dos = {'total': [], 'partial': {}}\n # parse the total DOS data\n ## format is energy, DOS, integrated DOS\n with open(os.path.join(path_dir, \"boltztrap.transdos\"), 'r') as f:\n count_series = 0 # TODO: why is count_series needed?\n for line in f:\n if line.lstrip().startswith(\"#\"):\n count_series += 1\n if count_series > 1:\n break\n else:\n data_dos['total'].append(\n [Energy(float(line.split()[0]), \"Ry\").to(\"eV\"),\n float(line.split()[1])])\n total_elec = float(line.split()[2])\n\n lw_l = 0\n hg_l = -len(data_dos['total'])\n if trim_dos:\n # Francesco knows what this does\n # It has something to do with a trick of adding fake energies\n # at the endpoints of the DOS, and then re-trimming it. 
This is\n # to get the same energy scale for up and down spin DOS.\n tmp_data = np.array(data_dos['total'])\n tmp_den = np.trim_zeros(tmp_data[:, 1], 'f')[1:]\n lw_l = len(tmp_data[:, 1]) - len(tmp_den)\n tmp_ene = tmp_data[lw_l:, 0]\n tmp_den = np.trim_zeros(tmp_den, 'b')[:-1]\n hg_l = len(tmp_ene) - len(tmp_den)\n tmp_ene = tmp_ene[:-hg_l]\n tmp_data = np.vstack((tmp_ene, tmp_den)).T\n data_dos['total'] = tmp_data.tolist()\n\n # parse partial DOS data\n for file_name in os.listdir(path_dir):\n if file_name.endswith(\n \"transdos\") and file_name != 'boltztrap.transdos':\n tokens = file_name.split(\".\")[1].split(\"_\")\n site = tokens[1]\n orb = '_'.join(tokens[2:])\n with open(os.path.join(path_dir, file_name), 'r') as f:\n for line in f:\n if not line.lstrip().startswith(\" #\"):\n if site not in data_dos['partial']:\n data_dos['partial'][site] = {}\n if orb not in data_dos['partial'][site]:\n data_dos['partial'][site][orb] = []\n data_dos['partial'][site][orb].append(\n float(line.split()[1]))\n data_dos['partial'][site][orb] = data_dos['partial'][site][\n orb][lw_l:-hg_l]\n\n dos_full = {'energy': [], 'density': []}\n\n for t in data_dos['total']:\n dos_full['energy'].append(t[0])\n dos_full['density'].append(t[1])\n\n dos = Dos(efermi, dos_full['energy'],\n {Spin(dos_spin): dos_full['density']})\n dos_partial = data_dos['partial'] # TODO: make this real DOS object?\n\n return dos, dos_partial\n\n @staticmethod\n def parse_intrans(path_dir):\n \"\"\"\n Parses boltztrap.intrans mainly to extract the value of scissor applied to the bands or some other inputs\n Args:\n path_dir: (str) dir containing the boltztrap.intrans file\n Returns:\n intrans (dict): a dictionary containing various inputs that had been used in the Boltztrap run.\n \"\"\"\n intrans = {}\n with open(os.path.join(path_dir, \"boltztrap.intrans\"), 'r') as f:\n for line in f:\n if \"iskip\" in line:\n intrans[\"scissor\"] = Energy(float(line.split(\" \")[3]),\n \"Ry\").to(\"eV\")\n if \"HISTO\" in line or \"TETRA\" in line:\n intrans[\"dos_type\"] = line[:-1]\n return intrans\n\n @staticmethod\n def parse_struct(path_dir):\n \"\"\"\n Parses boltztrap.struct file (only the volume)\n Args:\n path_dir: (str) dir containing the boltztrap.struct file\n\n Returns:\n (float) volume\n \"\"\"\n with open(os.path.join(path_dir, \"boltztrap.struct\"), 'r') as f:\n tokens = f.readlines()\n return Lattice([[Length(float(tokens[i].split()[j]), \"bohr\").\n to(\"ang\") for j in range(3)] for i in\n range(1, 4)]).volume\n\n @staticmethod\n def parse_cond_and_hall(path_dir, doping_levels=None):\n \"\"\"\n Parses the conductivity and Hall tensors\n Args:\n path_dir: Path containing .condtens / .halltens files\n doping_levels: ([float]) - doping lvls, parse outtrans to get this\n\n Returns:\n mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,\n mu_doping, seebeck_doping, cond_doping, kappa_doping,\n hall_doping, carrier_conc\n \"\"\"\n\n # Step 1: parse raw data but do not convert to final format\n t_steps = set()\n mu_steps = set()\n data_full = []\n data_hall = []\n data_doping_full = []\n data_doping_hall = []\n doping_levels = doping_levels or []\n\n # parse the full conductivity/Seebeck/kappa0/etc data\n ## also initialize t_steps and mu_steps\n with open(os.path.join(path_dir, \"boltztrap.condtens\"), 'r') as f:\n for line in f:\n if not line.startswith(\"#\"):\n mu_steps.add(float(line.split()[0]))\n t_steps.add(int(float(line.split()[1])))\n data_full.append([float(c) for c in line.split()])\n\n # parse the full Hall tensor\n 
with open(os.path.join(path_dir, \"boltztrap.halltens\"), 'r') as f:\n for line in f:\n if not line.startswith(\"#\"):\n data_hall.append([float(c) for c in line.split()])\n\n if len(doping_levels) != 0:\n # parse doping levels version of full cond. tensor, etc.\n with open(\n os.path.join(path_dir, \"boltztrap.condtens_fixdoping\"),\n 'r') as f:\n for line in f:\n if not line.startswith(\"#\") and len(line) > 2:\n data_doping_full.append([float(c)\n for c in line.split()])\n\n # parse doping levels version of full hall tensor\n with open(\n os.path.join(path_dir, \"boltztrap.halltens_fixdoping\"),\n 'r') as f:\n for line in f:\n if not line.startswith(\"#\") and len(line) > 2:\n data_doping_hall.append(\n [float(c) for c in line.split()])\n\n # Step 2: convert raw data to final format\n\n # sort t and mu_steps (b/c they are sets not lists)\n # and convert to correct energy\n t_steps = sorted([t for t in t_steps])\n mu_steps = sorted([Energy(m, \"Ry\").to(\"eV\") for m in mu_steps])\n\n # initialize output variables - could use defaultdict instead\n # I am leaving things like this for clarity\n cond = {t: [] for t in t_steps}\n seebeck = {t: [] for t in t_steps}\n kappa = {t: [] for t in t_steps}\n hall = {t: [] for t in t_steps}\n carrier_conc = {t: [] for t in t_steps}\n dos_full = {'energy': [], 'density': []}\n\n mu_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n seebeck_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n cond_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n kappa_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n hall_doping = {'p': {t: [] for t in t_steps},\n 'n': {t: [] for t in t_steps}}\n\n # process doping levels\n pn_doping_levels = {'p': [], 'n': []}\n for d in doping_levels:\n if d > 0:\n pn_doping_levels['p'].append(d)\n else:\n pn_doping_levels['n'].append(-d)\n\n # process raw conductivity data, etc.\n for d in data_full:\n temp, doping = d[1], d[2]\n carrier_conc[temp].append(doping)\n\n cond[temp].append(np.reshape(d[3:12], (3, 3)).tolist())\n seebeck[temp].append(np.reshape(d[12:21], (3, 3)).tolist())\n kappa[temp].append(np.reshape(d[21:30], (3, 3)).tolist())\n\n # process raw Hall data\n for d in data_hall:\n temp, doping = d[1], d[2]\n hall_tens = [np.reshape(d[3:12], (3, 3)).tolist(),\n np.reshape(d[12:21], (3, 3)).tolist(),\n np.reshape(d[21:30], (3, 3)).tolist()]\n hall[temp].append(hall_tens)\n\n # process doping conductivity data, etc.\n for d in data_doping_full:\n temp, doping, mu = d[0], d[1], d[-1]\n pn = 'p' if doping > 0 else 'n'\n mu_doping[pn][temp].append(Energy(mu, \"Ry\").to(\"eV\"))\n cond_doping[pn][temp].append(\n np.reshape(d[2:11], (3, 3)).tolist())\n seebeck_doping[pn][temp].append(\n np.reshape(d[11:20], (3, 3)).tolist())\n kappa_doping[pn][temp].append(\n np.reshape(d[20:29], (3, 3)).tolist())\n\n # process doping Hall data\n for d in data_doping_hall:\n temp, doping, mu = d[0], d[1], d[-1]\n pn = 'p' if doping > 0 else 'n'\n hall_tens = [np.reshape(d[2:11], (3, 3)).tolist(),\n np.reshape(d[11:20], (3, 3)).tolist(),\n np.reshape(d[20:29], (3, 3)).tolist()]\n hall_doping[pn][temp].append(hall_tens)\n\n return mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, \\\n mu_doping, seebeck_doping, cond_doping, kappa_doping, \\\n hall_doping, carrier_conc\n\n @staticmethod\n def from_files(path_dir, dos_spin=1):\n \"\"\"\n get a BoltztrapAnalyzer object from a set of files\n\n Args:\n path_dir: directory where the boltztrap files 
are\n dos_spin: in DOS mode, set to 1 for spin up and -1 for spin down\n\n Returns:\n a BoltztrapAnalyzer object\n\n \"\"\"\n run_type, warning, efermi, gap, doping_levels = \\\n BoltztrapAnalyzer.parse_outputtrans(path_dir)\n\n vol = BoltztrapAnalyzer.parse_struct(path_dir)\n\n intrans = BoltztrapAnalyzer.parse_intrans(path_dir)\n\n if run_type == \"BOLTZ\":\n dos, pdos = BoltztrapAnalyzer.parse_transdos(\n path_dir, efermi, dos_spin=dos_spin, trim_dos=False)\n\n mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, \\\n seebeck_doping, cond_doping, kappa_doping, hall_doping, \\\n carrier_conc = BoltztrapAnalyzer. \\\n parse_cond_and_hall(path_dir, doping_levels)\n\n return BoltztrapAnalyzer(\n gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,\n mu_doping, seebeck_doping, cond_doping, kappa_doping,\n hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)\n\n elif run_type == \"DOS\":\n trim = True if intrans[\"dos_type\"] == \"HISTO\" else False\n dos, pdos = BoltztrapAnalyzer.parse_transdos(\n path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)\n\n return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos,\n warning=warning, vol=vol)\n\n elif run_type == \"BANDS\":\n bz_kpoints = np.loadtxt(\n os.path.join(path_dir, \"boltztrap_band.dat\"))[:, -3:]\n bz_bands = np.loadtxt(\n os.path.join(path_dir, \"boltztrap_band.dat\"))[:, 1:-6]\n return BoltztrapAnalyzer(bz_bands=bz_bands, bz_kpoints=bz_kpoints,\n warning=warning, vol=vol)\n\n elif run_type == \"FERMI\":\n \"\"\"\n \"\"\"\n\n if os.path.exists(os.path.join(path_dir, 'boltztrap_BZ.cube')):\n fs_data = read_cube_file(\n os.path.join(path_dir, 'boltztrap_BZ.cube'))\n elif os.path.exists(os.path.join(path_dir, 'fort.30')):\n fs_data = read_cube_file(os.path.join(path_dir, 'fort.30'))\n else:\n raise BoltztrapError(\"No data file found for fermi surface\")\n return BoltztrapAnalyzer(fermi_surface_data=fs_data)\n\n else:\n raise ValueError(\"Run type: {} not recognized!\".format(run_type))\n\n def as_dict(self):\n\n results = {'gap': self.gap,\n 'mu_steps': self.mu_steps,\n 'scissor': self.intrans[\"scissor\"],\n 'cond': self._cond,\n 'seebeck': self._seebeck,\n 'kappa': self._kappa,\n 'hall': self._hall,\n 'doping': self.doping,\n 'mu_doping': self.mu_doping,\n 'seebeck_doping': self._seebeck_doping,\n 'cond_doping': self._cond_doping,\n 'kappa_doping': self._kappa_doping,\n 'hall_doping': self._hall_doping,\n 'dos': self.dos.as_dict(),\n 'dos_partial': self._dos_partial,\n 'carrier_conc': self._carrier_conc,\n 'vol': self.vol,\n 'warning': self.warning}\n return jsanitize(results)\n\n @staticmethod\n def from_dict(data):\n def _make_float_array(a):\n res = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]\n for i in range(3):\n for j in range(3):\n res[i][j] = float(a[i][j])\n return res\n\n def _make_float_hall(a):\n return [i for i in a[:27]]\n\n gap = data.get('gap')\n mu_steps = [float(d) for d in data['mu_steps']] if \\\n 'mu_steps' in data else None\n cond = {int(d): [_make_float_array(v) for v in data['cond'][d]]\n for d in data['cond']} if 'cond' in data else None\n seebeck = {int(d): [_make_float_array(v) for v in data['seebeck'][d]]\n for d in data['seebeck']} if 'seebeck' in data else None\n kappa = {int(d): [_make_float_array(v) for v in data['kappa'][d]]\n for d in data['kappa']} if 'kappa' in data else None\n hall = {int(d): [_make_float_hall(v) for v in data['hall'][d]]\n for d in data['hall']} if 'hall' in data else None\n doping = {'p': [float(d) for d in data['doping']['p']],\n 'n': 
[float(d) for d in data['doping']['n']]} if \\\n 'doping' in data else None\n\n mu_doping = {'p': {int(d): [\n float(v) for v in data['mu_doping']['p'][d]] for d in\n data['mu_doping']['p']}, 'n':\n {int(d): [float(v) for v in data['mu_doping']['n'][d]]\n for d in data['mu_doping'][\n 'n']}} if 'mu_doping' in data else None\n\n seebeck_doping = {'p': {int(d): [\n _make_float_array(v) for v in data['seebeck_doping']['p'][d]]\n for d in data['seebeck_doping']['p']}, 'n':\n {int(d): [_make_float_array(v) for v in\n data['seebeck_doping']['n'][d]] for d in\n data['seebeck_doping'][\n 'n']}} if 'seebeck_doping' in data \\\n else None\n\n cond_doping = {'p': {int(d): [_make_float_array(v)\n for v in data['cond_doping']['p'][d]]\n for d in data['cond_doping']['p']}, 'n':\n {int(d): [_make_float_array(v) for v in\n data['cond_doping']['n'][d]] for\n d in data['cond_doping'][\n 'n']}} if 'cond_doping' in data else None\n\n kappa_doping = {'p': {int(d): [_make_float_array(v)\n for v in data['kappa_doping']['p'][d]]\n for d in data['kappa_doping']['p']},\n 'n': {int(d): [_make_float_array(v) for v in\n data['kappa_doping']['n'][d]]\n for d in data['kappa_doping']['n']}} \\\n if 'kappa_doping' in data else None\n\n hall_doping = {'p': {int(d): [_make_float_hall(v) for v in\n data['hall_doping']['p'][d]] for d in\n data['hall_doping']['p']}, 'n':\n {int(d): [_make_float_hall(v) for v in\n data['hall_doping']['n'][d]] for d in\n data['hall_doping'][\n 'n']}} if \"hall_doping\" in data else None\n\n dos = Dos.from_dict(data['dos']) if 'dos' in data else None\n dos_partial = data.get('dos_partial')\n carrier_conc = data.get('carrier_conc')\n vol = data.get('vol')\n warning = data.get('warning')\n\n return BoltztrapAnalyzer(gap, mu_steps, cond, seebeck, kappa, hall,\n doping, mu_doping, seebeck_doping,\n cond_doping, kappa_doping, hall_doping, dos,\n dos_partial, carrier_conc, vol, warning)\n\n\ndef read_cube_file(filename):\n with open(filename, 'rt') as f:\n natoms = 0\n count_line = 0\n for line in f:\n line = line.rstrip(\"\\n\")\n if count_line == 0 and \"CUBE\" not in line:\n raise ValueError(\"CUBE file format not recognized\")\n\n if count_line == 2:\n tokens = line.split()\n origin = [float(tokens[i]) for i in range(1,4)]\n natoms = int(tokens[0])\n if count_line == 3:\n tokens = line.split()\n a1 = [float(tokens[i]) for i in range(1,4)]\n n1 = int(tokens[0])\n elif count_line == 4:\n tokens = line.split()\n a2 = [float(tokens[i]) for i in range(1,4)]\n n2 = int(tokens[0])\n elif count_line == 5:\n tokens = line.split()\n a3 = [float(tokens[i]) for i in range(1,4)]\n n3 = int(tokens[0])\n #kpoints=[[[0 for i in range(0,n1)] for j in range(0,n2)] for l in range(0,n3)]\n elif count_line > 5:\n break\n\n count_line += 1\n\n if 'fort.30' in filename:\n energy_data = np.genfromtxt(filename,skip_header=natoms+6,skip_footer=1)\n nlines_data = len(energy_data)\n last_line = np.genfromtxt(filename,skip_header=nlines_data+natoms+6)\n energy_data = np.append(energy_data.flatten(),last_line).reshape(n1,n2,n3)\n elif 'boltztrap_BZ.cube' in filename:\n energy_data = np.loadtxt(filename,skiprows=natoms+6).reshape(n1,n2,n3)\n \n energy_data /= Energy(1, \"eV\").to(\"Ry\")\n\n return energy_data\n\n\ndef compare_sym_bands(bands_obj, bands_ref_obj, nb=None):\n \"\"\"\n Compute the mean of correlation between bzt and vasp bandstructure on\n sym line, for all bands and locally (for each branches) the difference\n squared (%) if nb is specified.\n \"\"\"\n\n nkpt = len(bands_obj.kpoints)\n if 
bands_ref_obj.is_spin_polarized:\n nbands = min(bands_obj.nb_bands, 2 * bands_ref_obj.nb_bands)\n else:\n # TODO: why is this needed? Shouldn't pmg take care of nb_bands?\n nbands = min(len(bands_obj.bands[Spin.up]),\n len(bands_ref_obj.bands[Spin.up]))\n # print(nbands)\n arr_bands = np.array(bands_obj.bands[Spin.up][:nbands])\n # arr_bands_lavg = (arr_bands-np.mean(arr_bands,axis=1).reshape(nbands,1))\n\n if bands_ref_obj.is_spin_polarized:\n arr_bands_ref_up = np.array(bands_ref_obj.bands[Spin.up])\n arr_bands_ref_dw = np.array(bands_ref_obj.bands[Spin.down])\n # print(arr_bands_ref_up.shape)\n arr_bands_ref = np.vstack((arr_bands_ref_up, arr_bands_ref_dw))\n arr_bands_ref = np.sort(arr_bands_ref, axis=0)[:nbands]\n # print(arr_bands_ref.shape)\n else:\n arr_bands_ref = np.array(bands_ref_obj.bands[Spin.up][:nbands])\n\n # arr_bands_ref_lavg =\n # (arr_bands_ref-np.mean(arr_bands_ref,axis=1).reshape(nbands,1))\n\n # err = np.sum((arr_bands_lavg-arr_bands_ref_lavg)**2,axis=1)/nkpt\n corr = np.array(\n [distance.correlation(arr_bands[idx], arr_bands_ref[idx]) for idx in\n range(nbands)])\n\n if type(nb) == int: nb = [nb]\n\n bcheck = {}\n\n if max(nb) < nbands:\n branches = [[s['start_index'], s['end_index'], s['name']] for s in\n bands_ref_obj.branches]\n\n if not bands_obj.is_metal() and not bands_ref_obj.is_metal():\n zero_ref = bands_ref_obj.get_vbm()['energy']\n zero = bands_obj.get_vbm()['energy']\n if not zero:\n vbm = bands_ref_obj.get_vbm()['band_index'][Spin.up][-1]\n zero = max(arr_bands[vbm])\n else:\n zero_ref = 0 # bands_ref_obj.efermi\n zero = 0 # bands_obj.efermi\n print(zero, zero_ref)\n\n for nbi in nb:\n bcheck[nbi] = {}\n\n bcheck[nbi]['Dist'] = np.mean(abs(arr_bands[nbi] - zero\n - arr_bands_ref[nbi] + zero_ref))\n bcheck[nbi]['Corr'] = corr[nbi]\n\n for start, end, name in branches:\n # werr.append((sum((arr_bands_corr[nb][start:end+1] -\n # arr_bands_ref_corr[nb][start:end+1])**2)/(end+1-start)*100,name))\n bcheck[nbi][name] = np.mean(abs(arr_bands[nbi][start:end + 1]\n - zero\n - arr_bands_ref[nbi][\n start:end + 1] + zero_ref))\n else:\n bcheck = \"No nb given\"\n\n return bcheck\n"
] | [
[
"numpy.vstack",
"numpy.eye",
"numpy.sort",
"scipy.spatial.distance.correlation",
"numpy.dot",
"numpy.reshape",
"numpy.abs",
"numpy.trim_zeros",
"numpy.linalg.eigh",
"numpy.all",
"numpy.array",
"numpy.genfromtxt",
"numpy.loadtxt"
]
] |
petrapoklukar/DCA | [
"e5b3f3481433306a4b33e712272f8bbf5e9d05ce"
] | [
"dca/visualization.py"
] | [
"from dca.DCA import DelaunayGraph\nfrom dca.schemes import DelaunayGraphVisualizer\nimport numpy as np\nimport os\nimport matplotlib as mpl\n\nif not \"DISPLAY\" in os.environ:\n print(\"no display found. Using non-interactive Agg backend\")\n mpl.use(\"Agg\")\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib.collections import LineCollection\nfrom typing import Optional\n\n\n# -------------------------------------------------------------------------- #\n# Matplotlib settings\n# -------------------------------------------------------------------------- #\nSMALL_SIZE = 12\nMEDIUM_SIZE = 15\n\nplt.rc(\"font\", size=SMALL_SIZE) # controls default text sizes\nplt.rc(\"axes\", titlesize=SMALL_SIZE) # fontsize of the axes title\nplt.rc(\"axes\", labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc(\"xtick\", labelsize=MEDIUM_SIZE) # fontsize of the tick labels\nplt.rc(\"ytick\", labelsize=MEDIUM_SIZE) # fontsize of the tick labels\nplt.rc(\"legend\", fontsize=MEDIUM_SIZE) # legend fontsize\nplt.rc(\"figure\", titlesize=SMALL_SIZE) # fontsize of the figure title\n\nR_color, E_color = \"C0\", \"C1\"\n\n# -------------------------------------------------------------------------- #\n# DelaunayGeomCA: visualization\n# -------------------------------------------------------------------------- #\ndef get_color(edge, num_R):\n \"\"\"\n Gets the color of the edge.\n :param edge: edge given as a list of two indices.\n :param num_R: number of R points in the graph.\n :return: color of the edge and its zorder\n \"\"\"\n R_color, E_color = \"C0\", \"C1\"\n edge = sorted(edge)\n if edge[0] < num_R:\n if edge[1] >= num_R:\n comp_color = \"gray\"\n zorder = 10\n else:\n comp_color = R_color\n zorder = 5\n else:\n comp_color = E_color\n zorder = 5\n return comp_color, zorder\n\n\ndef _plot_Delaunay_graph(\n G_visualizer: DelaunayGraphVisualizer,\n edges: np.ndarray,\n filename: str,\n root: str,\n vertices: Optional[np.ndarray] = None,\n labels: Optional[np.ndarray] = None,\n figsize: tuple = (5, 5),\n keep_range: bool = False,\n):\n \"\"\"\n Plots a Delaunay graph.\n :param G_visualizer: DelaunayGraphVisualizer object.\n :param edges: array of edges to plot.\n :param filename: filename of the image.\n :param root: root directory of the experiment.\n :param vertices: array of vertices to plot.\n :param labels: array of vertex labels.\n :param figsize: size of the figure.\n :param keep_range: whether to remember current xlim and ylim.\n :return: xlim and ylim if keep_range else None\n \"\"\"\n input_data = G_visualizer.get_input_array_data()\n Rplot_kwds = {\"alpha\": 0.7, \"s\": 50, \"linewidths\": 0}\n Eplot_kwds = {\"alpha\": 0.7, \"s\": 50, \"linewidths\": 0, \"marker\": \"X\"}\n Rvertices, Evertices = (\n input_data[: G_visualizer.num_R],\n input_data[G_visualizer.num_R :],\n )\n\n if vertices is not None:\n Rcolors = np.empty(shape=G_visualizer.num_R).astype(str)\n Rcolors[vertices[vertices < G_visualizer.num_R].astype(int)] = R_color\n Rcolors[\n np.setdiff1d(\n np.arange(G_visualizer.num_R), vertices[vertices < G_visualizer.num_R]\n )\n ] = \"gray\"\n\n Ecolors = np.empty(shape=G_visualizer.num_E).astype(str)\n Ecolors[\n vertices[vertices >= G_visualizer.num_R].astype(int) - G_visualizer.num_R\n ] = E_color\n Ecolors[\n np.setdiff1d(\n np.arange(G_visualizer.num_E).astype(int),\n vertices[vertices >= G_visualizer.num_R].astype(int)\n - G_visualizer.num_R,\n )\n ] = \"gray\"\n\n if labels is not None:\n labels = labels[vertices]\n else:\n Rcolors = 
np.repeat(R_color, G_visualizer.num_R).astype(str)\n Ecolors = np.repeat(E_color, G_visualizer.num_E).astype(str)\n\n plt.figure(figsize=figsize)\n plt.clf()\n # Plot vertices\n if labels is not None:\n plt.scatter(\n Rvertices.T[0], Rvertices.T[1], c=labels[: G_visualizer.num_R], **Rplot_kwds\n )\n plt.scatter(\n Evertices.T[0], Evertices.T[1], c=labels[G_visualizer.num_R :], **Eplot_kwds\n )\n\n else:\n plt.scatter(Rvertices.T[0], Rvertices.T[1], color=Rcolors, **Rplot_kwds)\n plt.scatter(Evertices.T[0], Evertices.T[1], color=Ecolors, **Eplot_kwds)\n\n # Plot edges\n # draw edges of correct color\n for e in edges:\n e0, e1 = int(e[0]), int(e[1])\n start = (\n Rvertices[e0]\n if e0 < G_visualizer.num_R\n else Evertices[e0 - G_visualizer.num_R]\n )\n end = (\n Rvertices[e1]\n if e1 < G_visualizer.num_R\n else Evertices[e1 - G_visualizer.num_R]\n )\n color, zorder = get_color(e, G_visualizer.num_R)\n plt.plot(\n [start[0], end[0]],\n [start[1], end[1]],\n \"-\",\n linewidth=1.0,\n color=color,\n zorder=zorder,\n )\n plt.axis(\"off\")\n plt.tight_layout()\n save_path = os.path.join(root, filename)\n if keep_range:\n assert G_visualizer.xlim is not None and G_visualizer.ylim is not None\n plt.xlim(*G_visualizer.xlim)\n plt.ylim(*G_visualizer.ylim)\n current_xlim = plt.xlim()\n current_ylim = plt.ylim()\n plt.savefig(save_path)\n plt.clf()\n plt.close()\n return current_xlim, current_ylim\n\n\ndef _plot_Delaunay_graph_colorbar(\n G_visualizer: DelaunayGraphVisualizer,\n edges: np.ndarray,\n distances: np.ndarray,\n filename: str,\n root: str,\n figsize: tuple = (5, 5),\n):\n \"\"\"\n Plots a Delaunay graph with colored edge lengths.\n :param G_visualizer: DelaunayGraphVisualizer object.\n :param edges: array of edges to plot.\n :param distances: array of edge lengths.\n :param filename: filename of the image.\n :param root: root directory of the experiment.\n :param figsize: size of the figure.\n \"\"\"\n input_data = G_visualizer.get_input_array_data()\n Rplot_kwds = {\"alpha\": 0.7, \"s\": 50, \"linewidths\": 0}\n Eplot_kwds = {\"alpha\": 0.7, \"s\": 50, \"linewidths\": 0, \"marker\": \"X\"}\n Rvertices, Evertices = (\n input_data[: G_visualizer.num_R],\n input_data[G_visualizer.num_R :],\n )\n\n # Plot vertices\n # Plot edges, color = distance\n plt.figure(figsize=figsize)\n axis = plt.gca()\n segments, colors = [], []\n for e in edges:\n start = (\n Rvertices[e[0]]\n if e[0] < G_visualizer.num_R\n else Evertices[e[0] - G_visualizer.num_R]\n )\n end = (\n Rvertices[e[1]]\n if e[1] < G_visualizer.num_R\n else Evertices[e[1] - G_visualizer.num_R]\n )\n segments.append([start, end])\n colors.append(distances[e[0], e[1]])\n colors = np.array(colors)\n lc = LineCollection(segments, cmap=\"viridis_r\")\n lc.set_array(colors)\n axis.add_artist(lc)\n cb = plt.colorbar(lc, ax=axis)\n cb.ax.set_ylabel(filename)\n\n Rcolors = np.repeat(R_color, G_visualizer.num_R).astype(str)\n Ecolors = np.repeat(E_color, G_visualizer.num_E).astype(str)\n axis.scatter(Rvertices.T[0], Rvertices.T[1], color=Rcolors, **Rplot_kwds)\n axis.scatter(Evertices.T[0], Evertices.T[1], color=Ecolors, **Eplot_kwds)\n axis.set_xlim(np.min(input_data[:, 0]) - 1, np.max(input_data[:, 0]) + 1)\n axis.set_ylim(np.min(input_data[:, 1]) - 1, np.max(input_data[:, 1]) + 1)\n save_path = os.path.join(root, filename)\n plt.savefig(save_path)\n plt.clf()\n plt.close()\n\n\ndef _plot_isolated_components(\n G: DelaunayGraph, G_visualizer: DelaunayGraphVisualizer, root: str\n):\n \"\"\"\n Plots outliers in a distilled Delaunay graph.\n :param 
G: Delaunay graph.\n :param G_visualizer: DelaunayGraphVisualizer object.\n :param root: root directory of the experiment.\n \"\"\"\n # Get outliers\n n_components = len(G.comp_stats)\n R_outliers, E_outliers = [], []\n for i in range(G.first_trivial_component_idx, n_components):\n if G.comp_stats[i].Ridx.size == 1:\n R_outliers.append(G.comp_stats[i].Ridx.item())\n if G.comp_stats[i].Eidx.size == 1:\n E_outliers.append(G.comp_stats[i].Eidx.item())\n vertices = np.concatenate([R_outliers, np.array(E_outliers) + G.num_R])\n _plot_Delaunay_graph(\n G_visualizer,\n edges=np.array([]),\n filename=\"components_isolated\",\n root=root,\n vertices=vertices,\n keep_range=True,\n )\n\n\ndef _plot_RE_components_quality(\n G: DelaunayGraph,\n root: str,\n annotate_largest: bool = True,\n min_comp_size: int = 0,\n display_smaller: bool = False,\n figsize: tuple = (10, 5),\n):\n \"\"\"\n Visualizes components quality as a scatter plot.\n :param G: Delaunay graph.\n :param root: root directory of the experiment.\n :param annotate_largest: if annotate the size (in percentage) of the largest component.\n :param min_comp_size: minimum size (number of vertices) of the components to visualize.\n :param display_smaller: if display aggregated components with size smaller than min_comp_size.\n :param figsize: size of the plot.\n \"\"\"\n n_comp = len(G.comp_stats)\n total_n_pts = G.num_R + G.num_E\n max_score, last_display_comp = 0, 0\n small_R_comp, small_E_comp, small_RE_comp = 0, 0, 0\n quality_scores, ticks_labels = [], []\n fig, ax = plt.subplots(figsize=figsize)\n for comp_id in range(n_comp):\n compR = G.comp_stats[comp_id].Ridx\n compE = G.comp_stats[comp_id].Eidx\n comp_n_points = len(compR) + len(compE)\n if comp_n_points >= min_comp_size:\n comp_quality = np.round(G.comp_stats[comp_id].comp_quality, 2)\n max_score = max(max_score, comp_quality)\n last_display_comp = comp_id + 1\n quality_scores.append(comp_quality)\n if len(compR) != 0:\n if len(compE) != 0:\n comp_color = \"gray\"\n else:\n comp_color = R_color\n else:\n comp_color = E_color\n\n ax.scatter(\n comp_id,\n comp_quality,\n c=comp_color,\n linestyle=\"--\",\n s=1000 * (comp_n_points) / total_n_pts,\n alpha=0.8,\n zorder=10,\n )\n else:\n if len(compR) != 0:\n if len(compE) != 0:\n small_RE_comp += 1\n else:\n small_R_comp += 1\n else:\n small_E_comp += 1\n\n if min_comp_size > 0 and display_smaller:\n if small_RE_comp + small_R_comp + small_E_comp > 0:\n ticks_labels = [last_display_comp]\n\n if small_RE_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_RE_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=\"gray\")\n\n if small_R_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_R_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=R_color)\n\n if small_E_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_E_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=E_color)\n\n # Annotate the largest component\n if annotate_largest:\n largest_comp_size = len(G.comp_stats[0].Ridx) + len(G.comp_stats[0].Eidx)\n ax.annotate(\n round(largest_comp_size / total_n_pts, 2),\n xy=(0, G.comp_stats[0].comp_quality + 0.03),\n ha=\"center\",\n va=\"bottom\",\n color=\"k\",\n )\n if max_score == 0:\n ax.plot(0, G.comp_stats[0].comp_quality, \"kX\")\n\n ax.plot(\n np.arange(last_display_comp),\n quality_scores,\n color=\"gray\",\n linestyle=\"--\",\n alpha=0.5,\n zorder=0,\n )\n displayed_ticks = np.arange(\n 
last_display_comp, step=max(int(last_display_comp / 10), 1)\n )\n if min_comp_size == 0:\n ax.set_xticks(displayed_ticks)\n ax.set_xticklabels(displayed_ticks)\n else:\n new_ticks = np.arange(\n last_display_comp, last_display_comp + len(ticks_labels) * 2, 2\n )\n ax.set_xticks(np.concatenate([displayed_ticks, new_ticks]))\n ax.set_xticklabels(list(displayed_ticks) + ticks_labels)\n max_score = 1.0 if max_score == 0 else max_score\n ax.set_ylim((-0.05, max_score + 0.1))\n ax.set_yticks(np.arange(0, max_score + 0.1, 0.1))\n\n # ax.tick_params(axis='x', rotation=45)\n ax.set_xlabel(\"component index\")\n ax.set_ylabel(\"component quality\")\n legend_elements = [\n Line2D(\n [0],\n [0],\n markerfacecolor=R_color,\n markersize=10,\n label=\"R\",\n marker=\"o\",\n color=\"w\",\n ),\n Line2D(\n [0],\n [0],\n markerfacecolor=E_color,\n markersize=10,\n label=\"E\",\n marker=\"o\",\n color=\"w\",\n ),\n Line2D(\n [0],\n [0],\n markerfacecolor=\"gray\",\n markersize=10,\n label=\"mix\",\n marker=\"o\",\n color=\"w\",\n ),\n ]\n ax.legend(\n handles=legend_elements,\n ncol=len(legend_elements),\n loc=\"upper center\",\n framealpha=0.5,\n )\n name = \"component_quality_min_size{0}_annotated{1}_displaysmaller{2}\".format(\n min_comp_size, int(annotate_largest), int(display_smaller)\n )\n path = os.path.join(root, name)\n plt.tight_layout()\n plt.savefig(path)\n plt.clf()\n plt.close()\n\n\ndef _plot_RE_components_consistency(\n G: DelaunayGraph,\n root: str,\n annotate_largest: bool = True,\n min_comp_size: int = 0,\n display_smaller: bool = False,\n figsize: tuple = (10, 5),\n):\n \"\"\"\n Visualizes components consistency as a scatter plot.\n :param G: Delaunay graph.\n :param root: root directory of the experiment.\n :param annotate_largest: if annotate the size (in percentage) of the largest component.\n :param min_comp_size: minimum size (number of vertices) of the components to visualize.\n :param display_smaller: if display aggregated components with size smaller than min_comp_size.\n :param figsize: size of the plot.\n \"\"\"\n n_comp = len(G.comp_stats)\n total_n_pts = G.num_R + G.num_E\n max_score, last_display_comp = 0, 0\n small_R_comp, small_E_comp, small_RE_comp = 0, 0, 0\n consistency_scores, ticks_labels = [], []\n\n fig, ax = plt.subplots(figsize=figsize)\n for comp_id in range(n_comp):\n compR = G.comp_stats[comp_id].Ridx\n compE = G.comp_stats[comp_id].Eidx\n comp_n_points = len(compR) + len(compE)\n if comp_n_points >= min_comp_size:\n comp_consistency = np.round(G.comp_stats[comp_id].comp_consistency, 2)\n max_score = max(max_score, comp_consistency)\n last_display_comp = comp_id + 1\n consistency_scores.append(comp_consistency)\n if len(compR) != 0:\n if len(compE) != 0:\n comp_color = \"gray\"\n else:\n comp_color = R_color\n else:\n comp_color = E_color\n\n ax.scatter(\n comp_id,\n comp_consistency,\n c=comp_color,\n linestyle=\"--\",\n s=1000 * (comp_n_points) / total_n_pts,\n alpha=0.8,\n zorder=10,\n )\n else:\n if len(compR) != 0:\n if len(compE) != 0:\n small_RE_comp += 1\n else:\n small_R_comp += 1\n else:\n small_E_comp += 1\n\n if min_comp_size > 0 and display_smaller:\n if small_RE_comp + small_R_comp + small_E_comp > 0:\n ticks_labels = [last_display_comp]\n\n if small_RE_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_RE_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=\"gray\")\n\n if small_R_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_R_comp)\n 
ax.axvspan(r - 2, r, alpha=0.5, color=R_color)\n\n if small_E_comp > 0:\n r = last_display_comp + 2 * len(ticks_labels)\n ticks_labels.append(ticks_labels[-1] + small_E_comp)\n ax.axvspan(r - 2, r, alpha=0.5, color=E_color)\n\n # Annotate the largest component\n if annotate_largest:\n largest_comp_size = len(G.comp_stats[0].Ridx) + len(G.comp_stats[0].Eidx)\n ax.annotate(\n round(largest_comp_size / total_n_pts, 2),\n xy=(0, G.comp_stats[0].comp_consistency + 0.03),\n ha=\"center\",\n va=\"bottom\",\n color=\"k\",\n )\n if max_score == 0:\n ax.plot(0, G.comp_stats[0].comp_consistency, \"kX\")\n\n ax.plot(\n np.arange(last_display_comp),\n consistency_scores,\n color=\"gray\",\n linestyle=\"--\",\n alpha=0.5,\n zorder=0,\n )\n displayed_ticks = np.arange(\n last_display_comp, step=max(int(last_display_comp / 10), 1)\n )\n if min_comp_size == 0:\n ax.set_xticks(displayed_ticks)\n ax.set_xticklabels(displayed_ticks)\n else:\n new_ticks = np.arange(\n last_display_comp, last_display_comp + len(ticks_labels) * 2, 2\n )\n ax.set_xticks(np.concatenate([displayed_ticks, new_ticks]))\n ax.set_xticklabels(list(displayed_ticks) + ticks_labels)\n max_score = 1.0 if max_score == 0 else max_score\n ax.set_ylim((-0.05, max_score + 0.1))\n ax.set_yticks(np.arange(0, max_score + 0.1, 0.1))\n\n # ax.tick_params(axis='x', rotation=45)\n ax.set_xlabel(\"component index\")\n ax.set_ylabel(\"component consistency\")\n legend_elements = [\n Line2D(\n [0],\n [0],\n markerfacecolor=R_color,\n markersize=10,\n label=\"R\",\n marker=\"o\",\n color=\"w\",\n ),\n Line2D(\n [0],\n [0],\n markerfacecolor=E_color,\n markersize=10,\n label=\"E\",\n marker=\"o\",\n color=\"w\",\n ),\n Line2D(\n [0],\n [0],\n markerfacecolor=\"gray\",\n markersize=10,\n label=\"mix\",\n marker=\"o\",\n color=\"w\",\n ),\n ]\n ax.legend(\n handles=legend_elements,\n ncol=len(legend_elements),\n loc=\"upper center\",\n framealpha=0.5,\n )\n name = \"component_consistency_min_size{0}_annotated{1}_displaysmaller{2}\".format(\n min_comp_size, int(annotate_largest), int(display_smaller)\n )\n path = os.path.join(root, name)\n plt.tight_layout()\n plt.savefig(path)\n plt.clf()\n plt.close()\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlim",
"matplotlib.use",
"numpy.round",
"matplotlib.pyplot.scatter",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.axis",
"numpy.repeat",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"matplotlib.collections.LineCollection",
"numpy.empty",
"numpy.array",
"numpy.concatenate"
]
] |
Parita-D/olympic-hero | [
"8a809a6308146c09235af43379f29e7e5e83827d"
] | [
"code.py"
] | [
"# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#Path of the file\r\ndata = pd.read_csv(path)\r\ndata.rename(columns={\"Total\":\"Total_Medals\"}, inplace=True)\r\ndata.head(10)\r\n#Code starts here\r\n\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\ndata['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'],'Summer', 'Winter')\r\ndata['Better_Event'] = np.where(data['Total_Summer']==data['Total_Winter'], 'Both', data['Better_Event'])\r\n\r\nbetter_event = data.Better_Event.value_counts().index[0]\r\nprint(better_event)\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\ntop_countries = data[['Country_Name', 'Total_Summer', 'Total_Winter', 'Total_Medals']]\r\n\r\ntop_countries.drop(top_countries.tail(1).index, inplace=True, axis=0)\r\nprint(top_countries)\r\n\r\ndef top_ten(top_countries,c_name):\r\n country_list=[]\r\n df = top_countries.nlargest(10, [c_name]) \r\n country_list = list(df['Country_Name'])\r\n return(country_list)\r\n\r\ntop_10_summer = top_ten(top_countries,'Total_Summer')\r\ntop_10_winter = top_ten(top_countries,'Total_Winter')\r\ntop_10 = top_ten(top_countries,'Total_Medals')\r\n\r\ncommon = list(set(top_10_summer) & set(top_10_winter) & set(top_10))\r\nprint(common)\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\n\r\nsummer_df = data[data['Country_Name'].isin(top_10_summer)]\r\nwinter_df = data[data['Country_Name'].isin(top_10_winter)]\r\ntop_df = data[data['Country_Name'].isin(top_10)]\r\n\r\nsummer_df.plot(kind='bar')\r\nwinter_df.plot(kind='bar')\r\ntop_df.plot(kind='bar')\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\nsummer_df['Golden_Ratio'] = summer_df['Gold_Summer']/summer_df['Total_Summer']\r\n\r\nsummer_max_ratio = summer_df.Golden_Ratio.max()\r\nsummer_country_gold = summer_df.loc[summer_df.Golden_Ratio==summer_df.Golden_Ratio.max(), 'Country_Name'].values[0] \r\n\r\nwinter_df['Golden_Ratio'] = winter_df['Gold_Winter']/winter_df['Total_Winter']\r\n\r\nwinter_max_ratio = winter_df.Golden_Ratio.max()\r\n\r\nwinter_country_gold = winter_df.loc[winter_df.Golden_Ratio==winter_df.Golden_Ratio.max(), 'Country_Name'].values[0]\r\n\r\ntop_df['Golden_Ratio'] = top_df['Gold_Total']/top_df['Total_Medals']\r\n\r\ntop_max_ratio = top_df.Golden_Ratio.max()\r\n\r\ntop_country_gold = top_df.loc[top_df.Golden_Ratio==top_df.Golden_Ratio.max(), 'Country_Name'].values[0]\r\n\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\ndata_1=data[:-1]\r\ndata_1['Total_Points']=data_1['Gold_Total']*3 + data_1['Silver_Total']*2 + data_1['Bronze_Total']*1\r\nmost_points = data_1.Total_Points.max()\r\nbest_country = data_1.loc[data_1.Total_Points==data_1.Total_Points.max(), 'Country_Name'].values[0] \r\n\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\n\r\nbest = data[data['Country_Name']==best_country]\r\n\r\nbest=best[['Gold_Total','Silver_Total','Bronze_Total']]\r\n\r\nbest.plot.bar(stacked=True)\r\nplt.xlabel('United States')\r\nplt.ylabel('Medals Tally')\r\nplt.xticks(rotation=45)\n\n\n"
] | [
[
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel"
]
] |
pjk645/pyGAM | [
"29425798e13651f03c1fd3cc1096071cd752403a"
] | [
"pygam/tests/test_GAM_methods.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport sys\n\nimport numpy as np\nimport pytest\nimport scipy as sp\n\nfrom pygam import *\n\n\ndef test_LinearGAM_prediction(mcycle_X_y, mcycle_gam):\n \"\"\"\n check that we the predictions we get are correct shape\n \"\"\"\n X, y = mcycle_X_y\n preds = mcycle_gam.predict(X)\n assert(preds.shape == y.shape)\n\ndef test_LogisticGAM_accuracy(default_X_y):\n \"\"\"\n check that we can compute accuracy correctly\n \"\"\"\n X, y = default_X_y\n gam = LogisticGAM().fit(X, y)\n\n preds = gam.predict(X)\n acc0 = (preds == y).mean()\n acc1 = gam.accuracy(X, y)\n assert(acc0 == acc1)\n\ndef test_PoissonGAM_exposure(coal_X_y):\n \"\"\"\n check that we can fit a Poisson GAM with exposure, and it scales predictions\n \"\"\"\n X, y = coal_X_y\n gam = PoissonGAM().fit(X, y, exposure=np.ones_like(y))\n assert((gam.predict(X, exposure=np.ones_like(y)*2) == 2 *gam.predict(X)).all())\n\ndef test_PoissonGAM_loglike(coal_X_y):\n \"\"\"\n check that our loglikelihood is scaled by exposure\n\n predictions that are twice as large with twice the exposure\n should have lower loglikelihood\n \"\"\"\n X, y = coal_X_y\n exposure = np.ones_like(y)\n gam_high_var = PoissonGAM().fit(X, y * 2, exposure=exposure * 2)\n gam_low_var = PoissonGAM().fit(X, y, exposure=exposure)\n\n assert gam_high_var.loglikelihood(X, y * 2, exposure * 2) < gam_low_var.loglikelihood(X, y, exposure)\n\ndef test_large_GAM(coal_X_y):\n \"\"\"\n check that we can fit a GAM in py3 when we have more than 50,000 samples\n \"\"\"\n X = np.linspace(0, 100, 100000)\n y = X**2\n gam = LinearGAM().fit(X, y)\n assert(gam._is_fitted)\n\ndef test_summary(mcycle_X_y, mcycle_gam):\n \"\"\"\n check that we can get a summary if we've fitted the model, else not\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n\n try:\n gam.summary()\n except AttributeError:\n assert(True)\n\n mcycle_gam.summary()\n assert(True)\n\ndef test_more_splines_than_samples(mcycle_X_y):\n \"\"\"\n check that gridsearch returns the expected number of models\n \"\"\"\n X, y = mcycle_X_y\n n = len(X)\n\n gam = LinearGAM(s(0, n_splines=n+1)).fit(X, y)\n assert(gam._is_fitted)\n\n # TODO here is our bug:\n # we cannot display the term-by-term effective DoF because we have fewer\n # values than coefficients\n assert len(gam.statistics_['edof_per_coef']) < len(gam.coef_)\n gam.summary()\n\ndef test_deviance_residuals(mcycle_X_y, mcycle_gam):\n \"\"\"\n for linear GAMs, the deviance residuals should be equal to the y - y_pred\n \"\"\"\n X, y = mcycle_X_y\n res = mcycle_gam.deviance_residuals(X, y)\n err = y - mcycle_gam.predict(X)\n assert((res == err).all())\n\ndef test_conf_intervals_return_array(mcycle_X_y, mcycle_gam):\n \"\"\"\n make sure that the confidence_intervals method returns an array\n \"\"\"\n X, y = mcycle_X_y\n conf_ints = mcycle_gam.confidence_intervals(X)\n assert(conf_ints.ndim == 2)\n\ndef test_conf_intervals_quantiles_width_interchangable(mcycle_X_y, mcycle_gam):\n \"\"\"\n getting confidence_intervals via width or specifying quantiles\n should return the same result\n \"\"\"\n X, y = mcycle_X_y\n conf_ints_a = mcycle_gam.confidence_intervals(X, width=.9)\n conf_ints_b = mcycle_gam.confidence_intervals(X, quantiles=[.05, .95])\n assert(np.allclose(conf_ints_a, conf_ints_b))\n\ndef test_conf_intervals_ordered(mcycle_X_y, mcycle_gam):\n \"\"\"\n comfidence intervals returned via width should be ordered\n \"\"\"\n X, y = mcycle_X_y\n conf_ints = mcycle_gam.confidence_intervals(X)\n assert((conf_ints[:,0] <= conf_ints[:,1]).all())\n\ndef 
test_summary_returns_12_lines(mcycle_gam):\n \"\"\"\n check that the summary method works and returns 24 lines like:\n\n LinearGAM\n =============================================== ==========================================================\n Distribution: NormalDist Effective DoF: 11.2495\n Link Function: IdentityLink Log Likelihood: -952.605\n Number of Samples: 133 AIC: 1929.7091\n AICc: 1932.4197\n GCV: 605.6546\n Scale: 514.2013\n Pseudo R-Squared: 0.7969\n ==========================================================================================================\n Feature Function Data Type Num Splines Spline Order Linear Fit Lambda P > x Sig. Code\n ================== ============== ============= ============= =========== ========== ========== ==========\n feature 1 numerical 25 3 False 1.0 3.43e-03 **\n intercept 6.85e-02 .\n ==========================================================================================================\n Significance codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n\n WARNING: Fitting splines and a linear function to a feature introduces a model identifiability problem\n which can cause p-values to appear significant when they are not.\n\n WARNING: p-values calculated in this manner behave correctly for un-penalized models or models with\n known smoothing parameters, but when smoothing parameters have been estimated, the p-values\n are typically lower than they should be, meaning that the tests reject the null too readily.\n \"\"\"\n if sys.version_info.major == 2:\n from StringIO import StringIO\n if sys.version_info.major == 3:\n from io import StringIO\n stdout = sys.stdout #keep a handle on the real standard output\n sys.stdout = StringIO() #Choose a file-like object to write to\n mcycle_gam.summary()\n assert(len(sys.stdout.getvalue().split('\\n')) == 24)\n\ndef test_is_fitted_predict(mcycle_X_y):\n \"\"\"\n test predict requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.predict(X)\n\ndef test_is_fitted_predict_mu(mcycle_X_y):\n \"\"\"\n test predict_mu requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.predict_mu(X)\n\ndef test_is_fitted_dev_resid(mcycle_X_y):\n \"\"\"\n test deviance_residuals requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.deviance_residuals(X, y)\n\ndef test_is_fitted_conf_intervals(mcycle_X_y):\n \"\"\"\n test confidence_intervals requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.confidence_intervals(X)\n\ndef test_is_fitted_pdep(mcycle_X_y):\n \"\"\"\n test partial_dependence requires fitted model\n \"\"\"\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.partial_dependence(term=0)\n\ndef test_is_fitted_summary(mcycle_X_y):\n \"\"\"\n test summary requires fitted model\n \"\"\"\n X, y = mcycle_X_y\n gam = LinearGAM()\n with pytest.raises(AttributeError):\n gam.summary()\n\ndef test_set_params_with_external_param():\n \"\"\"\n test set_params sets a real parameter\n \"\"\"\n gam = GAM(lam=1)\n gam.set_params(lam=420)\n assert(gam.lam == 420)\n\ndef test_set_params_with_phony_param():\n \"\"\"\n test set_params should not set any phony param\n \"\"\"\n gam = GAM()\n gam.set_params(cat=420)\n assert(not hasattr(gam, 'cat'))\n\ndef test_set_params_with_phony_param_force():\n \"\"\"\n test set_params can set phony params if we use the force=True\n \"\"\"\n 
gam = GAM()\n assert(not hasattr(gam, 'cat'))\n\n gam.set_params(cat=420, force=True)\n assert(gam.cat == 420)\n\ndef test_get_params():\n \"\"\"\n test gam gets our params\n \"\"\"\n gam = GAM(lam=420)\n params = gam.get_params()\n assert(params['lam'] == 420)\n\n\nclass TestSamplingFromPosterior(object):\n\n def test_drawing_samples_from_unfitted_model(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n gam = LinearGAM()\n\n with pytest.raises(AttributeError):\n gam.sample(X, y)\n\n with pytest.raises(AttributeError):\n gam._sample_coef(X, y)\n\n with pytest.raises(AttributeError):\n gam._bootstrap_samples_of_smoothing(X, y)\n\n assert mcycle_gam._is_fitted\n\n mcycle_gam.sample(X, y, n_draws=2)\n mcycle_gam._sample_coef(X, y, n_draws=2)\n mcycle_gam._bootstrap_samples_of_smoothing(X, y, n_bootstraps=1)\n assert True\n\n def test_sample_quantity(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n for quantity in ['coefficients', 'response']:\n with pytest.raises(ValueError):\n mcycle_gam.sample(X, y, quantity=quantity, n_draws=2)\n for quantity in ['coef', 'mu', 'y']:\n mcycle_gam.sample(X, y, quantity=quantity, n_draws=2)\n assert True\n\n def test_shape_of_random_samples(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n n_samples = len(X)\n n_draws = 5\n\n sample_coef = mcycle_gam.sample(X, y, quantity='coef', n_draws=n_draws)\n sample_mu = mcycle_gam.sample(X, y, quantity='mu', n_draws=n_draws)\n sample_y = mcycle_gam.sample(X, y, quantity='y', n_draws=n_draws)\n assert sample_coef.shape == (n_draws, len(mcycle_gam.coef_))\n assert sample_mu.shape == (n_draws, n_samples)\n assert sample_y.shape == (n_draws, n_samples)\n\n n_samples_in_grid = 500\n idxs = np.random.choice(np.arange(len(X)), n_samples_in_grid)\n XX = X[idxs]\n\n sample_coef = mcycle_gam.sample(X, y, quantity='coef', n_draws=n_draws,\n sample_at_X=XX)\n sample_mu = mcycle_gam.sample(X, y, quantity='mu', n_draws=n_draws,\n sample_at_X=XX)\n sample_y = mcycle_gam.sample(X, y, quantity='y', n_draws=n_draws,\n sample_at_X=XX)\n\n assert sample_coef.shape == (n_draws, len(mcycle_gam.coef_))\n assert sample_mu.shape == (n_draws, n_samples_in_grid)\n assert sample_y.shape == (n_draws, n_samples_in_grid)\n\n def test_shape_bootstrap_samples_of_smoothing(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n\n for n_bootstraps in [1, 2]:\n coef_bootstraps, cov_bootstraps = (\n mcycle_gam._bootstrap_samples_of_smoothing(\n X, y, n_bootstraps=n_bootstraps))\n assert len(coef_bootstraps) == len(cov_bootstraps) == n_bootstraps\n for coef, cov in zip(coef_bootstraps, cov_bootstraps):\n assert coef.shape == mcycle_gam.coef_.shape\n assert cov.shape == mcycle_gam.statistics_['cov'].shape\n\n for n_draws in [1, 2]:\n coef_draws = mcycle_gam._simulate_coef_from_bootstraps(\n n_draws, coef_bootstraps, cov_bootstraps)\n assert coef_draws.shape == (n_draws, len(mcycle_gam.coef_))\n\n def test_bad_sample_params(self, mcycle_X_y, mcycle_gam):\n X, y = mcycle_X_y\n with pytest.raises(ValueError):\n mcycle_gam.sample(X, y, n_draws=0)\n with pytest.raises(ValueError):\n mcycle_gam.sample(X, y, n_bootstraps=0)\n\n\ndef test_prediction_interval_unknown_scale():\n \"\"\"\n the prediction intervals should be correct to a few decimal places\n we test at a large sample limit, where the t distribution becomes normal\n \"\"\"\n n = 1000000\n X = np.linspace(0,1,n)\n y = np.random.randn(n)\n\n gam_a = LinearGAM(terms=l(0)).fit(X, y)\n gam_b = LinearGAM(s(0, n_splines=4)).fit(X, y)\n\n XX = gam_a.generate_X_grid(term=0)\n intervals_a = 
gam_a.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)\n intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)\n\n assert np.allclose(intervals_a[0], sp.stats.norm.ppf(0.1), atol=0.01)\n assert np.allclose(intervals_a[1], sp.stats.norm.ppf(0.9), atol=0.01)\n\n assert np.allclose(intervals_b[0], sp.stats.norm.ppf(0.1), atol=0.01)\n assert np.allclose(intervals_b[1], sp.stats.norm.ppf(0.9), atol=0.01)\n\ndef test_prediction_interval_known_scale():\n \"\"\"\n the prediction intervals should be correct to a few decimal places\n we test at a large sample limit.\n \"\"\"\n n = 1000000\n X = np.linspace(0,1,n)\n y = np.random.randn(n)\n\n gam_a = LinearGAM(terms=l(0), scale=1.).fit(X, y)\n gam_b = LinearGAM(s(0, n_splines=4), scale=1.).fit(X, y)\n\n XX = gam_a.generate_X_grid(term=0)\n intervals_a = gam_a.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)\n intervals_b = gam_b.prediction_intervals(XX, quantiles=[0.1, .9]).mean(axis=0)\n\n assert np.allclose(intervals_a[0], sp.stats.norm.ppf(0.1), atol=0.01)\n assert np.allclose(intervals_a[1], sp.stats.norm.ppf(0.9), atol=0.01)\n\n assert np.allclose(intervals_b[0], sp.stats.norm.ppf(0.1), atol=0.01)\n assert np.allclose(intervals_b[1], sp.stats.norm.ppf(0.9), atol=0.01)\n\ndef test_pvalue_rejects_useless_feature(wage_X_y):\n \"\"\"\n check that a p-value can reject a useless feature\n \"\"\"\n X, y = wage_X_y\n\n # add empty feature\n X = np.c_[X, np.arange(X.shape[0])]\n gam = LinearGAM(s(0) + s(1) + f(2) + s(3)).fit(X, y)\n\n # now do the test, with some safety\n p_values = gam._estimate_p_values()\n print(p_values)\n assert(p_values[-2] > .5) # because -1 is intercept\n\ndef test_fit_quantile_is_close_enough(head_circumference_X_y):\n \"\"\"see that we get close to the desired quantile\n\n and check that repeating on an already fitted returns the same\n \"\"\"\n X, y = head_circumference_X_y\n\n quantile = 0.99\n tol = 1e-4\n\n gam = ExpectileGAM().fit_quantile(X, y, quantile=quantile, max_iter=20, tol=tol)\n ratio = gam._get_quantile_ratio(X, y)\n\n assert np.abs(ratio - quantile) <= tol\n\n # now check if we had to refit\n gam2 = gam.fit_quantile(X, y, quantile=quantile, max_iter=20, tol=tol)\n\n assert gam == gam2\n\n\ndef test_fit_quantile_NOT_close_enough(head_circumference_X_y):\n \"\"\"see that we DO NOT get close to the desired quantile\n \"\"\"\n X, y = head_circumference_X_y\n\n quantile = 0.99\n tol = 1e-5\n\n gam = ExpectileGAM().fit_quantile(X, y, quantile=quantile, max_iter=1, tol=tol)\n ratio = gam._get_quantile_ratio(X, y)\n\n assert np.abs(ratio - quantile) > tol\n\ndef test_fit_quantile_raises_ValueError(head_circumference_X_y):\n \"\"\"see that we DO NOT get fit on bad argument requests\n \"\"\"\n X, y = head_circumference_X_y\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, quantile=0)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, quantile=1)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, quantile=-0.1)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, quantile=1.1)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, tol=0, quantile=0.5)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, tol=-0.1, quantile=0.5)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, max_iter=0, quantile=0.5)\n\n with pytest.raises(ValueError):\n ExpectileGAM().fit_quantile(X, y, max_iter=-1, quantile=0.5)\n\nclass TestRegressions(object):\n def 
test_pvalue_invariant_to_scale(self, wage_X_y):\n \"\"\"\n regression test.\n\n a bug made the F-statistic sensitive to scale changes, when it should be invariant.\n\n check that a p-value should not change when we change the scale of the response\n \"\"\"\n X, y = wage_X_y\n\n gamA = LinearGAM(s(0) + s(1) + f(2)).fit(X, y * 1000000)\n gamB = LinearGAM(s(0) + s(1) + f(2)).fit(X, y)\n\n assert np.allclose(gamA.statistics_['p_values'], gamB.statistics_['p_values'])\n\n def test_2d_y_still_allow_fitting_in_PoissonGAM(self, coal_X_y):\n \"\"\"\n regression test.\n\n there was a bug where we forgot to check the y_array before converting\n exposure to weights.\n \"\"\"\n X, y = coal_X_y\n two_d_data = np.ones_like(y).ravel()[:, None]\n\n # 2d y should cause no problems now\n gam = PoissonGAM().fit(X, y[:, None])\n assert gam._is_fitted\n\n # 2d weghts should cause no problems now\n gam = PoissonGAM().fit(X, y, weights=two_d_data)\n assert gam._is_fitted\n\n # 2d exposure should cause no problems now\n gam = PoissonGAM().fit(X, y, exposure=two_d_data)\n assert gam._is_fitted\n\n def test_non_int_exposure_produced_no_inf_in_PoissonGAM_ll(self, coal_X_y):\n \"\"\"\n regression test.\n\n there was a bug where we forgot to round the rescaled counts before\n computing the loglikelihood. since Poisson requires integer observations,\n small numerical errors caused the pmf to return -inf, which shows up\n in the loglikelihood computations, AIC, AICc..\n \"\"\"\n X, y = coal_X_y\n\n rate = 1.2 + np.cos(np.linspace(0, 2. * np.pi, len(y)))\n\n gam = PoissonGAM().fit(X, y, exposure=rate)\n\n assert np.isfinite(gam.statistics_['loglikelihood'])\n\n def test_initial_estimate_runs_for_int_obseravtions(self, toy_classification_X_y):\n \"\"\"\n regression test\n\n ._initial_estimate would fail when trying to add small numbers to\n integer observations\n\n casting the observations to float in that method fixes that\n \"\"\"\n X, y = toy_classification_X_y\n gam = LogisticGAM().fit(X, y)\n assert gam._is_fitted\n\n def test_r_squared_for_new_dataset(self, mcycle_gam, mcycle_X_y):\n \"\"\"\n regression test\n\n estimate r squared used to refer to a non-existant method when `mu=None`\n \"\"\"\n X, y = mcycle_X_y\n mcycle_gam._estimate_r2(X, y)\n\n def test_score_method(self, mcycle_gam, mcycle_X_y):\n \"\"\"\n regression test\n\n score returns calculated r^2 for X data using trained gam\n\n \"\"\"\n X, y = mcycle_X_y\n assert mcycle_gam.score(X, y) <= 1\n"
] | [
[
"numpy.allclose",
"scipy.stats.norm.ppf",
"numpy.random.randn",
"numpy.ones_like",
"numpy.abs",
"numpy.arange",
"numpy.linspace",
"numpy.isfinite"
]
] |
NCBI-Hackathons/RNAseq_Cancer_Biomarkers | [
"4ad41888f6546f400a451633f964ed7999a05ad8"
] | [
"scripts/cm_work/model_feature_importance.py"
] | [
"from model_blender import important_gene_mask\nfrom sklearn.metrics import log_loss\nimport numpy as np\n\ndef gene_weight_finder(model, X_train, X_test, y_train, y_test):\n \"\"\"\n function that returns the most important features, weights and # of features\n\n inputs\n -------\n model: tree based model\n X_train:\n X_test:\n y_train:\n y_test\n\n outputs\n -------\n all_important: list of all genes with feature importance > 0\n \n top_20_feature_names: top 20 most important column names (gene)\n based on feature importance\n\n top_20_weights: weights of the top 20 columns\n\n num_feats: number of features that are not 0\n\n number_important: number of features with feature importance > 0\n\n log_loss: log loss score\n \"\"\"\n columns = X_train.columns\n model.fit(X_train, y_train)\n y_pred = model.predict_proba(X_test)\n ll = log_loss(y_test, y_pred)\n \n top_20_features = np.argsort(model.feature_importances_)[-20:][::-1]\n top_20_feature_names = columns[top_20_features]\n top_20_weights = model.feature_importances_[top_20_features]\n\n number_important = len(important_gene_mask(columns, model.feature_importances_))\n all_important = important_gene_mask(columns, model.feature_importances_)\n\n return all_important, top_20_feature_names, top_20_weights, number_important, ll\n\n\n\n\n"
] | [
[
"sklearn.metrics.log_loss",
"numpy.argsort"
]
] |
Dragoncall/GPflowOpt | [
"f1c268e6b5dc4d7f458e06c59095901d55b73c32"
] | [
"gpflowopt/domain.py"
] | [
"# Copyright 2017 Joachim van der Herten\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom itertools import chain\nfrom gpflow.param import Parentable\n\nfrom .transforms import LinearTransform\n\n\nclass Domain(Parentable):\n \"\"\"\n A domain representing the mathematical space over which is optimized.\n \"\"\"\n\n def __init__(self, parameters):\n super(Domain, self).__init__()\n self._parameters = parameters\n\n @property\n def lower(self):\n \"\"\"\n Lower bound of the domain, corresponding to a numpy array with the lower value of each parameter\n \"\"\"\n return np.array(list(map(lambda param: param.lower, self._parameters))).flatten()\n\n @property\n def upper(self):\n \"\"\"\n Upper bound of the domain, corresponding to a numpy array with the upper value of each parameter\n \"\"\"\n return np.array(list(map(lambda param: param.upper, self._parameters))).flatten()\n\n def __add__(self, other):\n assert isinstance(other, Domain)\n return Domain(self._parameters + other._parameters)\n\n @property\n def size(self):\n \"\"\"\n Returns the dimensionality of the domain\n \"\"\"\n return sum(map(lambda param: param.size, self._parameters))\n\n def __setattr__(self, key, value):\n super(Domain, self).__setattr__(key, value)\n if key is not '_parent':\n if isinstance(value, Parentable):\n value._parent = self\n if isinstance(value, list):\n for val in (x for x in value if isinstance(x, Parentable)):\n val._parent = self\n\n def __eq__(self, other):\n return self._parameters == other._parameters\n\n def __contains__(self, X):\n X = np.atleast_2d(X)\n if X.shape[1] is not self.size:\n return False\n return np.all(np.logical_and(np.logical_or(self.lower < X, np.isclose(self.lower, X)),\n np.logical_or(X < self.upper, np.isclose(self.upper, X))))\n\n def __iter__(self):\n for v in chain(*map(iter, self._parameters)):\n yield v\n\n def __getitem__(self, items):\n if isinstance(items, list):\n return np.sum([self[item] for item in items])\n\n if isinstance(items, str):\n labels = [param.label for param in self._parameters]\n items = labels.index(items)\n\n return self._parameters[items]\n\n def __rshift__(self, other):\n assert(self.size == other.size)\n A = (other.upper - other.lower) / (self.upper - self.lower)\n b = -self.upper * A + other.upper\n return LinearTransform(A, b)\n\n @property\n def value(self):\n return np.vstack(map(lambda p: p.value, self._parameters)).T\n\n @value.setter\n def value(self, x):\n x = np.atleast_2d(x)\n assert (len(x.shape) == 2)\n assert (x.shape[1] == self.size)\n offset = 0\n for p in self._parameters:\n p.value = x[:, offset:offset + p.size]\n offset += p.size\n\n def _repr_html_(self):\n \"\"\"\n Build html string for table display in jupyter notebooks.\n \"\"\"\n html = [\"<table id='domain' width=100%>\"]\n\n # Table header\n columns = ['Name', 'Type', 'Values']\n header = \"<tr>\"\n header += ''.join(map(lambda l: \"<td>{0}</td>\".format(l), columns))\n header += \"</tr>\"\n html.append(header)\n\n # Add parameters\n 
html.append(self._html_table_rows())\n html.append(\"</table>\")\n\n return ''.join(html)\n\n def _html_table_rows(self):\n return ''.join(map(lambda l: l._html_table_rows(), self._parameters))\n\n\nclass Parameter(Domain):\n \"\"\"\n Abstract class representing a parameter (which corresponds to a one-dimensional domain)\n This class can be derived for continuous, discrete and categorical parameters\n \"\"\"\n\n def __init__(self, label, xinit):\n super(Parameter, self).__init__([self])\n self.label = label\n self._x = np.atleast_1d(xinit)\n\n @Domain.size.getter\n def size(self):\n \"\"\"\n One parameter has a dimensionality of 1\n :return: 1\n \"\"\"\n return 1\n\n def __iter__(self):\n yield self\n\n @Domain.value.getter\n def value(self):\n return self._x\n\n @value.setter\n def value(self, x):\n x = np.atleast_1d(x)\n self._x = x.ravel()\n\n def _html_table_rows(self):\n \"\"\"\n Html row representation of a Parameter. Should be overwritten in subclasses objects.\n \"\"\"\n return \"<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\".format(self.label, 'N/A', 'N/A')\n\n\nclass ContinuousParameter(Parameter):\n def __init__(self, label, lb, ub, xinit=None):\n self._range = np.array([lb, ub], dtype=float)\n super(ContinuousParameter, self).__init__(label, xinit or ((ub + lb) / 2.0))\n\n @Parameter.lower.getter\n def lower(self):\n return np.array([self._range[0]])\n\n @Parameter.upper.getter\n def upper(self):\n return np.array([self._range[1]])\n\n @lower.setter\n def lower(self, value):\n self._range[0] = value\n\n @upper.setter\n def upper(self, value):\n self._range[1] = value\n\n def __eq__(self, other):\n return isinstance(other, ContinuousParameter) and self.lower == other.lower and self.upper == other.upper\n\n def _html_table_rows(self):\n \"\"\"\n Html row representation of a ContinuousParameter.\n \"\"\"\n return \"<tr><td>{0}</td><td>{1}</td><td>{2}</td></tr>\".format(self.label, 'Continuous', str(self._range))\n\n\nclass UnitCube(Domain):\n \"\"\"\n The unit domain [0, 1]^d\n \"\"\"\n def __init__(self, n_inputs):\n params = [ContinuousParameter('u{0}'.format(i), 0, 1) for i in np.arange(n_inputs)]\n super(UnitCube, self).__init__(params)\n"
] | [
[
"numpy.sum",
"numpy.atleast_2d",
"numpy.isclose",
"numpy.atleast_1d",
"numpy.arange",
"numpy.array"
]
] |
AprilXiaoyanLiu/whitenoise-system | [
"0e94d2cc8114b97a61d5d2e45278428f91f1e687"
] | [
"synth/snsynth/pytorch/nn/privacy_utils.py"
] | [
"import torch\nimport torch.nn as nn\nimport math\nimport numpy as np\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\n\n\ndef pate(data, teachers, lap_scale, device=\"cpu\"):\n \"\"\"PATE implementation for GANs.\n \"\"\"\n num_teachers = len(teachers)\n labels = torch.Tensor(num_teachers, data.shape[0]).type(torch.int64).to(device)\n for i in range(num_teachers):\n output = teachers[i](data)\n pred = (output > 0.5).type(torch.Tensor).squeeze().to(device)\n # print(pred.shape)\n # print(labels[i].shape)\n labels[i] = pred\n\n votes = torch.sum(labels, dim=0).unsqueeze(1).type(torch.DoubleTensor).to(device)\n noise = torch.from_numpy(np.random.laplace(loc=0, scale=1 / lap_scale, size=votes.size())).to(\n device\n )\n noisy_votes = votes + noise\n noisy_labels = (noisy_votes > num_teachers / 2).type(torch.DoubleTensor).to(device)\n\n return noisy_labels, votes\n\n\ndef moments_acc(num_teachers, votes, lap_scale, l_list, device=\"cpu\"):\n q = (2 + lap_scale * torch.abs(2 * votes - num_teachers)) / (\n 4 * torch.exp(lap_scale * torch.abs(2 * votes - num_teachers))\n ).to(device)\n\n alpha = []\n for l_val in l_list:\n a = 2 * lap_scale ** 2 * l_val * (l_val + 1)\n t_one = (1 - q) * torch.pow((1 - q) / (1 - math.exp(2 * lap_scale) * q), l_val)\n t_two = q * torch.exp(2 * lap_scale * l_val)\n t = t_one + t_two\n alpha.append(torch.clamp(t, max=a).sum())\n\n return torch.DoubleTensor(alpha).to(device)\n"
] | [
[
"torch.sum",
"torch.nn.init.xavier_uniform_",
"torch.clamp",
"torch.exp",
"torch.DoubleTensor",
"torch.abs",
"torch.Tensor"
]
] |
DewanshuHaswani/fastai | [
"fa3aed62e9f7b842d335a92aa20fa7e1b2a7b266"
] | [
"mnist_pytorch/previewer.py"
] | [
"import torch\nimport matplotlib.pyplot as plt\nfrom torchvision import datasets, transforms\nfrom random import choice\n\nBATCH_SIZE=64\n\n# Load the mnist dataset\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"./data\", \n train=True,\n download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=BATCH_SIZE\n )\n\ntrain_data = train_loader.dataset.train_data\n\nchar = choice(train_data)\n\nprint(char)\n\nplt.imshow(char.numpy())\nplt.show()\n\n"
] | [
[
"matplotlib.pyplot.show"
]
] |
kaniblu/vhda | [
"35941097ef552568c29f66cc55d8ce1927f34978"
] | [
"loopers/inferencer/evaluator.py"
] | [
"__all__ = [\"EvaluatingInferencer\"]\n\nfrom dataclasses import dataclass\nfrom typing import Sequence\n\nimport torch\nimport torch.utils.data as td\n\nimport utils\nfrom datasets import BatchData\nfrom .inferencer import Inferencer\nfrom evaluators import FinegrainedEvaluator\n\n\n@dataclass\nclass EvaluatingInferencer(Inferencer):\n evaluators: Sequence[FinegrainedEvaluator] = tuple()\n _requires_lexical_form: bool = utils.private_field(default=False)\n\n def __post_init__(self):\n super().__post_init__()\n self._requires_lexical_form = any(e.requires_lexical_form\n for e in self.evaluators)\n\n def on_run_started(self, dataloader: td.DataLoader) -> td.DataLoader:\n dataloader = super().on_run_started(dataloader)\n for evaluator in self.evaluators:\n evaluator.reset()\n return dataloader\n\n def on_batch_ended(self, batch: BatchData, pred: BatchData, outputs\n ) -> utils.TensorMap:\n stats = dict(super().on_batch_ended(batch, pred, outputs))\n batch_lex, pred_lex = None, None\n if self._requires_lexical_form:\n batch_lex = list(map(self.processor.lexicalize_global, batch))\n pred_lex = list(map(self.processor.lexicalize_global, pred))\n with torch.no_grad():\n for evaluator in self.evaluators:\n if evaluator.requires_lexical_form:\n eval_stats = evaluator.update(batch_lex, pred_lex, outputs)\n else:\n eval_stats = evaluator.update(batch, pred, outputs)\n stats.update(eval_stats or dict())\n return stats\n\n def on_run_ended(self, stats: utils.TensorMap) -> utils.TensorMap:\n stats = dict(super().on_run_ended(stats))\n with torch.no_grad():\n for evaluator in self.evaluators:\n stats.update(evaluator.get() or dict())\n return stats\n"
] | [
[
"torch.no_grad"
]
] |
shamim-hussain/egt | [
"02187de16fcd672b8070191d29e9c9e7f681eb37"
] | [
"lib/base/xformer_layers/attention.py"
] | [
"\nimport tensorflow as tf\ntfk = tf.keras\nfrom .shaping import move_dim\n\n\n\ndef move_ch2h(maybe_headed_tensor,\n channels_dim=-1, head_dim=1):\n if maybe_headed_tensor.shape.rank == 4:\n return move_dim(maybe_headed_tensor,\n from_dim=channels_dim,\n to_dim=head_dim)\n else:\n return maybe_headed_tensor\n\n\ndef merge_attention_heads(merge_type, headed_tensor):\n if merge_type == 'mean':\n return tf.reduce_mean(headed_tensor, axis=1)\n elif merge_type == 'max':\n return tf.reduce_max(headed_tensor, axis=1)\n elif merge_type == 'sum':\n return tf.reduce_sum(headed_tensor, axis=1)\n elif merge_type == 'prod':\n return tf.reduce_prod(headed_tensor, axis=1)\n else:\n raise ValueError(f'Unknown merge type \"{merge_type}\"')\n\n\ndef dot_product_attention(query, key, value, \n mask = None,\n attn_mask = None,\n scale_factor = None,\n bias = None,\n scale_logits = True,\n clip_logits_value = None,\n causal = False,\n pad = False,\n merge_heads = None,\n attn_scale_factor = None,\n return_logits = False,\n return_matrix = False,\n big_number = 1e9,\n scale_degree = False,\n ):\n\n query_shape = query.shape\n key_shape = key.shape\n value_shape = value.shape\n input_rank = query_shape.rank\n\n attention_dim = query_shape[-1]\n \n if pad:\n paddings = [(0,0)]*(input_rank-2) + [(1,0),(0,0)]\n key = tf.pad(key, paddings)\n value = tf.pad(value, paddings)\n\n # Create Priliminary Logits\n attention_logits = tf.matmul(query, key, transpose_b=True)\n\n\n # Scaling for dot product\n if scale_logits:\n attention_logits = attention_logits*(attention_dim**-.5)\n \n \n # Clipping for numerical stability\n if clip_logits_value is not None:\n if not isinstance(clip_logits_value, list):\n if isinstance(clip_logits_value, tuple):\n clip_logits_value = list(clip_logits_value)\n else:\n clip_logits_value = [-clip_logits_value, clip_logits_value, 0]\n if len(clip_logits_value) == 2:\n clip_logits_value.append(0)\n if len(clip_logits_value) < 3:\n raise ValueError\n \n # Clip before\n if clip_logits_value is not None and (not clip_logits_value[2]):\n attention_logits = tf.clip_by_value(attention_logits, *clip_logits_value[:2])\n\n # Scale factor and bias\n if scale_factor is not None:\n scale_factor = move_ch2h(scale_factor)\n attention_logits = attention_logits * scale_factor\n\n if bias is not None:\n bias = move_ch2h(bias)\n attention_logits = attention_logits + bias\n \n # Save for returning the logits\n logits_matrix = attention_logits\n\n # Clip after\n if clip_logits_value is not None and clip_logits_value[2]:\n attention_logits = tf.clip_by_value(attention_logits, *clip_logits_value[:2])\n\n # Masking\n if not mask is None:\n mask_rank = mask.shape.rank\n\n mask_slice = [Ellipsis]+[None]*(input_rank-mask_rank)+[slice(None)]\n mask = mask[mask_slice]\n\n if not mask.dtype is attention_logits.dtype:\n mask = tf.cast(mask, attention_logits.dtype)\n attention_logits = attention_logits + (mask-1)*big_number\n \n if not attn_mask is None:\n attn_mask = move_ch2h(attn_mask)\n if not attn_mask.dtype is attention_logits.dtype:\n attn_mask = tf.cast(attn_mask, attention_logits.dtype)\n attention_logits = attention_logits + (attn_mask-1)*big_number\n \n if causal:\n causal_mask_shape = [query.shape[-2], key.shape[-2]]\n if None in causal_mask_shape:\n causal_mask_shape = tf.shape(attention_logits)[-2:]\n\n causal_mask = tf.ones(causal_mask_shape,\n dtype=attention_logits.dtype)\n causal_mask = tf.linalg.band_part(causal_mask,-1,0)\n attention_logits = attention_logits + (causal_mask-1)*big_number\n \n \n # Softmax 
Attention\n attention_matrix = tf.nn.softmax(attention_logits, axis=-1)\n \n # Merge Heads\n if merge_heads is not None:\n attention_matrix = merge_attention_heads(merge_type=merge_heads,\n headed_tensor=attention_matrix)\n \n # Scale Attention Matrix\n if attn_scale_factor is not None:\n attn_scale_factor = move_ch2h(attn_scale_factor)\n attention_matrix = attention_matrix * attn_scale_factor\n \n output = tf.matmul(attention_matrix, value)\n \n if (attn_scale_factor is not None) and scale_degree:\n if mask is None:\n degree = tf.reduce_sum(attn_scale_factor,\n axis=-1, keepdims=True)\n else:\n degree = tf.reduce_sum(attn_scale_factor * mask,\n axis=-1, keepdims=True)\n output = output * tf.math.log(1+degree)\n \n if merge_heads is None:\n output.set_shape(query_shape[:-1]+value_shape[-1:])\n else:\n output.set_shape(query_shape[0:1]+query_shape[2:-1]+value_shape[-1:])\n\n\n # Format Outputs\n outputs = output\n\n if return_logits or return_matrix:\n outputs = (outputs,)\n \n if return_logits:\n logits = move_dim(logits_matrix, from_dim=1, to_dim=4)\n outputs = outputs + (logits,)\n \n if return_matrix:\n outputs = outputs + (attention_matrix,)\n\n return outputs\n"
] | [
[
"tensorflow.pad",
"tensorflow.math.log",
"tensorflow.reduce_max",
"tensorflow.shape",
"tensorflow.ones",
"tensorflow.reduce_mean",
"tensorflow.linalg.band_part",
"tensorflow.matmul",
"tensorflow.cast",
"tensorflow.reduce_prod",
"tensorflow.clip_by_value",
"tensorflow.nn.softmax",
"tensorflow.reduce_sum"
]
] |
theislab/AutoGeneS | [
"22bde0d5eba013e90edb85341e0bd9c28b82e7fd"
] | [
"autogenes/core.py"
] | [
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .ga import GeneticAlgorithm\n\nfrom . import objectives as ga_objectives\n\nimport deap\nimport warnings\n\nclass AutoGeneS:\n\n PLOT_PARAMS = {\n 'small': {\n 'figsize': (10,5),\n 'all_ms': 8,\n 'sel_ms': 10\n },\n 'large': {\n 'figsize': (15,10),\n 'all_ms': 5,\n 'sel_ms': 10\n }\n }\n\n PLOT_THRESHOLD = 50\n\n def __init__(self, data):\n\n self.data = data\n\n if len(self.data.shape) != 2:\n raise ValueError(\"data is expected to have two dimensions\")\n\n if self.data.shape[0] < 2:\n raise ValueError(\"At least two rows (cell types) expected\")\n\n if self.data.shape[1] < self.data.shape[0]:\n raise ValueError(\"Number of columns (genes) must be >= number of rows (cell types)\")\n\n if not np.isfinite(self.data).all():\n raise ValueError(\"Some entries in data are not scalars\")\n\n self.__has_run = False\n self.selection = None\n self.selection_index = None\n\n def run(self, ngen=2, mode='standard', nfeatures=None, weights=None, objectives=None, seed=0, verbose=True, **kwargs):\n\n # Check modes\n\n if mode == 'standard':\n if nfeatures is not None:\n raise ValueError(\"nfeatures doesn't apply to standard mode (did you mean mode='fixed'?)\")\n\n elif mode == 'fixed':\n if nfeatures is None:\n raise ValueError(\"You need to supply nfeatures\")\n\n if nfeatures > self.data.shape[1]:\n raise ValueError(\"nfeatures must be <= the number of columns (genes)\")\n \n if nfeatures < self.data.shape[0]:\n raise ValueError(\"nfeatures must be >= the number of rows (cell types)\")\n else:\n raise ValueError(\"Invalid mode\")\n\n # Check weights and objectives\n \n if weights is None:\n if objectives is None:\n weights = (-1.0,1.0)\n objectives = ('correlation','distance')\n else:\n raise Exception(\"Need weights for objectives\")\n else:\n if objectives is not None:\n if len(weights) != len(objectives):\n raise ValueError(\"Number of weights does not match number of objectives\")\n weights_l = []\n objectives_l = []\n for i,w in enumerate(weights):\n if w == 0:\n warnings.warn(f\"Ignoring objective '{str(objectives[i])}'\")\n else:\n weights_l.append(w)\n objectives_l.append(objectives[i])\n weights=tuple(weights_l)\n objectives=tuple(objectives_l)\n else:\n raise Exception(\"Need objectives for weights\")\n \n # Store objectives\n\n self.objectives_func = []\n self.objectives_names = []\n\n for f in objectives:\n if callable(f):\n self.objectives_func.append(f)\n self.objectives_names.append(f.__name__)\n elif isinstance(f,str):\n if not hasattr(ga_objectives,f):\n raise ValueError(f\"No such objective: {f}\")\n else:\n self.objectives_names.append(f)\n self.objectives_func.append(getattr(ga_objectives,f))\n else:\n raise ValueError(\"Invalid objective\")\n\n self.objectives_num = len(self.objectives_func)\n self.weights = weights\n\n self.ga = GeneticAlgorithm(\n data=self.data, \n ngen=ngen,\n mode=mode,\n weights=weights, \n objectives_names=self.objectives_names, \n objectives_func=self.objectives_func, \n seed=seed, \n verbose=verbose,\n nfeatures=nfeatures,\n **kwargs\n )\n self.hof = self.ga.run()\n\n self.__has_run = True\n\n def resume(self):\n self.ga.resume()\n\n @property\n def pareto(self):\n self.__assert_run()\n return self.hof.items\n\n @property\n def fitness_matrix(self):\n self.__assert_run()\n\n all = []\n for i in range(self.objectives_num):\n vals = np.array(list(map(lambda x: x.fitness.values[i], self.hof.items)))\n all.append(vals)\n return np.array(all).T\n\n #\n # Plot results\n #\n\n def 
plot(self,objectives=(0,1), **kwargs):\n\n self.__assert_run()\n\n if self.objectives_num == 1:\n raise Exception(\"Cannot plot for a single objective\")\n\n obj = objectives\n\n if len(obj) != 2:\n raise ValueError(\"Must supply two objectives per plot\") \n\n if not all(map(lambda x: x in range(self.objectives_num), obj)):\n raise ValueError(f\"Invalid objectives, must be 0 <= x <= {self.objectives_num-1}\")\n\n if not kwargs:\n return self.plot(weights=self.weights)\n\n i,desc = self.__from_pareto(**kwargs)\n\n if desc == 'index': legend = f'By index'\n if desc == 'weights': legend = f\"Using weights {kwargs['weights']}\"\n if desc == 'close_to': legend = f\"Close to {kwargs['close_to'][1]}\"\n\n if 'size' in kwargs:\n if kwargs['size'] not in ['small','large']:\n raise ValueError(\"Invalid size\")\n size = kwargs['size']\n else:\n if len(self.pareto) < AutoGeneS.PLOT_THRESHOLD:\n size = 'small' \n else:\n size = 'large'\n\n df = pd.DataFrame(self.fitness_matrix).sort_values(by=obj[0])\n\n df_0 = df[obj[0]]\n df_1 = df[obj[1]]\n\n params = AutoGeneS.PLOT_PARAMS[size]\n\n plt.figure(figsize=params['figsize'])\n\n line = plt.plot(df_0,df_1)\n\n plt_all, = plt.plot(df_0.drop(i),df_1.drop(i),'bo',ms=params['all_ms'])\n plt_sel, = plt.plot(df_0[i],df_1[i],'r^',ms=params['sel_ms'])\n\n plt.xlabel(self.objectives_names[obj[0]])\n plt.ylabel(self.objectives_names[obj[1]])\n\n plt.legend([plt_all, plt_sel], [\"Option\", legend],bbox_to_anchor=(1, 1), loc='upper left')\n\n plt.show()\n\n #\n # Select individual\n #\n\n def select(self, **kwargs):\n self.__assert_run()\n\n if not kwargs:\n return self.select(weights=self.weights)\n\n i,desc = self.__from_pareto(**kwargs)\n self.selection = self.hof[i]\n self.selection_index = i\n\n return self.selection\n\n #\n # Helper\n #\n\n def __from_pareto(self,**kwargs):\n\n if sum([ x in kwargs for x in [\"weights\",\"index\",\"close_to\"]]) != 1:\n raise Exception(\"You need to provide exactly one criterion.\")\n\n if 'weights' in kwargs:\n weights = kwargs['weights']\n i_max = self.__index_by_weights(weights) \n return i_max,'weights'\n\n if 'index' in kwargs:\n index = kwargs['index']\n if isinstance(index,int):\n if index not in range(len(self.pareto)):\n raise ValueError(\"Invalid index\")\n return index,'index'\n else:\n obj,i = index\n fit = pd.DataFrame(data=self.fitness_matrix).sort_values(by=obj)\n return fit.index.values[i],'index'\n \n if 'close_to' in kwargs:\n obj,num = kwargs['close_to']\n fit = self.fitness_matrix[:,obj]\n i = np.argmin(np.abs(fit-num))\n return i,'close_to'\n\n def __index_by_weights(self,weights):\n self.__assert_run()\n if len(weights) != self.objectives_num:\n raise ValueError(f\"Number of weights does not match number of objectives\")\n\n fitness = self.fitness_matrix\n for i in range(self.objectives_num):\n max = np.max(fitness[:,i])\n if max:\n fitness[:,i] *= 1/max\n\n wfitness = fitness.dot(np.array(weights))\n return np.argmax(wfitness)\n\n def __assert_run(self):\n if not self.__has_run:\n raise Exception(\"AutoGeneS did not run yet\")\n\n def __setstate__(self,dict):\n deap.creator.FitnessGA.weights = dict['weights']\n self.__dict__.update(dict)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"numpy.abs",
"numpy.argmax",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.isfinite"
]
] |
gargraghav/tensorflow | [
"a0ea36b9dffc563deae6fa9e2f4d2ca912a3a224"
] | [
"Learning Tensorflow/Examples/handwrittendigit_classifier.py"
] | [
"import tensorflow as tf\nimport time\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\n\nbeginTime=time.time()\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\nlearning_rate = 0.01\ntraining_iterations = 30\nbatch_size = 100\ndisplay_step = 2\n\nx = tf.placeholder(\"float\", [None, 784])\ny = tf.placeholder(\"float\", [None, 10])\n\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\nwith tf.name_scope(\"Wx_b\") as scope:\n model = tf.nn.softmax(tf.matmul(x, W) + b)\n\nw_h = tf.summary.histogram(\"weights\", W)\nb_h = tf.summary.histogram(\"biases\", b)\n\nwith tf.name_scope(\"cost_function\") as scope:\n cost_function = -tf.reduce_sum(y*tf.log(model))\n tf.summary.scalar(\"cost function\", cost_function)\n\nwith tf.name_scope(\"train\") as scope:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)\n\ninit = tf.global_variables_initializer()\n\nmerged_summary_op = tf.summary.merge_all()\n\nwith tf.Session() as sess:\n sess.run(init)\n summary_writer = tf.summary.FileWriter(\"/home/raghav/PycharmProjects/tensorflow/tensorboard/\", sess.graph)\n\n for itr in range(training_iterations):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples / batch_size)\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})\n avg_cost += sess.run(cost_function, feed_dict={x: batch_xs, y: batch_ys})/total_batch\n summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})\n summary_writer.add_summary(summary_str, total_batch + i)\n if itr % display_step == 0:\n print(\"Iteration:\", '%d' % (itr + 1), \"cost=\", \"{:.9f}\".format(avg_cost))\n\n print(\"Training Completed!\")\n\n predictions = tf.equal(tf.argmax(model, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(predictions, \"float\"))\n\n print(\"\\nAccuracy: \",sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})*100)\n endTime = time.time()\n print('\\nTotal time: {:5.2f}s'.format(endTime - beginTime))\n\n f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n\n test1_index = 0\n test1_x = mnist.test.images[test1_index].reshape(1, 784)\n test1_img = mnist.test.images[test1_index].reshape((28, 28))\n test1_y = mnist.test.labels[test1_index].reshape(1, 10)\n test1_pred = sess.run(model, feed_dict={x: test1_x, y: test1_y})\n\n ax1.imshow(test1_img, cmap='gray')\n ax2.bar(list(range(0, 10)), test1_pred[0])\n\n test2_index = 6\n test2_x = mnist.test.images[test2_index].reshape(1, 784)\n test2_img = mnist.test.images[test2_index].reshape((28, 28))\n test2_y = mnist.test.labels[test2_index].reshape(1, 10)\n test2_pred = sess.run(model, feed_dict={x: test2_x, y: test2_y})\n\n ax3.imshow(test2_img, cmap='gray')\n ax4.bar(list(range(0, 10)), test2_pred[0])\n\n plt.show()"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.placeholder",
"tensorflow.summary.histogram",
"tensorflow.zeros",
"tensorflow.summary.merge_all",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.subplots",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.matmul",
"tensorflow.cast",
"tensorflow.name_scope",
"matplotlib.pyplot.show",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Session",
"tensorflow.argmax",
"tensorflow.log",
"tensorflow.summary.FileWriter"
]
] |
AI-Mart/PaddleNLP | [
"0ababea960427e8b70220ea06d908ed58cbed0ed"
] | [
"examples/language_model/gpt-3/static/run_pretrain_static.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPretrain GPT in static graph mode.\n\"\"\"\nimport argparse\nimport math\nimport os\nimport random\nimport time\nimport sys\n\nos.path.expandvars('$HOME')\nos.path.expanduser('~')\n\nimport numpy as np\nimport paddle\nimport paddle.distributed.fleet as fleet\nfrom paddle.distributed.fleet.meta_optimizers.sharding.utils import save_persistables\nfrom modeling import GPTModel, GPTForPretraining, GPTPretrainingCriterion\nfrom paddlenlp.transformers import GPTTokenizer, GPTChineseTokenizer\nfrom paddlenlp.ops import guard, Topology, get_rng_state_tracker\nfrom paddlenlp.utils.log import logger\nfrom paddlenlp.utils import profiler\nimport paddlenlp.ops as ops\nfrom visualdl import LogWriter\n\n# Used to load the data_tools path, should import before dataset\nfilepath = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(0, os.path.join(filepath, \"../\"))\nfrom dataset import create_pretrained_dataset\nfrom args import parse_args\nimport lr\n\nMODEL_CLASSES = {\n \"gpt\": (GPTForPretraining, GPTTokenizer),\n \"gpt-cn\": (GPTForPretraining, GPTChineseTokenizer),\n}\n\n\ndef create_data_holder(args):\n \"\"\"creat data holder\"\"\"\n tokens = paddle.static.data(\n name=\"tokens\", shape=[-1, args.max_seq_len], dtype=\"int64\")\n loss_mask = paddle.static.data(\n name=\"loss_mask\", shape=[-1, args.max_seq_len], dtype=\"float32\")\n position_ids = paddle.static.data(\n name=\"position_ids\", shape=[-1, args.max_seq_len], dtype=\"int64\")\n labels = paddle.static.data(\n name=\"labels\", shape=[-1, args.max_seq_len], dtype=\"int64\")\n return [tokens, loss_mask, position_ids, labels]\n\n\ndef dist_optimizer(args, topo):\n default_global_batch_size = topo.data_info.size * args.micro_batch_size\n if args.global_batch_size is None:\n args.global_batch_size = default_global_batch_size\n\n bsz_per_dp = args.global_batch_size // topo.data_info.size\n micro_batch_size = args.micro_batch_size\n assert args.global_batch_size % micro_batch_size == 0, \"cannot do gradient accumulate, global_batch_size: {} micro_batch_size: {}\".format(\n args.global_batch_size, micro_batch_size)\n acc_steps = bsz_per_dp // micro_batch_size\n\n exec_strategy = paddle.fluid.ExecutionStrategy()\n exec_strategy.num_threads = 2\n exec_strategy.num_iteration_per_drop_scope = 1\n\n dist_strategy = fleet.DistributedStrategy()\n dist_strategy.execution_strategy = exec_strategy\n dist_strategy.nccl_comm_num = 3\n\n dist_strategy.recompute = args.use_recompute\n dist_strategy.pipeline = args.pp_degree > 1\n\n if args.use_amp:\n dist_strategy.amp = True\n dist_strategy.amp_configs = {\n \"custom_white_list\": [\n 'softmax', 'layer_norm', 'gelu',\n \"fused_softmax_mask_upper_triangle\", \"elementwise_add\"\n ],\n \"custom_black_list\":\n [\"reduce_sum\", \"c_softmax_with_cross_entropy\", \"elementwise_div\"],\n \"init_loss_scaling\": 32768,\n \"use_dynamic_loss_scaling\": True,\n \"use_pure_fp16\": 
args.amp_level == \"O2\",\n \"use_fp16_guard\": False\n }\n if args.use_sharding:\n dist_strategy.sharding = True\n dist_strategy.sharding_configs = {\n \"segment_broadcast_MB\": 32,\n \"sharding_degree\": args.sharding_degree,\n \"mp_degree\": args.mp_degree,\n \"pp_degree\": args.pp_degree,\n \"dp_degree\": args.dp_degree,\n \"optimize_offload\": False,\n }\n if args.pp_degree > 1:\n dist_strategy.pipeline_configs = {\n \"schedule_mode\": \"1F1B\",\n \"micro_micro_batch_size\": micro_batch_size,\n \"accumulate_steps\": acc_steps,\n }\n else:\n assert acc_steps == 1, \"Only support accumulate steps in piplinemode. Please set you global_batch_size={}\".format(\n default_global_batch_size)\n\n return dist_strategy\n\n\ndef get_train_data_file(args):\n files = [\n os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)\n if (os.path.isfile(os.path.join(args.input_dir, f)) and str(f).endswith(\n \"_idx.npz\"))\n ]\n files = [x.replace(\"_idx.npz\", \"\") for x in files]\n if len(files) == 0:\n logger.warning(\n \"Not found dataset with name of xxx_ids.npy and xxx_idx.npz! Try to found old compatible xxx_ids.npz file.\"\n )\n else:\n return files\n\n files = [\n os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)\n if (os.path.isfile(os.path.join(args.input_dir, f)) and str(f).endswith(\n \"_ids.npz\"))\n ]\n\n files = [x.replace(\"_ids.npz\", \"\") for x in files]\n return files\n\n\ndef init_static_with_params(model, dygraph_params, topo, prog=None):\n from paddlenlp.utils.tools import dygraph_params_to_static\n static_params = dygraph_params_to_static(model, dygraph_params, topo)\n if prog is None:\n prog = paddle.static.default_main_program()\n paddle.static.set_program_state(prog, static_params)\n\n\ndef run_evaluate(data_loader,\n exe,\n program,\n iter_steps,\n log_writer,\n global_step,\n args,\n epoch,\n is_last,\n eval_fetch,\n task_name=\"valid\"):\n all_loss = []\n local_time = time.time()\n\n for eval_step, batch in enumerate(data_loader):\n loss_return = exe.run(program, feed=batch, fetch_list=eval_fetch)\n if is_last:\n all_loss.append(float(loss_return[0]))\n if eval_step >= iter_steps - 1:\n if not is_last:\n break\n average_loss = sum(all_loss) / len(all_loss)\n logger.info(\n \"%s step %d, epoch: %d, batch: %d, loss: %f, speed: %.0f tokens/s\"\n % (task_name, global_step, epoch, eval_step, average_loss,\n iter_steps * args.micro_batch_size * args.max_seq_len /\n (time.time() - local_time)))\n log_writer.add_scalar(task_name + \"_loss\", average_loss,\n global_step)\n break\n\n\ndef do_train(args):\n # Initialize the paddle and paddle fleet execute environment\n paddle.enable_static()\n fleet.init(is_collective=True)\n\n # Create the random seed for the worker\n random.seed(args.seed)\n np.random.seed(args.seed)\n paddle.seed(args.seed)\n get_rng_state_tracker().add('global_seed', args.seed)\n get_rng_state_tracker().add('local_seed',\n args.seed + fleet.worker_index() + 2021)\n\n if args.use_amp and args.amp_level == \"O2\":\n assert (args.mp_degree == 1 and args.pp_degree == 1\n ), \"When amp level is O2, mp_degree and pp_degree should be 1.\"\n assert (args.use_sharding == False\n ), \"When amp level is O2, use_sharding should be False.\"\n\n assert args.device in [\n \"cpu\", \"gpu\", \"xpu\"\n ], \"Invalid device! 
Available device should be cpu, gpu, or xpu.\"\n place = paddle.set_device(args.device)\n\n worker_num = fleet.worker_num()\n worker_index = fleet.worker_index()\n local_rank = 0 if fleet.local_rank() is None else int(fleet.local_rank())\n\n topo = Topology(\n device_rank=worker_index,\n world_size=worker_num,\n dp_degree=args.dp_degree,\n pp_degree=args.pp_degree,\n sharding_degree=args.sharding_degree,\n mp_degree=args.mp_degree)\n\n logger.info(\"The topo of hybrid parallelism:\\n{}\".format(topo))\n\n dist_strategy = dist_optimizer(args, topo)\n\n # Create log write, train results show on last card of pipeline.\n if topo.is_last:\n log_writer_path = os.path.join(\n args.output_dir, \"train_log\",\n \"{}_globalbsz_{}_amp_{}_recompute_{}_card_{}\".format(\n args.model_name_or_path, args.global_batch_size, args.use_amp,\n args.use_recompute, worker_index).lower())\n if os.path.exists(log_writer_path):\n import shutil\n shutil.rmtree(log_writer_path)\n log_writer = LogWriter(log_writer_path)\n\n # Define the input data in the static mode\n\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n pretrained_models_list = list(\n model_class.pretrained_init_configuration.keys())\n\n data_file = get_train_data_file(args)\n main_program = paddle.static.default_main_program()\n startup_program = paddle.static.default_startup_program()\n with paddle.static.program_guard(main_program, startup_program):\n with paddle.utils.unique_name.guard():\n with paddle.static.device_guard('gpu:0'):\n data_holders = create_data_holder(args)\n [tokens, loss_mask, position_ids, labels] = data_holders\n\n tokenizer = tokenizer_class.from_pretrained(\n args.model_name_or_path)\n eos_id = tokenizer.eos_token_id\n\n train_data_loader, valid_data_loader, test_data_loader = create_pretrained_dataset(\n args,\n data_file,\n local_rank=local_rank,\n data_world_size=topo.data_info.size,\n data_world_rank=topo.data_info.rank,\n eos_id=eos_id,\n max_seq_len=args.max_seq_len,\n places=paddle.static.cuda_places(),\n data_holders=data_holders,\n pipeline_mode=False, )\n\n if args.model_name_or_path in pretrained_models_list:\n model_config = model_class.pretrained_init_configuration[\n args.model_name_or_path]\n\n model_config[\n \"hidden_dropout_prob\"] = args.hidden_dropout_prob\n model_config[\n \"attention_probs_dropout_prob\"] = args.attention_probs_dropout_prob\n model_config[\"topo\"] = topo\n\n model = guard(f'gpu:{args.pp_degree -1}')(\n GPTForPretraining)(guard(f'gpu:0')(GPTModel)(\n **model_config))\n else:\n model, _ = GPTForPretraining.from_pretrained(\n args.model_name_or_path,\n hidden_dropout_prob=args.hidden_dropout_prob,\n attention_probs_dropout_prob=args.\n attention_probs_dropout_prob,\n topo=topo)\n # Create the model for the gpt pretrain\n preds = model(tokens, position_ids)\n\n criterion = guard(f'gpu:{args.pp_degree -1}')(\n GPTPretrainingCriterion)(topo)\n loss = criterion(preds, labels, loss_mask)\n\n # Create the learning_rate sheduler and optimizer\n if args.decay_steps is None:\n args.decay_steps = args.max_steps\n warmup_step = args.warmup_rate * args.decay_steps\n\n # TODO @ZHUI Use paddle network to support lr scheduler\n lr_scheduler = lr.CosineAnnealingWithWarmupDecay(\n max_lr=args.max_lr,\n min_lr=args.min_lr,\n warmup_step=warmup_step,\n decay_step=args.decay_steps)\n\n clip = None\n if args.grad_clip > 0:\n clip = paddle.fluid.clip.GradientClipByGlobalNorm(\n clip_norm=args.grad_clip)\n\n decay_param = [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd 
in [\"bias\", \"norm\"])\n ]\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n beta1=args.adam_beta1,\n beta2=args.adam_beta2,\n epsilon=args.adam_epsilon,\n grad_clip=clip,\n weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in decay_param)\n # alias\n optimizer.apply_optimize = optimizer._apply_optimize\n\n if args.use_recompute:\n dist_strategy.recompute = True\n dist_strategy.recompute_configs = {\n \"checkpoints\": model.gpt.checkpoints\n }\n\n # Use the fleet api to compile the distributed optimizer\n optimizer = fleet.distributed_optimizer(\n optimizer, strategy=dist_strategy)\n\n optimizer.minimize(loss)\n logger.info(f'final strategy: {fleet._final_strategy()}')\n logger.info(\"The training meta optimizer is/are %s\" %\n fleet._get_applied_meta_list())\n\n program_desc_dir = os.path.join(args.output_dir, \"program_desc\")\n if not os.path.isdir(program_desc_dir):\n os.mkdir(program_desc_dir)\n\n with open(program_desc_dir + \"/main_program.txt.%d\" % worker_index,\n 'w') as f:\n f.write(str(main_program))\n\n with open(program_desc_dir + \"/startup_program.txt.%d\" % worker_index,\n 'w') as f:\n f.write(str(startup_program))\n\n # Define the Executor for running the static model\n exe = paddle.static.Executor(place)\n exe.run(startup_program)\n test_program = main_program.clone(for_test=True)\n\n if args.use_amp and args.amp_level == \"O2\":\n optimizer.amp_init(place)\n\n if args.model_name_or_path not in pretrained_models_list:\n logger.info(\"Try to load checkpoint from %s \" % args.model_name_or_path)\n dygrah_path = os.path.join(args.model_name_or_path,\n \"model_state.pdparams\")\n static_path = os.path.join(args.model_name_or_path, \"static_vars\")\n\n flag_loaded = False\n if os.path.exists(static_path):\n if args.mp_degree > 1:\n logger.warning(\"MP should init with dygraph params\")\n else:\n logger.info(\"Loading parameters from %s\" % static_path)\n paddle.static.load(main_program, static_path, exe)\n flag_loaded = True\n\n if not flag_loaded and os.path.exists(dygrah_path):\n if args.sharding_degree > 1:\n logger.warning(\"Sharding should init with static vars\")\n else:\n logger.info(\"Loading parameters from %s\" % dygrah_path)\n init_static_with_params(\n model,\n paddle.load(\n dygrah_path, return_numpy=True),\n topo,\n main_program)\n flag_loaded = True\n\n if not flag_loaded:\n logger.error(\"No checkpoint load.\")\n\n global_step = 0\n tic_train = time.time()\n epoch = 0\n learning_rate = main_program.global_block().vars[\"learning_rate_0\"]\n while True:\n fetchs = []\n if topo.is_last:\n fetchs = [loss, learning_rate]\n\n # Bug fix, if not call valid_data_loader, the enumerate will call valid_data_loader\n # many times. 
and start a new random dataloader.\n valid_data_loader = valid_data_loader()\n test_data_loader = test_data_loader()\n\n train_reader_cost = 0.0\n train_run_cost = 0.0\n reader_start = time.time()\n for step, batch in enumerate(train_data_loader()):\n train_reader_cost += time.time() - reader_start\n train_start = time.time()\n\n global_step += 1\n\n ret = exe.run(main_program,\n feed=batch,\n fetch_list=fetchs,\n use_program_cache=True)\n # In the new 2.0 api, must call this function to change the learning_rate\n lr_scheduler.step()\n train_run_cost += time.time() - train_start\n\n # Profile for model benchmark\n profiler.add_profiler_step(args.profiler_options)\n\n if global_step % args.logging_freq == 0:\n if topo.is_last:\n loss_return, lr_return = ret\n #speed = args.logging_freq / (time.time() - tic_train)\n speed = args.logging_freq / (\n train_reader_cost + train_run_cost)\n avg_reader_cost = train_reader_cost / args.logging_freq\n logger.info(\n \"global step %d, epoch: %d, batch: %d, loss: %.9f, avg_reader_cost: %.5f sec, avg_batch_cost: %.5f sec, speed: %.2f steps/s, ips_total: %.0f tokens/s, ips: %.0f tokens/s, learning rate: %.5e\"\n % (global_step, epoch, step, loss_return[0],\n avg_reader_cost, 1. / speed, speed,\n speed * args.global_batch_size * args.max_seq_len,\n speed * args.global_batch_size * args.max_seq_len /\n worker_num, lr_return[0]))\n log_writer.add_scalar(\"loss\", loss_return[0], global_step)\n log_writer.add_scalar(\"learning_rate\", lr_return[0],\n global_step)\n tic_train = time.time()\n train_reader_cost = 0.0\n train_run_cost = 0.0\n\n if args.check_accuracy:\n if global_step >= args.max_steps:\n return\n else:\n continue\n\n if global_step % args.eval_freq == 0:\n # TODO, check the input data of validation\n eval_fetch = []\n if topo.is_last:\n eval_fetch = [loss]\n\n run_evaluate(valid_data_loader, exe, test_program,\n args.eval_iters, log_writer, global_step, args,\n epoch, topo.is_last, eval_fetch, \"valid\")\n tic_train = time.time()\n\n if global_step % args.save_steps == 0 or global_step >= args.max_steps:\n output_dir = os.path.join(args.output_dir,\n \"model_%d\" % global_step)\n logger.debug(\"saving models to {}\".format(output_dir))\n save_persistables(exe,\n os.path.join(output_dir, \"static_vars\"),\n main_program)\n if global_step <= args.save_steps:\n model.init_config[\"init_args\"][0].init_config.pop(\"topo\",\n None)\n model.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n tic_train = time.time()\n\n if global_step >= args.max_steps:\n eval_fetch = []\n if topo.is_last:\n eval_fetch = [loss]\n\n run_evaluate(test_data_loader, exe, test_program,\n args.test_iters, log_writer, global_step, args,\n epoch, topo.is_last, eval_fetch, \"test\")\n del train_data_loader\n return\n reader_start = time.time()\n\n epoch += 1\n\n\nif __name__ == \"__main__\":\n config = parse_args(MODEL_CLASSES)\n do_train(config)\n"
] | [
[
"numpy.random.seed"
]
] |
ozcelikfu/IC-GAN_fMRI_Reconstruction | [
"31b0dc7659afbf8d12b1e460a38ab6d8d9a4296c"
] | [
"KamitaniData/kamitani_data_handler.py"
] | [
"\nfrom scipy.io import loadmat\nimport numpy as np\nimport pandas as pd\nimport sklearn.preprocessing\nfrom sklearn import preprocessing\n\n\nclass kamitani_data_handler():\n \"\"\"Generate batches for FMRI prediction\n frames_back - how many video frames to take before FMRI frame\n frames_forward - how many video frames to take after FMRI frame\n \"\"\"\n\n def __init__(self, matlab_file ,test_img_csv = 'KamitaniData/imageID_test.csv',train_img_csv = 'KamitaniData/imageID_training.csv',voxel_spacing =3,log = 0 ):\n mat = loadmat(matlab_file)\n self.data = mat['dataSet'][:,3:]\n self.sample_meta = mat['dataSet'][:,:3]\n meta = mat['metaData']\n\n\n self.meta_keys = list(l[0] for l in meta[0][0][0][0])\n self.meta_desc = list(l[0] for l in meta[0][0][1][0])\n self.voxel_meta = np.nan_to_num(meta[0][0][2][:,3:])\n test_img_df = pd.read_csv(test_img_csv, header=None)\n train_img_df =pd.read_csv(train_img_csv, header=None)\n self.test_img_id = test_img_df[0].values\n self.train_img_id = train_img_df[0].values\n self.sample_type = {'train':1 , 'test':2 , 'test_imagine' : 3}\n self.voxel_spacing = voxel_spacing\n\n self.log = log\n\n def get_meta_field(self,field = 'DataType'):\n index = self.meta_keys.index(field)\n if(index <3): # 3 first keys are sample meta\n return self.sample_meta[:,index]\n else:\n return self.voxel_meta[index]\n\n\n def print_meta_desc(self):\n print(self.meta_desc)\n\n def get_labels(self, imag_data = 0,test_run_list = None):\n le = preprocessing.LabelEncoder()\n\n img_ids = self.get_meta_field('Label')\n type = self.get_meta_field('DataType')\n train = (type == self.sample_type['train'])\n test = (type == self.sample_type['test'])\n imag = (type == self.sample_type['test_imagine'])\n\n img_ids_train = img_ids[train]\n img_ids_test = img_ids[test]\n img_ids_imag = img_ids[imag]\n\n\n train_labels = []\n test_labels = []\n imag_labels = []\n for id in img_ids_test:\n idx = (np.abs(id - self.test_img_id)).argmin()\n test_labels.append(idx)\n\n for id in img_ids_train:\n idx = (np.abs(id - self.train_img_id)).argmin()\n train_labels.append(idx)\n\n for id in img_ids_imag:\n idx = (np.abs(id - self.test_img_id)).argmin()\n imag_labels.append(idx)\n\n if (test_run_list is not None):\n run = self.get_meta_field('Run')\n test = (self.get_meta_field('DataType') == 2).astype(bool)\n run = run[test]\n\n select = np.in1d(run, test_run_list)\n test_labels = test_labels[select]\n\n #imag_labels = le.fit_transform(img_ids_imag)\n if(imag_data):\n return np.array(train_labels), np.array(test_labels), np.array(imag_labels)\n else:\n return np.array(train_labels),np.array(test_labels)\n\n\n\n\n\n def get_data(self,normalize =1 ,roi = 'ROI_VC',imag_data = 0,test_run_list = None): # normalize 0-no, 1- per run , 2- train/test seperatly\n type = self.get_meta_field('DataType')\n train = (type == self.sample_type['train'])\n test = (type == self.sample_type['test'])\n test_imag = (type == self.sample_type['test_imagine'])\n test_all = np.logical_or(test,test_imag)\n\n roi_select = self.get_meta_field(roi).astype(bool)\n data = self.data[:,roi_select]\n\n if(self.log ==1):\n data = np.log(1+np.abs(data))*np.sign(data)\n\n\n if(normalize==1):\n\n run = self.get_meta_field('Run').astype('int')-1\n num_runs = np.max(run)+1\n data_norm = np.zeros(data.shape)\n\n for r in range(num_runs):\n data_norm[r==run] = sklearn.preprocessing.scale(data[r==run])\n train_data = data_norm[train]\n test_data = data_norm[test]\n test_all = data_norm[test_all]\n test_imag = data_norm[test_imag]\n\n else:\n 
train_data = data[train]\n test_data = data[test]\n if(normalize==2):\n train_data = sklearn.preprocessing.scale(train_data)\n test_data = sklearn.preprocessing.scale(test_data)\n\n\n if(self.log ==2):\n train_data = np.log(1+np.abs(train_data))*np.sign(train_data)\n test_data = np.log(1+np.abs(test_data))*np.sign(test_data)\n train_data = sklearn.preprocessing.scale(train_data)\n test_data = sklearn.preprocessing.scale(test_data)\n\n\n\n test_labels = self.get_labels()[1]\n imag_labels = self.get_labels(1)[2]\n num_labels = max(test_labels)+1\n test_data_avg = np.zeros([num_labels,test_data.shape[1]])\n test_imag_avg = np.zeros([num_labels,test_data.shape[1]])\n\n if(test_run_list is not None):\n run = self.get_meta_field('Run')\n test = (self.get_meta_field('DataType') == 2).astype(bool)\n run = run[test]\n\n select = np.in1d(run, test_run_list)\n test_data = test_data[select,:]\n test_labels = test_labels[select]\n\n for i in range(num_labels):\n test_data_avg[i] = np.mean(test_data[test_labels==i],axis=0)\n test_imag_avg[i] = np.mean(test_imag[imag_labels == i], axis=0)\n if(imag_data):\n return train_data, test_data, test_data_avg,test_imag,test_imag_avg\n\n else:\n return train_data, test_data, test_data_avg\n\n def get_voxel_loc(self):\n x = self.get_meta_field('voxel_x')\n y = self.get_meta_field('voxel_y')\n z = self.get_meta_field('voxel_z')\n dim = [int(x.max() -x.min()+1),int(y.max() -y.min()+1), int(z.max() -z.min()+1)]\n return [x,y,z] , dim\n\n\n"
] | [
[
"scipy.io.loadmat",
"numpy.logical_or",
"numpy.sign",
"numpy.zeros",
"pandas.read_csv",
"numpy.in1d",
"numpy.abs",
"numpy.max",
"sklearn.preprocessing.LabelEncoder",
"numpy.array",
"numpy.nan_to_num",
"numpy.mean"
]
] |
Pluto9th/ctapipe | [
"8c4faa674a1949210cbda8cb9e2413dd6362afea"
] | [
"ctapipe/reco/tests/test_energy_regressor.py"
] | [
"from tempfile import TemporaryDirectory\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom astropy import units as u\n\nfrom ctapipe.reco.energy_regressor import EnergyRegressor\n\n\ndef test_prepare_model():\n cam_id_list = [\"FlashCam\", \"ASTRICam\"]\n feature_list = {\"FlashCam\": [[1, 10], [2, 20], [3, 30], [0.9, 9],\n ],\n \"ASTRICam\": [[10, 1], [20, 2], [30, 3], [9, 0.9],\n ]}\n target_list = {\"FlashCam\": np.array([1, 2, 3, 0.9]) * u.TeV,\n \"ASTRICam\": np.array([1, 2, 3, 0.9]) * u.TeV}\n\n reg = EnergyRegressor(cam_id_list=cam_id_list, n_estimators=10)\n reg.fit(feature_list, target_list)\n return reg, cam_id_list\n\n\ndef test_fit_save_load():\n reg, cam_id_list = test_prepare_model()\n with TemporaryDirectory() as d:\n temp_path = \"/\".join([d, \"reg_{cam_id}.pkl\"])\n reg.save(temp_path)\n reg = EnergyRegressor.load(temp_path, cam_id_list)\n return reg, cam_id_list\n\n\ndef test_predict_by_event():\n np.random.seed(3)\n\n reg, cam_id_list = test_fit_save_load()\n prediction = reg.predict_by_event([{\"ASTRICam\": [[10, 1]]},\n {\"ASTRICam\": [[20, 2]]},\n {\"ASTRICam\": [[30, 3]]}])\n assert_allclose(prediction[\"mean\"].value, [1, 2, 3], rtol=0.2)\n\n prediction = reg.predict_by_event([{\"FlashCam\": [[1, 10]]},\n {\"FlashCam\": [[2, 20]]},\n {\"FlashCam\": [[3, 30]]}])\n assert_allclose(prediction[\"mean\"].value, [1, 2, 3], rtol=0.2)\n"
] | [
[
"numpy.array",
"numpy.random.seed",
"numpy.testing.assert_allclose"
]
] |
lucasiscovici/plotly_py | [
"42ab769febb45fbbe0a3c677dc4306a4f59cea36"
] | [
"plotly_study/tests/test_orca/test_sg_scraper.py"
] | [
"import plotly_study\nimport os\nimport shutil\nimport pytest\n\n\n# Fixtures\n# --------\[email protected]()\ndef setup():\n # Reset orca state\n plotly_study.io.orca.config.restore_defaults(reset_server=False)\n\n\nhere = os.path.dirname(os.path.abspath(__file__))\n\n\n# Run setup before every test function in this file\npytestmark = pytest.mark.usefixtures(\"setup\")\n\n\ndef execute_plotly_example():\n \"\"\"\n Some typical code which would go inside a gallery example.\n \"\"\"\n import plotly_study.graph_objs as go\n\n # Create random data with numpy\n import numpy as np\n\n N = 200\n random_x = np.random.randn(N)\n random_y_0 = np.random.randn(N)\n random_y_1 = np.random.randn(N) - 1\n\n # Create traces\n trace_0 = go.Scatter(x=random_x, y=random_y_0, mode=\"markers\", name=\"Above\")\n\n fig = go.Figure(data=[trace_0])\n plotly_study.io.show(fig)\n\n\ndef test_scraper():\n from plotly_study.io._sg_scraper import plotly_sg_scraper\n\n # test that monkey-patching worked ok\n assert plotly_study.io.renderers.default == \"sphinx_gallery\"\n # Use dummy values for arguments of plotly_sg_scraper\n block = \"\" # we don't need actually code\n import tempfile\n\n tempdir = tempfile.mkdtemp()\n gallery_conf = {\"src_dir\": tempdir, \"examples_dirs\": here}\n names = iter([\"0\", \"1\", \"2\"])\n block_vars = {\"image_path_iterator\": names}\n execute_plotly_example()\n res = plotly_sg_scraper(block, block_vars, gallery_conf)\n shutil.rmtree(tempdir)\n assert \".. raw:: html\" in res\n"
] | [
[
"numpy.random.randn"
]
] |
guilhermemg/trace-links-tc-br | [
"965cb57d17057d1c9c3841c4aba01e72cf008cab"
] | [
"modules/models_runner/tc_br_models_runner.py"
] | [
"import pandas as pd\nimport numpy as np\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n\nfrom modules.utils import plots\nfrom modules.utils import firefox_dataset_p2 as fd\nfrom modules.utils import tokenizers as tok\nfrom modules.utils import aux_functions\n\nfrom modules.models.lda import LDA\nfrom modules.models.lsi import LSI\nfrom modules.models.bm25 import BM_25\nfrom modules.models.wordvec import WordVec_BasedModel\nfrom modules.models.zeror import ZeroR_Model\nfrom modules.models.vsm import VSM\n\nimport modules.models.model_hyperps as mh\n\nclass TC_BR_Models_Hyperp:\n \n @staticmethod\n def get_lsi_model_hyperp():\n return {\n mh.LSI_Model_Hyperp.SVD_MODEL_N_COMPONENTS.value: 20,\n mh.LSI_Model_Hyperp.VECTORIZER_NGRAM_RANGE.value: (1,1),\n mh.LSI_Model_Hyperp.VECTORIZER_MAX_FEATURES.value: 400,\n mh.LSI_Model_Hyperp.VECTORIZER.value : TfidfVectorizer(stop_words='english', use_idf=True, smooth_idf=True),\n mh.LSI_Model_Hyperp.VECTORIZER_TOKENIZER.value : tok.WordNetBased_LemmaTokenizer()\n }\n \n @staticmethod\n def get_lda_model_hyperp():\n return {\n mh.LDA_Model_Hyperp.LDA_MODEL_N_COMPONENTS.value: 20,\n mh.LDA_Model_Hyperp.LDA_MODEL_RANDOM_STATE.value : 2,\n mh.LDA_Model_Hyperp.VECTORIZER_NGRAM_RANGE.value: (1,1),\n mh.LDA_Model_Hyperp.VECTORIZER_MAX_FEATURES.value: 200,\n mh.LDA_Model_Hyperp.VECTORIZER.value : TfidfVectorizer(stop_words='english', use_idf=True, smooth_idf=True),\n mh.LDA_Model_Hyperp.VECTORIZER_TOKENIZER.value : tok.PorterStemmerBased_Tokenizer() \n }\n \n @staticmethod\n def get_bm25_model_hyperp():\n return {\n mh.BM25_Model_Hyperp.TOKENIZER.value : tok.PorterStemmerBased_Tokenizer()\n }\n \n @staticmethod\n def get_w2v_model_hyperp():\n return {\n mh.WordVec_Model_Hyperp.TOKENIZER.value : tok.PorterStemmerBased_Tokenizer(),\n mh.WordVec_Model_Hyperp.WORD_EMBEDDING.value : 'CC_BASED',\n mh.WordVec_Model_Hyperp.GEN_NAME.value : 'wordvector'\n }\n \n @staticmethod\n def get_cust_w2v_model_hyperp():\n return {\n mh.WordVec_Model_Hyperp.TOKENIZER.value : tok.PorterStemmerBased_Tokenizer(),\n mh.WordVec_Model_Hyperp.WORD_EMBEDDING.value : 'CUSTOMIZED',\n mh.WordVec_Model_Hyperp.GEN_NAME.value : 'cust_wordvector'\n }\n \n @staticmethod\n def get_vsm_model_hyperp():\n return {\n mh.VSM_Model_Hyperp.VECTORIZER_NGRAM_RANGE.value: (1,1),\n mh.VSM_Model_Hyperp.VECTORIZER_MAX_FEATURES.value: 400,\n mh.VSM_Model_Hyperp.VECTORIZER.value : TfidfVectorizer(stop_words='english', use_idf=True, smooth_idf=True),\n mh.VSM_Model_Hyperp.VECTORIZER_TOKENIZER.value : tok.WordNetBased_LemmaTokenizer()\n }\n\nclass TC_BR_Runner:\n def __init__(self, testcases=pd.DataFrame(), bugreports=pd.DataFrame()):\n self.test_cases_df = None\n self.bug_reports_df = None\n self.corpus = None\n self.query = None\n self.test_cases_names = None\n self.bug_reports_names = None\n \n self.set_basic_params(testcases, bugreports)\n \n \n def set_basic_params(self, testcases, bugreports):\n if testcases.empty:\n self.test_cases_df = fd.Datasets.read_testcases_df()\n else:\n self.test_cases_df = testcases\n \n if bugreports.empty:\n self.bug_reports_df = fd.Datasets.read_selected_bugreports_df()\n else:\n self.bug_reports_df = bugreports\n \n self.corpus = self.test_cases_df.tc_desc\n self.query = self.bug_reports_df.br_desc\n \n self.test_cases_names = self.test_cases_df.TC_Number\n self.bug_reports_names = self.bug_reports_df.Bug_Number\n \n def run_lsi_model(self, lsi_hyperp=None):\n print(\"Running LSI Model ------\")\n \n if lsi_hyperp == None:\n lsi_hyperp = 
TC_BR_Models_Hyperp.get_lsi_model_hyperp()\n\n lsi_model = LSI(**lsi_hyperp)\n lsi_model.set_name('LSI_Model_TC_BR')\n \n lsi_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n \n return lsi_model\n \n def run_lda_model(self, lda_hyperp=None):\n print(\"Running LDA Model -----\")\n \n if lda_hyperp == None:\n lda_hyperp = TC_BR_Models_Hyperp.get_lda_model_hyperp()\n\n lda_model = LDA(**lda_hyperp)\n lda_model.set_name('LDA_Model_TC_BR')\n lda_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n\n return lda_model\n \n def run_bm25_model(self, bm25_hyperp=None):\n print(\"Running BM25 Model -----\")\n \n if bm25_hyperp == None:\n bm25_hyperp = TC_BR_Models_Hyperp.get_bm25_model_hyperp()\n\n bm25_model = BM_25(**bm25_hyperp)\n bm25_model.set_name('BM25_Model_TC_BR')\n bm25_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n\n return bm25_model\n \n def run_word2vec_model(self, wv_hyperp=None):\n print(\"Running W2V Model ------\")\n \n if wv_hyperp == None:\n wv_hyperp = TC_BR_Models_Hyperp.get_w2v_model_hyperp()\n\n wv_model = WordVec_BasedModel(**wv_hyperp)\n wv_model.set_name('WordVec_Model_TC_BR')\n wv_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n\n return wv_model\n \n def run_cust_word2vec_model(self, wv_hyperp=None):\n print(\"Running Customized W2V model -----\")\n \n if wv_hyperp == None:\n wv_hyperp = TC_BR_Models_Hyperp.get_cust_w2v_model_hyperp()\n\n wv_model = WordVec_BasedModel(**wv_hyperp)\n wv_model.set_name('Customized_WordVec_Model_TC_BR')\n wv_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n\n return wv_model\n \n def run_zeror_model(self, zeror_hyperp=None):\n print(\"Running ZeroR model -----\")\n \n oracle = fd.Tc_BR_Oracles.read_oracle_expert_volunteers_intersec_df()\n \n zeror_model = ZeroR_Model(oracle)\n zeror_model.set_name('ZeroR_Model_TC_BR')\n zeror_model.recover_links()\n \n return zeror_model\n\n def run_vsm_model(self, vsm_hyperp=None):\n print('Running VSM model -----')\n \n if vsm_hyperp == None:\n vsm_hyperp = TC_BR_Models_Hyperp.get_vsm_model_hyperp()\n \n vsm_model = VSM(**vsm_hyperp)\n vsm_model.set_name('VSM_Model_TC_BR')\n vsm_model.recover_links(self.corpus, self.query, self.test_cases_names, self.bug_reports_names)\n \n return vsm_model"
] | [
[
"pandas.DataFrame",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
kumasento/gconv-prune | [
"f81c417d3754102c902bd153809130e12607bd7d"
] | [
"evaluation/early_stage/prune.py"
] | [
"\"\"\" Pruning a pre-trained model by GSP.\n\nAuthor: Ruizhe Zhao\nDate: 12/02/2019\n\nThe work-flow of this script:\n- load a pre-trained model (suffixed by 'm')\n- compute the mask based on weights\n- fine-tune the model\n\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nimport copy\nimport time\nimport shutil\nimport json\nimport logging\n\nlogging.getLogger().setLevel(logging.DEBUG)\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nfrom gumi.ops.mask_conv2d import MaskConv2d\nfrom gumi.pruning import prune_utils\nfrom gumi import model_utils\nfrom gumi import models # a module contains all supported models\n\nmodel_names = sorted(\n name\n for name in models.__dict__\n if name.islower() and not name.startswith(\"__\") and callable(models.__dict__[name])\n)\n\nimport cifar_utils\nfrom utils import * # import utilities provided by pytorch-classification\nfrom parser import create_parser # argument parser for evaluation tasks\nfrom pruner import Pruner\n\nparser = create_parser()\nargs = parser.parse_args()\n\n# CUDA\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\nuse_cuda = torch.cuda.is_available()\ncudnn.benchmark = True\n\n\ndef write_summary(args, file_name=\"summary.json\", **kwargs):\n \"\"\" Write summary to a JSON file. \"\"\"\n summary_file = \"{}/{}\".format(args.checkpoint, file_name)\n with open(summary_file, \"w\") as f:\n json.dump(kwargs, f)\n\n\ndef main():\n # initialize the pruner\n pruner = Pruner(args)\n # pruner.prune(args.checkpoint)\n pruner.evaluate()\n\n # Run regularization\n pruner.prune(\n args.checkpoint, fake_mask=True, perm=args.perm, num_iters=args.num_sort_iters\n )\n pruner.evaluate()\n pruner.regularize()\n pruner.apply_mask()\n pruner.evaluate()\n\n logging.debug(\"Fine-tuning model for {} epochs\".format(args.epochs))\n best_acc = pruner.fine_tune(args.epochs)\n logging.debug(\"Fine-tuned model\")\n pruner.evaluate()\n\n write_summary(args, best_acc=best_acc)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.cuda.is_available"
]
] |
wj-Mcat/Paddle | [
"0a931106008f4174a8556aa4a4b9f23167c33f4d"
] | [
"python/paddle/fluid/reader.py"
] | [
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom . import core\nimport sys\nimport six\nimport numpy as np\nimport threading\nimport paddle\nfrom .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place, _in_eager_mode\nfrom .executor import global_scope\nfrom .data_feeder import DataFeeder, BatchedTensorProvider\nfrom .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler\nfrom .dataloader import BatchSampler, Dataset, IterableDataset\nfrom .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn\nfrom .dataloader.batch_sampler import _InfiniteIterableSampler\nfrom .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer\nfrom .unique_name import UniqueNameGenerator\nfrom .framework import _get_paddle_place, _get_paddle_place_list\nfrom paddle.fluid.framework import _set_expected_place, _current_expected_place\nimport logging\nimport warnings\n\n### Dygraph DataLoader configs ###\nimport os\nimport multiprocessing\nimport signal\n\n# NOTE: queue has a different name in python2 and python3\nimport queue\n\n# NOTE: [ avoid hanging & failed quickly ] These value is used in getting data from another process\nQUEUE_GET_TIMEOUT = 60\n\n__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']\n\ndata_loader_unique_name_generator = UniqueNameGenerator()\n\nKEEP_DATA_LOADER_ORDER = True\nUSE_PINNED_MEMORY = None\n\n\ndef keep_data_loader_order(*args):\n global KEEP_DATA_LOADER_ORDER\n if len(args) == 0:\n return KEEP_DATA_LOADER_ORDER\n else:\n assert len(args) == 1 and isinstance(args[0], bool)\n KEEP_DATA_LOADER_ORDER = args[0]\n\n\ndef use_pinned_memory(*args):\n global USE_PINNED_MEMORY\n if len(args) == 0:\n return USE_PINNED_MEMORY\n else:\n assert len(args) == 1 and isinstance(args[0], bool)\n USE_PINNED_MEMORY = args[0]\n\n\ndef _convert_places(places):\n if not isinstance(places, (list, tuple)):\n places = [places]\n\n ret = []\n for p in places:\n if not isinstance(p, core.Place):\n tmp = core.Place()\n tmp.set_place(p)\n p = tmp\n\n ret.append(p)\n return ret\n\n\n# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled\ndef _reader_process_loop(batch_reader, data_queue):\n try:\n # set signal handler\n core._set_process_signal_handler()\n\n # NOTE: [ mmap files clear ] When the child process exits unexpectedly,\n # some shared memory objects may have been applied for but have not yet\n # been put into the inter-process Queue. 
This part of the object needs\n # to be cleaned up when the process ends.\n CleanupFuncRegistrar.register(_cleanup_mmap)\n\n for batch in batch_reader():\n tensor_list = core._convert_to_tensor_list(batch)\n data_queue.put(tensor_list)\n core._remove_tensor_list_mmap_fds(tensor_list)\n data_queue.put(None)\n except KeyboardInterrupt:\n # NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process\n pass\n except:\n six.reraise(*sys.exc_info())\n\n\nclass DataLoaderBase(object):\n def __init__(self):\n self._places = None\n\n def __call__(self):\n return self\n\n def next(self):\n '''\n Get the next item in the DataLoader object. This method \n should not be called by users directly. It is used for\n implementing iterator protocol of Python 2.x inside\n PaddlePaddle framework.\n '''\n return self.__next__()\n\n def __iter__(self):\n raise NotImplementedError()\n\n def __next__(self):\n raise NotImplementedError()\n\n @classmethod\n def _check_input_array(cls, item):\n arr = np.asarray(item)\n if arr.dtype == np.object:\n raise TypeError(\n \"\\n\\tFaild to convert input data to a regular ndarray :\\n\\t* Usually \"\n \"this means the input data contains nested lists with different lengths. \"\n \"\\n\\t* Check the reader function passed to 'decorate_batch_generator'\"\n \" to locate the data causes this issue.\\n\\t* Please consider using \"\n \"'fluid.create_lod_tensor' to convert it to a LoD-Tensor.\")\n return arr\n\n\nclass DataLoader(object):\n \"\"\"\n DataLoader prodives an iterator which iterates given dataset\n once by the batch_sampler.\n\n DataLoader supports single-process and multi-prcess data loading,\n multi-process workers will be used to load data asynchronously if\n :attr:`num_workers` is set as a positive number.\n\n DataLoader supports map-style dataset and iterable-style dataset.\n\n For map-style datast(can get a sample from dataset with a given\n index), please see :code:`paddle.io.Dataset`.\n\n For iterable-style datast(get samples from dataset iteratively,\n like a Python iterator), please see :code:`paddle.io.IterableDataset`.\n\n For :code:`batch_sampler` please see :code:`paddle.io.BatchSampler`\n\n .. note::\n GPU tensor operation is not supported in subprocess currently,\n please don't use GPU tensor operations in pipeline which will\n be performed in subprocess, such as dataset transforms, collte_fn,\n etc. Numpy array and CPU tensor operation is supported.\n\n **Disable automatic batching**\n\n In certain cases such as some NLP tasks, instead of automatic batching,\n handling batching manually in dataset is needed by users. For these\n cases, automatic batching is disabled if both :attr:`batch_size` and\n :attr:`batch_sampler` is set as None, each data got from :attr:`dataset`\n should be batched data and will be processed with function define by\n :attr:`collate_fn` or :attr:`default_collate_fn`.\n\n\n .. note::\n When automatic batching is disabled, :attr:`default_collate_fn` will\n do nothing to data from dataset.\n\n\n Args: \n dataset(Dataset): the dataset to load data from, should be an\n instance of subclass of :code:`paddle.io.Dataset` or\n :code:`paddle.io.IterableDataset`.\n feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.\n The Tensors should be created by :code:`paddle.static.data()`.\n :attr:`feed_list` must be set if :attr:`return_list` is\n False. 
Default None.\n places(list(Place)|tuple(Place)|list(str)|optional): a list of Place,\n to put data onto, :attr:`places` can be None, if \n :attr:`places` is None, default place(CPUPlace or CUDAPlace(0))\n will be used. Default None. If ``places`` is list of string,\n the string in the list can be ``cpu``, ``gpu:x`` and ``gpu_pinned``,\n where ``x`` is the index of the GPUs.\n return_list (bool): whether the return value on each device is \n presented as a list. If :attr:`return_list=False`, the return\n value on each device would be a dict of str -> Tensor, where\n the key of the dict is the name of each fed Tensors. If \n :attr:`return_list=True`, the return value on each device would\n be a list(Tensor). :attr:`return_list` can only be True\n in dynamic graph mode. Default True.\n batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`\n to generate batch indices to draw samples from :attr:`dataset`\n and combine a batch. Default None.\n batch_size(int|None): sample number in a mini-batch, a substitution\n parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`\n is not set, a default `paddle.io.BatchSampler` will be used\n and initialize by :attr:`batch_size`, :attr:`shuffle` and\n :attr:`drop_last`. Default 1.\n shuffle(bool): whther to shuffle indices order before genrate\n batch indices, a substitution parameter for :attr:`batch_sampler`\n see :attr:`batch_size`. Default False.\n drop_last(bool): whether drop the last incomplete batch dataset size\n is not divisible by the batch size, a substitution parameter\n for :attr:`batch_sampler`, see :attr:`batch_size`. Default False\n collate_fn(callable): function to generate mini-batch data by merging\n the sample list, None for only stack each fields of sample in axis\n 0(same as :attr::`np.stack(..., axis=0)`). Default None\n num_workers(int): the number of subprocess to load data, 0 for no\n subprocess used and loading data in main process. Default 0\n use_buffer_reader (bool): whether to use bufferred reader. \n If use_buffer_reader=True, the DataLoader would prefetch next \n batch data asynchronously, so it would speed up data feeding \n and occupies a little more CPU or GPU memory, i.e., the memory\n of one batch input data. Default True.\n use_shared_memory (bool): whether to use shared memory to speed up\n putting data into inter-process queue, set :attr:`use_shared_memory`\n as True only when the shared memory space on your machine(e.g.\n space of '/dev/shm' on Linux operating sysytem) is large enough.\n Shared memory will only be enabled in multi-process mode(num_workers\n > 0). Default True.\n timeout(int): the timeout value for getting data form output queue\n of subprocesses. Default 0.\n worker_init_fn(callable): init function which will be called with\n worker id on each subproces starting if not set as None. Default\n None.\n\n Returns:\n DataLoader: an iterable object for data iterating, each elemnet of the generated data is a Tensor.\n\n Examples:\n \n .. 
code-block:: python\n\n import numpy as np\n\n import paddle\n import paddle.nn as nn\n import paddle.nn.functional as F\n from paddle.io import Dataset, BatchSampler, DataLoader\n\n BATCH_NUM = 20\n BATCH_SIZE = 16\n EPOCH_NUM = 4\n\n IMAGE_SIZE = 784\n CLASS_NUM = 10\n\n # define a random dataset\n class RandomDataset(Dataset):\n def __init__(self, num_samples):\n self.num_samples = num_samples\n\n def __getitem__(self, idx):\n image = np.random.random([IMAGE_SIZE]).astype('float32')\n label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')\n return image, label\n\n def __len__(self):\n return self.num_samples\n\n dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)\n\n class SimpleNet(nn.Layer):\n def __init__(self):\n super(SimpleNet, self).__init__()\n self.fc = nn.Linear(IMAGE_SIZE, CLASS_NUM)\n\n def forward(self, image, label=None):\n return self.fc(image)\n\n simple_net = SimpleNet()\n opt = paddle.optimizer.SGD(learning_rate=1e-3,\n parameters=simple_net.parameters())\n\n loader = DataLoader(dataset,\n batch_size=BATCH_SIZE,\n shuffle=True,\n drop_last=True,\n num_workers=2)\n\n for e in range(EPOCH_NUM):\n for i, (image, label) in enumerate(loader()):\n out = simple_net(image)\n loss = F.cross_entropy(out, label)\n avg_loss = paddle.mean(loss)\n avg_loss.backward()\n opt.minimize(avg_loss)\n simple_net.clear_gradients()\n print(\"Epoch {} batch {}: loss = {}\".format(e, i, np.mean(loss.numpy())))\n\n\n .. note::\n For reading iterable dataset with multiprocess Dataloader,\n please see :code:`paddle.io.IterableDataset`\n\n \"\"\"\n\n def __init__(self,\n dataset,\n feed_list=None,\n places=None,\n return_list=True,\n batch_sampler=None,\n batch_size=1,\n shuffle=False,\n drop_last=False,\n collate_fn=None,\n num_workers=0,\n use_buffer_reader=True,\n use_shared_memory=True,\n timeout=0,\n worker_init_fn=None,\n persistent_workers=False):\n self.return_list = return_list\n self.collate_fn = collate_fn\n self.use_buffer_reader = use_buffer_reader\n self.worker_init_fn = worker_init_fn\n\n self.dataset = dataset\n\n if not return_list and not in_dygraph_mode():\n assert feed_list is not None, \\\n \"feed_list should be set when return_list=False\"\n self.feed_list = feed_list\n\n if places is None:\n places = _current_expected_place()\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n self.places = _convert_places(places)\n\n assert num_workers >= 0, \"num_workers should be a non-negative value\"\n if num_workers > 0 and (sys.platform == 'darwin' or\n sys.platform == 'win32'):\n warnings.warn(\n \"DataLoader with multi-process mode is not supported on MacOs and Windows currently.\" \\\n \" Please use signle-process mode with num_workers = 0 instead\")\n num_workers = 0\n self.num_workers = num_workers\n\n self.use_shared_memory = use_shared_memory\n if use_shared_memory and num_workers == 0:\n self.use_shared_memory = False\n\n assert timeout >= 0, \"timeout should be a non-negative value\"\n self.timeout = timeout\n\n if isinstance(dataset, IterableDataset):\n self.dataset_kind = _DatasetKind.ITER\n if shuffle:\n raise ValueError(\n \"IterableDataset not support shuffle, but got shuffle={}\".\n format(shuffle))\n if batch_sampler is not None:\n raise ValueError(\n \"IterableDataset expect unspecified batch_sampler\")\n else:\n self.dataset_kind = _DatasetKind.MAP\n\n if batch_sampler is not None:\n assert batch_size == 1 and not shuffle and not drop_last, \\\n \"batch_size/shuffle/drop_last should 
not be set when \" \\\n \"batch_sampler is given\"\n self.batch_sampler = batch_sampler\n self.batch_size = None\n elif batch_size is None:\n self.batch_sampler = None\n self.batch_size = None\n else:\n assert batch_size > 0, \\\n \"batch_size should be None or a positive value when \" \\\n \"batch_sampler is not given\"\n self.batch_size = batch_size\n if isinstance(dataset, IterableDataset):\n self.batch_sampler = _InfiniteIterableSampler(dataset,\n batch_size)\n else:\n self.batch_sampler = BatchSampler(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n drop_last=drop_last)\n\n self.drop_last = drop_last\n self.auto_collate_batch = self.batch_sampler is not None\n\n self.pin_memory = False\n if in_dygraph_mode():\n self.pin_memory = use_pinned_memory() or True\n\n self._persistent_workers = persistent_workers\n self._iterator = None\n\n def __len__(self):\n if self.dataset_kind == _DatasetKind.ITER:\n raise ValueError(\"length of IterableDataset not supported\")\n else:\n if self.auto_collate_batch:\n return len(self.batch_sampler)\n else:\n return len(self.dataset)\n\n def __iter__(self):\n if self.num_workers == 0:\n return _DataLoaderIterSingleProcess(self)\n elif self._persistent_workers:\n if self._iterator is None:\n self._iterator = _DataLoaderIterMultiProcess(self)\n else:\n self._iterator._reset()\n return self._iterator\n else:\n return _DataLoaderIterMultiProcess(self)\n\n def __call__(self):\n return self.__iter__()\n\n @staticmethod\n def from_generator(feed_list=None,\n capacity=None,\n use_double_buffer=True,\n iterable=True,\n return_list=False,\n use_multiprocess=False,\n drop_last=True):\n \"\"\"\n .. warning::\n This API will be deprecated in the future, it is recommended to use\n :code:`paddle.io.DataLoader` which supports multi-processes acceleration.\n\n .. note::\n **The framework ensures that the data loading order of DataLoader is exactly the same as the user-defined data source.**\n\n Create a DataLoader object for loading data from Python generator. \n Data would be prefetched using Python thread and be pushed\n into a queue asynchronously.\n\n The created DataLoader object provides 3 methods to set the data source\n :code:`set_sample_generator` , :code:`set_sample_list_generator` and \n :code:`set_batch_generator` . Please see the following example codes\n to know their usages.\n \n If iterable = True, the created DataLoader object is a Python generator\n object, which is iterable using for-range loop.\n\n If iterable = False, the created DataLoader object provides \n :code:`start()` and :code:`reset()` method to control the data reading\n process.\n\n Args: \n feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.\n The Tensors should be created by :code:`fluid.data()`.\n capacity (int): capacity of the queue maintained in DataLoader.\n The unit is batch number. Set larger capacity if your reader \n is fast. \n use_double_buffer (bool): whether to use double_buffer_reader. \n If use_double_buffer=True, the DataLoader would prefetch next \n batch data asynchronously, so it would speed up data feeding \n and occupies a little more CPU or GPU memory, i.e., the memory\n of one batch input data. \n iterable (bool): whether the created DataLoader is iterable. \n return_list (bool): whether the return value on each device is \n presented as a list. It is only valid when iterable=True. \n If return_list=False, the return value on each device would \n be a dict of str -> LoDTensor, where the key of the dict is \n the name of each fed Tensors. 
If return_list=True, the \n return value on each device would be a list(LoDTensor). It is\n recommended to use return_list=False in static graph mode and\n use return_list=True in dygraph mode. \n use_multiprocess (bool): whether to use multi-process to speed up\n the data loading process in dygraph. Note: this parameter only\n can be used in the dygraph mode. In the static graph mode,\n whether this parameter is set or not has no effect.\n The Default value is False.\n drop_last (bool): whether to drop the last batches whose number is\n less than the CPU core/GPU card number. The default value is \n True. In training phase, users should not set drop_last=False,\n because all CPU cores/GPU cards must read data from DataLoader. \n In inference phase, users can set drop_last=False, so that the\n last batches whose number is less than the CPU core/GPU card\n number can be tested. \n\n Returns:\n loader (DataLoader): the created DataLoader object.\n\n Examples 1:\n \n .. code-block:: python\n\n '''\n Example in static graph mode\n '''\n import numpy as np\n\n import paddle\n import paddle.static as static\n import paddle.nn.functional as F\n\n\n BATCH_NUM = 10 \n BATCH_SIZE = 16\n EPOCH_NUM = 4\n\n CLASS_NUM = 10\n\n ITERABLE = True # whether the created DataLoader object is iterable\n USE_GPU = False # whether to use GPU\n\n DATA_FORMAT = 'batch_generator' # data format of data source user provides \n\n paddle.enable_static()\n\n def simple_net(image, label):\n fc_tmp = static.nn.fc(image, size=CLASS_NUM)\n cross_entropy = F.softmax_with_cross_entropy(image, label)\n loss = paddle.mean(cross_entropy)\n sgd = paddle.optimizer.SGD(learning_rate=1e-3)\n sgd.minimize(loss)\n return loss\n\n def get_random_images_and_labels(image_shape, label_shape):\n image = np.random.random(size=image_shape).astype('float32')\n label = np.random.random(size=label_shape).astype('int64')\n return image, label\n\n # If the data generator yields one sample each time,\n # use DataLoader.set_sample_generator to set the data source.\n def sample_generator_creator(): \n def __reader__():\n for _ in range(BATCH_NUM * BATCH_SIZE):\n image, label = get_random_images_and_labels([784], [1])\n yield image, label\n\n return __reader__\n\n # If the data generator yield list of samples each time,\n # use DataLoader.set_sample_list_generator to set the data source.\n def sample_list_generator_creator():\n def __reader__():\n for _ in range(BATCH_NUM): \n sample_list = []\n for _ in range(BATCH_SIZE):\n image, label = get_random_images_and_labels([784], [1])\n sample_list.append([image, label])\n\n yield sample_list\n\n return __reader__ \n\n # If the data generator yields a batch each time, \n # use DataLoader.set_batch_generator to set the data source.\n def batch_generator_creator():\n def __reader__():\n for _ in range(BATCH_NUM):\n batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1]) \n yield batch_image, batch_label\n\n return __reader__\n\n # If DataLoader is iterable, use for loop to train the network \n def train_iterable(exe, prog, loss, loader):\n for _ in range(EPOCH_NUM):\n for data in loader():\n exe.run(prog, feed=data, fetch_list=[loss])\n\n # If DataLoader is not iterable, use start() and reset() method to control the process \n def train_non_iterable(exe, prog, loss, loader):\n for _ in range(EPOCH_NUM):\n loader.start() # call DataLoader.start() before each epoch starts\n try:\n while True:\n exe.run(prog, fetch_list=[loss])\n except paddle.core.EOFException:\n 
loader.reset() # call DataLoader.reset() after catching EOFException \n\n def set_data_source(loader, places):\n if DATA_FORMAT == 'sample_generator':\n loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places)\n elif DATA_FORMAT == 'sample_list_generator':\n loader.set_sample_list_generator(sample_list_generator_creator(), places=places)\n elif DATA_FORMAT == 'batch_generator':\n loader.set_batch_generator(batch_generator_creator(), places=places)\n else:\n raise ValueError('Unsupported data format')\n\n image = static.data(name='image', shape=[None, 784], dtype='float32')\n label = static.data(name='label', shape=[None, 1], dtype='int64')\n\n # Define DataLoader \n loader = paddle.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE)\n\n # Define network\n loss = simple_net(image, label)\n\n # Set data source of DataLoader\n #\n # If DataLoader is iterable, places must be given and the number of places must be the same with device number. \n # - If you are using GPU, call `paddle.static.cuda_places()` to get all GPU places. \n # - If you are using CPU, call `paddle.static.cpu_places()` to get all CPU places. \n # \n # If DataLoader is not iterable, places can be None.\n places = static.cuda_places() if USE_GPU else static.cpu_places()\n set_data_source(loader, places)\n\n exe = static.Executor(places[0])\n exe.run(static.default_startup_program())\n\n prog = static.CompiledProgram(static.default_main_program()).with_data_parallel(loss_name=loss.name)\n\n if loader.iterable:\n train_iterable(exe, prog, loss, loader)\n else:\n train_non_iterable(exe, prog, loss, loader)\n\n\n Examples 2:\n\n .. code-block:: python\n\n '''\n Example in dynamic graph mode. \n '''\n import numpy as np\n\n import paddle\n import paddle.nn as nn\n import paddle.optimizer as opt\n import paddle.distributed as dist\n\n BATCH_SIZE = 16\n BATCH_NUM = 4\n EPOCH_NUM = 4\n\n IMAGE_SIZE = 784\n CLASS_NUM = 10\n\n USE_GPU = False # whether to use GPU\n\n def _get_random_images_and_labels(image_shape, label_shape):\n image = np.random.random(size=image_shape).astype('float32')\n label = np.random.random(size=label_shape).astype('int64')\n return image, label\n\n def __reader__():\n for _ in range(BATCH_NUM):\n batch_image, batch_label = _get_random_images_and_labels(\n [BATCH_SIZE, IMAGE_SIZE], [BATCH_SIZE, CLASS_NUM])\n yield batch_image, batch_label\n\n def random_batch_reader():\n return __reader__\n\n class LinearNet(nn.Layer):\n def __init__(self):\n super(LinearNet, self).__init__()\n self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)\n\n @paddle.jit.to_static\n def forward(self, x):\n return self._linear(x)\n\n # set device\n paddle.set_device('gpu' if USE_GPU else 'cpu')\n\n # create network\n layer = LinearNet()\n dp_layer = paddle.DataParallel(layer)\n loss_fn = nn.CrossEntropyLoss()\n adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())\n\n # create data loader\n loader = paddle.io.DataLoader.from_generator(capacity=5)\n loader.set_batch_generator(random_batch_reader())\n\n for epoch_id in range(EPOCH_NUM):\n for batch_id, (image, label) in enumerate(loader()):\n out = layer(image)\n loss = loss_fn(out, label)\n\n loss.backward()\n\n adam.step()\n adam.clear_grad()\n print(\"Epoch {} batch {}: loss = {}\".format(\n epoch_id, batch_id, np.mean(loss.numpy())))\n\n Examples 3:\n\n .. 
code-block:: python\n\n '''\n Example of `drop_last` using in static graph multi-cards mode\n '''\n import paddle\n import paddle.static as static\n import numpy as np\n import os\n\n # We use 2 CPU cores to run inference network \n os.environ['CPU_NUM'] = '2'\n\n paddle.enable_static()\n\n # The data source has only 3 batches, which can not be\n # divided evenly to each CPU core\n def batch_generator(): \n for i in range(3):\n yield np.array([i+1]).astype('float32'), \n\n x = static.data(name='x', shape=[None], dtype='float32') \n y = x * x\n\n def run_inference(drop_last): \n loader = paddle.io.DataLoader.from_generator(feed_list=[x],\n capacity=8, drop_last=drop_last)\n loader.set_batch_generator(batch_generator, static.cpu_places())\n\n exe = static.Executor(paddle.CPUPlace())\n prog = static.CompiledProgram(static.default_main_program())\n prog = prog.with_data_parallel()\n\n result = []\n for data in loader():\n each_ret, = exe.run(prog, feed=data, fetch_list=[y])\n result.extend(each_ret)\n return result\n\n # Set drop_last to True, so that the last batch whose\n # number is less than CPU core number would be discarded.\n print(run_inference(drop_last=True)) # [1.0, 4.0]\n\n # Set drop_last to False, so that the last batch whose\n # number is less than CPU core number can be tested.\n print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]\n \"\"\"\n if in_dygraph_mode():\n return DygraphGeneratorLoader(feed_list, capacity,\n use_double_buffer, iterable,\n return_list, use_multiprocess)\n else:\n return GeneratorLoader(feed_list, capacity, use_double_buffer,\n iterable, return_list, drop_last)\n\n @staticmethod\n def from_dataset(dataset, places, drop_last=True):\n \"\"\"\n .. warning::\n This API will be deprecated in the future, it is recommended to use\n :code:`paddle.io.DataLoader` which supports multi-processes acceleration.\n\n Create an iterable DataLoader object for loading data from Dataset. \n Dataset is only supported in Linux system currently.\n\n Args:\n dataset (InMemoryDataset|QueueDataset): the dataset object.\n places (list(CUDAPlace)|list(CPUPlace)|list(str)): places where the result \n data should be converted. If places is list of string, the string in the list \n can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where x is the index of the GPUs. \n drop_last (bool): whether to drop the last batch whose sample \n number is less than batch size. If drop_last = True, they\n would be dropped. If drop_last = False, they would be kept. \n\n Returns:\n loader (DataLoader): the created DataLoader object, which can be \n treated as a Python generator. \n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n image = static.data(name='image', shape=[None, 784], dtype='float32')\n label = static.data(name='label', shape=[None, 1], dtype='int64')\n\n dataset = paddle.distributed.QueueDataset()\n dataset.init(\n batch_size=32,\n pipe_command='cat',\n use_var=[image, label])\n dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])\n\n loader = paddle.io.DataLoader.from_dataset(dataset, static.cpu_places())\n \"\"\"\n return DatasetLoader(dataset, places, drop_last)\n\n\nclass DygraphGeneratorLoader(DataLoaderBase):\n \"\"\"\n The GeneratorLoader of dygraph\n\n The multiprocess dygraph GeneratorLoader's most functions are different from \n static graph GeneratorLoader, Separate implementation to keep code readable.\n \"\"\"\n\n def __init__(self,\n feed_list=None,\n capacity=None,\n use_double_buffer=True,\n iterable=True,\n return_list=True,\n use_multiprocess=False):\n self._batch_reader = None\n self._places = None\n self._feed_list = feed_list\n\n if not capacity:\n raise ValueError(\"Please give value to capacity.\")\n self._capacity = capacity\n self._use_double_buffer = use_double_buffer\n\n if not iterable:\n warnings.warn(\n \"Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode.\"\n )\n self._iterable = True\n if not return_list:\n warnings.warn(\n \"Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list.\"\n )\n self._return_list = True\n\n # NOTE: the multiprocessing in different platform is incompatible, we will solve it later\n self._use_multiprocess = use_multiprocess\n if self._use_multiprocess and (sys.platform == 'darwin' or\n sys.platform == 'win32'):\n warnings.warn(\n \"NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows.\"\n )\n self._use_multiprocess = False\n\n if self._use_multiprocess:\n # NOTE: the multiprocessing.Queue used to save loading data in self._process\n self._data_queue = None\n # NOTE: this process is used to load data asynchronously from self._batch_reader\n self._process = None\n\n # NOTE: the C++ LoDTensorBlockingQueue instance\n self._blocking_queue = None\n # NOTE: 1. In multiprocess mode, this thread is used to get next batch data from\n # self._data_queue, then push it into self._blocking_queue; 2. 
In singleprocess\n # mode, this thread is used to get next batch data from self._batch_reader, then \n # push it into self._blocking_queue\n self._thread = None\n self._pin_memory = True if use_pinned_memory(\n ) is None else use_pinned_memory()\n\n @property\n def queue(self):\n return self._blocking_queue\n\n @property\n def iterable(self):\n return self._iterable\n\n def _clear_and_remove_data_queue(self):\n if self._data_queue is not None:\n while True:\n try:\n self._data_queue.get_nowait()\n except queue.Empty:\n break\n global multiprocess_queue_set\n multiprocess_queue_set.remove(self._data_queue)\n\n def _wait_thread_ends(self):\n thread = self._thread\n if thread is not None:\n self._blocking_queue.close()\n thread.join()\n\n def _wait_process_ends(self):\n process = self._process\n if process is not None:\n process.join()\n # erase process id\n core._erase_process_pids(id(self))\n\n def _init_iterable(self):\n self._wait_thread_ends()\n if self._use_multiprocess:\n self._wait_process_ends()\n self._var_names = []\n self._shapes = []\n self._dtypes = []\n self._need_check_feed = []\n self._blocking_queue = core.init_lod_tensor_blocking_queue(\n core.Variable(), self._capacity, False)\n self._reader = None\n self._reader = core.create_py_reader(\n self.queue, self._var_names, self._shapes, self._dtypes,\n self._need_check_feed, self._places, self._use_double_buffer, True,\n self._pin_memory)\n\n def _start(self):\n if self._use_multiprocess:\n # clear old _data_queue and remove it from multiprocess_queue_set\n self._clear_and_remove_data_queue()\n # set data_queue and process\n self._data_queue = multiprocessing.Queue(self._capacity)\n # add _data_queue into global queue set\n global multiprocess_queue_set\n multiprocess_queue_set.add(self._data_queue)\n self._process = multiprocessing.Process(\n target=_reader_process_loop,\n args=(self._batch_reader, self._data_queue))\n self._process.daemon = True\n self._process.start()\n\n # Set child process signal handler\n # NOTE: [ avoiding hang ] 1. if the child process dies due to bus error/segfault\n # or just hang, the main process will hang waiting for data, so here need to deal \n # with SIGSEGV and SIGBUS of child process; 2. 
if the main process end before child\n # process, it shuts the all its daemonic children down with a SIGTERM (instead of \n # joining them without a timeout), so here nedd to deal with SIGTERM.\n core._set_process_pids(id(self), [self._process.pid])\n _set_SIGCHLD_handler()\n\n # Set reader_thread\n self._thread_done_event = threading.Event()\n self._thread = threading.Thread(\n target=self._reader_thread_loop_for_multiprocess,\n args=(_current_expected_place(), ))\n self._thread.daemon = True\n self._thread.start()\n else:\n self._thread = threading.Thread(\n target=self._reader_thread_loop_for_singleprocess,\n args=(_current_expected_place(), ))\n self._thread.daemon = True\n self._thread.start()\n\n def _reset(self):\n self._reader.reset()\n self._wait_thread_ends()\n if self._use_multiprocess:\n self._wait_process_ends()\n\n def __iter__(self):\n assert self.iterable, \"DataLoader is not iterable\"\n assert self._batch_reader is not None, \\\n \"Data source of DataLoader has not set yet\"\n\n self._init_iterable()\n self._start()\n return self\n\n def __next__(self):\n try:\n if _in_eager_mode():\n return core.eager.read_next_tensor_list(\n self._reader.read_next_list()[0])\n else:\n return self._reader.read_next_var_list()\n except StopIteration:\n self._reset()\n six.reraise(*sys.exc_info())\n\n def _exit_thread_expectedly(self):\n self._thread_done_event.set()\n self._blocking_queue.close()\n\n def _exit_thread_unexpectedly(self):\n self._thread_done_event.set()\n self._blocking_queue.kill()\n logging.error(\"DataLoader reader thread raised an exception!\")\n\n def _reader_thread_loop_for_multiprocess(self, legacy_expected_place):\n # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.\n _set_expected_place(legacy_expected_place)\n\n while not self._thread_done_event.is_set():\n try:\n # NOTE: [ avoid hanging ] Even with carefully designed data dependencies \n # (i.e., a put() always corresponding to a get()), hanging on get() can \n # still happen when data in queue is corrupted (e.g., due to \n # Queue.cancel_join_thread or unexpected exit). So we set a timeout whenever \n # we try to get data from `data_queue`\n # NOTE: [ avoid failed quickly ] Here, the time setting of QUEUE_GET_TIMEOUT\n # is relatively long, currently it is 60 seconds, because in some models,\n # if the reader child process starts with a heavy burden, the child process\n # has no enough time to put the data in the queue when the main process\n # start trying to get data from queue. At this time, the child thread needs\n # to wait slightly longer\n tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)\n except:\n # NOTE [ avoid handing ] After adding the shared memory mechanism, not only\n # the queue.Empty exception will occur here, but other exceptions will also\n # occur, such as mmap failure. 
If it is not handled here, it will hang.\n self._exit_thread_unexpectedly()\n logging.error(\n \"DataLoader reader thread failed to read data from the multiprocessing.Queue.\"\n )\n six.reraise(*sys.exc_info())\n\n if not self._thread_done_event.is_set():\n if tensor_list is not None:\n try:\n array = core.LoDTensorArray()\n for tensor in tensor_list:\n array.append(tensor)\n if not self._blocking_queue.push(array):\n self._blocking_queue.close()\n except:\n self._exit_thread_unexpectedly()\n six.reraise(*sys.exc_info())\n else:\n self._exit_thread_expectedly()\n\n def _reader_thread_loop_for_singleprocess(self, legacy_expected_place):\n try:\n # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.\n _set_expected_place(legacy_expected_place)\n\n for sample in self._batch_reader():\n array = core.LoDTensorArray()\n for item in sample:\n if not isinstance(item, core.LoDTensor):\n item = self._check_input_array(item)\n tmp = core.LoDTensor()\n tmp.set(item, core.CPUPlace())\n item = tmp\n\n array.append(item)\n\n if not self._blocking_queue.push(array):\n break\n\n self._blocking_queue.close()\n self._thread = None\n except Exception:\n self._blocking_queue.kill()\n self._thread = None\n logging.warning(\n \"DygraphDataLoader reader thread raised an exception.\")\n six.reraise(*sys.exc_info())\n\n def set_sample_generator(self,\n reader,\n batch_size,\n drop_last=True,\n places=None):\n assert batch_size > 0, \"batch_size must be larger than 0\"\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n self.set_sample_list_generator(\n paddle.batch(\n reader, batch_size=batch_size, drop_last=drop_last),\n places=places)\n return self\n\n def set_sample_list_generator(self, reader, places=None):\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n\n def __batch_reader_impl__():\n for batch in reader():\n slots = []\n for items in batch:\n for i, item in enumerate(items):\n if len(slots) < len(items):\n slots.append([item])\n else:\n slots[i].append(item)\n yield slots\n\n self.set_batch_generator(__batch_reader_impl__, places)\n return self\n\n def set_batch_generator(self, reader, places=None):\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n self._batch_reader = reader\n if places is None:\n places = _current_expected_place()\n self._places = _convert_places(places)\n assert len(self._places) == 1, \\\n \"Number of places must be 1 in imperative mode\"\n return self\n\n\nclass GeneratorLoader(DataLoaderBase):\n def __init__(self,\n feed_list=None,\n capacity=None,\n use_double_buffer=True,\n iterable=True,\n return_list=False,\n drop_last=True):\n self._tensor_reader = None\n self._places = None\n self._thread = None\n self._queue = None\n self._feed_list = feed_list\n self._exited = False\n self._drop_last = drop_last\n self._keep_order = keep_data_loader_order()\n if not capacity:\n raise ValueError(\"Please give value to capacity.\")\n self._iterable = iterable\n self._return_list = return_list\n if not self._feed_list:\n raise Exception(\"Feed list must be given under static mode.\")\n self._use_double_buffer = use_double_buffer\n self._capacity = capacity\n if not self._iterable:\n self._init_non_iterable()\n\n def _wait_thread_ends(self):\n # Get self._thread first to prevent data race, because __thread_main__\n # would set 
self._thread be None at the end\n thread = self._thread\n if thread is not None and self._iterable:\n self._queue.close()\n thread.join()\n\n def _init_iterable(self):\n self._wait_thread_ends()\n self._var_names = [v.name for v in self._feed_list]\n self._shapes = [v.shape for v in self._feed_list]\n self._dtypes = [v.dtype for v in self._feed_list]\n self._need_check_feed = [\n v.desc.need_check_feed() for v in self._feed_list\n ]\n self._queue = core.init_lod_tensor_blocking_queue(\n core.Variable(), self._capacity, self._keep_order)\n self._reader = None\n self._reader = core.create_py_reader(\n self.queue, self._var_names, self._shapes, self._dtypes,\n self._need_check_feed, self._places, self._use_double_buffer,\n self._drop_last, False)\n\n def _init_non_iterable(self):\n lod_levels = []\n dtypes = []\n shape_concat = []\n ranks = []\n shapes = []\n need_check_feed = []\n\n for feed_data in self._feed_list:\n dtypes.append(feed_data.dtype)\n shape_concat.extend(feed_data.shape)\n ranks.append(len(feed_data.shape))\n shapes.append(feed_data.shape)\n lod_levels.append(feed_data.lod_level)\n need_check_feed.append(int(feed_data.desc.need_check_feed()))\n\n queue_name = data_loader_unique_name_generator(\n 'lod_tensor_blocking_queue')\n reader_name = data_loader_unique_name_generator('create_py_reader')\n double_buffer_name = data_loader_unique_name_generator('double_buffer')\n\n var = global_scope().var(queue_name)\n self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,\n self._keep_order)\n\n if self._keep_order:\n block = default_main_program().current_block()\n else:\n block = default_startup_program().current_block()\n\n reader_var = block.create_var(name=reader_name)\n\n dtype_int = [int(t) for t in dtypes]\n block.append_op(\n type='create_py_reader',\n inputs={'blocking_queue': [queue_name]},\n outputs={'Out': [reader_var]},\n attrs={\n 'shape_concat': shape_concat,\n 'lod_levels': lod_levels,\n 'dtypes': dtype_int,\n 'need_check_feed': need_check_feed,\n 'ranks': ranks\n })\n\n reader_var.desc.set_dtypes(dtypes)\n reader_var.persistable = True\n reader_var.stop_gradient = True\n\n if self._keep_order:\n main_prog_var = reader_var\n reader = main_prog_var\n reader.reset = self._queue.reset\n else:\n main_prog_var = _copy_reader_var_(\n default_main_program().current_block(), reader_var)\n\n main_prog_var.stop_gradient = True\n main_prog_var.persistable = True\n\n reader = monkey_patch_reader_methods(main_prog_var)\n\n if self._use_double_buffer:\n double_buffer_reader = double_buffer(\n reader, name=double_buffer_name)\n # we return a double buffer reader. 
However, the reset method comes from\n # py_reader.\n double_buffer_reader.reset = reader.reset\n reader = double_buffer_reader\n\n self._reader = reader\n\n default_main_program().current_block().append_op(\n type='read',\n inputs={'Reader': [self._reader]},\n outputs={'Out': self._feed_list},\n attrs={'drop_last': self._drop_last})\n\n @property\n def queue(self):\n return self._queue\n\n @property\n def iterable(self):\n return self._iterable\n\n def __iter__(self):\n assert self.iterable, \"DataLoader is not iterable\"\n assert self._tensor_reader is not None, \\\n \"Data source of DataLoader has not set yet\"\n\n self._init_iterable()\n self._start()\n return self\n\n def __next__(self):\n try:\n if self._return_list:\n data = self._reader.read_next_list()\n for i in range(len(data)):\n data[i] = data[i]._move_to_list()\n return data\n else:\n return self._reader.read_next()\n except StopIteration:\n self._queue.close()\n self._reset()\n six.reraise(*sys.exc_info())\n\n def start(self):\n assert not self._iterable, \"start() cannot be called when DataLoader is iterable\"\n self._start()\n\n def reset(self):\n assert not self._iterable, \"reset() cannot be called when DataLoader is iterable\"\n self._reset()\n\n def _start(self):\n def __thread_main__(legacy_expected_place):\n try:\n # See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.\n _set_expected_place(legacy_expected_place)\n\n while not self._queue.wait_for_inited(1):\n if self._exited:\n return\n\n for tensors in self._tensor_reader():\n array = core.LoDTensorArray()\n for item in tensors:\n if not isinstance(item, core.LoDTensor):\n item = self._check_input_array(item)\n tmp = core.LoDTensor()\n tmp.set(item, core.CPUPlace())\n item = tmp\n\n array.append(item)\n\n if not self._queue.push(array):\n break\n\n self._queue.close()\n self._thread = None\n except Exception as ex:\n self._queue.kill()\n self._thread = None\n logging.warning('Your reader has raised an exception!')\n six.reraise(*sys.exc_info())\n\n self._thread = threading.Thread(\n target=__thread_main__, args=(_current_expected_place(), ))\n self._thread.daemon = True\n self._thread.start()\n\n def _reset(self):\n self._queue.close()\n self._exited = True\n thread = self._thread\n if thread is not None:\n thread.join()\n\n self._exited = False\n self._reader.reset()\n\n def set_sample_generator(self,\n reader,\n batch_size,\n drop_last=True,\n places=None):\n assert batch_size > 0, \"batch_size must be larger than 0\"\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n has_lod = False\n for f in self._feed_list:\n if f.lod_level != 0:\n has_lod = True\n break\n\n if has_lod:\n self.set_sample_list_generator(\n paddle.batch(\n reader, batch_size=batch_size, drop_last=drop_last),\n places=places)\n else:\n reader = BatchedTensorProvider(\n feed_list=self._feed_list,\n place=core.CPUPlace(),\n batch_size=batch_size,\n generator=reader,\n drop_last=drop_last)\n self.set_batch_generator(reader, places=places)\n return self\n\n def set_sample_list_generator(self, reader, places=None):\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n with program_guard(Program(), Program()):\n feeder = DataFeeder(\n feed_list=self._feed_list, place=core.CPUPlace())\n paddle_reader = feeder.decorate_reader(reader, multi_devices=False)\n\n def __tensor_reader_impl__():\n for slots in paddle_reader():\n 
yield [slots[var.name] for var in self._feed_list]\n\n self.set_batch_generator(__tensor_reader_impl__, places)\n return self\n\n def set_batch_generator(self, reader, places=None):\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n self._tensor_reader = reader\n if self._iterable:\n assert places is not None, \"Places cannot be None when DataLoader is iterable\"\n self._places = _convert_places(places)\n else:\n if places is not None:\n logging.info(\n 'places would be ommited when DataLoader is not iterable')\n return self\n\n\nclass PyReader(DataLoaderBase):\n r\"\"\"\n Create a reader object for data feeding in Python. \n Data would be prefetched using Python thread and be pushed\n into a queue asynchronously. Data in the queue would be extracted \n automatically when `Executor.run(...)` is called.\n\n Args: \n feed_list (list(Variable)|tuple(Variable)): feed variable list.\n The variables should be created by :code:`fluid.layers.data()`.\n capacity (int): capacity of the queue maintained in PyReader.\n The unit is batch number. Set larger capacity if your reader \n is fast. \n use_double_buffer (bool): whether to use double_buffer_reader. \n If use_double_buffer=True, PyReader would prefetch next \n batch data asynchronously, so it would speed up data feeding \n and occupies a little more CPU or GPU memory, i.e., the memory\n of one batch input data. \n iterable (bool): whether the created PyReader is iterable. \n return_list (bool): whether the return value on each device is \n presented as a list. It is only valid when iterable=True. \n If return_list=False, the return value on each device would \n be a dict of str -> LoDTensor, where the key of the dict is \n the name of each fed variables. If return_list=True, the \n return value on each device would be a list(LoDTensor). It is\n recommended to use return_list=False in static graph mode and\n use return_list=True in dygraph mode. \n\n Returns:\n the created reader object.\n\n Return type:\n reader(Reader)\n\n Examples:\n 1. If iterable = False, the created PyReader object is almost the\n same as :code:`fluid.layers.py_reader()`. Operators would be \n inserted into the program. User should call :code:`start()` \n before each epoch and catch :code:`fluid.core.EOFException`\n thrown by :code:`Executor.run()` when epoch ends. Once the \n exception is caught, user should call :code:`reset()` to reset \n the reader manually.\n\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 5\n BATCH_SIZE = 3\n \n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def reader_creator_random_image_and_label(height, width):\n def reader():\n for i in range(ITER_NUM):\n fake_image = np.random.uniform(low=0,\n high=255,\n size=[height, width])\n fake_label = np.ones([1])\n yield fake_image, fake_label\n return reader\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n\n reader = fluid.io.PyReader(feed_list=[image, label],\n capacity=4,\n iterable=False)\n\n user_defined_reader = reader_creator_random_image_and_label(784, 784)\n reader.decorate_sample_list_generator(\n paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))\n loss = network(image, label)\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n for i in range(EPOCH_NUM):\n reader.start()\n while True:\n try:\n executor.run(feed=None)\n except fluid.core.EOFException:\n reader.reset()\n break\n\n \n 2. If iterable=True, the created PyReader object is decoupled with\n the program. No operator would be inserted into the program. \n In this case, the created reader is a Python generator, which \n is iterable. User should feed the data yielded from PyReader \n object into :code:`Executor.run(feed=...)`. \n\n .. code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 5\n BATCH_SIZE = 10\n\n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def reader_creator_random_image(height, width):\n def reader():\n for i in range(ITER_NUM):\n fake_image = np.random.uniform(low=0, high=255, size=[height, width])\n fake_label = np.ones([1])\n yield fake_image, fake_label \n return reader\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)\n\n user_defined_reader = reader_creator_random_image(784, 784)\n reader.decorate_sample_list_generator(\n paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),\n fluid.core.CPUPlace())\n \n loss = network(image, label)\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n \n for _ in range(EPOCH_NUM):\n for data in reader():\n executor.run(feed=data, fetch_list=[loss])\n\n\n 3. If return_list=True, the return values would be presented as list instead of dict. \n This is usually used in dygraph mode.\n\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n ITER_NUM = 5\n BATCH_SIZE = 10\n\n def reader_creator_random_image(height, width):\n def reader():\n for i in range(ITER_NUM):\n yield np.random.uniform(low=0, high=255, size=[height, width]), \\\n np.random.random_integers(low=0, high=9, size=[1])\n return reader\n\n place = fluid.CPUPlace()\n with fluid.dygraph.guard(place):\n py_reader = fluid.io.PyReader(capacity=2, return_list=True)\n user_defined_reader = reader_creator_random_image(784, 784)\n py_reader.decorate_sample_list_generator(\n paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),\n place)\n for image, label in py_reader():\n relu = fluid.layers.relu(image)\n \"\"\"\n\n def __init__(self,\n feed_list=None,\n capacity=None,\n use_double_buffer=True,\n iterable=True,\n return_list=False):\n self._loader = DataLoader.from_generator(\n feed_list, capacity, use_double_buffer, iterable, return_list)\n\n @property\n def queue(self):\n return self._loader.queue\n\n @property\n def iterable(self):\n return self._loader.iterable\n\n def __iter__(self):\n return self._loader.__iter__()\n\n def __next__(self):\n return self._loader.__next__()\n\n def start(self):\n '''\n Start the data feeding thread. \n Can only call when the reader object is not iterable. \n \n\tExample:\n\t .. code-block:: python\n \n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n BATCH_SIZE = 10\n\n def generator():\n for i in range(5):\n yield np.random.uniform(low=0, high=255, size=[784, 784]),\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)\n reader.decorate_sample_list_generator(\n paddle.batch(generator, batch_size=BATCH_SIZE))\n\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n for i in range(3):\n reader.start()\n while True:\n try:\n executor.run(feed=None)\n except fluid.core.EOFException:\n reader.reset()\n break\n\n\t '''\n self._loader.start()\n\n def reset(self):\n '''\n Reset the reader object when :code:`fluid.core.EOFException` raises. \n Can only call when the reader object is not iterable.\n \n Example:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n BATCH_SIZE = 10\n\n def generator():\n for i in range(5):\n yield np.random.uniform(low=0, high=255, size=[784, 784]),\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)\n reader.decorate_sample_list_generator(\n paddle.batch(generator, batch_size=BATCH_SIZE))\n\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n for i in range(3):\n reader.start()\n while True:\n try:\n executor.run(feed=None)\n except fluid.core.EOFException:\n reader.reset()\n break \n\n '''\n self._loader.reset()\n\n def decorate_sample_generator(self,\n sample_generator,\n batch_size,\n drop_last=True,\n places=None):\n '''\n Set the data source of the PyReader object.\n \n The provided :code:`sample_generator` should be a Python generator,\n which yields list(numpy.ndarray)-typed data of each sample.\n\n :code:`places` must be set when the PyReader object is iterable.\n\n If all inputs have no lods, this method is faster than \n :code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .\n\n Args:\n sample_generator (generator): Python generator that yields\n list(numpy.ndarray)-typed sample data.\n batch_size (int): batch size. Must be larger than 0.\n drop_last (bool): Whether to drop the last batch when sample number\n is less than batch_size. \n places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must\n be provided when PyReader is iterable.\n\n Example:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 15\n BATCH_SIZE = 3\n \n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def random_image_and_label_generator(height, width):\n def generator():\n for i in range(ITER_NUM):\n fake_image = np.random.uniform(low=0,\n high=255,\n size=[height, width])\n fake_label = np.array([1])\n yield fake_image, fake_label\n return generator\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)\n\n user_defined_generator = random_image_and_label_generator(784, 784)\n reader.decorate_sample_generator(user_defined_generator,\n batch_size=BATCH_SIZE,\n places=[fluid.CPUPlace()])\n loss = network(image, label)\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n\n for _ in range(EPOCH_NUM):\n for data in reader():\n executor.run(feed=data, fetch_list=[loss])\n \n '''\n self._loader.set_sample_generator(sample_generator, batch_size,\n drop_last, places)\n\n def decorate_sample_list_generator(self, reader, places=None):\n '''\n Set the data source of the PyReader object. \n\n The provided :code:`reader` should be a Python generator,\n which yields list(numpy.ndarray) typed batched data. \n \n :code:`places` must be set when the PyReader object is iterable.\n\n Args:\n reader (generator): Python generator that yields \n list(numpy.ndarray)-typed batched data. \n places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must\n be provided when PyReader is iterable.\n \n Example:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 15\n BATCH_SIZE = 3\n\n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def random_image_and_label_generator(height, width):\n def generator():\n for i in range(ITER_NUM):\n fake_image = np.random.uniform(low=0,\n high=255,\n size=[height, width])\n fake_label = np.ones([1])\n yield fake_image, fake_label\n return generator\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)\n\n user_defined_generator = random_image_and_label_generator(784, 784)\n reader.decorate_sample_list_generator(\n paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),\n fluid.core.CPUPlace())\n \n loss = network(image, label)\n executor = fluid.Executor(fluid.core.CPUPlace())\n executor.run(fluid.default_startup_program())\n\n for _ in range(EPOCH_NUM):\n for data in reader():\n executor.run(feed=data, fetch_list=[loss])\n \n '''\n self._loader.set_sample_list_generator(reader, places)\n\n def decorate_batch_generator(self, reader, places=None):\n '''\n Set the data source of the PyReader object.\n\n The provided :code:`reader` should be a Python generator,\n which yields numpy.ndarray-typed or LoDTensor-typed batched data.\n\n :code:`places` must be set when the PyReader object is iterable.\n\n Args:\n reader (generator): Python generator that yields LoDTensor-typed\n batched data.\n places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must\n be provided when PyReader is iterable.\n\n Example:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n EPOCH_NUM = 3\n ITER_NUM = 15\n BATCH_SIZE = 3\n \n def network(image, label):\n # User-defined network, here is an example of softmax regression.\n predict = fluid.layers.fc(input=image, size=10, act='softmax') \n return fluid.layers.cross_entropy(input=predict, label=label)\n\n def random_image_and_label_generator(height, width):\n def generator():\n for i in range(ITER_NUM):\n batch_image = np.random.uniform(low=0,\n high=255,\n size=[BATCH_SIZE, height, width])\n batch_label = np.ones([BATCH_SIZE, 1])\n batch_image = batch_image.astype('float32')\n batch_label = batch_label.astype('int64')\n yield batch_image, batch_label\n return generator\n\n image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')\n label = fluid.data(name='label', shape=[None, 1], dtype='int64')\n reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)\n\n user_defined_generator = random_image_and_label_generator(784, 784)\n reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())\n \n loss = network(image, label)\n executor = fluid.Executor(fluid.CPUPlace())\n executor.run(fluid.default_startup_program())\n\n for _ in range(EPOCH_NUM):\n for data in reader():\n executor.run(feed=data, fetch_list=[loss])\n\n '''\n self._loader.set_batch_generator(reader, places)\n\n\nclass DatasetLoader(DataLoaderBase):\n def __init__(self, dataset, places, drop_last):\n assert isinstance(dataset, paddle.distributed.fleet.dataset.\n DatasetBase), \"dataset must be type of DatasetBase\"\n assert not in_dygraph_mode(\n ), \"DatasetLoader is not supported in dygraph mode yet\"\n if isinstance(places, (list, tuple)):\n places = _get_paddle_place_list(places)\n else:\n places = _get_paddle_place(places)\n\n thread_num = len(places)\n\n assert len(dataset.filelist) >= thread_num, \\\n \"Filelist number of dataset {} must be not less than place number {}\".format(len(dataset.filelist), thread_num)\n\n if dataset.thread_num != 0 and dataset.thread_num != thread_num:\n logging.warn('thread_num {} which is set in Dataset is ignored'.\n format(dataset.thread_num))\n\n dataset._set_thread(thread_num)\n\n if isinstance(dataset, paddle.distributed.fleet.dataset.\n InMemoryDataset) and dataset.queue_num > thread_num:\n logging.warn(\"queue_num {} which is set in Dataset is ignored\".\n format(dataset.queue_num))\n dataset._set_queue_num(thread_num)\n\n self._dataset = dataset\n use_slots = [\n slot.name for slot in dataset.proto_desc.multi_slot_desc.slots\n if slot.is_used\n ]\n\n self._iterable_dataset = core.IterableDatasetWrapper(\n dataset.dataset, use_slots,\n _convert_places(places), dataset.proto_desc.batch_size, drop_last)\n\n def __iter__(self):\n self._dataset._finish_to_run()\n self._dataset._prepare_to_run()\n self._iterable_dataset._start()\n return self\n\n def __next__(self):\n return self._iterable_dataset._next()\n"
] | [
[
"numpy.asarray"
]
] |
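The record above bundles Paddle's fluid reader/PyReader implementation. One small piece that is easy to miss is the slot transposition in set_sample_list_generator: a batch of per-sample tuples is regrouped into one list per feed variable before being handed to set_batch_generator. The sketch below is a minimal, framework-free illustration of that helper; the toy (image, label) batch and the use of numpy.asarray to inspect the resulting shapes are assumptions for the example, not part of the recorded file.

import numpy as np

def batch_to_slots(batch):
    # Mirrors __batch_reader_impl__ in the record above: slots[i] collects the
    # i-th field of every sample, so a batch of (image, label) pairs becomes
    # [list_of_images, list_of_labels].
    slots = []
    for items in batch:
        for i, item in enumerate(items):
            if len(slots) < len(items):
                slots.append([item])
            else:
                slots[i].append(item)
    return slots

if __name__ == "__main__":
    # Hypothetical toy batch of three (image, label) samples.
    batch = [(np.random.rand(2, 2).astype("float32"), np.array([k]))
             for k in range(3)]
    images, labels = batch_to_slots(batch)
    print(np.asarray(images).shape)  # (3, 2, 2)
    print(np.asarray(labels).shape)  # (3, 1)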
krishna1401/Digital-Image-Processing | [
"47a4da4bef9d08708ac84174b0fcd0ced6a8b5e2"
] | [
"edgeDetection.py"
] | [
"#Perform Edge Detection using Roberts Cross Gradient & Sobel Operators over an Image\n\nimport cv2\nimport math\nimport numpy as np\n\ndef robertCrossGradient(image):\n\t#Objective: Performing Robert Cross Gradient Edge Detection over an Image\n\t#Input: Original Image\n\t#Output: Resultant Image\n\t\n\t#Robert Cross Operator\n\t# x 0 1\n\t#\t-1 0\n\t# y 1 0\n\t#\t 0 -1\n\timage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Converting Image to Gray Scale\n\tresultant_image = image.copy()\n\tfor i in range(0,image.shape[0]-1):\n\t for j in range(0,image.shape[1]-1):\n\t gx = image[i, j+1] - image[i+1, j]\n\t gy = image[i, j] - image[i+1, j+1]\n\t resultant_image[i, j] = math.sqrt(gx*gx + gy*gy)\t\n\t\n\treturn resultant_image\n\ndef sobelOperator(image):\n #Objective: Performing Sobel Edge Detection over an Image\n\t#Input: Original Image\n\t#Output: Resultant Image\n\t\n\t#Sobel Operator\n\t\n\t# x -1 -2 -1\n\t# 0 0 0\n\t# 1 2 1\n\t\n\t#y -1 0 1\n\t# -2 0 2\n\t# -1 0 1\n\t\n\timage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Converting Image to Gray Scale\n\tresultant_image = image.copy()\n\t\n\t#Applying Padding\n\trows,cols = image.shape\n\timage = np.insert(image,0,0,axis=0) #top\n\timage = np.insert(image,rows+1,0,axis=0) #bottom\n\timage = np.insert(image,0,0,axis=1) #left\n\timage = np.insert(image,cols+1,0,axis=1) #right\n\t\n\tfor i in range(1, image.shape[0]-1):\n\t for j in range(1, image.shape[1]-1):\n\t fx = image[i+1, j-1] + 2*image[i+1, j] + image[i+1, j+1] - image[i-1, j-1] - 2*image[i-1, j] - image[i+1, j-1]\n\t fy = image[i-1, j+1] + 2*image[i, j+1] + image[i+1, j+1] - image[i-1, j-1] - 2*image[i, j-1] - image[i+1, j-1]\n\t resultant_image[i-1, j-1] = math.sqrt(fx*fx + fy*fy)\n\t\n\treturn resultant_image\n\nimg = cv2.imread('image5.jpg')\noutput = sobelOperator(img)\n\ncv2.imshow('image',output)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n"
] | [
[
"numpy.insert"
]
] |
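The edgeDetection.py record above zero-pads with numpy.insert and then loops over pixels to apply the Roberts cross and Sobel operators. Note that its fx expression ends with "- image[i+1, j-1]", whereas the kernel drawn in its own comment block would subtract image[i-1, j+1]. The sketch below is a minimal NumPy-only take on the Sobel gradient magnitude that keeps the record's numpy.insert padding but replaces the explicit loops with array slicing and uses the standard kernels; the synthetic test image is an assumption for the example.

import numpy as np

def sobel_magnitude(gray):
    # Gradient magnitude with the standard 3x3 Sobel kernels, computed on a
    # zero-padded copy of the input (same padding scheme as the record).
    gray = gray.astype(np.float64)
    rows, cols = gray.shape
    p = np.insert(gray, 0, 0, axis=0)      # pad top
    p = np.insert(p, rows + 1, 0, axis=0)  # pad bottom
    p = np.insert(p, 0, 0, axis=1)         # pad left
    p = np.insert(p, cols + 1, 0, axis=1)  # pad right

    # 3x3 neighbourhood views around every interior pixel of the padded array.
    tl, tc, tr = p[:-2, :-2], p[:-2, 1:-1], p[:-2, 2:]
    ml, mr = p[1:-1, :-2], p[1:-1, 2:]
    bl, bc, br = p[2:, :-2], p[2:, 1:-1], p[2:, 2:]

    gx = (bl + 2 * bc + br) - (tl + 2 * tc + tr)  # x kernel: bottom row minus top row
    gy = (tr + 2 * mr + br) - (tl + 2 * ml + bl)  # y kernel: right column minus left column
    return np.sqrt(gx * gx + gy * gy)

if __name__ == "__main__":
    # Synthetic test image: a bright square on a dark background.
    img = np.zeros((8, 8))
    img[2:6, 2:6] = 255
    print(sobel_magnitude(img).shape)  # (8, 8)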
strawberryfg/xraygan | [
"047474b0244e530f78b28db67564304cff692f5e"
] | [
"full_code/test_apr27.py"
] | [
"import os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport math\nimport numpy as np\nfrom scipy import linalg\nfrom os import path as osp\nimport cv2\nimport random\nimport matplotlib.pyplot as plt\nimport pdb\n\n#0. torch imports\nimport torch\nfrom torch.utils.data import DataLoader,Dataset\nfrom torch import optim,nn\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom torchvision import transforms as T\nimport torch.nn.functional as F\nimport torchvision.utils as vutils\nfrom torchvision import models\n\nfrom torchvision.models.resnet import BasicBlock, Bottleneck\nfrom torchvision.models.resnet import model_urls\n\n\n## Data parallel\n\"\"\"Encoding Data Parallel\"\"\"\nimport threading\nimport functools\nfrom torch.autograd import Variable, Function\nimport torch.cuda.comm as comm\nfrom torch.nn.parallel.data_parallel import DataParallel\nfrom torch.nn.parallel.parallel_apply import get_a_var\nfrom torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast\n\n# [DATA PARALLEL]\n\n__all__ = ['allreduce', 'DataParallelModel', 'DataParallelCriterion',\n 'patch_replication_callback']\n\ndef allreduce(*inputs):\n \"\"\"Cross GPU all reduce autograd operation for calculate mean and\n variance in SyncBN.\n \"\"\"\n return AllReduce.apply(*inputs)\n\n\nclass AllReduce(Function):\n @staticmethod\n def forward(ctx, num_inputs, *inputs):\n ctx.num_inputs = num_inputs\n ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)]\n inputs = [inputs[i:i + num_inputs]\n for i in range(0, len(inputs), num_inputs)]\n # sort before reduce sum\n inputs = sorted(inputs, key=lambda i: i[0].get_device())\n results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])\n outputs = comm.broadcast_coalesced(results, ctx.target_gpus)\n return tuple([t for tensors in outputs for t in tensors])\n\n @staticmethod\n def backward(ctx, *inputs):\n inputs = [i.data for i in inputs]\n inputs = [inputs[i:i + ctx.num_inputs]\n for i in range(0, len(inputs), ctx.num_inputs)]\n results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])\n outputs = comm.broadcast_coalesced(results, ctx.target_gpus)\n return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors])\n\n\nclass Reduce(Function):\n @staticmethod\n def forward(ctx, *inputs):\n ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))]\n inputs = sorted(inputs, key=lambda i: i.get_device())\n return comm.reduce_add(inputs)\n\n @staticmethod\n def backward(ctx, gradOutput):\n return Broadcast.apply(ctx.target_gpus, gradOutput)\n\n\nclass DataParallelModel(DataParallel):\n \"\"\"Implements data parallelism at the module level.\n\n This container parallelizes the application of the given module by\n splitting the input across the specified devices by chunking in the\n batch dimension.\n In the forward pass, the module is replicated on each device,\n and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module.\n Note that the outputs are not gathered, please use compatible\n :class:`encoding.parallel.DataParallelCriterion`.\n\n The batch size should be larger than the number of GPUs used. 
It should\n also be an integer multiple of the number of GPUs so that each chunk is\n the same size (so that each GPU processes the same number of samples).\n\n Args:\n module: module to be parallelized\n device_ids: CUDA devices (default: all devices)\n\n Reference:\n Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,\n Amit Agrawal. Context Encoding for Semantic Segmentation.\n *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*\n\n Example::\n\n >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])\n >>> y = net(x)\n \"\"\"\n def gather(self, outputs, output_device):\n return outputs\n\n def replicate(self, module, device_ids):\n modules = super(DataParallelModel, self).replicate(module, device_ids)\n execute_replication_callbacks(modules)\n return modules\n\n\n\nclass DataParallelCriterion(DataParallel):\n \"\"\"\n Calculate loss in multiple-GPUs, which balance the memory usage for\n Semantic Segmentation.\n\n The targets are splitted across the specified devices by chunking in\n the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`.\n\n Reference:\n Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,\n Amit Agrawal. Context Encoding for Semantic Segmentation.\n *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*\n\n Example::\n\n >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])\n >>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2])\n >>> y = net(x)\n >>> loss = criterion(y, target)\n \"\"\"\n def forward(self, inputs, *targets, **kwargs):\n # input should be already scatterd\n # scattering the targets instead\n # if not self.device_ids:\n # return self.module(inputs, *targets, **kwargs)\n targets, kwargs = self.scatter(targets, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n return self.module(inputs, *targets[0])\n replicas = self.replicate(self.module, self.device_ids[:len(inputs)])\n outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)\n return Reduce.apply(*outputs) / len(outputs)\n\n #return self.gather(outputs, self.output_device).mean()\n\n\ndef _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):\n assert len(modules) == len(inputs)\n assert len(targets) == len(inputs)\n if kwargs_tup:\n assert len(modules) == len(kwargs_tup)\n else:\n kwargs_tup = ({},) * len(modules)\n if devices is not None:\n assert len(modules) == len(devices)\n else:\n devices = [None] * len(modules)\n\n lock = threading.Lock()\n results = {}\n if torch_ver != \"0.3\":\n grad_enabled = torch.is_grad_enabled()\n\n def _worker(i, module, input, target, kwargs, device=None):\n if torch_ver != \"0.3\":\n torch.set_grad_enabled(grad_enabled)\n if device is None:\n device = get_a_var(input).get_device()\n try:\n with torch.cuda.device(device):\n output = module(input, *target)\n with lock:\n results[i] = output\n except Exception as e:\n with lock:\n results[i] = e\n\n if len(modules) > 1:\n threads = [threading.Thread(target=_worker,\n args=(i, module, input, target,\n kwargs, device),)\n for i, (module, input, target, kwargs, device) in\n enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]\n\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n else:\n _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])\n\n outputs = []\n for i in range(len(inputs)):\n output = results[i]\n if 
isinstance(output, Exception):\n raise output\n outputs.append(output)\n return outputs\n\n\n###########################################################################\n# Adapted from Synchronized-BatchNorm-PyTorch.\n# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch\n#\nclass CallbackContext(object):\n pass\n\n\ndef execute_replication_callbacks(modules):\n \"\"\"\n Execute an replication callback `__data_parallel_replicate__` on each module created\n by original replication.\n\n The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`\n\n Note that, as all modules are isomorphism, we assign each sub-module with a context\n (shared among multiple copies of this module on different devices).\n Through this context, different copies can share some information.\n\n We guarantee that the callback on the master copy (the first copy) will be called ahead\n of calling the callback of any slave copies.\n \"\"\"\n master_copy = modules[0]\n nr_modules = len(list(master_copy.modules()))\n ctxs = [CallbackContext() for _ in range(nr_modules)]\n\n for i, module in enumerate(modules):\n for j, m in enumerate(module.modules()):\n if hasattr(m, '__data_parallel_replicate__'):\n m.__data_parallel_replicate__(ctxs[j], i)\n\n\ndef patch_replication_callback(data_parallel):\n \"\"\"\n Monkey-patch an existing `DataParallel` object. Add the replication callback.\n Useful when you have customized `DataParallel` implementation.\n\n Examples:\n > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)\n > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])\n > patch_replication_callback(sync_bn)\n # this is equivalent to\n > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)\n > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])\n \"\"\"\n\n assert isinstance(data_parallel, DataParallel)\n\n old_replicate = data_parallel.replicate\n\n @functools.wraps(old_replicate)\n def new_replicate(module, device_ids):\n modules = old_replicate(module, device_ids)\n execute_replication_callbacks(modules)\n return modules\n\n data_parallel.replicate = new_replicate\n\n\n\n\n\n# 0. 
ResNet 18\n# ResNet Classifier\n#class BasicBlock(nn.Module):\n# expansion = 1\n\n# def __init__(self, in_planes, planes, stride=1):\n# super(BasicBlock, self).__init__()\n# self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n# self.bn1 = nn.BatchNorm2d(planes)\n# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n# self.bn2 = nn.BatchNorm2d(planes)\n\n# self.shortcut = nn.Sequential()\n# if stride != 1 or in_planes != self.expansion*planes:\n# self.shortcut = nn.Sequential(\n# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n# nn.BatchNorm2d(self.expansion*planes)\n# )\n\n# def forward(self, x):\n# out = F.relu(self.bn1(self.conv1(x)))\n# out = self.bn2(self.conv2(out))\n# out += self.shortcut(x)\n# out = F.relu(out)\n# return out\n\n\n\n# gram matrix and loss\nclass GramMatrix(nn.Module):\n def forward(self, input):\n b, c, h, w = input.size()\n F = input.view(b, c, h * w)\n G = torch.bmm(F, F.transpose(1,2)) \n G.div_(h * w)\n return G\n\nclass GramMSELoss(nn.Module):\n def forward(self, input, target):\n out = nn.MSELoss()(GramMatrix()(input), target)\n return(out)\n\n\nclass ResDeconvNet(nn.Module):\n def __init__(self, backbone):\n super(ResDeconvNet, self).__init__()\n self.backbone = backbone\n\n def forward(self, x, y):\n x = torch.cat((x, y), dim = 1)\n x = self.backbone(x)\n\n return x\n\n\n# Conv Layer\nclass ConvLayer(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride):\n super(ConvLayer, self).__init__()\n padding = kernel_size // 2\n self.reflection_pad = nn.ReflectionPad2d(padding)\n self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride) #, padding)\n\n def forward(self, x):\n out = self.reflection_pad(x)\n out = self.conv2d(out)\n return out\n\n# Upsample Conv Layer\nclass UpsampleConvLayer(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):\n super(UpsampleConvLayer, self).__init__()\n self.upsample = upsample\n if upsample:\n self.upsample = nn.Upsample(scale_factor=upsample, mode='nearest')\n reflection_padding = kernel_size // 2\n self.reflection_pad = nn.ReflectionPad2d(reflection_padding)\n self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)\n\n def forward(self, x):\n if self.upsample:\n x = self.upsample(x)\n out = self.reflection_pad(x)\n out = self.conv2d(out)\n return out\n\n# Residual Block\n# adapted from pytorch tutorial\n# https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/02-\n# intermediate/deep_residual_network/main.py\nclass ResidualBlock(nn.Module):\n def __init__(self, channels):\n super(ResidualBlock, self).__init__()\n self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)\n self.in1 = nn.InstanceNorm2d(channels, affine=True)\n self.relu = nn.ReLU()\n self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)\n self.in2 = nn.InstanceNorm2d(channels, affine=True)\n\n def forward(self, x):\n residual = x\n out = self.relu(self.in1(self.conv1(x)))\n out = self.in2(self.conv2(out))\n out = out + residual\n out = self.relu(out)\n return out \n\n\n#vgg definition that conveniently let's you grab the outputs from any layer\nclass VGG(nn.Module):\n def __init__(self, pool='max'):\n super(VGG, self).__init__()\n #vgg modules\n self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)\n self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)\n self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, 
padding=1)\n self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)\n self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.conv3_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)\n self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)\n self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv4_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n self.conv5_4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n if pool == 'max':\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)\n self.pool6 = nn.MaxPool2d(kernel_size=8, stride=8)\n elif pool == 'avg':\n self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool3 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool4 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool5 = nn.AvgPool2d(kernel_size=2, stride=2)\n self.pool6 = nn.AvgPool2d(kernel_size=8, stride=8)\n \n def forward(self, x, out_keys):\n out = {}\n out['r11'] = F.relu(self.conv1_1(x))\n out['r12'] = F.relu(self.conv1_2(out['r11']))\n out['p1'] = self.pool1(out['r12'])\n out['r21'] = F.relu(self.conv2_1(out['p1']))\n out['r22'] = F.relu(self.conv2_2(out['r21']))\n out['p2'] = self.pool2(out['r22'])\n out['r31'] = F.relu(self.conv3_1(out['p2']))\n out['r32'] = F.relu(self.conv3_2(out['r31']))\n out['r33'] = F.relu(self.conv3_3(out['r32']))\n out['r34'] = F.relu(self.conv3_4(out['r33']))\n out['p3'] = self.pool3(out['r34'])\n out['r41'] = F.relu(self.conv4_1(out['p3']))\n out['r42'] = F.relu(self.conv4_2(out['r41']))\n out['r43'] = F.relu(self.conv4_3(out['r42']))\n out['r44'] = F.relu(self.conv4_4(out['r43']))\n out['p4'] = self.pool4(out['r44'])\n out['r51'] = F.relu(self.conv5_1(out['p4']))\n out['r52'] = F.relu(self.conv5_2(out['r51']))\n out['r53'] = F.relu(self.conv5_3(out['r52']))\n out['r54'] = F.relu(self.conv5_4(out['r53']))\n out['p5'] = self.pool5(out['r54'])\n #out['p6'] = self.pool6(out['r54'])\n return [out[key] for key in out_keys]\n\n\n\nmodel_dir = 'F:/nst/ist/Models/' #os.getcwd() + '/Models/'\n#get network\nvgg = VGG()\n\nvgg.load_state_dict(torch.load(model_dir + 'vgg_conv.pth'))\nfor param in vgg.parameters():\n param.requires_grad = False\nif torch.cuda.is_available():\n vgg = DataParallelModel(vgg).cuda()\n\n#3. 
possible l\n\n# Image Transform Network\nclass ImageTransformNet(nn.Module):\n def __init__(self):\n super(ImageTransformNet, self).__init__()\n \n # nonlineraity\n self.relu = nn.ReLU()\n self.tanh = nn.Tanh()\n\n # encoding layers\n self.conv1 = ConvLayer(6, 32, kernel_size=9, stride=1)\n self.in1_e = nn.InstanceNorm2d(32, affine=True)\n\n self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)\n self.in2_e = nn.InstanceNorm2d(64, affine=True)\n\n self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)\n self.in3_e = nn.InstanceNorm2d(128, affine=True)\n\n # residual layers\n self.res1 = ResidualBlock(128)\n self.res2 = ResidualBlock(128)\n self.res3 = ResidualBlock(128)\n self.res4 = ResidualBlock(128)\n self.res5 = ResidualBlock(128)\n\n # decoding layers\n self.deconv3 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2 )\n self.in3_d = nn.InstanceNorm2d(64, affine=True)\n\n self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2 )\n self.in2_d = nn.InstanceNorm2d(32, affine=True)\n\n self.deconv1 = UpsampleConvLayer(32, 3, kernel_size=9, stride=1)\n self.in1_d = nn.InstanceNorm2d(3, affine=True)\n\n def forward(self, x):\n # encode\n y = self.relu(self.in1_e(self.conv1(x)))\n y = self.relu(self.in2_e(self.conv2(y)))\n y = self.relu(self.in3_e(self.conv3(y)))\n\n # residual layers\n y = self.res1(y)\n y = self.res2(y)\n y = self.res3(y)\n y = self.res4(y)\n y = self.res5(y)\n\n # decode\n y = self.relu(self.in3_d(self.deconv3(y)))\n y = self.relu(self.in2_d(self.deconv2(y)))\n #y = self.tanh(self.in1_d(self.deconv1(y)))\n y = self.deconv1(y)\n\n return y\n\n def init_weights(self):\n a = 1\n\n\ndef get_deconv_net(is_train):\n backbone_nst = ImageTransformNet()# ResNetBackbone(18, is_pose_net = False)# ImageTransformNet() #ResNetBackbone(18, is_pose_net = False)\n if is_train:\n backbone_nst.init_weights()\n \n model_deconv = ResDeconvNet(backbone_nst)\n\n return model_deconv\n\n\nmodel_deconv = get_deconv_net(True)\nmodel_deconv = DataParallelModel(model_deconv).cuda()\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 16\n self.embDim = 128 * block.expansion\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 128, num_blocks[3], stride=2)\n self.linear = nn.Linear(128 * block.expansion, num_classes)\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 16)\n emb = out.view(out.size(0), -1)\n out = self.linear(emb)\n return out#, emb\n def get_embedding_dim(self):\n return self.embDim\n\ndef ResNet18():\n return ResNet(BasicBlock, [2,2,2,2])\n\n#1. 
DCGAN Generator\nclass DCGAN_generator(nn.Module):\n \"\"\"\n\n Attributes\n ----------\n ngpu : int\n The number of available GPU devices\n\n \"\"\"\n def __init__(self, ngpu):\n \"\"\"Init function\n\n Parameters\n ----------\n ngpu : int\n The number of available GPU devices\n\n \"\"\"\n super(DCGAN_generator, self).__init__()\n self.ngpu = ngpu\n \n nz = 100 # noise dimension\n ngf = 64 # number of features map on the first layer\n nc = 1 # number of channels\n\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 4, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n\n def forward(self, input):\n \"\"\"Forward function\n\n Parameters\n ----------\n input : :py:class:`torch.Tensor`\n \n Returns\n -------\n :py:class:`torch.Tensor`\n the output of the generator (i.e. an image)\n\n \"\"\"\n output = self.main(input)\n return output\n\n\nclass _netG64(nn.Module):\n def __init__(self, ngpu):\n super(_netG64, self).__init__()\n self.ngpu = ngpu\n nz = 100 # noise dimension\n ngf = 64 # number of features map on the first layer\n nc = 1 # number of channels\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16 \n nn.ConvTranspose2d(ngf * 2, ngf * 1, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 1),\n nn.ReLU(True), \n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n\n def forward(self, input):\n output = self.main(input)\n return output\n\n\n\nclass _netG(nn.Module):\n def __init__(self, ngpu):\n super(_netG, self).__init__()\n self.ngpu = ngpu\n nz = 100 # noise dimension\n ngf = 64 # number of features map on the first layer\n nc = 1 # number of channels\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 16, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n # state size. (ngf*16) x 4 x 4\n nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 8 x 8\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 16 x 16 \n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 32 x 32\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 64 x 64\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. 
(nc) x 128 x 128\n )\n\n def forward(self, input):\n \toutput = self.main(input)\n \treturn output\n\n\nclass _netG256(nn.Module):\n def __init__(self, ngpu):\n super(_netG256, self).__init__()\n self.ngpu = ngpu\n nz = 100 # noise dimension\n ngf = 64 # number of features map on the first layer\n nc = 1 # number of channels\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 32, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 32),\n nn.ReLU(True),\n # state size. (ngf*32) x 4 x 4\n nn.ConvTranspose2d(ngf * 32, ngf * 16, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n # state size. (ngf*16) x 8 x 8\n nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 16 x 16 \n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 32 x 32\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 64 x 64\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 128 x 128\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 256 x 256\n )\n\n def forward(self, input):\n output = self.main(input)\n return output\n\n\n#2. DCGAN Discriminator\nclass DCGAN_discriminator(nn.Module):\n \"\"\" \n\n Attributes\n ----------\n ngpu : int\n The number of available GPU devices\n\n \"\"\"\n def __init__(self, ngpu):\n \"\"\"Init function\n\n Parameters\n ----------\n ngpu : int\n The number of available GPU devices\n\n \"\"\"\n super(DCGAN_discriminator, self).__init__()\n self.ngpu = ngpu\n \n ndf = 64\n nc = 1\n \n self.main = nn.Sequential(\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n \"\"\"Forward function\n\n Parameters\n ----------\n input : :py:class:`torch.Tensor`\n \n Returns\n -------\n :py:class:`torch.Tensor`\n the output of the generator (i.e. an image)\n\n \"\"\"\n output = self.main(input)\n\n return output.view(-1, 1).squeeze(1)\n\n\nclass _netD64(nn.Module):\n def __init__(self, ngpu):\n super(_netD64, self).__init__()\n self.ngpu = ngpu\n ndf = 64\n nc = 1\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, stride=2, padding=1, bias=False), \n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, stride=1, padding=0, bias=False),\n nn.Sigmoid()\n # state size. 
1\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.view(-1, 1).squeeze(1)\n\n\nclass _netD(nn.Module):\n def __init__(self, ngpu):\n super(_netD, self).__init__()\n self.ngpu = ngpu\n ndf = 64\n nc = 1\n self.main = nn.Sequential(\n # input is (nc) x 128 x 128\n nn.Conv2d(nc, ndf, 4, stride=2, padding=1, bias=False), \n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 64 x 64\n nn.Conv2d(ndf, ndf * 2, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 32 x 32\n nn.Conv2d(ndf * 2, ndf * 4, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 16 x 16 \n nn.Conv2d(ndf * 4, ndf * 8, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 8 x 8\n nn.Conv2d(ndf * 8, ndf * 16, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 16),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*16) x 4 x 4\n nn.Conv2d(ndf * 16, 1, 4, stride=1, padding=0, bias=False),\n nn.Sigmoid()\n # state size. 1\n )\n\n def forward(self, input):\n \toutput = self.main(input)\n \treturn output.view(-1, 1).squeeze(1)\n\nclass _netD256(nn.Module):\n def __init__(self, ngpu):\n super(_netD256, self).__init__()\n self.ngpu = ngpu\n ndf = 64\n nc = 1\n self.main = nn.Sequential(\n # input is (nc) x 256 x 256\n nn.Conv2d(nc, ndf, 4, stride=2, padding=1, bias=False), \n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 128 x 128\n nn.Conv2d(ndf, ndf * 2, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 64 x 64\n nn.Conv2d(ndf * 2, ndf * 4, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 32 x 32 \n nn.Conv2d(ndf * 4, ndf * 8, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 16 x 16\n nn.Conv2d(ndf * 8, ndf * 16, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 16),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*16) x 8 x 8\n nn.Conv2d(ndf * 16, ndf * 32, 4, stride=2, padding=1, bias=False),\n nn.BatchNorm2d(ndf * 32),\n nn.LeakyReLU(0.2, inplace=True), \n # state size. (ndf*32) x 4 x 4\n nn.Conv2d(ndf * 32, 1, 4, stride=1, padding=0, bias=False),\n nn.Sigmoid()\n # state size. 1\n )\n\n def forward(self, input):\n output = self.main(input)\n return output.view(-1, 1).squeeze(1)\n\n\n\n#3. 
ResNet\n\n\n\n\nclass ResNetBackbone(nn.Module):\n\n def __init__(self, resnet_type, num_classes = 1000):\n \n resnet_spec = {18: (BasicBlock, [2, 2, 2, 2], [64, 64, 128, 256, 512], 'resnet18'),\n 34: (BasicBlock, [3, 4, 6, 3], [64, 64, 128, 256, 512], 'resnet34'),\n 50: (Bottleneck, [3, 4, 6, 3], [64, 256, 512, 1024, 2048], 'resnet50'),\n 101: (Bottleneck, [3, 4, 23, 3], [64, 256, 512, 1024, 2048], 'resnet101'),\n 152: (Bottleneck, [3, 8, 36, 3], [64, 256, 512, 1024, 2048], 'resnet152')}\n block, layers, channels, name = resnet_spec[resnet_type]\n \n self.name = name\n self.inplanes = 64\n self.outplanes = 3\n super(ResNetBackbone, self).__init__()\n self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) #128 -> 4\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n nn.init.normal_(m.weight, mean=0, std=0.01)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n\n def forward(self, x): \n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x) \n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1) \n x = self.fc(x)\n\n return x\n \n def load_my_state_dict(model, state_dict):\n \n own_state = model.state_dict()\n for name, param in state_dict.items():\n if name not in own_state:\n continue\n #if isinstance(param, Parameter):\n # backwards compatibility for serialized parameters\n # param = param.data\n own_state[name].copy_(param)\n\n def init_weights(self):\n org_resnet = torch.utils.model_zoo.load_url(model_urls[self.name])\n # drop orginal resnet fc layer, add 'None' in case of no fc layer, that will raise error\n org_resnet.pop('fc.weight', None)\n org_resnet.pop('fc.bias', None)\n org_resnet.pop('conv1.weight', None)\n org_resnet.pop('conv1.bias', None)\n #self.load_state_dict(org_resnet)\n self.load_my_state_dict(org_resnet)\n print(\"Initialize resnet from model zoo\")\n\n#4. 
logging\nimport logging\nimport os\n\nOK = '\\033[92m'\nWARNING = '\\033[93m'\nFAIL = '\\033[91m'\nEND = '\\033[0m'\n\nPINK = '\\033[95m'\nBLUE = '\\033[94m'\nGREEN = OK\nRED = FAIL\nWHITE = END\nYELLOW = WARNING\nclass colorlogger():\n def __init__(self, log_dir, log_name='train_logs.txt'):\n # set log\n self._logger = logging.getLogger(log_name)\n self._logger.setLevel(logging.INFO)\n log_file = os.path.join(log_dir, log_name)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n file_log = logging.FileHandler(log_file, mode='a')\n file_log.setLevel(logging.INFO)\n #console_log = logging.StreamHandler()\n #console_log.setLevel(logging.INFO)\n formatter = logging.Formatter(\n \"{}%(asctime)s{} %(message)s\".format(GREEN, END),\n \"%m-%d %H:%M:%S\")\n file_log.setFormatter(formatter)\n #console_log.setFormatter(formatter)\n self._logger.addHandler(file_log)\n #self._logger.addHandler(console_log)\n\n def debug(self, msg):\n self._logger.debug(str(msg))\n\n def info(self, msg):\n self._logger.info(str(msg))\n\n def warning(self, msg):\n self._logger.warning(WARNING + 'WRN: ' + str(msg) + END)\n\n def critical(self, msg):\n self._logger.critical(RED + 'CRI: ' + str(msg) + END)\n\n def error(self, msg):\n self._logger.error(RED + 'ERR: ' + str(msg) + END)\n\n\n#5. Configurations and arguments\nroot_dir = \"E:/ml/\" # chest x-ray 14\nn_classes = 15 # 0 is normal : no finding\nbatch_size = 22\nimg_size = 128\ndisplay_per_iters = 5 # how many iterations before outputting to the console window\nsave_gan_per_iters = 5 # save gan images per this iterations\nsave_gan_img_folder_prefix = root_dir + \"train_fake/\"\nshow_train_classifier_acc_per_iters = 1000000 # how many iterations before showing train acc of classifier\nshow_test_classifier_acc_per_iters = 15 # \nsave_per_samples = 2000 # save a checkpoint per forward run of this number of samples\nmodel_ckpt_prefix = 'ecgan-chest-xray14'\n\nuse_label_smoothing = True\nsmoothing = 0.1\n\n# define device \ndevice = torch.device(\"cuda:0\")\n\n# The files that contain paths of all images\nimage_index_list_file = root_dir + \"image_index.txt\"\nlabels_file = root_dir + \"labels.txt\"\ntrain_val_list_file = root_dir + \"train_val_list.txt\"\ntest_list_file = root_dir + \"test_list.txt\"\nimg_folders = { 'images_001/', 'images_002/', 'images_003/', 'images_005/', 'images_008/', 'images_011/', 'images_006/', 'images_007/', 'images_004/', 'images_009/', 'images_010/', 'images_012/'}\nsuffix = 'images/'\nimage_index_list = [] \nlabels_list = []\nimg_index_2_label_dict = {}\nlabel_name_dict = { 'No Finding': 0,\n 'Atelectasis': 1, \n 'Cardiomegaly': 2, \n 'Effusion': 3, \n 'Infiltration': 4, \n 'Mass': 5, \n 'Nodule': 6, \n 'Pneumonia': 7, \n 'Pneumothorax': 8, \n 'Consolidation': 9, \n 'Edema': 10, \n 'Emphysema': 11, \n 'Fibrosis': 12, \n 'Pleural_Thickening': 13, \n 'Hernia': 14}\n# list of img paths \ntrain_val_list = []\ntest_list = []\ntrain_val_labels = []\ntest_labels = []\n\ndef load_image_index_and_list():\n #1. image index list (all) e.g. 00000583_023.png\n f_list = open(image_index_list_file, \"r\")\n l = f_list.readlines()\n for line in l:\n if line != '\\n':\n image_index_list.append(line[0:len(line) - 1])\n f_list.close()\n\n #2. labels e.g. 
Cardiomegaly|Effusion\n f_list = open(labels_file, \"r\")\n l = f_list.readlines()\n for line in l:\n if line != '\\n':\n labels_list.append(line[0:len(line) - 1])\n f_list.close()\n return \n\ndef build_img_2_label_dict():\n for i in range(len(image_index_list)):\n img_id = image_index_list[i]\n label = labels_list[i]\n img_index_2_label_dict.update({img_id: label})\n \ndef load_train_val_list():\n #1. original train_val_list.txt\n f_list = open(train_val_list_file, \"r\")\n l = f_list.readlines() \n for line in l:\n s = line \n if s[len(s) - 1] == '\\n':\n s = s[:len(s) - 1]\n img_name = s\n this_label = img_index_2_label_dict[img_name] \n find_or = this_label.find('|') \n if find_or != -1:\n continue\n # See if this image exists \n for folders in img_folders:\n img_path = root_dir + folders + suffix + img_name \n if not osp.exists(img_path):\n continue\n train_val_list.append(img_path) \n this_label = label_name_dict[this_label]\n train_val_labels.append(this_label)\n print('There are {:6d} images in train/val.\\n'.format(len(train_val_list)))\n f_list.close()\n\ndef load_test_list():\n #1. original test_list.txt\n f_list = open(test_list_file, \"r\")\n l = f_list.readlines() \n for line in l:\n s = line \n if s[len(s) - 1] == '\\n':\n s = s[:len(s) - 1]\n img_name = s\n this_label = img_index_2_label_dict[img_name] \n find_or = this_label.find('|') \n if find_or != -1:\n continue\n # See if this image exists \n for folders in img_folders:\n img_path = root_dir + folders + suffix + img_name \n if not osp.exists(img_path):\n continue\n test_list.append(img_path) \n this_label = label_name_dict[this_label]\n test_labels.append(this_label)\n print('There are {:6d} images in test.\\n'.format(len(test_list)))\n f_list.close()\n\n\t\nload_image_index_and_list()\nbuild_img_2_label_dict()\nload_train_val_list()\nload_test_list()\n\n# Where to log outputs\nlogger = colorlogger(\"logs/\", log_name=\"logs_all.txt\")\ndiscriminator_logger = colorlogger(\"logs/\", log_name=\"logs_D(x);1.txt\")\nfake_logger = colorlogger(\"logs/\", log_name=\"logs_D(G(z));0.txt\")\ngenerator_logger = colorlogger(\"logs/\", log_name=\"logs_D(G(z));1.txt\")\nreal_classifier_logger = colorlogger(\"logs/\", log_name=\"logs_C(x).txt\")\nfake_classifier_logger = colorlogger(\"logs/\", log_name=\"logs_C(G(z)).txt\")\ntotal_logger = colorlogger(\"logs/\", log_name=\"logs_loss_total.txt\")\ntrain_accuracy_logger = colorlogger(\"logs/\", log_name=\"logs_train_classifier_acc.txt\")\ntest_accuracy_logger = colorlogger(\"logs/\", log_name=\"logs_test_classifier_acc.txt\")\navg_kl_logger = colorlogger(\"logs/\", log_name=\"logs_avg_kl.txt\")\n\nepochs = 100\nepoch_imgs = 3483 #how many images are defined as an epoch\n\n#6. 
Randomly sample training images \ndef sample_train_images_randomly():\n inputs = []\n labels = []\n for i in range(batch_size):\n sz = len(train_val_list)\n img_id = random.randint(0, sz - 1)\n img_path = train_val_list[img_id]\n if not osp.exists(img_path):\n \tprint('Image ', img_path, 'does not exist?')\n else:\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_IGNORE_ORIENTATION)\n img = img / 256.0\n #on my laptop it needs to be divided by 256 not sure elsewhere to be in the range[0, 1]\n img = cv2.resize(img, (img_size, img_size))\n img = img.astype(np.float32)\n img = torch.from_numpy(img)\n img = img.reshape((img.shape[0], img.shape[1], 1))\n img = img.permute(2, 0, 1).data.cpu().numpy()\n inputs.append(img)\n this_label = train_val_labels[img_id]\n #print(this_label)\n labels.append(this_label) \n\n TRAIN_AUG = torch.nn.Sequential(\n \n T.RandomResizedCrop((img_size, img_size), scale=(0.75, 1.33), ratio=(0.75, 1.3333333333333333)),\n T.Normalize(\n mean=torch.tensor([0.485]),\n std=torch.tensor([0.229])),\n )\n\n inputs = np.array(inputs)\n inputs = torch.from_numpy(inputs)\n inputs = TRAIN_AUG(inputs)\n labels = np.array(labels)\n labels = labels.reshape((labels.shape[0]))\n labels = torch.from_numpy(labels).long()\n return inputs, labels\n\n#6. Randomly sample test images \ndef sample_test_images_randomly():\n inputs = []\n labels = []\n for i in range(batch_size):\n sz = len(test_list)\n img_id = random.randint(0, sz - 1)\n img_path = test_list[img_id]\n if not osp.exists(img_path):\n \tprint('Image ', img_path, 'does not exist?')\n else:\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_IGNORE_ORIENTATION)\n img = img / 256.0\n #on my laptop it needs to be divided by 256 not sure elsewhere to be in the range[0, 1]\n img = cv2.resize(img, (img_size, img_size))\n img = img.astype(np.float32)\n img = torch.from_numpy(img)\n img = img.reshape((img.shape[0], img.shape[1], 1))\n img = img.permute(2, 0, 1).data.cpu().numpy()\n inputs.append(img)\n this_label = test_labels[img_id]\n labels.append(this_label) \n\n TEST_AUG = torch.nn.Sequential(\n \n #T.RandomResizedCrop((img_size, img_size), scale=(0.75, 1.33), ratio=(0.75, 1.3333333333333333)),\n T.Normalize(\n mean=torch.tensor([0.485]),\n std=torch.tensor([0.229])),\n )\n inputs = np.array(inputs)\n inputs = torch.from_numpy(inputs)\n inputs = TEST_AUG(inputs)\n labels = np.array(labels)\n labels = labels.reshape((labels.shape[0]))\n labels = torch.from_numpy(labels).long()\n return inputs, labels\n\n\n#6. 
Sequentially sample test images \ndef sample_test_images_sequentially(lb, ub):\n inputs = []\n labels = []\n for i in range(batch_size):\n #sz = len(test_list)\n img_id = lb + i #random.randint(0, sz - 1)\n img_path = test_list[img_id]\n if not osp.exists(img_path):\n print('Image ', img_path, 'does not exist?')\n else:\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE | cv2.IMREAD_IGNORE_ORIENTATION)\n img = img / 256.0\n #on my laptop it needs to be divided by 256 not sure elsewhere to be in the range[0, 1]\n img = cv2.resize(img, (img_size, img_size))\n img = img.astype(np.float32)\n img = torch.from_numpy(img)\n img = img.reshape((img.shape[0], img.shape[1], 1))\n img = img.permute(2, 0, 1).data.cpu().numpy()\n inputs.append(img)\n this_label = test_labels[img_id]\n labels.append(this_label) \n\n TEST_AUG = torch.nn.Sequential(\n \n T.RandomResizedCrop((img_size, img_size), scale=(0.75, 1.33), ratio=(0.75, 1.3333333333333333)),\n T.Normalize(\n mean=torch.tensor([0.485]),\n std=torch.tensor([0.229])),\n )\n inputs = np.array(inputs)\n inputs = torch.from_numpy(inputs)\n inputs = TEST_AUG(inputs)\n labels = np.array(labels)\n labels = labels.reshape((labels.shape[0]))\n labels = torch.from_numpy(labels).long()\n return inputs, labels\n\n\n\n#6.5 label smoothing\nclass LabelSmoothingCrossEntropy(nn.Module):\n def __init__(self):\n super(LabelSmoothingCrossEntropy, self).__init__()\n def forward(self, x, target, smoothing=0.1):\n confidence = 1. - smoothing\n logprobs = F.log_softmax(x, dim=-1)\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)\n smooth_loss = -logprobs.mean(dim=-1)\n loss = confidence * nll_loss + smoothing * smooth_loss\n return loss.mean()\n\n\n\ndef mixup_data(x, y, alpha=1.0, use_cuda=True):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\ndef mixup_criterion(criterion, pred, y_a, y_b, lam):\n return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\n\n\nclass LabelSmoothingCrossEntropy(nn.Module):\n def __init__(self):\n super(LabelSmoothingCrossEntropy, self).__init__()\n def forward(self, x, target, smoothing=0.2): \n confidence = 1. 
- smoothing\n logprobs = F.log_softmax(x, dim=-1)\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)\n smooth_loss = -logprobs.mean(dim=-1)\n loss = confidence * nll_loss + smoothing * smooth_loss\n return loss.mean()\n##\n# version 1: use torch.autograd\nclass LabelSmoothSoftmaxCEV1(nn.Module):\n '''\n This is the autograd version, you can also try the LabelSmoothSoftmaxCEV2 that uses derived gradients\n '''\n\n def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):\n super(LabelSmoothSoftmaxCEV1, self).__init__()\n self.lb_smooth = lb_smooth\n self.reduction = reduction\n self.lb_ignore = ignore_index\n self.log_softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, logits, label):\n '''\n Same usage method as nn.CrossEntropyLoss:\n >>> criteria = LabelSmoothSoftmaxCEV1()\n >>> logits = torch.randn(8, 19, 384, 384) # nchw, float/half\n >>> lbs = torch.randint(0, 19, (8, 384, 384)) # nhw, int64_t\n >>> loss = criteria(logits, lbs)\n '''\n # overcome ignored label\n logits = logits.float() # use fp32 to avoid nan\n with torch.no_grad():\n num_classes = logits.size(1)\n label = label.clone().detach()\n ignore = label.eq(self.lb_ignore)\n n_valid = ignore.eq(0).sum()\n label[ignore] = 0\n lb_pos, lb_neg = 1. - self.lb_smooth, self.lb_smooth / num_classes\n lb_one_hot = torch.empty_like(logits).fill_(\n lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()\n\n logs = self.log_softmax(logits)\n loss = -torch.sum(logs * lb_one_hot, dim=1)\n loss[ignore] = 0\n if self.reduction == 'mean':\n loss = loss.sum() / n_valid\n if self.reduction == 'sum':\n loss = loss.sum()\n\n return loss\n\n\n\n# models\n# _net: 128x128\n# DCGAN_: 64X64\nnetG = _netG(1) #DCGAN_generator(1) \nnetD = _netD(1) #DCGAN_discriminator(1)\nnetC = ResNetBackbone(50, num_classes = 15) #ResNet18() #normal or pneumonia\nnetC.init_weights()\n\nnetG = DataParallelModel(netG).cuda()\nnetD = DataParallelModel(netD).cuda()\nnetC = DataParallelModel(netC).cuda()\n\n# optimizers \nblr_d = 0.0001\nblr_g = 0.0001\nblr_c = 0.0001\noptD = optim.Adam(netD.parameters(), lr=blr_d, betas=(0.5, 0.999), weight_decay = 1e-3)\noptG = optim.Adam(netG.parameters(), lr=blr_g, betas=(0.5, 0.999))\noptC = optim.Adam(netC.parameters(), lr=blr_c, betas=(0.5, 0.999), weight_decay = 1e-3)\n\n# losses \n# 1) for discriminator and generator)\nbce_loss = nn.BCELoss()\nbce_loss = DataParallelCriterion(bce_loss, device_ids=[0])\n# 2) for classifier\nif use_label_smoothing:\n criterion = LabelSmoothSoftmaxCEV1(lb_smooth=smoothing, ignore_index=255, reduction='mean')\nelse:\n criterion = nn.CrossEntropyLoss() #LabelSmoothingCrossEntropy() #\ncriterion = DataParallelCriterion(criterion, device_ids=[0])\n\nadvWeight = 0.25 # adversarial weight\n\n#5. 
Loading trained weights\ndef load_my_state_dict(model, state_dict):\n \n own_state = model.state_dict()\n for name, param in state_dict.items():\n if name not in own_state:\n continue\n #own_state[name] = deepcopy(param)\n own_state[name].copy_(param)\n #print(own_state)\n return own_state\n\ndef load_model(model_path):\n ckpt = torch.load(model_path) \n start_epoch = ckpt['epoch'] + 1\n #netD.load_state_dict(ckpt['netD']) \n #netG.load_state_dict(ckpt['netG']) \n netC.load_state_dict(ckpt['netC'])\n #optD.load_state_dict(ckpt['optD'])\n #optG.load_state_dict(ckpt['optG'])\n #optC.load_state_dict(ckpt['optC'])\n total_trained_samples = ckpt['total_trained_samples']\n return start_epoch, total_trained_samples\n\n\ndef distance(X, Y, sqrt=True):\n nX = X.size(0)\n nY = Y.size(0)\n \n X = X.view(nX,-1).cuda()\n X2 = (X*X).sum(1).resize(nX,1)\n Y = Y.view(nY,-1).cuda()\n Y2 = (Y*Y).sum(1).resize(nY,1)\n\n M = torch.zeros(nX, nY)\n M.copy_(X2.expand(nX,nY) + Y2.expand(nY,nX).transpose(0,1) - 2*torch.mm(X,Y.transpose(0,1)))\n\n #del X, X2, Y, Y2\n \n if sqrt:\n M = ((M+M.abs())/2).sqrt()\n\n return M\n\ndef mmd(Mxx, Mxy, Myy, sigma = 1):\n scale = Mxx.mean()\n Mxx = torch.exp(-Mxx/(scale*2*sigma*sigma))\n Mxy = torch.exp(-Mxy/(scale*2*sigma*sigma))\n Myy = torch.exp(-Myy/(scale*2*sigma*sigma))\n a = Mxx.mean()+Myy.mean()-2*Mxy.mean() \n if a.item() > 1e-6:\n \tmmd = torch.sqrt(a)\n \t#print(mmd)\n else:\n \treturn -1\n return mmd \n\n#6. Testing loop\ndef test_all(file, epoch, best_accuracy, best_epoch):\n netD.eval()\n netG.eval()\n total_test = 0\n correct_test = 0\n test_num = 100 \n # sample some test images\n with torch.no_grad():\n for steps in range(test_num):\n inputs, labels = sample_test_images_sequentially(steps * batch_size, (steps + 1) * batch_size)\n inputs = inputs.cuda()\n labels = labels.cuda()\n outputs = netC(inputs)\n \n # accuracy\n _, predicted = torch.max(outputs.data, 1)\n total_test += labels.size(0)\n correct_test += predicted.eq(labels.data).sum().item()\n test_accuracy = 100 * correct_test / total_test\n print('Epoch {:5d} test acc {:6.2f} Current Best {:6.2f} \\n'.format(epoch, test_accuracy, best_accuracy))\n file.write('Epoch {:5d} test acc {:6.2f} Current Best {:6.2f} \\n'.format(epoch, test_accuracy, best_accuracy))\n if test_accuracy > best_accuracy:\n best_accuracy = test_accuracy\n best_epoch = epoch\n return test_accuracy, best_accuracy, best_epoch \ntotal_trained_samples = 0\ntorch.manual_seed(42)\nstart_epoch = 0\nfile = open('best.txt', 'w')\nbest_acc = 0\nbest_epoch = 0\nfor epoch in range(36, 100):\n start_epoch, total_trained_samples = load_model('../models_apr28/ecgan-chest-xray14epo_' + str(epoch) + '.pth')\n netC.eval() #classifier\n test_acc, best_acc, best_epoch = test_all(file, epoch, best_acc, best_epoch)\n \nfile.close()\n \n \n \n \n \n"
] | [
[
"torch.cuda.comm.reduce_add_coalesced",
"torch.no_grad",
"torch.sqrt",
"torch.nn.Upsample",
"torch.cuda.is_available",
"torch.nn.Conv2d",
"torch.nn.ReflectionPad2d",
"torch.nn.InstanceNorm2d",
"torch.max",
"torch.is_grad_enabled",
"torch.nn.Sigmoid",
"torch.cat",
"torch.nn.ConvTranspose2d",
"torch.nn.BatchNorm2d",
"torch.set_grad_enabled",
"torch.autograd.Variable",
"torch.nn.init.normal_",
"torch.from_numpy",
"torch.nn.AvgPool2d",
"torch.utils.model_zoo.load_url",
"torch.device",
"torch.nn.MaxPool2d",
"torch.load",
"torch.nn.AdaptiveAvgPool2d",
"torch.manual_seed",
"torch.flatten",
"torch.tensor",
"torch.nn.LogSoftmax",
"torch.cuda.comm.reduce_add",
"torch.nn.parallel.parallel_apply.get_a_var",
"numpy.array",
"torch.cuda.device",
"torch.sum",
"torch.cuda.comm.broadcast_coalesced",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.nn.init.constant_",
"torch.empty_like",
"torch.nn.CrossEntropyLoss",
"torch.exp",
"torch.nn.Tanh",
"numpy.random.beta",
"torch.nn.parallel._functions.Broadcast.apply",
"torch.nn.Sequential",
"torch.randperm",
"torch.zeros",
"torch.nn.BCELoss",
"torch.nn.ReLU",
"torch.nn.LeakyReLU"
]
] |
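The chest-X-ray classifier code in the row above carries its own label-smoothing losses; the following is a minimal, self-contained sketch of that idea only (mix the one-hot negative log-likelihood with a uniform term), not the repo's exact class. The helper name label_smoothing_ce is hypothetical.

import torch
import torch.nn.functional as F

def label_smoothing_ce(logits, target, smoothing=0.1):
    # logits: (N, C) float scores, target: (N,) int64 class indices
    logprobs = F.log_softmax(logits, dim=-1)
    nll = -logprobs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
    smooth = -logprobs.mean(dim=-1)                     # uniform-target term
    return ((1.0 - smoothing) * nll + smoothing * smooth).mean()

logits = torch.randn(4, 15)            # 15 classes, matching the label map above
target = torch.tensor([0, 3, 7, 14])
loss = label_smoothing_ce(logits, target, smoothing=0.1)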
Yongtae723/88_face | [
"7a761cb277be2a28984161be1e7ae2b73cadf085"
] | [
"wtfml/data_loaders/pl_data_module/data_module.py"
] | [
"import pytorch_lightning as pl\nfrom torch.utils.data import DataLoader\n\n\nclass plDataModule(pl.LightningDataModule):\n def __init__(\n self,\n train_dataset,\n val_dataset,\n test_dataset=None,\n num_workers=2,\n train_sampler=None,\n train_shuffle=True,\n train_batch_size=64,\n train_drop_last=False,\n val_batch_size=16,\n val_shuffle=False,\n val_sampler=None,\n train_dataloader=None,\n val_dataloader=None,\n test_dataloader=None,\n ):\n super().__init__()\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.test_dataset = test_dataset\n\n self.num_workers = num_workers\n self.train_sampler = train_sampler\n self.train_shuffle = train_shuffle\n self.train_batch_size = train_batch_size\n self.train_drop_last = train_drop_last\n\n self.val_batch_size = val_batch_size\n self.val_shuffle = val_shuffle\n self.val_sampler = val_sampler\n\n self.created_train_dataloader = train_dataloader\n self.created_val_dataloader = val_dataloader\n self.created_test_dataloader = test_dataloader\n\n def train_dataloader(self):\n if self.created_train_dataloader:\n return self.created_train_dataloader\n return DataLoader(\n self.train_dataset,\n batch_size=self.train_batch_size,\n sampler=self.train_sampler,\n drop_last=self.train_drop_last,\n num_workers=self.num_workers,\n shuffle=self.train_shuffle if not self.train_sampler else False,\n )\n\n def val_dataloader(self):\n if self.created_val_dataloader:\n return self.created_val_dataloader\n return DataLoader(\n self.val_dataset,\n batch_size=self.val_batch_size,\n sampler=self.val_sampler,\n drop_last=False,\n num_workers=self.num_workers,\n shuffle=self.val_shuffle if not self.val_sampler else False,\n )\n\n def test_dataloader(self):\n if self.created_test_dataloader:\n return self.created_test_dataloader\n if self.test_dataset:\n return DataLoader(\n self.test_dataset,\n batch_size=self.val_batch_size,\n sampler=self.val_sampler,\n drop_last=False,\n num_workers=self.num_workers,\n shuffle=self.val_shuffle if not self.val_sampler else False,\n )\n"
] | [
[
"torch.utils.data.DataLoader"
]
] |
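A small standalone illustration of the sampler-vs-shuffle rule that plDataModule above encodes (shuffle is forced to False whenever an explicit sampler is supplied); the dataset here is synthetic and the wtfml package is not imported.

import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

ds = TensorDataset(torch.randn(64, 3), torch.randint(0, 2, (64,)))
sampler = RandomSampler(ds)
# DataLoader rejects shuffle=True together with a sampler, which is exactly why
# the module above uses `shuffle=... if not sampler else False`.
loader = DataLoader(ds, batch_size=16, sampler=sampler,
                    shuffle=True if sampler is None else False)
batch_x, batch_y = next(iter(loader))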
aimalz/justice | [
"2edcb471cd01d6659a498bcd0209cb5dae83375a"
] | [
"justice/summarize.py"
] | [
"\"\"\"Tools for summarizing lightcurve data into statistics\"\"\"\n\nimport numpy as np\nimport scipy.optimize as spo\nfrom tensorflow.contrib.framework import nest\n\nfrom justice import lightcurve\nfrom justice import xform\n\n\ndef opt_alignment(\n lca: lightcurve._LC,\n lcb: lightcurve._LC,\n ivals=None,\n constraints=None,\n method='Nelder-Mead',\n options=None,\n vb=True,\n) -> xform.LCXform:\n \"\"\"\n Minimizes the arclength between two lightcurves after merging\n\n :param lca: First lightcurve.\n :param lcb: Lightcurve to try merging in\n :param ivals: initial values to try\n :param constraints: Not sure how these work, feel free to give it a try though!\n :param method: Only Nelder_Mead is tested as of now\n :param options: Only maxiter is included right now\n :param vb: Boolean verbose\n :return: best xform\n \"\"\"\n if constraints is None:\n constraints = []\n if options is None:\n options = {'maxiter': 10000}\n if ivals is None:\n ivals = np.array([0, 0, 1, 1])\n\n if method != 'Nelder-Mead':\n\n def pos_dil(xf: xform.LinearBandDataXform):\n return min(xf._dilate_time, xf._dilate_flux)\n\n constraints += [{'type': 'ineq', 'fun': pos_dil}]\n else:\n constraints = None\n\n # don't know if this way of handling constraints actually works -- untested!\n def _helper(vals):\n bd_xform = xform.LinearBandDataXform(*vals)\n lca_xform = xform.SameLCXform(bd_xform)\n lc = lca_xform.apply(lcb)\n new_lc = lca + lc\n length = new_lc.connect_the_dots()\n return length\n\n # could make this a probability by taking chi^2 error relative to\n # connect_the_dots original, but it didn't work better in the sandbox\n # notebook\n res = spo.minimize(\n _helper, ivals, constraints=constraints, method=method, options=options\n )\n if vb:\n print(res)\n res_xform = xform.SameLCXform(xform.LinearBandDataXform(*res.x))\n return res_xform\n"
] | [
[
"numpy.array",
"scipy.optimize.minimize"
]
] |
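A toy sketch of the optimisation pattern used by opt_alignment above: minimise a scalar objective over a four-parameter vector with Nelder-Mead from ivals-like starting values. The quadratic objective is a stand-in for the merged-lightcurve arclength, not the justice library itself.

import numpy as np
import scipy.optimize as spo

def objective(vals):
    # Stand-in for the arclength returned by new_lc.connect_the_dots().
    shift_t, shift_f, dilate_t, dilate_f = vals
    return (shift_t - 1.0) ** 2 + shift_f ** 2 + (dilate_t - 2.0) ** 2 + (dilate_f - 1.0) ** 2

res = spo.minimize(objective, np.array([0, 0, 1, 1]), method='Nelder-Mead',
                   options={'maxiter': 10000})
print(res.x)   # approaches [1, 0, 2, 1]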
nilkeshpatra/kmodes | [
"f4b5582e7bb872b15ec4e2c135fd40bd42642e83"
] | [
"kmodes/kprototypes.py"
] | [
"\"\"\"\nK-prototypes clustering for mixed categorical and numerical data\n\"\"\"\n\n# pylint: disable=super-on-old-class,unused-argument,attribute-defined-outside-init\n\nfrom collections import defaultdict\n\nimport numpy as np\nfrom scipy import sparse\nfrom sklearn.externals.joblib import Parallel, delayed\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils.validation import check_array\n\nfrom . import kmodes\nfrom .util import get_max_value_key, encode_features, get_unique_rows, decode_centroids\nfrom .util.dissim import matching_dissim, euclidean_dissim\n\n# Number of tries we give the initialization methods to find non-empty\n# clusters before we switch to random initialization.\nMAX_INIT_TRIES = 20\n# Number of tries we give the initialization before we raise an\n# initialization error.\nRAISE_INIT_TRIES = 100\n\n\ndef move_point_num(point, to_clust, from_clust, cl_attr_sum, cl_memb_sum):\n \"\"\"Move point between clusters, numerical attributes.\"\"\"\n # Update sum of attributes in cluster.\n for iattr, curattr in enumerate(point):\n cl_attr_sum[to_clust][iattr] += curattr\n cl_attr_sum[from_clust][iattr] -= curattr\n # Update sums of memberships in cluster\n cl_memb_sum[to_clust] += 1\n cl_memb_sum[from_clust] -= 1\n return cl_attr_sum, cl_memb_sum\n\n\ndef _split_num_cat(X, categorical):\n \"\"\"Extract numerical and categorical columns.\n Convert to numpy arrays, if needed.\n\n :param X: Feature matrix\n :param categorical: Indices of categorical columns\n \"\"\"\n Xnum = np.asanyarray(X[:, [ii for ii in range(X.shape[1])\n if ii not in categorical]]).astype(np.float64)\n Xcat = np.asanyarray(X[:, categorical])\n return Xnum, Xcat\n\n\ndef _labels_cost(Xnum, Xcat, centroids, num_dissim, cat_dissim, gamma, membship=None):\n \"\"\"Calculate labels and cost function given a matrix of points and\n a list of centroids for the k-prototypes algorithm.\n \"\"\"\n\n n_points = Xnum.shape[0]\n Xnum = check_array(Xnum)\n\n cost = 0.\n labels = np.empty(n_points, dtype=np.uint8)\n for ipoint in range(n_points):\n # Numerical cost = sum of Euclidean distances\n num_costs = num_dissim(centroids[0], Xnum[ipoint])\n cat_costs = cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)\n # Gamma relates the categorical cost to the numerical cost.\n tot_costs = num_costs + gamma * cat_costs\n clust = np.argmin(tot_costs)\n labels[ipoint] = clust\n cost += tot_costs[clust]\n\n return labels, cost\n\n\ndef _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_memb_sum, cl_attr_freq,\n membship, num_dissim, cat_dissim, gamma, random_state):\n \"\"\"Single iteration of the k-prototypes algorithm\"\"\"\n moves = 0\n for ipoint in range(Xnum.shape[0]):\n clust = np.argmin(\n num_dissim(centroids[0], Xnum[ipoint]) +\n gamma * cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)\n )\n if membship[clust, ipoint]:\n # Point is already in its right place.\n continue\n\n # Move point, and update old/new cluster frequencies and centroids.\n moves += 1\n old_clust = np.argwhere(membship[:, ipoint])[0][0]\n\n # Note that membship gets updated by kmodes.move_point_cat.\n # move_point_num only updates things specific to the k-means part.\n cl_attr_sum, cl_memb_sum = move_point_num(\n Xnum[ipoint], clust, old_clust, cl_attr_sum, cl_memb_sum\n )\n cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(\n Xcat[ipoint], ipoint, clust, old_clust,\n cl_attr_freq, membship, centroids[1]\n )\n\n # Update old and new centroids for numerical attributes using\n # the means and 
sums of all values\n for iattr in range(len(Xnum[ipoint])):\n for curc in (clust, old_clust):\n if cl_memb_sum[curc]:\n centroids[0][curc, iattr] = cl_attr_sum[curc, iattr] / cl_memb_sum[curc]\n else:\n centroids[0][curc, iattr] = 0.\n\n # In case of an empty cluster, reinitialize with a random point\n # from largest cluster.\n if not cl_memb_sum[old_clust]:\n from_clust = membship.sum(axis=1).argmax()\n choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch]\n rindx = random_state.choice(choices)\n\n cl_attr_sum, cl_memb_sum = move_point_num(\n Xnum[rindx], old_clust, from_clust, cl_attr_sum, cl_memb_sum\n )\n cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(\n Xcat[rindx], rindx, old_clust, from_clust,\n cl_attr_freq, membship, centroids[1]\n )\n\n return centroids, moves\n\n\ndef k_prototypes_single(Xnum, Xcat, nnumattrs, ncatattrs, n_clusters, n_points,\n max_iter, num_dissim, cat_dissim, gamma, init, init_no,\n verbose, random_state):\n # For numerical part of initialization, we don't have a guarantee\n # that there is not an empty cluster, so we need to retry until\n # there is none.\n random_state = check_random_state(random_state)\n init_tries = 0\n while True:\n init_tries += 1\n # _____ INIT _____\n if verbose:\n print(\"Init: initializing centroids\")\n if isinstance(init, str) and init.lower() == 'huang':\n centroids = kmodes.init_huang(Xcat, n_clusters, cat_dissim, random_state)\n elif isinstance(init, str) and init.lower() == 'cao':\n centroids = kmodes.init_cao(Xcat, n_clusters, cat_dissim)\n elif isinstance(init, str) and init.lower() == 'random':\n seeds = random_state.choice(range(n_points), n_clusters)\n centroids = Xcat[seeds]\n elif isinstance(init, list):\n # Make sure inits are 2D arrays.\n init = [np.atleast_2d(cur_init).T if len(cur_init.shape) == 1\n else cur_init\n for cur_init in init]\n assert init[0].shape[0] == n_clusters, \\\n \"Wrong number of initial numerical centroids in init \" \\\n \"({}, should be {}).\".format(init[0].shape[0], n_clusters)\n assert init[0].shape[1] == nnumattrs, \\\n \"Wrong number of numerical attributes in init ({}, should be {}).\" \\\n .format(init[0].shape[1], nnumattrs)\n assert init[1].shape[0] == n_clusters, \\\n \"Wrong number of initial categorical centroids in init ({}, \" \\\n \"should be {}).\".format(init[1].shape[0], n_clusters)\n assert init[1].shape[1] == ncatattrs, \\\n \"Wrong number of categorical attributes in init ({}, should be {}).\" \\\n .format(init[1].shape[1], ncatattrs)\n centroids = [np.asarray(init[0], dtype=np.float64),\n np.asarray(init[1], dtype=np.uint8)]\n else:\n raise NotImplementedError(\"Initialization method not supported.\")\n\n if not isinstance(init, list):\n # Numerical is initialized by drawing from normal distribution,\n # categorical following the k-modes methods.\n meanx = np.mean(Xnum, axis=0)\n stdx = np.std(Xnum, axis=0)\n centroids = [\n meanx + random_state.randn(n_clusters, nnumattrs) * stdx,\n centroids\n ]\n\n if verbose:\n print(\"Init: initializing clusters\")\n membship = np.zeros((n_clusters, n_points), dtype=np.uint8)\n # Keep track of the sum of attribute values per cluster so that we\n # can do k-means on the numerical attributes.\n cl_attr_sum = np.zeros((n_clusters, nnumattrs), dtype=np.float64)\n # Same for the membership sum per cluster\n cl_memb_sum = np.zeros(n_clusters, dtype=int)\n # cl_attr_freq is a list of lists with dictionaries that contain\n # the frequencies of values per cluster and attribute.\n cl_attr_freq = [[defaultdict(int) 
for _ in range(ncatattrs)]\n for _ in range(n_clusters)]\n for ipoint in range(n_points):\n # Initial assignment to clusters\n clust = np.argmin(\n num_dissim(centroids[0], Xnum[ipoint]) + gamma *\n cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)\n )\n membship[clust, ipoint] = 1\n cl_memb_sum[clust] += 1\n # Count attribute values per cluster.\n for iattr, curattr in enumerate(Xnum[ipoint]):\n cl_attr_sum[clust, iattr] += curattr\n for iattr, curattr in enumerate(Xcat[ipoint]):\n cl_attr_freq[clust][iattr][curattr] += 1\n\n # If no empty clusters, then consider initialization finalized.\n if membship.sum(axis=1).min() > 0:\n break\n\n if init_tries == MAX_INIT_TRIES:\n # Could not get rid of empty clusters. Randomly\n # initialize instead.\n init = 'random'\n elif init_tries == RAISE_INIT_TRIES:\n raise ValueError(\n \"Clustering algorithm could not initialize. \"\n \"Consider assigning the initial clusters manually.\"\n )\n\n # Perform an initial centroid update.\n for ik in range(n_clusters):\n for iattr in range(nnumattrs):\n centroids[0][ik, iattr] = cl_attr_sum[ik, iattr] / cl_memb_sum[ik]\n for iattr in range(ncatattrs):\n centroids[1][ik, iattr] = get_max_value_key(cl_attr_freq[ik][iattr])\n\n # _____ ITERATION _____\n if verbose:\n print(\"Starting iterations...\")\n itr = 0\n labels = None\n converged = False\n cost = np.Inf\n while itr <= max_iter and not converged:\n itr += 1\n centroids, moves = _k_prototypes_iter(Xnum, Xcat, centroids,\n cl_attr_sum, cl_memb_sum, cl_attr_freq,\n membship, num_dissim, cat_dissim, gamma,\n random_state)\n\n # All points seen in this iteration\n labels, ncost = _labels_cost(Xnum, Xcat, centroids,\n num_dissim, cat_dissim, gamma, membship)\n converged = (moves == 0) or (ncost >= cost)\n cost = ncost\n if verbose:\n print(\"Run: {}, iteration: {}/{}, moves: {}, ncost: {}\"\n .format(init_no + 1, itr, max_iter, moves, ncost))\n\n return centroids, labels, cost, itr\n\n\ndef k_prototypes(X, categorical, n_clusters, max_iter, num_dissim, cat_dissim,\n gamma, init, n_init, verbose, random_state, n_jobs):\n \"\"\"k-prototypes algorithm\"\"\"\n random_state = check_random_state(random_state)\n if sparse.issparse(X):\n raise TypeError(\"k-prototypes does not support sparse data.\")\n\n # Convert pandas objects to numpy arrays.\n if 'pandas' in str(X.__class__):\n X = X.values\n\n if categorical is None or not categorical:\n raise NotImplementedError(\n \"No categorical data selected, effectively doing k-means. \"\n \"Present a list of categorical columns, or use scikit-learn's \"\n \"KMeans instead.\"\n )\n if isinstance(categorical, int):\n categorical = [categorical]\n assert len(categorical) != X.shape[1], \\\n \"All columns are categorical, use k-modes instead of k-prototypes.\"\n assert max(categorical) < X.shape[1], \\\n \"Categorical index larger than number of columns.\"\n\n ncatattrs = len(categorical)\n nnumattrs = X.shape[1] - ncatattrs\n n_points = X.shape[0]\n assert n_clusters <= n_points, \"Cannot have more clusters ({}) \" \\\n \"than data points ({}).\".format(n_clusters, n_points)\n\n Xnum, Xcat = _split_num_cat(X, categorical)\n Xnum, Xcat = check_array(Xnum), check_array(Xcat, dtype=None)\n\n # Convert the categorical values in Xcat to integers for speed.\n # Based on the unique values in Xcat, we can make a mapping to achieve this.\n Xcat, enc_map = encode_features(Xcat)\n\n # Are there more n_clusters than unique rows? 
Then set the unique\n # rows as initial values and skip iteration.\n unique = get_unique_rows(X)\n n_unique = unique.shape[0]\n if n_unique <= n_clusters:\n max_iter = 0\n n_init = 1\n n_clusters = n_unique\n init = list(_split_num_cat(unique, categorical))\n init[1], _ = encode_features(init[1], enc_map)\n\n # Estimate a good value for gamma, which determines the weighing of\n # categorical values in clusters (see Huang [1997]).\n if gamma is None:\n gamma = 0.5 * Xnum.std()\n\n results = []\n seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)\n if n_jobs == 1:\n for init_no in range(n_init):\n results.append(k_prototypes_single(Xnum, Xcat, nnumattrs, ncatattrs,\n n_clusters, n_points, max_iter,\n num_dissim, cat_dissim, gamma,\n init, init_no, verbose, seeds[init_no]))\n else:\n results = Parallel(n_jobs=n_jobs, verbose=0)(\n delayed(k_prototypes_single)(Xnum, Xcat, nnumattrs, ncatattrs,\n n_clusters, n_points, max_iter,\n num_dissim, cat_dissim, gamma,\n init, init_no, verbose, seed)\n for init_no, seed in enumerate(seeds))\n all_centroids, all_labels, all_costs, all_n_iters = zip(*results)\n\n best = np.argmin(all_costs)\n if n_init > 1 and verbose:\n print(\"Best run was number {}\".format(best + 1))\n\n # Note: return gamma in case it was automatically determined.\n return all_centroids[best], enc_map, all_labels[best], \\\n all_costs[best], all_n_iters[best], gamma\n\n\nclass KPrototypes(kmodes.KModes):\n \"\"\"k-protoypes clustering algorithm for mixed numerical/categorical data.\n\n Parameters\n -----------\n n_clusters : int, optional, default: 8\n The number of clusters to form as well as the number of\n centroids to generate.\n\n max_iter : int, default: 300\n Maximum number of iterations of the k-modes algorithm for a\n single run.\n\n num_dissim : func, default: euclidian_dissim\n Dissimilarity function used by the algorithm for numerical variables.\n Defaults to the Euclidian dissimilarity function.\n\n cat_dissim : func, default: matching_dissim\n Dissimilarity function used by the kmodes algorithm for categorical variables.\n Defaults to the matching dissimilarity function.\n\n n_init : int, default: 10\n Number of time the k-modes algorithm will be run with different\n centroid seeds. The final results will be the best output of\n n_init consecutive runs in terms of cost.\n\n init : {'Huang', 'Cao', 'random' or a list of ndarrays}, default: 'Cao'\n Method for initialization:\n 'Huang': Method in Huang [1997, 1998]\n 'Cao': Method in Cao et al. [2009]\n 'random': choose 'n_clusters' observations (rows) at random from\n data for the initial centroids.\n If a list of ndarrays is passed, it should be of length 2, with\n shapes (n_clusters, n_features) for numerical and categorical\n data respectively. These are the initial centroids.\n\n gamma : float, default: None\n Weighing factor that determines relative importance of numerical vs.\n categorical attributes (see discussion in Huang [1997]). By default,\n automatically calculated from data.\n\n verbose : integer, optional\n Verbosity mode.\n\n random_state : int, RandomState instance or None, optional, default: None\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n n_jobs : int, default: 1\n The number of jobs to use for the computation. 
This works by computing\n each of the n_init runs in parallel.\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debugging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one\n are used.\n\n Attributes\n ----------\n cluster_centroids_ : array, [n_clusters, n_features]\n Categories of cluster centroids\n\n labels_ :\n Labels of each point\n\n cost_ : float\n Clustering cost, defined as the sum distance of all points to\n their respective cluster centroids.\n\n n_iter_ : int\n The number of iterations the algorithm ran for.\n\n gamma : float\n The (potentially calculated) weighing factor.\n\n Notes\n -----\n See:\n Huang, Z.: Extensions to the k-modes algorithm for clustering large\n data sets with categorical values, Data Mining and Knowledge\n Discovery 2(3), 1998.\n\n \"\"\"\n\n def __init__(self, n_clusters=8, max_iter=100, num_dissim=euclidean_dissim,\n cat_dissim=matching_dissim, init='Huang', n_init=10, gamma=None,\n verbose=0, random_state=None, n_jobs=1):\n\n super(KPrototypes, self).__init__(n_clusters, max_iter, cat_dissim,\n init, n_init, verbose, random_state,\n n_jobs)\n\n self.num_dissim = num_dissim\n self.gamma = gamma\n\n def fit(self, X, y=None, categorical=None):\n \"\"\"Compute k-prototypes clustering.\n\n Parameters\n ----------\n X : array-like, shape=[n_samples, n_features]\n categorical : Index of columns that contain categorical data\n \"\"\"\n\n random_state = check_random_state(self.random_state)\n # If self.gamma is None, gamma will be automatically determined from\n # the data. The function below returns its value.\n self._enc_cluster_centroids, self._enc_map, self.labels_, self.cost_,\\\n self.n_iter_, self.gamma = k_prototypes(X,\n categorical,\n self.n_clusters,\n self.max_iter,\n self.num_dissim,\n self.cat_dissim,\n self.gamma,\n self.init,\n self.n_init,\n self.verbose,\n random_state,\n self.n_jobs)\n return self\n\n def predict(self, X, categorical=None):\n \"\"\"Predict the closest cluster each sample in X belongs to.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n New data to predict.\n categorical : Index of columns that contain categorical data\n\n Returns\n -------\n labels : array, shape [n_samples,]\n Index of the cluster each sample belongs to.\n \"\"\"\n assert hasattr(self, '_enc_cluster_centroids'), \"Model not yet fitted.\"\n\n Xnum, Xcat = _split_num_cat(X, categorical)\n Xnum, Xcat = check_array(Xnum), check_array(Xcat, dtype=None)\n Xcat, _ = encode_features(Xcat, enc_map=self._enc_map)\n return _labels_cost(Xnum, Xcat, self._enc_cluster_centroids,\n self.num_dissim, self.cat_dissim, self.gamma)[0]\n\n @property\n def cluster_centroids_(self):\n if hasattr(self, '_enc_cluster_centroids'):\n return [\n self._enc_cluster_centroids[0],\n decode_centroids(self._enc_cluster_centroids[1], self._enc_map)\n ]\n else:\n raise AttributeError(\"'{}' object has no attribute 'cluster_centroids_' \"\n \"because the model is not yet fitted.\")\n"
] | [
[
"sklearn.utils.check_random_state",
"numpy.atleast_2d",
"numpy.empty",
"numpy.zeros",
"numpy.argwhere",
"scipy.sparse.issparse",
"numpy.argmin",
"numpy.std",
"numpy.asarray",
"numpy.asanyarray",
"sklearn.externals.joblib.delayed",
"numpy.iinfo",
"sklearn.utils.validation.check_array",
"sklearn.externals.joblib.Parallel",
"numpy.mean"
]
] |
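A small numeric illustration of the gamma heuristic in the k-prototypes code above: when gamma is None it is set to half the standard deviation of the numerical columns, so the categorical matching cost is weighed comparably to the Euclidean cost. The data values are made up.

import numpy as np

Xnum = np.array([[1.2, 0.4], [0.9, 0.1], [5.5, 2.2], [6.1, 2.5]])
gamma = 0.5 * Xnum.std()   # same expression k_prototypes uses when gamma is None
print(gamma)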
anuragphadnis/im2latex2 | [
"3e5bcb400d7bdff9cfd8ed03b821b3b6cb809b9b"
] | [
"model/utils/text.py"
] | [
"import numpy as np\nfrom collections import Counter\n\n\nclass Vocab(object):\n\n def __init__(self, config):\n self.config = config\n self.load_vocab()\n\n\n def load_vocab(self):\n special_tokens = [self.config.unk, self.config.pad, self.config.end]\n self.tok_to_id = load_tok_to_id(self.config.path_vocab, special_tokens)\n self.id_to_tok = {idx: tok for tok, idx in self.tok_to_id.items()}\n self.n_tok = len(self.tok_to_id)\n\n self.id_pad = self.tok_to_id[self.config.pad]\n self.id_end = self.tok_to_id[self.config.end]\n self.id_unk = self.tok_to_id[self.config.unk]\n\n\n @property\n def form_prepro(self):\n return get_form_prepro(self.tok_to_id, self.id_unk)\n\n\ndef get_form_prepro(vocab, id_unk):\n \"\"\"Given a vocab, returns a lambda function word -> id\n\n Args:\n vocab: dict[token] = id\n\n Returns:\n lambda function(formula) -> list of ids\n\n \"\"\"\n def get_token_id(token):\n return vocab[token] if token in vocab else id_unk\n\n def f(formula):\n formula = formula.strip().split(' ')\n return [get_token_id(t) for t in formula]\n\n return f\n\n\ndef load_tok_to_id(filename, tokens=[]):\n \"\"\"\n Args:\n filename: (string) path to vocab txt file one word per line\n tokens: list of token to add to vocab after reading filename\n\n Returns:\n dict: d[token] = id\n\n \"\"\"\n tok_to_id = dict()\n with open(filename) as f:\n for idx, token in enumerate(f):\n token = token.strip()\n tok_to_id[token] = idx\n\n # add extra tokens\n for tok in tokens:\n tok_to_id[tok] = len(tok_to_id)\n\n return tok_to_id\n\n\ndef build_vocab(datasets, min_count=10):\n \"\"\"Build vocabulary from an iterable of datasets objects\n\n Args:\n datasets: a list of dataset objects\n min_count: (int) if token appears less times, do not include it.\n\n Returns:\n a set of all the words in the dataset\n\n \"\"\"\n print(\"Building vocab...\")\n c = Counter()\n for dataset in datasets:\n for _, formula in dataset:\n try:\n c.update(formula)\n except Exception:\n print(formula)\n raise Exception\n vocab = [tok for tok, count in list(c.items()) if count >= min_count]\n print((\"- done. {}/{} tokens added to vocab.\".format(len(vocab), len(c))))\n return sorted(vocab)\n\n\ndef write_vocab(vocab, filename):\n \"\"\"Writes a vocab to a file\n\n Writes one word per line.\n\n Args:\n vocab: iterable that yields word\n filename: path to vocab file\n\n Returns:\n write a word per line\n\n \"\"\"\n print(\"Writing vocab...\")\n with open(filename, \"w\") as f:\n for i, word in enumerate(vocab):\n if i != len(vocab) - 1:\n f.write(\"{}\\n\".format(word))\n else:\n f.write(word)\n print((\"- done. 
{} tokens\".format(i+1)))\n\n\ndef pad_batch_formulas(formulas, id_pad, id_end, max_len=None):\n \"\"\"Pad formulas to the max length with id_pad and adds and id_end token\n at the end of each formula\n\n Args:\n formulas: (list) of list of ints\n max_length: length maximal of formulas\n\n Returns:\n array: of shape = (batch_size, max_len) of type np.int32\n array: of shape = (batch_size) of type np.int32\n\n \"\"\"\n if max_len is None:\n max_len = max([len(x) for x in formulas])\n\n batch_formulas = id_pad * np.ones([len(formulas), max_len+1],\n dtype=np.int32)\n formula_length = np.zeros(len(formulas), dtype=np.int32)\n for idx, formula in enumerate(formulas):\n batch_formulas[idx, :len(formula)] = np.asarray(formula,\n dtype=np.int32)\n batch_formulas[idx, len(formula)] = id_end\n formula_length[idx] = len(formula) + 1\n\n return batch_formulas, formula_length\n\n\ndef load_formulas(filename):\n formulas = dict()\n with open(filename) as f:\n for idx, line in enumerate(f):\n formulas[idx] = line.strip()\n\n print((\"Loaded {} formulas from {}\".format(len(formulas), filename)))\n return formulas\n"
] | [
[
"numpy.asarray"
]
] |
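A standalone illustration of the padding convention implemented by pad_batch_formulas above: each formula gets id_end appended, then the row is right-padded with id_pad, and the stored length includes the end token. The token ids here are arbitrary.

import numpy as np

formulas = [[5, 9, 3], [7]]
id_pad, id_end = 0, 1
max_len = max(len(f) for f in formulas)
batch = id_pad * np.ones((len(formulas), max_len + 1), dtype=np.int32)
lengths = np.zeros(len(formulas), dtype=np.int32)
for i, f in enumerate(formulas):
    batch[i, :len(f)] = np.asarray(f, dtype=np.int32)
    batch[i, len(f)] = id_end
    lengths[i] = len(f) + 1
# batch -> [[5 9 3 1], [7 1 0 0]], lengths -> [4 2]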
Devanthro/ball_in_socket_estimator | [
"5793db2dfd22b693c082694c2130a16c92164d70"
] | [
"python_old/magnetic_field_simulation.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom magpylib.source.magnet import Box,Cylinder\nfrom magpylib import Collection, displaySystem, Sensor\nfrom scipy.optimize import fsolve, least_squares\nimport matplotlib.animation as manimation\nimport random\nimport MDAnalysis\nimport MDAnalysis.visualization.streamlines_3D\nimport mayavi, mayavi.mlab\n\n\niterations = 360\n\nFFMpegWriter = manimation.writers['ffmpeg']\nmetadata = dict(title='Movie Test', artist='Matplotlib',\n comment='Movie support!')\nwriter = FFMpegWriter(fps=1, metadata=metadata)\n\n# define sensor\nsensor_pos = [(-22.7,7.7,0),(-14.7,-19.4,0),(14.7,-19.4,0),(22.7,7.7,0)]\n# sensor_rot = [[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]],[0,[0,0,1]]]\nsensors = []\ni = 0\nfor pos in sensor_pos:\n # sensors.append(Sensor(pos=pos,angle=sensor_rot[i][0], axis=sensor_rot[i][1]))\n sensors.append(Sensor(pos=pos))\n\ndef gen_magnets():\n return [Box(mag=(0,0,500),dim=(10,10,10),pos=(0,0,12)), Box(mag=(0,500,0),dim=(10,10,10),pos=(0,12,0)), Box(mag=(0,500,0),dim=(10,10,10),pos=(10.392304845,-6,0),angle=60, axis=(0,0,1)), Box(mag=(0,500,0),dim=(10,10,10),pos=(-10.392304845,-6,0),angle=-60, axis=(0,0,1))]\n # return [Box(mag=(0,500,0),dim=(10,10,10),pos=(0,12,0)), Box(mag=(0,-500,0),dim=(10,10,10),pos=(10.392304845,-6,0),angle=60, axis=(0,0,1)), Box(mag=(0,0,500),dim=(10,10,10),pos=(-10.392304845,-6,0),angle=-60, axis=(0,0,1))]\n\nc = Collection(gen_magnets())\n\n# calculate B-field on a grid\nxs = np.linspace(-30,30,33)\nzs = np.linspace(-30,30,44)\nPOS = np.array([(x,0,z) for z in zs for x in xs])\n\n# create figure\nfig = plt.figure(figsize=(9,5))\nax1 = fig.add_subplot(121, projection='3d') # 3D-axis\nax2 = fig.add_subplot(122) # 2D-axis\n\nBs = c.getB(POS).reshape(44,33,3) #<--VECTORIZED\nX,Z = np.meshgrid(xs,zs)\nU,V = Bs[:,:,0], Bs[:,:,2]\nax2.streamplot(X, Z, U, V, color=np.log(U**2+V**2))\ndisplaySystem(c, subplotAx=ax1, suppress=True, sensors=sensors,direc=True)\nplt.show()\n\nfirst = True\n\nwith writer.saving(fig, \"writer_test.mp4\", 100):\n for iter in range(0,iterations,5):\n rot = [iter,0,0]#random.uniform(-90,90),random.uniform(-90,90)\n\n c = Collection(gen_magnets())\n c.rotate(rot[0],(1,0,0), anchor=(0,0,0))\n c.rotate(rot[1],(0,1,0), anchor=(0,0,0))\n c.rotate(rot[2],(0,0,1), anchor=(0,0,0))\n b_target = []\n for sens in sensors:\n b_target.append(sens.getB(c))\n # print(b_target)\n\n fig.clear()\n ax1 = fig.add_subplot(121, projection='3d') # 3D-axis\n ax2 = fig.add_subplot(122) # 2D-axis\n\n Bs = c.getB(POS).reshape(44,33,3) #<--VECTORIZED\n X,Z = np.meshgrid(xs,zs)\n U,V = Bs[:,:,0], Bs[:,:,2]\n ax2.streamplot(X, Z, U, V, color=np.log(U**2+V**2))\n\n def func(x):\n c = Collection(gen_magnets())\n c.rotate(x[0],(1,0,0), anchor=(0,0,0))\n c.rotate(x[1],(0,1,0), anchor=(0,0,0))\n c.rotate(x[2],(0,0,1), anchor=(0,0,0))\n b_error = 0\n i = 0\n for sens in sensors:\n b_error = b_error + np.linalg.norm(sens.getB(c)-b_target[i])\n i=i+1\n # print(b_error)\n return [b_error,b_error,b_error]\n\n res = least_squares(func, [0,0,0], bounds = ((-360,-360,-360), (360, 360, 360)))\n angle_error = ((rot[0]-res.x[0])**2+(rot[1]-res.x[1])**2+(rot[2]-res.x[2])**2)**0.5\n print(\"iteration (%d/%d) target %.3f %.3f %.3f result %.3f %.3f %.3f b-field error %.3f, angle_error %.3f\"%(iter,iterations,rot[0],rot[1],rot[2],res.x[0],res.x[1],res.x[2],res.cost,angle_error))\n c = Collection(gen_magnets())\n c.rotate(rot[0],(1,0,0), anchor=(0,0,0))\n c.rotate(rot[1],(0,1,0), anchor=(0,0,0))\n 
c.rotate(rot[2],(0,0,1), anchor=(0,0,0))\n result = Collection(gen_magnets())\n result.rotate(res.x[0],(1,0,0), anchor=(0,0,0))\n result.rotate(res.x[1],(0,1,0), anchor=(0,0,0))\n result.rotate(res.x[2],(0,0,1), anchor=(0,0,0))\n d = Collection(c,result)\n displaySystem(d, subplotAx=ax1, suppress=True, sensors=sensors)\n if first:\n plt.show()\n first = False\n writer.grab_frame()\n"
] | [
[
"scipy.optimize.least_squares",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"numpy.log",
"numpy.array",
"numpy.meshgrid",
"numpy.linspace"
]
] |
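A toy version of the inverse problem solved in the magnet-pose script above: recover three rotation-like parameters by least-squares fitting against "measured" target readings within +/-360 degree bounds. The magpylib sensors and magnets are replaced here by a simple stand-in model, so only the scipy.optimize.least_squares usage pattern is illustrated.

import numpy as np
from scipy.optimize import least_squares

def model(x):
    # Stand-in for "rotate the magnet collection and read the sensors".
    return np.array([np.sin(np.radians(x[0])), np.cos(np.radians(x[1])), x[2] / 360.0])

b_target = model(np.array([30.0, 45.0, 90.0]))

def residual(x):
    return model(x) - b_target

res = least_squares(residual, x0=[0.0, 0.0, 0.0],
                    bounds=([-360.0] * 3, [360.0] * 3))
print(res.x, res.cost)   # parameters reproducing the target readings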
niksell/phenotypes-prediction-using-genotypes-Master-Thesis | [
"c20b6ef89d0979d15266ad572c5aed56e28c4229"
] | [
"code/IO/Output.py"
] | [
"import os.path\nimport time\nimport numpy as np\nfrom DataStructure.PatientPhenotype import PatientPhenotype\nfrom DataStructure.Snp import Snp\n\nclass Output:\n \n def __init__(self,path,numberOfChromosomes):\n \n self.__path = path\n self.__numberOfChromosomes = numberOfChromosomes\n \n def writePatientsList(self,patients,kind):\n \n path = self.__path + kind\n \n try:\n write = open(path,'w')\n for patient in patients.keys():\n write.write(patient.strip() + '\\n')\n \n write.close()\n except Exception as x:\n print(\"error = \",x)\n write.close()\n \n \n def writeSnpsList(self,chromosomes):\n \n for i in range(self.__numberOfChromosomes):\n \n chro = 'chr'+str(i+1)\n try:\n path = self.__path + chro + 'snpList.txt'\n write = open(path,'w')\n\n for snp in chromosomes[chro].keys():\n write.write(snp.strip() + '\\n')\n\n write.close()\n except Exception as x:\n print(\"error = \",x)\n write.close()\n \n def writeSnpsUsed(self,snpsIds,idToName,chromosomes,name = None):\n \n if not name:\n print(\"give a name to file\")\n return\n \n path = self.__path + name + \" ( \" + time.strftime(\"%d-%m-%Y\") + \" ).txt \" \n \n i=1\n while os.path.exists(path):\n \n path = self.__path + name + \" ( \" + time.strftime(\"%d-%m-%Y\") + \" ) \" + '_' + str(i)+\".txt\"\n i += 1\n \n snps = []\n for i in snpsIds:\n snps.append(idToName[i])\n \n print(\"snpsIds = \",len(snpsIds))\n print(\"idToName = \",len(idToName))\n \n write = open(path,'w')\n try:\n for i in range(1,23):\n \n chro = 'chr'+str(i)\n chromList = chromosomes[chro]\n\n if len(list(set(chromList) - set(snps))) < len(chromList):\n write.write(\"chromosome\"+str(i)+'\\n')\n for j in snps:\n if j in chromosomes[chro]:\n write.write(j + '\\t' + chromosomes[chro][j][0] + '\\t' + chromosomes[chro][j][1] + '\\n')\n write.write('\\n')\n\n write.close()\n except Exception as x:\n print(\"error = \",x)\n write.close()\n \n def saveData(self,ids,patients,data,chroms = {}):\n \n self.__snpCodeLog(ids['patients']['idToName'],ids['snps']['idToName'],patients,data)\n \n def writeDf(self,n,m,chromosomes,ids,patients):\n \n X = np.zeros((n,m),dtype = int)\n \n for i in range(self.__numberOfChromosomes):\n \n chro = 'chr'+str(i+1)\n path = self.__path + chro +'.lgen'\n \n \n \n if os.path.exists(path):\n \n try:\n f = open(path,'r')\n \n for line in f:\n try:\n \n patient = line.split()[0].strip()\n snp = line.split()[2].strip()\n allele1 = line.split()[3].strip()\n allele2 = line.split()[4].strip()\n \n snpp = Snp(snp,allele1,allele2)\n snpp.setSnpCode(chromosomes[chro][snp][0],chromosomes[chro][snp][1])\n code = snpp.getSnpCode()\n \n p = ids['patients']['nameToId'][patient]\n s = ids['snps']['nameToId'][snp]\n \n X[p,s] = code\n \n except Exception as x:\n \n print(\"error1 = \",x)\n f.close()\n \n f.close()\n \n except Exception as x:\n print(\"error2 = \",x)\n f.close()\n \n print(\"x shape is \", X.shape)\n write = open(self.__path + 'snpCodeTest1.csv','w')\n \n write.write('patients,')\n \n for i in range(len(X.T)):\n \n s = ids['snps']['idToName'][i]\n write.write(s + ',')\n \n write.write('label' + '\\n')\n \n for i in range(len(X)):\n \n p = ids['patients']['idToName'][i]\n write.write(p + ',')\n \n for j in range(len(X.T)):\n \n s = ids['snps']['idToName'][j]\n write.write(str(X[i,j]) + ',')\n \n write.write(str(patients[p].getCase()) + '\\n')\n \n \n write.close()\n \n \n \n def __patientsLogFile(self,ids,patientKind):\n \n write = open(self.__path + patientKind + 'Ids.txt','w')\n \n write.write(str(len(ids['nameToId'])) + '\\n')\n \n for patient 
in ids['nameToId'].keys():\n \n write.write(patient.strip() + '\\t' + str(ids['nameToId'][patient]).strip() + '\\n')\n \n write.close()\n \n def __snpsLogFile(self,ids,chroms):\n \n if len(chroms.keys()) > 0:\n \n write = open(self.__path + 'SnpsIds.txt','w')\n \n write.write(str(len(ids['nameToId'])) + '\\n')\n \n for chro in chroms.keys():\n \n for snp in chroms[chro].keys():\n write.write(snp.strip() + '\\t' + str(ids['nameToId'][snp.strip()]).strip() + '\\n')\n \n write.close()\n \n def __snpCodeLog(self,patientsIds,snpsIds,patients,data):\n \n write = open(self.__path + 'snpCode.txt','w')\n \n write.write(str(len(patientsIds)) + '\\n')\n write.write(str(len(snpsIds)) + '\\n')\n \n for i in range(len(data)):\n for j in range(len(data.T)):\n allele1 = patients[patientsIds[i]].getAllele1(snpsIds[j])\n allele2 = patients[patientsIds[i]].getAllele2(snpsIds[j])\n write.write(patientsIds[i].strip() + '\\t' + snpsIds[j].strip() + '\\t' + str(data[i,j]).strip() + '\\t' \n + allele1.strip() + '\\t' + allele2.strip() + '\\n')\n \n write.close()"
] | [
[
"numpy.zeros"
]
] |
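A minimal illustration of the id-mapping pattern that writeDf above relies on: translate (patient, SNP) string keys through nameToId dictionaries into row and column indices of a dense genotype matrix. The names, SNP ids, and codes below are invented.

import numpy as np

name_to_id_patients = {'P1': 0, 'P2': 1}
name_to_id_snps = {'rs1': 0, 'rs2': 1, 'rs3': 2}
X = np.zeros((len(name_to_id_patients), len(name_to_id_snps)), dtype=int)
# Each record is (patient, snp, genotype code), mirroring one parsed .lgen line.
for patient, snp, code in [('P1', 'rs2', 2), ('P2', 'rs3', 1)]:
    X[name_to_id_patients[patient], name_to_id_snps[snp]] = code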
HugoSenetaire/vaeac | [
"451d34dd4986c52f2f37c508f03ee3db9e7408d3"
] | [
"fashion_mnist_dropout01/model.py"
] | [
"from torch import nn\nfrom torch.optim import Adam\n\nfrom mask_generators import ImageMaskGenerator, DropoutMaskGenerator\nfrom nn_utils import ResBlock, MemoryLayer, SkipConnection\nfrom prob_utils import normal_parse_params, GaussianLoss\n\n\n# sampler from the model generative distribution\n# here we return mean of the Gaussian to avoid white noise\ndef sampler(params):\n return normal_parse_params(params).mean\n\n\ndef optimizer(parameters):\n return Adam(parameters, lr=2e-4)\n\n\nbatch_size = 16\n\nreconstruction_log_prob = GaussianLoss()\n\nmask_generator = DropoutMaskGenerator(rate=0.9)\n\n# improve train computational stability by dividing the loss\n# by this scale factor right before backpropagation\nvlb_scale_factor = 28 ** 2\nclass StupidLayer(nn.Module):\n\n def __init__(self):\n super(StupidLayer, self).__init__()\n\n def forward(self,x):\n return x[:,:,2:-2,2:-2]\n\ndef MLPBlock(dim):\n return SkipConnection(\n nn.BatchNorm2d(dim),\n nn.LeakyReLU(),\n nn.Conv2d(dim, dim, 1)\n )\n\nproposal_network = nn.Sequential(\n nn.Conv2d(2, 8, 1,padding=2), #28,28,8\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.AvgPool2d(2, 2), # 16, 16,8\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.AvgPool2d(2, 2), nn.Conv2d(8, 16, 1), # 8, 8, 16\n ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), # 8,8, 16?\n nn.AvgPool2d(2, 2), nn.Conv2d(16, 32, 1), # 4, 4, 32\n ResBlock(32, 16), ResBlock(32, 16),\n ResBlock(32, 16), ResBlock(32, 16),\n nn.AvgPool2d(2, 2), nn.Conv2d(32, 64, 1), # 2,2 64\n ResBlock(64, 32), ResBlock(64, 32),\n ResBlock(64, 32), ResBlock(64, 32),\n nn.AvgPool2d(2, 2), nn.Conv2d(64, 128, 1),\n MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128),\n)\n\nprior_network = nn.Sequential(\n MemoryLayer('#0'),\n nn.Conv2d(2, 8, 1, padding=2), # 28,28,8\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n MemoryLayer('#1'),\n nn.AvgPool2d(2, 2),# 16,16,8\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n MemoryLayer('#2'),\n nn.AvgPool2d(2, 2), nn.Conv2d(8, 16, 1),# 8,8,16\n ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8),\n MemoryLayer('#3'),\n nn.AvgPool2d(2, 2), nn.Conv2d(16, 32, 1), # 4,4 ,32\n ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16),\n MemoryLayer('#4'),\n nn.AvgPool2d(2, 2), nn.Conv2d(32, 64, 1), #2,2 64\n ResBlock(64, 32), ResBlock(64, 32),\n ResBlock(64, 32), ResBlock(64, 32),\n MemoryLayer('#5'),\n nn.AvgPool2d(2, 2), nn.Conv2d(64, 128, 1), #1,1,128\n MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128),\n)\n\ngenerative_network = nn.Sequential(\n nn.Conv2d(64, 64, 1),\n MLPBlock(64), MLPBlock(64), MLPBlock(64), MLPBlock(64),\n nn.Conv2d(64, 32, 1), nn.Upsample(scale_factor=2),\n # MemoryLayer('#7', True), nn.Conv2d(384, 128, 1),\n # ResBlock(128, 64), ResBlock(128, 64),\n # ResBlock(128, 64), ResBlock(128, 64),\n # nn.Conv2d(128, 64, 1), nn.Upsample(scale_factor=2),\n # MemoryLayer('#6', True), nn.Conv2d(192, 64, 1),\n # ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32),\n # nn.Conv2d(64, 32, 1), nn.Upsample(scale_factor=2),\n MemoryLayer('#5', True), nn.Conv2d(96, 32, 1),\n ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16),\n nn.Conv2d(32, 16, 1), nn.Upsample(scale_factor=2),\n MemoryLayer('#4', True), nn.Conv2d(48, 16, 1),\n ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8),\n nn.Conv2d(16, 8, 1), nn.Upsample(scale_factor=2),\n MemoryLayer('#3', True), 
nn.Conv2d(24, 8, 1),\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.Upsample(scale_factor=2),\n MemoryLayer('#2', True), nn.Conv2d(16, 8, 1),\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.Upsample(scale_factor=2), #32,32,8\n\n # nn.Conv2dTranspose(8,8,stride=2,padding=1) \n MemoryLayer('#1', True), nn.Conv2d(16, 8, 1),\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n StupidLayer(),\n MemoryLayer('#0', True), nn.Conv2d(10, 8, 1),\n ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),\n nn.Conv2d(8, 2, 1),\n\n)\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.optim.Adam",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.AvgPool2d",
"torch.nn.LeakyReLU"
]
] |
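A self-contained sketch of the MLPBlock pattern in the VAEAC model file above (a skip connection wrapped around BatchNorm, LeakyReLU, and a 1x1 convolution). SkipConnection below is a stand-in that assumes the repo's nn_utils.SkipConnection performs a residual add, which is not verified from this excerpt.

import torch
from torch import nn

class SkipConnection(nn.Module):
    # Assumed behaviour: add the input back onto the wrapped layers' output.
    def __init__(self, *layers):
        super().__init__()
        self.inner = nn.Sequential(*layers)

    def forward(self, x):
        return x + self.inner(x)

def mlp_block(dim):
    return SkipConnection(nn.BatchNorm2d(dim), nn.LeakyReLU(), nn.Conv2d(dim, dim, 1))

x = torch.randn(2, 128, 1, 1)
y = mlp_block(128)(x)   # residual 1x1-conv block; shape stays (2, 128, 1, 1)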
ncduy0303/fairseq | [
"a086afb15b7d1737cd98831e975fd21b14ef6b07"
] | [
"fairseq/modules/conformer_layer.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport torch\nfrom typing import Optional\nfrom fairseq.modules import (\n LayerNorm,\n MultiheadAttention,\n ESPNETMultiHeadedAttention,\n RelPositionMultiHeadedAttention,\n RotaryPositionMultiHeadedAttention,\n)\nfrom fairseq.utils import get_activation_fn\n\n\nclass ConvolutionModule(torch.nn.Module):\n \"\"\"Convolution block used in the conformer block\"\"\"\n\n def __init__(\n self,\n embed_dim,\n channels,\n depthwise_kernel_size,\n dropout,\n activation_fn=\"swish\",\n bias=False,\n export=False,\n ):\n \"\"\"\n Args:\n embed_dim: Embedding dimension\n channels: Number of channels in depthwise conv layers\n depthwise_kernel_size: Depthwise conv layer kernel size\n dropout: dropout value\n activation_fn: Activation function to use after depthwise convolution kernel\n bias: If bias should be added to conv layers\n export: If layernorm should be exported to jit\n \"\"\"\n super(ConvolutionModule, self).__init__()\n assert (\n depthwise_kernel_size - 1\n ) % 2 == 0, \"kernel_size should be a odd number for 'SAME' padding\"\n self.layer_norm = LayerNorm(embed_dim, export=export)\n self.pointwise_conv1 = torch.nn.Conv1d(\n embed_dim,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.glu = torch.nn.GLU(dim=1)\n self.depthwise_conv = torch.nn.Conv1d(\n channels,\n channels,\n depthwise_kernel_size,\n stride=1,\n padding=(depthwise_kernel_size - 1) // 2,\n groups=channels,\n bias=bias,\n )\n self.batch_norm = torch.nn.BatchNorm1d(channels)\n self.activation = get_activation_fn(activation_fn)(channels)\n self.pointwise_conv2 = torch.nn.Conv1d(\n channels,\n embed_dim,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.dropout = torch.nn.Dropout(dropout)\n\n def forward(self, x):\n \"\"\"\n Args:\n x: Input of shape B X T X C\n Returns:\n Tensor of shape B X T X C\n \"\"\"\n x = self.layer_norm(x)\n # exchange the temporal dimension and the feature dimension\n x = x.transpose(1, 2)\n\n # GLU mechanism\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\n x = self.glu(x) # (batch, channel, dim)\n\n # 1D Depthwise Conv\n x = self.depthwise_conv(x)\n x = self.batch_norm(x)\n x = self.activation(x)\n\n x = self.pointwise_conv2(x)\n x = self.dropout(x)\n return x.transpose(1, 2)\n\n\nclass FeedForwardModule(torch.nn.Module):\n \"\"\"Positionwise feed forward layer used in conformer\"\"\"\n\n def __init__(\n self,\n input_feat,\n hidden_units,\n dropout1,\n dropout2,\n activation_fn=\"swish\",\n bias=True,\n ):\n \"\"\"\n Args:\n input_feat: Input feature dimension\n hidden_units: Hidden unit dimension\n dropout1: dropout value for layer1\n dropout2: dropout value for layer2\n activation_fn: Name of activation function\n bias: If linear layers should have bias\n \"\"\"\n\n super(FeedForwardModule, self).__init__()\n self.layer_norm = LayerNorm(input_feat)\n self.w_1 = torch.nn.Linear(input_feat, hidden_units, bias=bias)\n self.w_2 = torch.nn.Linear(hidden_units, input_feat, bias=bias)\n self.dropout1 = torch.nn.Dropout(dropout1)\n self.dropout2 = torch.nn.Dropout(dropout2)\n self.activation = get_activation_fn(activation_fn)(hidden_units)\n\n def forward(self, x):\n \"\"\"\n Args:\n x: Input Tensor of shape T X B X C\n Returns:\n Tensor of shape T X B X C\n \"\"\"\n x = self.layer_norm(x)\n x = self.w_1(x)\n x = self.activation(x)\n x = self.dropout1(x)\n x = 
self.w_2(x)\n return self.dropout2(x)\n\n\nclass ConformerEncoderLayer(torch.nn.Module):\n \"\"\"Conformer block based on https://arxiv.org/abs/2005.08100. We currently don't support relative positional encoding in MHA\"\"\"\n\n def __init__(\n self,\n embed_dim,\n ffn_embed_dim,\n attention_heads,\n dropout,\n use_fp16,\n depthwise_conv_kernel_size=31,\n activation_fn=\"swish\",\n attn_type=None,\n pos_enc_type=\"abs\",\n ):\n \"\"\"\n Args:\n embed_dim: Input embedding dimension\n ffn_embed_dim: FFN layer dimension\n attention_heads: Number of attention heads in MHA\n dropout: dropout value\n depthwise_conv_kernel_size: Size of kernel in depthwise conv layer in convolution module\n activation_fn: Activation function name to use in convulation block and feed forward block\n attn_type: MHA implementation from ESPNET vs fairseq\n pos_enc_type: Positional encoding type - abs, rope, rel_pos\n \"\"\"\n self.pos_enc_type = pos_enc_type\n super(ConformerEncoderLayer, self).__init__()\n\n self.ffn1 = FeedForwardModule(\n embed_dim,\n ffn_embed_dim,\n dropout,\n dropout,\n )\n\n self.self_attn_layer_norm = LayerNorm(embed_dim, export=False)\n self.self_attn_dropout = torch.nn.Dropout(dropout)\n if attn_type == \"espnet\":\n if self.pos_enc_type == \"rel_pos\":\n self.self_attn = RelPositionMultiHeadedAttention(\n embed_dim,\n attention_heads,\n dropout=dropout,\n )\n elif self.pos_enc_type == \"rope\":\n self.self_attn = RotaryPositionMultiHeadedAttention(\n embed_dim, attention_heads, dropout=dropout, precision=use_fp16\n )\n elif self.pos_enc_type == \"abs\":\n self.self_attn = ESPNETMultiHeadedAttention(\n embed_dim,\n attention_heads,\n dropout=dropout,\n )\n else:\n raise Exception(f\"Unsupported attention type {self.pos_enc_type}\")\n else:\n # Default to fairseq MHA\n self.self_attn = MultiheadAttention(\n embed_dim,\n attention_heads,\n dropout=dropout,\n )\n\n self.conv_module = ConvolutionModule(\n embed_dim=embed_dim,\n channels=embed_dim,\n depthwise_kernel_size=depthwise_conv_kernel_size,\n dropout=dropout,\n activation_fn=activation_fn,\n )\n\n self.ffn2 = FeedForwardModule(\n embed_dim,\n ffn_embed_dim,\n dropout,\n dropout,\n activation_fn=activation_fn,\n )\n self.final_layer_norm = LayerNorm(embed_dim, export=False)\n\n def forward(\n self,\n x,\n encoder_padding_mask: Optional[torch.Tensor],\n position_emb: Optional[torch.Tensor] = None,\n ):\n \"\"\"\n Args:\n x: Tensor of shape T X B X C\n encoder_padding_mask: Optional mask tensor\n positions:\n Returns:\n Tensor of shape T X B X C\n \"\"\"\n residual = x\n x = self.ffn1(x)\n x = x * 0.5 + residual\n residual = x\n x = self.self_attn_layer_norm(x)\n if self.pos_enc_type == \"rel_pos\":\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask,\n pos_emb=position_emb,\n need_weights=False,\n )\n else:\n x, attn = self.self_attn(\n query=x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask,\n need_weights=False,\n )\n x = self.self_attn_dropout(x)\n x = x + residual\n\n residual = x\n # TBC to BTC\n x = x.transpose(0, 1)\n x = self.conv_module(x)\n # BTC to TBC\n x = x.transpose(0, 1)\n x = residual + x\n\n residual = x\n x = self.ffn2(x)\n x = x * 0.5 + residual\n\n x = self.final_layer_norm(x)\n return x, attn\n\n\nclass ConformerWav2Vec2EncoderLayer(ConformerEncoderLayer):\n \"\"\"Encoder layer for Wav2vec2 encoder\"\"\"\n\n def forward(\n self,\n x: torch.Tensor,\n self_attn_mask: torch.Tensor = None,\n self_attn_padding_mask: torch.Tensor = None,\n need_weights: bool = 
False,\n att_args=None,\n position_emb=None,\n ):\n return super().forward(x, self_attn_padding_mask, position_emb)\n"
] | [
[
"torch.nn.Linear",
"torch.nn.BatchNorm1d",
"torch.nn.Conv1d",
"torch.nn.Dropout",
"torch.nn.GLU"
]
] |
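A minimal usage sketch for the `ConformerEncoderLayer` above, assuming fairseq is installed and that the class lives at `fairseq.modules.conformer_layer` (the module path and the example sizes are assumptions, not taken from the source). With `attn_type=None` the layer falls back to fairseq's `MultiheadAttention`, and inputs follow the `T x B x C` convention documented in `forward`:

```python
# Hypothetical usage of the layer defined above; import path and sizes are assumed.
import torch
from fairseq.modules.conformer_layer import ConformerEncoderLayer  # path assumed

layer = ConformerEncoderLayer(
    embed_dim=256,
    ffn_embed_dim=1024,
    attention_heads=4,
    dropout=0.1,
    use_fp16=False,                 # only consulted by the rotary ("rope") attention
    depthwise_conv_kernel_size=31,
    attn_type=None,                 # default: fairseq MultiheadAttention
    pos_enc_type="abs",
)

x = torch.randn(50, 8, 256)                           # (T, B, C)
padding_mask = torch.zeros(8, 50, dtype=torch.bool)   # True marks padded positions
out, attn = layer(x, encoder_padding_mask=padding_mask)
print(out.shape)                                      # torch.Size([50, 8, 256])
```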
robertmacdavid/approx-upf | [
"3f6da80226f94b175afe0c9d463fa38abfd743b9"
] | [
"python/lookup_tables.py"
] | [
"from typing import List, Callable, Dict, Tuple, Union\n\nimport math\nimport matplotlib.pyplot as plt\n\n\nclass ApproxMultiplicationTable:\n \"\"\"\n Multiplication done using a lookup table instead of a math unit\n \"\"\"\n table_entries: Dict[Tuple[int, int], int]\n num_significant_bits: int\n\n def __init__(self, num_significant_bits: int, unbiasing: float = 0.5):\n \"\"\"\n Create a lookup table that approximately multiplies pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n \"\"\"\n self.num_significant_bits = num_significant_bits\n self.table_entries = {}\n # Populate the lookup table\n for i in range(1 << num_significant_bits):\n for j in range(1 << num_significant_bits):\n # i and j will be rounded versions of more precise numbers.\n # To unbias the rounding error, we offset i and j slightly before dividing them\n value: int = round((i + unbiasing) * (j + unbiasing))\n self.table_entries[(i, j)] = value\n\n def compute(self, a: int, b: int) -> int:\n assert a > 0 and b > 0\n # the exponent can be computed in tofino using TCAM lookup tables. If the operands are 32 bits,\n # the lookup tables will have 32 entries\n exponent: int = max(a.bit_length(), b.bit_length())\n rshift: int = max(exponent - self.num_significant_bits, 0)\n i = a >> rshift\n j = b >> rshift\n value = self.table_entries[(i, j)]\n return value << (2 * rshift)\n\n def table_size(self) -> int:\n return len(self.table_entries)\n\n\nclass ApproxDivisionTable:\n \"\"\"\n Division done using a lookup table instead of a math unit\n \"\"\"\n table_entries: Dict[Tuple[int, int], Tuple[int, int]]\n num_significant_bits: int\n MIN_LOOKUP_ENTRY = 2 ** -16 # lookup entries smaller than this will be rounded down to 0\n\n def __init__(self, num_significant_bits: int, unbiasing: float = 0.5, lookup_value_mantissa_bits: int = 8):\n \"\"\"\n Create a lookup table that approximately divides pairs of positive integers\n :param num_significant_bits: number of bits to preserve when approximating operands.\n Lookup table size will be 2 ** (2 * num_significant bits), so recommended values are <=8\n :param unbiasing: a value in the range [0,1) that is used to unbias lookup table error\n :param lookup_value_mantissa_bits: significant bits of division results stored in the lookup table\n \"\"\"\n self.num_significant_bits = num_significant_bits\n self.table_entries = {}\n # populate the lookup table\n for i in range(1 << num_significant_bits):\n for j in range(1 << num_significant_bits):\n # i and j will be rounded versions of more precise numbers.\n # To unbias the rounding error, we offset i and j slightly before dividing them\n value = (i + unbiasing) / (j + unbiasing)\n exp: int\n mantissa: int\n if value < self.MIN_LOOKUP_ENTRY:\n exp = 0\n mantissa = 0\n else:\n exp = math.floor(math.log(value, 2)) - lookup_value_mantissa_bits + 1\n mantissa = round(value * 2 ** (-exp))\n self.table_entries[(i, j)] = (mantissa, exp)\n\n def compute(self, a: int, b: int) -> float:\n assert a > 0 and b > 0\n\n exponent: int = max(a.bit_length(), b.bit_length())\n rshift: int = exponent - self.num_significant_bits\n i = a >> rshift\n j = b >> rshift\n\n mantissa, exponent = self.table_entries[(i, j)]\n\n return mantissa * (2 ** exponent)\n\n def table_size(self) -> int:\n return len(self.table_entries)\n\n\ndef 
plot_relative_error(a_vals: List[int], b_vals: List[int],\n true_func: Callable[[int, int], float],\n lookup: Union[ApproxMultiplicationTable, ApproxDivisionTable]):\n fig, ax = plt.subplots()\n\n ax.set_title(\"Relative error for %s with %d entries\" % (type(lookup).__name__, lookup.table_size()))\n ax.set_ylabel(\"Relative error (0.1 = 10%)\")\n ax.set_xlabel(\"Input a to f(a,b)\")\n\n for b in b_vals:\n errors = []\n for a in a_vals:\n approx_result = lookup.compute(a, b)\n true_result = true_func(a, b)\n error = (approx_result - true_result) / true_result\n errors.append(error)\n\n line, = ax.plot(a_vals, errors, label=\"%d\" % b, linewidth=1.0)\n\n ax.legend(title=\"Input b to f(a,b)\")\n plt.show()\n\n\ndef main():\n a_vals = [i for i in range(100000, 500000)]\n b_vals = [j for j in range(100000, 500000, 100000)]\n mult_lookup = ApproxMultiplicationTable(num_significant_bits=7)\n div_loookup = ApproxDivisionTable(num_significant_bits=7)\n plot_relative_error(a_vals, b_vals, lambda a, b: a * b, mult_lookup)\n plot_relative_error(a_vals, b_vals, lambda a, b: a / b, div_loookup)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
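The two lookup-table classes above expose a small `compute(a, b)` API; a quick sanity check of their relative error against exact arithmetic might look like this (the `lookup_tables` import path mirrors the file path shown and is otherwise an assumption):

```python
# Sanity check of the approximate multiply/divide tables defined above.
from lookup_tables import ApproxMultiplicationTable, ApproxDivisionTable

mult = ApproxMultiplicationTable(num_significant_bits=7)   # 2**14 = 16384 entries
div = ApproxDivisionTable(num_significant_bits=7)

a, b = 123_456, 98_765
approx_prod = mult.compute(a, b)
print(f"mul: approx={approx_prod} exact={a * b} "
      f"rel_err={(approx_prod - a * b) / (a * b):+.4%}")

approx_quot = div.compute(a, b)
print(f"div: approx={approx_quot:.4f} exact={a / b:.4f} "
      f"rel_err={(approx_quot - a / b) / (a / b):+.4%}")
```

Both operands are truncated to the same number of significant bits before the table lookup, so the result is exact in the exponent and approximate only in the retained mantissa bits.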
A-Quarter-Mile/Muskits | [
"60d80727d2ec6b8ec405502d67796e8df319ea82"
] | [
"muskit/layers/conformer/convolution.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2020 Johns Hopkins University (Shinji Watanabe)\n# Northwestern Polytechnical University (Pengcheng Guo)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"ConvolutionModule definition.\"\"\"\n\nfrom torch import nn\n\n\nclass ConvolutionModule(nn.Module):\n \"\"\"ConvolutionModule in Conformer model.\n Args:\n channels (int): The number of channels of conv layers.\n kernel_size (int): Kernerl size of conv layers.\n \"\"\"\n\n def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):\n \"\"\"Construct an ConvolutionModule object.\"\"\"\n super(ConvolutionModule, self).__init__()\n # kernerl_size should be a odd number for 'SAME' padding\n assert (kernel_size - 1) % 2 == 0\n\n self.pointwise_conv1 = nn.Conv1d(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.depthwise_conv = nn.Conv1d(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n groups=channels,\n bias=bias,\n )\n self.norm = nn.BatchNorm1d(channels)\n self.pointwise_conv2 = nn.Conv1d(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.activation = activation\n\n def forward(self, x):\n \"\"\"Compute convolution module.\n Args:\n x (torch.Tensor): Input tensor (#batch, time, channels).\n Returns:\n torch.Tensor: Output tensor (#batch, time, channels).\n \"\"\"\n # exchange the temporal dimension and the feature dimension\n x = x.transpose(1, 2)\n\n # GLU mechanism\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\n\n # 1D Depthwise Conv\n x = self.depthwise_conv(x)\n x = self.activation(self.norm(x))\n\n x = self.pointwise_conv2(x)\n\n return x.transpose(1, 2)\n"
] | [
[
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.nn.functional.glu",
"torch.nn.BatchNorm1d"
]
] |
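A short smoke test for the `ConvolutionModule` above, assuming a Muskits installation where the import path mirrors the file path shown; the kernel size must be odd so that 'SAME' padding works out:

```python
# Minimal forward pass through the Conformer convolution module defined above.
import torch
from muskit.layers.conformer.convolution import ConvolutionModule  # path assumed

conv = ConvolutionModule(channels=144, kernel_size=31)  # (kernel_size - 1) % 2 == 0
x = torch.randn(4, 100, 144)                            # (#batch, time, channels)
y = conv(x)
print(y.shape)                                          # torch.Size([4, 100, 144])
```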
kunal-mulki/Materials | [
"b76bba123002972e4063b9b24cd5dc3d980e16e9"
] | [
"Code/Python/bootcamp_examples.py"
] | [
"\"\"\"\nExamples for Data Bootcamp course (data input and graphics)\n\n**Warning**\nWeb data access will change in the near future, when Pandas spins\noff the web access tools into a new package.\nhttp://pandas.pydata.org/pandas-docs/stable/remote_data.html\n\nRepository of materials (including this file):\n* https://github.com/NYUDataBootcamp/Materials\n\nWritten by Dave Backus, November 2015\nCreated with Python 3.5\n\"\"\"\n\"\"\"\nCheck versions (ignore this)\n\"\"\"\nimport pandas as pd # the data package\nimport sys\n\nprint('\\nPython version:', sys.version)\nprint('Pandas version: ', pd.__version__)\n\n#%%\n\"\"\"\nExample: World Bank country indicators\n* NY.GDP.PCAP.PP.KD = gdp per capita\n* NY.GDP.MKTP.PP.KD = gdp\n* SE.ADT.LITR.ZS = adult literacy (%)\n* SP.DYN.LE00.IN = life expectancy\n* IT.CEL.SETS.P2 = cell phone penetration (per 100)\nSee: http://data.worldbank.org/\n\"\"\"\n# load packages (redundancy is ok)\nimport pandas as pd # data management tools\nfrom pandas.io import wb # World Bank api\nimport matplotlib.pyplot as plt # plotting tools\n\n# variable list\nvar = ['NY.GDP.PCAP.PP.KD', 'NY.GDP.MKTP.PP.KD', 'SP.DYN.LE00.IN']\n# country list (ISO codes)\niso = ['USA', 'FRA', 'JPN', 'CHN', 'IND', 'BRA', 'MEX']\nyear = 2013\ndf = wb.download(indicator=var, country=iso, start=year, end=year)\n\n# massage data\ndf = df.reset_index(level='year', drop=True)\ndf.columns = ['gdppc', 'gdp', 'le'] # rename variables\ndf['gdp'] = df['gdp']/10**12 # convert to trillions\ndf['gdppc'] = df['gdppc']/10**3 # convert to thousands\ndf['order'] = [5, 3, 1, 4, 2, 6, 0] # reorder countries\ndf = df.sort(columns='order', ascending=False)\n\n# GDP bar chart\nax = df['gdp'].plot(kind='barh', alpha=0.5)\nax.set_title('GDP', loc='left', fontsize=14)\nax.set_xlabel('Trillions of US Dollars')\nax.set_ylabel('')\n\n#%%\n# ditto for GDP per capita (per person)\nax = df['gdppc'].plot(kind='barh', color='m', alpha=0.5)\nax.set_title('GDP Per Capita', loc='left', fontsize=14)\nax.set_xlabel('Thousands of US Dollars')\nax.set_ylabel('')\n\n#%%\n# scatterplot of life expectancy vs gdp per capita\nplt.scatter(df['gdppc'], df['le'], s=50*df['gdp'],\n cmap=plt.get_cmap(name='Spectral'), alpha=0.5) # cmap irrelevant\nplt.title('Life expectancy vs. 
GDP per capita', loc='left', fontsize=14)\nplt.xlabel('GDP Per Capita')\nplt.ylabel('Life Expectancy')\n#plt.annotate(x=iso, xy=(df['gdppc'], df['le']))\n\n#%%\n\"\"\"\nExample: US GDP and GDP growth from FRED\n\"\"\"\nimport pandas.io.data as web # web interface for FRED\nimport datetime as dt # handles dates\nimport matplotlib.pyplot as plt # plotting\n\nfred_series = ['GDPC1'] # the real GDP code for FRED\nstart_date = dt.datetime(1960, 1, 1)\nfred = web.DataReader(fred_series, 'fred', start_date)/10**3 # convert to trillions of USD\n# print last 3 data points to see what we've got (quarterly data)\nprint(fred.tail(3))\n\n# plot GDP over time\nax = fred.plot(legend=False)\nax.set_title('US Real GDP', fontsize=14, loc='left')\nax.set_xlabel('')\nax.set_ylabel('Trillions of Chained US Dollars')\nax.legend().set_visible(False)\n\n#%%\n# quarterly growth rates\ng = fred.pct_change()*400 # 400 makes this an annual percentage\nprint(g.tail(3))\n# change label\ng.columns = ['US GDP Growth']\ngbar = g.mean()\n\n# plot growth rates\nstart = dt.datetime(1985, 1, 1)\nend = g.index[-1]\nax = g[g.index >= start].plot(kind='line')\nax.set_title('US Real GDP Growth', fontsize=14, loc='left')\nax.hlines(y=gbar, xmin=start, xmax=end)\nax.hlines(y=0, xmin=start, xmax=end, linestyles='dashed')\nax.legend().set_visible(False)\n\n#%%\n\"\"\"\nExample: US economic indicators (monthly data from FRED)\n* INDPRO: industrial production\n* PAYEMS: nonfarm employment\n* AWHMAN: average weekly hours worked in manufacturing\n* PERMIT: premits for new housing\n* NAPM: purchasing managers index\n\"\"\"\nimport pandas.io.data as web # web interface with FRED\nimport pandas as pd # data manipulation\nimport datetime as dt # handles dates\n\n# list of indicators (FRED codes)\nindicators = ['INDPRO', 'PAYEMS', 'AWHMAN', 'PERMIT', 'NAPM']\nstart_date = dt.datetime(1970, 1, 1)\ninds = web.DataReader(indicators, \"fred\", start_date)\nprint(inds.tail(3))\n\n# yoy growth rates\ng = inds.pct_change(periods=12).dropna()\n# standardize\ng_std = (g - g.mean()) / g.std()\n\n# plot\nax = g_std.plot()\nax.set_title('Various economic indicators', fontsize=14, loc='left')\n#ax.set_ylabel('Standard deviations from mean')\nax.set_xlabel('')\nax.hlines(y=[-1, 0, 1], xmin=start_date, xmax=end, linestyles='dashed')\nax.legend().set_visible(False)\n\n#%%\n\"\"\"\nGovernment debt: IMF historical data\nThanks to Itamar Snir\n\"\"\"\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# data input\nexcelFilePath = '../Temp/Debt Database Fall 2013 Vintage.xlsx'\ndf = pd.read_excel(excelFilePath, sheetname=1, na_values=['…', '….', ''])\n #, index_col=-1, encoding='utf-8')\n\n#%%\n#get most recent year in the data (instead of 2013):\nmax_year = max(df.columns.values[4:].tolist())\n\n#get a list of the years for the x-axis values\nyears = [year for year in range(1980,max_year+1)]\n#get a list of the debt to GDP for the y-axis values\ndbt_greece = df[df.country=='Greece'][years]\ndbt_greece_list = dbt_greece.values.tolist()[0]\n#plot the data\nplt.plot(years,dbt_greece_list, color='red') #set graph color\nplt.ylabel('Debt to GDP')\nplt.title ('Greece Debt to GDP Between 1980 and '+ str(max_year))\nplt.show()\n\n#%%\n\"\"\"\nUS bond yields\nVideo?\n\"\"\"\n\n\n#%%\n\"\"\"\nExample: Stock prices from Yahoo finance (VIX)\n\"\"\"\nimport pandas as pd\nimport pandas.io.data as web\nimport datetime as dt\n\n# ticker\nticker = 'aapl'\ntoday = dt.date.today()\n#one_week = dt.timedelta(days=7)\n#start = today - one_week\nstart = dt.datetime(2000, 1, 
1)\nvix = web.DataReader(ticker, 'yahoo', start)\n\nax = vix['Close'].plot()\nax.set_xlabel('')\n\n#%%\n\"\"\"\nExample: Fama-French stock returns\n* xsm = excess return on market (market minus riskfree rate)\n* smb = return on small firms minus return on big firms\n* hml = return on high book-to-market firms minus low\n* rf = riskfree rate\nAll returns are monthly percentages\n\"\"\"\nimport pandas.io.data as web\n\nff = web.DataReader('F-F_Research_Data_factors', 'famafrench')[0]\nff.columns = ['xsm', 'smb', 'hml', 'rf']\n\nff.describe\n\n# plots of mean and std\nffbar = ff.mean()\nffstd = ff.std()\n\nff.plot(kind='kde', subplots=True)\n\n#fig, ax = plt.\n#ffbar.plot(kind='barh', alpha=0.5)\n#plt.title('Mean returns', fontsize=14, loc='left')\n#\n#ffstd.plot(kind='barh', alpha=0.5)\n#plt.title('Standard deviation of returns', fontsize=14, loc='left')\n\n\n#%%\n\"\"\"\nExample: Stock options from Yahoo finance\nCurrently **broken**: asks for html5lib, which conflicts with Python 3.5\n\"\"\"\nimport pandas as pd\nimport pandas.io.data as web\nfrom pandas.io.data import Options\nimport datetime as dt\n#import matplotlib.pylab as plt\n\n# ticker\nticker = 'spy'\ntoday = dt.date.today()\none_week = dt.timedelta(days=7)\nstart = today - one_week\nstock = web.DataReader(ticker, 'yahoo', start)\n# take the last close (-1 is the last, 'Close' is the close)\natm = stock.ix[-1,'Close'] # the -1 takes the last observation\nprint('Stock price (at the money): ', atm)\n\n# get option prices for same ticker\noption = Options(ticker, 'yahoo')\nexpiry = dt.date(2016, 2, 19)\n#data_calls = option.get_call_data(expiry=expiry).dropna()\n#data_puts = option.get_put_data(expiry=expiry).dropna()\n\n# compute mid of bid and ask and arrange series for plotting\ncalls_bid = data_calls['Bid']\ncalls_ask = data_calls['Ask']\n\ncalls_strikes = data_calls['Strike']\ncalls_mid = (data_calls['Bid'] + data_calls['Ask'])/2\nputs_strikes = data_puts['Strike']\nputs_mid = (data_puts['Bid'] + data_puts['Ask'])/2\n\n# plot call and put prices v strike\nplt.plot(calls_strikes, calls_mid, 'r', lw=2, label='calls')\nplt.plot(puts_strikes, puts_mid, 'b', lw=2, label='puts')\n\n# prettify it\n#plt.axis([120, 250, 0, 50])\nplt.axvline(x=atm, color='k', linestyle='--', label='ATM')\nplt.legend(loc='best')\nplt.show()\n\n"
] | [
[
"pandas.io.data.Options",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.legend",
"pandas.read_excel",
"pandas.io.wb.download",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.plot",
"pandas.io.data.DataReader",
"matplotlib.pyplot.xlabel"
]
] |
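The script above relies on the old `pandas.io.data` / `pandas.io.wb` interfaces, which its own warning says were about to be spun off; the replacement package is `pandas-datareader`, so a rough modern equivalent of the FRED block might look like this (package availability and the continued existence of the `GDPC1` series are assumptions about the current ecosystem, not part of the original script):

```python
# Approximate modern rewrite of the FRED example above using pandas-datareader.
import datetime as dt
import pandas_datareader.data as web   # pip install pandas-datareader

start = dt.datetime(1960, 1, 1)
gdp = web.DataReader('GDPC1', 'fred', start) / 10**3   # trillions of chained USD
print(gdp.tail(3))

ax = gdp.plot(legend=False)
ax.set_title('US Real GDP', fontsize=14, loc='left')
ax.set_ylabel('Trillions of Chained US Dollars')
ax.set_xlabel('')
```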
Vincent34/mindspore | [
"fcb2ec2779b753e95c762cf292b23bd81d1f561b",
"fcb2ec2779b753e95c762cf292b23bd81d1f561b"
] | [
"tests/ut/python/dataset/test_vocab.py",
"model_zoo/official/cv/ctpn/src/ctpn.py"
] | [
"# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\n\nimport mindspore.dataset as ds\nimport mindspore.dataset.text as text\nimport mindspore.common.dtype as mstype\nfrom mindspore import log as logger\n\n# this file contains \"home is behind the world head\" each word is 1 line\nDATA_FILE = \"../data/dataset/testVocab/words.txt\"\nVOCAB_FILE = \"../data/dataset/testVocab/vocab_list.txt\"\nSIMPLE_VOCAB_FILE = \"../data/dataset/testVocab/simple_vocab_list.txt\"\n\n\ndef test_lookup_callable():\n \"\"\"\n Test lookup is callable\n \"\"\"\n logger.info(\"test_lookup_callable\")\n vocab = text.Vocab.from_list(['深', '圳', '欢', '迎', '您'])\n lookup = text.Lookup(vocab)\n word = \"迎\"\n assert lookup(word) == 3\n\ndef test_from_list_tutorial():\n vocab = text.Vocab.from_list(\"home IS behind the world ahead !\".split(\" \"), [\"<pad>\", \"<unk>\"], True)\n lookup = text.Lookup(vocab, \"<unk>\")\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n ind = 0\n res = [2, 1, 4, 5, 6, 7]\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert d[\"text\"] == res[ind], ind\n ind += 1\n\n\ndef test_from_file_tutorial():\n vocab = text.Vocab.from_file(VOCAB_FILE, \",\", None, [\"<pad>\", \"<unk>\"], True)\n lookup = text.Lookup(vocab)\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n ind = 0\n res = [10, 11, 12, 15, 13, 14]\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert d[\"text\"] == res[ind], ind\n ind += 1\n\n\ndef test_from_dict_tutorial():\n vocab = text.Vocab.from_dict({\"home\": 3, \"behind\": 2, \"the\": 4, \"world\": 5, \"<unk>\": 6})\n lookup = text.Lookup(vocab, \"<unk>\") # any unknown token will be mapped to the id of <unk>\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n res = [3, 6, 2, 4, 5, 6]\n ind = 0\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert d[\"text\"] == res[ind], ind\n ind += 1\n\n\ndef test_from_dict_exception():\n try:\n vocab = text.Vocab.from_dict({\"home\": -1, \"behind\": 0})\n if not vocab:\n raise ValueError(\"Vocab is None\")\n except ValueError as e:\n assert \"is not within the required interval\" in str(e)\n\n\ndef test_from_list():\n def gen(texts):\n for word in texts.split(\" \"):\n yield (np.array(word, dtype='S'),)\n\n def test_config(lookup_str, vocab_input, special_tokens, special_first, unknown_token):\n try:\n vocab = text.Vocab.from_list(vocab_input, special_tokens, special_first)\n data = ds.GeneratorDataset(gen(lookup_str), column_names=[\"text\"])\n data = data.map(operations=text.Lookup(vocab, unknown_token), input_columns=[\"text\"])\n res = []\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n 
res.append(d[\"text\"].item())\n return res\n except (ValueError, RuntimeError, TypeError) as e:\n return str(e)\n\n # test basic default config, special_token=None, unknown_token=None\n assert test_config(\"w1 w2 w3\", [\"w1\", \"w2\", \"w3\"], None, True, None) == [0, 1, 2]\n # test normal operations\n assert test_config(\"w1 w2 w3 s1 s2 ephemeral\", [\"w1\", \"w2\", \"w3\"], [\"s1\", \"s2\"], True, \"s2\") == [2, 3, 4, 0, 1, 1]\n assert test_config(\"w1 w2 w3 s1 s2\", [\"w1\", \"w2\", \"w3\"], [\"s1\", \"s2\"], False, \"s2\") == [0, 1, 2, 3, 4]\n assert test_config(\"w3 w2 w1\", [\"w1\", \"w2\", \"w3\"], None, True, \"w1\") == [2, 1, 0]\n assert test_config(\"w3 w2 w1\", [\"w1\", \"w2\", \"w3\"], None, False, \"w1\") == [2, 1, 0]\n # test unknown token lookup\n assert test_config(\"w1 un1 w3 un2\", [\"w1\", \"w2\", \"w3\"], [\"<pad>\", \"<unk>\"], True, \"<unk>\") == [2, 1, 4, 1]\n assert test_config(\"w1 un1 w3 un2\", [\"w1\", \"w2\", \"w3\"], [\"<pad>\", \"<unk>\"], False, \"<unk>\") == [0, 4, 2, 4]\n\n # test exceptions\n assert \"doesn't exist in vocab.\" in test_config(\"un1\", [\"w1\"], [], False, \"unk\")\n assert \"doesn't exist in vocab and no unknown token is specified.\" in test_config(\"un1\", [\"w1\"], [], False, None)\n assert \"doesn't exist in vocab\" in test_config(\"un1\", [\"w1\"], [], False, None)\n assert \"word_list contains duplicate\" in test_config(\"w1\", [\"w1\", \"w1\"], [], True, \"w1\")\n assert \"special_tokens contains duplicate\" in test_config(\"w1\", [\"w1\", \"w2\"], [\"s1\", \"s1\"], True, \"w1\")\n assert \"special_tokens and word_list contain duplicate\" in test_config(\"w1\", [\"w1\", \"w2\"], [\"s1\", \"w1\"], True, \"w1\")\n assert \"is not of type\" in test_config(\"w1\", [\"w1\", \"w2\"], [\"s1\"], True, 123)\n\n\ndef test_from_list_lookup_empty_string():\n # \"\" is a valid word in vocab, which can be looked up by LookupOp\n vocab = text.Vocab.from_list(\"home IS behind the world ahead !\".split(\" \"), [\"<pad>\", \"\"], True)\n lookup = text.Lookup(vocab, \"\")\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n ind = 0\n res = [2, 1, 4, 5, 6, 7]\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n assert d[\"text\"] == res[ind], ind\n ind += 1\n\n # unknown_token of LookUp is None, it will convert to std::nullopt in C++,\n # so it has nothing to do with \"\" in vocab and C++ will skip looking up unknown_token\n vocab = text.Vocab.from_list(\"home IS behind the world ahead !\".split(\" \"), [\"<pad>\", \"\"], True)\n lookup = text.Lookup(vocab)\n data = ds.TextFileDataset(DATA_FILE, shuffle=False)\n data = data.map(operations=lookup, input_columns=[\"text\"])\n try:\n for _ in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n pass\n except RuntimeError as e:\n assert \"token: \\\"is\\\" doesn't exist in vocab and no unknown token is specified\" in str(e)\n\n\ndef test_from_file():\n def gen(texts):\n for word in texts.split(\" \"):\n yield (np.array(word, dtype='S'),)\n\n def test_config(lookup_str, vocab_size, special_tokens, special_first):\n try:\n vocab = text.Vocab.from_file(SIMPLE_VOCAB_FILE, vocab_size=vocab_size, special_tokens=special_tokens,\n special_first=special_first)\n data = ds.GeneratorDataset(gen(lookup_str), column_names=[\"text\"])\n data = data.map(operations=text.Lookup(vocab, \"s2\"), input_columns=[\"text\"])\n res = []\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n 
res.append(d[\"text\"].item())\n return res\n except ValueError as e:\n return str(e)\n\n # test special tokens are prepended\n assert test_config(\"w1 w2 w3 s1 s2 s3\", None, [\"s1\", \"s2\", \"s3\"], True) == [3, 4, 5, 0, 1, 2]\n # test special tokens are appended\n assert test_config(\"w1 w2 w3 s1 s2 s3\", None, [\"s1\", \"s2\", \"s3\"], False) == [0, 1, 2, 8, 9, 10]\n # test special tokens are prepended when not all words in file are used\n assert test_config(\"w1 w2 w3 s1 s2 s3\", 3, [\"s1\", \"s2\", \"s3\"], False) == [0, 1, 2, 3, 4, 5]\n # text exception special_words contains duplicate words\n assert \"special_tokens contains duplicate\" in test_config(\"w1\", None, [\"s1\", \"s1\"], True)\n # test exception when vocab_size is negative\n assert \"Input vocab_size must be greater than 0\" in test_config(\"w1 w2\", 0, [], True)\n assert \"Input vocab_size must be greater than 0\" in test_config(\"w1 w2\", -1, [], True)\n\n\ndef test_lookup_cast_type():\n def gen(texts):\n for word in texts.split(\" \"):\n yield (np.array(word, dtype='S'),)\n\n def test_config(lookup_str, data_type=None):\n try:\n vocab = text.Vocab.from_list([\"w1\", \"w2\", \"w3\"], special_tokens=[\"<unk>\"], special_first=True)\n data = ds.GeneratorDataset(gen(lookup_str), column_names=[\"text\"])\n # if data_type is None, test the default value of data_type\n op = text.Lookup(vocab, \"<unk>\") if data_type is None else text.Lookup(vocab, \"<unk>\", data_type)\n data = data.map(operations=op, input_columns=[\"text\"])\n res = []\n for d in data.create_dict_iterator(num_epochs=1, output_numpy=True):\n res.append(d[\"text\"])\n return res[0].dtype\n except (ValueError, RuntimeError, TypeError) as e:\n return str(e)\n\n # test result is correct\n assert test_config(\"w1\", mstype.int8) == np.dtype(\"int8\")\n assert test_config(\"w2\", mstype.int32) == np.dtype(\"int32\")\n assert test_config(\"w3\", mstype.int64) == np.dtype(\"int64\")\n assert test_config(\"unk\", mstype.float32) != np.dtype(\"int32\")\n assert test_config(\"unk\") == np.dtype(\"int32\")\n # test exception, data_type isn't the correct type\n assert \"tldr is not of type [<class 'mindspore._c_expression.typing.Type'>]\" in test_config(\"unk\", \"tldr\")\n assert \"Lookup : The parameter data_type must be numeric including bool.\" in \\\n test_config(\"w1\", mstype.string)\n\n\nif __name__ == '__main__':\n test_lookup_callable()\n test_from_dict_exception()\n test_from_list_tutorial()\n test_from_file_tutorial()\n test_from_dict_tutorial()\n test_from_list()\n test_from_file()\n test_lookup_cast_type()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"CPTN network definition.\"\"\"\n\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor, Parameter\nfrom mindspore.common import dtype as mstype\nfrom mindspore.ops import operations as P\nfrom src.CTPN.rpn import RPN\nfrom src.CTPN.anchor_generator import AnchorGenerator\nfrom src.CTPN.proposal_generator import Proposal\nfrom src.CTPN.vgg16 import VGG16FeatureExtraction\n\nclass BiLSTM(nn.Cell):\n \"\"\"\n Define a BiLSTM network which contains two LSTM layers\n\n Args:\n config(EasyDict): config for ctpn network\n batch_size(int): batch size of input data, only support 1\n \"\"\"\n def __init__(self, config, batch_size):\n super(BiLSTM, self).__init__()\n self.batch_size = batch_size\n self.batch_size = self.batch_size * config.rnn_batch_size\n self.input_size = config.input_size\n self.hidden_size = config.hidden_size\n self.num_step = config.num_step\n self.reshape = P.Reshape()\n self.cast = P.Cast()\n k = (1 / self.hidden_size) ** 0.5\n self.rnn1 = P.DynamicRNN(forget_bias=0.0)\n self.rnn_bw = P.DynamicRNN(forget_bias=0.0)\n self.w1 = Parameter(np.random.uniform(-k, k, \\\n (self.input_size + self.hidden_size, 4 * self.hidden_size)).astype(np.float32), name=\"w1\")\n self.w1_bw = Parameter(np.random.uniform(-k, k, \\\n (self.input_size + self.hidden_size, 4 * self.hidden_size)).astype(np.float32), name=\"w1_bw\")\n\n self.b1 = Parameter(np.random.uniform(-k, k, (4 * self.hidden_size)).astype(np.float32), name=\"b1\")\n self.b1_bw = Parameter(np.random.uniform(-k, k, (4 * self.hidden_size)).astype(np.float32), name=\"b1_bw\")\n\n self.h1 = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))\n self.h1_bw = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))\n\n self.c1 = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))\n self.c1_bw = Tensor(np.zeros(shape=(1, self.batch_size, self.hidden_size)).astype(np.float32))\n self.reverse_seq = P.ReverseV2(axis=[0])\n self.concat = P.Concat()\n self.transpose = P.Transpose()\n self.concat1 = P.Concat(axis=2)\n self.dropout = nn.Dropout(0.7)\n self.use_dropout = config.use_dropout\n self.reshape = P.Reshape()\n self.transpose = P.Transpose()\n def construct(self, x):\n if self.use_dropout:\n x = self.dropout(x)\n x = self.cast(x, mstype.float16)\n bw_x = self.reverse_seq(x)\n y1, _, _, _, _, _, _, _ = self.rnn1(x, self.w1, self.b1, None, self.h1, self.c1)\n y1_bw, _, _, _, _, _, _, _ = self.rnn_bw(bw_x, self.w1_bw, self.b1_bw, None, self.h1_bw, self.c1_bw)\n y1_bw = self.reverse_seq(y1_bw)\n output = self.concat1((y1, y1_bw))\n return output\n\nclass CTPN(nn.Cell):\n \"\"\"\n Define CTPN network\n\n Args:\n config(EasyDict): config for ctpn network\n batch_size(int): batch size of input data, only support 1\n is_training(bool): whether training, default 
is True\n \"\"\"\n def __init__(self, config, batch_size, is_training=True):\n super(CTPN, self).__init__()\n self.config = config\n self.batch_size = batch_size\n self.num_step = config.num_step\n self.input_size = config.input_size\n self.hidden_size = config.hidden_size\n self.vgg16_feature_extractor = VGG16FeatureExtraction()\n self.conv = nn.Conv2d(512, 512, kernel_size=3, padding=0, pad_mode='same')\n self.rnn = BiLSTM(self.config, batch_size=self.batch_size).to_float(mstype.float16)\n self.reshape = P.Reshape()\n self.transpose = P.Transpose()\n self.cast = P.Cast()\n self.is_training = is_training\n\n # rpn block\n self.rpn_with_loss = RPN(config,\n self.batch_size,\n config.rpn_in_channels,\n config.rpn_feat_channels,\n config.num_anchors,\n config.rpn_cls_out_channels)\n self.anchor_generator = AnchorGenerator(config)\n self.featmap_size = config.feature_shapes\n self.anchor_list = self.get_anchors(self.featmap_size)\n self.proposal_generator_test = Proposal(config,\n self.batch_size,\n config.activate_num_classes,\n config.use_sigmoid_cls)\n self.proposal_generator_test.set_train_local(config, False)\n def construct(self, img_data, gt_bboxes, gt_labels, gt_valids, img_metas=None):\n x = self.vgg16_feature_extractor(img_data)\n x = self.conv(x)\n x = self.cast(x, mstype.float16)\n x = self.transpose(x, (0, 2, 1, 3))\n x = self.reshape(x, (-1, self.input_size, self.num_step))\n x = self.transpose(x, (2, 0, 1))\n x = self.rnn(x)\n rpn_loss, cls_score, bbox_pred, rpn_cls_loss, rpn_reg_loss = self.rpn_with_loss(x,\n img_metas,\n self.anchor_list,\n gt_bboxes,\n gt_labels,\n gt_valids)\n if self.training:\n return rpn_loss, cls_score, bbox_pred, rpn_cls_loss, rpn_reg_loss\n proposal, proposal_mask = self.proposal_generator_test(cls_score, bbox_pred, self.anchor_list)\n return proposal, proposal_mask\n\n def get_anchors(self, featmap_size):\n anchors = self.anchor_generator.grid_anchors(featmap_size)\n return Tensor(anchors, mstype.float16)\n\nclass CTPN_Infer(nn.Cell):\n def __init__(self, config, batch_size):\n super(CTPN_Infer, self).__init__()\n self.network = CTPN(config, batch_size=batch_size, is_training=False)\n self.network.set_train(False)\n\n def construct(self, img_data):\n output = self.network(img_data, None, None, None, None)\n return output\n"
] | [
[
"numpy.array",
"numpy.dtype"
],
[
"numpy.random.uniform",
"numpy.zeros"
]
] |
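Condensed from the vocab tests above, the basic `Vocab.from_list` + `Lookup` pattern (requires a MindSpore installation); with `special_first=True` the special tokens take ids 0 and 1 and the ordinary words follow:

```python
# Eager lookup of word ids, as exercised by the tests above.
import mindspore.dataset.text as text

vocab = text.Vocab.from_list(
    ["home", "behind", "the", "world"], ["<pad>", "<unk>"], True)
lookup = text.Lookup(vocab, "<unk>")   # unknown words map to the id of <unk>

print(lookup("world"))        # 5 -- <pad>=0, <unk>=1, then the word list
print(lookup("unseen-word"))  # 1 -- falls back to <unk>
```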
kngwyu/infomax-option-critic | [
"9d907c041c1d0280db9b23eb2fdf9e0033e33bf3"
] | [
"src/option_select_impl.py"
] | [
"\"\"\" Implemenation of uncertainty-aware option selection\n\"\"\"\n\n\nfrom abc import ABC, abstractmethod\nfrom typing import Tuple\n\nimport torch\n\nfrom torch import BoolTensor, LongTensor, Tensor\nfrom torch.distributions import Categorical\n\nfrom rainy.net.policy import BernoulliPolicy\n\n\ndef _debug_minmax(name: str, t: Tensor) -> None:\n print(f\"{name}: {t.max().item()}, {t.min().item()}\")\n\n\nclass OptionSelectImpl(ABC):\n worker_indices: Tensor\n EPS = 0.001\n INF = 1e9\n\n @abstractmethod\n def logmu_weight(self) -> float:\n pass\n\n def _logmu(self, qo: Tensor, logmu_o_xs: Tensor) -> Tensor:\n return qo - self.logmu_weight() * logmu_o_xs\n\n def _eval_sample_options(\n self, qo: Tensor, beta: BernoulliPolicy,\n ) -> Tuple[LongTensor, BoolTensor]:\n \"\"\"Sample options by ε-Greedy\n \"\"\"\n batch_size = qo.size(0)\n prev_options = self.eval_prev_options[:batch_size]\n current_beta = beta[self.worker_indices[:batch_size], prev_options]\n opt_terminals = current_beta.action().bool()\n use_new_options = self.eval_is_initial_states[:batch_size] | opt_terminals\n new_options = self.eval_opt_explorer.select_from_value(qo, same_device=True)\n options = torch.where(use_new_options, new_options, prev_options)\n return options, use_new_options\n\n def _sample_options(\n self, qo: Tensor, beta: BernoulliPolicy, mu_o_xs: Categorical,\n ) -> Tuple[LongTensor, BoolTensor]:\n \"\"\"\n Select new options.\n Returns options and booltensor that indicates which options ended.\n \"\"\"\n\n masks = self.storage.masks[-1]\n prev_options = self.prev_options\n current_beta = beta[self.worker_indices[: qo.size(0)], prev_options]\n opt_terminals = current_beta.action().bool()\n use_new_options = (1.0 - masks).bool() | opt_terminals\n # mask out current options\n opt_mask = torch.zeros_like(qo)\n opt_mask[self.worker_indices, prev_options] += opt_terminals * -self.INF\n if self.config.option_selector == \"epsg\":\n new_options = self.opt_explorer.select_from_value(\n qo + opt_mask, same_device=True\n )\n elif self.config.option_selector == \"logp\":\n new_options = self._logmu(qo + opt_mask, mu_o_xs.logits).argmax(-1)\n elif self.config.option_selector == \"epsg-logp\":\n value = self._logmu(qo + opt_mask, mu_o_xs.logits)\n new_options = self.opt_explorer.select_from_value(value, same_device=True)\n else:\n raise NotImplementedError(\n f\"Invalid option selector {self.config.opt_selector}\"\n )\n self.option_counter[new_options[use_new_options].cpu().numpy()] += 1\n options = torch.where(use_new_options, new_options, prev_options)\n return options, opt_terminals\n"
] | [
[
"torch.where",
"torch.zeros_like"
]
] |
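The selection rules in `_sample_options` boil down to masking the just-terminated option with a large negative value and scoring the rest by `Q(s, o) - w * log mu(o | x)`; a standalone sketch of that arithmetic (the concrete numbers and the weight are made up for illustration):

```python
# Standalone sketch of the "logp" option-selection rule used above.
import torch

INF = 1e9
logmu_weight = 0.1                                     # illustrative value

qo = torch.tensor([[0.3, 0.5, 0.4]])                   # Q-values, 3 options, batch of 1
logmu = torch.log(torch.tensor([[0.2, 0.5, 0.3]]))     # log mu(o | x)
prev_option = torch.tensor([1])
opt_terminated = torch.tensor([True])

opt_mask = torch.zeros_like(qo)
opt_mask[torch.arange(1), prev_option] += opt_terminated * -INF  # forbid re-picking it
score = (qo + opt_mask) - logmu_weight * logmu
print(score.argmax(-1))   # tensor([2]): option 1 is masked out, option 2 wins
```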
nathanielbunch/Nonstationary-Bandit-Problem-on-a-Quantum-Computer | [
"af9d4f508a42790249007d5237a2c0ee8b93e30a"
] | [
"Classical/Bandit Problem/k-armed-bandit_non_stationary.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport sys\n\nclass KBanditProblem:\n \n def __init__(self, k, stationary=True):\n self.k = k\n self.stationary = stationary\n self.values = np.random.normal(loc=0.0, scale=1, size=k)\n self.optimal = self.values.argmax() # this is called optimal becuase the things are random, and becuase it changes\n # over time, and every time a reqward is given, the distribution of rewards chnages \n # with the random reward. The optimal solution changes over time, thus it has to be \n # recalculated every time.\n \n def generate_reward(self, action):\n if not self.stationary:\n self.values += np.random.normal(loc=0.0, scale=0.01, size=self.k)\n self.optimal = self.values.argmax()\n return np.random.normal(loc=self.values[action], scale=1)\n\nclass KBanditSolution:\n \n def __init__(self, problem, steps):\n self.problem = problem\n self.steps = steps\n \n self.average_reward = 0\n self.average_rewards = np.array([])\n self.optimal_percentage = 0\n self.optimal_precentages = np.array([])\n \n def count_statistics(self, action, reward, step):\n self.average_reward += (1 / (step + 1)) * (reward - self.average_reward)\n self.optimal_percentage += (1 / (step + 1)) * ((1 if action == self.problem.optimal else 0) - self.optimal_percentage)\n self.average_rewards = np.append(self.average_rewards, self.average_reward)\n self.optimal_precentages = np.append(self.optimal_precentages, self.optimal_percentage)\n\nclass EGreedy(KBanditSolution):\n \n def solve(self, exploration_rate, initial_value):\n Q = {i: initial_value for i in range(k)} # 1. Value function\n N = {i: 0 for i in range(k)} # 2. Number of actions, for update rule\n rewards = []\n rewards_mean = []\n for i in range(self.steps): # 3. Main loop\n explore = random.uniform(0, 1) < exploration_rate # 4. Exploration\n if explore:\n action = random.randint(0, k - 1) # 5. Exploration: Choosing random action\n else:\n action = max(Q, key=Q.get) # 6. Choose action with maximum mean reward\n\n reward = self.problem.generate_reward(action) # 7. Get reward for current action\n rewards.append(reward)\n N[action] += 1 # 8. Update action number\n Q[action] += (1 / N[action]) * (reward - Q[action]) # 9. Update value dict \n if (i % 100 == 0):\n r_mean = np.mean(rewards[-100:])\n rewards_mean.append(r_mean)\n self.count_statistics(action, reward, i)\n return rewards_mean\n\n \n def plot_graph(self, values):\n plt.plot(values)\n plt.show()\n\nclass WeightedAverage(KBanditSolution):\n \n def solve(self, exploration_rate, step_size, initial_value):\n Q = {i: initial_value for i in range(k)} # 1. Value function\n N = {i: 0 for i in range(k)} # 2. Number of actions, for update rule\n\n for i in range(self.steps): # 3. Main loop\n explore = random.uniform(0, 1) < exploration_rate # 4. Exploration\n if explore:\n action = random.randint(0, k - 1) # 5. Exploration: Choosing random action\n else:\n action = max(Q, key=Q.get) # 6. Choose action with maximum mean reward\n\n reward = self.problem.generate_reward(action) # 7. Get reward for current action\n N[action] += 1 # 8. Update action number\n Q[action] += step_size * (reward - Q[action]) # 9. Update value dict \n self.count_statistics(action, reward, i)\n\nclass UCB(KBanditSolution):\n \n def count_ucb(self, q, c, step, n):\n if n == 0:\n return sys.maxsize\n return (q + (c * sqrt((log(step) / n))))\n \n def solve(self, c):\n Q = {i: 0 for i in range(k)} # 1. Value function \n N = {i: 0 for i in range(k)} # 2. 
Number of actions, for update rule\n\n for i in range(self.steps): # 3. Main loop\n Q_ucb = {i: self.count_ucb(Q[i], c, i + 1, N[i]) for i in range(k)} # 4. Count UCB\n action = max(Q_ucb, key=Q_ucb.get) # 5. Choose action with maximum UCB\n\n reward = self.problem.generate_reward(action) # 6. Get reward for current action\n N[action] += 1 # 7. Update action number\n Q[action] += (1 / N[action]) * (reward - Q[action]) # 8. Update value dict \n self.count_statistics(action, reward, i)\n \nk = 4\nsteps = 50000\nkb_problem = KBanditProblem(k, stationary=False)\n#kb_solution = KBanditSolution(kb_problem, steps)\negreedy_boi = EGreedy(kb_problem, steps)\nsolved = egreedy_boi.solve(0.01, 0)\negreedy_boi.plot_graph(solved)"
] | [
[
"numpy.append",
"matplotlib.pyplot.show",
"numpy.random.normal",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.mean"
]
] |
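The two solvers above differ mainly in their update rule; on a nonstationary (random-walk) target the sample-average rule used by `EGreedy` weights old rewards as heavily as new ones, while the constant step size used by `WeightedAverage` keeps tracking the drift. A small self-contained comparison of just the two updates:

```python
# Sample-average vs. constant-step-size value tracking on a drifting reward mean,
# mirroring the nonstationary setup above (numbers are illustrative).
import numpy as np

rng = np.random.default_rng(0)
true_value = 0.0
q_avg = q_const = 0.0
n, alpha = 0, 0.1

for _ in range(10_000):
    true_value += rng.normal(0.0, 0.01)      # random-walk drift, as in KBanditProblem
    reward = rng.normal(true_value, 1.0)
    n += 1
    q_avg += (1 / n) * (reward - q_avg)      # EGreedy-style update
    q_const += alpha * (reward - q_const)    # WeightedAverage-style update

print(f"true={true_value:+.2f}  sample-average={q_avg:+.2f}  constant-step={q_const:+.2f}")
```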
Alwaysproblem/examples-1 | [
"9754fa63ed1931489a21ac1f5b299f945e369a5c"
] | [
"applications/tensorflow/cnns/models/resnet.py"
] | [
"# Copyright (c) 2019 Graphcore Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom models.resnet_base import ResNet\n\nimport tensorflow.compat.v1 as tf\nimport tensorflow.contrib as contrib\nfrom tensorflow.python.ipu import normalization_ops\n\n# This is all written for: NHWC\n\n\nclass TensorflowResNet(ResNet):\n def __init__(self, *args, **kwargs):\n self.dtype = tf.float16\n super(TensorflowResNet, self).__init__(*args, **kwargs)\n\n def _get_variable(self, name, shape, init):\n return tf.get_variable(name, shape, initializer=init, dtype=self.dtype)\n\n def residual(self, x, shortcut, out_filters, stride, type='B'):\n in_shape = shortcut.get_shape()\n pad = int(x.get_shape()[3] - in_shape[3])\n if pad != 0 or type == 'C':\n if type == 'A':\n shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape,\n strides=[1, stride, stride, 1])\n shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])\n else:\n shortcut = self.conv(shortcut, 1, stride, out_filters)\n shortcut = self.norm(shortcut)\n x = shortcut + x\n x = self.relu(x)\n return x\n\n def relu(self, x):\n return tf.nn.relu(x)\n\n def conv(self, x, ksize, stride, filters_out, bias=True):\n filters_in = x.get_shape()[-1]\n\n wshape = [ksize, ksize, filters_in, filters_out]\n w_init = contrib.layers.xavier_initializer(dtype=self.dtype)\n weights = self._get_variable('weights', shape=wshape, init=w_init)\n x = tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')\n if bias:\n bshape = [filters_out]\n b_init = tf.zeros_initializer()\n biases = self._get_variable('biases', shape=bshape, init=b_init)\n x = x + biases\n return x\n\n def norm(self, x, type='BATCH', groups=32, training=False):\n if type == 'BATCH':\n # Perhaps use tf.nn.fused_batch_norm instead.\n x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True,\n training=training, trainable=training,\n momentum=0.997, epsilon=1e-5)\n elif type == 'GROUP':\n x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True,\n training=training, trainable=training,\n channels_axis=-1, reduction_axes=[-3, -2])\n return x\n\n def fc(self, x, num_units_out):\n num_units_in = x.get_shape()[1]\n w_init = contrib.layers.xavier_initializer(dtype=self.dtype)\n b_init = tf.constant_initializer(0.0)\n\n with self.namescope('fc'):\n weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init)\n biases = self._get_variable('biases', shape=[num_units_out], init=b_init)\n\n x = tf.nn.xw_plus_b(x, weights, biases)\n return x\n\n def reduce_mean(self, x, indices=(1, 2)):\n x = tf.reduce_mean(x, reduction_indices=indices)\n return x\n\n def maxpool(self, x):\n x = tf.nn.max_pool(\n x,\n ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n return x\n\n def namescope(self, debug_string):\n return tf.variable_scope(debug_string)\n"
] | [
[
"tensorflow.compat.v1.constant_initializer",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.nn.max_pool",
"tensorflow.compat.v1.strided_slice",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.python.ipu.normalization_ops.group_norm",
"tensorflow.compat.v1.nn.xw_plus_b",
"tensorflow.compat.v1.variable_scope",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.compat.v1.layers.batch_normalization",
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.nn.conv2d",
"tensorflow.compat.v1.pad",
"tensorflow.compat.v1.get_variable"
]
] |
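The `residual` method above implements the parameter-free type-'A' shortcut by spatially subsampling with a strided slice and zero-padding the channel axis (NHWC); the same two ops in isolation, using the `tensorflow.compat.v1` API the file already imports (shapes are illustrative):

```python
# Type-'A' ResNet shortcut in isolation: strided slice + channel zero-padding (NHWC).
import tensorflow.compat.v1 as tf

shortcut = tf.zeros([1, 56, 56, 64], dtype=tf.float16)
stride, pad = 2, 64                      # downsample 2x, grow 64 -> 128 channels

x = tf.strided_slice(shortcut, [0, 0, 0, 0], [1, 56, 56, 64],
                     strides=[1, stride, stride, 1])
x = tf.pad(x, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])
print(x.shape)                           # (1, 28, 28, 128)
```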
filangel/Eigenfaces | [
"55ddb705611ee351cc856d5a927a4dc82acaff03"
] | [
"src/app_a.py"
] | [
"# matplotlib backtest for missing $DISPLAY\nimport matplotlib\nmatplotlib.use('Agg')\n\n# scientific computing library\nimport numpy as np\n\n# visualization tools\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# prettify plots\nplt.rcParams['figure.figsize'] = [8.0, 6.0]\nsns.set_palette(sns.color_palette(\"muted\"))\nsns.set_style(\"ticks\")\nsns_b, sns_g, sns_r, sns_v, sns_y, sns_l = sns.color_palette(\"muted\")\n\n\n# helper data preprocessor\nfrom reader import fetch_data\n\n# utility functions\nfrom utils import progress\n\n# logging module\nimport logging\nimport coloredlogs\n\n# argument parser\nimport argparse\n\n# built-in tools\nimport os\n\nSHAPE = (46, 56)\n\nif __name__ == '__main__':\n\n # argument parser instance\n parser = argparse.ArgumentParser()\n # init log level argument\n parser.add_argument('-l', '--log', type=str,\n help='<optional> Log Level (info | debug)')\n # parse arguments\n argv = parser.parse_args()\n # get log level\n _level = argv.log or ''\n\n logger = logging.getLogger(os.path.basename(__file__).replace('.py', ''))\n\n if _level.upper() == 'INFO':\n coloredlogs.install(level='IFNO', logger=logger)\n elif _level.upper() == 'DEBUG':\n coloredlogs.install(level='DEBUG', logger=logger)\n else:\n coloredlogs.install(level='WARNING', logger=logger)\n\n logger.info('Fetching data...')\n data = fetch_data()\n\n X_train, y_train = data['train']\n\n D, N = X_train.shape\n logger.debug('Number of features: D=%d' % D)\n logger.debug('Number of train data: N=%d' % N)\n\n # mean face\n mean_face = X_train.mean(axis=1).reshape(-1, 1)\n\n A = X_train - mean_face\n logger.debug('A.shape=%s' % (A.shape,))\n\n S = (1 / N) * np.dot(A.T, A)\n logger.debug('S.shape=%s' % (S.shape,))\n\n # Calculate eigenvalues `w` and eigenvectors `v`\n logger.info('Calculating eigenvalues and eigenvectors...')\n _l, _v = np.linalg.eig(S)\n\n # Indexes of eigenvalues, sorted by value\n logger.info('Sorting eigenvalues...')\n _indexes = np.argsort(_l)[::-1]\n\n # TODO\n # threshold w's\n logger.warning('TODO: threshold eigenvalues')\n\n # Sorted eigenvalues and eigenvectors\n l = _l[_indexes]\n logger.debug('l.shape=%s' % (l.shape,))\n v = _v[:, _indexes]\n logger.debug('v.shape=%s' % (v.shape,))\n\n M = np.arange(1, N + 1)\n\n error = []\n\n logger.info('Reconstruction for M in [%d,%d]...' 
% (M[0], M[-1]))\n for j, m in enumerate(M):\n\n progress(j + 1, len(M), status='Reconstruction for M=%d' % m)\n\n V = v[:, :m]\n\n _U = np.dot(A, V)\n\n U = _U / np.apply_along_axis(np.linalg.norm, 0, _U)\n\n W = np.dot(U.T, A)\n\n A_hat = np.dot(U, W)\n\n error.append(np.mean(np.sum((A - A_hat)**2)))\n # fix bug of progress bar after '\\r'\n print('')\n\n logger.info('Plotting reconstruction error versus M...')\n fig, ax1 = plt.subplots()\n\n lns1 = ax1.plot(M, error, color=sns_b, label='Reconstruction Error')\n ax1.tick_params('y', colors=sns_b)\n\n ax2 = ax1.twinx()\n lns2 = ax2.plot(M, l, color=sns_g, label='Covariance Matrix Eigenvalues')\n ax2.tick_params('y', colors=sns_g)\n\n ax1.set_title(\n 'Reconstruction Error versus Number of Principle Components $\\mathcal{M}$\\n')\n ax1.set_xlabel('$\\mathcal{M}$: Number of Principle Components')\n ax1.set_ylabel('$\\mathcal{J}$: Reconstruction Error')\n ax2.set_ylabel('Covariance Matrix Eigenvalues')\n # fix legend hack\n lns = lns1 + lns2\n labs = [l.get_label() for l in lns]\n ax1.legend(lns, labs, loc=0)\n # ax1.grid()\n fig.tight_layout()\n plt.savefig('data/out/error_versus_M.pdf',\n format='pdf', dpi=1000, transparent=True)\n logger.info('Exported at data/out/error_versus_M.pdf...')\n\n # set M\n m = 100\n V = v[:, :m]\n _U = np.dot(A, V)\n U = _U / np.apply_along_axis(np.linalg.norm, 0, _U)\n W_train = np.dot(U.T, A)\n\n # test data\n X_test, y_test = data['test']\n I, K = X_test.shape\n assert I == D, logger.error(\n 'Number of features of test and train data do not match, %d != %d' % (D, I))\n Phi = X_test - mean_face\n logger.debug('Phi.shape=%s' % (Phi.shape,))\n\n W_test = np.dot(U.T, Phi)\n logger.debug('W_test.shape=%s' % (W_test.shape,))\n\n ridx_train = np.random.randint(0, N, 3)\n R_train = W_train[:, ridx_train]\n B_train = np.dot(U, R_train)\n\n plt.rcParams['figure.figsize'] = [16.0, 12.0]\n\n logger.info('Plotting reconstructed training images...')\n fig, axes = plt.subplots(nrows=2, ncols=3)\n titles_train = ['Original Train', 'Original Train', 'Original Train',\n 'Reconstructed Train', 'Reconstructed Train', 'Reconstructed Train']\n for ax, img, title in zip(axes.flatten(), np.concatenate((A[:, ridx_train], B_train), axis=1).T, titles_train):\n _img = img + mean_face.ravel()\n ax.imshow(_img.reshape(SHAPE).T,\n cmap=plt.get_cmap('gray'), vmin=0, vmax=255)\n ax.set_title(title)\n fig.savefig('data/out/reconstructed_train_images.pdf',\n format='pdf', dpi=1000, transparent=True)\n logger.info('Exported at data/out/reconstructed_train_images.pdf...')\n\n ridx_test = np.random.randint(0, K, 3)\n R_test = W_test[:, ridx_test]\n B_test = np.dot(U, R_test)\n\n logger.info('Plotting reconstructed testing images...')\n fig, axes = plt.subplots(nrows=2, ncols=3)\n titles_test = ['Original Test', 'Original Test', 'Original Test',\n 'Reconstructed Test', 'Reconstructed Test', 'Reconstructed Test']\n for ax, img, title in zip(axes.flatten(), np.concatenate((Phi[:, ridx_test], B_test), axis=1).T, titles_test):\n _img = img + mean_face.ravel()\n ax.imshow(_img.reshape(SHAPE).T,\n cmap=plt.get_cmap('gray'), vmin=0, vmax=255)\n ax.set_title(title)\n fig.savefig('data/out/reconstructed_test_images.pdf',\n format='pdf', dpi=1000, transparent=True)\n logger.info('Exported at data/out/reconstructed_test_images.pdf...')\n"
] | [
[
"numpy.sum",
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"numpy.argsort",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.get_cmap",
"numpy.apply_along_axis",
"matplotlib.use",
"numpy.dot",
"numpy.random.randint",
"numpy.linalg.eig"
]
] |
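The script above uses the classic eigenfaces shortcut: diagonalize the small N x N matrix `S = (1/N) A^T A`, then lift its eigenvectors into pixel space with `U = A V` and normalize the columns. A compact, self-contained version of that pipeline on random data (it uses `eigh` for the symmetric matrix where the script uses `eig`; dimensions follow the 46x56 face shape):

```python
# Small-covariance PCA trick from the eigenfaces script above, on random data.
import numpy as np

rng = np.random.default_rng(0)
D, N = 46 * 56, 40                        # pixels per face, number of training faces
X = rng.normal(size=(D, N))
A = X - X.mean(axis=1, keepdims=True)     # subtract the mean face

S = (1 / N) * A.T @ A                     # N x N instead of D x D
eigvals, V = np.linalg.eigh(S)            # symmetric, so eigh is enough
order = np.argsort(eigvals)[::-1]
V = V[:, order]

U = A @ V                                 # lift eigenvectors to pixel space
U /= np.linalg.norm(U, axis=0)            # unit-length eigenfaces, shape (D, N)

m = 10                                    # keep m principal components
W = U[:, :m].T @ A                        # project
A_hat = U[:, :m] @ W                      # reconstruct
print(np.mean(np.sum((A - A_hat) ** 2, axis=0)))  # reconstruction error
```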
shkarupa-alex/tfmiss | [
"4fe1bb3a47327c07711f910ee53319167032b6af"
] | [
"tfmiss/text/wordpiecelib.py"
] | [
"# Taken from https://raw.githubusercontent.com/tensorflow/text/v2.5.0/tensorflow_text/tools/wordpiece_vocab/wordpiece_tokenizer_learner_lib.py\n#\n# coding=utf-8\n# Copyright 2021 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Algorithm for learning wordpiece vocabulary.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\nfrom typing import List, Optional\n\nParams = collections.namedtuple('Params', [\n 'upper_thresh', 'lower_thresh', 'num_iterations', 'max_input_tokens',\n 'max_token_length', 'max_unique_chars', 'vocab_size', 'slack_ratio',\n 'include_joiner_token', 'joiner', 'reserved_tokens'\n])\n\n\ndef extract_char_tokens(word_counts):\n \"\"\"Extracts all single-character tokens from word_counts.\n\n Args:\n word_counts: list of (string, int) tuples\n\n Returns:\n set of single-character strings contained within word_counts\n \"\"\"\n\n seen_chars = set()\n for word, _ in word_counts:\n for char in word:\n seen_chars.add(char)\n return seen_chars\n\n\ndef ensure_all_tokens_exist(input_tokens, output_tokens, include_joiner_token,\n joiner):\n \"\"\"Adds all tokens in input_tokens to output_tokens if not already present.\n\n Args:\n input_tokens: set of strings (tokens) we want to include\n output_tokens: string to int dictionary mapping token to count\n include_joiner_token: bool whether to include joiner token\n joiner: string used to indicate suffixes\n\n Returns:\n string to int dictionary with all tokens in input_tokens included\n \"\"\"\n\n for token in input_tokens:\n if token not in output_tokens:\n output_tokens[token] = 1\n\n if include_joiner_token:\n joined_token = joiner + token\n if joined_token not in output_tokens:\n output_tokens[joined_token] = 1\n\n return output_tokens\n\n\ndef get_split_indices(word, curr_tokens, include_joiner_token, joiner):\n \"\"\"Gets indices for valid substrings of word, for iterations > 0.\n\n For iterations > 0, rather than considering every possible substring, we only\n want to consider starting points corresponding to the start of wordpieces in\n the current vocabulary.\n\n Args:\n word: string we want to split into substrings\n curr_tokens: string to int dict of tokens in vocab (from previous iteration)\n include_joiner_token: bool whether to include joiner token\n joiner: string used to indicate suffixes\n\n Returns:\n list of ints containing valid starting indices for word\n \"\"\"\n\n indices = []\n start = 0\n while start < len(word):\n end = len(word)\n while end > start:\n subtoken = word[start:end]\n # Subtoken includes the joiner token.\n if include_joiner_token and start > 0:\n subtoken = joiner + subtoken\n # If subtoken is part of vocab, 'end' is a valid start index.\n if subtoken in curr_tokens:\n indices.append(end)\n break\n end -= 1\n\n if end == start:\n return None\n start = end\n\n return indices\n\n\ndef get_search_threshs(word_counts, upper_thresh, lower_thresh):\n \"\"\"Clips the 
thresholds for binary search based on current word counts.\n\n The upper threshold parameter typically has a large default value that can\n result in many iterations of unnecessary search. Thus we clip the upper and\n lower bounds of search to the maximum and the minimum wordcount values.\n\n Args:\n word_counts: list of (string, int) tuples\n upper_thresh: int, upper threshold for binary search\n lower_thresh: int, lower threshold for binary search\n\n Returns:\n upper_search: int, clipped upper threshold for binary search\n lower_search: int, clipped lower threshold for binary search\n \"\"\"\n\n counts = [count for _, count in word_counts]\n max_count = max(counts)\n min_count = min(counts)\n\n if upper_thresh is None:\n upper_search = max_count\n else:\n upper_search = max_count if max_count < upper_thresh else upper_thresh\n\n if lower_thresh is None:\n lower_search = min_count\n else:\n lower_search = min_count if min_count > lower_thresh else lower_thresh\n\n return upper_search, lower_search\n\n\ndef get_input_words(word_counts, reserved_tokens, max_token_length):\n \"\"\"Filters out words that are longer than max_token_length or are reserved.\n\n Args:\n word_counts: list of (string, int) tuples\n reserved_tokens: list of strings\n max_token_length: int, maximum length of a token\n\n Returns:\n list of (string, int) tuples of filtered wordcounts\n \"\"\"\n\n all_counts = []\n\n for word, count in word_counts:\n if len(word) > max_token_length or word in reserved_tokens:\n continue\n all_counts.append((word, count))\n\n return all_counts\n\n\ndef get_allowed_chars(all_counts, max_unique_chars):\n \"\"\"Get the top max_unique_chars characters within our wordcounts.\n\n We want each character to be in the vocabulary so that we can keep splitting\n down to the character level if necessary. 
However, in order not to inflate\n our vocabulary with rare characters, we only keep the top max_unique_chars\n characters.\n\n Args:\n all_counts: list of (string, int) tuples\n max_unique_chars: int, maximum number of unique single-character tokens\n\n Returns:\n set of strings containing top max_unique_chars characters in all_counts\n \"\"\"\n\n char_counts = collections.defaultdict(int)\n\n for word, count in all_counts:\n for char in word:\n char_counts[char] += count\n\n # Sort by count, then alphabetically.\n sorted_counts = sorted(sorted(char_counts.items(), key=lambda x: x[0]),\n key=lambda x: x[1], reverse=True)\n\n allowed_chars = set()\n for i in range(min(len(sorted_counts), max_unique_chars)):\n allowed_chars.add(sorted_counts[i][0])\n return allowed_chars\n\n\ndef filter_input_words(all_counts, allowed_chars, max_input_tokens):\n \"\"\"Filters out words with unallowed chars and limits words to max_input_tokens.\n\n Args:\n all_counts: list of (string, int) tuples\n allowed_chars: list of single-character strings\n max_input_tokens: int, maximum number of tokens accepted as input\n\n Returns:\n list of (string, int) tuples of filtered wordcounts\n \"\"\"\n # Ensure that the input is sorted so that if `max_input_tokens` is reached\n # the least common tokens are dropped.\n all_counts = sorted(\n all_counts, key=lambda word_and_count: word_and_count[1], reverse=True)\n filtered_counts = []\n for word, count in all_counts:\n if (max_input_tokens != -1 and\n len(filtered_counts) >= max_input_tokens):\n break\n has_unallowed_chars = False\n for char in word:\n if char not in allowed_chars:\n has_unallowed_chars = True\n break\n if has_unallowed_chars:\n continue\n filtered_counts.append((word, count))\n\n return filtered_counts\n\n\ndef generate_final_vocabulary(reserved_tokens, char_tokens, curr_tokens):\n \"\"\"Generates final vocab given reserved, single-character, and current tokens.\n\n Args:\n reserved_tokens: list of strings (tokens) that must be included in vocab\n char_tokens: set of single-character strings\n curr_tokens: string to int dict mapping token to count\n\n Returns:\n list of strings representing final vocabulary\n \"\"\"\n\n sorted_char_tokens = sorted(list(char_tokens))\n vocab_char_arrays = []\n vocab_char_arrays.extend(reserved_tokens)\n vocab_char_arrays.extend(sorted_char_tokens)\n\n # Sort by count, then alphabetically.\n sorted_tokens = sorted(sorted(curr_tokens.items(), key=lambda x: x[0]),\n key=lambda x: x[1], reverse=True)\n for token, _ in sorted_tokens:\n vocab_char_arrays.append(token)\n\n seen_tokens = set()\n # Adding unique tokens to list to maintain sorted order.\n vocab_words = []\n for word in vocab_char_arrays:\n if word in seen_tokens:\n continue\n seen_tokens.add(word)\n vocab_words.append(word)\n\n return vocab_words\n\n\ndef learn_with_thresh(word_counts, thresh, params):\n \"\"\"Wordpiece learning algorithm to produce a vocab given frequency threshold.\n\n Args:\n word_counts: list of (string, int) tuples\n thresh: int, frequency threshold for a token to be included in the vocab\n params: Params namedtuple, parameters for learning\n\n Returns:\n list of strings, vocabulary generated for the given thresh\n \"\"\"\n\n # Set of single-character tokens.\n char_tokens = extract_char_tokens(word_counts)\n curr_tokens = ensure_all_tokens_exist(char_tokens, {},\n params.include_joiner_token,\n params.joiner)\n\n for iteration in range(params.num_iterations):\n subtokens = [dict() for _ in range(params.max_token_length + 1)]\n # Populate array 
with counts of each subtoken.\n for word, count in word_counts:\n if iteration == 0:\n split_indices = range(1, len(word) + 1)\n else:\n split_indices = get_split_indices(word, curr_tokens,\n params.include_joiner_token,\n params.joiner)\n if not split_indices:\n continue\n\n start = 0\n for index in split_indices:\n for end in range(start + 1, len(word) + 1):\n subtoken = word[start:end]\n length = len(subtoken)\n if params.include_joiner_token and start > 0:\n subtoken = params.joiner + subtoken\n if subtoken in subtokens[length]:\n # Subtoken exists, increment count.\n subtokens[length][subtoken] += count\n else:\n # New subtoken, add to dict.\n subtokens[length][subtoken] = count\n start = index\n\n next_tokens = {}\n # Get all tokens that have a count above the threshold.\n for length in range(params.max_token_length, 0, -1):\n for token, count in subtokens[length].items():\n if count >= thresh:\n next_tokens[token] = count\n # Decrement the count of all prefixes.\n if len(token) > length: # This token includes the joiner.\n joiner_len = len(params.joiner)\n for i in range(1 + joiner_len, length + joiner_len):\n prefix = token[0:i]\n if prefix in subtokens[i - joiner_len]:\n subtokens[i - joiner_len][prefix] -= count\n else:\n for i in range(1, length):\n prefix = token[0:i]\n if prefix in subtokens[i]:\n subtokens[i][prefix] -= count\n\n # Add back single-character tokens.\n curr_tokens = ensure_all_tokens_exist(char_tokens, next_tokens,\n params.include_joiner_token,\n params.joiner)\n\n vocab_words = generate_final_vocabulary(params.reserved_tokens, char_tokens,\n curr_tokens)\n\n return vocab_words\n\n\ndef learn_binary_search(word_counts, lower, upper, params):\n \"\"\"Performs binary search to find wordcount frequency threshold.\n\n Given upper and lower bounds and a list of (word, count) tuples, performs\n binary search to find the threshold closest to producing a vocabulary\n of size vocab_size.\n\n Args:\n word_counts: list of (string, int) tuples\n lower: int, lower bound for binary search\n upper: int, upper bound for binary search\n params: Params namedtuple, parameters for learning\n\n Returns:\n list of strings, vocab that is closest to target vocab_size\n \"\"\"\n thresh = (upper + lower) // 2\n current_vocab = learn_with_thresh(word_counts, thresh, params)\n current_vocab_size = len(current_vocab)\n\n # Allow count to be within k% of the target count, where k is slack ratio.\n slack_count = params.slack_ratio * params.vocab_size\n if slack_count < 0:\n slack_count = 0\n\n is_within_slack = (current_vocab_size <= params.vocab_size) and (\n params.vocab_size - current_vocab_size <= slack_count)\n\n # We've created a vocab within our goal range (or, ran out of search space).\n if is_within_slack or lower >= upper or thresh <= 1:\n return current_vocab\n\n current_vocab = None\n\n if current_vocab_size > params.vocab_size:\n return learn_binary_search(word_counts, thresh + 1, upper, params)\n\n else:\n return learn_binary_search(word_counts, lower, thresh - 1, params)\n\n\ndef count_words(iterable) -> collections.Counter:\n \"\"\"Converts a iterable of arrays of words into a `Counter` of word counts.\"\"\"\n counts = collections.Counter()\n for words in iterable:\n # Convert a RaggedTensor to a flat/dense Tensor.\n words = getattr(words, 'flat_values', words)\n # Flatten any dense tensor\n words = np.reshape(words, [-1])\n counts.update(words)\n\n # Decode the words if necessary.\n example_word = next(iter(counts.keys()))\n if isinstance(example_word, bytes):\n counts = 
collections.Counter(\n {word.decode('utf-8'): count for word, count in counts.items()})\n\n return counts\n\n\ndef learn(word_counts,\n vocab_size: int,\n reserved_tokens: List[str],\n upper_thresh: Optional[int] = int(1e7),\n lower_thresh: Optional[int] = 10,\n num_iterations: int = 4,\n max_input_tokens: Optional[int] = int(5e6),\n max_token_length: int = 50,\n max_unique_chars: int = 1000,\n slack_ratio: float = 0.05,\n include_joiner_token: bool = True,\n joiner: str = '##') -> List[str]:\n \"\"\"Takes in wordcounts and returns wordpiece vocabulary.\n\n Args:\n word_counts: (word, count) pairs as a dictionary, or list of tuples.\n vocab_size: The target vocabulary size. This is the maximum size.\n reserved_tokens: A list of tokens that must be included in the vocabulary.\n upper_thresh: Initial upper bound on the token frequency threshold.\n lower_thresh: Initial lower bound on the token frequency threchold.\n num_iterations: Number of iterations to run.\n max_input_tokens: The maximum number of words in the initial vocabulary. The\n words with the lowest counts are discarded. Use `None` or `-1` for \"no\n maximum\".\n max_token_length: The maximum token length. Counts for longer words are\n discarded.\n max_unique_chars: The maximum alphabet size. This prevents rare characters\n from inflating the vocabulary. Counts for words containing characters\n ouside of the selected alphabet are discarded.\n slack_ratio: The maximum deviation acceptable from `vocab_size` for an\n acceptable vocabulary. The acceptable range of vocabulary sizes is from\n `vocab_size*(1-slack_ratio)` to `vocab_size`.\n include_joiner_token: If true, include the `joiner` token in the output\n vocabulary.\n joiner: The prefix to include on suffix tokens in the output vocabulary.\n Usually \"##\". For example 'places' could be tokenized as `['place',\n '##s']`.\n\n Returns:\n string, final vocabulary with each word separated by newline\n \"\"\"\n if isinstance(word_counts, dict):\n word_counts = word_counts.items()\n\n params = Params(upper_thresh, lower_thresh, num_iterations, max_input_tokens,\n max_token_length, max_unique_chars, vocab_size, slack_ratio,\n include_joiner_token, joiner, reserved_tokens)\n\n upper_search, lower_search = get_search_threshs(word_counts,\n params.upper_thresh,\n params.lower_thresh)\n all_counts = get_input_words(word_counts, params.reserved_tokens,\n params.max_token_length)\n allowed_chars = get_allowed_chars(all_counts, params.max_unique_chars)\n\n filtered_counts = filter_input_words(all_counts, allowed_chars,\n params.max_input_tokens)\n\n vocab = learn_binary_search(filtered_counts, lower_search, upper_search,\n params)\n\n return vocab\n"
] | [
[
"numpy.reshape"
]
] |
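The entry above quotes the TF.Text wordpiece vocabulary learner in full. As a minimal usage sketch, assuming the quoted file is saved locally as `wordpiece_tokenizer_learner_lib.py`, its `learn()` function can be driven directly from plain Python word counts; the toy corpus and parameter values below are illustrative only.

```python
# Minimal sketch: driving the quoted wordpiece learner from plain word counts.
# Assumes the file above is saved locally as wordpiece_tokenizer_learner_lib.py.
import collections

import wordpiece_tokenizer_learner_lib as learner

corpus = [
    "the cat sat on the mat",
    "the cats sat on the mats",
    "a dog sat on the rug",
]

# learn() accepts a dict (or a list of (word, count) tuples), per its docstring.
word_counts = collections.Counter(w for line in corpus for w in line.split())

vocab = learner.learn(
    word_counts,
    vocab_size=60,
    reserved_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]"],
    lower_thresh=1,            # keep rare tokens in this tiny toy corpus
    include_joiner_token=True,
    joiner="##",
)
print(len(vocab), vocab[:15])
```

The returned list starts with the reserved tokens, then the single characters, then learned subwords ordered by count, matching `generate_final_vocabulary` above.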
abhi12ravi/iwpa | [
"ebe133412b7ef24453e090b6b44d8d78a540c384"
] | [
"scripts/make_predictions.py"
] | [
"import lazypredict\nimport sys\nimport numpy as np\nnp.set_printoptions(threshold=sys.maxsize)\n\n#Read data file\n\nimport pandas as pd\n\nfilepath = \"dataset/trial_1200/balanced_dataset2.csv\"\ndf = pd.read_csv(filepath)\nfeatures = df\n\n# Labels are the values we want to predict\nlabels = np.array(df['protection_level'])\n\n# Remove the labels from the features\nfeatures = features.drop('protection_level', axis = 1)\nfeatures = features.drop('page_title', axis = 1)\n# features = features.drop('page_id', axis=1)\n# features = features.drop('page_id_scrapped', axis=1)\n\n#Convert String to Floats\nfeatures['page_length'] = features['page_length'].astype(float)\nfeatures['edit_count'] = features['total_edits'].astype(float)\nfeatures['page_watchers'] = features['number_page_watchers'].astype(float)\nfeatures['page_watchers_recent_edits'] = features['number_page_watchers_recent_edits'].astype(float)\n\n# Saving feature names for later use\nfeature_list = list(features.columns)\n\n# Convert to numpy array\nfeatures = np.array(features)\n\n#Label encoding for protection_status column\n\n# 0 => unprotected\n# 1 => autoconfirmed\n# 2 => extendedconfirmed\n# 3 => sysop\nlabels_encoded = []\nfor item in labels:\n if(item ==\"unprotected\"):\n labels_encoded.append(0)\n elif(item == \"autoconfirmed\"):\n labels_encoded.append(1)\n elif(item == \"extendedconfirmed\"):\n labels_encoded.append(2)\n elif(item == \"sysop\"):\n labels_encoded.append(3) \n\n# Using Skicit-learn to split data into training and testing sets\nfrom sklearn.model_selection import train_test_split\n# Split the data into training and testing sets\ntrain_features, test_features, train_labels, test_labels = train_test_split(features, labels_encoded, test_size =0.20, random_state = 53)\n\nX_train = train_features\ny_train = train_labels\nX_test = test_features\ny_test = test_labels\n\nprint(X_train)\nfrom lazypredict.Supervised import LazyClassifier\nclf = LazyClassifier(verbose=0,ignore_warnings=True, custom_metric=None)\nmodels,predictions = clf.fit(X_train, X_test, y_train, y_test)\n\nprint(models) "
] | [
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.set_printoptions",
"numpy.array"
]
] |
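The script above encodes `protection_level` with an explicit if/elif loop before handing the split to `LazyClassifier`. A compact equivalent of that encoding step, sketched on a tiny synthetic frame (the column names follow the script; the rows and the `level_to_int` name are illustrative):

```python
# Sketch of the label-encoding step from the script above, using a mapping dict.
# Column names follow the script; the rows here are synthetic.
import pandas as pd

df = pd.DataFrame({
    "page_title": ["Alpha", "Beta", "Gamma"],
    "page_length": ["120", "45", "300"],
    "protection_level": ["unprotected", "autoconfirmed", "sysop"],
})

level_to_int = {"unprotected": 0, "autoconfirmed": 1, "extendedconfirmed": 2, "sysop": 3}
labels = df["protection_level"].map(level_to_int).to_numpy()
features = df.drop(columns=["protection_level", "page_title"]).astype(float).to_numpy()
print(labels, features.shape)
```

One difference worth noting: the loop in the script silently skips any label outside the four known values, which would leave `labels_encoded` shorter than `features`; `Series.map` instead yields NaN for such rows, so a mismatch surfaces immediately.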
oz123/python-nvd3 | [
"fd4998549542343b74b82ca72cbcee97845b06ee"
] | [
"examples/lineChartXY.py"
] | [
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nExamples for Python-nvd3 is a Python wrapper for NVD3 graph library.\nNVD3 is an attempt to build re-usable charts and chart components\nfor d3.js without taking away the power that d3.js gives you.\n\nProject location : https://github.com/areski/python-nvd3\n\"\"\"\n\nfrom nvd3 import lineChart\nfrom numpy import sin, pi, linspace\n\noutput_file = open('test_lineChartXY.html', 'w')\n\ntype = \"lineChart\"\nchart = lineChart(name=type, x_is_date=False,\n x_axis_format=\".1f\", y_axis_format=\".1f\",\n width=500, height=500,\n show_legend=False)\n\n# lissajous parameters of a/b\na = [1, 3, 5, 3]\nb = [1, 5, 7, 4]\ndelta = pi / 2\nt = linspace(-pi, pi, 300)\n\nfor i in range(0, 4):\n x = sin(a[i] * t + delta)\n y = sin(b[i] * t)\n chart.add_serie(y=y, x=x, name='lissajous-n%d' % i, color='red' if i == 0 else 'black')\n\nchart.buildhtml()\noutput_file.write(chart.htmlcontent)\noutput_file.close()\n"
] | [
[
"numpy.sin",
"numpy.linspace"
]
] |
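The example above builds four Lissajous curves with numpy and hands each (x, y) pair to `lineChart.add_serie`. The series generation can be checked on its own, independent of the charting code (a standalone sketch; the printed summary format is mine):

```python
# Standalone sketch of the Lissajous sampling used by the nvd3 example above.
import numpy as np

a, b = [1, 3, 5, 3], [1, 5, 7, 4]
delta = np.pi / 2
t = np.linspace(-np.pi, np.pi, 300)

for i in range(4):
    x = np.sin(a[i] * t + delta)   # x(t) = sin(a*t + delta)
    y = np.sin(b[i] * t)           # y(t) = sin(b*t)
    print(f"lissajous-n{i}: {len(t)} points, |x| <= {abs(x).max():.2f}, |y| <= {abs(y).max():.2f}")
```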
furgerf/GAN-for-dermatologic-imaging | [
"e90b06c46c7693e984a4c5b067e18460113cd23b"
] | [
"src/perceptual_scores.py"
] | [
"#!/usr/bin/env python\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom scipy.misc import imread\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition.pca import PCA\nfrom tqdm import tqdm\n\nfrom utils import (kernel_classifier_distance_and_std_from_activations,\n load_image_names)\n\n\nclass PerceptualScores:\n EXTRACTOR_NAMES = [\"MobileNetV2\", \"ResNet50\", \"VGG16\", \"VGG19\"]\n\n def __init__(self, config):\n # pylint: disable=no-else-raise\n self._config = config\n self._real_activations = None\n if self._config.extractor_name == \"MobileNetV2\":\n from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2\n from tensorflow.keras.applications.mobilenet_v2 import preprocess_input\n model = MobileNetV2(include_top=False, weights=\"imagenet\", alpha=1.4)\n self._preprocess = preprocess_input\n raise NotImplementedError(\"Need to update blocks...\")\n elif self._config.extractor_name == \"ResNet50\":\n from tensorflow.keras.applications.resnet50 import ResNet50\n from tensorflow.keras.applications.resnet50 import preprocess_input\n model = ResNet50(include_top=False, weights=\"imagenet\")\n self._preprocess = preprocess_input\n raise NotImplementedError(\"Need to update blocks...\")\n elif self._config.extractor_name == \"VGG16\":\n from tensorflow.keras.applications.vgg16 import VGG16\n from tensorflow.keras.applications.vgg16 import preprocess_input\n model = VGG16(include_top=False, weights=\"imagenet\")\n self._preprocess = preprocess_input\n raise NotImplementedError(\"Need to update blocks...\")\n elif self._config.extractor_name == \"VGG19\":\n from tensorflow.keras.applications.vgg19 import VGG19\n from tensorflow.keras.applications.vgg19 import preprocess_input\n model = VGG19(include_top=False, weights=\"imagenet\")\n self._extractor = Model(inputs=model.input, outputs=\n [model.get_layer(\"block{}_pool\".format(i)).output for i in range(1, 6)])\n self._preprocess = preprocess_input\n else:\n raise ValueError(\"Unknown feature extractor '{}'\".format(self._config.extractor_name))\n self._pca = None\n self._high_dimensional_kmeans = None\n self._low_dimensional_kmeans = None\n\n def _get_activations_from_images(self, all_image_names):\n activations = []\n data = tf.data.Dataset.from_tensor_slices(all_image_names).batch(self._config.batch_size)\n\n tf.logging.info(\"Computing activations for {} images\".format(len(all_image_names)))\n for image_names in tqdm(data, total=len(all_image_names) // self._config.batch_size + 1):\n images = [imread(image_name.numpy().decode(\"utf-8\"), mode=\"RGB\") for image_name in image_names]\n batch = tf.cast(tf.stack(images), dtype=tf.float32)\n activations.append([tf.reduce_mean(features, axis=[1, 2]) for features in self._extractor(self._preprocess(batch))])\n return [tf.concat([act[i] for act in activations], axis=0) for i in range(len(activations[0]))]\n\n def _get_activations_from_generator(self, generator, data_set):\n activations = []\n tf.logging.debug(\"Computing activations for newly-generated samples\")\n for batch in data_set:\n samples = tf.cast(tf.cast((generator(batch)+1) * 127.5, dtype=tf.int32), dtype=tf.float32) # denormalize to normal RGB\n activations.append([tf.reduce_mean(features, axis=[1, 2]) for features in self._extractor(self._preprocess(samples))])\n return [tf.concat([act[i] for act in activations], axis=0) for i in range(len(activations[0]))]\n\n def initialize(self, override_data_dir=None):\n assert self._real_activations is None\n\n 
data_dir = override_data_dir if override_data_dir else \\\n (self._config.target_data_dir if self._config.target_data_dir else self._config.data_dir)\n activations_file = os.path.join(\"data\", data_dir, \"activations_{}.npz\".format(self._config.extractor_name))\n if os.path.exists(activations_file):\n tf.logging.info(\"Loading activations from {}\".format(activations_file))\n with np.load(activations_file) as activations:\n self._real_activations = [tf.convert_to_tensor(activations[f]) for f in sorted(activations.files)]\n else:\n tf.logging.warning(\"Computing activations for real images in '{}'\".format(data_dir))\n self._real_activations = self._get_activations_from_images(load_image_names(data_dir))\n tf.logging.info(\"Saving activations to {}\".format(activations_file))\n np.savez(activations_file, **{\"block_{}\".format(i): act.numpy() for i, act in enumerate(self._real_activations)})\n\n tf.logging.debug(\"Fitting PCA\")\n self._pca = PCA(n_components=2)\n low_dimensional_real_activations = self._pca.fit_transform(self._real_activations[-1])\n tf.logging.debug(\"Explained variance: {} ({:.5f})\".format(\n self._pca.explained_variance_ratio_, np.sum(self._pca.explained_variance_ratio_)))\n\n high_dimensional_clusters = 7\n tf.logging.debug(\"Clustering high-dimensional activations with {} clusters\".format(high_dimensional_clusters))\n self._high_dimensional_kmeans = KMeans(n_clusters=high_dimensional_clusters)\n self._high_dimensional_kmeans.fit(self._real_activations[-1])\n tf.logging.debug(\"Inertia: {:.1f}\".format(self._high_dimensional_kmeans.inertia_))\n\n low_dimensional_clusters = 4\n tf.logging.debug(\"Clustering low-dimensional activations with {} clusters\".format(low_dimensional_clusters))\n self._low_dimensional_kmeans = KMeans(n_clusters=low_dimensional_clusters)\n self._low_dimensional_kmeans.fit(low_dimensional_real_activations)\n tf.logging.debug(\"Inertia: {:.1f}\".format(self._low_dimensional_kmeans.inertia_))\n\n def _compute_scores_from_activations(self, generated_activations):\n fid = tf.contrib.gan.eval.frechet_classifier_distance_from_activations(self._real_activations[-1], generated_activations[-1])\n mmd, _ = kernel_classifier_distance_and_std_from_activations(self._real_activations[-1], generated_activations[-1])\n low_level_fids = [\n tf.contrib.gan.eval.frechet_classifier_distance_from_activations(self._real_activations[i], generated_activations[i]) \\\n for i in range(len(self._real_activations)-1)]\n combined_fid = tf.contrib.gan.eval.frechet_classifier_distance_from_activations(\n tf.concat(self._real_activations, axis=-1), tf.concat(generated_activations, axis=-1))\n\n # high_dimensional_cluster_distances = tf.reduce_min(self._high_dimensional_kmeans.transform(generated_activations), axis=-1)\n # low_dimensional_cluster_distances = tf.reduce_min(self._low_dimensional_kmeans.transform(self._pca.transform(generated_activations)), axis=-1)\n # mean_std = lambda d: (tf.reduce_mean(d), tf.convert_to_tensor(np.std(d)))\n # return fid, k_mmd, mean_std(high_dimensional_cluster_distances), mean_std(low_dimensional_cluster_distances)\n\n return fid, mmd, -self._high_dimensional_kmeans.score(generated_activations[-1]), \\\n -self._low_dimensional_kmeans.score(self._pca.transform(generated_activations[-1])), low_level_fids, combined_fid\n\n def compute_scores_from_samples(self):\n assert os.path.exists(self._config.samples_dir)\n all_image_names = [os.path.join(self._config.samples_dir, sample) for sample in \\\n sorted(os.listdir(self._config.samples_dir)) if 
sample.endswith(\".png\")]\n\n activations_file = os.path.join(self._config.samples_dir, \"activations_{}.npz\".format(self._config.extractor_name))\n if os.path.exists(activations_file):\n tf.logging.info(\"Loading activations from {}\".format(activations_file))\n generated_activations = tf.convert_to_tensor(np.load(activations_file))\n else:\n tf.logging.warning(\"Computing activations for generated images in '{}'\".format(self._config.samples_dir))\n generated_activations = self._get_activations_from_images(all_image_names)\n tf.logging.info(\"Saving activations to {}\".format(activations_file))\n np.savez(activations_file, **{\"block_{}\".format(i): act.numpy() for i, act in enumerate(self._real_activations)})\n\n tf.logging.info(\"Computing scores\")\n return self._compute_scores_from_activations(generated_activations)\n\n def compute_scores_from_generator(self, generator, data_set):\n generated_activations = self._get_activations_from_generator(generator, data_set)\n\n tf.logging.debug(\"Computing scores\")\n return self._compute_scores_from_activations(generated_activations)\n"
] | [
[
"tensorflow.keras.applications.vgg19.VGG19",
"numpy.load",
"numpy.sum",
"tensorflow.stack",
"tensorflow.logging.info",
"sklearn.decomposition.pca.PCA",
"tensorflow.reduce_mean",
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.keras.applications.vgg16.VGG16",
"sklearn.cluster.KMeans",
"tensorflow.convert_to_tensor",
"tensorflow.keras.applications.mobilenet_v2.MobileNetV2",
"tensorflow.concat",
"tensorflow.logging.debug",
"tensorflow.contrib.gan.eval.frechet_classifier_distance_from_activations",
"tensorflow.data.Dataset.from_tensor_slices"
]
] |
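The `PerceptualScores` class above builds a VGG19 extractor over the five `block{i}_pool` layers and spatially averages each block before computing FID/KMMD. The core extraction step looks roughly like this in current tf.keras (a sketch mirroring the `__init__` and `_get_activations_from_images` logic; the batch size, image size, and random input are placeholders):

```python
# Sketch: spatially pooled multi-block VGG19 activations, the quantity the class
# above compares between real and generated images. Input here is random noise.
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.vgg19 import VGG19, preprocess_input
from tensorflow.keras.models import Model

base = VGG19(include_top=False, weights="imagenet")
extractor = Model(
    inputs=base.input,
    outputs=[base.get_layer("block{}_pool".format(i)).output for i in range(1, 6)],
)

images = np.random.randint(0, 256, size=(4, 224, 224, 3)).astype("float32")
features = extractor(preprocess_input(images))               # five feature maps
pooled = [tf.reduce_mean(f, axis=[1, 2]) for f in features]  # (batch, channels) each
print([p.shape.as_list() for p in pooled])
```

With the five pooled blocks in hand, the class feeds the last one into the FID and kernel-distance computations and concatenates all five for the combined FID.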
cloudspectatordevelopment/cudamat | [
"d26cf019a7855077b7d4344ae1a3202a156c5170"
] | [
"test/test_cudamat.py"
] | [
"import numpy as np\nimport nose\nimport cudamat as cm\n\ndef setup():\n cm.cublas_init()\n\ndef teardown():\n cm.cublas_shutdown()\n\ndef test_reshape():\n m = 256\n n = 1\n cm1 = np.array(np.random.rand(n, m)*10, dtype=np.float32, order='F')\n cm2 = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n gm1 = cm.CUDAMatrix(cm1)\n gm2 = cm.CUDAMatrix(cm2)\n\n gm1.reshape((m, n))\n gm2.assign(gm1)\n gm1.reshape((n, m))\n\n gm1.copy_to_host()\n gm2.copy_to_host()\n\n assert np.max(np.abs(gm1.numpy_array - gm2.numpy_array.T)) < 10**-2, \"Error in CUDAMatrix.reshape exceeded threshold\"\n\ndef test_T_field():\n m = 256\n n = 128\n cm1 = np.array(np.random.rand(n, m)*10, dtype=np.float32, order='F')\n cm2 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n gm1 = cm.CUDAMatrix(cm1)\n gm2 = cm.CUDAMatrix(cm2)\n\n # test dot\n gm = cm.dot(gm2.T, gm1.T)\n c = np.dot(cm2.T, cm1.T)\n gm.copy_to_host()\n\n assert np.max(np.abs(gm.numpy_array - c)) < 10**-2, \"Error in CUDAMatrix.dot with TransposedCUDAMatrix exceeded threshold\"\n\n # test add_dot\n cm3 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n gm3 = cm.CUDAMatrix(cm3)\n gm3.add_dot(gm2.T, gm1.T)\n c = cm3 + np.dot(cm2.T, cm1.T)\n gm3.copy_to_host()\n\n assert np.max(np.abs(gm3.numpy_array - c)) < 10**-2, \"Error in CUDAMatrix.add_dot TransposedCUDAMatrix exceeded threshold\"\n\n # test add_sums\n gm2.add_sums(gm1.T, axis = 1)\n c = cm2 + np.atleast_2d(cm1.sum(0)).T\n gm2.copy_to_host()\n\n assert np.max(np.abs(gm2.numpy_array - c)) < 10**-2, \"Error in CUDAMatrix.add_sums TransposedCUDAMatrix exceeded threshold\"\n\ndef test_assign():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n\n m1.assign(m2)\n m1.copy_to_host()\n\n assert np.max(np.abs(m1.numpy_array - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.assign exceeded threshold\"\n\ndef test_assign_scalar():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n m1 = cm.CUDAMatrix(a)\n\n m1.assign(np.pi)\n m1.copy_to_host()\n\n assert np.max(np.abs(m1.numpy_array - np.pi)) < 10**-4, \"Error in CUDAMatrix.assign_scalar exceeded threshold\"\n\ndef test_get_row_slice():\n m = 256\n n = 128\n start = 11\n end = 54\n\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(end-start, n)*10, dtype=np.float32, order='F')\n\n c = np.array(a[start:end,:], order='F')\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.get_row_slice(start, end, target = m2)\n m3 = m1.get_row_slice(start, end)\n m2.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.get_row_slice exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.get_row_slice exceeded threshold\"\n\ndef test_set_row_slice():\n m = 256\n n = 128\n start = 11\n end = 54\n\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(end-start, n)*10, dtype=np.float32, order='F')\n\n c = a.copy()\n c[start:end,:] = b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.set_row_slice(start, end, m2)\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.set_row_slice exceeded threshold\"\n\ndef test_transpose():\n m = 6\n n = 128\n\n a = np.array(np.random.rand(m, n)*10, 
dtype=np.float32, order='F')\n b = np.array(np.random.rand(n, m), dtype=np.float32, order='F')\n\n c = a.copy().T\n\n m = cm.CUDAMatrix(a)\n mt1 = cm.CUDAMatrix(b)\n m.transpose(target = mt1)\n mt2 = m.transpose()\n\n mt1.copy_to_host()\n mt2.copy_to_host()\n\n assert np.max(np.abs(c - mt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.transpose exceeded threshold\"\n assert np.max(np.abs(c - mt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.transpose exceeded threshold\"\n\ndef test_slice():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = np.array(a[:,32:64], order='F')\n\n m1 = cm.CUDAMatrix(a)\n m2 = m1.slice(32, 64)\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.slice exceeded threshold\"\n\n\ndef test_add_col_vec():\n m = 250\n n = 120\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a + b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.add_col_vec(m2, target = m3)\n m1.add_col_vec(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_col_vec exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_col_vec exceeded threshold\"\n\ndef test_add_col_mult():\n m = 256\n n = 128\n mult = np.pi\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a + mult * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.add_col_mult(m2, mult, target = m3)\n m1.add_col_mult(m2, mult)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_col_mult exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_col_mult exceeded threshold\"\n\ndef test_add_row_vec():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a + b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.add_row_vec(m2, target = m3)\n m1.add_row_vec(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_row_vec exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_row_vec exceeded threshold\"\n\ndef test_mult_by_col():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.mult_by_col(m2, target = m3)\n m1.mult_by_col(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult_by_col exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult_by_col exceeded threshold\"\n\ndef test_mult_by_row():\n m = 256\n n = 128\n a = 
np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.mult_by_row(m2, target = m3)\n m1.mult_by_row(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult_by_row exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult_by_row exceeded threshold\"\n\ndef test_div_by_col():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F') + 0.1\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a / b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.div_by_col(m2, target = m3)\n m1.div_by_col(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div_by_col exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div_by_col exceeded threshold\"\n\ndef test_div_by_row():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F') + 0.1\n t = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = a / b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.div_by_row(m2, target = m3)\n m1.div_by_row(m2)\n m1.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div_by_row exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div_by_row exceeded threshold\"\n\ndef test_sum():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n\n mult = 0.8\n c1 = np.atleast_2d(a.sum(0)) * mult\n c2 = np.atleast_2d(a.sum(1)).T\n\n m = cm.CUDAMatrix(a)\n mt1 = cm.CUDAMatrix(t1)\n mt2 = cm.CUDAMatrix(t2)\n\n m.sum(axis = 0, target = mt1, mult = mult)\n mt1r = m.sum(axis = 0, mult = mult)\n\n m.sum(axis = 1, target = mt2)\n mt2r = m.sum(axis = 1)\n\n mt1.copy_to_host()\n mt1r.copy_to_host()\n mt2.copy_to_host()\n mt2r.copy_to_host()\n\n assert np.max(np.abs(c1 - mt1.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c1 - mt1r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n\ndef test_sum_trans():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(1, m)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.rand(n, 1)*10, dtype=np.float32, order='F')\n\n c1 = np.atleast_2d(a.T.sum(0))\n c2 = np.atleast_2d(a.T.sum(1)).T\n\n m = cm.CUDAMatrix(a)\n m.set_trans(True)\n mt1 = cm.CUDAMatrix(t1)\n mt2 = cm.CUDAMatrix(t2)\n\n m.sum(axis = 0, target = mt1)\n mt1r = m.sum(axis = 0)\n\n m.sum(axis = 1, target = mt2)\n mt2r = m.sum(axis = 1)\n\n mt1.copy_to_host()\n 
mt1r.copy_to_host()\n mt2.copy_to_host()\n mt2r.copy_to_host()\n\n assert np.max(np.abs(c1 - mt1.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c1 - mt1r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n\ndef test_mean():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n\n c1 = np.atleast_2d(a.mean(0))\n c2 = np.atleast_2d(a.mean(1)).T\n\n m = cm.CUDAMatrix(a)\n mt1 = cm.CUDAMatrix(t1)\n mt2 = cm.CUDAMatrix(t2)\n\n m.mean(axis = 0, target = mt1)\n mt1r = m.mean(axis = 0)\n\n m.mean(axis = 1, target = mt2)\n mt2r = m.mean(axis = 1)\n\n mt1.copy_to_host()\n mt1r.copy_to_host()\n mt2.copy_to_host()\n mt2r.copy_to_host()\n\n assert np.max(np.abs(c1 - mt1.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c1 - mt1r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n assert np.max(np.abs(c2 - mt2r.numpy_array)) < 10**-3, \"Error in CUDAMatrix.sum exceeded threshold\"\n\ndef test_add_sums():\n m = 256\n n = 128\n\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n\n mult = np.pi\n beta = 0.7\n\n c1 = beta * t1 + mult * np.atleast_2d(a.sum(1)).T\n c2 = t2 + np.atleast_2d(a.sum(0))\n\n m = cm.CUDAMatrix(a)\n mt1 = cm.CUDAMatrix(t1)\n mt2 = cm.CUDAMatrix(t2)\n\n mt1.add_sums(m, axis = 1, mult = np.pi, beta = beta)\n mt2.add_sums(m, axis = 0)\n\n mt1.copy_to_host()\n mt2.copy_to_host()\n\n assert np.max(np.abs(c1 - mt1.numpy_array)) < 10**-3, \"Error in CUDAMatrix.add_sums exceeded threshold\"\n assert np.max(np.abs(c2 - mt2.numpy_array)) < 10**-3, \"Error in CUDAMatrix.add_sums exceeded threshold\"\n\n\ndef test_less_than():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n v = 0.1\n\n r1 = 1 * (a < b)\n r2 = 1 * (a < v)\n\n da = cm.CUDAMatrix(a)\n db = cm.CUDAMatrix(b)\n dt1 = cm.CUDAMatrix(t1)\n dt2 = cm.CUDAMatrix(t2)\n\n da.less_than(db, target = dt1)\n da.less_than(v, target = dt2)\n da.less_than(db)\n\n da.copy_to_host()\n dt1.copy_to_host()\n dt2.copy_to_host()\n\n assert np.max(np.abs(r1 - da.numpy_array)) < 10**-4, \"Error in CUDAMatrix.less_than exceeded threshold\"\n assert np.max(np.abs(r1 - dt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.less_than exceeded threshold\"\n assert np.max(np.abs(r2 - dt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.less_than exceeded threshold\"\n\ndef test_greater_than():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.randn(m, 
n)*10, dtype=np.float32, order='F')\n v = 0.1\n\n r1 = 1 * (a > b)\n r2 = 1 * (a > v)\n\n da = cm.CUDAMatrix(a)\n db = cm.CUDAMatrix(b)\n dt1 = cm.CUDAMatrix(t1)\n dt2 = cm.CUDAMatrix(t2)\n\n da.greater_than(db, target = dt1)\n da.greater_than(v, target = dt2)\n da.greater_than(db)\n\n da.copy_to_host()\n dt1.copy_to_host()\n dt2.copy_to_host()\n\n assert np.max(np.abs(r1 - da.numpy_array)) < 10**-4, \"Error in CUDAMatrix.greater_than exceeded threshold\"\n assert np.max(np.abs(r1 - dt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.greater_than exceeded threshold\"\n assert np.max(np.abs(r2 - dt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.greater_than exceeded threshold\"\n\ndef test_minimum():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n v = 0.1\n\n r1 = np.minimum(a, b)\n r2 = np.minimum(a, v)\n\n da = cm.CUDAMatrix(a)\n db = cm.CUDAMatrix(b)\n dt1 = cm.CUDAMatrix(t1)\n dt2 = cm.CUDAMatrix(t2)\n\n da.minimum(db, target = dt1)\n da.minimum(v, target = dt2)\n da.minimum(db)\n\n da.copy_to_host()\n dt1.copy_to_host()\n dt2.copy_to_host()\n\n assert np.max(np.abs(r1 - da.numpy_array)) < 10**-4, \"Error in CUDAMatrix.minimum exceeded threshold\"\n assert np.max(np.abs(r1 - dt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.minimum exceeded threshold\"\n assert np.max(np.abs(r2 - dt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.minimum exceeded threshold\"\n\ndef test_maximum():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t2 = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n v = 0.1\n\n r1 = np.maximum(a, b)\n r2 = np.maximum(a, v)\n\n da = cm.CUDAMatrix(a)\n db = cm.CUDAMatrix(b)\n dt1 = cm.CUDAMatrix(t1)\n dt2 = cm.CUDAMatrix(t2)\n\n da.maximum(db, target = dt1)\n da.maximum(v, target = dt2)\n da.maximum(db)\n\n da.copy_to_host()\n dt1.copy_to_host()\n dt2.copy_to_host()\n\n assert np.max(np.abs(r1 - da.numpy_array)) < 10**-4, \"Error in CUDAMatrix.maximum exceeded threshold\"\n assert np.max(np.abs(r1 - dt1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.maximum exceeded threshold\"\n assert np.max(np.abs(r2 - dt2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.maximum exceeded threshold\"\n\ndef test_minmax():\n m = 256\n n = 128\n for op in 'min', 'max', 'argmin', 'argmax':\n for sign in (1, -1):\n a = np.array(np.random.randn(m, n)*10*sign, dtype=np.float32, order='F')\n t0 = np.array(np.random.rand(1, n)*10, dtype=np.float32, order='F')\n t1 = np.array(np.random.rand(m, 1)*10, dtype=np.float32, order='F')\n\n r0 = np.atleast_2d(getattr(a, op)(0))\n r1 = np.atleast_2d(getattr(a, op)(1))\n\n da = cm.CUDAMatrix(a)\n dr10 = cm.CUDAMatrix(t0)\n dr11 = cm.CUDAMatrix(t1)\n\n getattr(da, op)(axis = 0, target = dr10)\n getattr(da, op)(axis = 1, target = dr11)\n dr20 = getattr(da, op)(axis = 0)\n dr21 = getattr(da, op)(axis = 1)\n\n dr10.copy_to_host()\n dr11.copy_to_host()\n dr20.copy_to_host()\n dr21.copy_to_host()\n\n assert np.max(np.abs(r0 - dr10.numpy_array)) < 10**-4, \"Error in CUDAMatrix.%s exceeded threshold\" % op\n assert np.max(np.abs(r1 - dr11.numpy_array.T)) < 10**-4, \"Error in CUDAMatrix.%s exceeded threshold\" % op\n assert 
np.max(np.abs(r0 - dr20.numpy_array)) < 10**-4, \"Error in CUDAMatrix.%s exceeded threshold\" % op\n assert np.max(np.abs(r1 - dr21.numpy_array.T)) < 10**-4, \"Error in CUDAMatrix.%s exceeded threshold\" % op\n\ndef test_sign():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n a[0,0] = 0.\n a[0,1] = -0.\n t = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = np.sign(a)\n\n m1 = cm.CUDAMatrix(a)\n m3 = cm.CUDAMatrix(t)\n\n m2 = m1.sign()\n m1.sign(target = m3)\n\n m2.copy_to_host()\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.sign exceeded threshold\"\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.sign exceeded threshold\"\n\ndef test_sigmoid():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = 1. / (1. + np.exp(-a))\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.apply_sigmoid(target = m2)\n m1.apply_sigmoid()\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_sigmoid exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_sigmoid exceeded threshold\"\n\ndef test_tanh():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = np.tanh(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.apply_tanh(target = m2)\n m1.apply_tanh()\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_tanh exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_tanh exceeded threshold\"\n\ndef test_soft_threshold():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n alpha = 0.5\n c = np.sign(a) * np.maximum(0, np.abs(a) - alpha)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.apply_soft_threshold(alpha, target = m2)\n m1.apply_soft_threshold(alpha)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_soft_threshold exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.apply_soft_threshold exceeded threshold\"\n\ndef test_log():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10+0.1, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*10+0.1, dtype=np.float32, order='F')\n\n c = np.log(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.log(m1, target = m2)\n cm.log(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in cudamat.log exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in cudamat.log exceeded threshold\"\n\ndef test_exp():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n\n c = np.exp(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.exp(m1, target = m2)\n cm.exp(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in cudamat.exp exceeded threshold\"\n assert 
np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in cudamat.exp exceeded threshold\"\n\ndef test_gamma():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*5, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*5, dtype=np.float32, order='F')\n\n from scipy.special import gamma\n c = gamma(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.gamma(m1, target = m2)\n cm.gamma(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-2, \"Error in cudamat.gamma exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-2, \"Error in cudamat.gamma exceeded threshold\"\n\ndef test_lgamma():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n from scipy.special import gammaln\n c = gammaln(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.lgamma(m1, target = m2)\n cm.lgamma(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-2, \"Error in cudamat.lgamma exceeded threshold \" + str(np.max(np.abs(c - m1.numpy_array)))\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-2, \"Error in cudamat.lgamma exceeded threshold\"\n\ndef test_sqrt():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*20, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n), dtype=np.float32, order='F')\n\n c = np.sqrt(a)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.sqrt(m1, target = m2)\n cm.sqrt(m1)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in cudamat.sqrt exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in cudamat.sqrt exceeded threshold\"\n\ndef test_pow():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*20, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n), dtype=np.float32, order='F')\n p = 2\n\n c = a**p\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n cm.pow(m1, p, target = m2)\n cm.pow(m1, p)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-3, \"Error in cudamat.pow exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-3, \"Error in cudamat.pow exceeded threshold\"\n\ndef test_pow_matrix():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*20, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n), dtype=np.float32, order='F')\n p = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n\n\n c = a**p\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n mp = cm.CUDAMatrix(p)\n cm.pow(m1, mp, target = m2)\n cm.pow(m1, mp)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-2, \"Error in cudamat.pow exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-2, \"Error in cudamat.pow exceeded threshold\"\n\ndef test_reciprocal():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10+10**-3, dtype=np.float32, order='F')\n b = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n c = 1. 
/ a\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.reciprocal(target = m2)\n m1.reciprocal()\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.reciprocal exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.reciprocal exceeded threshold\"\n\ndef test_add_mult():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = a + np.pi * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.add_mult(m2, np.pi)\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_mult exceeded threshold\"\n\ndef test_subtract_mult():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n c = a - np.pi * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.subtract_mult(m2, np.pi)\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.subtract_mult exceeded threshold\"\n\ndef test_add():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(1.+np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a + b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.add(m2, target = m3)\n m1.add(m2)\n\n m3.copy_to_host()\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add exceeded threshold\"\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add exceeded threshold\"\n\ndef test_subtract():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(1.+np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a - b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.subtract(m2, target = m3)\n m1.subtract(m2)\n\n m3.copy_to_host()\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.subtract exceeded threshold\"\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.subtract exceeded threshold\"\n\ndef test_divide():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(1.+np.random.rand(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a / b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.divide(m2, target = m3)\n m1.divide(m2)\n\n m3.copy_to_host()\n m1.copy_to_host()\n\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div exceeded threshold\"\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.div exceeded threshold\"\n\ndef test_mult():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a * b\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(t)\n\n m1.mult(m2, target = m3)\n m1.mult(m2)\n\n m3.copy_to_host()\n m1.copy_to_host()\n\n assert 
np.max(np.abs(c - m3.numpy_array)) < 10**-4, \"Error in CUDAMatrix.multiply exceeded threshold\"\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.multiply exceeded threshold\"\n\ndef test_scalar_mult():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a * alpha\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(t)\n\n m1.mult(alpha, target = m2)\n m1.mult(alpha)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.mult exceeded threshold\"\n\ndef test_scalar_div():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a / alpha\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(t)\n\n m1.divide(alpha, target = m2)\n m1.divide(alpha)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.divide exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.divide exceeded threshold\"\n\ndef test_add_scalar():\n m = 256\n n = 128\n alpha = np.pi\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n t = np.array(np.empty((m, n)), dtype=np.float32, order='F')\n\n c = a + alpha\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(t)\n\n m1.add(alpha, target = m2)\n m1.add(alpha)\n\n m1.copy_to_host()\n m2.copy_to_host()\n\n assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_scalar exceeded threshold\"\n assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, \"Error in CUDAMatrix.add_scalar exceeded threshold\"\n\ndef test_dot():\n m = 128\n k = 256\n n = 64\n a = np.array(np.random.randn(m, k)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n c = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n alpha = 2.\n beta = 0.3\n r = beta * c + alpha * np.dot(a, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(c)\n m3 = cm.dot(m1, m2, target = m3, alpha = alpha, beta = beta)\n m3.copy_to_host()\n\n assert np.max(np.abs(r - m3.numpy_array)) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\ndef test_dot_trans():\n m = 128\n k = 256\n n = 64\n a = np.array(np.random.randn(k, m)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n\n c = np.dot(a.T, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m1.set_trans(True);\n m3 = cm.dot(m1, m2)\n m3.copy_to_host()\n\n assert np.max(np.abs(c - m3.numpy_array)) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\ndef test_dot_vect():\n m = 128\n k = 256\n n = 1\n a = np.array(np.random.randn(m, k)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n A = cm.CUDAMatrix(a)\n B = cm.CUDAMatrix(b)\n\n c = np.dot(a, b)\n C = cm.dot(A, B)\n assert np.max(np.abs(c - C.asarray())) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\n c = np.dot(a.T, b[:m])\n C = cm.dot(A.T, B.slice(0, m))\n assert np.max(np.abs(c - C.asarray())) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\n c = np.dot(b.T, a.T)\n C = cm.dot(B.T, A.T)\n assert np.max(np.abs(c - C.asarray())) < 10**-2, 
\"Error in CUDAMatrix.dot exceeded threshold\"\n\n c = np.dot(b[:m].T, a)\n C = cm.dot(B.slice(0, m).reshape((1, m)), A)\n assert np.max(np.abs(c - C.asarray())) < 10**-2, \"Error in CUDAMatrix.dot exceeded threshold\"\n\ndef test_add_dot():\n m = 128\n k = 256\n n = 64\n a = np.array(np.random.randn(m, k)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n c = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n mult = 2.1\n beta = 0.8\n res = beta * c + mult * np.dot(a, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(c)\n m3.add_dot(m1, m2, mult = mult, beta = beta)\n\n m3.copy_to_host()\n\n assert np.max(np.abs(res - m3.numpy_array)) < 10**-2, \"Error in CUDAMatrix.add_dot exceeded threshold\"\n\ndef test_vdot():\n m = 64\n n = 64\n a = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n b = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n\n true_res = np.vdot(a, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n\n res = cm.vdot(m1, m2)\n\n assert np.abs(res - true_res) < 10**-2, \"Error in CUDAMatrix.vdot exceeded threshold\"\n\ndef test_subtract_dot():\n m = 128\n k = 256\n n = 64\n a = np.array(np.random.randn(m, k)*10, dtype=np.float32, order='F')\n b = np.array(np.random.randn(k, n)*10, dtype=np.float32, order='F')\n c = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n\n res = c - np.dot(a, b)\n\n m1 = cm.CUDAMatrix(a)\n m2 = cm.CUDAMatrix(b)\n m3 = cm.CUDAMatrix(c)\n m3.subtract_dot(m1, m2)\n\n m3.copy_to_host()\n\n assert np.max(np.abs(res - m3.numpy_array)) < 10**-2, \"Error in CUDAMatrix.subtract_dot exceeded threshold\"\n\ndef test_random():\n cm.CUDAMatrix.init_random(1)\n m1 = cm.CUDAMatrix(np.array(np.empty((128,256)), dtype=np.float32, order='F'))\n m2 = cm.CUDAMatrix(np.array(np.empty((128,256)), dtype=np.float32, order='F'))\n\n m1.fill_with_rand()\n m1.copy_to_host()\n m2.fill_with_randn()\n m2.copy_to_host()\n\n assert np.abs(np.mean(m1.numpy_array) - 0.5) < 10**-2, \"Error in CUDAMatrix.fill_with_rand threshold\"\n assert np.abs(np.mean(m2.numpy_array)) < 10**-2, \"Error in CUDAMatrix.fill_with_randn threshold\"\n\ndef test_euclid_norm():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n m = cm.CUDAMatrix(a)\n\n n1 = np.sqrt(np.sum(a**2))\n n2 = m.euclid_norm()\n\n assert np.abs(n1-n2) < 10**-2, \"Error in CUDAMatrix.euclid_norm exceeded threshold\"\n\ndef test_manhattan_norm():\n m = 256\n n = 128\n a = np.array(np.random.rand(m, n)*10, dtype=np.float32, order='F')\n\n m = cm.CUDAMatrix(a)\n\n n1 = np.sum(np.abs(a), dtype=np.double)\n n2 = m.manhattan_norm()\n\n assert np.abs(n1-n2) < 2e-2, \"Error in CUDAMatrix.manhattan_norm exceeded threshold (%f != %f)\" % (n1, n2)\n\ndef test_allfinite():\n a = cm.empty((10, 20)).assign(1).divide(0) # NaN\n b = cm.empty((10, 20)).assign(1e20).mult(1e20) # Inf\n c = cm.empty((10, 20)).assign(1) # 1.0\n\n assert (not a.allfinite()) and (not b.allfinite()) and c.allfinite(), \"CUDAMatrix.allfinite does not work\"\n\ndef test_select_columns():\n m = 256\n n = 128\n k = 8\n\n s = np.array(np.random.randn(m, n), dtype=np.float32, order='F')\n i_l = [0, 1, 2, 3, 5, 10, 12, 20]\n i = np.array(i_l).T[np.newaxis, :]\n t = np.empty((m, k))\n\n s_d = cm.CUDAMatrix(s)\n i_d = cm.CUDAMatrix(i)\n t_d = cm.CUDAMatrix(t)\n\n s_d.select_columns(i_d, t_d)\n res = s[:,i_l]\n\n assert np.max(np.abs(res - t_d.asarray())) < 10**-4, \"Error in CUDAMatrix.select_columns 
exceeded threshold\"\n\n\ndef test_where():\n m = 256\n n = 128\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n z = np.zeros_like(a)\n res = np.where(a > 0, a, z);\n\n a_d = cm.CUDAMatrix(a)\n z_d = cm.CUDAMatrix(z)\n res_d = cm.empty(a_d.shape)\n a_d.greater_than(0, res_d)\n cm.where(res_d, a_d, z_d)\n assert np.abs(res-res_d.asarray()).max() < 1e-2, \"Error in cudamat.where\"\n\n\ndef test_correlate():\n m = 64\n n = 32\n km = 17\n kn = 11\n\n a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n k = np.array(np.random.randn(km, kn)*10, dtype=np.float32, order='F')\n\n res = np.zeros_like(a)\n for i in range(len(a)):\n for j in range(len(a[0])):\n for h in range(-(km/2), km/2 + 1):\n for w in range(-(kn/2), kn/2 + 1):\n if i+h >= 0 and i+h < m and j+w >= 0 and j+w < n:\n res[i][j] += a[i + h][j + w] * k[km/2 + h][kn/2 + w]\n\n a_d = cm.CUDAMatrix(a)\n k_d = cm.CUDAMatrix(k)\n\n res_d = cm.correlate(a_d, k_d)\n assert np.abs(res-res_d.asarray()).max() < 1e-2, \"Error in cudamat.correlate\"\n\n\nif __name__ == '__main__':\n nose.runmodule()\n"
] | [
[
"numpy.sum",
"scipy.special.gamma",
"numpy.log",
"numpy.tanh",
"numpy.abs",
"numpy.random.rand",
"numpy.where",
"numpy.mean",
"numpy.minimum",
"scipy.special.gammaln",
"numpy.maximum",
"numpy.array",
"numpy.zeros_like",
"numpy.sign",
"numpy.empty",
"numpy.random.randn",
"numpy.exp",
"numpy.sqrt",
"numpy.dot",
"numpy.vdot"
]
] |
shun60s/BipedalWalkerHardcore-Weights-Choice | [
"76a3df3585a13881f1754274b8ded73a054d551d"
] | [
"train.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n-----------------------------------------------------------------------------\n Copyright 2017 David Griffis\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-----------------------------------------------------------------------------\nChanged:\n Add args.entropy, args.value\n Add CONV6_Net\n Add second environment and its worker\n Add load two basis models\n Add CONV_Choice1_Net\n \n\"\"\"\n\nfrom __future__ import division\nfrom setproctitle import setproctitle as ptitle\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom environment import create_env\nfrom utils import ensure_shared_grads\nfrom model import * # change to import any models\nfrom player_util import Agent\nfrom torch.autograd import Variable\nimport gym\n\n\ndef train(rank, args, shared_model, optimizer, shared_bm1_model, shared_bm2_model):\n ptitle('Training Agent: {}'.format(rank))\n gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]\n torch.manual_seed(args.seed + rank)\n if gpu_id >= 0:\n torch.cuda.manual_seed(args.seed + rank)\n\n # add second environment\n if rank >= args.workers:\n print ('training agent of second environment', rank)\n env = create_env(args.env2, args)\n else:\n env = create_env(args.env, args)\n \n if optimizer is None:\n if args.optimizer == 'RMSprop':\n optimizer = optim.RMSprop(shared_model.parameters(), lr=args.lr)\n if args.optimizer == 'Adam':\n optimizer = optim.Adam(shared_model.parameters(), lr=args.lr)\n\n env.seed(args.seed + rank)\n player = Agent(None, env, args, None)\n player.gpu_id = gpu_id\n \n if args.model == 'CONV_Choice1':\n player.model = CONV_Choice1_Net(args.stack_frames, player.env.action_space, args.discrete_number, player.env.observation_space.shape[0]) # change\n if args.basis_model1 == 'CONV6':\n player.bm1_model = CONV6_Net(args.stack_frames, player.env.action_space, player.env.observation_space.shape[0]) # change\n if args.basis_model2 == 'CONV6':\n player.bm2_model = CONV6_Net(args.stack_frames, player.env.action_space, player.env.observation_space.shape[0]) # change\n \n \n \n player.state = player.env.reset()\n player.state = torch.from_numpy(player.state).float()\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n player.state = player.state.cuda()\n player.bm1_model = player.bm1_model.cuda()\n player.bm2_model = player.bm2_model.cuda()\n player.model = player.model.cuda()\n # \n ratio_entropy =args.entropy\n ratio_value = args.value\n \n # This is no train about two basis models\n player.bm1_model.eval() # eval()はdropoutやbatch normの on/offの切替です\n player.bm2_model.eval() # eval()はdropoutやbatch normの on/offの切替です\n player.model.train() # Sets the module in training mode.\n \n while True:\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n player.model.load_state_dict(shared_model.state_dict())\n player.bm1_model.load_state_dict(shared_bm1_model.state_dict())\n player.bm2_model.load_state_dict(shared_bm2_model.state_dict())\n else:\n player.model.load_state_dict(shared_model.state_dict())\n 
player.bm1_model.load_state_dict(shared_bm1_model.state_dict())\n player.bm2_model.load_state_dict(shared_bm2_model.state_dict())\n \n if player.done:\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n # use for CONV_Choice1_Net\n player.cx = Variable(torch.zeros(1, 128).cuda())\n player.hx = Variable(torch.zeros(1, 128).cuda())\n \n if args.basis_model1 == 'CONV6':\n player.bm1_cx1 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm1_hx1 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm1_cx2 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm1_hx2 = Variable(torch.zeros(\n 1,128).cuda())\n if args.basis_model2 == 'CONV6':\n player.bm2_cx1 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm2_hx1 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm2_cx2 = Variable(torch.zeros(\n 1,128).cuda())\n player.bm2_hx2 = Variable(torch.zeros(\n 1,128).cuda())\n else:\n # use for CONV_Choice1_Net\n player.cx = Variable(torch.zeros(1, 128))\n player.hx = Variable(torch.zeros(1, 128))\n \n if args.basis_model1 == 'CONV6':\n player.bm1_cx1 = Variable(torch.zeros(1, 128))\n player.bm1_hx1 = Variable(torch.zeros(1, 128))\n player.bm1_cx2 = Variable(torch.zeros(1, 128))\n player.bm1_hx2 = Variable(torch.zeros(1, 128))\n if args.basis_model2 == 'CONV6':\n player.bm2_cx1 = Variable(torch.zeros(1, 128))\n player.bm2_hx1 = Variable(torch.zeros(1, 128))\n player.bm2_cx2 = Variable(torch.zeros(1, 128))\n player.bm2_hx2 = Variable(torch.zeros(1, 128))\n else:\n # use for CONV_Choice1_Net\n player.cx = Variable(player.cx.data)\n player.hx = Variable(player.hx.data)\n if args.basis_model1 == 'CONV6':\n player.bm1_cx1 = Variable(player.bm1_cx1.data)\n player.bm1_hx1 = Variable(player.bm1_hx1.data)\n player.bm1_cx2 = Variable(player.bm1_cx2.data)\n player.bm1_hx2 = Variable(player.bm1_hx2.data)\n if args.basis_model2 == 'CONV6':\n player.bm2_cx1 = Variable(player.bm2_cx1.data)\n player.bm2_hx1 = Variable(player.bm2_hx1.data)\n player.bm2_cx2 = Variable(player.bm2_cx2.data)\n player.bm2_hx2 = Variable(player.bm2_hx2.data)\n \n \n # try args.num_steps times\n for step in range(args.num_steps):\n \n player.action_train() # call action_train\n \n if player.done:\n break\n \n \n \n if player.done:\n player.eps_len = 0\n state = player.env.reset()\n player.state = torch.from_numpy(state).float()\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n player.state = player.state.cuda()\n \n # --- add\n if args.use_discrete_model:\n if player.env.observation_space.shape[0] == 28 and args.discrete_number == 4:\n \n state_out_loss = 0\n for i in range(len(player.loss_state_out)):\n state_out_loss = state_out_loss + player.loss_state_out[i]\n \n #\n player.model.zero_grad()\n state_out_loss.backward()\n ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)\n optimizer.step()\n player.clear_actions()\n \n \n else: # ---\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n R = torch.zeros(1, 1).cuda()\n else:\n R = torch.zeros(1, 1)\n \n if not player.done:\n state = player.state\n \n if args.use_discrete_model:\n # --- Only compute args.model's value ---\n if args.model == 'CONV_Choice1':\n state = state.unsqueeze(0)\n # value is critic\n value, _, _ = player.model(\n (Variable(state), (player.hx, player.cx)))\n else: # continouse model\n if args.basis_model1 == 'CONV6':\n value, _, _, _ = player.model(\n (Variable(state), (player.bm1_hx1, player.bm1_cx1, player.bm1_hx2, player.bm1_cx2)))\n \n R = value.data\n \n player.values.append(Variable(R))\n policy_loss = 0\n value_loss = 0\n R = Variable(R)\n if gpu_id 
>= 0:\n with torch.cuda.device(gpu_id):\n gae = torch.zeros(1, 1).cuda()\n else:\n gae = torch.zeros(1, 1)\n \n \n for i in reversed(range(len(player.rewards))):\n R = args.gamma * R + player.rewards[i]\n advantage = R - player.values[i]\n \n # Value Loss is ...\n value_loss = value_loss + 0.5 * advantage.pow(2)\n \n # Generalized Advantage Estimataion\n # print(player.rewards[i])\n # rewards + gamma* value[i+1] - value[i]\n delta_t = player.rewards[i] + args.gamma * \\\n player.values[i + 1].data - player.values[i].data\n \n gae = gae * args.gamma * args.tau + delta_t\n \n if args.use_discrete_model:\n # Policy Loss is ....\n policy_loss = policy_loss - \\\n (player.log_probs[i].sum() * Variable(gae)) # Policy Gradient Theorem ?\n else: # continouse model\n # Policy Loss is ....\n policy_loss = policy_loss - \\\n (player.log_probs[i].sum() * Variable(gae)) - \\\n (ratio_entropy * player.entropies[i].sum())\n \n player.model.zero_grad()\n \n # --- backward ---\n (policy_loss + ratio_value * value_loss).backward()\n ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)\n optimizer.step()\n player.clear_actions()\n"
] | [
[
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.autograd.Variable",
"torch.from_numpy",
"torch.zeros",
"torch.cuda.device"
]
] |
shahad-bit/Disaster-Response-Pipeline | [
"76a86db14845c8d8ba8d87c81112580c96b2b0d4"
] | [
"data/process_data.py"
] | [
"import sys\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n \"\"\"Load disaster messages and categories from csv files.\n \n Arguments:\n messages_filepath {String} -- disaster message file path\n categories_filepath {String} -- disaster categories file path\n \n Returns:\n pandas dataframe -- merged disaster data\n \"\"\" \n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n df = messages.merge(categories, on='id')\n return df\n\n\n\ndef clean_data(df):\n \"\"\"Preprocess data\n \n Arguments:\n df {pandas dataframe} -- disaster data\n \"\"\" \n # create a dataframe of the 36 individual category columns\n categories = df['categories'].str.split(';', expand=True)\n\n # select the first row of the categories dataframe\n row = categories.iloc[0]\n\n # use this row to extract a list of new column names for categories.\n # one way is to apply a lambda function that takes everything \n # up to the second to last character of each string with slicing\n category_colnames = [val.split('-')[0] for val in row]\n print(category_colnames)\n\n # rename the columns of `categories`\n categories.columns = category_colnames\n\n for column in categories:\n # set each value to be the last character of the string\n categories[column] = categories[column].str[-1]\n # convert column from string to numeric\n categories[column] = categories[column].astype(int)\n\n # drop the original categories column from `df`\n df.drop(['categories'], axis=1, inplace=True)\n # concatenate the original dataframe with the new `categories` dataframe\n df = pd.concat([df, categories], axis=1)\n # drop duplicates\n df.drop_duplicates(inplace=True)\n return df\n\n\n\n\ndef save_data(df, database_filename):\n \"\"\"Store the data in mysql db.\n \n Arguments:\n df {pandas dataframe} -- disaster data\n database_filename {String} -- path to the db\n \"\"\" \n engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql('disaster_response', engine, index=False) \n\n\ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"pandas.read_csv",
"pandas.concat"
]
] |
adesgautam/objdet | [
"7154bd5035dd51de8a49b7ae59b65277a1727263"
] | [
"yolov3/yolo_detection/yolo_files/Utils/yolo3/model.py"
] | [
"\"\"\"YOLO_v3 Model Defined in Keras.\"\"\"\n\nfrom functools import wraps\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.ops import control_flow_ops\nimport keras\nfrom keras import backend as K\nfrom keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.regularizers import l2\n\nfrom ..yolo3.utils import compose\n\n\n@wraps(Conv2D)\ndef DarknetConv2D(*args, **kwargs):\n \"\"\"Wrapper to set Darknet parameters for Convolution2D.\"\"\"\n darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}\n darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'\n darknet_conv_kwargs.update(kwargs)\n return Conv2D(*args, **darknet_conv_kwargs)\n\ndef DarknetConv2D_BN_Leaky(*args, **kwargs):\n \"\"\"Darknet Convolution2D followed by BatchNormalization and LeakyReLU.\"\"\"\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))\n\ndef resblock_body(x, num_filters, num_blocks):\n '''A series of resblocks starting with a downsampling Convolution2D'''\n # Darknet uses left and top padding instead of 'same' mode\n x = ZeroPadding2D(((1,0),(1,0)))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)\n for i in range(num_blocks):\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)\n x = Add()([x,y])\n return x\n\ndef darknet_body(x):\n '''Darknent body having 52 Convolution2D layers'''\n x = DarknetConv2D_BN_Leaky(32, (3,3))(x)\n x = resblock_body(x, 64, 1)\n x = resblock_body(x, 128, 2)\n x = resblock_body(x, 256, 8)\n x = resblock_body(x, 512, 8)\n x = resblock_body(x, 1024, 4)\n return x\n\ndef make_last_layers(x, num_filters, out_filters):\n '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D(out_filters, (1,1)))(x)\n return x, y\n\n\ndef yolo_body(inputs, num_anchors, num_classes):\n \"\"\"Create YOLO_V3 model CNN body in Keras.\"\"\"\n darknet = Model(inputs, darknet_body(inputs))\n x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(256, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[152].output])\n x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[92].output])\n x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))\n\n return Model(inputs, [y1,y2,y3])\n\ndef tiny_yolo_body(inputs, num_anchors, num_classes):\n '''Create Tiny YOLO_v3 model CNN body in keras.'''\n x1 = compose(\n DarknetConv2D_BN_Leaky(16, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(32, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(64, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(128, 
(3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(256, (3,3)))(inputs)\n x2 = compose(\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(512, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),\n DarknetConv2D_BN_Leaky(1024, (3,3)),\n DarknetConv2D_BN_Leaky(256, (1,1)))(x1)\n y1 = compose(\n DarknetConv2D_BN_Leaky(512, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)\n\n x2 = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x2)\n y2 = compose(\n Concatenate(),\n DarknetConv2D_BN_Leaky(256, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])\n\n return Model(inputs, [y1,y2])\n\n\ndef yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):\n \"\"\"Convert final layer features to bounding box parameters.\"\"\"\n num_anchors = len(anchors)\n # Reshape to batch, height, width, num_anchors, box_params.\n anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])\n\n grid_shape = K.shape(feats)[1:3] # height, width\n grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),\n [1, grid_shape[1], 1, 1])\n grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),\n [grid_shape[0], 1, 1, 1])\n grid = K.concatenate([grid_x, grid_y])\n grid = K.cast(grid, K.dtype(feats))\n\n feats = K.reshape(\n feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])\n\n # Adjust preditions to each spatial grid point and anchor size.\n box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))\n box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))\n box_confidence = K.sigmoid(feats[..., 4:5])\n box_class_probs = K.sigmoid(feats[..., 5:])\n\n if calc_loss == True:\n return grid, feats, box_xy, box_wh\n return box_xy, box_wh, box_confidence, box_class_probs\n\n\ndef yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):\n '''Get corrected boxes'''\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n input_shape = K.cast(input_shape, K.dtype(box_yx))\n image_shape = K.cast(image_shape, K.dtype(box_yx))\n new_shape = K.round(image_shape * K.min(input_shape/image_shape))\n offset = (input_shape-new_shape)/2./input_shape\n scale = input_shape/new_shape\n box_yx = (box_yx - offset) * scale\n box_hw *= scale\n\n box_mins = box_yx - (box_hw / 2.)\n box_maxes = box_yx + (box_hw / 2.)\n boxes = K.concatenate([\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2] # x_max\n ])\n\n # Scale boxes back to original image shape.\n boxes *= K.concatenate([image_shape, image_shape])\n return boxes\n\n\ndef yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):\n '''Process Conv layer output'''\n box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,\n anchors, num_classes, input_shape)\n boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)\n boxes = K.reshape(boxes, [-1, 4])\n box_scores = box_confidence * box_class_probs\n box_scores = K.reshape(box_scores, [-1, num_classes])\n return boxes, box_scores\n\n\ndef yolo_eval(yolo_outputs,\n anchors,\n num_classes,\n image_shape,\n max_boxes=20,\n score_threshold=.6,\n iou_threshold=.5):\n \"\"\"Evaluate YOLO model on given input and return filtered boxes.\"\"\"\n num_layers = len(yolo_outputs)\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], 
[1,2,3]] # default setting\n input_shape = K.shape(yolo_outputs[0])[1:3] * 32\n boxes = []\n box_scores = []\n for l in range(num_layers):\n _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, image_shape)\n boxes.append(_boxes)\n box_scores.append(_box_scores)\n boxes = K.concatenate(boxes, axis=0)\n box_scores = K.concatenate(box_scores, axis=0)\n\n mask = box_scores >= score_threshold\n max_boxes_tensor = K.constant(max_boxes, dtype='int32')\n boxes_ = []\n scores_ = []\n classes_ = []\n for c in range(num_classes):\n # TODO: use keras backend instead of tf.\n class_boxes = tf.boolean_mask(boxes, mask[:, c])\n class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])\n nms_index = tf.image.non_max_suppression(\n class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)\n class_boxes = K.gather(class_boxes, nms_index)\n class_box_scores = K.gather(class_box_scores, nms_index)\n classes = K.ones_like(class_box_scores, 'int32') * c\n boxes_.append(class_boxes)\n scores_.append(class_box_scores)\n classes_.append(classes)\n boxes_ = K.concatenate(boxes_, axis=0)\n scores_ = K.concatenate(scores_, axis=0)\n classes_ = K.concatenate(classes_, axis=0)\n\n return boxes_, scores_, classes_\n\n\ndef preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):\n '''Preprocess true boxes to training input format\n\n Parameters\n ----------\n true_boxes: array, shape=(m, T, 5)\n Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.\n input_shape: array-like, hw, multiples of 32\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n\n Returns\n -------\n y_true: list of array, shape like yolo_outputs, xywh are reletive value\n\n '''\n assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'\n num_layers = len(anchors)//3 # default setting\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n\n true_boxes = np.array(true_boxes, dtype='float32')\n input_shape = np.array(input_shape, dtype='int32')\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]\n\n m = true_boxes.shape[0]\n grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]\n y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),\n dtype='float32') for l in range(num_layers)]\n\n # Expand dim to apply broadcasting.\n anchors = np.expand_dims(anchors, 0)\n anchor_maxes = anchors / 2.\n anchor_mins = -anchor_maxes\n valid_mask = boxes_wh[..., 0]>0\n\n for b in range(m):\n # Discard zero rows.\n wh = boxes_wh[b, valid_mask[b]]\n if len(wh)==0: continue\n # Expand dim to apply broadcasting.\n wh = np.expand_dims(wh, -2)\n box_maxes = wh / 2.\n box_mins = -box_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # Find best anchor for each true box\n best_anchor = np.argmax(iou, axis=-1)\n\n for t, n in enumerate(best_anchor):\n for l in range(num_layers):\n if n in anchor_mask[l]:\n i = 
np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')\n j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')\n k = anchor_mask[l].index(n)\n c = true_boxes[b,t, 4].astype('int32')\n y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]\n y_true[l][b, j, i, k, 4] = 1\n y_true[l][b, j, i, k, 5+c] = 1\n\n return y_true\n\n\ndef box_iou(b1, b2):\n '''Return iou tensor\n\n Parameters\n ----------\n b1: tensor, shape=(i1,...,iN, 4), xywh\n b2: tensor, shape=(j, 4), xywh\n\n Returns\n -------\n iou: tensor, shape=(i1,...,iN, j)\n\n '''\n\n # Expand dim to apply broadcasting.\n b1 = K.expand_dims(b1, -2)\n b1_xy = b1[..., :2]\n b1_wh = b1[..., 2:4]\n b1_wh_half = b1_wh/2.\n b1_mins = b1_xy - b1_wh_half\n b1_maxes = b1_xy + b1_wh_half\n\n # Expand dim to apply broadcasting.\n b2 = K.expand_dims(b2, 0)\n b2_xy = b2[..., :2]\n b2_wh = b2[..., 2:4]\n b2_wh_half = b2_wh/2.\n b2_mins = b2_xy - b2_wh_half\n b2_maxes = b2_xy + b2_wh_half\n\n intersect_mins = K.maximum(b1_mins, b2_mins)\n intersect_maxes = K.minimum(b1_maxes, b2_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n iou = intersect_area / (b1_area + b2_area - intersect_area)\n\n return iou\n\n\ndef yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):\n '''Return yolo_loss tensor\n\n Parameters\n ----------\n yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body\n y_true: list of array, the output of preprocess_true_boxes\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n ignore_thresh: float, the iou threshold whether to ignore object confidence loss\n\n Returns\n -------\n loss: tensor, shape=(1,)\n\n '''\n num_layers = len(anchors)//3 # default setting\n yolo_outputs = args[:num_layers]\n y_true = args[num_layers:]\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))\n grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]\n loss = 0\n m = K.shape(yolo_outputs[0])[0] # batch size, tensor\n mf = K.cast(m, K.dtype(yolo_outputs[0]))\n\n for l in range(num_layers):\n object_mask = y_true[l][..., 4:5]\n true_class_probs = y_true[l][..., 5:]\n\n grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)\n pred_box = K.concatenate([pred_xy, pred_wh])\n\n # Darknet raw box to calculate loss.\n raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid\n raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])\n raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]\n\n # Find ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, 'bool')\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))\n return b+1, ignore_mask\n _, ignore_mask = control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])\n ignore_mask = 
ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n # K.binary_crossentropy is helpful to avoid exp overflow.\n xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)\n wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])\n confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \\\n (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask\n class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)\n\n xy_loss = K.sum(xy_loss) / mf\n wh_loss = K.sum(wh_loss) / mf\n confidence_loss = K.sum(confidence_loss) / mf\n class_loss = K.sum(class_loss) / mf\n loss += xy_loss + wh_loss + confidence_loss + class_loss\n if print_loss:\n loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')\n return loss\n"
] | [
[
"numpy.maximum",
"numpy.floor",
"numpy.argmax",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.image.non_max_suppression",
"numpy.expand_dims",
"tensorflow.boolean_mask",
"numpy.array",
"numpy.minimum"
]
] |
dipjyoti92/WaveRNN | [
"43c170dac7f6f27697fa4f04d44731f744c27fb4"
] | [
"gen_tacotron.py"
] | [
"import torch\nfrom models.fatchord_version import WaveRNN\nimport hparams as hp\nfrom utils.text.symbols import symbols\nfrom utils.paths import Paths\nfrom models.tacotron import Tacotron\nimport argparse\nfrom utils.text import text_to_sequence\nfrom utils.display import save_attention, simple_table\n\nif __name__ == \"__main__\" :\n\n # Parse Arguments\n parser = argparse.ArgumentParser(description='TTS Generator')\n parser.add_argument('--input_text', '-i', type=str, help='[string] Type in something here and TTS will generate it!')\n parser.add_argument('--batched', '-b', dest='batched', action='store_true', help='Fast Batched Generation')\n parser.add_argument('--unbatched', '-u', dest='batched', action='store_false', help='Slow Unbatched Generation')\n parser.add_argument('--target', '-t', type=int, help='[int] number of samples in each batch index')\n parser.add_argument('--overlap', '-o', type=int, help='[int] number of crossover samples')\n parser.add_argument('--weights_path', '-w', type=str, help='[string/path] Load in different Tacotron Weights')\n parser.add_argument('--save_attention', '-a', dest='save_attn', action='store_true', help='Save Attention Plots')\n parser.set_defaults(batched=hp.voc_gen_batched)\n parser.set_defaults(target=hp.voc_target)\n parser.set_defaults(overlap=hp.voc_overlap)\n parser.set_defaults(input_text=None)\n parser.set_defaults(weights_path=None)\n parser.set_defaults(save_attention=False)\n args = parser.parse_args()\n\n batched = args.batched\n target = args.target\n overlap = args.overlap\n input_text = args.input_text\n weights_path = args.weights_path\n save_attn = args.save_attention\n\n paths = Paths(hp.data_path, hp.voc_model_id, hp.tts_model_id)\n\n print('\\nInitialising WaveRNN Model...\\n')\n\n # Instantiate WaveRNN Model\n voc_model = WaveRNN(rnn_dims=hp.voc_rnn_dims,\n fc_dims=hp.voc_fc_dims,\n bits=hp.bits,\n pad=hp.voc_pad,\n upsample_factors=hp.voc_upsample_factors,\n feat_dims=hp.num_mels,\n compute_dims=hp.voc_compute_dims,\n res_out_dims=hp.voc_res_out_dims,\n res_blocks=hp.voc_res_blocks,\n hop_length=hp.hop_length,\n sample_rate=hp.sample_rate,\n mode=hp.voc_mode).cuda()\n\n voc_model.restore(paths.voc_latest_weights)\n\n print('\\nInitialising Tacotron Model...\\n')\n\n # Instantiate Tacotron Model\n tts_model = Tacotron(embed_dims=hp.tts_embed_dims,\n num_chars=len(symbols),\n encoder_dims=hp.tts_encoder_dims,\n decoder_dims=hp.tts_decoder_dims,\n n_mels=hp.num_mels,\n fft_bins=hp.num_mels,\n postnet_dims=hp.tts_postnet_dims,\n encoder_K=hp.tts_encoder_K,\n lstm_dims=hp.tts_lstm_dims,\n postnet_K=hp.tts_postnet_K,\n num_highways=hp.tts_num_highways,\n dropout=hp.tts_dropout).cuda()\n\n tts_restore_path = weights_path if weights_path else paths.tts_latest_weights\n tts_model.restore(tts_restore_path)\n\n if input_text :\n inputs = [text_to_sequence(input_text.strip(), hp.tts_cleaner_names)]\n else :\n with open('sentences.txt') as f :\n inputs = [text_to_sequence(l.strip(), hp.tts_cleaner_names) for l in f]\n\n voc_k = voc_model.get_step() // 1000\n tts_k = tts_model.get_step() // 1000\n\n simple_table([('WaveRNN', str(voc_k) + 'k'),\n ('Tacotron', str(tts_k) + 'k'),\n ('r', tts_model.r.item()),\n ('Generation Mode', 'Batched' if batched else 'Unbatched'),\n ('Target Samples', target if batched else 'N/A'),\n ('Overlap Samples', overlap if batched else 'N/A')])\n\n for i, x in enumerate(inputs, 1) :\n\n print(f'\\n| Generating {i}/{len(inputs)}')\n _, m, attention = tts_model.generate(x)\n\n if input_text :\n # save_path = 
f'{paths.tts_output}__input_{input_text[:10]}_{tts_k}k.wav'\n save_path = f'{paths.tts_output}output.wav'\n else :\n save_path = f'{paths.tts_output}{i}_batched{str(batched)}_{tts_k}k.wav'\n\n if save_attn : save_attention(attention, save_path)\n\n m = torch.tensor(m).unsqueeze(0)\n m = (m + 4) / 8\n\n voc_model.generate(m, save_path, batched, hp.voc_target, hp.voc_overlap, hp.mu_law)\n\n print('\\n\\nDone.\\n')\n"
] | [
[
"torch.tensor"
]
] |
roycezhou/Anomaly-detection-and-classification-with-deep-learning | [
"12b26f7c6f97a0a5305c653ab36b5272f94696fa"
] | [
"src/anomaly_detection/knn/predict.py"
] | [
"import sys\r\nimport numpy as np\r\nfrom itertools import product\r\nimport torchvision.transforms as transforms\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nfrom Utils.transform import *\r\nfrom Utils.pillowhelper import *\r\n\r\n\r\ndef rowcolumn2coor(row, col, patch_size):\r\n \"\"\" Map row column number to pillow image coordinates: (left, upper, right, lower)\r\n \"\"\"\r\n left = col * patch_size\r\n upper = row * patch_size\r\n right = (col + 1) * patch_size\r\n lower = (row + 1) * patch_size\r\n return (left, upper, right, lower)\r\n\r\n\r\ndef main_road_knn(image, feature_extractor, z_list, thresh, num_patch, patch_ignore, patch_size, flag_equalizer=True, img_resize=224, flag_cuda=True):\r\n z_list_test = get_latent_vector_list_test(image, feature_extractor, num_patch, patch_ignore, patch_size, flag_equalizer, img_resize, flag_cuda)\r\n detected = detect_anomaly_knn(z_list, z_list_test, thresh, num_patch, patch_ignore)\r\n return detected\r\n\r\n\r\ndef get_latent_vector_list_test(image, feature_extractor, num_patch, patch_ignore, patch_size, flag_equalizer, img_resize, flag_cuda): \r\n # Extraction\r\n z_list_patches = []\r\n for i, (row, col) in enumerate(product(range(num_patch), range(num_patch))):\r\n if patch_ignore and (row, col) in patch_ignore:\r\n print('skip {}'.format((row, col)))\r\n continue\r\n print('compute {}'.format((row, col)))\r\n tmp_coor = rowcolumn2coor(row, col, patch_size)\r\n # Apply transformer\r\n tmp_transforms = transforms.Compose([\r\n EqualizerCroppedGrey(flag_equalizer, tmp_coor, img_resize),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]),\r\n RepeatChannel(3)\r\n ])\r\n tmp_patch = tmp_transforms(image)\r\n tmp_patch = tmp_patch.unsqueeze(0)\r\n if flag_cuda:\r\n tmp_patch = tmp_patch.cuda()\r\n tmp_z = feature_extractor(tmp_patch).detach().cpu().numpy()\r\n z_list_patches.append(tmp_z)\r\n \r\n tmp_transforms = transforms.Compose([\r\n EqualizerCroppedGrey(flag_equalizer, None, img_resize),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]),\r\n RepeatChannel(3)\r\n ])\r\n\r\n tmp_patch = tmp_transforms(image)\r\n tmp_patch = tmp_patch.unsqueeze(0)\r\n if flag_cuda:\r\n tmp_patch = tmp_patch.cuda()\r\n tmp_z = feature_extractor(tmp_patch).detach().cpu().numpy()\r\n z_list_patches.append(tmp_z)\r\n return z_list_patches\r\n\r\n\r\ndef detect_anomaly_knn(z_list, z_list_test, thresh, num_patch, patch_ignore):\r\n counter = 0\r\n detected = []\r\n for i, (row, col) in enumerate(product(range(num_patch), range(num_patch))):\r\n if patch_ignore and (row, col) in patch_ignore:\r\n print('skip {}'.format((row, col)))\r\n continue\r\n print('compute {}'.format((row, col)))\r\n score = np.mean(cosine_similarity(z_list[counter], z_list_test[counter]), axis=0)\r\n if score[0] < thresh[counter]:\r\n detected.append({'index': counter, 'row': row, 'col': col, 'score': score[0]})\r\n counter+=1\r\n score = np.mean(cosine_similarity(z_list[counter], z_list_test[counter]), axis=0)\r\n if score[0] < thresh[counter]:\r\n detected.append({'index': counter, 'row': None, 'col': None, 'score': score[0]})\r\n return detected"
] | [
[
"sklearn.metrics.pairwise.cosine_similarity"
]
] |
thanhtung09t2/Hyperbox-classifier | [
"4b4cf9dfae68902bd9a742db421cacce8daf37a4"
] | [
"GFMM/agglo_onlgfmm.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 28 15:41:27 2018\n\n@author: Thanh Tung Khuat\n\nAnother method for serial combination of online learning and agglomerative learning gfmm\n\n Using Agglomerative learning to train a base model, then deploy the trained model for online learning with different training data\n \n AggloOnlineGFMM(gamma, teta_onl, teta_agglo, bthres, simil, sing, isDraw, oper, isNorm, norm_range, V_pre, W_pre, classId_pre)\n\n INPUT\n gamma Membership function slope (default: 1)\n teta_onl Maximum hyperbox size (default: 1) for online learning\n teta_agglo Maximum hyperbox size (default: 1) for agglomerative v2 learning\n bthres Similarity threshold for hyperbox concatenation (default: 0.5)\n simil Similarity measure: 'short', 'long' or 'mid' (default: 'mid')\n sing Use 'min' or 'max' (default) memberhsip in case of assymetric similarity measure (simil='mid')\n isDraw Progress plot flag (default: False)\n oper Membership calculation operation: 'min' or 'prod' (default: 'min')\n isNorm Do normalization of input training samples or not?\n norm_range New ranging of input data after normalization, for example: [0, 1]\n V_pre Hyperbox lower bounds for the model to be updated using new data\n W_pre Hyperbox upper bounds for the model to be updated using new data\n classId_pre Hyperbox class labels (crisp) for the model to be updated using new data \n \n ATTRIBUTES:\n V Hyperbox lower bounds\n W Hyperbox upper bounds\n classId Hyperbox class labels (crisp)\n cardin Hyperbox cardinalities (the number of training samples is covered by corresponding hyperboxes)\n clusters Identifiers of input objects in each hyperbox (indexes of training samples covered by corresponding hyperboxes)\n\n\"\"\"\n\nimport sys, os\nsys.path.insert(0, os.path.pardir)\n\nimport ast\nimport numpy as np\nimport time\nimport matplotlib\ntry:\n matplotlib.use('TkAgg')\nexcept:\n pass\n\nfrom functionhelper.preprocessinghelper import loadDataset, string_to_boolean, splitDatasetRndClassBasedTo2Part, splitDatasetRndTo2Part\nfrom GFMM.basebatchlearninggfmm import BaseBatchLearningGFMM\nfrom GFMM.onlinegfmm import OnlineGFMM\nfrom GFMM.accelbatchgfmm import AccelBatchGFMM\nfrom GFMM.batchgfmm_v1 import BatchGFMMV1\nfrom GFMM.batchgfmm_v2 import BatchGFMMV2\n\nclass AggloOnlineGFMM(BaseBatchLearningGFMM):\n \n def __init__(self, gamma = 1, teta_onl = 1, teta_agglo = 1, bthres = 0.5, simil = 'mid', sing = 'max', isDraw = False, oper = 'min', isNorm = False, norm_range = [0, 1], V_pre = np.array([], dtype=np.float64), W_pre = np.array([], dtype=np.float64), classId_pre = np.array([], dtype=np.int16)):\n BaseBatchLearningGFMM.__init__(self, gamma, teta_onl, isDraw, oper, isNorm, norm_range)\n \n self.teta_onl = teta_onl\n self.teta_agglo = teta_agglo\n \n self.V = V_pre\n self.W = W_pre\n self.classId = classId_pre\n \n self.bthres = bthres\n self.simil = simil\n self.sing = sing\n \n \n def fit(self, Xl_onl, Xu_onl, patClassId_onl, Xl_off, Xu_off, patClassId_off, typeOfAgglo = 1):\n \"\"\"\n The input data need to be normalized before using this function\n \n Xl_onl Input data lower bounds (rows = objects, columns = features) for online learning\n Xu_onl Input data upper bounds (rows = objects, columns = features) for online learning\n patClassId_onl Input data class labels (crisp) for online learning\n \n Xl_off Input data lower bounds (rows = objects, columns = features) for agglomerative learning\n Xu_off Input data upper bounds (rows = objects, columns = features) for agglomerative learning\n 
patClassId_off Input data class labels (crisp) for agglomerative learning\n \n typeOfAgglo The used type of agglomerative learning algorithms\n \"\"\"\n \n time_start = time.clock()\n \n # Perform agglomerative learning\n if typeOfAgglo == 1:\n aggloClassifier = AccelBatchGFMM(self.gamma, self.teta_agglo, bthres = self.bthres, simil = self.simil, sing = self.sing, isDraw = self.isDraw, oper = self.oper, isNorm = False)\n elif typeOfAgglo == 2:\n aggloClassifier = BatchGFMMV2(self.gamma, self.teta_agglo, bthres = self.bthres, simil = self.simil, sing = self.sing, isDraw = self.isDraw, oper = self.oper, isNorm = False)\n else:\n aggloClassifier = BatchGFMMV1(self.gamma, self.teta_agglo, bthres = self.bthres, simil = self.simil, sing = self.sing, isDraw = self.isDraw, oper = self.oper, isNorm = False)\n \n aggloClassifier.fit(Xl_off, Xu_off, patClassId_off)\n \n self.V = aggloClassifier.V\n self.W = aggloClassifier.W\n self.classId = aggloClassifier.classId\n \n # Perform online learning\n onlClassifier = OnlineGFMM(self.gamma, self.teta_onl, self.teta_onl, isDraw = self.isDraw, oper = self.oper, isNorm = False, norm_range = [self.loLim, self.hiLim], V = self.V, W = self.W, classId = self.classId)\n # training for online GFMM\n onlClassifier.fit(Xl_onl, Xu_onl, patClassId_onl)\n \n self.V = onlClassifier.V\n self.W = onlClassifier.W\n self.classId = onlClassifier.classId\n \n time_end = time.clock()\n self.elapsed_training_time = time_end - time_start\n \n return self\n \n\nif __name__ == '__main__':\n \"\"\"\n INPUT parameters from command line\n \n arg1: + 1 - training and testing datasets are located in separated files\n + 2 - training and testing datasets are located in the same files\n arg2: path to file containing the training dataset (arg1 = 1) or both training and testing datasets (arg1 = 2)\n arg3: + path to file containing the testing dataset (arg1 = 1)\n + percentage of the training dataset in the input file\n arg4: + True: drawing hyperboxes during the training process\n + False: no drawing\n arg5: + Maximum size of hyperboxes of online learning algorithm (teta_onl, default: 1)\n arg6: + Maximum size of hyperboxes of agglomerative learning algorithm (teta_agglo, default: 1)\n arg7: + gamma value (default: 1)\n arg8: + Similarity threshod (default: 0.5)\n arg9: + Similarity measure: 'short', 'long' or 'mid' (default: 'mid')\n arg10: + operation used to compute membership value: 'min' or 'prod' (default: 'min')\n arg11: + do normalization of datasets or not? 
True: Normilize, False: No normalize (default: True)\n arg12: + range of input values after normalization (default: [0, 1]) \n arg13: + Use 'min' or 'max' (default) memberhsip in case of assymetric similarity measure (simil='mid')\n arg14: + Type of agglomerative learning\n - 1: Accelerated agglomerative learning AGGLO-2\n - 2: Full batch learning slower version\n - 3: Full batch learning faster version\n arg15: + Percentage of online training data (default: 0.5)\n \"\"\"\n \n # Init default parameters\n if len(sys.argv) < 5:\n isDraw = False\n else:\n isDraw = string_to_boolean(sys.argv[4])\n \n if len(sys.argv) < 6:\n teta_onl = 1 \n else:\n teta_onl = float(sys.argv[5])\n \n if len(sys.argv) < 7:\n teta_agglo = 1\n else:\n teta_agglo = float(sys.argv[6])\n \n if len(sys.argv) < 8:\n gamma = 1\n else:\n gamma = float(sys.argv[7])\n \n if len(sys.argv) < 9:\n bthres = 0.5\n else:\n bthres = float(sys.argv[8])\n \n if len(sys.argv) < 10:\n simil = 'mid'\n else:\n simil = sys.argv[9]\n \n if len(sys.argv) < 11:\n oper = 'min'\n else:\n oper = sys.argv[10]\n \n if len(sys.argv) < 12:\n isNorm = True\n else:\n isNorm = string_to_boolean(sys.argv[11])\n \n if len(sys.argv) < 13:\n norm_range = [0, 1]\n else:\n norm_range = ast.literal_eval(sys.argv[12])\n \n if len(sys.argv) < 14:\n sing = 'max'\n else:\n sing = sys.argv[13]\n \n if len(sys.argv) < 15:\n typeOfAgglo = 1\n else:\n typeOfAgglo = int(sys.argv[14])\n \n if len(sys.argv) < 16:\n percentOnl = 0.5\n else:\n percentOnl = float(sys.argv[15])\n \n if sys.argv[1] == '1':\n training_file = sys.argv[2]\n testing_file = sys.argv[3]\n\n # Read training file\n Xtr, X_tmp, patClassIdTr, pat_tmp = loadDataset(training_file, 1, False)\n # Read testing file\n X_tmp, Xtest, pat_tmp, patClassIdTest = loadDataset(testing_file, 0, False)\n \n else:\n dataset_file = sys.argv[2]\n percent_Training = float(sys.argv[3])\n Xtr, Xtest, patClassIdTr, patClassIdTest = loadDataset(dataset_file, percent_Training, False)\n \n \n classifier = AggloOnlineGFMM(gamma, teta_onl, teta_agglo, bthres, simil, sing, isDraw, oper, isNorm, norm_range)\n \n Xtr_onl, Xtr_off = splitDatasetRndTo2Part(Xtr, Xtr, patClassIdTr, percentOnl)\n \n classifier.fit(Xtr_onl.lower, Xtr_onl.upper, Xtr_onl.label, Xtr_off.lower, Xtr_off.upper, Xtr_off.label)\n \n \n # Testing\n print(\"-- Testing --\")\n result = classifier.predict(Xtest, Xtest, patClassIdTest)\n if result != None:\n print(\"Number of wrong predicted samples = \", result.summis)\n numTestSample = Xtest.shape[0]\n print(\"Error Rate = \", np.round(result.summis / numTestSample * 100, 2), \"%\")\n \n"
] | [
[
"matplotlib.use",
"numpy.round",
"numpy.array"
]
] |
alcinos/dps | [
"5467db1216e9f9089376d2c71f524ced2382e4f6"
] | [
"dps/hyper/parallel_session.py"
] | [
"from __future__ import print_function\nimport os\nimport datetime\nimport subprocess\nfrom future.utils import raise_with_traceback\nimport numpy as np\nimport time\nimport progressbar\nimport shutil\nfrom collections import defaultdict\nimport sys\nimport dill\nfrom zipfile import ZipFile\nfrom contextlib import ExitStack\nimport json\n\nfrom dps import cfg\nfrom dps.parallel import ReadOnlyJob\nfrom dps.utils import (\n cd, parse_timedelta, make_symlink, ExperimentStore,\n zip_root, process_path, path_stem, redirect_stream\n)\n\n\nDEFAULT_HOST_POOL = ['ecrawf6@cs-{}.cs.mcgill.ca'.format(i) for i in range(1, 33)]\n\n\nclass ParallelSession(object):\n \"\"\" Run a Job in parallel using gnu-parallel.\n\n A directory for this job execution is created in `scratch`, and results are saved there.\n\n Parameters\n ----------\n name: str\n Name for the experiment.\n input_zip: str\n Path to a zip archive storing the Job.\n pattern: str\n Pattern to use to select which ops to run within the Job.\n scratch: str\n Path to location where the results of running the selected ops will be\n written. Must be writeable by the master process.\n local_scratch_prefix: str\n Path to scratch directory that is local to each remote host.\n ppn: int\n Number of processors per node.\n wall_time: str\n String specifying the maximum wall-time allotted to running the selected ops.\n cleanup_time: str\n String specifying the amount of cleanup time to allow per step. Affects the time-limit\n that we pass to `gnu-parallel`, as well as the time limit passed to the python script.\n slack_time: float\n String specifying the amount of slack time to allow per step. Corresponds to\n time allotted to each process to respond to the signal that the step's time is up.\n Affects the time limit that we pass to the python script.\n add_date: bool\n Whether to add current date/time to the name of the directory where results are stored.\n dry_run: bool\n If True, control script will be generated but not executed/submitted.\n parallel_exe: str\n Path to the `gnu-parallel` executable to use.\n host_pool: list of str\n A list of names of hosts to use to execute the job.\n load_avg_threshold: float\n If a host exhibits a load average greater than this, it will not be used.\n max_hosts: int\n Maximum number of hosts to use.\n env_vars: dict (str -> str)\n Dictionary mapping environment variable names to values. These will be accessible\n by the submit script, and will also be sent to the worker nodes.\n output_to_files: bool\n If True, stderr and stdout of jobs is saved in files rather than being printed to screen.\n n_retries: int\n Number of retries per job.\n gpu_set: str\n Comma-separated list of indices of gpus to use.\n copy_venv: bool\n If True, copy the virtualenv from the launching environment and use it to run the simulation.\n python_startup: bool\n If True, source script located at \"$HOME/python_startup.sh\" before running step command.\n step_time_limit: str\n String specifying time limit for each step. 
If not supplied, a time limit is inferred\n automatically based on wall_time and number of steps (giving each step an equal amount\n of time).\n ignore_gpu: bool\n If True, GPUs will be requested by as part of the job, but will not be used at run time.\n ssh_options: string\n String of options to pass to ssh.\n loud_output: bool\n Whether to capture stdout for the main execution command.\n\n \"\"\"\n def __init__(\n self, name, input_zip, pattern, scratch, local_scratch_prefix='/tmp/dps/hyper/', ppn=12, cpp=1,\n pmem=None, wall_time=\"1hour\", cleanup_time=\"1min\", slack_time=\"1min\", add_date=True, dry_run=0,\n parallel_exe=None, kind=\"parallel\", host_pool=None, load_avg_threshold=8., min_hosts=None,\n max_hosts=1, env_vars=None, output_to_files=True, n_retries=0, gpu_set=\"\", copy_venv=\"\",\n python_startup=False, step_time_limit=None, ignore_gpu=False, ssh_options=None, loud_output=True,\n rsync_verbosity=0):\n\n args = locals().copy()\n del args['self']\n\n print(\"\\nParallelSession args:\")\n print(args)\n\n launch_venv = os.getenv('VIRTUAL_ENV')\n if launch_venv:\n launch_venv = os.path.split(launch_venv)[1]\n\n if not parallel_exe:\n parallel_exe = \"$HOME/.local/bin/parallel\"\n\n if ssh_options is None:\n ssh_options = (\n \"-oPasswordAuthentication=no \"\n \"-oStrictHostKeyChecking=no \"\n \"-oConnectTimeout=5 \"\n \"-oServerAliveInterval=2\"\n )\n\n if kind == \"pbs\":\n local_scratch_prefix = \"\\\\$RAMDISK\"\n\n assert kind in \"parallel pbs slurm slurm-local\".split()\n hpc = kind != \"parallel\"\n\n # Create directory to run the job from - should be on scratch.\n scratch = os.path.abspath(os.path.expandvars(scratch))\n\n es = ExperimentStore(scratch, prefix=\"run_search\")\n\n job_dir = es.new_experiment(name, 0, add_date=add_date, force_fresh=1)\n job_dir.record_environment()\n\n with open(job_dir.path_for('run_kwargs.json'), 'w') as f:\n json.dump(args, f, default=str, indent=4, sort_keys=True)\n del f\n del args\n\n job_path = job_dir.path\n job_dir.make_directory('experiments')\n\n input_zip_stem = path_stem(input_zip)\n input_zip = shutil.copy(input_zip, job_dir.path_for(\"orig.zip\"))\n input_zip_abs = process_path(input_zip)\n input_zip_base = os.path.basename(input_zip)\n archive_root = zip_root(input_zip)\n\n self.copy_files(\n job_dir, input_zip, archive_root,\n [\"README.md\", \"sampled_configs.txt\", \"config.json\", \"config.pkl\"])\n\n # storage local to each node, from the perspective of that node\n local_scratch = os.path.join(local_scratch_prefix, os.path.basename(job_path))\n\n output_to_files = \"--output-to-files\" if output_to_files else \"\"\n\n env = os.environ.copy()\n\n env_vars = env_vars or {}\n\n env.update({e: str(v) for e, v in env_vars.items()})\n env_vars = ' '.join('--env ' + k for k in env_vars)\n\n rsync_verbosity = \"\" if not rsync_verbosity else \"-\" + \"v\" * rsync_verbosity\n\n ro_job = ReadOnlyJob(input_zip)\n indices_to_run = sorted([op.idx for op in ro_job.ready_incomplete_ops(sort=False)])\n del ro_job\n n_jobs_to_run = len(indices_to_run)\n if n_jobs_to_run == 0:\n print(\"All jobs are finished! 
Exiting.\")\n return\n\n dirty_hosts = set()\n\n if hpc:\n host_pool = []\n n_nodes = max_hosts\n n_procs = n_nodes * ppn\n n_steps = int(np.ceil(n_jobs_to_run / n_procs))\n else:\n self.__dict__.update(locals())\n\n host_pool = host_pool or DEFAULT_HOST_POOL\n if isinstance(host_pool, str):\n host_pool = host_pool.split()\n\n # Get an estimate of the number of hosts we'll have available.\n with cd(job_path):\n hosts, n_procs = self.recruit_hosts(\n hpc, min_hosts, max_hosts, host_pool,\n ppn, max_procs=np.inf)\n n_nodes = len(hosts)\n\n if n_jobs_to_run < n_procs:\n n_steps = 1\n n_nodes = int(np.ceil(n_jobs_to_run / ppn))\n n_procs = n_nodes * ppn\n hosts = hosts[:n_nodes]\n else:\n n_steps = int(np.ceil(n_jobs_to_run / n_procs))\n\n node_file = \" --sshloginfile nodefile.txt \"\n\n wall_time_seconds, total_seconds_per_step, parallel_seconds_per_step, python_seconds_per_step = \\\n self.compute_time_limits(wall_time, cleanup_time, slack_time, step_time_limit, n_steps)\n\n self.__dict__.update(locals())\n\n self.print_time_limits()\n\n def get_load_avg(self, host):\n return_code, stdout, stderr = self.ssh_execute(\"uptime\", host, robust=True)\n print(stdout)\n if return_code:\n return 1000.0, 1000.0, 1000.0\n return [float(s) for s in stdout.split(':')[-1].split(',')]\n\n def print_time_limits(self):\n print(\"\\n\" + \"~\" * 40)\n print(\"We have {wall_time_seconds} seconds to complete {n_jobs_to_run} \"\n \"sub-jobs (grouped into {n_steps} steps) using {n_procs} processors.\".format(**self.__dict__))\n print(\"Each step, we are allowing {slack_time} as slack and \"\n \"{cleanup_time} for cleanup.\".format(**self.__dict__))\n print(\"Total time per step is {total_seconds_per_step} seconds.\".format(**self.__dict__))\n print(\"Time-limit passed to parallel is {parallel_seconds_per_step} seconds.\".format(**self.__dict__))\n print(\"Time-limit passed to dps-hyper is {python_seconds_per_step} seconds.\".format(**self.__dict__))\n\n @staticmethod\n def compute_time_limits(wall_time, cleanup_time_per_step, slack_time_per_step, step_time_limit, n_steps):\n if isinstance(wall_time, str):\n wall_time = int(parse_timedelta(wall_time).total_seconds())\n assert isinstance(wall_time, int)\n assert wall_time > 0\n\n if isinstance(cleanup_time_per_step, str):\n cleanup_time_per_step = int(parse_timedelta(cleanup_time_per_step).total_seconds())\n assert isinstance(cleanup_time_per_step, int)\n assert cleanup_time_per_step > 0\n\n if isinstance(slack_time_per_step, str):\n slack_time_per_step = int(parse_timedelta(slack_time_per_step).total_seconds())\n assert isinstance(slack_time_per_step, int)\n assert slack_time_per_step > 0\n\n if step_time_limit is None:\n total_seconds_per_step = int(np.floor(wall_time / n_steps))\n else:\n if isinstance(step_time_limit, str):\n step_time_limit = int(parse_timedelta(step_time_limit).total_seconds())\n assert isinstance(step_time_limit, int)\n assert step_time_limit > 0\n\n total_seconds_per_step = step_time_limit\n\n # Subtract cleanup time and wall time.\n parallel_seconds_per_step = int(total_seconds_per_step - cleanup_time_per_step)\n python_seconds_per_step = int(\n total_seconds_per_step - cleanup_time_per_step - slack_time_per_step)\n\n assert total_seconds_per_step > 0\n assert parallel_seconds_per_step > 0\n assert python_seconds_per_step > 0\n\n return wall_time, total_seconds_per_step, parallel_seconds_per_step, python_seconds_per_step\n\n @staticmethod\n def copy_files(job_dir, input_zip, archive_root, filenames):\n # Copy files from archive\n with 
ZipFile(input_zip, 'r') as _input_zip:\n for filename in filenames:\n name_in_zip = os.path.join(archive_root, filename)\n text = None\n try:\n text = _input_zip.read(name_in_zip).decode()\n except Exception:\n print(\"No {} found in zip file.\".format(filename))\n\n if text is not None:\n with open(job_dir.path_for(filename), 'w') as f:\n f.write(text)\n\n def recruit_hosts(self, hpc, min_hosts, max_hosts, host_pool, ppn, max_procs):\n if not hpc and getattr(self, 'candidate_hosts', None) is None:\n print(\"Ranking hosts by suitability...\")\n candidate_hosts = {}\n for host in host_pool:\n if host is not ':':\n print(\"\\n\" + \"~\" * 40)\n print(\"Testing connection to host {}...\".format(host))\n failed, _, _ = self.ssh_execute(\"echo Connected to \\$HOSTNAME\", host, robust=True)\n if failed:\n print(\"Could not connect.\")\n continue\n\n load_avg, _, _ = self.get_load_avg(host)\n print(\"1 minute load average: {}\".format(load_avg))\n\n if load_avg < self.load_avg_threshold:\n candidate_hosts[host] = load_avg\n else:\n print(\"`load_avg` above threshold of {}, discarding host.\".format(self.load_avg_threshold))\n\n self.candidate_hosts = candidate_hosts\n\n hosts = []\n\n if hpc:\n candidate_hosts = host_pool\n else:\n candidate_hosts = sorted(self.candidate_hosts, key=self.candidate_hosts.__getitem__)\n\n for host in candidate_hosts:\n n_hosts_recruited = len(hosts)\n if n_hosts_recruited >= max_hosts:\n break\n\n if n_hosts_recruited * ppn >= max_procs:\n break\n\n print(\"\\n\" + (\"~\" * 40))\n print(\"Recruiting host {}...\".format(host))\n\n if not hpc:\n load_avg, _, _ = self.get_load_avg(host)\n print(\"Previous 1 minute load average: {}\".format(self.candidate_hosts[host]))\n print(\"Recalculated 1 minute load average: {}\".format(load_avg))\n self.candidate_hosts[host] = load_avg\n\n print(\"Preparing host...\")\n try:\n command = \"stat {local_scratch}\"\n create_local_scratch, _, _ = self.ssh_execute(command, host, robust=True, output=\"quiet\")\n\n if create_local_scratch:\n print(\"Creating local scratch directory...\")\n command = \"mkdir -p {local_scratch}\"\n self.ssh_execute(command, host, robust=False)\n self.dirty_hosts.add(host)\n\n command = \"cd {local_scratch} && stat {archive_root}\"\n missing_archive, _, _ = self.ssh_execute(command, host, robust=True, output=\"quiet\")\n\n if missing_archive:\n command = \"cd {local_scratch} && stat {input_zip_base}\"\n missing_zip, _, _ = self.ssh_execute(command, host, robust=True, output=\"quiet\")\n\n if missing_zip:\n print(\"Copying zip to local scratch...\")\n if host == ':':\n command = \"cp {input_zip_abs} {local_scratch}\".format(**self.__dict__)\n else:\n command = (\n \"rsync -a {rsync_verbosity} --timeout=300 -e \\\"ssh {ssh_options}\\\" \"\n \"{input_zip_abs} {host}:{local_scratch}\".format(host=host, **self.__dict__)\n )\n self.execute_command(command, frmt=False, robust=False)\n\n print(\"Unzipping...\")\n command = \"cd {local_scratch} && unzip -ouq {input_zip_base}\"\n self.ssh_execute(command, host, robust=False)\n\n print(\"Host successfully prepared.\")\n hosts.append(host)\n\n except subprocess.CalledProcessError as e:\n print(\"Preparation of host failed.\")\n print(\"Command output:\\n{}\".format(e.output))\n\n if min_hosts is not None and len(hosts) < min_hosts:\n raise Exception(\n \"Found only {} usable hosts, but minimum \"\n \"required hosts is {}.\".format(len(hosts), min_hosts))\n\n n_procs = ppn * len(hosts)\n\n print(\"\\nProceeding with {} usable hosts, translates into {} procs total \"\n 
\"(max_procs: {}, max_hosts: {}).\".format(\n len(hosts), n_procs, max_procs, max_hosts))\n\n with open('nodefile.txt', 'w') as f:\n f.write('\\n'.join(hosts))\n\n return hosts, n_procs\n\n def execute_command(\n self, command, frmt=True, shell=True, max_seconds=None,\n progress=False, robust=False, output=None):\n \"\"\" Uses `subprocess` to execute `command`. Has a few added bells and whistles.\n\n if command returns non-zero exit status:\n if robust:\n returns as normal\n else:\n raise CalledProcessError\n\n Parameters\n ----------\n command: str\n The command to execute.\n\n\n Returns\n -------\n returncode, stdout, stderr\n\n \"\"\"\n p = None\n try:\n assert isinstance(command, str)\n if frmt:\n command = command.format(**self.__dict__)\n\n if output == \"loud\":\n print(\"\\nExecuting command: \" + (\">\" * 40) + \"\\n\")\n print(command)\n\n if not shell:\n command = command.split()\n\n stdout = None if output == \"loud\" else subprocess.PIPE\n stderr = None if output == \"loud\" else subprocess.PIPE\n\n start = time.time()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n p = subprocess.Popen(command, shell=shell, universal_newlines=True,\n stdout=stdout, stderr=stderr)\n\n progress_bar = None\n if progress:\n widgets = ['[', progressbar.Timer(), '] ',\n '(', progressbar.ETA(), ') ',\n progressbar.Bar()]\n _max_value = max_seconds or progressbar.UnknownLength\n progress_bar = progressbar.ProgressBar(\n widgets=widgets, max_value=_max_value, redirect_stdout=True)\n\n interval_length = 1\n while True:\n try:\n p.wait(interval_length)\n except subprocess.TimeoutExpired:\n if progress_bar is not None:\n progress_bar.update(min(int(time.time() - start), max_seconds))\n\n if p.returncode is not None:\n break\n\n if progress_bar is not None:\n progress_bar.finish()\n\n if output == \"loud\":\n print(\"\\nCommand took {} seconds.\\n\".format(time.time() - start))\n\n _stdout = \"\" if p.stdout is None else p.stdout.read()\n _stderr = \"\" if p.stderr is None else p.stderr.read()\n\n if p.returncode != 0:\n if isinstance(command, list):\n command = ' '.join(command)\n\n print(\"The following command returned with non-zero exit code \"\n \"{}:\\n {}\".format(p.returncode, command))\n\n if output is None or (output == \"quiet\" and not robust):\n print(\"\\n\" + \"-\" * 20 + \" stdout \" + \"-\" * 20 + \"\\n\")\n print(_stdout)\n\n print(\"\\n\" + \"-\" * 20 + \" stderr \" + \"-\" * 20 + \"\\n\")\n print(_stderr)\n\n if robust:\n return p.returncode, _stdout, _stderr\n else:\n raise subprocess.CalledProcessError(p.returncode, command, _stdout, _stderr)\n\n return p.returncode, _stdout, _stderr\n\n except BaseException as e:\n if p is not None:\n p.terminate()\n p.kill()\n if progress_bar is not None:\n progress_bar.finish()\n raise_with_traceback(e)\n\n def ssh_execute(self, command, host, **kwargs):\n if host == \":\":\n cmd = command\n else:\n cmd = \"ssh {ssh_options} -T {host} \\\"{command}\\\"\".format(\n ssh_options=self.ssh_options, host=host, command=command)\n return self.execute_command(cmd, **kwargs)\n\n def _step(self, i, indices_for_step):\n if not indices_for_step:\n print(\"No jobs left to run on step {}.\".format(i))\n return\n\n _ignore_gpu = \"--ignore-gpu\" if self.ignore_gpu else \"\"\n\n indices = ' '.join(str(i) for i in indices_for_step)\n\n if \"slurm\" in self.kind:\n parallel_command = (\n \"cd {local_scratch} && \"\n \"dps-hyper run {archive_root} {pattern} {indices} --max-time {python_seconds_per_step} \"\n \"--log-root {local_scratch} --env-name experiments 
--gpu-set={gpu_set} --ppn={ppn} \"\n \"{_ignore_gpu} {output_to_files}\"\n )\n\n bind = \"--accel-bind=g\" if self.gpu_set else \"\"\n mem = \"--mem-per-cpu={}mb\".format(self.pmem) if self.pmem else \"\"\n\n command = ('timeout --signal=INT {parallel_seconds_per_step} srun --cpus-per-task {cpp} --ntasks {n_tasks} {bind} '\n '{mem} --no-kill --quit-on-interrupt sh -c \"{parallel_command}\"'.format(\n parallel_seconds_per_step=self.parallel_seconds_per_step,\n cpp=self.cpp,\n n_tasks=len(indices_for_step),\n bind=bind,\n mem=mem,\n parallel_command=parallel_command))\n else:\n workon = \"workon {launch_venv} && \" if (self.copy_venv and self.launch_venv) else \"\"\n python_startup = \"source \\$HOME/python_startup.sh && \" if self.python_startup else \"\"\n parallel_command = (\n python_startup +\n workon +\n \"cd {local_scratch} && \"\n \"dps-hyper run {archive_root} {pattern} {{}} --max-time {python_seconds_per_step} \"\n \"--log-root {local_scratch} --env-name experiments \"\n \"--idx-in-node={{%}} --gpu-set={gpu_set} --ppn={ppn} {_ignore_gpu} {output_to_files}\"\n )\n\n command = (\n '{parallel_exe} --timeout {parallel_seconds_per_step} --no-notice -j{ppn} \\\\\\n'\n ' --joblog {job_path}/job_log.txt {node_file} \\\\\\n'\n ' {env_vars} -v \\\\\\n'\n # ' --env PATH --env LD_LIBRARY_PATH {env_vars} -v \\\\\\n'\n ' \"' + parallel_command + '\" \\\\\\n'\n ' ::: {indices}'\n )\n\n command = command.format(\n indices=indices, _ignore_gpu=_ignore_gpu, **self.__dict__)\n\n self.execute_command(\n command, frmt=False, robust=True,\n max_seconds=self.parallel_seconds_per_step, progress=not self.hpc,\n output='loud' if self.loud_output else None)\n\n def _checkpoint(self, i):\n print(\"Fetching results of step {} at: \".format(i))\n print(datetime.datetime.now())\n\n for i, host in enumerate(self.hosts):\n if host == ':':\n command = \"mv {local_scratch}/experiments/* ./experiments\"\n self.execute_command(command, robust=True)\n\n command = \"rm -rf {local_scratch}/experiments\"\n self.execute_command(command, robust=True)\n\n command = \"cp -ru {local_scratch}/{archive_root} .\"\n self.execute_command(command, robust=True)\n else:\n command = (\n \"rsync -az {rsync_verbosity} --timeout=300 -e \\\"ssh {ssh_options}\\\" \"\n \"{host}:{local_scratch}/experiments/ ./experiments\".format(\n host=host, **self.__dict__)\n )\n self.execute_command(command, frmt=False, robust=True, output=\"loud\")\n\n command = \"rm -rf {local_scratch}/experiments\"\n self.ssh_execute(command, host, robust=True, output=\"loud\")\n\n command = (\n \"rsync -az {rsync_verbosity} --timeout=300 -e \\\"ssh {ssh_options}\\\" \"\n \"{host}:{local_scratch}/{archive_root} .\".format(\n host=host, **self.__dict__)\n )\n self.execute_command(command, frmt=False, robust=True, output=\"loud\")\n\n self.execute_command(\"zip -rq results {archive_root}\", robust=True)\n\n try:\n from dps.hyper import HyperSearch\n search = HyperSearch('.')\n with redirect_stream('stdout', 'results.txt', tee=False):\n search.print_summary(print_config=False, verbose=False)\n print(search.job.summary(verbose=False))\n except Exception:\n job_path = 'results.zip' if os.path.exists('results.zip') else 'orig.zip'\n assert os.path.exists(job_path)\n job = ReadOnlyJob(job_path)\n print(job.summary(verbose=False))\n\n def get_slurm_var(self, var_name):\n parallel_command = \"printenv | grep {}\".format(var_name)\n command = 'srun --ntasks 1 --no-kill sh -c \"{parallel_command}\"'.format(parallel_command=parallel_command)\n returncode, stdout, stderr = 
self.execute_command(command, frmt=False, robust=False, progress=False)\n split = stdout.split('=')\n\n if len(split) != 2:\n raise Exception(\n \"Unparseable output while getting SLURM environment \"\n \"variable {}: {}\".format(var_name, stdout))\n\n _var_name, value = split\n _var_name = _var_name.strip()\n value = value.strip()\n\n if _var_name != var_name:\n raise Exception(\n \"Got wrong variable. Wanted {}, got {} with value {}\".format(var_name, _var_name, value))\n return value\n\n def run(self):\n with ExitStack() as stack:\n if not self.hpc:\n stack.enter_context(redirect_stream('stdout', self.job_dir.path_for('stdout'), tee=True))\n stack.enter_context(redirect_stream('stderr', self.job_dir.path_for('stderr'), tee=True))\n\n self._run()\n\n def _run(self):\n if self.dry_run:\n print(\"Dry run, so not running.\")\n return\n\n if \"slurm\" in self.kind:\n # Have to jump through a hoop to get the proper node-local storage on cedar/graham.\n self.local_scratch_prefix = self.get_slurm_var(\"SLURM_TMPDIR\")\n self.local_scratch = os.path.join(\n self.local_scratch_prefix,\n os.path.basename(self.job_path))\n\n # Compute new time limits based on the actual time remaining (protect against e.g. job starting late)\n\n print(\"Time limits before adjustment:\")\n self.print_time_limits()\n\n job_id = os.getenv(\"SLURM_JOBID\")\n command = 'squeue -h -j {} -o \"%L\"'.format(job_id)\n returncode, stdout, stderr = self.execute_command(command, frmt=False, robust=False)\n days = 0\n if \"-\" in stdout:\n days, time = stdout.split(\"-\")\n days = int(days)\n else:\n time = stdout\n\n time = time.split(\":\")\n\n hours = int(time[-3]) if len(time) > 2 else 0\n minutes = int(time[-2]) if len(time) > 1 else 0\n seconds = int(time[-1])\n\n wall_time_delta = datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)\n wall_time_seconds = int(wall_time_delta.total_seconds())\n\n print(\"Actual remaining walltime: {}\".format(wall_time_delta))\n print(\"Time limits after adjustment:\")\n\n (self.wall_time_seconds, self.total_seconds_per_step,\n self.parallel_seconds_per_step, self.python_seconds_per_step) = \\\n self.compute_time_limits(\n wall_time_seconds, self.cleanup_time, self.slack_time, self.step_time_limit, self.n_steps)\n\n self.print_time_limits()\n\n with cd(self.job_path):\n print(\"\\n\" + (\"=\" * 80))\n job_start = datetime.datetime.now()\n print(\"Starting job at {}\".format(job_start))\n\n job = ReadOnlyJob(self.input_zip)\n subjobs_remaining = sorted([op.idx for op in job.ready_incomplete_ops(sort=False)])\n\n n_failures = defaultdict(int)\n dead_jobs = set()\n\n i = 0\n while subjobs_remaining:\n step_start = datetime.datetime.now()\n\n print(\"\\nStarting step {} at: \".format(i) + \"=\" * 90)\n print(\"{} ({} since start of job)\".format(step_start, step_start - job_start))\n\n if not self.host_pool:\n if self.kind == \"pbs\":\n with open(os.path.expandvars(\"$PBS_NODEFILE\"), 'r') as f:\n self.host_pool = list(set([s.strip() for s in iter(f.readline, '')]))\n print(self.host_pool)\n elif \"slurm\" in self.kind:\n p = subprocess.run(\n 'scontrol show hostnames $SLURM_JOB_NODELIST', stdout=subprocess.PIPE, shell=True)\n self.host_pool = list(set([host.strip() for host in p.stdout.decode().split('\\n') if host]))\n else:\n raise Exception(\"NotImplemented\")\n\n self.hosts, self.n_procs = self.recruit_hosts(\n self.hpc, self.min_hosts, self.max_hosts, self.host_pool,\n self.ppn, max_procs=len(subjobs_remaining))\n\n indices_for_step = 
subjobs_remaining[:self.n_procs]\n self._step(i, indices_for_step)\n self._checkpoint(i)\n\n job = ReadOnlyJob(self.archive_root)\n\n subjobs_remaining = set([op.idx for op in job.ready_incomplete_ops(sort=False)])\n\n for j in indices_for_step:\n if j in subjobs_remaining:\n n_failures[j] += 1\n if n_failures[j] > self.n_retries:\n print(\"All {} attempts at completing job with index {} have failed, \"\n \"permanently removing it from set of eligible jobs.\".format(n_failures[j], j))\n dead_jobs.add(j)\n\n subjobs_remaining = [idx for idx in subjobs_remaining if idx not in dead_jobs]\n subjobs_remaining = sorted(subjobs_remaining)\n\n i += 1\n\n print(\"Step duration: {}.\".format(datetime.datetime.now() - step_start))\n\n self.execute_command(\"rm -rf {archive_root}\", robust=True)\n\n print(\"Cleaning up dirty hosts...\")\n command = \"rm -rf {local_scratch}\"\n for host in self.dirty_hosts:\n print(\"Cleaning host {}...\".format(host))\n self.ssh_execute(command, host, robust=True)\n\n\ndef submit_job(\n archive_path, name, wall_time=\"1year\", ppn=1, cpp=1, pmem=0,\n queue=\"\", kind=\"local\", gpu_set=\"\", project=\"rpp-bengioy\", **run_kwargs):\n\n assert kind in \"pbs slurm slurm-local parallel\".split()\n\n if \"slurm\" in kind and not pmem:\n raise Exception(\"Must supply a value for pmem (per-process-memory in mb) when using SLURM\")\n\n run_kwargs.update(\n wall_time=wall_time, ppn=ppn, cpp=cpp, kind=kind,\n gpu_set=gpu_set, pmem=pmem)\n\n run_kwargs['env_vars'] = dict(TF_CPP_MIN_LOG_LEVEL=3, CUDA_VISIBLE_DEVICES='-1')\n run_kwargs['dry_run'] = False\n\n session = ParallelSession(\n name, archive_path, 'map', cfg.parallel_experiments_run_dir, **run_kwargs)\n\n job_path = session.job_path\n\n # Not strictly required if kind == \"parallel\", but do it anyway for completeness.\n with open(os.path.join(job_path, \"session.pkl\"), 'wb') as f:\n dill.dump(session, f, protocol=dill.HIGHEST_PROTOCOL, recurse=True)\n\n if kind in \"parallel slurm-local\".split():\n session.run()\n return session\n\n python_script = \"\"\"#!{}\nimport datetime\nstart = datetime.datetime.now()\nprint(\"Starting job at \" + str(start))\nimport dill\nwith open(\"./session.pkl\", \"rb\") as f:\n session = dill.load(f)\nsession.run()\nend = datetime.datetime.now()\nprint(\"Finishing job at \" + str(end))\nprint(str((end - start).total_seconds()) + \" seconds elapsed between start and finish.\")\n\n\"\"\".format(sys.executable)\n with open(os.path.join(job_path, \"run.py\"), 'w') as f:\n f.write(python_script)\n\n if kind == \"pbs\":\n resources = \"nodes={}:ppn={},walltime={}\".format(session.n_nodes, session.ppn, session.wall_time_seconds)\n if pmem:\n resources = \"{},pmem={}mb\".format(resources, pmem)\n\n email = \"[email protected]\"\n if queue:\n queue = \"-q \" + queue\n command = (\n \"qsub -N {name} -d {job_path} -w {job_path} -m abe -M {email} \"\n \"-A {project} {queue} -V -l {resources} \"\n \"-j oe output.txt run.py\".format(\n name=name, job_path=job_path, email=email, project=project,\n queue=queue, resources=resources\n )\n )\n\n elif kind == \"slurm\":\n wall_time_minutes = int(np.ceil(session.wall_time_seconds / 60))\n resources = \"--nodes={} --ntasks-per-node={} --cpus-per-task={} --time={}\".format(\n session.n_nodes, session.ppn, cpp, wall_time_minutes)\n\n if pmem:\n resources = \"{} --mem-per-cpu={}mb\".format(resources, pmem)\n\n if gpu_set:\n n_gpus = len([int(i) for i in gpu_set.split(',')])\n resources = \"{} --gres=gpu:{}\".format(resources, n_gpus)\n\n email = \"[email 
protected]\"\n if queue:\n queue = \"-p \" + queue\n command = (\n \"sbatch --job-name {name} -D {job_path} --mail-type=ALL [email protected] \"\n \"-A {project} {queue} --export=ALL {resources} \"\n \"-o stdout -e stderr run.py\".format(\n name=name, job_path=job_path, email=email, project=project,\n queue=queue, resources=resources\n )\n )\n\n else:\n raise Exception()\n\n print(\"\\n\" + \"~\" * 40)\n print(command)\n\n with cd(job_path):\n subprocess.run(command.split())\n return session\n"
] | [
[
"numpy.floor",
"numpy.ceil"
]
] |
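The ParallelSession entry above ends with compute_time_limits, which splits a wall-time budget evenly across steps and then subtracts per-step cleanup and slack. A minimal standalone sketch of the same arithmetic, assuming plain integer seconds in place of the original's parse_timedelta handling (split_time_budget is a hypothetical helper name, not part of the dps code):

import math

def split_time_budget(wall_time_s, n_steps, cleanup_s, slack_s, step_limit_s=None):
    # Per-step budget: an explicit limit if given, otherwise an even split of the wall time.
    total_per_step = step_limit_s if step_limit_s is not None else math.floor(wall_time_s / n_steps)
    # The parallel launcher gets the step budget minus cleanup time...
    parallel_per_step = total_per_step - cleanup_s
    # ...and the python workers additionally lose the slack time.
    python_per_step = total_per_step - cleanup_s - slack_s
    assert python_per_step > 0, "budget too small for cleanup + slack"
    return total_per_step, parallel_per_step, python_per_step

# Example: one hour of wall time, 4 steps, 60 s cleanup and 30 s slack per step.
print(split_time_budget(3600, 4, 60, 30))  # (900, 840, 810)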
jiduque/scikit-fda | [
"5ea71e78854801b259aa3a01eb6b154aa63bf54b"
] | [
"tests/test_classification.py"
] | [
"\"\"\"Tests of classification methods.\"\"\"\n\nimport unittest\n\nimport numpy as np\nfrom sklearn.base import clone\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier as _KNeighborsClassifier\n\nfrom skfda.datasets import fetch_growth\nfrom skfda.misc.metrics import l2_distance\nfrom skfda.ml.classification import (\n DDClassifier,\n DDGClassifier,\n DTMClassifier,\n KNeighborsClassifier,\n MaximumDepthClassifier,\n NearestCentroid,\n RadiusNeighborsClassifier,\n)\nfrom skfda.ml.classification._depth_classifiers import _ArgMaxClassifier\nfrom skfda.representation import FData\n\n\nclass TestClassifiers(unittest.TestCase):\n \"\"\"Tests for classifiers.\"\"\"\n\n def setUp(self) -> None:\n \"\"\"Establish train and test data sets.\"\"\"\n X, y = fetch_growth(return_X_y=True)\n X_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.25,\n stratify=y,\n random_state=0,\n )\n self._X_train = X_train\n self._X_test = X_test\n self._y_train = y_train\n self._y_test = y_test\n\n def test_dtm_independent_copy(self) -> None:\n \"\"\"Check that copies are un-linked.\"\"\"\n clf: DTMClassifier[FData] = DTMClassifier(proportiontocut=0.25)\n clf1 = clone(clf)\n clf2: DTMClassifier[FData] = DTMClassifier(proportiontocut=0.75)\n\n clf1.proportiontocut = 0.75\n clf1.fit(self._X_train, self._y_train)\n clf2.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf1.predict(self._X_test),\n clf2.predict(self._X_test),\n )\n\n def test_dtm_classifier(self) -> None:\n \"\"\"Check DTM classifier.\"\"\"\n clf: DTMClassifier[FData] = DTMClassifier(proportiontocut=0.25)\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_centroid_classifier(self) -> None:\n \"\"\"Check NearestCentroid classifier.\"\"\"\n clf: NearestCentroid[FData] = NearestCentroid()\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_dtm_inheritance(self) -> None:\n \"\"\"Check that DTM is a subclass of NearestCentroid.\"\"\"\n clf1: NearestCentroid[FData] = NearestCentroid()\n clf2: DTMClassifier[FData] = DTMClassifier(\n proportiontocut=0,\n metric=l2_distance,\n )\n clf1.fit(self._X_train, self._y_train)\n clf2.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf1.predict(self._X_test),\n clf2.predict(self._X_test),\n )\n\n def test_maximumdepth_classifier(self) -> None:\n \"\"\"Check MaximumDepth classifier.\"\"\"\n clf: MaximumDepthClassifier[FData] = MaximumDepthClassifier()\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_dd_classifier(self) -> None:\n \"\"\"Check DD classifier.\"\"\"\n clf: DDClassifier[FData] = DDClassifier(degree=2)\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_ddg_classifier(self) -> None:\n \"\"\"Check DDG classifier.\"\"\"\n clf: DDGClassifier[FData] = DDGClassifier(_KNeighborsClassifier())\n clf.fit(self._X_train, 
self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1,\n ],\n )\n\n def test_maximumdepth_inheritance(self) -> None:\n \"\"\"Check that MaximumDepth is a subclass of DDG.\"\"\"\n clf1: DDGClassifier[FData] = DDGClassifier(_ArgMaxClassifier())\n clf2: MaximumDepthClassifier[FData] = MaximumDepthClassifier()\n clf1.fit(self._X_train, self._y_train)\n clf2.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf1.predict(self._X_test),\n clf2.predict(self._X_test),\n )\n\n def test_kneighbors_classifier(self) -> None:\n \"\"\"Check KNeighbors classifier.\"\"\"\n clf = KNeighborsClassifier()\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n ],\n )\n\n def test_radiusneighbors_classifier(self) -> None:\n \"\"\"Check RadiusNeighbors classifier.\"\"\"\n clf = RadiusNeighborsClassifier(radius=15)\n clf.fit(self._X_train, self._y_train)\n\n np.testing.assert_array_equal(\n clf.predict(self._X_test),\n [ # noqa: WPS317\n 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1,\n 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1,\n ],\n )\n\n def test_radiusneighbors_small_raidus(self) -> None:\n \"\"\"Check that an error is raised if radius too small.\"\"\"\n clf = RadiusNeighborsClassifier(radius=1)\n clf.fit(self._X_train, self._y_train)\n\n with np.testing.assert_raises(ValueError):\n clf.predict(self._X_test)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.testing.assert_raises",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.base.clone",
"sklearn.model_selection.train_test_split"
]
] |
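The scikit-fda tests above all follow the same scikit-learn fit/predict pattern on a stratified split of the growth dataset. A minimal sketch of that workflow using plain scikit-learn on synthetic data (the skfda classifiers themselves are not assumed here, only the interface the tests exercise):

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# Synthetic stand-in for the growth curves: 60 samples evaluated at 30 points, two classes.
rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0.0, 1.0, (30, 30)), rng.normal(1.0, 1.0, (30, 30))])
y = np.array([0] * 30 + [1] * 30)

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, stratify=y, random_state=0)

clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # accuracy on the held-out quarter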
theglossy1/Minesweeper | [
"9c641310e82e470a4c4e74bf91239f70b9dc7caa"
] | [
"minesweeper.py"
] | [
"import math\nimport random\n\nimport numpy as np\n\nMINE_BIT = 0b01\nFLAG_BIT = 0b10\n\nEMPTY_SLOT = 0xFF\nFLAG_SLOT = 0xFE\n\nSURROUNDING = [\n (1, 0),\n (1, 1),\n (0, 1),\n (-1, 1),\n (-1, 0),\n (-1, -1),\n (0, -1),\n (1, -1),\n]\n\n\nclass Minesweeper:\n def __init__(self, *shape, seed=None):\n if len(shape) < 1:\n shape = (10, 10)\n bomb_count = 7\n else:\n shape, bomb_count = shape[:-1], shape[-1]\n if math.prod(shape) < bomb_count:\n raise ValueError('cannot be more bombs than spaces on the board')\n self.board_matrix = np.zeros(shape, 'uint16')\n self.render_matrix = np.full(shape, EMPTY_SLOT, 'uint8')\n randomizer = random.Random(seed)\n bombs = []\n while bomb_count:\n bomb = []\n for size in shape:\n bomb.append(randomizer.randrange(size))\n bomb = tuple(bomb)\n if bomb not in bombs:\n bombs.append(bomb)\n self.board_matrix[bomb] |= MINE_BIT\n bomb_count -= 1\n\n def add_flag(self, *pos):\n self.board_matrix[pos] |= FLAG_BIT\n self.render_matrix[pos] = FLAG_SLOT\n\n def remove_flag(self, *pos):\n self.board_matrix[pos] ^= FLAG_BIT\n self.render_matrix[pos] = EMPTY_SLOT\n\n def is_flagged(self, *pos):\n return FLAG_BIT & self.board_matrix[pos]\n\n def toggle_flag(self, *pos):\n if self.is_flagged(*pos):\n self.remove_flag(*pos)\n else:\n self.add_flag(*pos)\n\n def _reveal(self, pos):\n cell = self.board_matrix[pos]\n if cell & FLAG_BIT:\n return -2\n elif cell & MINE_BIT:\n return -1\n else:\n count = 0\n shape = self.board_matrix.shape\n for direction in SURROUNDING:\n # newpos = (pos[0] + direction[0], pos[1] + direction[1])\n newpos = tuple(map(sum, ((pos[x], direction[x]) for x in range(len(direction)))))\n if all(map((lambda x: x[1] >= 0 and x[1] < shape[x[0]]), enumerate(newpos))):\n count += self.board_matrix[newpos] & MINE_BIT\n return count\n\n def reveal(self, *pos):\n count = self._reveal(pos)\n if count >= 0:\n self.render_matrix[pos] = count\n return count\n\n def recursive_reveal(self, *pos, reached=None):\n if reached is None:\n reached = set()\n if pos in reached:\n return None\n count = self.reveal(*pos)\n reached.add(pos)\n if count == 0:\n shape = self.board_matrix.shape\n for direction in SURROUNDING:\n # newpos = (pos[0] + direction[0], pos[1] + direction[1])\n newpos = tuple(map(sum, ((pos[x], direction[x]) for x in range(len(direction)))))\n if all(map((lambda x: x[1] >= 0 and x[1] < shape[x[0]]), enumerate(newpos))):\n if newpos not in reached:\n self.recursive_reveal(*newpos, reached=reached)\n return count\n\n def has_won(self):\n return all((bool(cell & FLAG_BIT) == bool(cell & MINE_BIT)) for cell in np.nditer(self.board_matrix))\n\n def reveal_all(self):\n for (pos, cell) in np.ndenumerate(self.board_matrix):\n if not cell & FLAG_BIT and not cell & MINE_BIT:\n self.reveal(*pos)\n"
] | [
[
"numpy.ndenumerate",
"numpy.full",
"numpy.zeros",
"numpy.nditer"
]
] |
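The _reveal method in the Minesweeper entry counts mines at the eight SURROUNDING offsets while discarding positions that fall off the board. A minimal 2-D sketch of that neighbour count, assuming a boolean mine mask instead of the bit-packed board_matrix used above (count_adjacent_mines is a hypothetical helper):

import numpy as np

SURROUNDING = [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]

def count_adjacent_mines(mines, pos):
    # mines: 2-D boolean array; pos: (row, col) of the revealed cell.
    rows, cols = mines.shape
    count = 0
    for dr, dc in SURROUNDING:
        r, c = pos[0] + dr, pos[1] + dc
        if 0 <= r < rows and 0 <= c < cols:  # keep the offset on the board
            count += int(mines[r, c])
    return count

board = np.zeros((4, 4), dtype=bool)
board[0, 1] = board[2, 2] = True
print(count_adjacent_mines(board, (1, 1)))  # 2: both mines touch cell (1, 1)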
Horacehxw/Multi-label | [
"76095c72327e9aa379eaa653dbbb775ca638e6db"
] | [
"src/LDPC/pyldpc/ldpcmatrices.py"
] | [
"import numpy as np\nfrom scipy.sparse import csr_matrix\nfrom .ldpcalgebra import*\n\n__all__ = ['BinaryProduct', 'InCode', 'BinaryRank','RegularH','CodingMatrix','CodingMatrix_systematic','HtG']\n\n\ndef RegularH(n,d_v,d_c):\n\n \"\"\" ------------------------------------------------------------------------------\n\n Builds a regular Parity-Check Matrix H (n,d_v,d_c) following Callager's algorithm : \n\n ----------------------------------------------------------------------------------\n\n Paramaeters:\n\n n: Number of columns (Same as number of coding bits)\n d_v: number of ones per column (number of parity-check equations including a certain variable) \n d_c: number of ones per row (number of variables participating in a certain parity-check equation); \n\n ----------------------------------------------------------------------------------\n\n Errors: \n\n The number of ones in the matrix is the same no matter how we calculate it (rows or columns), therefore, if m is \n the number of rows in the matrix: \n\n m*d_c = n*d_v with m < n (because H is a decoding matrix) => Parameters must verify:\n\n\n 0 - all integer parameters\n 1 - d_v < d_v\n 2 - d_c divides n \n\n ---------------------------------------------------------------------------------------\n\n Returns: 2D-array (shape = (m,n))\n\n \"\"\"\n\n\n if n%d_c:\n raise ValueError('d_c must divide n. Help(RegularH) for more info.')\n\n if d_c <= d_v: \n raise ValueError('d_c must be greater than d_v. Help(RegularH) for more info.')\n\n m = (n*d_v)// d_c\n\n Set=np.zeros((m//d_v,n),dtype=int) \n a=m//d_v\n\n # Filling the first set with consecutive ones in each row of the set \n\n for i in range(a): \n for j in range(i*d_c,(i+1)*d_c): \n Set[i,j]=1\n\n #Create list of Sets and append the first reference set\n Sets=[]\n Sets.append(Set.tolist())\n\n #Create remaining sets by permutations of the first set's columns: \n i=1\n for i in range(1,d_v):\n newSet = np.transpose(np.random.permutation(np.transpose(Set))).tolist()\n Sets.append(newSet)\n\n #Returns concatenated list of sest:\n H = np.concatenate(Sets)\n return H\n\n\n\ndef CodingMatrix(MATRIX,use_sparse=1):\n\n \"\"\" \n CAUTION: RETURNS tG TRANSPOSED CODING MATRIX. \n \n Function Applies GaussJordan Algorithm on Columns and rows of MATRIX in order\n to permute Basis Change matrix using Matrix Equivalence.\n\n Let A be the treated Matrix. refAref the double row reduced echelon Matrix.\n\n refAref has the form:\n\n (e.g) : |1 0 0 0 0 0 ... 0 0 0 0| \n |0 1 0 0 0 0 ... 0 0 0 0| \n |0 0 0 0 0 0 ... 0 0 0 0| \n |0 0 0 1 0 0 ... 0 0 0 0| \n |0 0 0 0 0 0 ... 0 0 0 0| \n |0 0 0 0 0 0 ... 0 0 0 0| \n\n\n First, let P1 Q1 invertible matrices: P1.A.Q1 = refAref\n\n We would like to calculate:\n P,Q are the square invertible matrices of the appropriate size so that:\n\n P.A.Q = J. Where J is the matrix of the form (having MATRIX's shape):\n\n | I_p O | where p is MATRIX's rank and I_p Identity matrix of size p.\n | 0 0 |\n\n Therfore, we perform permuations of rows and columns in refAref (same changes\n are applied to Q1 in order to get final Q matrix)\n\n\n NOTE: P IS NOT RETURNED BECAUSE WE DO NOT NEED IT TO SOLVE H.G' = 0 \n P IS INVERTIBLE, WE GET SIMPLY RID OF IT. \n \n Then\n \n solves: inv(P).J.inv(Q).G' = 0 (1) where inv(P) = P^(-1) and \n P.H.Q = J. 
Help(PJQ) for more info.\n \n Let Y = inv(Q).G', equation becomes J.Y = 0 (2) whilst:\n \n J = | I_p O | where p is H's rank and I_p Identity matrix of size p.\n | 0 0 |\n \n Knowing that G must have full rank, a solution of (2) is Y = | 0 | Where k = n-p. \n | I-k |\n \n Because of rank-nullity theorem. \n \n -----------------\n parameters:\n \n H: Parity check matrix. \n use_sparse: (optional, default True): use scipy.sparse format to speed up calculations\n ---------------\n returns:\n \n tG: Transposed Coding Matrix. \n \n \"\"\"\n\n\n H = np.copy(MATRIX)\n m,n = H.shape\n\n if m > n: \n raise ValueError('MATRIX must have more rows than columns (a parity check matrix)')\n \n if n > 500 and use_sparse:\n sparse = 1\n \n else:\n sparse = 0\n ##### DOUBLE GAUSS-JORDAN:\n\n Href_colonnes,tQ = GaussJordan(np.transpose(H),1)\n\n Href_diag = GaussJordan(np.transpose(Href_colonnes)) \n\n Q=np.transpose(tQ)\n \n k = n - sum(Href_diag.reshape(m*n))\n\n \n Y = np.zeros(shape=(n,k)).astype(int)\n Y[n-k:,:] = np.identity(k)\n \n if sparse:\n Q = csr_matrix(Q)\n Y = csr_matrix(Y)\n\n tG = BinaryProduct(Q,Y)\n\n return tG\n \n \ndef CodingMatrix_systematic(MATRIX,use_sparse = 1):\n\n \"\"\" \n Description:\n\n Solves H.G' = 0 and finds the coding matrix G in the systematic form : [I_k A] by applying permutations on MATRIX.\n \n CAUTION: RETURNS TUPLE (Hp,tGS) WHERE Hp IS A MODIFIED VERSION OF THE GIVEN PARITY CHECK MATRIX, tGS THE TRANSPOSED \n SYSTEMATIC CODING MATRIX ASSOCIATED TO Hp. YOU MUST USE THE RETURNED TUPLE IN CODING AND DECODING, RATHER THAN THE UNCHANGED \n PARITY-CHECK MATRIX H. \n\n -------------------------------------------------\n Parameters: \n\n MATRIX: 2D-Array. Parity-check matrix.\n use_sparse: (optional, default True): use scipy.sparse matrices to speed up calculations if n>100.\n\n ------------------------------------------------\n\n >>> Returns Tuple of 2D-arrays (Hp,GS):\n Hp: Modified H: permutation of columns (The code doesn't change)\n tGS: Transposed Systematic Coding matrix associated to Hp.\n\n \"\"\"\n\n H = np.copy(MATRIX)\n m,n = H.shape\n \n if n>100 and use_sparse:\n sparse = 1\n else:\n sparse = 0 \n \n P1 = np.identity(n,dtype=int)\n \n Hrowreduced = GaussJordan(H)\n \n k = n - sum([a.any() for a in Hrowreduced ])\n\n ## After this loop, Hrowreduced will have the form H_ss : | I_(n-k) A |\n permut = np.array(list(range(n)))\n\n while(True):\n zeros = [i for i in range(min(m,n)) if not Hrowreduced[i,i]]\n if len(zeros)==0:\n \tbreak\n indice_colonne_a = min(zeros)\n list_ones = [j for j in range(indice_colonne_a+1,n) if Hrowreduced[indice_colonne_a,j] ]\n if not len(list_ones):\n break\n\n indice_colonne_b = min(list_ones)\n \n aux = np.copy(Hrowreduced[:,indice_colonne_a])\n Hrowreduced[:,indice_colonne_a] = Hrowreduced[:,indice_colonne_b]\n Hrowreduced[:,indice_colonne_b] = aux \n \n aux = np.copy(P1[:,indice_colonne_a])\n P1[:,indice_colonne_a] = P1[:,indice_colonne_b]\n P1[:,indice_colonne_b] = aux\n \n ############ NOW, Hrowreduced has the form: | I_(n-k) A | , the permutation above makes it look like : \n ########### |A I_(n-k)|\n \n P1 = P1.T\n identity = list(range(n))\n sigma = identity[n-k:]+identity[:n-k]\n \n P2 = np.zeros(shape=(n,n),dtype=int)\n P2[identity,sigma] = np.ones(n)\n \n if sparse:\n P1 = csr_matrix(P1)\n P2 = csr_matrix(P2)\n H = csr_matrix(H)\n\n P = BinaryProduct(P2,P1)\n \n if sparse:\n P = csr_matrix(P)\n \n Hp = BinaryProduct(H,np.transpose(P))\n\n GS = np.zeros((k,n),dtype=int)\n GS[:,:k] = np.identity(k)\n GS[:,k:] = 
np.transpose(Hrowreduced[:n-k,n-k:])\n \n \n return Hp,np.transpose(GS)\n \ndef HtG(invrate,k,systematic=True):\n\n \"\"\"\n Constructs tuple H,tG using approximate rate (k/n) and k.\n\n Parameters:\n\n - invrate= 1/rate must be > 2\n - k must be > 1 \n - systematic (Boolean optional, default = True) Construction method of transposed coding matrix tG. \n\n returns tuple: H,tG \n \"\"\"\n\n if invrate < 3:\n raise ValueError('invrate must be > 2')\n if k < 2: \n raise ValueError('k must be > 1')\n\n d_c = invrate\n d_v = invrate-1\n\n n = invrate*k - (d_c-2)*d_c\n\n H = RegularH(n,d_v,d_c)\n if systematic:\n H,tG = CodingMatrix_systematic(H)\n else:\n tG = CodingMatrix(H)\n\n return H,tG\n\n\n \n "
] | [
[
"numpy.ones",
"numpy.transpose",
"numpy.zeros",
"scipy.sparse.csr_matrix",
"numpy.copy",
"numpy.concatenate",
"numpy.identity"
]
] |
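RegularH in the pyldpc entry builds a regular (n, d_v, d_c) parity-check matrix Gallager-style: a base set of n//d_c rows with d_c consecutive ones each, stacked with d_v - 1 column-permuted copies, so the result has m = n*d_v/d_c rows and requires d_c to divide n. A minimal sketch of that construction, without the validation and sparse handling of the original (regular_h_sketch is a hypothetical name):

import numpy as np

def regular_h_sketch(n, d_v, d_c, seed=0):
    rng = np.random.default_rng(seed)
    a = n // d_c                          # rows in the base set
    base = np.zeros((a, n), dtype=int)
    for i in range(a):                    # d_c consecutive ones per base row
        base[i, i * d_c:(i + 1) * d_c] = 1
    sets = [base] + [base[:, rng.permutation(n)] for _ in range(d_v - 1)]
    return np.concatenate(sets)           # shape (n * d_v // d_c, n)

H = regular_h_sketch(12, 2, 3)
print(H.shape)         # (8, 12)
print(H.sum(axis=0))   # every column weight is d_v = 2
print(H.sum(axis=1))   # every row weight is d_c = 3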
lixuekai2001/ml_for_log_data | [
"1e01c4c6c9a3ee6e20c5cfe8db44029c0aeaedd8"
] | [
"notebooks/c07_Recurrent_Neural_Networks/RNN_Depthseries.py"
] | [
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.6.0\n# kernelspec:\n# display_name: deep_ml_curriculum\n# language: python\n# name: deep_ml_curriculum\n# ---\n\n# + [markdown] colab_type=\"text\" id=\"iALm8shtXMVK\"\n# # Recurrent Neural Networks: A Case study in well logs and LSTM's\n#\n# All the models we have discussed so far were looking at the inputs as isolated instances. But there are many cases were datapoints are not isolated instances and have connection to each other. *Sequential data* are the type of data where each instance is related to the instances came before. \n#\n# A good example for this type of data is time series data. At each point in time to the value of the time series depends on the value of the prior points. Another example is depth data, like well logs.\n#\n# Recurrent Neural Networks (RNN) are a class of networks which deal with sequential data. There are many variants of Recurrent Neural Networks, including:\n#\n# - Simple Recurrect Neural Networks (Simple RNN - or often just called RNN)\n# - Gated Recurrent Unit (GRU)\n# - **Long Short-Term Memory (LSTM)**\n#\n# In this notebook we will discuss LSTM; however, the general logic behind all these methods are the same. They only differ in the way they handle information internally. \n#\n#\n# RNN's have been used for\n# - translation\n# - drawing chinese charectors\n# - composing music\n# - timeseries\n# - depth\n# - weather\n# - many more\n# -\n\n# ## A minute of Theory\n#\n# This is a hand on course, not theory so we will look at a high level view of one type of RNN, the LSTM. But lets look at the theory for a moment, to get some broad idea of how they work\n#\n\n# The figure below is from d2l.ai and shows how an RNN can operate on a text sequence to predict the next charector.\n#\n# \n#\n#\n\n# How does the model itself work? Let look at an excerpt from the open source machine learning book [d2l.ai](d2l.ai):\n#\n# \n#\n# > The figure below illustrates the computational logic of an RNN at three adjacent time steps. At any time step `t`, the computation of the hidden state can be treated as: \n#\n# > i) concatenating the input `Xt` at the current time step `t` and the hidden state `Ht−1` at the previous time step `t−1` ; \n#\n# > ii) feeding the concatenation result into a fully-connected layer with the activation function `ϕ`. \n#\n# > The output of such a fully-connected layer is the hidden state `Ht` of the current time step t . In this case, the model parameters are the concatenation of `Wxh` and `Whh` , and a bias of `bh`. The hidden state of the current time step `t` , `Ht` , will participate in computing the hidden state `Ht+1` of the next time step t+1 . 
What is more, `Ht` will also be fed into the fully-connected output layer to compute the output `Ot` of the current time step `t` .\n\n# To understand more see these visualisations:\n#\n# - [distill.pub memorization in rnns](memorization-in-rnns)\n# - [Chris Olah Understanding LSTMs](https://colah.github.io/posts/2015-08-Understanding-LSTMs/)\n#\n# And see these chapters:\n#\n# - [d2l.ai RNN's](http://d2l.ai/chapter_recurrent-neural-networks/rnn.html)\n# - [d2l.ai LSTM's](http://d2l.ai/chapter_recurrent-modern/lstm.html)\n#\n\n# # Hands on example with well logs\n#\n# You can read more [here](http://d2l.ai/chapter_recurrent-neural-networks/rnn.html), but lets dive into a hand on example first and it will begin to make more sense. We will be focusing on\n#\n# - How do RNN's represent data\n# - How do we implement them in pytorch\n# - What are the key parameters and example values\n# - Where might you use them\n\n# +\nimport torch\nfrom torch import nn, optim\nfrom torch import functional as F\nfrom torch.autograd import Variable\nimport torch\nimport torch.utils.data\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm.auto import tqdm\nimport xarray as xr\n# -\n\n# # Models\n\n# ## Low level\n#\n# [LSTMCell docs](https://pytorch.org/docs/stable/generated/torch.nn.LSTMCell.html)\n#\n# Lets look at a low level implementation, and compare it to the figure we previously saw\n#\n# \n\n# +\nfrom sklearn.preprocessing import LabelEncoder\n\n# Our input text\ntext = list(\"\"\"Machine Learning. Deep\"\"\")\ne = LabelEncoder()\ninput = e.fit_transform(text)\ninput\n# -\n\n# Visualize it\npd.DataFrame(list(zip(text, input)), columns=['char', 'int']).T\n\n# +\n# We can use the low level LSTM Cell\nrnn = nn.LSTMCell(input_size=1, hidden_size=20)\n\n# Input [Sequence Length=6, BatchSize=1, input_size=1]\ninput = torch.from_numpy(input)[:, None, None].float()\n\n# Initial states (Batch size, Hidden Size)\nhx = torch.randn(1, 20) # Initial hidden\ncx = torch.randn(1, 20) # Initial cell\n\noutput = []\n# we manually call it on each part of the sequence\nfor i in range(6):\n # We manually handle states\n hx, cx = rnn(input[i], (hx, cx))\n output.append(hx)\n \n# Seqence, Batch, Hidden size\noutput = torch.stack(output)\noutput.shape\n# -\n\n# As always you can read more about an LSTMCell in the help or docs\nhelp(nn.LSTMCell)\n\n# ## High level\n#\n# Or we can use the high level API that handles it for you\n#\n# [LSTMdocs](https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html)\n\nhelp(nn.LSTM)\n\n\n# +\n\nclass LSTM(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, output_size=1):\n super(LSTM, self).__init__()\n # Params\n self.num_layers = num_layers\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.seq_length = seq_length\n \n # High level LSTM library, nn.LSTMCell is a lower level one\n self.lstm = nn.LSTM(\n input_size=input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n batch_first=True,\n )\n \n # Final layer\n self.linear = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n seq_len = x.shape[1]\n \n output, (_, _) = self.lstm(x)\n h = output.view(-1, seq_len, self.hidden_size)[:, -1]\n \n return self.linear(h)\n\n\n# + [markdown] colab_type=\"text\" id=\"vaL6j3pkCen3\"\n# ## The well log classification problem \n#\n# In this scenario we are drilling downwards, and while well logs are reported instantly, there is a lag in facies of around 15 meter (see diagram), while they are interpreated by 
a petrophysicist. The problem is we would like to know the facies as soon as possible in order decide if, how, and where to drill.\n#\n# Lets apply machine learning. There are many ways to set up this problem, and geology is especially hard due to the important of context and the amount of undigitized information (much of it is in the brain of old and grizzled geologists).\n#\n# In this scenario we will apply an RNN. \n# - It will travel down the well\n# - Input are \n# - the last 200 meters of well logs \n# - and the geologist facies interpreation up to 15 meters ago\n# - The label is the facies at the point in the well\n#\n#\n# You may ask: \"Isn't it cheating? Because it knows the human labels from 15 meters above?\" \n#\n# We measure this and it gives a ~60% accuracy. So this is the naive baseline that we have to beat.\n#\n# <img src=\"images/diagram.png\" width=\"600\"/>\n#\n#\n# -\n\n# # Parameters\n\n# +\n# Params\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nshift_length = 100\nseq_length = 600\nmax_lithologies = 12\nmax_wells = 20\n\n\nprint(f'context length of {0.15*seq_length} m or {seq_length} intervals')\nprint(f'model can see human labels up to {shift_length*0.15}m above. Or {shift_length} intervals')\n# -\n\n# ## Data\n\n# + [markdown] colab_type=\"text\" id=\"kzlqXAj4EIBN\"\n# In this example we are going to look at well logs which are sequential data as well.\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 255} colab_type=\"code\" id=\"uNl846nE-jjq\" outputId=\"de7b4197-6a3f-4e88-e07e-2463adba90d0\"\n\n\nimport pandas as pd\nimport xarray as xr\nxf = xr.open_zarr(\"../../data/processed/geolink_norge_dataset/geolink_norge_well_logs.zarr\")\n\n# We will use just the 30* wells\nxf = xf.where(xf['Well'].str.startswith('30')).dropna(dim='Well', how='all')\n\ndf = xf.to_dataframe().swaplevel()\ndf['LITHOLOGY_GEOLINK'] = df['LITHOLOGY_GEOLINK'].astype('category')\ndf['Well'] = df.index.get_level_values(0).astype('category')\ndf['DEPT'] = df.index.get_level_values(1)\n\n# Keep these cols\nfeature_cols = ['CALI', 'DTC', 'GR', 'RDEP', 'RHOB',\n 'RMED', 'xc', 'yc', 'DEPT']\ndf = df.dropna(how='any', subset=feature_cols+['LITHOLOGY_GEOLINK'])\ndf = df.sort_index()\ndf\n# -\n\n\n\n#\n# <div class=\"alert alert-success\">\n# <h2>Exercise</h2>\n#\n# Discussion: Are there better ways we should set this up?\n# \n# What are the benefits?\n# \n# What information are we missing?\n# \n#\n# <details>\n# <summary><b>→ Hints</b></summary>\n#\n# There is no right answer except experimentation, but on creating this demo we found:\n# \n# * Generalising to a new well is hard, and it's important to have a similar distribution in test and train. So we took the top of some wells, and the bottom of others as training. \n# * Seeing the previous labels is important, as this encodes how the particular geologist interprets facies in this well. Which can often have some subjectivity\n# * Long context help a lot, but also slow it down. 
We're using the last 200 meters, but seeing the whole well helps\n# * Using all wells, instead of just the 30* wells will help it learn to generalise\n# * Using all logs may help\n# * We could do infilling instead\n# * We could make it bi-directional\n# * We could make it a sequence to sequence model, instead of sequence to 1\n# * Transformer may do better\n# * We could normalise the logs per window or well\n# * Many more\n#\n# </details>\n#\n# </div>\n\n# ## Data processing\n\n# We will stick to a group of long wells 29, 30, 31, 35 are valid groups\n# df=df[df['Well'].str.startswith('30')]\ncounts = df['Well'].value_counts()\ncounts[counts>0]\n\n\n\n# Let take the top N lithologies, replacing the rest with \"rare\"\n# print(len(df))\nremoved_labels = df[\"LITHOLOGY_GEOLINK\"].value_counts()[max_lithologies:].index\nprint(df['LITHOLOGY_GEOLINK'].value_counts())\nprint('removed_labels', removed_labels)\nl = df[\"LITHOLOGY_GEOLINK\"].values.remove_categories(removed_labels)\ndf['LITHOLOGY_GEOLINK'] = l.add_categories('rare').fillna('rare')\ndf['LITHOLOGY_GEOLINK'].value_counts()\n\n# Lets keep the top 12 lithologies, and rename the rest as \"rare\" (if any)\nremoved_labels = list(df[\"LITHOLOGY_GEOLINK\"].value_counts()[12:].keys())\ni = df[\"LITHOLOGY_GEOLINK\"].values.remove_categories(removed_labels)\ni[i.isna()]\n\n# +\n# Remove unused categories, and order\ndf['LITHOLOGY_GEOLINK'] = df['LITHOLOGY_GEOLINK'].values.remove_unused_categories()\n\n# sort categories (leads to nicer histograms)\ni = df['LITHOLOGY_GEOLINK'].values\nlitho_sorted = i.value_counts().sort_values(ascending=True).index\ndf['LITHOLOGY_GEOLINK'] = i.reorder_categories(list(litho_sorted), ordered=True)\n\ndf['LITHOLOGY_GEOLINK'].values.categories\n# -\n\ndf['LITHOLOGY_GEOLINK'].value_counts().plot.bar()\ndf['LITHOLOGY_GEOLINK'].value_counts()\n\n# Gvie each well an number, since the model needs numbers\nwell_index = df['Well'].values\nwell_int = well_index.rename_categories(range(len(well_index.categories))).astype(int)\ndf['Well_int']= well_int\ndf[['Well_int']]\n\n# Select the N longest well logs\nwells = sorted(df['Well'].unique())\nn_wells = min(len(wells), max_wells)\nselected_wells = wells[:n_wells]\ndf = df.loc[selected_wells]\n# df\n\n# Get a list of wells, ordered by frequency\nwell_counts = df['Well'].value_counts()\nwell_counts = well_counts[well_counts>0]\nwells = list(well_counts.index)\n# well_counts.plot.bar()\n1\n# well_counts\n\n# We want to see the facies N intervals above\ndf['LITH_ABV'] = df[\"LITHOLOGY_GEOLINK\"].shift(shift_length).fillna('Shaly Silt')\ndf['LITH_ABV_INT'] = df['LITH_ABV'].values.codes\ndf[['LITHOLOGY_GEOLINK', 'LITH_ABV']]\n\n# ### Split data\n#\n# There are many ways to split the data, the best way would be to split by well, but this is too hard and leads to poor results.\n#\n# We could split randomly but this is too easy, since seeing the lithology at 1000 m gives you 90% of the answer at 1010 m.\n#\n# Lets split the wells by depth, this way the model gets some idea about each well, but can't peek ahead. We will take the top of the well as training for even numbered wells, and vice versa. 
There is a graph below showing the outcome.\n#\n\n# +\n\nfrom functools import partial\n\ndef get_depth_thresh(x, even_bottom=True):\n \"\"\"\n On even number well codes take the bottom of the well for trainin\n \"\"\"\n if len(x)==0: return x\n \n # if the well code is even take the top \n code_is_even = (x['Well'].values.codes[0]%2)==0\n if code_is_even:\n even_bottom = not even_bottom\n \n d = x['DEPT']\n thresh = np.round(d.mean())\n x['thresh'] = thresh\n if even_bottom:\n return x[d<thresh]\n else:\n return x[d>thresh]\n\n\ndf_test = df.groupby(level=0).apply(partial(get_depth_thresh, even_bottom=False))\ndf_train = df.groupby(level=0).apply(partial(get_depth_thresh, even_bottom=True))\nprint('train', df_train.shape, 'test', df_test.shape)\nprint(f'Train {len(df_train)/len(df):.0%}, test {len(df_test)/len(df):.0%}')\n\n# +\ntrain = []\ntest = []\nfor i, well in enumerate(selected_wells):\n df_well = df.loc[well]\n df_well.name = well\n i_halfway = int(len(df_well)*0.5)\n df_top = df_well.iloc[:i_halfway]\n df_bottom = df_well.iloc[i_halfway:]\n is_even = i%2==0\n if is_even==0:\n train.append(df_top)\n test.append(df_bottom)\n else:\n train.append(df_bottom)\n test.append(df_top)\n \ndf_test = pd.concat(test).set_index(['Well', 'DEPT'], drop=False)\ndf_train = pd.concat(train).set_index(['Well', 'DEPT'], drop=False)\nprint('train', df_train.shape, 'test', df_test.shape)\nprint(f'Train {len(df_train)/len(df):.0%}, test {len(df_test)/len(df):.0%}')\n\n# +\n# Plot the data split\nwell_split = []\nfor i, well in enumerate(selected_wells):\n df_well = df.loc[well]\n i_halfway = int(len(df_well)*0.5)\n is_even = i%2==0\n well_split.append(dict(\n well=well,\n top=df_well.Depth.min(),\n half=df_well.Depth.iloc[i_halfway],\n bottom=df_well.Depth.max(),\n train_top=is_even,\n ))\n \ndf_well_split = pd.DataFrame(well_split)\n\nwell_top = df_well_split[df_well_split.train_top]\nwell_bottom = df_well_split[~df_well_split.train_top]\n\n# Do the ones where train is at top\nplt.bar(\n x=well_top.well,\n height=well_top.bottom,\n color=\"green\",\n label=\"test\"\n)\nplt.bar(\n x=well_top.well,\n height=well_top.half,\n color=\"blue\",\n label=\"train\"\n)\nplt.bar(\n x=well_top.well,\n height=well_top.top,\n color=\"white\",\n)\n\n\n# Others\nplt.bar(\n x=well_bottom.well,\n height=well_bottom.bottom,\n color=\"blue\",\n)\nplt.bar(\n x=well_bottom.well,\n height=well_bottom.half,\n color=\"green\",\n)\nplt.bar(\n x=well_bottom.well,\n height=well_bottom.top,\n color=\"white\",\n)\nplt.gca().invert_yaxis()\n\nplt.legend()\nplt.title('data split')\nplt.xticks(rotation=90)\nplt.ylabel('depth')\nplt.show()\n# -\n\n# Double check there is not overlap\na=set(df_train.index)\nb=set(df_test.index)\nassert len(a.intersection(b))==0\n\n# ### Scale\n\n# We need to process the input and target data. The input data needs to be normalised with a standard scaler, and the output data needs to be converted from text to numbers. 
To convert text to numbers we use `LabelEncoder` from Scikit Learn.\n\n# +\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\n\nscaler = StandardScaler()\n\n# Make a encoder, that order by frequency\nencoder = LabelEncoder()\n\n# Instead of fitting, use the same codes as the pandas.Category\nencoder.classes_ = df[\"LITHOLOGY_GEOLINK\"].values.categories\nprint(encoder.classes_)\nfeat_cols = feature_cols = ['CALI', 'DTC', 'GR', 'RDEP', 'RHOB', 'RMED', 'xc', 'yc', 'DEPT', \"LITH_ABV_INT\"]\nscaler.fit(df[feat_cols].values)\n# -\n\n# `LabelEncoder` converts each type to a value.\n\nencoder.transform([\"Shaly Silt\"])\n\n\n# ### To pytorch sequences\n\n# We will be using depth and other measurements to determine the lithology. We dealt with the same problem in the tablular data. But in tabular data we only look at the measurements at each depth to find the class, while here we can look at the variations in the measurements as well.\n\n# +\n\ndef get_sequences(df, seq_length = 10):\n \"\"\"Take moving sequences of a dataframe\"\"\"\n \n\n x = []\n y = []\n features = scaler.transform(df.loc[:, feat_cols].values)\n targets = encoder.transform(df.loc[:, \"LITHOLOGY_GEOLINK\"])\n\n # Add prev labels, as one hot, to inputs\n one_hot_targets = np.eye(len(encoder.classes_))[targets]\n prev_one_host_targets = np.roll(one_hot_targets, shift=shift_length)\n features = np.concatenate([features, prev_one_host_targets], 1)\n\n for i in range(len(targets) - seq_length):\n xi = features[i : i + seq_length, :]\n yi = targets[i + seq_length - 1]\n x.append(xi)\n y.append(yi)\n return x, y\n\n\n# -\n\ndef to_numpy(x):\n if isinstance(x, torch.Tensor):\n x = x.cpu().detach().numpy()\n return x\n\n\n# +\nx_train, y_train = get_sequences(df_train, seq_length=seq_length)\nx_test, y_test = get_sequences(df_test, seq_length=seq_length)\n\n# What's the shape or one row of data? \nprint(x_test[0].shape, y_test[0].shape)\nx_test[0], y_test[0]\n# -\n\n# The output of a classification model is a value for each type. The type with the highest value is the one the model thinks is most likely to be associated with the input data. Therefore, the output size of the model should be the number of types.\n\noutput_size = len(df[\"LITHOLOGY_GEOLINK\"].unique())\n\n# ### Distribution\n\n# It is important that we make sure the training and test set have close distribution. For instance, if there is a certain type in test data that doesn't exist in training data, the model will not be able to predict it.\n\n\n\n# +\ndef show_distribution(y, label):\n y = to_numpy(y)\n plt.hist(y, output_size * 2, alpha=0.5, label=label, density=True)\n plt.xticks(ticks=range(len(encoder.classes_)), labels=encoder.classes_, rotation=90)\n\nshow_distribution(y_train, 'train')\nshow_distribution(y_test, 'test')\nplt.legend()\nplt.show()\n# -\n\n# ## Baseline accuracy\n#\n# When you experiment with a machine learning problem it's important to use a baseline, to check if the model is actually doing any work. 
Sometimes you can use humans, or a prior work, but in novel problems we look at a naive answer, then aim to do better.\n#\n# Below we investigate several methods of naive estimation and try to beat the best.\n#\n# [more](https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html)\n\n# +\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.metrics import accuracy_score, f1_score\nscore_fn=accuracy_score\n\ntrue = np.array(y_test)\nfor strategy in [\"most_frequent\", \"uniform\"]:\n dummy_clf = DummyClassifier(strategy=strategy)\n dummy_clf.fit(x_train, y_train)\n score = dummy_clf.score(x_test, y_test)\n print(f\"baseline accuracy={score:2.2%} for {strategy}\")\n# -\n\n# Prev litho Baseline, this is like a model that says \"the same as the last lithology\"\npred_baseline = np.roll(true, shift=shift_length)\nscore_prev_base=score_fn(true, pred_baseline)\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values or {shift_length*0.15}m above')\n\n\n# OK so which baseline do we use? The highest is the one we need to beat\n\n# ## Train\n#\n# Note that this can be simplified with libraries like pytorch lightning or fast-ai, but they are not yet approved at many companies. So we do it manually, this also helps you see the details of the training loop.\n\nclass NumpyDataset(torch.utils.data.Dataset):\n \"\"\"Dataset wrapping arrays.\n Each sample will be retrieved by indexing array along the first dimension.\n Arguments:\n *arrays (numpy.array): arrays that have the same size of the first dimension.\n \"\"\"\n\n def __init__(self, *arrays):\n self.arrays = arrays\n\n def __getitem__(self, index):\n return tuple(array[index] for array in self.arrays)\n\n def __len__(self):\n return len(self.arrays[0])\n\n\n# +\ndef train_epoch(x_train, y_train, model, bs=128, max_epoch_iters=128*128):\n model.train()\n\n training_loss = []\n training_accuracy = []\n\n # Put data into a loader\n dset_train = NumpyDataset(x_train, y_train)\n load_train = torch.utils.data.dataloader.DataLoader(\n dset_train, \n batch_size=bs, pin_memory=True,\n shuffle=True,\n )\n\n for x, y in tqdm(load_train, leave=False, desc='train'):\n # make it a pytorch gpu variable\n x = x.float().to(device)\n y = y.long().to(device)\n\n \n optimizer.zero_grad()\n preds = model(x) # Make prediction\n loss = loss_func(preds, y) # Measure error/lopss\n \n # Backprop\n loss.backward()\n optimizer.step()\n\n # Record stats\n training_loss.append(loss.item())\n accuracy = score_fn(\n to_numpy(y), to_numpy(preds).argmax(-1)\n )\n training_accuracy.append(accuracy)\n\n return [np.mean(training_loss), np.mean(training_accuracy)]\n\ndef test_epoch(x_test, y_test, model, bs=512, max_epoch_iters=128*128):\n model.eval()\n preds = []\n true = []\n\n test_loss = []\n\n dset_test = NumpyDataset(x_test[:max_epoch_iters//4], y_test[:max_epoch_iters//4])\n load_test = torch.utils.data.dataloader.DataLoader(dset_test, batch_size=bs, pin_memory=True)\n for x, y in tqdm(load_test, leave=False, desc='test'):\n x = x.float().to(device)\n y = y.long().to(device)\n \n pred = model(x)\n loss = loss_func(pred, y)\n\n preds.append(to_numpy(pred))\n true.append(to_numpy(y))\n test_loss.append(loss.item())\n\n preds = np.concatenate(preds, 0).argmax(axis=-1)\n true = np.concatenate(true, 0)\n test_accuracy = score_fn(true, preds)\n return preds, true, np.mean(test_loss), test_accuracy\n\ndef training_loop(x_train, y_train, x_test, y_test, mode, epochs=1, bs=128, max_epoch_iters=128*128):\n all_losses = []\n 
all_accuracys = []\n try:\n _, _, test_loss, test_acc = test_epoch(x_test, y_test, model, max_epoch_iters=max_epoch_iters)\n print(\n f\"Start: Test Loss = {test_loss:.2f}, Test accuracy = {test_acc:.3f}\"\n )\n for epoch in tqdm(range(epochs), desc='epochs'):\n loss, acc = train_epoch(x_train, y_train, model, bs=bs, max_epoch_iters=max_epoch_iters)\n print(f\"Epoch {epoch+1}/{epochs}: Training Loss = {loss:.2f}, Train accuracy = {acc:.3f}\")\n \n _, _, test_loss, test_acc = test_epoch(x_test, y_test, model, max_epoch_iters=max_epoch_iters)\n print(\n f\"Epoch {epoch+1}/{epochs}: Test Loss = {test_loss:.2f}, Test accuracy = {test_acc:.3f}\"\n )\n print(\"-\" * 50)\n \n all_losses.append([loss, test_loss])\n all_accuracys.append([acc, test_acc])\n \n except KeyboardInterrupt:\n # This lets you stop manually. and still get the results\n pass\n\n # Visualising the results\n all_losses = np.array(all_losses)\n plt.plot(all_losses[:, 0], label=\"Training\")\n plt.plot(all_losses[:, 1], label=\"Test\")\n plt.title(\"Loss\")\n plt.legend()\n \n plt.figure()\n all_accuracys = np.array(all_accuracys)\n plt.plot(all_accuracys[:, 0], label=\"Training\")\n plt.plot(all_accuracys[:, 1], label=\"Test\")\n plt.title(\"accuracy\")\n plt.legend()\n \n return all_losses, all_accuracys\n# -\n\n\n\n# Init the model\nmodel = LSTM(\n input_size=x_train[0].shape[-1],\n hidden_size=32,\n num_layers=2,\n output_size=output_size,\n)\nmodel = model.to(device)\nmodel\n\n# Init the optimiser, and loss function\noptimizer = optim.Adam(model.parameters(), lr=0.001)\nloss_func = torch.nn.CrossEntropyLoss().to(device)\n\n# Let's train for 10 epochs\n\ntraining_loop(x_train, y_train, x_test, y_test, model, epochs=4, bs=128)\n\n# Did it overfit?\n\n# ## Test\n\npreds, true, loss, acc = test_epoch(x_test, y_test, model)\nprint('final test acc', acc)\n\n\n\n# ### Reports\n\n# This beats the baseline, so the model is doing better than the naive answer of \"the same again\". 
But lets break it down by lithology\n\n\n\n# +\n# pred_baseline = np.roll(true, shift=shift_length)\n# df_report = classification_report(true, pred_baseline, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\n# df_report[df_report.support>0]\n# -\n\nfrom deep_ml_curriculum.classification_report import pd_classification_report\ndf_report = classification_report(true, preds, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\ndf_report[df_report.support>0]\n\n\ndef confusion_matrix(true, preds):\n cm = sklearn.metrics.confusion_matrix(true, preds, labels=range(len(encoder.classes_)))\n\n plt.figure(figsize=(10, 10))\n plt.title('Confusion Matrix')\n ax=plt.gca()\n disp = sklearn.metrics.ConfusionMatrixDisplay(confusion_matrix=cm,\n display_labels=encoder.classes_)\n disp.plot(ax=ax, xticks_rotation=90)\n plt.show()\n\n\nconfusion_matrix(true, preds)\n\n# ## Plot results\n\n# Let's have a look at model's predictions.\n\n# +\nfrom deep_ml_curriculum.visualization.well_log import plot_well_pred\n\ndef plot_well(df, model, depth_min=0, depth_max=18000, well_name=\"30_6-11\", device=device):\n logs = df.loc[well_name].sort_index()\n x_test, y_test = get_sequences(logs)\n x_test = torch.Tensor(x_test)\n preds = to_numpy(model(x_test.to(device)).argmax(axis=-1))\n acc = score_fn(y_test, preds)\n df_log_results = logs.iloc[10:].copy()\n df_log_results['pred'] = pd.Categorical(encoder.inverse_transform(preds), categories=df_log_results.LITHOLOGY_GEOLINK.values.categories)\n \n # Lets zoom in on an interesting interval a:b\n plot_well_pred(f'{well_name} acc={acc:2.2f}', df_log_results.loc[depth_min:depth_max],\n facies_true=df_log_results.loc[depth_min:depth_max].LITHOLOGY_GEOLINK.values, \n facies_pred=df_log_results.loc[depth_min:depth_max].pred.values)\n plt.show()\n return df_log_results[['LITHOLOGY_GEOLINK', 'pred']]\n \nplot_well(df, model)\n# -\n\nprint(f'{n_wells} wells. {max_lithologies} lithologies')\nprint(f'context length of {0.15*seq_length} m or {seq_length} intervals')\nprint(f'model can see human labels up to {shift_length*0.15}m above. Or {shift_length} intervals')\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n\n# The bottom half was test\nplot_well(df, model)#, depth_min=3000, depth_max=6000)\n\n# We can also zoom into a range\nplot_well(df, model, depth_min=3200, depth_max=3500)\n1\n\n\n\n# The model requires hyper parameter tuning and possibly training over 100s of epochs to reach the best results. However, in this example due to large size of dataset and the model we stopped after `10` epochs. \n#\n# There are number ways we can improve it:\n#\n# - Training for longer. Instead of stopping after 10 `epochs` go for longer. (might overfit)\n# - Increase or decrease the `hidden_size`. (might overfit)\n# - Increase the size of the sequences `seq_length` so the model get to look further in the history. (might underfit)\n# - Increase the learning rate or decrease batch size `bs` (might overfit)\n# - (advanced) Increase the size of training data by adding data from more wells to training. `max_wells` (might underfit)\n#\n# #### Exercise 2\n#\n# Try one of the options above to improve the model. (hint search for \"# CHANGE ME\", change values, then rerun notebook)\n\n# <div class=\"alert alert-success\">\n# <h2>Exercise</h2>\n#\n# Try one of the options above to improve the model. 
\n# \n# To help we've collected and summarised all the code below, so you can change and run the cells below\n# \n# \n# ```python\n# # Params\n# seq_length = 400 # CHANGE ME\n#\n# # Prepare data\n# x_train, y_train = get_sequences(df_train, seq_length=seq_length)\n# x_test, y_test = get_sequences(df_test, seq_length=seq_length)\n#\n# # Init the model\n# model = LSTM(\n# input_size=x_train[0].shape[-1],\n# hidden_size=64, # CHANGE ME\n# num_layers=3, # CHANGE ME\n# output_size=output_size,\n# ).to(device)\n#\n# # Init the optimiser, and loss function\n# optimizer = optim.Adam(model.parameters(), lr=0.001) # CHANGE ME\n# loss_func = torch.nn.CrossEntropyLoss().to(device)\n#\n# # Train\n# training_loop(x_train, y_train, x_test, y_test, model, epochs=10, bs=128) # Change me\n#\n# # Measure baseline\n# pred_baseline = np.roll(np.array(y_test), shift=shift_length)\n# score_prev_base=score_fn(y_test, pred_baseline)\n# print(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n# print(f'{n_wells} wells. {max_lithologies} lithologies')\n# print(f'context length of {0.15*seq_length} m or {seq_length} intervals')\n# print(f'model can see human labels up to {shift_length*0.15}m above. Or {shift_length} intervals')\n# print(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n#\n# # Test\n# preds, true, loss, acc = test_epoch(x_test, y_test, model)\n# print('acc', acc)\n#\n# df_report = classification_report(true, preds, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\n# display(df_report[df_report.support>0])\n#\n# plot_well(df, model)\n# confusion_matrix(true, preds)\n# 1\n# ```\n#\n# <details>\n# <summary><b>→ Hints</b></summary>\n# \n# - The model is close to over fitting to just increasing epochs, or hidden size likely wont help\n#\n# - To change a value\n# - Hint search for \"# CHANGE ME\" below\n# - Change values\n# - then run the cells\n# - wait, some values will take longer\n#\n# </details>\n#\n# <br/>\n# <br/>\n# <details>\n# <summary>\n# <b>→ Solution</b>\n# </summary>\n#\n# ```python\n# # this helps a lot\n# seq_length = 1000 \n# ```\n#\n# </details>\n#\n# </div>\n\n# +\n# Params\nseq_length = 400 # CHANGE ME\n\n# Prepare data\nx_train, y_train = get_sequences(df_train, seq_length=seq_length)\nx_test, y_test = get_sequences(df_test, seq_length=seq_length)\n\n# Init the model\nmodel = LSTM(\n input_size=x_train[0].shape[-1],\n hidden_size=64, # CHANGE ME\n num_layers=3, # CHANGE ME\n output_size=output_size,\n).to(device)\n\n# Init the optimiser, and loss function\noptimizer = optim.Adam(model.parameters(), lr=0.001) # CHANGE ME\nloss_func = torch.nn.CrossEntropyLoss().to(device)\n\n# Train\ntraining_loop(x_train, y_train, x_test, y_test, model, epochs=10, bs=128) # Change me\n\n# Measure baseline\npred_baseline = np.roll(np.array(y_test), shift=shift_length)\nscore_prev_base=score_fn(y_test, pred_baseline)\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\nprint(f'{n_wells} wells. {max_lithologies} lithologies')\nprint(f'context length of {0.15*seq_length} m or {seq_length} intervals')\nprint(f'model can see human labels up to {shift_length*0.15}m above. 
Or {shift_length} intervals')\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n\n# Test\npreds, true, loss, acc = test_epoch(x_test, y_test, model)\nprint('final test acc', acc)\n\ndf_report = classification_report(true, preds, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\ndisplay(df_report[df_report.support>0])\n\nplot_well(df, model)\nconfusion_matrix(true, preds)\n1\n# +\n# Params\nseq_length = 400 # CHANGE ME\n\n# Prepare data\nx_train, y_train = get_sequences(df_train, seq_length=seq_length)\nx_test, y_test = get_sequences(df_test, seq_length=seq_length)\n\n# Init the model\nmodel = LSTM(\n input_size=x_train[0].shape[-1],\n hidden_size=64, # CHANGE ME\n num_layers=3, # CHANGE ME\n output_size=output_size,\n).to(device)\n\n# Init the optimiser, and loss function\noptimizer = optim.Adam(model.parameters(), lr=0.001) # CHANGE ME\n\ncounts = pd.Series(y_train).value_counts().sort_index() + 1000\nweights = 1/counts.values\nweights /= weights.sum()\nloss_func = torch.nn.CrossEntropyLoss(weight=torch.from_numpy(weights).float()).to(device)\n\nplt.title('label weighting')\nplt.bar(range(weights.shape[0]), weights)\nplt.show()\n\n# Train\ntraining_loop(x_train, y_train, x_test, y_test, model, epochs=10, bs=128) # Change me\n\n# Measure baseline\npred_baseline = np.roll(np.array(y_test), shift=shift_length)\nscore_prev_base=score_fn(y_test, pred_baseline)\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\nprint(f'{n_wells} wells. {max_lithologies} lithologies')\nprint(f'context length of {0.15*seq_length} m or {seq_length} intervals')\nprint(f'model can see human labels up to {shift_length*0.15}m above. Or {shift_length} intervals')\nprint(f'baseline accuracy {score_prev_base:2.2%} for prev {shift_length} facies values')\n\n# Test\npreds, true, loss, acc = test_epoch(x_test, y_test, model)\nprint('final test acc', acc)\n\ndf_report = classification_report(true, preds, labels=range(len(encoder.classes_)), target_names=encoder.classes_)\ndisplay(df_report[df_report.support>0])\n\nplot_well(df, model)\nconfusion_matrix(true, preds)\n1\n# -\n\n\n\n\n\n# ## Further Reading\n# - [Introduction to RNN](http://slazebni.cs.illinois.edu/spring17/lec02_rnn.pdf)\n# - [A friendly introduction to Recurrent Neural Networks](https://www.youtube.com/watch?v=UNmqTiOnRfg)\n# - [Recurrent Neural Networks (RNN) and Long Short-Term Memory (LSTM)](https://www.youtube.com/watch?v=WCUNPb-5EYI&t=97s)\n# - [Introduction to LSTM](https://medium.com/x8-the-ai-community/a-7-minute-introduction-to-lstm-5e1480e6f52a)\n# - [LSTM and GRU](https://towardsdatascience.com/illustrated-guide-to-lstms-and-gru-s-a-step-by-step-explanation-44e9eb85bf21)\n# - [Time Series Prediction with LSTM](https://stackabuse.com/time-series-prediction-using-lstm-with-pytorch-in-python/)\n# - [Building RNN from scratch](https://medium.com/dair-ai/building-rnns-is-fun-with-pytorch-and-google-colab-3903ea9a3a79)\n#\n\n# We can also zoom into a range\nplot_well(df, model, depth_min=3200, depth_max=3500)\n1\n\n\n\n\n"
] | [
[
"torch.stack",
"pandas.Series",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xticks",
"torch.randn",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"torch.from_numpy",
"matplotlib.pyplot.hist",
"sklearn.dummy.DummyClassifier",
"torch.Tensor",
"matplotlib.pyplot.bar",
"numpy.mean",
"torch.nn.LSTM",
"torch.utils.data.dataloader.DataLoader",
"pandas.concat",
"sklearn.preprocessing.StandardScaler",
"numpy.roll",
"matplotlib.pyplot.legend",
"torch.nn.Linear",
"pandas.DataFrame",
"torch.nn.CrossEntropyLoss",
"torch.nn.LSTMCell",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.concatenate"
]
] |
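The notebook stored in the preceding `code` field counters lithology class imbalance by passing inverse-frequency class weights to `torch.nn.CrossEntropyLoss` (its solution cell builds them with `pd.Series(y_train).value_counts()`). A minimal standalone sketch of that pattern follows; the label array and the smoothing constant here are illustrative stand-ins, not the notebook's data:

```python
import numpy as np
import pandas as pd
import torch

# Illustrative integer-encoded facies labels; the notebook uses y_train from its wells.
y_train = np.array([0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2, 3])

# Per-class counts plus a smoothing constant so very rare classes do not dominate
# (the notebook adds 1000 to much larger counts).
counts = pd.Series(y_train).value_counts().sort_index() + 10

# Inverse-frequency weights, normalised to sum to one.
weights = 1.0 / counts.values
weights = weights / weights.sum()

# Rare classes now contribute more to the loss per sample.
loss_func = torch.nn.CrossEntropyLoss(weight=torch.from_numpy(weights).float())

logits = torch.randn(len(y_train), len(counts))  # fake model outputs for the sketch
loss = loss_func(logits, torch.from_numpy(y_train).long())
print(f"weighted loss: {loss.item():.3f}")
```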
siyuchen95/madminer | [
"dfcbd7ee26c47dd294610c195fafce15f74c10eb"
] | [
"madminer/utils/ml/trainer.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport six\nimport logging\nfrom collections import OrderedDict\nimport numpy as np\nimport time\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.nn.utils import clip_grad_norm_\n\nlogger = logging.getLogger(__name__)\n\n\nclass EarlyStoppingException(Exception):\n pass\n\n\nclass NanException(Exception):\n pass\n\n\nclass NumpyDataset(Dataset):\n \"\"\" Dataset for numpy arrays with explicit memmap support \"\"\"\n\n def __init__(self, *arrays, **kwargs):\n\n self.dtype = kwargs.get(\"dtype\", torch.float)\n self.memmap = []\n self.data = []\n self.n = None\n\n for array in arrays:\n if self.n is None:\n self.n = array.shape[0]\n assert array.shape[0] == self.n\n\n if isinstance(array, np.memmap):\n self.memmap.append(True)\n self.data.append(array)\n else:\n self.memmap.append(False)\n tensor = torch.from_numpy(array).to(self.dtype)\n self.data.append(tensor)\n\n def __getitem__(self, index):\n items = []\n for memmap, array in zip(self.memmap, self.data):\n if memmap:\n tensor = np.array(array[index])\n items.append(torch.from_numpy(tensor).to(self.dtype))\n else:\n items.append(array[index])\n return tuple(items)\n\n def __len__(self):\n return self.n\n\n\nclass Trainer(object):\n \"\"\" Trainer class. Any subclass has to implement the forward_pass() function. \"\"\"\n\n def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):\n self._init_timer()\n self._timer(start=\"ALL\")\n self._timer(start=\"initialize model\")\n self.model = model\n self.run_on_gpu = run_on_gpu and torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if self.run_on_gpu else \"cpu\")\n self.dtype = torch.double if double_precision else torch.float\n self.n_workers = n_workers\n\n self.model = self.model.to(self.device, self.dtype)\n\n logger.info(\n \"Training on %s with %s precision\",\n \"GPU\" if self.run_on_gpu else \"CPU\",\n \"double\" if double_precision else \"single\",\n )\n\n self._timer(stop=\"initialize model\")\n self._timer(stop=\"ALL\")\n\n def train(\n self,\n data,\n loss_functions,\n loss_weights=None,\n loss_labels=None,\n epochs=50,\n batch_size=100,\n optimizer=optim.Adam,\n optimizer_kwargs=None,\n initial_lr=0.001,\n final_lr=0.0001,\n data_val=None,\n validation_split=0.25,\n early_stopping=True,\n early_stopping_patience=None,\n clip_gradient=None,\n verbose=\"some\",\n ):\n self._timer(start=\"ALL\")\n self._timer(start=\"check data\")\n\n logger.debug(\"Initialising training data\")\n self.check_data(data)\n self.report_data(data)\n if data_val is not None:\n logger.debug(\"Found external validation data set\")\n self.check_data(data_val)\n self.report_data(data_val)\n self._timer(stop=\"check data\", start=\"make dataset\")\n data_labels, dataset = self.make_dataset(data)\n if data_val is not None:\n _, dataset_val = self.make_dataset(data_val)\n else:\n dataset_val = None\n self._timer(stop=\"make dataset\", start=\"make dataloader\")\n train_loader, val_loader = self.make_dataloaders(dataset, dataset_val, validation_split, batch_size)\n\n self._timer(stop=\"make dataloader\", start=\"setup optimizer\")\n logger.debug(\"Setting up optimizer\")\n optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs\n opt = optimizer(self.model.parameters(), lr=initial_lr, **optimizer_kwargs)\n\n early_stopping = early_stopping and (validation_split is 
not None) and (epochs > 1)\n best_loss, best_model, best_epoch = None, None, None\n if early_stopping and early_stopping_patience is None:\n logger.debug(\"Using early stopping with infinite patience\")\n elif early_stopping:\n logger.debug(\"Using early stopping with patience %s\", early_stopping_patience)\n else:\n logger.debug(\"No early stopping\")\n\n self._timer(stop=\"setup optimizer\", start=\"initialize training\")\n n_losses = len(loss_functions)\n loss_weights = [1.0] * n_losses if loss_weights is None else loss_weights\n\n # Verbosity\n if verbose == \"all\": # Print output after every epoch\n n_epochs_verbose = 1\n elif verbose == \"many\": # Print output after 2%, 4%, ..., 100% progress\n n_epochs_verbose = max(int(round(epochs / 50, 0)), 1)\n elif verbose == \"some\": # Print output after 10%, 20%, ..., 100% progress\n n_epochs_verbose = max(int(round(epochs / 20, 0)), 1)\n elif verbose == \"few\": # Print output after 20%, 40%, ..., 100% progress\n n_epochs_verbose = max(int(round(epochs / 5, 0)), 1)\n elif verbose == \"none\": # Never print output\n n_epochs_verbose = epochs + 2\n else:\n raise ValueError(\"Unknown value %s for keyword verbose\", verbose)\n logger.debug(\"Will print training progress every %s epochs\", n_epochs_verbose)\n\n logger.debug(\"Beginning main training loop\")\n losses_train, losses_val = [], []\n self._timer(stop=\"initialize training\")\n\n # Loop over epochs\n for i_epoch in range(epochs):\n logger.debug(\"Training epoch %s / %s\", i_epoch + 1, epochs)\n\n self._timer(start=\"set lr\")\n lr = self.calculate_lr(i_epoch, epochs, initial_lr, final_lr)\n self.set_lr(opt, lr)\n logger.debug(\"Learning rate: %s\", lr)\n self._timer(stop=\"set lr\")\n loss_val = None\n\n try:\n loss_train, loss_val, loss_contributions_train, loss_contributions_val = self.epoch(\n i_epoch, data_labels, train_loader, val_loader, opt, loss_functions, loss_weights, clip_gradient\n )\n losses_train.append(loss_train)\n losses_val.append(loss_val)\n except NanException:\n logger.info(\"Ending training during epoch %s because NaNs appeared\", i_epoch + 1)\n break\n\n self._timer(start=\"early stopping\")\n if early_stopping:\n try:\n best_loss, best_model, best_epoch = self.check_early_stopping(\n best_loss, best_model, best_epoch, loss_val, i_epoch, early_stopping_patience\n )\n except EarlyStoppingException:\n logger.info(\"Early stopping: ending training after %s epochs\", i_epoch + 1)\n break\n self._timer(stop=\"early stopping\", start=\"report epoch\")\n\n verbose_epoch = (i_epoch + 1) % n_epochs_verbose == 0\n self.report_epoch(\n i_epoch,\n loss_labels,\n loss_train,\n loss_val,\n loss_contributions_train,\n loss_contributions_val,\n verbose=verbose_epoch,\n )\n self._timer(stop=\"report epoch\")\n\n self._timer(start=\"early stopping\")\n if early_stopping and len(losses_val) > 0:\n self.wrap_up_early_stopping(best_model, loss_val, best_loss, best_epoch)\n self._timer(stop=\"early stopping\")\n\n logger.debug(\"Training finished\")\n\n self._timer(stop=\"ALL\")\n self._report_timer()\n\n return np.array(losses_train), np.array(losses_val)\n\n @staticmethod\n def report_data(data):\n logger.debug(\"Training data:\")\n for key, value in six.iteritems(data):\n if value is None:\n logger.debug(\" %s: -\", key)\n else:\n logger.debug(\n \" %s: shape %s, first %s, mean %s, min %s, max %s\",\n key,\n value.shape,\n value[0],\n np.mean(value, axis=0),\n np.min(value, axis=0),\n np.max(value, axis=0),\n )\n\n @staticmethod\n def check_data(data):\n pass\n\n def 
make_dataset(self, data):\n data_arrays = []\n data_labels = []\n for key, value in six.iteritems(data):\n data_labels.append(key)\n data_arrays.append(value)\n dataset = NumpyDataset(*data_arrays, dtype=self.dtype)\n return data_labels, dataset\n\n def make_dataloaders(self, dataset, dataset_val, validation_split, batch_size):\n if dataset_val is None and (validation_split is None or validation_split <= 0.0):\n train_loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu, num_workers=self.n_workers\n )\n val_loader = None\n\n elif dataset_val is not None:\n train_loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu, num_workers=self.n_workers\n )\n val_loader = DataLoader(\n dataset_val, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu, num_workers=self.n_workers\n )\n\n else:\n assert 0.0 < validation_split < 1.0, \"Wrong validation split: {}\".format(validation_split)\n\n n_samples = len(dataset)\n indices = list(range(n_samples))\n split = int(np.floor(validation_split * n_samples))\n np.random.shuffle(indices)\n train_idx, valid_idx = indices[split:], indices[:split]\n\n train_sampler = SubsetRandomSampler(train_idx)\n val_sampler = SubsetRandomSampler(valid_idx)\n\n train_loader = DataLoader(\n dataset,\n sampler=train_sampler,\n batch_size=batch_size,\n pin_memory=self.run_on_gpu,\n num_workers=self.n_workers,\n )\n val_loader = DataLoader(\n dataset,\n sampler=val_sampler,\n batch_size=batch_size,\n pin_memory=self.run_on_gpu,\n num_workers=self.n_workers,\n )\n\n return train_loader, val_loader\n\n @staticmethod\n def calculate_lr(i_epoch, n_epochs, initial_lr, final_lr):\n if n_epochs == 1:\n return initial_lr\n return initial_lr * (final_lr / initial_lr) ** float(i_epoch / (n_epochs - 1.0))\n\n @staticmethod\n def set_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n def epoch(\n self,\n i_epoch,\n data_labels,\n train_loader,\n val_loader,\n optimizer,\n loss_functions,\n loss_weights,\n clip_gradient=None,\n ):\n n_losses = len(loss_functions)\n\n self.model.train()\n loss_contributions_train = np.zeros(n_losses)\n loss_train = 0.0\n\n self._timer(start=\"load training batch\")\n for i_batch, batch_data in enumerate(train_loader):\n batch_data = OrderedDict(list(zip(data_labels, batch_data)))\n self._timer(stop=\"load training batch\")\n\n batch_loss, batch_loss_contributions = self.batch_train(\n batch_data, loss_functions, loss_weights, optimizer, clip_gradient\n )\n loss_train += batch_loss\n for i, batch_loss_contribution in enumerate(batch_loss_contributions):\n loss_contributions_train[i] += batch_loss_contribution\n\n self.report_batch(i_epoch, i_batch, batch_loss)\n\n self._timer(start=\"load training batch\")\n self._timer(stop=\"load training batch\")\n\n loss_contributions_train /= len(train_loader)\n loss_train /= len(train_loader)\n\n if val_loader is not None:\n self.model.eval()\n loss_contributions_val = np.zeros(n_losses)\n loss_val = 0.0\n\n self._timer(start=\"load validation batch\")\n for i_batch, batch_data in enumerate(val_loader):\n batch_data = OrderedDict(list(zip(data_labels, batch_data)))\n self._timer(stop=\"load validation batch\")\n\n batch_loss, batch_loss_contributions = self.batch_val(batch_data, loss_functions, loss_weights)\n loss_val += batch_loss\n for i, batch_loss_contribution in enumerate(batch_loss_contributions):\n loss_contributions_val[i] += batch_loss_contribution\n\n 
self._timer(start=\"load validation batch\")\n self._timer(stop=\"load validation batch\")\n\n loss_contributions_val /= len(val_loader)\n loss_val /= len(val_loader)\n\n else:\n loss_contributions_val = None\n loss_val = None\n\n return loss_train, loss_val, loss_contributions_train, loss_contributions_val\n\n def batch_train(self, batch_data, loss_functions, loss_weights, optimizer, clip_gradient=None):\n self._timer(start=\"training forward pass\")\n loss_contributions = self.forward_pass(batch_data, loss_functions)\n self._timer(stop=\"training forward pass\", start=\"training sum losses\")\n loss = self.sum_losses(loss_contributions, loss_weights)\n self._timer(stop=\"training sum losses\", start=\"optimizer step\")\n\n self.optimizer_step(optimizer, loss, clip_gradient)\n self._timer(stop=\"optimizer step\", start=\"training sum losses\")\n\n loss = loss.item()\n loss_contributions = [contrib.item() for contrib in loss_contributions]\n self._timer(stop=\"training sum losses\")\n\n return loss, loss_contributions\n\n def batch_val(self, batch_data, loss_functions, loss_weights):\n self._timer(start=\"validation forward pass\")\n loss_contributions = self.forward_pass(batch_data, loss_functions)\n self._timer(stop=\"validation forward pass\", start=\"validation sum losses\")\n loss = self.sum_losses(loss_contributions, loss_weights)\n\n loss = loss.item()\n loss_contributions = [contrib.item() for contrib in loss_contributions]\n self._timer(stop=\"validation sum losses\")\n return loss, loss_contributions\n\n def forward_pass(self, batch_data, loss_functions):\n \"\"\"\n Forward pass of the model. Needs to be implemented by any subclass.\n\n Parameters\n ----------\n batch_data : OrderedDict with str keys and Tensor values\n The data of the minibatch.\n\n loss_functions : list of function\n Loss functions.\n\n Returns\n -------\n losses : list of Tensor\n Losses as scalar pyTorch tensors.\n\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def sum_losses(contributions, weights):\n loss = weights[0] * contributions[0]\n for _w, _l in zip(weights[1:], contributions[1:]):\n loss = loss + _w * _l\n return loss\n\n def optimizer_step(self, optimizer, loss, clip_gradient):\n self._timer(start=\"opt: zero grad\")\n optimizer.zero_grad()\n self._timer(stop=\"opt: zero grad\", start=\"opt: backward\")\n loss.backward()\n self._timer(start=\"opt: clip grad norm\", stop=\"opt: backward\")\n if clip_gradient is not None:\n clip_grad_norm_(self.model.parameters(), clip_gradient)\n self._timer(stop=\"opt: clip grad norm\", start=\"opt: step\")\n optimizer.step()\n self._timer(stop=\"opt: step\")\n\n def check_early_stopping(self, best_loss, best_model, best_epoch, loss, i_epoch, early_stopping_patience=None):\n if best_loss is None or loss < best_loss:\n best_loss = loss\n best_model = self.model.state_dict()\n best_epoch = i_epoch\n\n if early_stopping_patience is not None and i_epoch - best_epoch > early_stopping_patience >= 0:\n raise EarlyStoppingException\n\n if loss is None or not np.isfinite(loss):\n raise EarlyStoppingException\n\n return best_loss, best_model, best_epoch\n\n @staticmethod\n def report_batch(i_epoch, i_batch, loss_train):\n if i_batch in [0, 1, 10, 100, 1000]:\n logger.debug(\" Epoch {:>3d}, batch {:>3d}: loss {:>8.5f}\".format(i_epoch + 1, i_batch + 1, loss_train))\n\n @staticmethod\n def report_epoch(\n i_epoch, loss_labels, loss_train, loss_val, loss_contributions_train, loss_contributions_val, verbose=False\n ):\n logging_fn = logger.info if verbose else 
logger.debug\n\n def contribution_summary(labels, contributions):\n summary = \"\"\n for i, (label, value) in enumerate(zip(labels, contributions)):\n if i > 0:\n summary += \", \"\n summary += \"{}: {:>6.3f}\".format(label, value)\n return summary\n\n train_report = \" Epoch {:>3d}: train loss {:>8.5f} ({})\".format(\n i_epoch + 1, loss_train, contribution_summary(loss_labels, loss_contributions_train)\n )\n logging_fn(train_report)\n\n if loss_val is not None:\n val_report = \" val. loss {:>8.5f} ({})\".format(\n loss_val, contribution_summary(loss_labels, loss_contributions_val)\n )\n logging_fn(val_report)\n\n def wrap_up_early_stopping(self, best_model, currrent_loss, best_loss, best_epoch):\n if best_loss is None or not np.isfinite(best_loss):\n logger.warning(\"Best loss is None, cannot wrap up early stopping\")\n elif currrent_loss is None or not np.isfinite(currrent_loss) or best_loss < currrent_loss:\n logger.info(\n \"Early stopping after epoch %s, with loss %8.5f compared to final loss %8.5f\",\n best_epoch + 1,\n best_loss,\n currrent_loss,\n )\n self.model.load_state_dict(best_model)\n else:\n logger.info(\"Early stopping did not improve performance\")\n\n @staticmethod\n def _check_for_nans(label, *tensors):\n for tensor in tensors:\n if tensor is None:\n continue\n if torch.isnan(tensor).any():\n logger.warning(\"%s contains NaNs, aborting training!\", label)\n raise NanException\n\n def _init_timer(self):\n self.timer = OrderedDict()\n self.time_started = OrderedDict()\n\n def _timer(self, start=None, stop=None):\n if start is not None:\n self.time_started[start] = time.time()\n\n if stop is not None:\n if stop not in list(self.time_started.keys()):\n logger.warning(\"Timer for task %s has been stopped without being started before\", stop)\n return\n\n dt = time.time() - self.time_started[stop]\n del self.time_started[stop]\n\n if stop in list(self.timer.keys()):\n self.timer[stop] += dt\n else:\n self.timer[stop] = dt\n\n def _report_timer(self):\n logger.info(\"Training time spend on:\")\n for key, value in six.iteritems(self.timer):\n logger.info(\" {:>32s}: {:6.2f}h\".format(key, value / 3600.0))\n\n\nclass SingleParameterizedRatioTrainer(Trainer):\n def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):\n super(SingleParameterizedRatioTrainer, self).__init__(model, run_on_gpu, double_precision, n_workers)\n self.calculate_model_score = True\n\n def check_data(self, data):\n data_keys = list(data.keys())\n if \"x\" not in data_keys or \"theta\" not in data_keys or \"y\" not in data_keys:\n raise ValueError(\"Missing required information 'x', 'theta', or 'y' in training data!\")\n\n for key in data_keys:\n if key not in [\"x\", \"theta\", \"y\", \"r_xz\", \"t_xz\"]:\n logger.warning(\"Unknown key %s in training data! 
Ignoring it.\", key)\n\n self.calculate_model_score = \"t_xz\" in data_keys\n if self.calculate_model_score:\n logger.debug(\"Model score will be calculated\")\n else:\n logger.debug(\"Model score will not be calculated\")\n\n def forward_pass(self, batch_data, loss_functions):\n self._timer(start=\"fwd: move data\")\n theta = batch_data[\"theta\"].to(self.device, self.dtype, non_blocking=True)\n x = batch_data[\"x\"].to(self.device, self.dtype, non_blocking=True)\n y = batch_data[\"y\"].to(self.device, self.dtype, non_blocking=True)\n try:\n r_xz = batch_data[\"r_xz\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n r_xz = None\n try:\n t_xz = batch_data[\"t_xz\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n t_xz = None\n self._timer(stop=\"fwd: move data\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Training data\", theta, x, y)\n self._check_for_nans(\"Augmented training data\", r_xz, t_xz)\n self._timer(start=\"fwd: model.forward\", stop=\"fwd: check for nans\")\n\n if self.calculate_model_score:\n theta.requires_grad = True\n\n s_hat, log_r_hat, t_hat = self.model(theta, x, track_score=self.calculate_model_score, return_grad_x=False)\n self._timer(stop=\"fwd: model.forward\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Model output\", log_r_hat, s_hat)\n self._check_for_nans(\"Model score\", t_hat)\n\n self._timer(start=\"fwd: calculate losses\", stop=\"fwd: check for nans\")\n losses = [loss_function(s_hat, log_r_hat, t_hat, None, y, r_xz, t_xz, None) for loss_function in loss_functions]\n self._timer(stop=\"fwd: calculate losses\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Loss\", *losses)\n self._timer(stop=\"fwd: check for nans\")\n\n return losses\n\n\nclass DoubleParameterizedRatioTrainer(Trainer):\n def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):\n super(DoubleParameterizedRatioTrainer, self).__init__(model, run_on_gpu, double_precision, n_workers)\n self.calculate_model_score = True\n\n def check_data(self, data):\n data_keys = list(data.keys())\n if \"x\" not in data_keys or \"theta0\" not in data_keys or \"theta1\" not in data_keys or \"y\" not in data_keys:\n raise ValueError(\"Missing required information 'x', 'theta0', 'theta1', or 'y' in training data!\")\n\n for key in data_keys:\n if key not in [\"x\", \"theta0\", \"theta1\", \"y\", \"r_xz\", \"t_xz0\", \"t_xz1\"]:\n logger.warning(\"Unknown key %s in training data! 
Ignoring it.\", key)\n\n self.calculate_model_score = \"t_xz0\" in data_keys or \"t_xz1\" in data_keys\n if self.calculate_model_score:\n logger.debug(\"Model score will be calculated\")\n else:\n logger.debug(\"Model score will not be calculated\")\n\n def forward_pass(self, batch_data, loss_functions):\n self._timer(start=\"fwd: move data\")\n theta0 = batch_data[\"theta0\"].to(self.device, self.dtype, non_blocking=True)\n theta1 = batch_data[\"theta1\"].to(self.device, self.dtype, non_blocking=True)\n x = batch_data[\"x\"].to(self.device, self.dtype, non_blocking=True)\n y = batch_data[\"y\"].to(self.device, self.dtype, non_blocking=True)\n try:\n r_xz = batch_data[\"r_xz\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n r_xz = None\n try:\n t_xz0 = batch_data[\"t_xz0\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n t_xz0 = None\n try:\n t_xz1 = batch_data[\"t_xz1\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n t_xz1 = None\n self._timer(stop=\"fwd: move data\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Training data\", theta0, theta1, x, y)\n self._check_for_nans(\"Augmented training data\", r_xz, t_xz0, t_xz1)\n self._timer(start=\"fwd: model.forward\", stop=\"fwd: check for nans\")\n\n if self.calculate_model_score:\n theta0.requires_grad = True\n theta1.requires_grad = True\n\n s_hat, log_r_hat, t_hat0, t_hat1 = self.model(\n theta0, theta1, x, track_score=self.calculate_model_score, return_grad_x=False\n )\n self._timer(stop=\"fwd: model.forward\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Model output\", s_hat, log_r_hat, t_hat0, t_hat1)\n\n self._timer(start=\"fwd: calculate losses\", stop=\"fwd: check for nans\")\n losses = [\n loss_function(s_hat, log_r_hat, t_hat0, t_hat1, y, r_xz, t_xz0, t_xz1) for loss_function in loss_functions\n ]\n self._timer(stop=\"fwd: calculate losses\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Loss\", *losses)\n self._timer(stop=\"fwd: check for nans\")\n\n return losses\n\n\nclass LocalScoreTrainer(Trainer):\n def check_data(self, data):\n data_keys = list(data.keys())\n if \"x\" not in data_keys or \"t_xz\" not in data_keys:\n raise ValueError(\"Missing required information 'x' or 't_xz' in training data!\")\n\n for key in data_keys:\n if key not in [\"x\", \"t_xz\"]:\n logger.warning(\"Unknown key %s in training data! 
Ignoring it.\", key)\n\n def forward_pass(self, batch_data, loss_functions):\n self._timer(start=\"fwd: move data\")\n x = batch_data[\"x\"].to(self.device, self.dtype, non_blocking=True)\n t_xz = batch_data[\"t_xz\"].to(self.device, self.dtype, non_blocking=True)\n self._timer(stop=\"fwd: move data\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Training data\", x)\n self._check_for_nans(\"Augmented training data\", t_xz)\n\n self._timer(start=\"fwd: model.forward\", stop=\"fwd: check for nans\")\n t_hat = self.model(x)\n self._timer(stop=\"fwd: model.forward\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Model output\", t_hat)\n\n self._timer(start=\"fwd: calculate losses\", stop=\"fwd: check for nans\")\n losses = [loss_function(t_hat, t_xz) for loss_function in loss_functions]\n self._timer(stop=\"fwd: calculate losses\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Loss\", *losses)\n self._timer(stop=\"fwd: check for nans\")\n\n return losses\n\n\nclass FlowTrainer(Trainer):\n def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):\n super(FlowTrainer, self).__init__(model, run_on_gpu, double_precision, n_workers)\n self.calculate_model_score = True\n\n def check_data(self, data):\n data_keys = list(data.keys())\n if \"x\" not in data_keys or \"theta\" not in data_keys:\n raise ValueError(\"Missing required information 'x' or 'theta' in training data!\")\n\n for key in data_keys:\n if key not in [\"x\", \"theta\", \"t_xz\"]:\n logger.warning(\"Unknown key %s in training data! Ignoring it.\", key)\n\n self.calculate_model_score = \"t_xz\" in data_keys\n if self.calculate_model_score:\n logger.debug(\"Model score will be calculated\")\n else:\n logger.debug(\"Model score will not be calculated\")\n\n def forward_pass(self, batch_data, loss_functions):\n self._timer(start=\"fwd: move data\")\n x = batch_data[\"x\"].to(self.device, self.dtype, non_blocking=True)\n theta = batch_data[\"theta\"].to(self.device, self.dtype, non_blocking=True)\n try:\n t_xz = batch_data[\"t_xz\"].to(self.device, self.dtype, non_blocking=True)\n except KeyError:\n t_xz = None\n self._timer(stop=\"fwd: move data\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Training data\", theta, x)\n self._check_for_nans(\"Augmented training data\", t_xz)\n\n self._timer(start=\"fwd: model.forward\", stop=\"fwd: check for nans\")\n if self.calculate_model_score:\n theta.requires_grad = True\n _, log_likelihood, t_hat = self.model.log_likelihood_and_score(theta, x)\n else:\n _, log_likelihood = self.model.log_likelihood(theta, x)\n t_hat = None\n self._timer(stop=\"fwd: model.forward\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Model output\", log_likelihood, t_hat)\n\n self._timer(start=\"fwd: calculate losses\", stop=\"fwd: check for nans\")\n losses = [loss_function(log_likelihood, t_hat, t_xz) for loss_function in loss_functions]\n self._timer(stop=\"fwd: calculate losses\", start=\"fwd: check for nans\")\n self._check_for_nans(\"Loss\", *losses)\n self._timer(stop=\"fwd: check for nans\")\n\n return losses\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.random.shuffle",
"numpy.zeros",
"numpy.mean",
"numpy.floor",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.cuda.is_available",
"numpy.max",
"numpy.min",
"torch.from_numpy",
"numpy.array",
"torch.isnan",
"torch.device",
"numpy.isfinite"
]
] |
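The `madminer` trainer in the preceding `code` field updates the learning rate at the start of every epoch: `calculate_lr()` decays it geometrically from `initial_lr` to `final_lr` (0.001 to 0.0001 in the `train()` defaults), and `set_lr()` writes the result into each optimizer parameter group. A small self-contained sketch of that schedule follows; the toy linear model and the five-epoch run are illustrative, not the trainer's defaults:

```python
import torch
import torch.optim as optim


def calculate_lr(i_epoch, n_epochs, initial_lr, final_lr):
    # Geometric interpolation: epoch 0 uses initial_lr, the final epoch uses final_lr.
    if n_epochs == 1:
        return initial_lr
    return initial_lr * (final_lr / initial_lr) ** float(i_epoch / (n_epochs - 1.0))


def set_lr(optimizer, lr):
    # Same pattern as Trainer.set_lr(): overwrite lr in every parameter group in place.
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr


model = torch.nn.Linear(4, 1)                    # toy model, just to own some parameters
opt = optim.Adam(model.parameters(), lr=0.001)

n_epochs = 5                                     # illustrative; train() defaults to 50
for i_epoch in range(n_epochs):
    lr = calculate_lr(i_epoch, n_epochs, initial_lr=0.001, final_lr=0.0001)
    set_lr(opt, lr)
    print(f"epoch {i_epoch + 1}: lr = {lr:.6f}")
    # ... run the epoch's training and validation batches here ...
```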
steven0820/tensorflow | [
"36ebbf1ddc3ed820b7a5572ff4ed8e9bc707b8e5"
] | [
"tensorflow/contrib/learn/python/learn/graph_actions.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"High level operations on graphs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport sys\nimport threading\nimport time\n\nimport numpy as np\n\nfrom six import reraise\n\nfrom tensorflow.contrib.framework.python.ops import ops as contrib_ops\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.learn.python.learn import monitors as monitors_lib\nfrom tensorflow.contrib.learn.python.learn.utils import checkpoints\nfrom tensorflow.core.framework import summary_pb2\nfrom tensorflow.python.client import session as tf_session\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import coordinator\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import queue_runner\nfrom tensorflow.python.training import saver as tf_saver\nfrom tensorflow.python.training import session_manager as session_manager_lib\nfrom tensorflow.python.training import summary_io\nfrom tensorflow.python.training import supervisor as tf_supervisor\n\n# Singleton for SummaryWriter per logdir folder.\n_SUMMARY_WRITERS = {}\n\n# Lock protecting _SUMMARY_WRITERS\n_summary_writer_lock = threading.Lock()\n\n\ndef clear_summary_writers():\n \"\"\"Clear cached summary writers. 
Currently only used for unit tests.\"\"\"\n return summary_io.SummaryWriterCache.clear()\n\n\ndef get_summary_writer(logdir):\n \"\"\"Returns single SummaryWriter per logdir in current run.\n\n Args:\n logdir: str, folder to write summaries.\n\n Returns:\n Existing `SummaryWriter` object or new one if never wrote to given\n directory.\n \"\"\"\n return summary_io.SummaryWriterCache.get(logdir)\n\n\ndef _make_saver(graph, keep_checkpoint_max=5):\n vars_to_save = graph.get_collection(ops.GraphKeys.VARIABLES)\n if vars_to_save:\n return tf_saver.Saver(vars_to_save,\n sharded=True,\n max_to_keep=keep_checkpoint_max)\n else:\n return None\n\n\ndef _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):\n logging.info('Loading model from checkpoint: %s.', checkpoint_path)\n saver = saver or _make_saver(graph)\n if saver:\n saver.restore(session, checkpoint_path)\n else:\n logging.info('No variables found in graph, not creating Saver() object.')\n\n\ndef _run_with_monitors(session, step, tensors, feed_dict, monitors):\n \"\"\"Runs session for given tensors with monitor callbacks.\"\"\"\n for monitor in monitors:\n tensors += monitor.step_begin(step)\n tensors = list(set(tensors))\n\n outputs = session.run(tensors, feed_dict=feed_dict)\n outputs = dict(zip(\n [t.name if isinstance(t, ops.Tensor) else t for t in tensors],\n outputs))\n\n should_stop = False\n for monitor in monitors:\n induce_stop = monitor.step_end(step, outputs)\n should_stop = should_stop or induce_stop\n return outputs, should_stop\n\n\ndef _monitored_train(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor=None,\n init_op=None,\n init_feed_dict=None,\n init_fn=None,\n log_every_steps=10,\n supervisor_is_chief=True,\n supervisor_master='',\n supervisor_save_model_secs=600,\n supervisor_save_model_steps=None,\n keep_checkpoint_max=5,\n supervisor_save_summaries_steps=100,\n feed_fn=None,\n steps=None,\n fail_on_nan_loss=True,\n hooks=None,\n max_steps=None):\n \"\"\"Train a model via monitored_session.\n\n Given `graph`, a directory to write outputs to (`output_dir`), and some ops,\n run a training loop. The given `train_op` performs one step of training on the\n model. The `loss_op` represents the objective function of the training. It is\n expected to increment the `global_step_tensor`, a scalar integer tensor\n counting training steps. This function uses `Supervisor` to initialize the\n graph (from a checkpoint if one is available in `output_dir`), write summaries\n defined in the graph, and write regular checkpoints as defined by\n `supervisor_save_model_secs`.\n\n Training continues until `global_step_tensor` evaluates to `max_steps`, or, if\n `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the\n program is terminated with exit code 1.\n\n Args:\n graph: A graph to train. It is expected that this graph is not in use\n elsewhere.\n output_dir: A directory to write outputs to.\n train_op: An op that performs one training step when run.\n loss_op: A scalar loss tensor.\n global_step_tensor: A tensor representing the global step. If none is given,\n one is extracted from the graph using the same logic as in `Supervisor`.\n init_op: An op that initializes the graph. If `None`, use `Supervisor`'s\n default.\n init_feed_dict: A dictionary that maps `Tensor` objects to feed values.\n This feed dictionary will be used when `init_op` is evaluated.\n init_fn: Optional callable passed to Supervisor to initialize the model.\n log_every_steps: Output logs regularly. 
The logs contain timing data and the\n current loss. A `0` or negative value disables logging.\n supervisor_is_chief: Whether the current process is the chief supervisor in\n charge of restoring the model and running standard services.\n supervisor_master: The master string to use when preparing the session. \n supervisor_save_model_secs: Save checkpoints every this many seconds. Can\n not be specified with `supervisor_save_model_steps`.\n supervisor_save_model_steps: Save checkpoints every this many steps. Can not\n be specified with `supervisor_save_model_secs`.\n keep_checkpoint_max: The maximum number of recent checkpoint files to\n keep. As new files are created, older files are deleted. If None or 0,\n all checkpoint files are kept. This is simply passed as the max_to_keep\n arg to `tf.Saver` constructor.\n supervisor_save_summaries_steps: Save summaries every\n `supervisor_save_summaries_steps` seconds when training.\n feed_fn: A function that is called every iteration to produce a `feed_dict`\n passed to `session.run` calls. Optional.\n steps: Trains for this many steps (e.g. current global step + `steps`).\n fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`\n evaluates to `NaN`. If false, continue training as if nothing happened.\n hooks: List of `SessionRunHook` subclass instances. Used for callbacks\n inside the training loop.\n max_steps: Number of total steps for which to train model. If `None`,\n train forever. Two calls fit(steps=100) means 200 training iterations.\n On the other hand two calls of fit(max_steps=100) means, second call\n will not do any iteration since first call did all 100 steps.\n\n Returns:\n The final loss value.\n\n Raises:\n ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`\n is not provided. See `tf.contrib.framework.get_global_step` for how we\n look up the latter if not provided explicitly.\n NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever\n evaluates to `NaN`.\n ValueError: If both `steps` and `max_steps` are not `None`.\n \"\"\"\n if (steps is not None) and (max_steps is not None):\n raise ValueError('Can not provide both steps and max_steps.')\n if not output_dir:\n raise ValueError('Output directory should be non-empty %s.' % output_dir)\n if train_op is None:\n raise ValueError('Missing train_op.')\n if loss_op is None:\n raise ValueError('Missing loss_op.')\n if hooks is None:\n hooks = []\n if not isinstance(hooks, list):\n raise ValueError('Hooks should be a list.')\n with graph.as_default():\n global_step_tensor = contrib_variables.assert_or_get_global_step(\n graph, global_step_tensor)\n if global_step_tensor is None:\n raise ValueError('No \"global_step\" was provided or found in the graph.')\n\n if max_steps is not None:\n try:\n start_step = checkpoints.load_variable(output_dir,\n global_step_tensor.name)\n if max_steps <= start_step:\n logging.info('Skipping training since max_steps has already saved.')\n return None\n except: # pylint: disable=bare-except\n pass\n\n # Adapted SessionRunHooks such as ExportMonitor depend on the\n # CheckpointSaverHook to be executed before they should be executed.\n # The `hooks` param comprises of deprecated monitor hooks\n # (such as ExportMonitor). 
Appending them after the basic_session_run_hooks.\n all_hooks = []\n with graph.as_default():\n all_hooks.append(basic_session_run_hooks.NanTensorHook(\n loss_op, fail_on_nan_loss=fail_on_nan_loss))\n if log_every_steps > 0:\n all_hooks.append(basic_session_run_hooks.LoggingTensorHook({\n 'loss': loss_op.name,\n 'step': global_step_tensor.name\n }, every_n_iter=log_every_steps))\n\n def make_saver():\n return tf_saver.Saver(\n sharded=True, max_to_keep=keep_checkpoint_max, defer_build=True)\n\n scaffold = monitored_session.Scaffold(\n init_op=init_op,\n init_feed_dict=init_feed_dict,\n init_fn=init_fn,\n saver=monitored_session.Scaffold.get_or_default('saver',\n ops.GraphKeys.SAVERS,\n make_saver))\n\n if not supervisor_is_chief:\n session_creator = monitored_session.WorkerSessionCreator(\n scaffold=scaffold,\n master=supervisor_master)\n else:\n session_creator = monitored_session.ChiefSessionCreator(\n scaffold=scaffold,\n checkpoint_dir=output_dir,\n master=supervisor_master)\n summary_writer = summary_io.SummaryWriterCache.get(output_dir)\n all_hooks.append(\n basic_session_run_hooks.StepCounterHook(\n summary_writer=summary_writer))\n all_hooks.append(\n basic_session_run_hooks.SummarySaverHook(\n save_steps=supervisor_save_summaries_steps,\n summary_writer=summary_writer,\n scaffold=scaffold))\n if (supervisor_save_model_secs is not None\n or supervisor_save_model_steps is not None):\n all_hooks.append(\n basic_session_run_hooks.CheckpointSaverHook(\n output_dir,\n save_secs=supervisor_save_model_secs,\n save_steps=supervisor_save_model_steps,\n scaffold=scaffold))\n\n if steps is not None or max_steps is not None:\n all_hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))\n all_hooks.extend(hooks)\n\n with monitored_session.MonitoredSession(\n session_creator=session_creator,\n hooks=all_hooks) as super_sess:\n loss = None\n while not super_sess.should_stop():\n _, loss = super_sess.run([train_op, loss_op], feed_fn() if feed_fn else\n None)\n return loss\n\n\n# TODO(ispir): Deprecate train in favor of supervised_train\ndef train(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor=None,\n init_op=None,\n init_feed_dict=None,\n init_fn=None,\n log_every_steps=10,\n supervisor_is_chief=True,\n supervisor_master='',\n supervisor_save_model_secs=600,\n keep_checkpoint_max=5,\n supervisor_save_summaries_steps=100,\n feed_fn=None,\n steps=None,\n fail_on_nan_loss=True,\n monitors=None,\n max_steps=None):\n \"\"\"Train a model.\n\n Given `graph`, a directory to write outputs to (`output_dir`), and some ops,\n run a training loop. The given `train_op` performs one step of training on the\n model. The `loss_op` represents the objective function of the training. It is\n expected to increment the `global_step_tensor`, a scalar integer tensor\n counting training steps. This function uses `Supervisor` to initialize the\n graph (from a checkpoint if one is available in `output_dir`), write summaries\n defined in the graph, and write regular checkpoints as defined by\n `supervisor_save_model_secs`.\n\n Training continues until `global_step_tensor` evaluates to `max_steps`, or, if\n `fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the\n program is terminated with exit code 1.\n\n Args:\n graph: A graph to train. 
It is expected that this graph is not in use\n elsewhere.\n output_dir: A directory to write outputs to.\n train_op: An op that performs one training step when run.\n loss_op: A scalar loss tensor.\n global_step_tensor: A tensor representing the global step. If none is given,\n one is extracted from the graph using the same logic as in `Supervisor`.\n init_op: An op that initializes the graph. If `None`, use `Supervisor`'s\n default.\n init_feed_dict: A dictionary that maps `Tensor` objects to feed values.\n This feed dictionary will be used when `init_op` is evaluated.\n init_fn: Optional callable passed to Supervisor to initialize the model.\n log_every_steps: Output logs regularly. The logs contain timing data and the\n current loss.\n supervisor_is_chief: Whether the current process is the chief supervisor in\n charge of restoring the model and running standard services.\n supervisor_master: The master string to use when preparing the session.\n supervisor_save_model_secs: Save a checkpoint every\n `supervisor_save_model_secs` seconds when training.\n keep_checkpoint_max: The maximum number of recent checkpoint files to\n keep. As new files are created, older files are deleted. If None or 0,\n all checkpoint files are kept. This is simply passed as the max_to_keep\n arg to tf.Saver constructor.\n supervisor_save_summaries_steps: Save summaries every\n `supervisor_save_summaries_steps` seconds when training.\n feed_fn: A function that is called every iteration to produce a `feed_dict`\n passed to `session.run` calls. Optional.\n steps: Trains for this many steps (e.g. current global step + `steps`).\n fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`\n evaluates to `NaN`. If false, continue training as if nothing happened.\n monitors: List of `BaseMonitor` subclass instances. Used for callbacks\n inside the training loop.\n max_steps: Number of total steps for which to train model. If `None`,\n train forever. Two calls fit(steps=100) means 200 training iterations.\n On the other hand two calls of fit(max_steps=100) means, second call\n will not do any iteration since first call did all 100 steps.\n\n Returns:\n The final loss value.\n\n Raises:\n ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`\n is not provided. See `tf.contrib.framework.get_global_step` for how we\n look up the latter if not provided explicitly.\n NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever\n evaluates to `NaN`.\n ValueError: If both `steps` and `max_steps` are not `None`.\n \"\"\"\n while True:\n try:\n return _train_internal(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor,\n init_op,\n init_feed_dict,\n init_fn,\n log_every_steps,\n supervisor_is_chief,\n supervisor_master,\n supervisor_save_model_secs,\n keep_checkpoint_max,\n supervisor_save_summaries_steps,\n feed_fn,\n steps,\n fail_on_nan_loss,\n monitors,\n max_steps)\n except errors.AbortedError:\n # Happens when PS restarts, keep training.\n logging.warning('Training got Aborted error. 
Keep training.')\n\n\ndef _train_internal(graph,\n output_dir,\n train_op,\n loss_op,\n global_step_tensor,\n init_op,\n init_feed_dict,\n init_fn,\n log_every_steps,\n supervisor_is_chief,\n supervisor_master,\n supervisor_save_model_secs,\n keep_checkpoint_max,\n supervisor_save_summaries_steps,\n feed_fn,\n steps,\n fail_on_nan_loss,\n monitors,\n max_steps):\n \"\"\"See train.\"\"\"\n if (steps is not None) and (max_steps is not None):\n raise ValueError('Can not provide both steps and max_steps.')\n if not output_dir:\n raise ValueError('Output directory should be non-empty %s.' % output_dir)\n if train_op is None:\n raise ValueError('Missing train_op.')\n if loss_op is None:\n raise ValueError('Missing loss_op.')\n\n with graph.as_default():\n global_step_tensor = contrib_variables.assert_or_get_global_step(\n graph, global_step_tensor)\n if global_step_tensor is None:\n raise ValueError('No \"global_step\" was provided or found in the graph.')\n\n # Get current step.\n try:\n start_step = checkpoints.load_variable(\n output_dir, global_step_tensor.name)\n except (errors.NotFoundError, ValueError):\n start_step = 0\n\n summary_writer = (get_summary_writer(output_dir)\n if supervisor_is_chief else None)\n\n # Add default chief monitors if none were provided.\n if not monitors:\n monitors = monitors_lib.get_default_monitors(\n loss_op=loss_op,\n summary_op=logging_ops.get_summary_op(),\n save_summary_steps=supervisor_save_summaries_steps,\n summary_writer=summary_writer) if supervisor_is_chief else []\n\n # TODO(ipolosukhin): Replace all functionality of Supervisor\n # with Chief-Exclusive Monitors.\n if not supervisor_is_chief:\n # Prune list of monitor to the ones runnable on all workers.\n monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]\n\n if max_steps is None:\n max_steps = (start_step + steps) if steps else None\n # Start monitors, can create graph parts.\n for monitor in monitors:\n monitor.begin(max_steps=max_steps)\n\n supervisor = tf_supervisor.Supervisor(\n graph,\n init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,\n init_feed_dict=init_feed_dict,\n is_chief=supervisor_is_chief,\n logdir=output_dir,\n saver=_make_saver(graph, keep_checkpoint_max),\n global_step=global_step_tensor,\n summary_op=None,\n summary_writer=summary_writer,\n save_model_secs=supervisor_save_model_secs,\n init_fn=init_fn)\n session = supervisor.PrepareSession(master=supervisor_master,\n start_standard_services=True)\n supervisor.StartQueueRunners(session)\n\n with session:\n get_current_step = lambda: session.run(global_step_tensor)\n\n start_step = get_current_step()\n last_step = start_step\n last_log_step = start_step\n loss_value = None\n logging.info('Training steps [%d,%s)', last_step, 'inf'\n if max_steps is None else str(max_steps))\n\n excinfo = None\n try:\n while not supervisor.ShouldStop() and (\n (max_steps is None) or (last_step < max_steps)):\n start_time = time.time()\n feed_dict = feed_fn() if feed_fn is not None else None\n\n outputs, should_stop = _run_with_monitors(\n session, last_step + 1, [train_op, loss_op], feed_dict, monitors)\n\n loss_value = outputs[loss_op.name]\n if np.isnan(loss_value):\n failure_message = 'Model diverged with loss = NaN.'\n if fail_on_nan_loss:\n logging.error(failure_message)\n raise monitors_lib.NanLossDuringTrainingError()\n else:\n logging.warning(failure_message)\n\n if should_stop:\n break\n\n this_step = get_current_step()\n\n if this_step <= last_step:\n logging.error(\n 'Global step was not incremented by train 
op at step %s'\n ': new step %d', last_step, this_step)\n\n last_step = this_step\n is_last_step = (max_steps is not None) and (last_step >= max_steps)\n if is_last_step or (last_step - last_log_step >= log_every_steps):\n logging.info(\n 'training step %d, loss = %.5f (%.3f sec/batch).',\n last_step, loss_value, float(time.time() - start_time))\n last_log_step = last_step\n except errors.OutOfRangeError as e:\n logging.warn('Got exception during tf.learn training loop possibly '\n 'due to exhausted input queue %s.', e)\n except StopIteration:\n logging.info('Exhausted input iterarator.')\n except BaseException as e: # pylint: disable=broad-except\n # Hold on to any other exceptions while we try recording a final\n # checkpoint and summary.\n excinfo = sys.exc_info()\n finally:\n try:\n # Call supervisor.Stop() from within a try block because it re-raises\n # exceptions thrown by the supervised threads.\n supervisor.Stop(close_summary_writer=False)\n\n # Save one last checkpoint and summaries\n # TODO(wicke): This should be handled by Supervisor\n\n # In case we encountered an exception in the try block before we updated\n # last_step, update it here (again).\n last_step = get_current_step()\n if supervisor_is_chief:\n ckpt_path = supervisor.save_path\n logging.info('Saving checkpoint for step %d to checkpoint: %s.',\n last_step, ckpt_path)\n supervisor.saver.save(session, ckpt_path, global_step=last_step)\n\n # Finish monitors.\n for monitor in monitors:\n monitor.end()\n\n # catch OutOfRangeError which is thrown when queue is out of data (and for\n # other reasons as well).\n except errors.OutOfRangeError as e:\n logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '\n 'due to exhausted input queue. Note: summary_op is not '\n 'expected to trigger dequeues. 
%s.', e)\n except BaseException as e: # pylint: disable=broad-except\n # If we don't already have an exception to re-raise, raise this one.\n if not excinfo:\n raise\n # Otherwise, log this one and raise the other in the finally block.\n logging.error('Got exception during tf.learn final checkpoint %s.', e)\n finally:\n if excinfo:\n reraise(*excinfo)\n return loss_value\n\n\ndef _get_first_op_from_collection(collection_name):\n elements = ops.get_collection(collection_name)\n if elements:\n return elements[0]\n return None\n\n\ndef _get_saver():\n \"\"\"Lazy init and return saver.\"\"\"\n saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)\n if saver is None and variables.all_variables():\n saver = tf_saver.Saver()\n ops.add_to_collection(ops.GraphKeys.SAVERS, saver)\n return saver\n\n\ndef _get_ready_op():\n ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)\n if ready_op is None:\n ready_op = variables.report_uninitialized_variables()\n ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)\n return ready_op\n\n\ndef _get_local_init_op():\n local_init_op = _get_first_op_from_collection(\n ops.GraphKeys.LOCAL_INIT_OP)\n if local_init_op is None:\n op_list = [variables.initialize_local_variables(),\n data_flow_ops.initialize_all_tables()]\n if op_list:\n local_init_op = control_flow_ops.group(*op_list)\n ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)\n return local_init_op\n\n\ndef _eval_results_to_str(eval_results):\n return ', '.join('%s = %s' % (k, v) for k, v in eval_results.items())\n\n\ndef _write_summary_results(output_dir, eval_results, current_global_step):\n \"\"\"Writes eval results into summary file in given dir.\"\"\"\n logging.info('Saving evaluation summary for %d step: %s', current_global_step,\n _eval_results_to_str(eval_results))\n summary_writer = get_summary_writer(output_dir)\n summary = summary_pb2.Summary()\n for key in eval_results:\n if eval_results[key] is None:\n continue\n value = summary.value.add()\n value.tag = key\n if (isinstance(eval_results[key], np.float32) or\n isinstance(eval_results[key], float)):\n value.simple_value = float(eval_results[key])\n else:\n logging.warn('Skipping summary for %s, must be a float or np.float32.',\n key)\n summary_writer.add_summary(summary, current_global_step)\n summary_writer.flush()\n\n\ndef evaluate(graph,\n output_dir,\n checkpoint_path,\n eval_dict,\n update_op=None,\n global_step_tensor=None,\n supervisor_master='',\n log_every_steps=10,\n feed_fn=None,\n max_steps=None):\n \"\"\"Evaluate a model loaded from a checkpoint.\n\n Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint\n to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval\n loop for `max_steps` steps, or until an exception (generally, an\n end-of-input signal from a reader operation) is raised from running\n `eval_dict`.\n\n In each step of evaluation, all tensors in the `eval_dict` are evaluated, and\n every `log_every_steps` steps, they are logged. At the very end of evaluation,\n a summary is evaluated (finding the summary ops using `Supervisor`'s logic)\n and written to `output_dir`.\n\n Args:\n graph: A `Graph` to train. It is expected that this graph is not in use\n elsewhere.\n output_dir: A string containing the directory to write a summary to.\n checkpoint_path: A string containing the path to a checkpoint to restore.\n Can be `None` if the graph doesn't require loading any variables.\n eval_dict: A `dict` mapping string names to tensors to evaluate. 
It is\n evaluated in every logging step. The result of the final evaluation is\n returned. If `update_op` is None, then it's evaluated in every step. If\n `max_steps` is `None`, this should depend on a reader that will raise an\n end-of-input exception when the inputs are exhausted.\n update_op: A `Tensor` which is run in every step.\n global_step_tensor: A `Variable` containing the global step. If `None`,\n one is extracted from the graph using the same logic as in `Supervisor`.\n Used to place eval summaries on training curves.\n supervisor_master: The master string to use when preparing the session.\n log_every_steps: Integer. Output logs every `log_every_steps` evaluation\n steps. The logs contain the `eval_dict` and timing information.\n feed_fn: A function that is called every iteration to produce a `feed_dict`\n passed to `session.run` calls. Optional.\n max_steps: Integer. Evaluate `eval_dict` this many times.\n\n Returns:\n A tuple `(eval_results, global_step)`:\n eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)\n that are the result of running eval_dict in the last step. `None` if no\n eval steps were run.\n global_step: The global step this evaluation corresponds to.\n\n Raises:\n ValueError: if `output_dir` is empty.\n \"\"\"\n if not output_dir:\n raise ValueError('Output directory should be non-empty %s.' % output_dir)\n with graph.as_default():\n global_step_tensor = contrib_variables.assert_or_get_global_step(\n graph, global_step_tensor)\n\n # Create or get summary op, global_step and saver.\n saver = _get_saver()\n local_init_op = _get_local_init_op()\n ready_op = _get_ready_op()\n\n session_manager = session_manager_lib.SessionManager(\n local_init_op=local_init_op,\n ready_op=ready_op)\n session, initialized = session_manager.recover_session(\n master=supervisor_master,\n saver=saver,\n checkpoint_dir=checkpoint_path)\n\n # Start queue runners.\n coord = coordinator.Coordinator()\n threads = queue_runner.start_queue_runners(session, coord)\n\n with session:\n if not initialized:\n logging.warning('Failed to initialize from %s.', checkpoint_path)\n # TODO(ipolosukhin): This should be failing, but old code relies on that.\n session.run(variables.initialize_all_variables())\n if checkpoint_path:\n _restore_from_checkpoint(session, graph, checkpoint_path, saver)\n\n current_global_step = session.run(global_step_tensor)\n eval_results = None\n # TODO(amodei): Fix this to run through the eval set exactly once.\n step = 0\n eval_step = None\n feed_dict = None\n logging.info('Eval steps [%d,%s) for training step %d.', step,\n 'inf' if max_steps is None\n else str(max_steps), current_global_step)\n try:\n try:\n while (max_steps is None) or (step < max_steps):\n step += 1\n start_time = time.time()\n feed_dict = feed_fn() if feed_fn is not None else None\n if update_op is not None:\n session.run(update_op, feed_dict=feed_dict)\n else:\n eval_results = session.run(eval_dict, feed_dict=feed_dict)\n eval_step = step\n\n # TODO(wicke): We should assert that the global step hasn't changed.\n if step % log_every_steps == 0:\n if eval_step is None or step != eval_step:\n eval_results = session.run(eval_dict, feed_dict=feed_dict)\n eval_step = step\n duration = time.time() - start_time\n logging.info('Results after %d steps (%.3f sec/batch): %s.',\n step, float(duration),\n _eval_results_to_str(eval_results))\n finally:\n if eval_results is None or step != eval_step:\n eval_results = session.run(eval_dict, feed_dict=feed_dict)\n eval_step = step\n # Stop 
session first, before queue runners.\n session.close()\n\n # Stop queue runners.\n try:\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=120)\n except (RuntimeError, errors.CancelledError) as e:\n logging.warning('Coordinator didn\\'t stop cleanly: %s', e)\n\n # catch OutOfRangeError which is thrown when queue is out of data (and for\n # other reasons as well).\n except errors.OutOfRangeError as e:\n if max_steps is None:\n logging.info('Input queue is exhausted.')\n else:\n logging.warn('Input queue is exhausted: %s.', e)\n # catch StopIteration which is thrown is DataReader is out of data.\n except StopIteration as e:\n if max_steps is None:\n logging.info('Input iterator is exhausted.')\n else:\n logging.warn('Input iterator is exhausted: %s.', e)\n\n # Save summaries for this evaluation.\n _write_summary_results(output_dir, eval_results, current_global_step)\n\n return eval_results, current_global_step\n\n\ndef run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):\n \"\"\"Run `output_dict` tensors `n` times, with the same `feed_dict` each run.\n\n Args:\n output_dict: A `dict` mapping string names to tensors to run. Must all be\n from the same graph.\n feed_dict: `dict` of input values to feed each run.\n restore_checkpoint_path: A string containing the path to a checkpoint to\n restore.\n n: Number of times to repeat.\n\n Returns:\n A list of `n` `dict` objects, each containing values read from `output_dict`\n tensors.\n \"\"\"\n return run_feeds(\n output_dict=output_dict,\n feed_dicts=itertools.repeat(feed_dict, n),\n restore_checkpoint_path=restore_checkpoint_path)\n\n\n# TODO(ptucker): Add save_checkpoint_path.\ndef run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):\n \"\"\"Run `output_dict` tensors with each input in `feed_dicts`.\n\n If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,\n init all variables.\n\n Args:\n output_dict: A `dict` mapping string names to `Tensor` objects to run.\n Tensors must all be from the same graph.\n feed_dicts: Iterable of `dict` objects of input values to feed.\n restore_checkpoint_path: A string containing the path to a checkpoint to\n restore.\n\n Yields:\n A sequence of dicts of values read from `output_dict` tensors, one item\n yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,\n values are the results read from the corresponding `Tensor` in\n `output_dict`.\n\n Raises:\n ValueError: if `output_dict` or `feed_dicts` is None or empty.\n \"\"\"\n if not output_dict:\n raise ValueError('output_dict is invalid: %s.' % output_dict)\n if not feed_dicts:\n raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)\n\n graph = contrib_ops.get_graph_from_inputs(output_dict.values())\n\n with graph.as_default() as g:\n with tf_session.Session('') as session:\n if restore_checkpoint_path:\n _restore_from_checkpoint(session, g, restore_checkpoint_path)\n else:\n session.run(variables.initialize_all_variables())\n session.run(variables.initialize_local_variables())\n session.run(data_flow_ops.initialize_all_tables())\n coord = coordinator.Coordinator()\n threads = None\n try:\n threads = queue_runner.start_queue_runners(session, coord=coord)\n for f in feed_dicts:\n yield session.run(output_dict, f)\n finally:\n coord.request_stop()\n if threads:\n coord.join(threads, stop_grace_period_secs=120)\n\n\ndef run_feeds(*args, **kwargs):\n \"\"\"See run_feeds_iter(). 
Returns a `list` instead of an iterator.\"\"\"\n return list(run_feeds_iter(*args, **kwargs))\n\n\ndef infer(restore_checkpoint_path, output_dict, feed_dict=None):\n \"\"\"Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.\n\n If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,\n init all variables.\n\n Args:\n restore_checkpoint_path: A string containing the path to a checkpoint to\n restore.\n output_dict: A `dict` mapping string names to `Tensor` objects to run.\n Tensors must all be from the same graph.\n feed_dict: `dict` object mapping `Tensor` objects to input values to feed.\n\n Returns:\n Dict of values read from `output_dict` tensors. Keys are the same as\n `output_dict`, values are the results read from the corresponding `Tensor`\n in `output_dict`.\n\n Raises:\n ValueError: if `output_dict` or `feed_dicts` is None or empty.\n \"\"\"\n return run_feeds(output_dict=output_dict,\n feed_dicts=[feed_dict] if feed_dict is not None else [None],\n restore_checkpoint_path=restore_checkpoint_path)[0]\n"
] | [
[
"tensorflow.python.ops.data_flow_ops.initialize_all_tables",
"tensorflow.python.ops.variables.initialize_local_variables",
"tensorflow.python.training.basic_session_run_hooks.LoggingTensorHook",
"tensorflow.python.training.monitored_session.MonitoredSession",
"tensorflow.python.ops.logging_ops.get_summary_op",
"tensorflow.python.training.saver.Saver",
"tensorflow.python.training.monitored_session.WorkerSessionCreator",
"tensorflow.python.platform.tf_logging.warn",
"tensorflow.contrib.framework.python.ops.variables.assert_or_get_global_step",
"tensorflow.contrib.learn.python.learn.utils.checkpoints.load_variable",
"tensorflow.core.framework.summary_pb2.Summary",
"tensorflow.python.training.basic_session_run_hooks.NanTensorHook",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.ops.variables.all_variables",
"tensorflow.python.client.session.Session",
"tensorflow.python.training.monitored_session.Scaffold.get_or_default",
"numpy.isnan",
"tensorflow.contrib.learn.python.learn.monitors.NanLossDuringTrainingError",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.training.basic_session_run_hooks.StepCounterHook",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.training.summary_io.SummaryWriterCache.clear",
"tensorflow.python.training.queue_runner.start_queue_runners",
"tensorflow.python.training.coordinator.Coordinator",
"tensorflow.python.training.basic_session_run_hooks.CheckpointSaverHook",
"tensorflow.python.training.monitored_session.ChiefSessionCreator",
"tensorflow.python.training.summary_io.SummaryWriterCache.get",
"tensorflow.python.ops.variables.report_uninitialized_variables",
"tensorflow.python.training.basic_session_run_hooks.StopAtStepHook",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.ops.variables.initialize_all_variables",
"tensorflow.python.training.session_manager.SessionManager",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.training.basic_session_run_hooks.SummarySaverHook"
]
] |
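The record above carries the complete source of the old tf.contrib.learn graph_actions helpers (the training loop tail plus evaluate, run_n, run_feeds, and infer) together with the TensorFlow APIs they touch. As a hedged illustration only — it is not taken from the record, and it assumes a TensorFlow 1.x environment in which this module is still importable at its historical path tensorflow.contrib.learn.python.learn.graph_actions — the run_feeds helper described in the embedded docstrings could be exercised roughly as follows; the toy graph, tensor names, and input values are invented for the example:

import tensorflow as tf  # assumption: TensorFlow 1.x, where tf.contrib and tf.placeholder exist
from tensorflow.contrib.learn.python.learn import graph_actions  # historical module path (assumption)

# Minimal graph; run_feeds derives the graph from the tensors in output_dict.
graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
    w = tf.Variable(tf.ones([3, 1]), name='w')
    y = tf.matmul(x, w, name='y')

# With restore_checkpoint_path left unset, run_feeds initializes all variables
# itself and runs output_dict once per feed dict (per the embedded docstring).
results = graph_actions.run_feeds(
    output_dict={'y': y},
    feed_dicts=[{x: [[1.0, 2.0, 3.0]]}],
)
print(results[0]['y'])  # -> [[6.]]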
OakCityLabs/numpy | [
"09f5c5a64eb019b3e058c7183ca1ead6190bdbc8"
] | [
"numpy/distutils/system_info.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nThis file defines a set of system_info classes for getting\ninformation about various resources (libraries, library directories,\ninclude directories, etc.) in the system. Usage:\n info_dict = get_info(<name>)\n where <name> is a string 'atlas','x11','fftw','lapack','blas',\n 'lapack_src', 'blas_src', etc. For a complete list of allowed names,\n see the definition of get_info() function below.\n\n Returned info_dict is a dictionary which is compatible with\n distutils.setup keyword arguments. If info_dict == {}, then the\n asked resource is not available (system_info could not find it).\n\n Several *_info classes specify an environment variable to specify\n the locations of software. When setting the corresponding environment\n variable to 'None' then the software will be ignored, even when it\n is available in system.\n\nGlobal parameters:\n system_info.search_static_first - search static libraries (.a)\n in precedence to shared ones (.so, .sl) if enabled.\n system_info.verbosity - output the results to stdout if enabled.\n\nThe file 'site.cfg' is looked for in\n\n1) Directory of main setup.py file being run.\n2) Home directory of user running the setup.py file as ~/.numpy-site.cfg\n3) System wide directory (location of this file...)\n\nThe first one found is used to get system configuration options The\nformat is that used by ConfigParser (i.e., Windows .INI style). The\nsection ALL is not intended for general use.\n\nAppropriate defaults are used if nothing is specified.\n\nThe order of finding the locations of resources is the following:\n 1. environment variable\n 2. section in site.cfg\n 3. DEFAULT section in site.cfg\n 4. System default search paths (see ``default_*`` variables below).\nOnly the first complete match is returned.\n\nCurrently, the following classes are available, along with their section names:\n\n Numeric_info:Numeric\n _numpy_info:Numeric\n _pkg_config_info:None\n accelerate_info:accelerate\n agg2_info:agg2\n amd_info:amd\n atlas_3_10_blas_info:atlas\n atlas_3_10_blas_threads_info:atlas\n atlas_3_10_info:atlas\n atlas_3_10_threads_info:atlas\n atlas_blas_info:atlas\n atlas_blas_threads_info:atlas\n atlas_info:atlas\n atlas_threads_info:atlas\n blas64__opt_info:ALL # usage recommended (general ILP64 BLAS, 64_ symbol suffix)\n blas_ilp64_opt_info:ALL # usage recommended (general ILP64 BLAS)\n blas_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 BLAS, no symbol suffix)\n blas_info:blas\n blas_mkl_info:mkl\n blas_opt_info:ALL # usage recommended\n blas_src_info:blas_src\n blis_info:blis\n boost_python_info:boost_python\n dfftw_info:fftw\n dfftw_threads_info:fftw\n djbfft_info:djbfft\n f2py_info:ALL\n fft_opt_info:ALL\n fftw2_info:fftw\n fftw3_info:fftw3\n fftw_info:fftw\n fftw_threads_info:fftw\n flame_info:flame\n freetype2_info:freetype2\n gdk_2_info:gdk_2\n gdk_info:gdk\n gdk_pixbuf_2_info:gdk_pixbuf_2\n gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2\n gdk_x11_2_info:gdk_x11_2\n gtkp_2_info:gtkp_2\n gtkp_x11_2_info:gtkp_x11_2\n lapack64__opt_info:ALL # usage recommended (general ILP64 LAPACK, 64_ symbol suffix)\n lapack_atlas_3_10_info:atlas\n lapack_atlas_3_10_threads_info:atlas\n lapack_atlas_info:atlas\n lapack_atlas_threads_info:atlas\n lapack_ilp64_opt_info:ALL # usage recommended (general ILP64 LAPACK)\n lapack_ilp64_plain_opt_info:ALL # usage recommended (general ILP64 LAPACK, no symbol suffix)\n lapack_info:lapack\n lapack_mkl_info:mkl\n lapack_opt_info:ALL # usage recommended\n lapack_src_info:lapack_src\n 
mkl_info:mkl\n numarray_info:numarray\n numerix_info:numerix\n numpy_info:numpy\n openblas64__info:openblas64_\n openblas64__lapack_info:openblas64_\n openblas_clapack_info:openblas\n openblas_ilp64_info:openblas_ilp64\n openblas_ilp64_lapack_info:openblas_ilp64\n openblas_info:openblas\n openblas_lapack_info:openblas\n sfftw_info:fftw\n sfftw_threads_info:fftw\n system_info:ALL\n umfpack_info:umfpack\n wx_info:wx\n x11_info:x11\n xft_info:xft\n\nNote that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER\nand NPY_LAPACK_ORDER environment variables to determine the order in which\nspecific BLAS and LAPACK libraries are searched for.\n\nThis search (or autodetection) can be bypassed by defining the environment\nvariables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the\nexact linker flags to use (language will be set to F77). Building against\nNetlib BLAS/LAPACK or stub files, in order to be able to switch BLAS and LAPACK\nimplementations at runtime. If using this to build NumPy itself, it is\nrecommended to also define NPY_CBLAS_LIBS (assuming your BLAS library has a\nCBLAS interface) to enable CBLAS usage for matrix multiplication (unoptimized\notherwise).\n\nExample:\n----------\n[DEFAULT]\n# default section\nlibrary_dirs = /usr/lib:/usr/local/lib:/opt/lib\ninclude_dirs = /usr/include:/usr/local/include:/opt/include\nsrc_dirs = /usr/local/src:/opt/src\n# search static libraries (.a) in preference to shared ones (.so)\nsearch_static_first = 0\n\n[fftw]\nlibraries = rfftw, fftw\n\n[atlas]\nlibrary_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas\n# for overriding the names of the atlas libraries\nlibraries = lapack, f77blas, cblas, atlas\n\n[x11]\nlibrary_dirs = /usr/X11R6/lib\ninclude_dirs = /usr/X11R6/include\n----------\n\nNote that the ``libraries`` key is the default setting for libraries.\n\nAuthors:\n Pearu Peterson <[email protected]>, February 2002\n David M. Cooke <[email protected]>, April 2002\n\nCopyright 2002 Pearu Peterson all rights reserved,\nPearu Peterson <[email protected]>\nPermission to use, modify, and distribute this software is given under the\nterms of the NumPy (BSD style) license. See LICENSE.txt that came with\nthis distribution for specifics.\n\nNO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.\n\n\"\"\"\nimport sys\nimport os\nimport re\nimport copy\nimport warnings\nimport subprocess\nimport textwrap\n\nfrom glob import glob\nfrom functools import reduce\nfrom configparser import NoOptionError\nfrom configparser import RawConfigParser as ConfigParser\n# It seems that some people are importing ConfigParser from here so is\n# good to keep its class name. 
Use of RawConfigParser is needed in\n# order to be able to load path names with percent in them, like\n# `feature%2Fcool` which is common on git flow branch names.\n\nfrom distutils.errors import DistutilsError\nfrom distutils.dist import Distribution\nimport sysconfig\nfrom numpy.distutils import log\nfrom distutils.util import get_platform\n\nfrom numpy.distutils.exec_command import (\n find_executable, filepath_from_subprocess_output,\n )\nfrom numpy.distutils.misc_util import (is_sequence, is_string,\n get_shared_lib_extension)\nfrom numpy.distutils.command.config import config as cmd_config\nfrom numpy.distutils import customized_ccompiler as _customized_ccompiler\nfrom numpy.distutils import _shell_utils\nimport distutils.ccompiler\nimport tempfile\nimport shutil\n\n__all__ = ['system_info']\n\n# Determine number of bits\nimport platform\n_bits = {'32bit': 32, '64bit': 64}\nplatform_bits = _bits[platform.architecture()[0]]\n\n\nglobal_compiler = None\n\ndef customized_ccompiler():\n global global_compiler\n if not global_compiler:\n global_compiler = _customized_ccompiler()\n return global_compiler\n\n\ndef _c_string_literal(s):\n \"\"\"\n Convert a python string into a literal suitable for inclusion into C code\n \"\"\"\n # only these three characters are forbidden in C strings\n s = s.replace('\\\\', r'\\\\')\n s = s.replace('\"', r'\\\"')\n s = s.replace('\\n', r'\\n')\n return '\"{}\"'.format(s)\n\n\ndef libpaths(paths, bits):\n \"\"\"Return a list of library paths valid on 32 or 64 bit systems.\n\n Inputs:\n paths : sequence\n A sequence of strings (typically paths)\n bits : int\n An integer, the only valid values are 32 or 64. A ValueError exception\n is raised otherwise.\n\n Examples:\n\n Consider a list of directories\n >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']\n\n For a 32-bit platform, this is already valid:\n >>> np.distutils.system_info.libpaths(paths,32)\n ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']\n\n On 64 bits, we prepend the '64' postfix\n >>> np.distutils.system_info.libpaths(paths,64)\n ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',\n '/usr/lib64', '/usr/lib']\n \"\"\"\n if bits not in (32, 64):\n raise ValueError(\"Invalid bit size in libpaths: 32 or 64 only\")\n\n # Handle 32bit case\n if bits == 32:\n return paths\n\n # Handle 64bit case\n out = []\n for p in paths:\n out.extend([p + '64', p])\n\n return out\n\n\nif sys.platform == 'win32':\n default_lib_dirs = ['C:\\\\',\n os.path.join(sysconfig.get_config_var('exec_prefix'),\n 'libs')]\n default_runtime_dirs = []\n default_include_dirs = []\n default_src_dirs = ['.']\n default_x11_lib_dirs = []\n default_x11_include_dirs = []\n _include_dirs = [\n 'include',\n 'include/suitesparse',\n ]\n _lib_dirs = [\n 'lib',\n ]\n\n _include_dirs = [d.replace('/', os.sep) for d in _include_dirs]\n _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs]\n def add_system_root(library_root):\n \"\"\"Add a package manager root to the include directories\"\"\"\n global default_lib_dirs\n global default_include_dirs\n\n library_root = os.path.normpath(library_root)\n\n default_lib_dirs.extend(\n os.path.join(library_root, d) for d in _lib_dirs)\n default_include_dirs.extend(\n os.path.join(library_root, d) for d in _include_dirs)\n\n # VCpkg is the de-facto package manager on windows for C/C++\n # libraries. 
If it is on the PATH, then we append its paths here.\n vcpkg = shutil.which('vcpkg')\n if vcpkg:\n vcpkg_dir = os.path.dirname(vcpkg)\n if platform.architecture()[0] == '32bit':\n specifier = 'x86'\n else:\n specifier = 'x64'\n\n vcpkg_installed = os.path.join(vcpkg_dir, 'installed')\n for vcpkg_root in [\n os.path.join(vcpkg_installed, specifier + '-windows'),\n os.path.join(vcpkg_installed, specifier + '-windows-static'),\n ]:\n add_system_root(vcpkg_root)\n\n # Conda is another popular package manager that provides libraries\n conda = shutil.which('conda')\n if conda:\n conda_dir = os.path.dirname(conda)\n add_system_root(os.path.join(conda_dir, '..', 'Library'))\n add_system_root(os.path.join(conda_dir, 'Library'))\n\nelse:\n default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',\n '/opt/local/lib', '/sw/lib'], platform_bits)\n default_runtime_dirs = []\n default_include_dirs = ['/usr/local/include',\n '/opt/include',\n # path of umfpack under macports\n '/opt/local/include/ufsparse',\n '/opt/local/include', '/sw/include',\n '/usr/include/suitesparse']\n default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']\n\n default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',\n '/usr/lib'], platform_bits)\n default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include']\n\n if os.path.exists('/usr/lib/X11'):\n globbed_x11_dir = glob('/usr/lib/*/libX11.so')\n if globbed_x11_dir:\n x11_so_dir = os.path.split(globbed_x11_dir[0])[0]\n default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])\n default_x11_include_dirs.extend(['/usr/lib/X11/include',\n '/usr/include/X11'])\n\n # iOS: we need to cancel this call\n if (not os.getenv('PLATFORM').startswith('iphone')):\n with open(os.devnull, 'w') as tmp:\n try:\n p = subprocess.Popen([\"gcc\", \"-print-multiarch\"], stdout=subprocess.PIPE,\n stderr=tmp)\n except (OSError, DistutilsError):\n # OSError if gcc is not installed, or SandboxViolation (DistutilsError\n # subclass) if an old setuptools bug is triggered (see gh-3160).\n pass\n else:\n triplet = str(p.communicate()[0].decode().strip())\n if p.returncode == 0:\n # gcc supports the \"-print-multiarch\" option\n default_x11_lib_dirs += [os.path.join(\"/usr/lib/\", triplet)]\n default_lib_dirs += [os.path.join(\"/usr/lib/\", triplet)]\n\n\nif os.path.join(sys.prefix, 'lib') not in default_lib_dirs:\n default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))\n default_include_dirs.append(os.path.join(sys.prefix, 'include'))\n default_src_dirs.append(os.path.join(sys.prefix, 'src'))\n\ndefault_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]\ndefault_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]\ndefault_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]\ndefault_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]\n\nso_ext = get_shared_lib_extension()\n\n\ndef get_standard_file(fname):\n \"\"\"Returns a list of files named 'fname' from\n 1) System-wide directory (directory-location of this module)\n 2) Users HOME directory (os.environ['HOME'])\n 3) Local directory\n \"\"\"\n # System-wide file\n filenames = []\n try:\n f = __file__\n except NameError:\n f = sys.argv[0]\n sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],\n fname)\n if os.path.isfile(sysfile):\n filenames.append(sysfile)\n\n # Home directory\n # And look for the user config file\n try:\n f = os.path.expanduser('~')\n except KeyError:\n pass\n else:\n user_file = os.path.join(f, fname)\n if 
os.path.isfile(user_file):\n filenames.append(user_file)\n\n # Local file\n if os.path.isfile(fname):\n filenames.append(os.path.abspath(fname))\n\n return filenames\n\n\ndef _parse_env_order(base_order, env):\n \"\"\" Parse an environment variable `env` by splitting with \",\" and only returning elements from `base_order`\n\n This method will sequence the environment variable and check for their\n individual elements in `base_order`.\n\n The items in the environment variable may be negated via '^item' or '!itema,itemb'.\n It must start with ^/! to negate all options.\n\n Raises\n ------\n ValueError: for mixed negated and non-negated orders or multiple negated orders\n\n Parameters\n ----------\n base_order : list of str\n the base list of orders\n env : str\n the environment variable to be parsed, if none is found, `base_order` is returned\n\n Returns\n -------\n allow_order : list of str\n allowed orders in lower-case\n unknown_order : list of str\n for values not overlapping with `base_order`\n \"\"\"\n order_str = os.environ.get(env, None)\n\n # ensure all base-orders are lower-case (for easier comparison)\n base_order = [order.lower() for order in base_order]\n if order_str is None:\n return base_order, []\n\n neg = order_str.startswith('^') or order_str.startswith('!')\n # Check format\n order_str_l = list(order_str)\n sum_neg = order_str_l.count('^') + order_str_l.count('!')\n if neg:\n if sum_neg > 1:\n raise ValueError(f\"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}\")\n # remove prefix\n order_str = order_str[1:]\n elif sum_neg > 0:\n raise ValueError(f\"Environment variable '{env}' may not mix negated an non-negated items: {order_str}\")\n\n # Split and lower case\n orders = order_str.lower().split(',')\n\n # to inform callee about non-overlapping elements\n unknown_order = []\n\n # if negated, we have to remove from the order\n if neg:\n allow_order = base_order.copy()\n\n for order in orders:\n if not order:\n continue\n\n if order not in base_order:\n unknown_order.append(order)\n continue\n\n if order in allow_order:\n allow_order.remove(order)\n\n else:\n allow_order = []\n\n for order in orders:\n if not order:\n continue\n\n if order not in base_order:\n unknown_order.append(order)\n continue\n\n if order not in allow_order:\n allow_order.append(order)\n\n return allow_order, unknown_order\n\n\ndef get_info(name, notfound_action=0):\n \"\"\"\n notfound_action:\n 0 - do nothing\n 1 - display warning message\n 2 - raise error\n \"\"\"\n cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead\n 'atlas_threads': atlas_threads_info, # ditto\n 'atlas_blas': atlas_blas_info,\n 'atlas_blas_threads': atlas_blas_threads_info,\n 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead\n 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto\n 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead\n 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto\n 'atlas_3_10_blas': atlas_3_10_blas_info,\n 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,\n 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead\n 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto\n 'flame': flame_info, # use lapack_opt instead\n 'mkl': mkl_info,\n # openblas which may or may not have embedded lapack\n 'openblas': openblas_info, # use blas_opt instead\n # openblas with embedded lapack\n 'openblas_lapack': openblas_lapack_info, # use blas_opt instead\n 'openblas_clapack': openblas_clapack_info, # use 
blas_opt instead\n 'blis': blis_info, # use blas_opt instead\n 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead\n 'blas_mkl': blas_mkl_info, # use blas_opt instead\n 'accelerate': accelerate_info, # use blas_opt instead\n 'openblas64_': openblas64__info,\n 'openblas64__lapack': openblas64__lapack_info,\n 'openblas_ilp64': openblas_ilp64_info,\n 'openblas_ilp64_lapack': openblas_ilp64_lapack_info,\n 'x11': x11_info,\n 'fft_opt': fft_opt_info,\n 'fftw': fftw_info,\n 'fftw2': fftw2_info,\n 'fftw3': fftw3_info,\n 'dfftw': dfftw_info,\n 'sfftw': sfftw_info,\n 'fftw_threads': fftw_threads_info,\n 'dfftw_threads': dfftw_threads_info,\n 'sfftw_threads': sfftw_threads_info,\n 'djbfft': djbfft_info,\n 'blas': blas_info, # use blas_opt instead\n 'lapack': lapack_info, # use lapack_opt instead\n 'lapack_src': lapack_src_info,\n 'blas_src': blas_src_info,\n 'numpy': numpy_info,\n 'f2py': f2py_info,\n 'Numeric': Numeric_info,\n 'numeric': Numeric_info,\n 'numarray': numarray_info,\n 'numerix': numerix_info,\n 'lapack_opt': lapack_opt_info,\n 'lapack_ilp64_opt': lapack_ilp64_opt_info,\n 'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,\n 'lapack64__opt': lapack64__opt_info,\n 'blas_opt': blas_opt_info,\n 'blas_ilp64_opt': blas_ilp64_opt_info,\n 'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,\n 'blas64__opt': blas64__opt_info,\n 'boost_python': boost_python_info,\n 'agg2': agg2_info,\n 'wx': wx_info,\n 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,\n 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,\n 'gdk_pixbuf_2': gdk_pixbuf_2_info,\n 'gdk-pixbuf-2.0': gdk_pixbuf_2_info,\n 'gdk': gdk_info,\n 'gdk_2': gdk_2_info,\n 'gdk-2.0': gdk_2_info,\n 'gdk_x11_2': gdk_x11_2_info,\n 'gdk-x11-2.0': gdk_x11_2_info,\n 'gtkp_x11_2': gtkp_x11_2_info,\n 'gtk+-x11-2.0': gtkp_x11_2_info,\n 'gtkp_2': gtkp_2_info,\n 'gtk+-2.0': gtkp_2_info,\n 'xft': xft_info,\n 'freetype2': freetype2_info,\n 'umfpack': umfpack_info,\n 'amd': amd_info,\n }.get(name.lower(), system_info)\n return cl().get_info(notfound_action)\n\n\nclass NotFoundError(DistutilsError):\n \"\"\"Some third-party program or library is not found.\"\"\"\n\n\nclass AliasedOptionError(DistutilsError):\n \"\"\"\n Aliases entries in config files should not be existing.\n In section '{section}' we found multiple appearances of options {options}.\"\"\"\n\n\nclass AtlasNotFoundError(NotFoundError):\n \"\"\"\n Atlas (http://github.com/math-atlas/math-atlas) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [atlas]) or by setting\n the ATLAS environment variable.\"\"\"\n\n\nclass FlameNotFoundError(NotFoundError):\n \"\"\"\n FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [flame]).\"\"\"\n\n\nclass LapackNotFoundError(NotFoundError):\n \"\"\"\n Lapack (http://www.netlib.org/lapack/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [lapack]) or by setting\n the LAPACK environment variable.\"\"\"\n\n\nclass LapackSrcNotFoundError(LapackNotFoundError):\n \"\"\"\n Lapack (http://www.netlib.org/lapack/) sources not found.\n Directories to search for the sources can be specified in the\n numpy/distutils/site.cfg file (section [lapack_src]) or by setting\n the LAPACK_SRC environment variable.\"\"\"\n\n\nclass LapackILP64NotFoundError(NotFoundError):\n \"\"\"\n 64-bit Lapack libraries not found.\n 
Known libraries in numpy/distutils/site.cfg file are:\n openblas64_, openblas_ilp64\n \"\"\"\n\nclass BlasOptNotFoundError(NotFoundError):\n \"\"\"\n Optimized (vendor) Blas libraries are not found.\n Falls back to netlib Blas library which has worse performance.\n A better performance should be easily gained by switching\n Blas library.\"\"\"\n\nclass BlasNotFoundError(NotFoundError):\n \"\"\"\n Blas (http://www.netlib.org/blas/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [blas]) or by setting\n the BLAS environment variable.\"\"\"\n\nclass BlasILP64NotFoundError(NotFoundError):\n \"\"\"\n 64-bit Blas libraries not found.\n Known libraries in numpy/distutils/site.cfg file are:\n openblas64_, openblas_ilp64\n \"\"\"\n\nclass BlasSrcNotFoundError(BlasNotFoundError):\n \"\"\"\n Blas (http://www.netlib.org/blas/) sources not found.\n Directories to search for the sources can be specified in the\n numpy/distutils/site.cfg file (section [blas_src]) or by setting\n the BLAS_SRC environment variable.\"\"\"\n\n\nclass FFTWNotFoundError(NotFoundError):\n \"\"\"\n FFTW (http://www.fftw.org/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [fftw]) or by setting\n the FFTW environment variable.\"\"\"\n\n\nclass DJBFFTNotFoundError(NotFoundError):\n \"\"\"\n DJBFFT (https://cr.yp.to/djbfft.html) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [djbfft]) or by setting\n the DJBFFT environment variable.\"\"\"\n\n\nclass NumericNotFoundError(NotFoundError):\n \"\"\"\n Numeric (https://www.numpy.org/) module not found.\n Get it from above location, install it, and retry setup.py.\"\"\"\n\n\nclass X11NotFoundError(NotFoundError):\n \"\"\"X11 libraries not found.\"\"\"\n\n\nclass UmfpackNotFoundError(NotFoundError):\n \"\"\"\n UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/)\n not found. Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [umfpack]) or by setting\n the UMFPACK environment variable.\"\"\"\n\n\nclass system_info:\n\n \"\"\" get_info() is the only public method. Don't use others.\n \"\"\"\n dir_env_var = None\n # XXX: search_static_first is disabled by default, may disappear in\n # future unless it is proved to be useful.\n search_static_first = 0\n # The base-class section name is a random word \"ALL\" and is not really\n # intended for general use. It cannot be None nor can it be DEFAULT as\n # these break the ConfigParser. 
See gh-15338\n section = 'ALL'\n saved_results = {}\n\n notfounderror = NotFoundError\n\n def __init__(self,\n default_lib_dirs=default_lib_dirs,\n default_include_dirs=default_include_dirs,\n ):\n self.__class__.info = {}\n self.local_prefixes = []\n defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),\n 'include_dirs': os.pathsep.join(default_include_dirs),\n 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),\n 'rpath': '',\n 'src_dirs': os.pathsep.join(default_src_dirs),\n 'search_static_first': str(self.search_static_first),\n 'extra_compile_args': '', 'extra_link_args': ''}\n self.cp = ConfigParser(defaults)\n self.files = []\n self.files.extend(get_standard_file('.numpy-site.cfg'))\n self.files.extend(get_standard_file('site.cfg'))\n self.parse_config_files()\n\n if self.section is not None:\n self.search_static_first = self.cp.getboolean(\n self.section, 'search_static_first')\n assert isinstance(self.search_static_first, int)\n\n def parse_config_files(self):\n self.cp.read(self.files)\n if not self.cp.has_section(self.section):\n if self.section is not None:\n self.cp.add_section(self.section)\n\n def calc_libraries_info(self):\n libs = self.get_libraries()\n dirs = self.get_lib_dirs()\n # The extensions use runtime_library_dirs\n r_dirs = self.get_runtime_lib_dirs()\n # Intrinsic distutils use rpath, we simply append both entries\n # as though they were one entry\n r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))\n info = {}\n for lib in libs:\n i = self.check_libs(dirs, [lib])\n if i is not None:\n dict_append(info, **i)\n else:\n log.info('Library %s was not found. Ignoring' % (lib))\n\n if r_dirs:\n i = self.check_libs(r_dirs, [lib])\n if i is not None:\n # Swap library keywords found to runtime_library_dirs\n # the libraries are insisting on the user having defined\n # them using the library_dirs, and not necessarily by\n # runtime_library_dirs\n del i['libraries']\n i['runtime_library_dirs'] = i.pop('library_dirs')\n dict_append(info, **i)\n else:\n log.info('Runtime library %s was not found. 
Ignoring' % (lib))\n\n return info\n\n def set_info(self, **info):\n if info:\n lib_info = self.calc_libraries_info()\n dict_append(info, **lib_info)\n # Update extra information\n extra_info = self.calc_extra_info()\n dict_append(info, **extra_info)\n self.saved_results[self.__class__.__name__] = info\n\n def get_option_single(self, *options):\n \"\"\" Ensure that only one of `options` are found in the section\n\n Parameters\n ----------\n *options : list of str\n a list of options to be found in the section (``self.section``)\n\n Returns\n -------\n str :\n the option that is uniquely found in the section\n\n Raises\n ------\n AliasedOptionError :\n in case more than one of the options are found\n \"\"\"\n found = [self.cp.has_option(self.section, opt) for opt in options]\n if sum(found) == 1:\n return options[found.index(True)]\n elif sum(found) == 0:\n # nothing is found anyways\n return options[0]\n\n # Else we have more than 1 key found\n if AliasedOptionError.__doc__ is None:\n raise AliasedOptionError()\n raise AliasedOptionError(AliasedOptionError.__doc__.format(\n section=self.section, options='[{}]'.format(', '.join(options))))\n\n\n def has_info(self):\n return self.__class__.__name__ in self.saved_results\n\n def calc_extra_info(self):\n \"\"\" Updates the information in the current information with\n respect to these flags:\n extra_compile_args\n extra_link_args\n \"\"\"\n info = {}\n for key in ['extra_compile_args', 'extra_link_args']:\n # Get values\n opt = self.cp.get(self.section, key)\n opt = _shell_utils.NativeParser.split(opt)\n if opt:\n tmp = {key: opt}\n dict_append(info, **tmp)\n return info\n\n def get_info(self, notfound_action=0):\n \"\"\" Return a dictionary with items that are compatible\n with numpy.distutils.setup keyword arguments.\n \"\"\"\n flag = 0\n if not self.has_info():\n flag = 1\n log.info(self.__class__.__name__ + ':')\n if hasattr(self, 'calc_info'):\n self.calc_info()\n if notfound_action:\n if not self.has_info():\n if notfound_action == 1:\n warnings.warn(self.notfounderror.__doc__, stacklevel=2)\n elif notfound_action == 2:\n raise self.notfounderror(self.notfounderror.__doc__)\n else:\n raise ValueError(repr(notfound_action))\n\n if not self.has_info():\n log.info(' NOT AVAILABLE')\n self.set_info()\n else:\n log.info(' FOUND:')\n\n res = self.saved_results.get(self.__class__.__name__)\n if log.get_threshold() <= log.INFO and flag:\n for k, v in res.items():\n v = str(v)\n if k in ['sources', 'libraries'] and len(v) > 270:\n v = v[:120] + '...\\n...\\n...' 
+ v[-120:]\n log.info(' %s = %s', k, v)\n log.info('')\n\n return copy.deepcopy(res)\n\n def get_paths(self, section, key):\n dirs = self.cp.get(section, key).split(os.pathsep)\n env_var = self.dir_env_var\n if env_var:\n if is_sequence(env_var):\n e0 = env_var[-1]\n for e in env_var:\n if e in os.environ:\n e0 = e\n break\n if not env_var[0] == e0:\n log.info('Setting %s=%s' % (env_var[0], e0))\n env_var = e0\n if env_var and env_var in os.environ:\n d = os.environ[env_var]\n if d == 'None':\n log.info('Disabled %s: %s',\n self.__class__.__name__, '(%s is None)'\n % (env_var,))\n return []\n if os.path.isfile(d):\n dirs = [os.path.dirname(d)] + dirs\n l = getattr(self, '_lib_names', [])\n if len(l) == 1:\n b = os.path.basename(d)\n b = os.path.splitext(b)[0]\n if b[:3] == 'lib':\n log.info('Replacing _lib_names[0]==%r with %r' \\\n % (self._lib_names[0], b[3:]))\n self._lib_names[0] = b[3:]\n else:\n ds = d.split(os.pathsep)\n ds2 = []\n for d in ds:\n if os.path.isdir(d):\n ds2.append(d)\n for dd in ['include', 'lib']:\n d1 = os.path.join(d, dd)\n if os.path.isdir(d1):\n ds2.append(d1)\n dirs = ds2 + dirs\n default_dirs = self.cp.get(self.section, key).split(os.pathsep)\n dirs.extend(default_dirs)\n ret = []\n for d in dirs:\n if len(d) > 0 and not os.path.isdir(d):\n warnings.warn('Specified path %s is invalid.' % d, stacklevel=2)\n continue\n\n if d not in ret:\n ret.append(d)\n\n log.debug('( %s = %s )', key, ':'.join(ret))\n return ret\n\n def get_lib_dirs(self, key='library_dirs'):\n return self.get_paths(self.section, key)\n\n def get_runtime_lib_dirs(self, key='runtime_library_dirs'):\n path = self.get_paths(self.section, key)\n if path == ['']:\n path = []\n return path\n\n def get_include_dirs(self, key='include_dirs'):\n return self.get_paths(self.section, key)\n\n def get_src_dirs(self, key='src_dirs'):\n return self.get_paths(self.section, key)\n\n def get_libs(self, key, default):\n try:\n libs = self.cp.get(self.section, key)\n except NoOptionError:\n if not default:\n return []\n if is_string(default):\n return [default]\n return default\n return [b for b in [a.strip() for a in libs.split(',')] if b]\n\n def get_libraries(self, key='libraries'):\n if hasattr(self, '_lib_names'):\n return self.get_libs(key, default=self._lib_names)\n else:\n return self.get_libs(key, '')\n\n def library_extensions(self):\n c = customized_ccompiler()\n static_exts = []\n if c.compiler_type != 'msvc':\n # MSVC doesn't understand binutils\n static_exts.append('.a')\n if sys.platform == 'win32':\n static_exts.append('.lib') # .lib is used by MSVC and others\n if self.search_static_first:\n exts = static_exts + [so_ext]\n else:\n exts = [so_ext] + static_exts\n if sys.platform == 'cygwin':\n exts.append('.dll.a')\n if sys.platform == 'darwin':\n exts.append('.dylib')\n return exts\n\n def check_libs(self, lib_dirs, libs, opt_libs=[]):\n \"\"\"If static or shared libraries are available then return\n their info dictionary.\n\n Checks for all libraries as shared libraries first, then\n static (or vice versa if self.search_static_first is True).\n \"\"\"\n exts = self.library_extensions()\n info = None\n for ext in exts:\n info = self._check_libs(lib_dirs, libs, opt_libs, [ext])\n if info is not None:\n break\n if not info:\n log.info(' libraries %s not found in %s', ','.join(libs),\n lib_dirs)\n return info\n\n def check_libs2(self, lib_dirs, libs, opt_libs=[]):\n \"\"\"If static or shared libraries are available then return\n their info dictionary.\n\n Checks each library for shared or static.\n 
\"\"\"\n exts = self.library_extensions()\n info = self._check_libs(lib_dirs, libs, opt_libs, exts)\n if not info:\n log.info(' libraries %s not found in %s', ','.join(libs),\n lib_dirs)\n\n return info\n\n def _find_lib(self, lib_dir, lib, exts):\n assert is_string(lib_dir)\n # under windows first try without 'lib' prefix\n if sys.platform == 'win32':\n lib_prefixes = ['', 'lib']\n else:\n lib_prefixes = ['lib']\n # for each library name, see if we can find a file for it.\n for ext in exts:\n for prefix in lib_prefixes:\n p = self.combine_paths(lib_dir, prefix + lib + ext)\n if p:\n break\n if p:\n assert len(p) == 1\n # ??? splitext on p[0] would do this for cygwin\n # doesn't seem correct\n if ext == '.dll.a':\n lib += '.dll'\n if ext == '.lib':\n lib = prefix + lib\n return lib\n\n return False\n\n def _find_libs(self, lib_dirs, libs, exts):\n # make sure we preserve the order of libs, as it can be important\n found_dirs, found_libs = [], []\n for lib in libs:\n for lib_dir in lib_dirs:\n found_lib = self._find_lib(lib_dir, lib, exts)\n if found_lib:\n found_libs.append(found_lib)\n if lib_dir not in found_dirs:\n found_dirs.append(lib_dir)\n break\n return found_dirs, found_libs\n\n def _check_libs(self, lib_dirs, libs, opt_libs, exts):\n \"\"\"Find mandatory and optional libs in expected paths.\n\n Missing optional libraries are silently forgotten.\n \"\"\"\n if not is_sequence(lib_dirs):\n lib_dirs = [lib_dirs]\n # First, try to find the mandatory libraries\n found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)\n if len(found_libs) > 0 and len(found_libs) == len(libs):\n # Now, check for optional libraries\n opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)\n found_libs.extend(opt_found_libs)\n for lib_dir in opt_found_dirs:\n if lib_dir not in found_dirs:\n found_dirs.append(lib_dir)\n info = {'libraries': found_libs, 'library_dirs': found_dirs}\n return info\n else:\n return None\n\n def combine_paths(self, *args):\n \"\"\"Return a list of existing paths composed by all combinations\n of items from the arguments.\n \"\"\"\n return combine_paths(*args)\n\n\nclass fft_opt_info(system_info):\n\n def calc_info(self):\n info = {}\n fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')\n djbfft_info = get_info('djbfft')\n if fftw_info:\n dict_append(info, **fftw_info)\n if djbfft_info:\n dict_append(info, **djbfft_info)\n self.set_info(**info)\n return\n\n\nclass fftw_info(system_info):\n #variables to override\n section = 'fftw'\n dir_env_var = 'FFTW'\n notfounderror = FFTWNotFoundError\n ver_info = [{'name':'fftw3',\n 'libs':['fftw3'],\n 'includes':['fftw3.h'],\n 'macros':[('SCIPY_FFTW3_H', None)]},\n {'name':'fftw2',\n 'libs':['rfftw', 'fftw'],\n 'includes':['fftw.h', 'rfftw.h'],\n 'macros':[('SCIPY_FFTW_H', None)]}]\n\n def calc_ver_info(self, ver_param):\n \"\"\"Returns True on successful version detection, else False\"\"\"\n lib_dirs = self.get_lib_dirs()\n incl_dirs = self.get_include_dirs()\n\n opt = self.get_option_single(self.section + '_libs', 'libraries')\n libs = self.get_libs(opt, ver_param['libs'])\n info = self.check_libs(lib_dirs, libs)\n if info is not None:\n flag = 0\n for d in incl_dirs:\n if len(self.combine_paths(d, ver_param['includes'])) \\\n == len(ver_param['includes']):\n dict_append(info, include_dirs=[d])\n flag = 1\n break\n if flag:\n dict_append(info, define_macros=ver_param['macros'])\n else:\n info = None\n if info is not None:\n self.set_info(**info)\n return True\n else:\n log.info(' %s not found' 
% (ver_param['name']))\n return False\n\n def calc_info(self):\n for i in self.ver_info:\n if self.calc_ver_info(i):\n break\n\n\nclass fftw2_info(fftw_info):\n #variables to override\n section = 'fftw'\n dir_env_var = 'FFTW'\n notfounderror = FFTWNotFoundError\n ver_info = [{'name':'fftw2',\n 'libs':['rfftw', 'fftw'],\n 'includes':['fftw.h', 'rfftw.h'],\n 'macros':[('SCIPY_FFTW_H', None)]}\n ]\n\n\nclass fftw3_info(fftw_info):\n #variables to override\n section = 'fftw3'\n dir_env_var = 'FFTW3'\n notfounderror = FFTWNotFoundError\n ver_info = [{'name':'fftw3',\n 'libs':['fftw3'],\n 'includes':['fftw3.h'],\n 'macros':[('SCIPY_FFTW3_H', None)]},\n ]\n\n\nclass dfftw_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'dfftw',\n 'libs':['drfftw', 'dfftw'],\n 'includes':['dfftw.h', 'drfftw.h'],\n 'macros':[('SCIPY_DFFTW_H', None)]}]\n\n\nclass sfftw_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'sfftw',\n 'libs':['srfftw', 'sfftw'],\n 'includes':['sfftw.h', 'srfftw.h'],\n 'macros':[('SCIPY_SFFTW_H', None)]}]\n\n\nclass fftw_threads_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'fftw threads',\n 'libs':['rfftw_threads', 'fftw_threads'],\n 'includes':['fftw_threads.h', 'rfftw_threads.h'],\n 'macros':[('SCIPY_FFTW_THREADS_H', None)]}]\n\n\nclass dfftw_threads_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'dfftw threads',\n 'libs':['drfftw_threads', 'dfftw_threads'],\n 'includes':['dfftw_threads.h', 'drfftw_threads.h'],\n 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]\n\n\nclass sfftw_threads_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'sfftw threads',\n 'libs':['srfftw_threads', 'sfftw_threads'],\n 'includes':['sfftw_threads.h', 'srfftw_threads.h'],\n 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]\n\n\nclass djbfft_info(system_info):\n section = 'djbfft'\n dir_env_var = 'DJBFFT'\n notfounderror = DJBFFTNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend(self.combine_paths(d, ['djbfft']) + [d])\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n incl_dirs = self.get_include_dirs()\n info = None\n for d in lib_dirs:\n p = self.combine_paths(d, ['djbfft.a'])\n if p:\n info = {'extra_objects': p}\n break\n p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])\n if p:\n info = {'libraries': ['djbfft'], 'library_dirs': [d]}\n break\n if info is None:\n return\n for d in incl_dirs:\n if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:\n dict_append(info, include_dirs=[d],\n define_macros=[('SCIPY_DJBFFT_H', None)])\n self.set_info(**info)\n return\n return\n\n\nclass mkl_info(system_info):\n section = 'mkl'\n dir_env_var = 'MKLROOT'\n _lib_mkl = ['mkl_rt']\n\n def get_mkl_rootdir(self):\n mklroot = os.environ.get('MKLROOT', None)\n if mklroot is not None:\n return mklroot\n paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)\n ld_so_conf = '/etc/ld.so.conf'\n if os.path.isfile(ld_so_conf):\n with open(ld_so_conf, 'r') as f:\n for d in f:\n d = d.strip()\n if d:\n paths.append(d)\n intel_mkl_dirs = []\n for path in paths:\n path_atoms = path.split(os.sep)\n for m in path_atoms:\n if m.startswith('mkl'):\n d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])\n intel_mkl_dirs.append(d)\n break\n for d in paths:\n dirs = glob(os.path.join(d, 'mkl', 
'*'))\n dirs += glob(os.path.join(d, 'mkl*'))\n for sub_dir in dirs:\n if os.path.isdir(os.path.join(sub_dir, 'lib')):\n return sub_dir\n return None\n\n def __init__(self):\n mklroot = self.get_mkl_rootdir()\n if mklroot is None:\n system_info.__init__(self)\n else:\n from .cpuinfo import cpu\n if cpu.is_Itanium():\n plt = '64'\n elif cpu.is_Intel() and cpu.is_64bit():\n plt = 'intel64'\n else:\n plt = '32'\n system_info.__init__(\n self,\n default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],\n default_include_dirs=[os.path.join(mklroot, 'include')])\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n incl_dirs = self.get_include_dirs()\n opt = self.get_option_single('mkl_libs', 'libraries')\n mkl_libs = self.get_libs(opt, self._lib_mkl)\n info = self.check_libs2(lib_dirs, mkl_libs)\n if info is None:\n return\n dict_append(info,\n define_macros=[('SCIPY_MKL_H', None),\n ('HAVE_CBLAS', None)],\n include_dirs=incl_dirs)\n if sys.platform == 'win32':\n pass # win32 has no pthread library\n else:\n dict_append(info, libraries=['pthread'])\n self.set_info(**info)\n\n\nclass lapack_mkl_info(mkl_info):\n pass\n\n\nclass blas_mkl_info(mkl_info):\n pass\n\n\nclass atlas_info(system_info):\n section = 'atlas'\n dir_env_var = 'ATLAS'\n _lib_names = ['f77blas', 'cblas']\n if sys.platform[:7] == 'freebsd':\n _lib_atlas = ['atlas_r']\n _lib_lapack = ['alapack_r']\n else:\n _lib_atlas = ['atlas']\n _lib_lapack = ['lapack']\n\n notfounderror = AtlasNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',\n 'sse', '3dnow', 'sse2']) + [d])\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n info = {}\n opt = self.get_option_single('atlas_libs', 'libraries')\n atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)\n lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)\n atlas = None\n lapack = None\n atlas_1 = None\n for d in lib_dirs:\n atlas = self.check_libs2(d, atlas_libs, [])\n if atlas is not None:\n lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])\n lapack = self.check_libs2(lib_dirs2, lapack_libs, [])\n if lapack is not None:\n break\n if atlas:\n atlas_1 = atlas\n log.info(self.__class__)\n if atlas is None:\n atlas = atlas_1\n if atlas is None:\n return\n include_dirs = self.get_include_dirs()\n h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])\n h = h[0]\n if h:\n h = os.path.dirname(h)\n dict_append(info, include_dirs=[h])\n info['language'] = 'c'\n if lapack is not None:\n dict_append(info, **lapack)\n dict_append(info, **atlas)\n elif 'lapack_atlas' in atlas['libraries']:\n dict_append(info, **atlas)\n dict_append(info,\n define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])\n self.set_info(**info)\n return\n else:\n dict_append(info, **atlas)\n dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])\n message = textwrap.dedent(\"\"\"\n *********************************************************************\n Could not find lapack library within the ATLAS installation.\n *********************************************************************\n \"\"\")\n warnings.warn(message, stacklevel=2)\n self.set_info(**info)\n return\n\n # Check if lapack library is complete, only warn if it is not.\n lapack_dir = lapack['library_dirs'][0]\n lapack_name = lapack['libraries'][0]\n lapack_lib = None\n lib_prefixes = ['lib']\n if sys.platform == 
'win32':\n lib_prefixes.append('')\n for e in self.library_extensions():\n for prefix in lib_prefixes:\n fn = os.path.join(lapack_dir, prefix + lapack_name + e)\n if os.path.exists(fn):\n lapack_lib = fn\n break\n if lapack_lib:\n break\n if lapack_lib is not None:\n sz = os.stat(lapack_lib)[6]\n if sz <= 4000 * 1024:\n message = textwrap.dedent(\"\"\"\n *********************************************************************\n Lapack library (from ATLAS) is probably incomplete:\n size of %s is %sk (expected >4000k)\n\n Follow the instructions in the KNOWN PROBLEMS section of the file\n numpy/INSTALL.txt.\n *********************************************************************\n \"\"\") % (lapack_lib, sz / 1024)\n warnings.warn(message, stacklevel=2)\n else:\n info['language'] = 'f77'\n\n atlas_version, atlas_extra_info = get_atlas_version(**atlas)\n dict_append(info, **atlas_extra_info)\n\n self.set_info(**info)\n\n\nclass atlas_blas_info(atlas_info):\n _lib_names = ['f77blas', 'cblas']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n info = {}\n opt = self.get_option_single('atlas_libs', 'libraries')\n atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)\n atlas = self.check_libs2(lib_dirs, atlas_libs, [])\n if atlas is None:\n return\n include_dirs = self.get_include_dirs()\n h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])\n h = h[0]\n if h:\n h = os.path.dirname(h)\n dict_append(info, include_dirs=[h])\n info['language'] = 'c'\n info['define_macros'] = [('HAVE_CBLAS', None)]\n\n atlas_version, atlas_extra_info = get_atlas_version(**atlas)\n dict_append(atlas, **atlas_extra_info)\n\n dict_append(info, **atlas)\n\n self.set_info(**info)\n return\n\n\nclass atlas_threads_info(atlas_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['ptf77blas', 'ptcblas']\n\n\nclass atlas_blas_threads_info(atlas_blas_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['ptf77blas', 'ptcblas']\n\n\nclass lapack_atlas_info(atlas_info):\n _lib_names = ['lapack_atlas'] + atlas_info._lib_names\n\n\nclass lapack_atlas_threads_info(atlas_threads_info):\n _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names\n\n\nclass atlas_3_10_info(atlas_info):\n _lib_names = ['satlas']\n _lib_atlas = _lib_names\n _lib_lapack = _lib_names\n\n\nclass atlas_3_10_blas_info(atlas_3_10_info):\n _lib_names = ['satlas']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n info = {}\n opt = self.get_option_single('atlas_lib', 'libraries')\n atlas_libs = self.get_libs(opt, self._lib_names)\n atlas = self.check_libs2(lib_dirs, atlas_libs, [])\n if atlas is None:\n return\n include_dirs = self.get_include_dirs()\n h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])\n h = h[0]\n if h:\n h = os.path.dirname(h)\n dict_append(info, include_dirs=[h])\n info['language'] = 'c'\n info['define_macros'] = [('HAVE_CBLAS', None)]\n\n atlas_version, atlas_extra_info = get_atlas_version(**atlas)\n dict_append(atlas, **atlas_extra_info)\n\n dict_append(info, **atlas)\n\n self.set_info(**info)\n return\n\n\nclass atlas_3_10_threads_info(atlas_3_10_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['tatlas']\n _lib_atlas = _lib_names\n _lib_lapack = _lib_names\n\n\nclass atlas_3_10_blas_threads_info(atlas_3_10_blas_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['tatlas']\n\n\nclass lapack_atlas_3_10_info(atlas_3_10_info):\n pass\n\n\nclass lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):\n pass\n\n\nclass lapack_info(system_info):\n 
section = 'lapack'\n dir_env_var = 'LAPACK'\n _lib_names = ['lapack']\n notfounderror = LapackNotFoundError\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n opt = self.get_option_single('lapack_libs', 'libraries')\n lapack_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, lapack_libs, [])\n if info is None:\n return\n info['language'] = 'f77'\n self.set_info(**info)\n\n\nclass lapack_src_info(system_info):\n # LAPACK_SRC is deprecated, please do not use this!\n # Build or install a BLAS library via your package manager or from\n # source separately.\n section = 'lapack_src'\n dir_env_var = 'LAPACK_SRC'\n notfounderror = LapackSrcNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'dgesv.f')):\n src_dir = d\n break\n if not src_dir:\n #XXX: Get sources from netlib. May be ask first.\n return\n # The following is extracted from LAPACK-3.0/SRC/Makefile.\n # Added missing names from lapack-lite-3.1.1/SRC/Makefile\n # while keeping removed names for Lapack-3.0 compatibility.\n allaux = '''\n ilaenv ieeeck lsame lsamen xerbla\n iparmq\n ''' # *.f\n laux = '''\n bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1\n laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2\n lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre\n larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4\n lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1\n lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf\n stebz stedc steqr sterf\n\n larra larrc larrd larr larrk larrj larrr laneg laisnan isnan\n lazq3 lazq4\n ''' # [s|d]*.f\n lasrc = '''\n gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak\n gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv\n gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2\n geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd\n gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal\n gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd\n ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein\n hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0\n lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb\n lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp\n laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv\n lartv larz larzb larzt laswp lasyf latbs latdf latps latrd\n latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv\n pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2\n potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri\n pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs\n spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv\n sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2\n tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs\n trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs\n tzrqf tzrzf\n\n lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5\n ''' # [s|c|d|z]*.f\n sd_lasrc = '''\n laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l\n org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr\n orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3\n ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx\n sbtrd spev 
spevd spevx spgst spgv spgvd spgvx sptrd stev stevd\n stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd\n sygvx sytd2 sytrd\n ''' # [s|d]*.f\n cz_lasrc = '''\n bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev\n heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv\n hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd\n hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf\n hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7\n laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe\n laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv\n spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq\n ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2\n unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr\n ''' # [c|z]*.f\n #######\n sclaux = laux + ' econd ' # s*.f\n dzlaux = laux + ' secnd ' # d*.f\n slasrc = lasrc + sd_lasrc # s*.f\n dlasrc = lasrc + sd_lasrc # d*.f\n clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f\n zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f\n oclasrc = ' icmax1 scsum1 ' # *.f\n ozlasrc = ' izmax1 dzsum1 ' # *.f\n sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \\\n + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \\\n + ['c%s.f' % f for f in (clasrc).split()] \\\n + ['z%s.f' % f for f in (zlasrc).split()] \\\n + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]\n sources = [os.path.join(src_dir, f) for f in sources]\n # Lapack 3.1:\n src_dir2 = os.path.join(src_dir, '..', 'INSTALL')\n sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']\n # Lapack 3.2.1:\n sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']\n sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']\n sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']\n # Should we check here actual existence of source files?\n # Yes, the file listing is different between 3.0 and 3.1\n # versions.\n sources = [f for f in sources if os.path.isfile(f)]\n info = {'sources': sources, 'language': 'f77'}\n self.set_info(**info)\n\natlas_version_c_text = r'''\n/* This file is generated from numpy/distutils/system_info.py */\nvoid ATL_buildinfo(void);\nint main(void) {\n ATL_buildinfo();\n return 0;\n}\n'''\n\n_cached_atlas_version = {}\n\n\ndef get_atlas_version(**config):\n libraries = config.get('libraries', [])\n library_dirs = config.get('library_dirs', [])\n key = (tuple(libraries), tuple(library_dirs))\n if key in _cached_atlas_version:\n return _cached_atlas_version[key]\n c = cmd_config(Distribution())\n atlas_version = None\n info = {}\n try:\n s, o = c.get_output(atlas_version_c_text,\n libraries=libraries, library_dirs=library_dirs,\n )\n if s and re.search(r'undefined reference to `_gfortran', o, re.M):\n s, o = c.get_output(atlas_version_c_text,\n libraries=libraries + ['gfortran'],\n library_dirs=library_dirs,\n )\n if not s:\n warnings.warn(textwrap.dedent(\"\"\"\n *****************************************************\n Linkage with ATLAS requires gfortran. 
Use\n\n python setup.py config_fc --fcompiler=gnu95 ...\n\n when building extension libraries that use ATLAS.\n Make sure that -lgfortran is used for C++ extensions.\n *****************************************************\n \"\"\"), stacklevel=2)\n dict_append(info, language='f90',\n define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])\n except Exception: # failed to get version from file -- maybe on Windows\n # look at directory name\n for o in library_dirs:\n m = re.search(r'ATLAS_(?P<version>\\d+[.]\\d+[.]\\d+)_', o)\n if m:\n atlas_version = m.group('version')\n if atlas_version is not None:\n break\n\n # final choice --- look at ATLAS_VERSION environment\n # variable\n if atlas_version is None:\n atlas_version = os.environ.get('ATLAS_VERSION', None)\n if atlas_version:\n dict_append(info, define_macros=[(\n 'ATLAS_INFO', _c_string_literal(atlas_version))\n ])\n else:\n dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])\n return atlas_version or '?.?.?', info\n\n if not s:\n m = re.search(r'ATLAS version (?P<version>\\d+[.]\\d+[.]\\d+)', o)\n if m:\n atlas_version = m.group('version')\n if atlas_version is None:\n if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):\n atlas_version = '3.2.1_pre3.3.6'\n else:\n log.info('Status: %d', s)\n log.info('Output: %s', o)\n\n elif atlas_version == '3.2.1_pre3.3.6':\n dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])\n else:\n dict_append(info, define_macros=[(\n 'ATLAS_INFO', _c_string_literal(atlas_version))\n ])\n result = _cached_atlas_version[key] = atlas_version, info\n return result\n\n\nclass lapack_opt_info(system_info):\n notfounderror = LapackNotFoundError\n\n # List of all known LAPACK libraries, in the default order\n lapack_order = ['mkl', 'openblas', 'flame',\n 'accelerate', 'atlas', 'lapack']\n order_env_var_name = 'NPY_LAPACK_ORDER'\n\n def _calc_info_mkl(self):\n info = get_info('lapack_mkl')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_openblas(self):\n info = get_info('openblas_lapack')\n if info:\n self.set_info(**info)\n return True\n info = get_info('openblas_clapack')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_flame(self):\n info = get_info('flame')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_atlas(self):\n info = get_info('atlas_3_10_threads')\n if not info:\n info = get_info('atlas_3_10')\n if not info:\n info = get_info('atlas_threads')\n if not info:\n info = get_info('atlas')\n if info:\n # Figure out if ATLAS has lapack...\n # If not we need the lapack library, but not BLAS!\n l = info.get('define_macros', [])\n if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \\\n or ('ATLAS_WITHOUT_LAPACK', None) in l:\n # Get LAPACK (with possible warnings)\n # If not found we don't accept anything\n # since we can't use ATLAS with LAPACK!\n lapack_info = self._get_info_lapack()\n if not lapack_info:\n return False\n dict_append(info, **lapack_info)\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_accelerate(self):\n info = get_info('accelerate')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _get_info_blas(self):\n # Default to get the optimized BLAS implementation\n info = get_info('blas_opt')\n if not info:\n warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)\n info_src = get_info('blas_src')\n if not info_src:\n warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)\n return {}\n dict_append(info, libraries=[('fblas_src', info_src)])\n 
return info\n\n def _get_info_lapack(self):\n info = get_info('lapack')\n if not info:\n warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3)\n info_src = get_info('lapack_src')\n if not info_src:\n warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3)\n return {}\n dict_append(info, libraries=[('flapack_src', info_src)])\n return info\n\n def _calc_info_lapack(self):\n info = self._get_info_lapack()\n if info:\n info_blas = self._get_info_blas()\n dict_append(info, **info_blas)\n dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_from_envvar(self):\n info = {}\n info['language'] = 'f77'\n info['libraries'] = []\n info['include_dirs'] = []\n info['define_macros'] = []\n info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split()\n self.set_info(**info)\n return True\n\n def _calc_info(self, name):\n return getattr(self, '_calc_info_{}'.format(name))()\n\n def calc_info(self):\n lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name)\n if len(unknown_order) > 0:\n raise ValueError(\"lapack_opt_info user defined \"\n \"LAPACK order has unacceptable \"\n \"values: {}\".format(unknown_order))\n\n if 'NPY_LAPACK_LIBS' in os.environ:\n # Bypass autodetection, set language to F77 and use env var linker\n # flags directly\n self._calc_info_from_envvar()\n return\n\n for lapack in lapack_order:\n if self._calc_info(lapack):\n return\n\n if 'lapack' not in lapack_order:\n # Since the user may request *not* to use any library, we still need\n # to raise warnings to signal missing packages!\n warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2)\n warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)\n\n\nclass _ilp64_opt_info_mixin:\n symbol_suffix = None\n symbol_prefix = None\n\n def _check_info(self, info):\n macros = dict(info.get('define_macros', []))\n prefix = macros.get('BLAS_SYMBOL_PREFIX', '')\n suffix = macros.get('BLAS_SYMBOL_SUFFIX', '')\n\n if self.symbol_prefix not in (None, prefix):\n return False\n\n if self.symbol_suffix not in (None, suffix):\n return False\n\n return bool(info)\n\n\nclass lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin):\n notfounderror = LapackILP64NotFoundError\n lapack_order = ['openblas64_', 'openblas_ilp64']\n order_env_var_name = 'NPY_LAPACK_ILP64_ORDER'\n\n def _calc_info(self, name):\n info = get_info(name + '_lapack')\n if self._check_info(info):\n self.set_info(**info)\n return True\n return False\n\n\nclass lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info):\n # Same as lapack_ilp64_opt_info, but fix symbol names\n symbol_prefix = ''\n symbol_suffix = ''\n\n\nclass lapack64__opt_info(lapack_ilp64_opt_info):\n symbol_prefix = ''\n symbol_suffix = '64_'\n\n\nclass blas_opt_info(system_info):\n notfounderror = BlasNotFoundError\n # List of all known BLAS libraries, in the default order\n\n blas_order = ['mkl', 'blis', 'openblas',\n 'accelerate', 'atlas', 'blas']\n order_env_var_name = 'NPY_BLAS_ORDER'\n\n def _calc_info_mkl(self):\n info = get_info('blas_mkl')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_blis(self):\n info = get_info('blis')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_openblas(self):\n info = get_info('openblas')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_atlas(self):\n info = get_info('atlas_3_10_blas_threads')\n if not info:\n info = 
get_info('atlas_3_10_blas')\n if not info:\n info = get_info('atlas_blas_threads')\n if not info:\n info = get_info('atlas_blas')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_accelerate(self):\n info = get_info('accelerate')\n if info:\n self.set_info(**info)\n return True\n return False\n\n def _calc_info_blas(self):\n # Warn about a non-optimized BLAS library\n warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3)\n info = {}\n dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])\n\n blas = get_info('blas')\n if blas:\n dict_append(info, **blas)\n else:\n # Not even BLAS was found!\n warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)\n\n blas_src = get_info('blas_src')\n if not blas_src:\n warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)\n return False\n dict_append(info, libraries=[('fblas_src', blas_src)])\n\n self.set_info(**info)\n return True\n\n def _calc_info_from_envvar(self):\n info = {}\n info['language'] = 'f77'\n info['libraries'] = []\n info['include_dirs'] = []\n info['define_macros'] = []\n info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split()\n if 'NPY_CBLAS_LIBS' in os.environ:\n info['define_macros'].append(('HAVE_CBLAS', None))\n info['extra_link_args'].extend(\n os.environ['NPY_CBLAS_LIBS'].split())\n self.set_info(**info)\n return True\n\n def _calc_info(self, name):\n return getattr(self, '_calc_info_{}'.format(name))()\n\n def calc_info(self):\n blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name)\n if len(unknown_order) > 0:\n raise ValueError(\"blas_opt_info user defined BLAS order has unacceptable values: {}\".format(unknown_order))\n\n if 'NPY_BLAS_LIBS' in os.environ:\n # Bypass autodetection, set language to F77 and use env var linker\n # flags directly\n self._calc_info_from_envvar()\n return\n\n for blas in blas_order:\n if self._calc_info(blas):\n return\n\n if 'blas' not in blas_order:\n # Since the user may request *not* to use any library, we still need\n # to raise warnings to signal missing packages!\n warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2)\n warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)\n\n\nclass blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin):\n notfounderror = BlasILP64NotFoundError\n blas_order = ['openblas64_', 'openblas_ilp64']\n order_env_var_name = 'NPY_BLAS_ILP64_ORDER'\n\n def _calc_info(self, name):\n info = get_info(name)\n if self._check_info(info):\n self.set_info(**info)\n return True\n return False\n\n\nclass blas_ilp64_plain_opt_info(blas_ilp64_opt_info):\n symbol_prefix = ''\n symbol_suffix = ''\n\n\nclass blas64__opt_info(blas_ilp64_opt_info):\n symbol_prefix = ''\n symbol_suffix = '64_'\n\n\nclass cblas_info(system_info):\n section = 'cblas'\n dir_env_var = 'CBLAS'\n # No default as it's used only in blas_info\n _lib_names = []\n notfounderror = BlasNotFoundError\n\n\nclass blas_info(system_info):\n section = 'blas'\n dir_env_var = 'BLAS'\n _lib_names = ['blas']\n notfounderror = BlasNotFoundError\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n opt = self.get_option_single('blas_libs', 'libraries')\n blas_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, blas_libs, [])\n if info is None:\n return\n else:\n info['include_dirs'] = self.get_include_dirs()\n if platform.system() == 'Windows':\n # The check for windows is needed because get_cblas_libs uses the\n # same compiler that was used to compile Python and msvc is\n # often 
not installed when mingw is being used. This rough\n # treatment is not desirable, but windows is tricky.\n info['language'] = 'f77' # XXX: is it generally true?\n # If cblas is given as an option, use those\n cblas_info_obj = cblas_info()\n cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')\n cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)\n if cblas_libs:\n info['libraries'] = cblas_libs + blas_libs\n info['define_macros'] = [('HAVE_CBLAS', None)]\n else:\n lib = self.get_cblas_libs(info)\n if lib is not None:\n info['language'] = 'c'\n info['libraries'] = lib\n info['define_macros'] = [('HAVE_CBLAS', None)]\n self.set_info(**info)\n\n def get_cblas_libs(self, info):\n \"\"\" Check whether we can link with CBLAS interface\n\n This method will search through several combinations of libraries\n to check whether CBLAS is present:\n\n 1. Libraries in ``info['libraries']``, as is\n 2. As 1. but also explicitly adding ``'cblas'`` as a library\n 3. As 1. but also explicitly adding ``'blas'`` as a library\n 4. Check only library ``'cblas'``\n 5. Check only library ``'blas'``\n\n Parameters\n ----------\n info : dict\n system information dictionary for compilation and linking\n\n Returns\n -------\n libraries : list of str or None\n a list of libraries that enables the use of CBLAS interface.\n Returns None if not found or a compilation error occurs.\n\n Since 1.17 returns a list.\n \"\"\"\n # primitive cblas check by looking for the header and trying to link\n # cblas or blas\n c = customized_ccompiler()\n tmpdir = tempfile.mkdtemp()\n s = textwrap.dedent(\"\"\"\\\n #include <cblas.h>\n int main(int argc, const char *argv[])\n {\n double a[4] = {1,2,3,4};\n double b[4] = {5,6,7,8};\n return cblas_ddot(4, a, 1, b, 1) > 10;\n }\"\"\")\n src = os.path.join(tmpdir, 'source.c')\n try:\n with open(src, 'wt') as f:\n f.write(s)\n\n try:\n # check we can compile (find headers)\n obj = c.compile([src], output_dir=tmpdir,\n include_dirs=self.get_include_dirs())\n except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):\n return None\n\n # check we can link (find library)\n # some systems have separate cblas and blas libs.\n for libs in [info['libraries'], ['cblas'] + info['libraries'],\n ['blas'] + info['libraries'], ['cblas'], ['blas']]:\n try:\n c.link_executable(obj, os.path.join(tmpdir, \"a.out\"),\n libraries=libs,\n library_dirs=info['library_dirs'],\n extra_postargs=info.get('extra_link_args', []))\n return libs\n except distutils.ccompiler.LinkError:\n pass\n finally:\n shutil.rmtree(tmpdir)\n return None\n\n\nclass openblas_info(blas_info):\n section = 'openblas'\n dir_env_var = 'OPENBLAS'\n _lib_names = ['openblas']\n _require_symbols = []\n notfounderror = BlasNotFoundError\n\n @property\n def symbol_prefix(self):\n try:\n return self.cp.get(self.section, 'symbol_prefix')\n except NoOptionError:\n return ''\n\n @property\n def symbol_suffix(self):\n try:\n return self.cp.get(self.section, 'symbol_suffix')\n except NoOptionError:\n return ''\n\n def _calc_info(self):\n c = customized_ccompiler()\n\n lib_dirs = self.get_lib_dirs()\n\n # Prefer to use libraries over openblas_libs\n opt = self.get_option_single('openblas_libs', 'libraries')\n openblas_libs = self.get_libs(opt, self._lib_names)\n\n info = self.check_libs(lib_dirs, openblas_libs, [])\n\n if c.compiler_type == \"msvc\" and info is None:\n from numpy.distutils.fcompiler import new_fcompiler\n f = new_fcompiler(c_compiler=c)\n if f and f.compiler_type == 'gnu95':\n # Try gfortran-compatible 
library files\n info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)\n # Skip lapack check, we'd need build_ext to do it\n skip_symbol_check = True\n elif info:\n skip_symbol_check = False\n info['language'] = 'c'\n\n if info is None:\n return None\n\n # Add extra info for OpenBLAS\n extra_info = self.calc_extra_info()\n dict_append(info, **extra_info)\n\n if not (skip_symbol_check or self.check_symbols(info)):\n return None\n\n info['define_macros'] = [('HAVE_CBLAS', None)]\n if self.symbol_prefix:\n info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)]\n if self.symbol_suffix:\n info['define_macros'] += [('BLAS_SYMBOL_SUFFIX', self.symbol_suffix)]\n\n return info\n\n def calc_info(self):\n info = self._calc_info()\n if info is not None:\n self.set_info(**info)\n\n def check_msvc_gfortran_libs(self, library_dirs, libraries):\n # First, find the full path to each library directory\n library_paths = []\n for library in libraries:\n for library_dir in library_dirs:\n # MinGW static ext will be .a\n fullpath = os.path.join(library_dir, library + '.a')\n if os.path.isfile(fullpath):\n library_paths.append(fullpath)\n break\n else:\n return None\n\n # Generate numpy.distutils virtual static library file\n basename = self.__class__.__name__\n tmpdir = os.path.join(os.getcwd(), 'build', basename)\n if not os.path.isdir(tmpdir):\n os.makedirs(tmpdir)\n\n info = {'library_dirs': [tmpdir],\n 'libraries': [basename],\n 'language': 'f77'}\n\n fake_lib_file = os.path.join(tmpdir, basename + '.fobjects')\n fake_clib_file = os.path.join(tmpdir, basename + '.cobjects')\n with open(fake_lib_file, 'w') as f:\n f.write(\"\\n\".join(library_paths))\n with open(fake_clib_file, 'w') as f:\n pass\n\n return info\n\n def check_symbols(self, info):\n res = False\n c = customized_ccompiler()\n\n tmpdir = tempfile.mkdtemp()\n\n prototypes = \"\\n\".join(\"void %s%s%s();\" % (self.symbol_prefix,\n symbol_name,\n self.symbol_suffix)\n for symbol_name in self._require_symbols)\n calls = \"\\n\".join(\"%s%s%s();\" % (self.symbol_prefix,\n symbol_name,\n self.symbol_suffix)\n for symbol_name in self._require_symbols)\n s = textwrap.dedent(\"\"\"\\\n %(prototypes)s\n int main(int argc, const char *argv[])\n {\n %(calls)s\n return 0;\n }\"\"\") % dict(prototypes=prototypes, calls=calls)\n src = os.path.join(tmpdir, 'source.c')\n out = os.path.join(tmpdir, 'a.out')\n # Add the additional \"extra\" arguments\n try:\n extra_args = info['extra_link_args']\n except Exception:\n extra_args = []\n try:\n with open(src, 'wt') as f:\n f.write(s)\n obj = c.compile([src], output_dir=tmpdir)\n try:\n c.link_executable(obj, out, libraries=info['libraries'],\n library_dirs=info['library_dirs'],\n extra_postargs=extra_args)\n res = True\n except distutils.ccompiler.LinkError:\n res = False\n finally:\n shutil.rmtree(tmpdir)\n return res\n\nclass openblas_lapack_info(openblas_info):\n section = 'openblas'\n dir_env_var = 'OPENBLAS'\n _lib_names = ['openblas']\n _require_symbols = ['zungqr_']\n notfounderror = BlasNotFoundError\n\nclass openblas_clapack_info(openblas_lapack_info):\n _lib_names = ['openblas', 'lapack']\n\nclass openblas_ilp64_info(openblas_info):\n section = 'openblas_ilp64'\n dir_env_var = 'OPENBLAS_ILP64'\n _lib_names = ['openblas64']\n _require_symbols = ['dgemm_', 'cblas_dgemm']\n notfounderror = BlasILP64NotFoundError\n\n def _calc_info(self):\n info = super()._calc_info()\n if info is not None:\n info['define_macros'] += [('HAVE_BLAS_ILP64', None)]\n return info\n\nclass 
openblas_ilp64_lapack_info(openblas_ilp64_info):\n _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr']\n\n def _calc_info(self):\n info = super()._calc_info()\n if info:\n info['define_macros'] += [('HAVE_LAPACKE', None)]\n return info\n\nclass openblas64__info(openblas_ilp64_info):\n # ILP64 Openblas, with default symbol suffix\n section = 'openblas64_'\n dir_env_var = 'OPENBLAS64_'\n _lib_names = ['openblas64_']\n symbol_suffix = '64_'\n symbol_prefix = ''\n\nclass openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info):\n pass\n\nclass blis_info(blas_info):\n section = 'blis'\n dir_env_var = 'BLIS'\n _lib_names = ['blis']\n notfounderror = BlasNotFoundError\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n opt = self.get_option_single('blis_libs', 'libraries')\n blis_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs2(lib_dirs, blis_libs, [])\n if info is None:\n return\n\n # Add include dirs\n incl_dirs = self.get_include_dirs()\n dict_append(info,\n language='c',\n define_macros=[('HAVE_CBLAS', None)],\n include_dirs=incl_dirs)\n self.set_info(**info)\n\n\nclass flame_info(system_info):\n \"\"\" Usage of libflame for LAPACK operations\n\n This requires libflame to be compiled with lapack wrappers:\n\n ./configure --enable-lapack2flame ...\n\n Be aware that libflame 5.1.0 has some missing names in the shared library, so\n if you have problems, try the static flame library.\n \"\"\"\n section = 'flame'\n _lib_names = ['flame']\n notfounderror = FlameNotFoundError\n\n def check_embedded_lapack(self, info):\n \"\"\" libflame does not necessarily have a wrapper for fortran LAPACK, we need to check \"\"\"\n c = customized_ccompiler()\n\n tmpdir = tempfile.mkdtemp()\n s = textwrap.dedent(\"\"\"\\\n void zungqr_();\n int main(int argc, const char *argv[])\n {\n zungqr_();\n return 0;\n }\"\"\")\n src = os.path.join(tmpdir, 'source.c')\n out = os.path.join(tmpdir, 'a.out')\n # Add the additional \"extra\" arguments\n extra_args = info.get('extra_link_args', [])\n try:\n with open(src, 'wt') as f:\n f.write(s)\n obj = c.compile([src], output_dir=tmpdir)\n try:\n c.link_executable(obj, out, libraries=info['libraries'],\n library_dirs=info['library_dirs'],\n extra_postargs=extra_args)\n return True\n except distutils.ccompiler.LinkError:\n return False\n finally:\n shutil.rmtree(tmpdir)\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n flame_libs = self.get_libs('libraries', self._lib_names)\n\n info = self.check_libs2(lib_dirs, flame_libs, [])\n if info is None:\n return\n\n # Add the extra flag args to info\n extra_info = self.calc_extra_info()\n dict_append(info, **extra_info)\n\n if self.check_embedded_lapack(info):\n # check if the user has supplied all information required\n self.set_info(**info)\n else:\n # Try and get the BLAS lib to see if we can get it to work\n blas_info = get_info('blas_opt')\n if not blas_info:\n # since we already failed once, this ain't going to work either\n return\n\n # Now we need to merge the two dictionaries\n for key in blas_info:\n if isinstance(blas_info[key], list):\n info[key] = info.get(key, []) + blas_info[key]\n elif isinstance(blas_info[key], tuple):\n info[key] = info.get(key, ()) + blas_info[key]\n else:\n info[key] = info.get(key, '') + blas_info[key]\n\n # Now check again\n if self.check_embedded_lapack(info):\n self.set_info(**info)\n\n\nclass accelerate_info(system_info):\n section = 'accelerate'\n _lib_names = ['accelerate', 'veclib']\n notfounderror = 
BlasNotFoundError\n\n def calc_info(self):\n # Make possible to enable/disable from config file/env var\n libraries = os.environ.get('ACCELERATE')\n if libraries:\n libraries = [libraries]\n else:\n libraries = self.get_libs('libraries', self._lib_names)\n libraries = [lib.strip().lower() for lib in libraries]\n\n if (sys.platform == 'darwin' and\n not os.getenv('_PYTHON_HOST_PLATFORM', None)):\n # Use the system BLAS from Accelerate or vecLib under OSX\n args = []\n link_args = []\n if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \\\n 'x86_64' in get_platform() or \\\n 'i386' in platform.platform():\n intel = 1\n else:\n intel = 0\n if (os.path.exists('/System/Library/Frameworks'\n '/Accelerate.framework/') and\n 'accelerate' in libraries):\n if intel:\n args.extend(['-msse3'])\n args.extend([\n '-I/System/Library/Frameworks/vecLib.framework/Headers'])\n link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])\n elif (os.path.exists('/System/Library/Frameworks'\n '/vecLib.framework/') and\n 'veclib' in libraries):\n if intel:\n args.extend(['-msse3'])\n args.extend([\n '-I/System/Library/Frameworks/vecLib.framework/Headers'])\n link_args.extend(['-Wl,-framework', '-Wl,vecLib'])\n\n if args:\n self.set_info(extra_compile_args=args,\n extra_link_args=link_args,\n define_macros=[('NO_ATLAS_INFO', 3),\n ('HAVE_CBLAS', None)])\n\n return\n\nclass blas_src_info(system_info):\n # BLAS_SRC is deprecated, please do not use this!\n # Build or install a BLAS library via your package manager or from\n # source separately.\n section = 'blas_src'\n dir_env_var = 'BLAS_SRC'\n notfounderror = BlasSrcNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['blas']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'daxpy.f')):\n src_dir = d\n break\n if not src_dir:\n #XXX: Get sources from netlib. 
May be ask first.\n return\n blas1 = '''\n caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot\n dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2\n srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg\n dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax\n snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap\n scabs1\n '''\n blas2 = '''\n cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv\n chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv\n dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv\n sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger\n stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc\n zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2\n ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv\n '''\n blas3 = '''\n cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k\n dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm\n ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm\n '''\n sources = [os.path.join(src_dir, f + '.f') \\\n for f in (blas1 + blas2 + blas3).split()]\n #XXX: should we check here actual existence of source files?\n sources = [f for f in sources if os.path.isfile(f)]\n info = {'sources': sources, 'language': 'f77'}\n self.set_info(**info)\n\n\nclass x11_info(system_info):\n section = 'x11'\n notfounderror = X11NotFoundError\n _lib_names = ['X11']\n\n def __init__(self):\n system_info.__init__(self,\n default_lib_dirs=default_x11_lib_dirs,\n default_include_dirs=default_x11_include_dirs)\n\n def calc_info(self):\n if sys.platform in ['win32']:\n return\n lib_dirs = self.get_lib_dirs()\n include_dirs = self.get_include_dirs()\n opt = self.get_option_single('x11_libs', 'libraries')\n x11_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, x11_libs, [])\n if info is None:\n return\n inc_dir = None\n for d in include_dirs:\n if self.combine_paths(d, 'X11/X.h'):\n inc_dir = d\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir])\n self.set_info(**info)\n\n\nclass _numpy_info(system_info):\n section = 'Numeric'\n modulename = 'Numeric'\n notfounderror = NumericNotFoundError\n\n def __init__(self):\n include_dirs = []\n try:\n module = __import__(self.modulename)\n prefix = []\n for name in module.__file__.split(os.sep):\n if name == 'lib':\n break\n prefix.append(name)\n\n # Ask numpy for its own include path before attempting\n # anything else\n try:\n include_dirs.append(getattr(module, 'get_include')())\n except AttributeError:\n pass\n\n include_dirs.append(sysconfig.get_path('include'))\n except ImportError:\n pass\n py_incl_dir = sysconfig.get_path('include')\n include_dirs.append(py_incl_dir)\n py_pincl_dir = sysconfig.get_path('platinclude')\n if py_pincl_dir not in include_dirs:\n include_dirs.append(py_pincl_dir)\n for d in default_include_dirs:\n d = os.path.join(d, os.path.basename(py_incl_dir))\n if d not in include_dirs:\n include_dirs.append(d)\n system_info.__init__(self,\n default_lib_dirs=[],\n default_include_dirs=include_dirs)\n\n def calc_info(self):\n try:\n module = __import__(self.modulename)\n except ImportError:\n return\n info = {}\n macros = []\n for v in ['__version__', 'version']:\n vrs = getattr(module, v, None)\n if vrs is None:\n continue\n macros = [(self.modulename.upper() + '_VERSION',\n _c_string_literal(vrs)),\n (self.modulename.upper(), None)]\n break\n dict_append(info, define_macros=macros)\n include_dirs = self.get_include_dirs()\n inc_dir = None\n for d in 
include_dirs:\n if self.combine_paths(d,\n os.path.join(self.modulename,\n 'arrayobject.h')):\n inc_dir = d\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir])\n if info:\n self.set_info(**info)\n return\n\n\nclass numarray_info(_numpy_info):\n section = 'numarray'\n modulename = 'numarray'\n\n\nclass Numeric_info(_numpy_info):\n section = 'Numeric'\n modulename = 'Numeric'\n\n\nclass numpy_info(_numpy_info):\n section = 'numpy'\n modulename = 'numpy'\n\n\nclass numerix_info(system_info):\n section = 'numerix'\n\n def calc_info(self):\n which = None, None\n if os.getenv(\"NUMERIX\"):\n which = os.getenv(\"NUMERIX\"), \"environment var\"\n # If all the above fail, default to numpy.\n if which[0] is None:\n which = \"numpy\", \"defaulted\"\n try:\n import numpy # noqa: F401\n which = \"numpy\", \"defaulted\"\n except ImportError as e:\n msg1 = str(e)\n try:\n import Numeric # noqa: F401\n which = \"numeric\", \"defaulted\"\n except ImportError as e:\n msg2 = str(e)\n try:\n import numarray # noqa: F401\n which = \"numarray\", \"defaulted\"\n except ImportError as e:\n msg3 = str(e)\n log.info(msg1)\n log.info(msg2)\n log.info(msg3)\n which = which[0].strip().lower(), which[1]\n if which[0] not in [\"numeric\", \"numarray\", \"numpy\"]:\n raise ValueError(\"numerix selector must be either 'Numeric' \"\n \"or 'numarray' or 'numpy' but the value obtained\"\n \" from the %s was '%s'.\" % (which[1], which[0]))\n os.environ['NUMERIX'] = which[0]\n self.set_info(**get_info(which[0]))\n\n\nclass f2py_info(system_info):\n def calc_info(self):\n try:\n import numpy.f2py as f2py\n except ImportError:\n return\n f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')\n self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],\n include_dirs=[f2py_dir])\n return\n\n\nclass boost_python_info(system_info):\n section = 'boost_python'\n dir_env_var = 'BOOST'\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['boost*']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',\n 'module.cpp')):\n src_dir = d\n break\n if not src_dir:\n return\n py_incl_dirs = [sysconfig.get_path('include')]\n py_pincl_dir = sysconfig.get_path('platinclude')\n if py_pincl_dir not in py_incl_dirs:\n py_incl_dirs.append(py_pincl_dir)\n srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')\n bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))\n bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))\n info = {'libraries': [('boost_python_src',\n {'include_dirs': [src_dir] + py_incl_dirs,\n 'sources':bpl_srcs}\n )],\n 'include_dirs': [src_dir],\n }\n if info:\n self.set_info(**info)\n return\n\n\nclass agg2_info(system_info):\n section = 'agg2'\n dir_env_var = 'AGG2'\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['agg2*']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):\n src_dir = d\n break\n if not src_dir:\n return\n if sys.platform == 'win32':\n agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',\n 'win32', 'agg_win32_bmp.cpp'))\n else:\n 
agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))\n agg2_srcs += [os.path.join(src_dir, 'src', 'platform',\n 'X11',\n 'agg_platform_support.cpp')]\n\n info = {'libraries':\n [('agg2_src',\n {'sources': agg2_srcs,\n 'include_dirs': [os.path.join(src_dir, 'include')],\n }\n )],\n 'include_dirs': [os.path.join(src_dir, 'include')],\n }\n if info:\n self.set_info(**info)\n return\n\n\nclass _pkg_config_info(system_info):\n section = None\n config_env_var = 'PKG_CONFIG'\n default_config_exe = 'pkg-config'\n append_config_exe = ''\n version_macro_name = None\n release_macro_name = None\n version_flag = '--modversion'\n cflags_flag = '--cflags'\n\n def get_config_exe(self):\n if self.config_env_var in os.environ:\n return os.environ[self.config_env_var]\n return self.default_config_exe\n\n def get_config_output(self, config_exe, option):\n cmd = config_exe + ' ' + self.append_config_exe + ' ' + option\n try:\n o = subprocess.check_output(cmd)\n except (OSError, subprocess.CalledProcessError):\n pass\n else:\n o = filepath_from_subprocess_output(o)\n return o\n\n def calc_info(self):\n config_exe = find_executable(self.get_config_exe())\n if not config_exe:\n log.warn('File not found: %s. Cannot determine %s info.' \\\n % (config_exe, self.section))\n return\n info = {}\n macros = []\n libraries = []\n library_dirs = []\n include_dirs = []\n extra_link_args = []\n extra_compile_args = []\n version = self.get_config_output(config_exe, self.version_flag)\n if version:\n macros.append((self.__class__.__name__.split('.')[-1].upper(),\n _c_string_literal(version)))\n if self.version_macro_name:\n macros.append((self.version_macro_name + '_%s'\n % (version.replace('.', '_')), None))\n if self.release_macro_name:\n release = self.get_config_output(config_exe, '--release')\n if release:\n macros.append((self.release_macro_name + '_%s'\n % (release.replace('.', '_')), None))\n opts = self.get_config_output(config_exe, '--libs')\n if opts:\n for opt in opts.split():\n if opt[:2] == '-l':\n libraries.append(opt[2:])\n elif opt[:2] == '-L':\n library_dirs.append(opt[2:])\n else:\n extra_link_args.append(opt)\n opts = self.get_config_output(config_exe, self.cflags_flag)\n if opts:\n for opt in opts.split():\n if opt[:2] == '-I':\n include_dirs.append(opt[2:])\n elif opt[:2] == '-D':\n if '=' in opt:\n n, v = opt[2:].split('=')\n macros.append((n, v))\n else:\n macros.append((opt[2:], None))\n else:\n extra_compile_args.append(opt)\n if macros:\n dict_append(info, define_macros=macros)\n if libraries:\n dict_append(info, libraries=libraries)\n if library_dirs:\n dict_append(info, library_dirs=library_dirs)\n if include_dirs:\n dict_append(info, include_dirs=include_dirs)\n if extra_link_args:\n dict_append(info, extra_link_args=extra_link_args)\n if extra_compile_args:\n dict_append(info, extra_compile_args=extra_compile_args)\n if info:\n self.set_info(**info)\n return\n\n\nclass wx_info(_pkg_config_info):\n section = 'wx'\n config_env_var = 'WX_CONFIG'\n default_config_exe = 'wx-config'\n append_config_exe = ''\n version_macro_name = 'WX_VERSION'\n release_macro_name = 'WX_RELEASE'\n version_flag = '--version'\n cflags_flag = '--cxxflags'\n\n\nclass gdk_pixbuf_xlib_2_info(_pkg_config_info):\n section = 'gdk_pixbuf_xlib_2'\n append_config_exe = 'gdk-pixbuf-xlib-2.0'\n version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'\n\n\nclass gdk_pixbuf_2_info(_pkg_config_info):\n section = 'gdk_pixbuf_2'\n append_config_exe = 'gdk-pixbuf-2.0'\n version_macro_name = 'GDK_PIXBUF_VERSION'\n\n\nclass 
gdk_x11_2_info(_pkg_config_info):\n section = 'gdk_x11_2'\n append_config_exe = 'gdk-x11-2.0'\n version_macro_name = 'GDK_X11_VERSION'\n\n\nclass gdk_2_info(_pkg_config_info):\n section = 'gdk_2'\n append_config_exe = 'gdk-2.0'\n version_macro_name = 'GDK_VERSION'\n\n\nclass gdk_info(_pkg_config_info):\n section = 'gdk'\n append_config_exe = 'gdk'\n version_macro_name = 'GDK_VERSION'\n\n\nclass gtkp_x11_2_info(_pkg_config_info):\n section = 'gtkp_x11_2'\n append_config_exe = 'gtk+-x11-2.0'\n version_macro_name = 'GTK_X11_VERSION'\n\n\nclass gtkp_2_info(_pkg_config_info):\n section = 'gtkp_2'\n append_config_exe = 'gtk+-2.0'\n version_macro_name = 'GTK_VERSION'\n\n\nclass xft_info(_pkg_config_info):\n section = 'xft'\n append_config_exe = 'xft'\n version_macro_name = 'XFT_VERSION'\n\n\nclass freetype2_info(_pkg_config_info):\n section = 'freetype2'\n append_config_exe = 'freetype2'\n version_macro_name = 'FREETYPE2_VERSION'\n\n\nclass amd_info(system_info):\n section = 'amd'\n dir_env_var = 'AMD'\n _lib_names = ['amd']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n opt = self.get_option_single('amd_libs', 'libraries')\n amd_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, amd_libs, [])\n if info is None:\n return\n\n include_dirs = self.get_include_dirs()\n\n inc_dir = None\n for d in include_dirs:\n p = self.combine_paths(d, 'amd.h')\n if p:\n inc_dir = os.path.dirname(p[0])\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir],\n define_macros=[('SCIPY_AMD_H', None)],\n swig_opts=['-I' + inc_dir])\n\n self.set_info(**info)\n return\n\n\nclass umfpack_info(system_info):\n section = 'umfpack'\n dir_env_var = 'UMFPACK'\n notfounderror = UmfpackNotFoundError\n _lib_names = ['umfpack']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n opt = self.get_option_single('umfpack_libs', 'libraries')\n umfpack_libs = self.get_libs(opt, self._lib_names)\n info = self.check_libs(lib_dirs, umfpack_libs, [])\n if info is None:\n return\n\n include_dirs = self.get_include_dirs()\n\n inc_dir = None\n for d in include_dirs:\n p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')\n if p:\n inc_dir = os.path.dirname(p[0])\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir],\n define_macros=[('SCIPY_UMFPACK_H', None)],\n swig_opts=['-I' + inc_dir])\n\n dict_append(info, **get_info('amd'))\n\n self.set_info(**info)\n return\n\n\ndef combine_paths(*args, **kws):\n \"\"\" Return a list of existing paths composed by all combinations of\n items from arguments.\n \"\"\"\n r = []\n for a in args:\n if not a:\n continue\n if is_string(a):\n a = [a]\n r.append(a)\n args = r\n if not args:\n return []\n if len(args) == 1:\n result = reduce(lambda a, b: a + b, map(glob, args[0]), [])\n elif len(args) == 2:\n result = []\n for a0 in args[0]:\n for a1 in args[1]:\n result.extend(glob(os.path.join(a0, a1)))\n else:\n result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))\n log.debug('(paths: %s)', ','.join(result))\n return result\n\nlanguage_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}\ninv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}\n\n\ndef dict_append(d, **kws):\n languages = []\n for k, v in kws.items():\n if k == 'language':\n languages.append(v)\n continue\n if k in d:\n if k in ['library_dirs', 'include_dirs',\n 'extra_compile_args', 'extra_link_args',\n 'runtime_library_dirs', 'define_macros']:\n [d[k].append(vv) for vv in v if vv not in d[k]]\n else:\n d[k].extend(v)\n else:\n d[k] = v\n 
if languages:\n l = inv_language_map[max([language_map.get(l, 0) for l in languages])]\n d['language'] = l\n return\n\n\ndef parseCmdLine(argv=(None,)):\n import optparse\n parser = optparse.OptionParser(\"usage: %prog [-v] [info objs]\")\n parser.add_option('-v', '--verbose', action='store_true', dest='verbose',\n default=False,\n help='be verbose and print more messages')\n\n opts, args = parser.parse_args(args=argv[1:])\n return opts, args\n\n\ndef show_all(argv=None):\n import inspect\n if argv is None:\n argv = sys.argv\n opts, args = parseCmdLine(argv)\n if opts.verbose:\n log.set_threshold(log.DEBUG)\n else:\n log.set_threshold(log.INFO)\n show_only = []\n for n in args:\n if n[-5:] != '_info':\n n = n + '_info'\n show_only.append(n)\n show_all = not show_only\n _gdict_ = globals().copy()\n for name, c in _gdict_.items():\n if not inspect.isclass(c):\n continue\n if not issubclass(c, system_info) or c is system_info:\n continue\n if not show_all:\n if name not in show_only:\n continue\n del show_only[show_only.index(name)]\n conf = c()\n conf.verbosity = 2\n # we don't need the result, but we want\n # the side effect of printing diagnostics\n conf.get_info()\n if show_only:\n log.info('Info classes not defined: %s', ','.join(show_only))\n\nif __name__ == \"__main__\":\n show_all()\n"
] | [
[
"numpy.distutils.exec_command.filepath_from_subprocess_output",
"numpy.distutils.fcompiler.new_fcompiler",
"numpy.distutils.log.get_threshold",
"numpy.distutils._shell_utils.NativeParser.split",
"numpy.distutils.misc_util.is_sequence",
"numpy.distutils.customized_ccompiler",
"numpy.distutils.misc_util.is_string",
"numpy.distutils.log.warn",
"numpy.distutils.log.info",
"numpy.distutils.log.set_threshold",
"numpy.distutils.misc_util.get_shared_lib_extension"
]
] |
NVIDIA/Torch-TensorRT | [
"1a22204fecec690bc3c2a318dab4f57b98c57f05",
"1a22204fecec690bc3c2a318dab4f57b98c57f05"
] | [
"py/torch_tensorrt/fx/test/converters/acc_op/test_eq.py",
"py/torch_tensorrt/fx/test/converters/acc_op/test_embedding.py"
] | [
"import torch\nimport torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops\nfrom parameterized import parameterized\nfrom torch.testing._internal.common_utils import run_tests\nfrom torch_tensorrt.fx.tools.common_fx2trt import AccTestCase\n\n\nclass TestEqConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d\", torch.randn(3, 4), torch.randn(3, 4)),\n (\"rand_3d\", torch.randn(3, 4, 5), torch.randn(3, 4, 5)),\n (\"rand_4d\", torch.randn(3, 4, 5, 6), torch.randn(3, 4, 5, 6)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_bool\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.int),\n ),\n (\n \"rand_4d_float_bool_dim\",\n torch.randn(3, 4, 5, 6).to(torch.float),\n torch.randn(3, 1, 1, 6).to(torch.bool),\n ),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def forward(self, x, y):\n mask = torch.eq(x, y)\n return x.masked_fill(mask, 5)\n\n inputs = [\n input,\n other,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestEqMethodConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d\", torch.randn(3, 4), torch.randn(3, 4)),\n (\"rand_3d\", torch.randn(3, 4, 5), torch.randn(3, 4, 5)),\n (\"rand_4d\", torch.randn(3, 4, 5, 6), torch.randn(3, 4, 5, 6)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_bool\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.int),\n ),\n (\n \"rand_4d_float_bool_dim\",\n torch.randn(3, 4, 5, 6).to(torch.float),\n torch.randn(3, 1, 1, 6).to(torch.bool),\n ),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def forward(self, x, y):\n mask = x.eq(y)\n return x.masked_fill(mask, 5)\n\n inputs = [\n input,\n other,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestEqOperatorConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d\", torch.randn(3, 4), torch.randn(3, 4)),\n (\"rand_3d\", torch.randn(3, 4, 5), torch.randn(3, 4, 5)),\n (\"rand_4d\", torch.randn(3, 4, 5, 6), torch.randn(3, 4, 5, 6)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_bool\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.zeros(3, 4).to(torch.int),\n ),\n (\n \"rand_4d_float_bool_dim\",\n torch.randn(3, 4, 5, 6).to(torch.float),\n torch.randn(3, 1, 1, 6).to(torch.bool),\n ),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def forward(self, x, y):\n mask = x == y\n return x.masked_fill(mask, 5)\n\n inputs = [\n input,\n other,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestEqOperatorSimpleConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d_float_bool\", torch.randn(3, 4), torch.randn(3, 4).to(torch.bool)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.randn(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_bool_bool\",\n torch.randn(3, 4).to(torch.bool),\n torch.randn(3, 
4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.randn(3, 4).to(torch.int),\n ),\n (\n \"rand_2d_float_single_bool\",\n torch.randn(3, 4),\n torch.tensor(0).to(torch.bool),\n ),\n (\n \"rand_2d_int_single_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.tensor(0).to(torch.bool),\n ),\n (\n \"rand_2d_bool_single_bool\",\n torch.randn(3, 4).to(torch.bool),\n torch.tensor(0).to(torch.bool),\n ),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def forward(self, x, y):\n return x == y\n\n inputs = [\n input,\n other,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestEqOperatorConstantConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"rand_2d_float_bool\", torch.randn(3, 4), torch.randn(3, 4).to(torch.bool)),\n (\n \"rand_2d_int_bool\",\n torch.randn(3, 4).to(torch.int),\n torch.randn(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_bool_bool\",\n torch.randn(3, 4).to(torch.bool),\n torch.randn(3, 4).to(torch.bool),\n ),\n (\n \"rand_2d_float_int\",\n torch.randn(3, 4).to(torch.float),\n torch.randn(3, 4).to(torch.int),\n ),\n (\"rand_2d_float_single_bool\", torch.randn(3, 4), False),\n (\"rand_2d_int_single_bool\", torch.randn(3, 4).to(torch.int), False),\n (\"rand_2d_bool_single_bool\", torch.randn(3, 4).to(torch.bool), False),\n ]\n )\n def test_eq(self, _, input, other):\n class Eq(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.other = other\n\n def forward(self, x):\n return x == self.other\n\n inputs = [\n input,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nclass TestConstInputConverter(AccTestCase):\n def test_eq(self):\n class Eq(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return x.shape[0] == 4\n\n input = torch.randn(3, 4)\n inputs = [\n input,\n ]\n self.run_test(\n Eq(), inputs, expected_ops={acc_ops.eq}, test_implicit_batch_dim=False\n )\n\n\nif __name__ == \"__main__\":\n run_tests()\n",
"import unittest\n\nimport torch\n\nimport torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops\nfrom parameterized import param, parameterized\nfrom torch.testing._internal.common_utils import run_tests\nfrom torch_tensorrt.fx.tools.common_fx2trt import AccTestCase\n\n\[email protected](\n \"Current implementation is limited. All implementations in hf use int64. T113156424\"\n)\nclass TestEmbeddingConverter(AccTestCase):\n @parameterized.expand(\n [\n param(\n test_name=\"1d_indices\",\n indices_tensor=torch.tensor([3, 1, 2]),\n weights_tensor=torch.randn(5, 10),\n ),\n param(\n test_name=\"2d_indices\",\n indices_tensor=torch.tensor([[3, 1, 2], [4, 1, 3]]),\n weights_tensor=torch.randn(5, 10),\n ),\n param(\n test_name=\"3d_indices\",\n indices_tensor=torch.tensor([[[0, 1], [2, 3]], [[3, 4], [4, 0]]]),\n weights_tensor=torch.randn(5, 10),\n ),\n ]\n )\n def test_embedding(\n self,\n test_name,\n indices_tensor,\n weights_tensor,\n padding_idx=None,\n max_norm=None,\n norm_type=2.0,\n scale_grad_by_freq=False,\n sparse=False,\n ):\n class TestEmbedding(torch.nn.Module):\n def forward(self, indices, weights):\n return torch.nn.functional.embedding(\n input=indices,\n weight=weights,\n padding_idx=padding_idx,\n max_norm=max_norm,\n norm_type=norm_type,\n scale_grad_by_freq=scale_grad_by_freq,\n sparse=sparse,\n )\n\n self.run_test(\n TestEmbedding(),\n inputs=[indices_tensor.int(), weights_tensor.float()],\n expected_ops={acc_ops.embedding},\n test_implicit_batch_dim=False,\n test_explicit_batch_dim=True,\n )\n\n\nif __name__ == \"__main__\":\n run_tests()\n"
] | [
[
"torch.randn",
"torch.testing._internal.common_utils.run_tests",
"torch.tensor",
"torch.eq",
"torch.zeros"
],
[
"torch.randn",
"torch.testing._internal.common_utils.run_tests",
"torch.tensor",
"torch.nn.functional.embedding"
]
] |
hbrunie/PeleLM | [
"8b8c07aa1770c07e087f8976b6e16a71de68f751"
] | [
"Exec/RegTests/FlameSheet/pprocConvOrder.py"
] | [
"#!/usr/bin/env python3\n\n# Template post-processing script for PeleLM convergence analysis\n# Must be used after multirun.py script\n# Input are limited by the regression framework.\n\n# Usage:\n# ./pprocConvOrder.py --pproc_exec prog.exe --test_name DummyTest\n\n# Input:\n# * --pproc_exec: the processing executable path\n# * --test_name: a TESTNAME that will looked for during the postprocessing\n\n# \"Internal\" user input \n# * pproc_type:\n# - pproc_type == \"fcompare\". fcompare is used to get the error from the initial solution (== analytical solution) \n# - pproc_type == \"diffsamedomain\". Analytical solution is not known and errors are computed from the next finer grid \n# * vars : a list of the variables of interest (no check is done on whether it exists in plt ...)\n# * resolution : a list of the resolutions to post-process (should be consistent with multirun.py, if used)\n\n# Output:\n# * Convergence_${TESTNAME}.png file with the log-log plot of the error vs. resolution.\n# * ConvTable_${TESTNAME}.tex file with the convergence rate formatted in an LaTeX table.\n# * Convergence_${TESTNAME}.dat plain text file with the convergence rate.\n\n# Head's up : \n# - The script will get a copy of the post-processing program (if not already there) in the testing folder. The name of this folder is assumed to be the TESTNAME. \n# - The plt files naming convention is: ${TESTNAME}_plt_${resolution}_*****. It is used to get the first and last solution of a test at a given resolution.\n# - Errors are parsed from the screen output of the standard fcompare/diffsamedomain. Beware of any change of these programs. \n\nimport sys\nimport os\nimport fnmatch\nimport shutil\nimport argparse\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nUSAGE = \"\"\"\n Template post-processing script for PeleLM convergence analysis\n\"\"\"\n\ndef pproc(args):\n\n # User data\n vars=[\"y_velocity\", \"density\", \"temp\", \"Y(O2)\", \"Y(CH4)\", \"Y(CO2)\", \"Y(CO)\", \"Y(H2O)\" ]\n resolution = [64,128,256,512] \n pproc_type = \"diffsamedomain\"\n\n # Get a local copy of post-processing executable\n run_dir = os.getcwd()\n if ( not os.path.isfile(os.path.basename(args.pproc_exe)) ):\n shutil.copy(args.pproc_exe, run_dir)\n\n # Check the test name: current folder name is default\n if ( args.test_name == \"None\" ):\n args.test_name = run_dir.split(\"/\")[-1]\n\n # Run the postprocessing\n if ( pproc_type == \"fcompare\" ): # running fcompare since analytical solution is known\n errors = np.empty([len(resolution),len(vars)+1])\n pltfile=[]\n for res in range(len(resolution)):\n case = resolution[res]\n errors[res,0] = case\n\n # Get the fcompare inputs: first and last solution of current case\n # TODO: the analytical solution might not be plt****_00000 ...\n for f in os.listdir(run_dir):\n if ( not fnmatch.fnmatch(f, '*old*')):\n if (f.startswith(\"{}_plt_{}_\".format(args.test_name,case))):\n pltfile.append(f)\n pltfile.sort()\n outfile = \"error_{}.analysis.out\".format(case)\n os.system(\"./{} -n 2 {} {} > {}\".format(os.path.basename(args.pproc_exe), pltfile[0], pltfile[-1], outfile))\n pltfile.clear()\n \n # Extract errors on each variable\n with open(outfile) as fp:\n for i, line in enumerate(fp):\n if (i >= 5):\n var = line.split()[0]\n for v in range(len(vars)):\n if ( var == vars[v] ):\n errors[res,v+1] = line.split()[1]\n os.system(\"rm {}\".format(outfile))\n elif ( pproc_type == \"diffsamedomain\" ): # running diffsamedomain. 
No analytical sol ...\n errors = np.empty([len(resolution)-1,len(vars)+1])\n pltfile=[]\n pltfilenext=[]\n for res in range(len(resolution)-1):\n case = resolution[res]\n nextcase = resolution[res+1]\n errors[res,0] = case\n\n # Get the diffsamedomain inputs: last solutions of current \n # and next finer cases. These run should have been runned to the same final time\n for f in os.listdir(run_dir):\n if ( not fnmatch.fnmatch(f, '*old*')):\n if (f.startswith(\"{}_plt_{}_\".format(args.test_name,case))):\n pltfile.append(f)\n if (f.startswith(\"{}_plt_{}_\".format(args.test_name,nextcase))):\n pltfilenext.append(f)\n pltfile.sort()\n pltfilenext.sort()\n outfile = \"error_{}.analysis.out\".format(case)\n os.system(\"./{} infile1={} reffile={} > {}\".format(os.path.basename(args.pproc_exe), pltfile[-1], pltfilenext[-1], outfile))\n pltfile.clear()\n pltfilenext.clear()\n\n # Extract errors on each variable\n with open(outfile) as fp:\n for i, line in enumerate(fp):\n if (i >= 5):\n var = line.split(\":\")[0]\n for v in range(len(vars)):\n if ( var.split(\" \")[0] == vars[v] ):\n errors[res,v+1] = line.split(\":\")[1]\n os.system(\"rm {}\".format(outfile))\n else:\n print(\"Wrong pproc_type: {}. should be either fcompare or diffsamedomain\".format(pproc_type))\n return\n\n\n print(errors)\n # Plot data\n plotdata(errors, args.test_name, vars)\n writetex(errors, args.test_name, vars)\n writeRegTestFile(errors, args.test_name, vars)\n\ndef plotdata(data, test_name, vars):\n # Evaluate 2nd order slope\n snd_order = data[:,1]*1.05\n for i in range(1,len(data[:,1])):\n snd_order[i] = snd_order[i-1]/np.exp(2.0*np.log(2.0))\n for i in range(0, len(vars)): \n plt.plot(data[:,0], data[:,i+1], label=\"{}\".format(vars[i]))\n plt.plot(data[:,0], snd_order[:],linestyle='--',color='k', label='2nd-order')\n plt.xlabel(\"Resolution\")\n plt.ylabel(\"Error L2norm\")\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.grid(which='both',color='k', linestyle=':', linewidth=1)\n plt.legend(bbox_to_anchor=(0.9, 0.9), loc=1, borderaxespad=0.)\n plt.savefig(\"Convergence_{}.png\".format(test_name))\n\ndef writetex(data, test_name, vars):\n # Evaluate order\n conv_order = np.empty([len(data[:,0])-1,len(vars)])\n for v in range(len(vars)):\n for i in range(len(conv_order[:,0])):\n conv_order[i,v] = np.log(data[i,v+1]/data[i+1,v+1])/np.log(2.0)\n fout = open(\"ConvTable_{}.tex\".format(test_name), \"w\") \n fout.write(\"\\\\begin{table}[ht!]\\n\")\n fout.write(\"\\centering\\n\")\n fout.write(\"\\\\begin{tabular}{l|\")\n for i in range(len(conv_order[:,0])):\n fout.write(\"c \")\n fout.write(\"}\\n\") \n fout.write(\"\\hline\\n\")\n fout.write(\"Variable \")\n for i in range(len(conv_order[:,0])):\n fout.write(\"& {}/{} \".format(data[i+1,0],data[i,0]))\n fout.write(\"\\\\\\\\\\n\\hline\\hline\\n\")\n for v in range(len(vars)):\n fout.write(\"{} \".format(vars[v].replace(\"_\",\"\\_\")))\n for i in range(len(conv_order[:,0])):\n fout.write(\"& {:.3f} \".format(conv_order[i,v]))\n fout.write(\"\\\\\\\\\\n\")\n fout.write(\"\\end{tabular}\\n\")\n fout.write(\"\\caption{PeleLM convergence order}\\n\")\n fout.write(\"\\label{table:conv}\\n\")\n fout.write(\"\\end{table}\\n\")\n fout.close()\n\ndef writeRegTestFile(data, test_name, vars):\n # Evaluate order\n conv_order = np.empty([len(data[:,0])-1,len(vars)])\n for v in range(len(vars)):\n for i in range(len(conv_order[:,0])):\n conv_order[i,v] = np.log(data[i,v+1]/data[i+1,v+1])/np.log(2.0)\n fout = open(\"Convergence_{}.dat\".format(test_name), \"w\") \n fout.write(\" 
Variables \")\n for i in range(len(conv_order[:,0])):\n fout.write(\" {}/{} \".format(data[i+1,0],data[i,0]))\n fout.write(\"\\n\")\n for v in range(len(vars)):\n fout.write(\"{} \".format(vars[v]))\n for i in range(len(conv_order[:,0])):\n fout.write(\" {:.3f} \".format(conv_order[i,v]))\n fout.write(\"\\n\")\n fout.close()\n\ndef parse_args(arg_string=None):\n parser = argparse.ArgumentParser(description=USAGE)\n\n parser.add_argument(\"--test_name\", type=str, default=\"None\", metavar=\"test-name\",\n help=\"name of the test. Default = current folder name\")\n\n parser.add_argument(\"--pproc_exe\", type=str, default=\"None\", metavar=\"pproc.exe\",\n help=\"path to the executable required for the analysis.\")\n\n if not arg_string is None:\n args, unknown = parser.parse_known_args(arg_string)\n else:\n args, unknown = parser.parse_known_args()\n\n return args \n\nif __name__ == \"__main__\":\n arg_string_prepend = [\"--pproc_exe\"]+sys.argv[1:]\n args = parse_args(arg_string=arg_string_prepend)\n pproc(args)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
Dou-Yu-xuan/deep-learning-visal | [
"da1ebc527d44c8c5a524e757a1d784ba37ec2d5c"
] | [
"models/ObjectDetection/FoveaBox.py"
] | [
"import torch\nimport torch.nn as nn\nimport torchvision\n\ndef Conv3x3ReLU(in_channels,out_channels):\n return nn.Sequential(\n nn.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=3,stride=1,padding=1),\n nn.ReLU6(inplace=True)\n )\n\ndef locLayer(in_channels,out_channels):\n return nn.Sequential(\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),\n )\n\ndef confLayer(in_channels,out_channels):\n return nn.Sequential(\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n Conv3x3ReLU(in_channels=in_channels, out_channels=in_channels),\n nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),\n )\n\nclass FoveaBox(nn.Module):\n def __init__(self, num_classes=80):\n super(FoveaBox, self).__init__()\n self.num_classes = num_classes\n resnet = torchvision.models.resnet50()\n layers = list(resnet.children())\n\n self.layer1 = nn.Sequential(*layers[:5])\n self.layer2 = nn.Sequential(*layers[5])\n self.layer3 = nn.Sequential(*layers[6])\n self.layer4 = nn.Sequential(*layers[7])\n\n self.lateral5 = nn.Conv2d(in_channels=2048, out_channels=256, kernel_size=1)\n self.lateral4 = nn.Conv2d(in_channels=1024, out_channels=256, kernel_size=1)\n self.lateral3 = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=1)\n\n self.upsample4 = nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=4, stride=2, padding=1)\n self.upsample3 = nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=4, stride=2, padding=1)\n\n self.downsample6 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1)\n self.downsample6_relu = nn.ReLU6(inplace=True)\n self.downsample5 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1)\n\n self.loc_layer3 = locLayer(in_channels=256,out_channels=4)\n self.conf_layer3 = confLayer(in_channels=256,out_channels=self.num_classes)\n\n self.loc_layer4 = locLayer(in_channels=256, out_channels=4)\n self.conf_layer4 = confLayer(in_channels=256, out_channels=self.num_classes)\n\n self.loc_layer5 = locLayer(in_channels=256, out_channels=4)\n self.conf_layer5 = confLayer(in_channels=256, out_channels=self.num_classes)\n\n self.loc_layer6 = locLayer(in_channels=256, out_channels=4)\n self.conf_layer6 = confLayer(in_channels=256, out_channels=self.num_classes)\n\n self.loc_layer7 = locLayer(in_channels=256, out_channels=4)\n self.conf_layer7 = confLayer(in_channels=256, out_channels=self.num_classes)\n\n self.init_params()\n\n def init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n x = self.layer1(x)\n c3 =x = self.layer2(x)\n c4 =x = self.layer3(x)\n c5 = x = self.layer4(x)\n\n p5 = self.lateral5(c5)\n p4 = self.upsample4(p5) + self.lateral4(c4)\n p3 = self.upsample3(p4) + self.lateral3(c3)\n\n p6 = self.downsample5(p5)\n p7 = self.downsample6_relu(self.downsample6(p6))\n\n loc3 = 
self.loc_layer3(p3)\n conf3 = self.conf_layer3(p3)\n\n loc4 = self.loc_layer4(p4)\n conf4 = self.conf_layer4(p4)\n\n loc5 = self.loc_layer5(p5)\n conf5 = self.conf_layer5(p5)\n\n loc6 = self.loc_layer6(p6)\n conf6 = self.conf_layer6(p6)\n\n loc7 = self.loc_layer7(p7)\n conf7 = self.conf_layer7(p7)\n\n locs = torch.cat([loc3.permute(0, 2, 3, 1).contiguous().view(loc3.size(0), -1),\n loc4.permute(0, 2, 3, 1).contiguous().view(loc4.size(0), -1),\n loc5.permute(0, 2, 3, 1).contiguous().view(loc5.size(0), -1),\n loc6.permute(0, 2, 3, 1).contiguous().view(loc6.size(0), -1),\n loc7.permute(0, 2, 3, 1).contiguous().view(loc7.size(0), -1)],dim=1)\n\n confs = torch.cat([conf3.permute(0, 2, 3, 1).contiguous().view(conf3.size(0), -1),\n conf4.permute(0, 2, 3, 1).contiguous().view(conf4.size(0), -1),\n conf5.permute(0, 2, 3, 1).contiguous().view(conf5.size(0), -1),\n conf6.permute(0, 2, 3, 1).contiguous().view(conf6.size(0), -1),\n conf7.permute(0, 2, 3, 1).contiguous().view(conf7.size(0), -1),], dim=1)\n\n out = (locs, confs)\n return out\n\nif __name__ == '__main__':\n model = FoveaBox()\n print(model)\n\n input = torch.randn(1, 3, 800, 800)\n out = model(input)\n print(out[0].shape)\n print(out[1].shape)\n"
] | [
[
"torch.nn.init.kaiming_normal_",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.ReLU6",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d"
]
] |
Flodip/WaterMonitor | [
"5f7d8d6f266d35e7d4dd655e6e47933abb28c697"
] | [
"pimonitor.py"
] | [
"from sense_hat import SenseHat\nimport psycopg2\nimport numpy as np\nimport time\n\nsense = SenseHat()\nsense.set_imu_config(True, False, False) # compass, not gyro, not accel\n\ndatabase = \"watermonitor\"\n\ntry:\n try:\n conn = psycopg2.connect(\n user=\"pi\",\n password=\"piwater\",\n host=\"127.0.0.1\",\n port=\"5432\",\n database=database\n )\n except Exception:\n message = \"Error db conn\"\n raise\n\n while True:\n # time.sleep(0.02) # already a lag of 0.02s without sleep\n xyz = sense.get_compass_raw() # get values in microteslas\n\n # get timestamp in ms\n timestamp = int(round(time.time() * 1000))\n # get norm of compass xyz values\n value = np.linalg.norm([xyz[\"x\"], xyz[\"y\"], xyz[\"z\"]])\n try:\n curs = conn.cursor()\n print(str(timestamp) + \", \" + str(value))\n curs.execute(\"INSERT INTO water_consumption (timestamp, value) VALUES(%s, %s);\", (timestamp, value))\n conn.commit()\n curs.close()\n except Exception:\n message = \"Error cursor db\"\n raise\nexcept Exception as e:\n print(message + str(e))\n"
] | [
[
"numpy.linalg.norm"
]
] |
csbrasnett/lipid-md | [
"22ac04a01277da7e64e58ba10a1e7a9791393fcc"
] | [
"QIIDcurvature.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\nauthor: Chris Brasnett, University of Bristol, [email protected]\n\n\"\"\"\n\nimport numpy as np\nfrom QIIDderivative import derivative\n\ndef nominator(F_x, F_y, F_z, F_xx, F_xy, F_yy, F_yz, F_zz, F_xz):\n m = np.array([[F_xx, F_xy, F_xz, F_x],\n [F_xy, F_yy, F_yz, F_y],\n [F_xz, F_yz, F_zz, F_z],\n [F_x, F_y, F_z, 0]])\n \n d = np.linalg.det(m)\n \n return d\n\ndef denominator(F_x,F_y, F_z):\n \n g = np.array([F_x,F_y,F_z])\n \n mag_g = np.linalg.norm(g)\n \n return mag_g**4\n\ndef main(x, y, z, lamb):\n vals = derivative(x, y, z, lamb)\n \n n = nominator(vals[0],vals[1],vals[2],vals[3],vals[4],vals[5],vals[6],vals[7],vals[8])\n d = denominator(vals[0],vals[1],vals[2])\n K = -(n/d)\n \n return K"
] | [
[
"numpy.array",
"numpy.linalg.det",
"numpy.linalg.norm"
]
] |
rungjoo/KoreaBERT_description | [
"ad35b14ac8fb65593c0fe987680c2759e47478ab"
] | [
"run_squad_debug.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run BERT on SQuAD 1.1 and SQuAD 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport math\nimport os\nimport random\nimport modeling\nimport optimization\nimport tokenization\nimport six\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"bert_config_file\", \"./pretrained_model/cased_L-12_H-768_A-12/bert_config.json\",\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"vocab_file\", \"./pretrained_model/cased_L-12_H-768_A-12/vocab.txt\",\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", \"/squad/squad_base/\",\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\nflags.DEFINE_string(\"train_file\", \"./squad/train-v1.1.json\",\n \"SQuAD json for training. E.g., train-v1.1.json\")\n\nflags.DEFINE_string(\n \"predict_file\", \"./squad/dev-v1.1.json\",\n \"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", \"./tfrecord/pretraining_output/model.ckpt-20\",\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 384,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_integer(\n \"doc_stride\", 128,\n \"When splitting up a long document into chunks, how much stride to \"\n \"take between chunks.\")\n\nflags.DEFINE_integer(\n \"max_query_length\", 64,\n \"The maximum number of tokens for the question. Questions longer than \"\n \"this will be truncated to this length.\")\n\nflags.DEFINE_bool(\"do_train\", True, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_predict\", True, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 12, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8,\n \"Total batch size for predictions.\")\n\nflags.DEFINE_float(\"learning_rate\", 3e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 2.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\n \"n_best_size\", 20,\n \"The total number of n-best predictions to generate in the \"\n \"nbest_predictions.json output file.\")\n\nflags.DEFINE_integer(\n \"max_answer_length\", 30,\n \"The maximum length of an answer that can be generated. This is needed \"\n \"because the start and end predictions are not conditioned on one another.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\nflags.DEFINE_bool(\n \"verbose_logging\", False,\n \"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\")\n\nflags.DEFINE_bool(\n \"version_2_with_negative\", False,\n \"If true, the SQuAD examples contain some that do not have an answer.\")\n\nflags.DEFINE_float(\n \"null_score_diff_threshold\", 0.0,\n \"If null_score - best_non_null is greater than the threshold predict null.\")\n\n\nclass SquadExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (\n tokenization.printable_text(self.question_text))\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n start_position=None,\n end_position=None,\n is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n 
self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n\ndef read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n break_count = 0 # 수정\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n break_count += 1\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(\n doc_tokens[start_position:(end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\"Could not find answer: '%s' vs. 
'%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n if break_count == 8:\n break\n if break_count == 8:\n break\n if break_count == 8:\n break\n return examples\n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. 
Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls/squad/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls/squad/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, 
end_logits)\n\n\ndef model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) / 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = 
tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\ndef write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n\ndef get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.python_io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = 
create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\ndef validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n validate_flags_or_throw(bert_config)\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = read_squad_examples(\n input_file=FLAGS.train_file, is_training=True)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n # Pre-shuffle the input to avoid having to make a very large shuffle\n # buffer in in the `input_fn`.\n rng = random.Random(12345)\n rng.shuffle(train_examples)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if 
FLAGS.do_train:\n # We write to a temporary file to avoid storing very large constant tensors\n # in memory.\n train_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"train.tf_record\"),\n is_training=True)\n convert_examples_to_features(\n examples=train_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=True,\n output_fn=train_writer.process_feature)\n train_writer.close()\n\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num orig examples = %d\", len(train_examples))\n tf.logging.info(\" Num split examples = %d\", train_writer.num_features)\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n del train_examples\n\n train_input_fn = input_fn_builder(\n input_file=train_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_predict:\n eval_examples = read_squad_examples(\n input_file=FLAGS.predict_file, is_training=False)\n\n eval_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"eval.tf_record\"),\n is_training=False)\n eval_features = []\n\n def append_feature(feature):\n eval_features.append(feature)\n eval_writer.process_feature(feature)\n\n convert_examples_to_features(\n examples=eval_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=False,\n output_fn=append_feature)\n eval_writer.close()\n\n tf.logging.info(\"***** Running predictions *****\")\n tf.logging.info(\" Num orig examples = %d\", len(eval_examples))\n tf.logging.info(\" Num split examples = %d\", len(eval_features))\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n all_results = []\n\n predict_input_fn = input_fn_builder(\n input_file=eval_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n\n # If running eval on the TPU, you will need to specify the number of\n # steps.\n all_results = []\n for result in estimator.predict(\n predict_input_fn, yield_single_examples=True):\n if len(all_results) % 1000 == 0:\n tf.logging.info(\"Processing example: %d\" % (len(all_results)))\n unique_id = int(result[\"unique_ids\"])\n start_logits = [float(x) for x in result[\"start_logits\"].flat]\n end_logits = [float(x) for x in result[\"end_logits\"].flat]\n all_results.append(\n RawResult(\n unique_id=unique_id,\n start_logits=start_logits,\n end_logits=end_logits))\n\n output_prediction_file = os.path.join(FLAGS.output_dir, \"predictions.json\")\n output_nbest_file = os.path.join(FLAGS.output_dir, \"nbest_predictions.json\")\n output_null_log_odds_file = os.path.join(FLAGS.output_dir, \"null_odds.json\")\n\n write_predictions(eval_examples, eval_features, all_results,\n FLAGS.n_best_size, FLAGS.max_answer_length,\n FLAGS.do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n"
] | [
[
"tensorflow.data.TFRecordDataset",
"tensorflow.reshape",
"tensorflow.unstack",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.logging.set_verbosity",
"tensorflow.matmul",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.one_hot",
"tensorflow.reduce_sum",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.truncated_normal_initializer",
"tensorflow.gfile.GFile",
"tensorflow.nn.log_softmax",
"tensorflow.logging.warning",
"tensorflow.train.Features",
"tensorflow.FixedLenFeature",
"tensorflow.train.init_from_checkpoint",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.gfile.MakeDirs",
"tensorflow.train.Scaffold",
"tensorflow.transpose",
"tensorflow.app.run",
"tensorflow.parse_single_example",
"tensorflow.zeros_initializer",
"tensorflow.flags.DEFINE_string",
"tensorflow.logging.info",
"tensorflow.nn.bias_add",
"tensorflow.trainable_variables",
"tensorflow.to_int32",
"tensorflow.gfile.Open"
]
] |
elusenji/transformers | [
"b18dfd95e1f60ae65a959a7b255fc06522170d1b",
"b18dfd95e1f60ae65a959a7b255fc06522170d1b"
] | [
"tests/openai/test_modeling_tf_openai.py",
"src/transformers/models/flaubert/modeling_tf_flaubert.py"
] | [
"# coding=utf-8\n# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\n\nfrom transformers import OpenAIGPTConfig, is_tf_available\nfrom transformers.testing_utils import require_tf, slow\n\nfrom ..test_configuration_common import ConfigTester\nfrom ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask\n\n\nif is_tf_available():\n import tensorflow as tf\n\n from transformers.models.openai.modeling_tf_openai import (\n TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,\n TFOpenAIGPTDoubleHeadsModel,\n TFOpenAIGPTForSequenceClassification,\n TFOpenAIGPTLMHeadModel,\n TFOpenAIGPTModel,\n )\n\n\nclass TFOpenAIGPTModelTester:\n def __init__(\n self,\n parent,\n ):\n self.parent = parent\n self.batch_size = 13\n self.seq_length = 7\n self.is_training = True\n self.use_token_type_ids = True\n self.use_input_mask = True\n self.use_labels = True\n self.use_mc_token_ids = True\n self.vocab_size = 99\n self.hidden_size = 32\n self.num_hidden_layers = 5\n self.num_attention_heads = 4\n self.intermediate_size = 37\n self.hidden_act = \"gelu\"\n self.hidden_dropout_prob = 0.1\n self.attention_probs_dropout_prob = 0.1\n self.max_position_embeddings = 512\n self.type_vocab_size = 16\n self.type_sequence_label_size = 2\n self.initializer_range = 0.02\n self.num_labels = 3\n self.num_choices = 4\n self.scope = None\n self.pad_token_id = self.vocab_size - 1\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n mc_token_ids = None\n if self.use_mc_token_ids:\n mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = OpenAIGPTConfig(\n vocab_size=self.vocab_size,\n n_embd=self.hidden_size,\n n_layer=self.num_hidden_layers,\n n_head=self.num_attention_heads,\n # intermediate_size=self.intermediate_size,\n # hidden_act=self.hidden_act,\n # hidden_dropout_prob=self.hidden_dropout_prob,\n # attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n n_positions=self.max_position_embeddings,\n # type_vocab_size=self.type_vocab_size,\n # initializer_range=self.initializer_range,\n pad_token_id=self.pad_token_id,\n )\n\n head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)\n\n return (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n 
choice_labels,\n )\n\n def create_and_check_openai_gpt_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = TFOpenAIGPTModel(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n\n inputs = [input_ids, input_mask]\n result = model(inputs)\n\n result = model(input_ids)\n\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n\n def create_and_check_openai_gpt_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = TFOpenAIGPTLMHeadModel(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_openai_gpt_double_head(\n self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args\n ):\n model = TFOpenAIGPTDoubleHeadsModel(config=config)\n\n multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))\n multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))\n multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))\n\n inputs = {\n \"input_ids\": multiple_choice_inputs_ids,\n \"mc_token_ids\": mc_token_ids,\n \"attention_mask\": multiple_choice_input_mask,\n \"token_type_ids\": multiple_choice_token_type_ids,\n }\n result = model(inputs)\n self.parent.assertEqual(\n result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)\n )\n self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))\n\n def create_and_check_openai_gpt_for_sequence_classification(\n self, config, input_ids, input_mask, head_mask, token_type_ids, *args\n ):\n config.num_labels = self.num_labels\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n inputs = {\n \"input_ids\": input_ids,\n \"attention_mask\": input_mask,\n \"token_type_ids\": token_type_ids,\n \"labels\": sequence_labels,\n }\n model = TFOpenAIGPTForSequenceClassification(config)\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n\n (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n\n inputs_dict = {\"input_ids\": input_ids, \"token_type_ids\": token_type_ids, \"attention_mask\": input_mask}\n return config, inputs_dict\n\n\n@require_tf\nclass TFOpenAIGPTModelTest(TFModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification)\n if is_tf_available()\n else ()\n )\n all_generative_model_classes = (\n (TFOpenAIGPTLMHeadModel,) if is_tf_available() else ()\n ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly\n test_head_masking = False\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFOpenAIGPTModelTester(self)\n self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def 
test_openai_gpt_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)\n\n def test_openai_gpt_lm_head(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_lm_head(*config_and_inputs)\n\n def test_openai_gpt_double_head(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs)\n\n def test_model_common_attributes(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)\n\n if model_class in self.all_generative_model_classes:\n x = model.get_output_embeddings()\n assert isinstance(x, tf.keras.layers.Layer)\n name = model.get_bias()\n assert name is None\n else:\n x = model.get_output_embeddings()\n assert x is None\n name = model.get_bias()\n assert name is None\n\n def test_openai_gpt_sequence_classification_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFOpenAIGPTModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_tf\nclass TFOPENAIGPTModelLanguageGenerationTest(unittest.TestCase):\n @slow\n def test_lm_generate_openai_gpt(self):\n model = TFOpenAIGPTLMHeadModel.from_pretrained(\"openai-gpt\")\n input_ids = tf.convert_to_tensor([[481, 4735, 544]], dtype=tf.int32) # the president is\n expected_output_ids = [\n 481,\n 4735,\n 544,\n 246,\n 963,\n 870,\n 762,\n 239,\n 244,\n 40477,\n 244,\n 249,\n 719,\n 881,\n 487,\n 544,\n 240,\n 244,\n 603,\n 481,\n ] # the president is a very good man. \" \\n \" i\\'m sure he is, \" said the\n\n output_ids = model.generate(input_ids, do_sample=False)\n self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)\n",
"# coding=utf-8\n# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n TF 2.0 Flaubert model.\n\"\"\"\n\nimport itertools\nimport random\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\nfrom ...modeling_tf_outputs import TFBaseModelOutput\nfrom ...modeling_tf_utils import (\n TFPreTrainedModel,\n TFSharedEmbeddings,\n get_initializer,\n keras_serializable,\n unpack_inputs,\n)\nfrom ...tf_utils import shape_list\nfrom ...utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n)\nfrom ..xlm.modeling_tf_xlm import (\n TFXLMForMultipleChoice,\n TFXLMForQuestionAnsweringSimple,\n TFXLMForSequenceClassification,\n TFXLMForTokenClassification,\n)\nfrom .configuration_flaubert import FlaubertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"flaubert/flaubert_base_cased\"\n_CONFIG_FOR_DOC = \"FlaubertConfig\"\n_TOKENIZER_FOR_DOC = \"FlaubertTokenizer\"\n\nTF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n # See all Flaubert models at https://huggingface.co/models?filter=flaubert\n]\n\nFLAUBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. 
Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n <Tip>\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the\n first positional argument :\n\n - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n </Tip>\n\n Parameters:\n config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nFLAUBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`FlaubertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - `1` for tokens that are **not masked**,\n - `0` for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n langs (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are\n languages ids which can be obtained from the language names by using two conversion mappings provided in\n the configuration of the model (only provided for multilingual models). More precisely, the *language name\n to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the\n *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).\n\n See usage examples detailed in the [multilingual documentation](../multilingual).\n token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - `0` corresponds to a *sentence A* token,\n - `1` corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n lengths (`tf.Tensor` or `Numpy array` of shape `(batch_size,)`, *optional*):\n Length of each sentence that can be used to avoid performing attention on padding token indices. You can\n also use *attention_mask* for the same result (see above), kept here for compatibility Indices selected in\n `[0, ..., input_ids.size(-1)]`:\n cache (`Dict[str, tf.Tensor]`, *optional*):\n Dictionary string to `tf.FloatTensor` that contains precomputed hidden states (key and values in the\n attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential\n decoding.\n\n The dictionary object will be modified in-place during the forward pass to add newly computed\n hidden-states.\n head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - `1` indicates the head is **not masked**,\n - `0` indicates the head is **masked**.\n\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\ndef get_masks(slen, lengths, causal, padding_mask=None):\n \"\"\"\n Generate hidden states mask, and optionally an attention mask.\n \"\"\"\n bs = shape_list(lengths)[0]\n if padding_mask is not None:\n mask = padding_mask\n else:\n # assert lengths.max().item() <= slen\n alen = tf.range(slen)\n mask = tf.math.less(alen, tf.expand_dims(lengths, axis=1))\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n if causal:\n attn_mask = tf.less_equal(\n tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))\n )\n else:\n attn_mask = mask\n\n # sanity check\n # assert shape_list(mask) == [bs, slen]\n if tf.executing_eagerly():\n tf.debugging.assert_equal(shape_list(mask), [bs, slen])\n assert causal is False or shape_list(attn_mask) == [bs, slen, slen]\n\n return mask, attn_mask\n\n\nclass TFFlaubertPreTrainedModel(TFPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = FlaubertConfig\n base_model_prefix = \"transformer\"\n\n @property\n def dummy_inputs(self):\n # Sometimes XLM has language embeddings so don't forget to build them as well if needed\n inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])\n attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])\n if self.config.use_lang_emb and self.config.n_langs > 1:\n return {\n \"input_ids\": inputs_list,\n \"attention_mask\": attns_list,\n \"langs\": tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]]),\n }\n else:\n return {\"input_ids\": inputs_list, \"attention_mask\": attns_list}\n\n\n@add_start_docstrings(\n \"The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertModel(TFFlaubertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFBaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n langs: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,\n cache: Optional[Dict[str, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[tf.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFBaseModelOutput]:\n outputs = self.transformer(\n input_ids=input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n 
position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n return outputs\n\n # Copied from transformers.models.distilbert.modeling_tf_distilbert.TFDistilBertModel.serving_output\n def serving_output(self, output):\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)\n\n\n# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMMultiHeadAttention with XLM->Flaubert\nclass TFFlaubertMultiHeadAttention(tf.keras.layers.Layer):\n NEW_ID = itertools.count()\n\n def __init__(self, n_heads, dim, config, **kwargs):\n super().__init__(**kwargs)\n self.layer_id = next(TFFlaubertMultiHeadAttention.NEW_ID)\n self.dim = dim\n self.n_heads = n_heads\n self.output_attentions = config.output_attentions\n assert self.dim % self.n_heads == 0\n\n self.q_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"q_lin\")\n self.k_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"k_lin\")\n self.v_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"v_lin\")\n self.out_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name=\"out_lin\")\n self.dropout = tf.keras.layers.Dropout(config.attention_dropout)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n raise NotImplementedError\n\n def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n bs, qlen, dim = shape_list(input)\n\n if kv is None:\n klen = qlen if cache is None else cache[\"slen\"] + qlen\n else:\n klen = shape_list(kv)[1]\n\n # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'\n dim_per_head = self.dim // self.n_heads\n mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen)\n\n def shape(x):\n \"\"\"projection\"\"\"\n return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))\n\n def unshape(x):\n \"\"\"compute context\"\"\"\n return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))\n\n q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n\n if kv is None:\n k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n elif cache is None or self.layer_id not in cache:\n k = v = kv\n k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if cache is not None:\n if self.layer_id in cache:\n if kv is None:\n k_, v_ = cache[self.layer_id]\n k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head)\n v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = cache[self.layer_id]\n\n cache[self.layer_id] = (k, v)\n\n f_dim_per_head = tf.cast(dim_per_head, dtype=q.dtype)\n q = tf.multiply(q, 
tf.math.rsqrt(f_dim_per_head)) # (bs, n_heads, qlen, dim_per_head)\n k = tf.cast(k, dtype=q.dtype)\n scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen)\n mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)\n # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen)\n mask = tf.cast(mask, dtype=scores.dtype)\n scores = scores - 1e30 * (1.0 - mask)\n weights = tf.nn.softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)\n weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n outputs = (self.out_lin(context),)\n\n if output_attentions:\n outputs = outputs + (weights,)\n\n return outputs\n\n\n# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMTransformerFFN\nclass TFFlaubertTransformerFFN(tf.keras.layers.Layer):\n def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs):\n super().__init__(**kwargs)\n\n self.lin1 = tf.keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name=\"lin1\")\n self.lin2 = tf.keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name=\"lin2\")\n self.act = get_tf_activation(\"gelu\") if config.gelu_activation else get_tf_activation(\"relu\")\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n\n def call(self, input, training=False):\n x = self.lin1(input)\n x = self.act(x)\n x = self.lin2(x)\n x = self.dropout(x, training=training)\n\n return x\n\n\n@keras_serializable\nclass TFFlaubertMainLayer(tf.keras.layers.Layer):\n config_class = FlaubertConfig\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n self.n_heads = config.n_heads\n self.n_langs = config.n_langs\n self.dim = config.emb_dim\n self.hidden_dim = self.dim * 4\n self.n_words = config.n_words\n self.pad_index = config.pad_index\n self.causal = config.causal\n self.n_layers = config.n_layers\n self.use_lang_emb = config.use_lang_emb\n self.layerdrop = getattr(config, \"layerdrop\", 0.0)\n self.pre_norm = getattr(config, \"pre_norm\", False)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.return_dict = config.use_return_dict\n self.max_position_embeddings = config.max_position_embeddings\n self.embed_init_std = config.embed_init_std\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.embeddings = TFSharedEmbeddings(\n self.n_words, self.dim, initializer_range=config.embed_init_std, name=\"embeddings\"\n )\n self.layer_norm_emb = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"layer_norm_emb\")\n self.attentions = []\n self.layer_norm1 = []\n self.ffns = []\n self.layer_norm2 = []\n\n for i in range(self.n_layers):\n self.attentions.append(\n TFFlaubertMultiHeadAttention(self.n_heads, self.dim, config=config, name=f\"attentions_._{i}\")\n )\n self.layer_norm1.append(\n tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f\"layer_norm1_._{i}\")\n )\n # if self.is_decoder:\n # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))\n self.ffns.append(\n TFFlaubertTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=f\"ffns_._{i}\")\n )\n 
self.layer_norm2.append(\n tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f\"layer_norm2_._{i}\")\n )\n\n def build(self, input_shape):\n with tf.name_scope(\"position_embeddings\"):\n self.position_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.max_position_embeddings, self.dim],\n initializer=get_initializer(self.embed_init_std),\n )\n\n if self.n_langs > 1 and self.use_lang_emb:\n with tf.name_scope(\"lang_embeddings\"):\n self.lang_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.n_langs, self.dim],\n initializer=get_initializer(self.embed_init_std),\n )\n\n super().build(input_shape)\n\n def get_input_embeddings(self):\n return self.embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.weight = value\n self.embeddings.vocab_size = shape_list(value)[0]\n\n @unpack_inputs\n def call(\n self,\n input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n langs: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,\n cache: Optional[Dict[str, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[tf.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFBaseModelOutput]:\n # removed: src_enc=None, src_len=None\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n bs, slen = shape_list(input_ids)\n elif inputs_embeds is not None:\n bs, slen = shape_list(inputs_embeds)[:2]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if lengths is None:\n if input_ids is not None:\n lengths = tf.reduce_sum(\n tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=input_ids.dtype), axis=1\n )\n else:\n lengths = tf.convert_to_tensor([slen] * bs)\n # mask = input_ids != self.pad_index\n\n # check inputs\n # assert shape_list(lengths)[0] == bs\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(lengths)[0], bs\n ), f\"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched\"\n # assert lengths.max().item() <= slen\n # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0\n # assert (src_enc is None) == (src_len is None)\n # if src_enc is not None:\n # assert self.is_decoder\n # assert src_enc.size(0) == bs\n\n # generate masks\n mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)\n # if self.is_decoder and src_enc is not None:\n # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]\n\n # position_ids\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(slen), axis=0)\n position_ids = tf.tile(position_ids, (bs, 1))\n\n if tf.executing_eagerly():\n # assert shape_list(position_ids) == [bs, slen] # (slen, bs)\n tf.debugging.assert_equal(\n shape_list(position_ids), [bs, slen]\n ), f\"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched\"\n # position_ids = position_ids.transpose(0, 1)\n\n # langs\n if langs is not None and 
tf.executing_eagerly():\n # assert shape_list(langs) == [bs, slen] # (slen, bs)\n tf.debugging.assert_equal(\n shape_list(langs), [bs, slen]\n ), f\"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched\"\n # langs = langs.transpose(0, 1)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]\n if head_mask is not None:\n raise NotImplementedError\n else:\n head_mask = [None] * self.n_layers\n\n # do not recompute cached elements\n if cache is not None and input_ids is not None:\n _slen = slen - cache[\"slen\"]\n input_ids = input_ids[:, -_slen:]\n position_ids = position_ids[:, -_slen:]\n if langs is not None:\n langs = langs[:, -_slen:]\n mask = mask[:, -_slen:]\n attn_mask = attn_mask[:, -_slen:]\n\n # embeddings\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids)\n\n tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids)\n\n if langs is not None and self.use_lang_emb:\n tensor = tensor + tf.gather(self.lang_embeddings, langs)\n if token_type_ids is not None:\n tensor = tensor + self.embeddings(token_type_ids)\n\n tensor = self.layer_norm_emb(tensor)\n tensor = self.dropout(tensor, training=training)\n mask = tf.cast(mask, dtype=tensor.dtype)\n tensor = tensor * tf.expand_dims(mask, axis=-1)\n\n # hidden_states and attentions cannot be None in graph mode.\n hidden_states = () if output_hidden_states else None\n attentions = () if output_attentions else None\n\n # transformer layers\n for i in range(self.n_layers):\n # LayerDrop\n dropout_probability = random.uniform(0, 1)\n\n if training and (dropout_probability < self.layerdrop):\n continue\n\n if output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # self attention\n if not self.pre_norm:\n attn_outputs = self.attentions[i](\n tensor,\n attn_mask,\n None,\n cache,\n head_mask[i],\n output_attentions,\n training=training,\n )\n attn = attn_outputs[0]\n\n if output_attentions:\n attentions = attentions + (attn_outputs[1],)\n\n attn = self.dropout(attn, training=training)\n tensor = tensor + attn\n tensor = self.layer_norm1[i](tensor)\n else:\n tensor_normalized = self.layer_norm1[i](tensor)\n attn_outputs = self.attentions[i](\n tensor_normalized,\n attn_mask,\n None,\n cache,\n head_mask[i],\n output_attentions,\n training=training,\n )\n attn = attn_outputs[0]\n\n if output_attentions:\n attentions = attentions + (attn_outputs[1],)\n\n attn = self.dropout(attn, training=training)\n tensor = tensor + attn\n\n # encoder attention (for decoder only)\n # if self.is_decoder and src_enc is not None:\n # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)\n # attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)\n # tensor = tensor + attn\n # tensor = self.layer_norm15[i](tensor)\n\n # FFN\n if not self.pre_norm:\n tensor = tensor + self.ffns[i](tensor)\n tensor = self.layer_norm2[i](tensor)\n else:\n tensor_normalized = self.layer_norm2[i](tensor)\n tensor = tensor + self.ffns[i](tensor_normalized)\n\n tensor = tensor * tf.expand_dims(mask, axis=-1)\n\n # Add last hidden state\n if output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # update cache length\n if cache is not None:\n cache[\"slen\"] += tensor.size(1)\n\n # move back sequence length to dimension 0\n # 
tensor = tensor.transpose(0, 1)\n\n if not return_dict:\n return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)\n\n return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)\n\n\n# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMPredLayer\nclass TFFlaubertPredLayer(tf.keras.layers.Layer):\n \"\"\"\n Prediction layer (cross_entropy or adaptive_softmax).\n \"\"\"\n\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n\n self.asm = config.asm\n self.n_words = config.n_words\n self.pad_index = config.pad_index\n\n if config.asm is False:\n self.input_embeddings = input_embeddings\n else:\n raise NotImplementedError\n # self.proj = nn.AdaptiveLogSoftmaxWithLoss(\n # in_features=dim,\n # n_classes=config.n_words,\n # cutoffs=config.asm_cutoffs,\n # div_value=config.asm_div_value,\n # head_bias=True, # default is False\n # )\n\n def build(self, input_shape):\n # The output weights are the same as the input embeddings, but there is an output-only bias for each token.\n self.bias = self.add_weight(shape=(self.n_words,), initializer=\"zeros\", trainable=True, name=\"bias\")\n\n super().build(input_shape)\n\n def get_output_embeddings(self):\n return self.input_embeddings\n\n def set_output_embeddings(self, value):\n self.input_embeddings.weight = value\n self.input_embeddings.vocab_size = shape_list(value)[0]\n\n def get_bias(self):\n return {\"bias\": self.bias}\n\n def set_bias(self, value):\n self.bias = value[\"bias\"]\n self.vocab_size = shape_list(value[\"bias\"])[0]\n\n def call(self, hidden_states):\n hidden_states = self.input_embeddings(hidden_states, mode=\"linear\")\n hidden_states = hidden_states + self.bias\n\n return hidden_states\n\n\n@dataclass\nclass TFFlaubertWithLMHeadModelOutput(ModelOutput):\n \"\"\"\n Base class for [`TFFlaubertWithLMHeadModel`] outputs.\n\n Args:\n logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n logits: tf.Tensor = None\n hidden_states: Optional[Tuple[tf.Tensor]] = None\n attentions: Optional[Tuple[tf.Tensor]] = None\n\n\n@add_start_docstrings(\n \"\"\"\n The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertWithLMHeadModel(TFFlaubertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n self.pred_layer = TFFlaubertPredLayer(config, self.transformer.embeddings, name=\"pred_layer_._proj\")\n\n 
def get_lm_head(self):\n return self.pred_layer\n\n def get_prefix_bias_name(self):\n warnings.warn(\"The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.\", FutureWarning)\n return self.name + \"/\" + self.pred_layer.name\n\n def prepare_inputs_for_generation(self, inputs, **kwargs):\n mask_token_id = self.config.mask_token_id\n lang_id = self.config.lang_id\n\n effective_batch_size = inputs.shape[0]\n mask_token = tf.fill((effective_batch_size, 1), 1) * mask_token_id\n inputs = tf.concat([inputs, mask_token], axis=1)\n\n if lang_id is not None:\n langs = tf.ones_like(inputs) * lang_id\n else:\n langs = None\n return {\"input_ids\": inputs, \"langs\": langs}\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFFlaubertWithLMHeadModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n langs: Optional[Union[np.ndarray, tf.Tensor]] = None,\n token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,\n lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,\n cache: Optional[Dict[str, tf.Tensor]] = None,\n head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,\n inputs_embeds: Optional[tf.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: Optional[bool] = False,\n ) -> Union[Tuple, TFFlaubertWithLMHeadModelOutput]:\n\n transformer_outputs = self.transformer(\n input_ids=input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n output = transformer_outputs[0]\n outputs = self.pred_layer(output)\n\n if not return_dict:\n return (outputs,) + transformer_outputs[1:]\n\n return TFFlaubertWithLMHeadModelOutput(\n logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions\n )\n\n def serving_output(self, output):\n hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None\n attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None\n\n return TFFlaubertWithLMHeadModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)\n\n\n@add_start_docstrings(\n \"\"\"\n Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n e.g. 
for GLUE tasks.\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertForSequenceClassification(TFXLMForSequenceClassification):\n config_class = FlaubertConfig\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n\n\n@add_start_docstrings(\n \"\"\"\n Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertForQuestionAnsweringSimple(TFXLMForQuestionAnsweringSimple):\n config_class = FlaubertConfig\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n\n\n@add_start_docstrings(\n \"\"\"\n Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertForTokenClassification(TFXLMForTokenClassification):\n config_class = FlaubertConfig\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n\n\n@add_start_docstrings(\n \"\"\"\n Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n FLAUBERT_START_DOCSTRING,\n)\nclass TFFlaubertForMultipleChoice(TFXLMForMultipleChoice):\n config_class = FlaubertConfig\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.transformer = TFFlaubertMainLayer(config, name=\"transformer\")\n"
] | [
[
"tensorflow.expand_dims",
"tensorflow.convert_to_tensor"
],
[
"tensorflow.reshape",
"tensorflow.math.rsqrt",
"tensorflow.matmul",
"tensorflow.name_scope",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.executing_eagerly",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.keras.layers.Dropout",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.not_equal",
"tensorflow.tile",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.range",
"tensorflow.fill",
"tensorflow.gather"
]
] |
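Note on the transformers row above: the core of TFFlaubertMultiHeadAttention.call() in the stored modeling_tf_flaubert.py is a masked scaled dot-product attention. The following is a minimal standalone sketch of that pattern only; the helper name masked_attention and the toy batch/head sizes are illustrative assumptions, not part of the stored file.

import tensorflow as tf

def masked_attention(q, k, v, mask):
    # q, k, v: (bs, n_heads, qlen, dim_per_head); mask broadcastable to the score
    # shape, with 1 = attend and 0 = padding, as in TFFlaubertMultiHeadAttention.
    dim_per_head = tf.cast(tf.shape(q)[-1], q.dtype)
    q = q * tf.math.rsqrt(dim_per_head)                           # scale the queries
    scores = tf.matmul(q, k, transpose_b=True)                    # (bs, n_heads, qlen, klen)
    scores = scores - 1e30 * (1.0 - tf.cast(mask, scores.dtype))  # additive masking trick
    weights = tf.nn.softmax(scores, axis=-1)
    return tf.matmul(weights, v)                                  # (bs, n_heads, qlen, dim_per_head)

bs, n_heads, qlen, dim_per_head = 2, 4, 5, 8                      # toy sizes (assumed)
q = tf.random.normal((bs, n_heads, qlen, dim_per_head))
k = tf.random.normal((bs, n_heads, qlen, dim_per_head))
v = tf.random.normal((bs, n_heads, qlen, dim_per_head))
mask = tf.ones((bs, 1, 1, qlen))
print(masked_attention(q, k, v, mask).shape)                      # (2, 4, 5, 8)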
nihalsid/texture_fields | [
"dcd091a5f40fe433dbc47f2055d1cd2d3d2a1b87"
] | [
"scripts/sample_mesh.py"
] | [
"import argparse\nimport trimesh\nimport numpy as np\nimport os\nimport glob\nimport sys\nfrom multiprocessing import Pool\nfrom functools import partial\n# TODO: do this better\nsys.path.append('..')\n\nparser = argparse.ArgumentParser('Sample a watertight mesh.')\nparser.add_argument('in_folder', type=str,\n help='Path to input watertight meshes.')\nparser.add_argument('--ext', type=str, default=\"off\",\n help='Mesh extension')\nparser.add_argument('--n_proc', type=int, default=0,\n help='Number of processes to use.')\n\nparser.add_argument('--resize', action='store_true',\n help='When active, resizes the mesh to bounding box.')\n\nparser.add_argument('--rotate_xz', type=float, default=0.,\n help='Angle to rotate around y axis.')\n\nparser.add_argument('--bbox_padding', type=float, default=0.,\n help='Padding for bounding box')\nparser.add_argument('--bbox_in_folder', type=str,\n help='Path to other input folder to extract'\n 'bounding boxes.')\n\nparser.add_argument('--pointcloud_folder', type=str,\n help='Output path for point cloud.')\nparser.add_argument('--pointcloud_size', type=int, default=100000,\n help='Size of point cloud.')\n\nparser.add_argument('--voxels_folder', type=str,\n help='Output path for voxelization.')\nparser.add_argument('--voxels_res', type=int, default=32,\n help='Resolution for voxelization.')\n\nparser.add_argument('--points_folder', type=str,\n help='Output path for points.')\nparser.add_argument('--points_size', type=int, default=100000,\n help='Size of points.')\nparser.add_argument('--points_uniform_ratio', type=float, default=1.,\n help='Ratio of points to sample uniformly'\n 'in bounding box.')\nparser.add_argument('--points_sigma', type=float, default=0.01,\n help='Standard deviation of gaussian noise added to points'\n 'samples on the surfaces.')\nparser.add_argument('--points_padding', type=float, default=0.1,\n help='Additional padding applied to the uniformly'\n 'sampled points on both sides (in total).')\n\nparser.add_argument('--mesh_folder', type=str,\n help='Output path for mesh.')\n\nparser.add_argument('--overwrite', action='store_true',\n help='Whether to overwrite output.')\nparser.add_argument('--float16', action='store_true',\n help='Whether to use half precision.')\nparser.add_argument('--packbits', action='store_true',\n help='Whether to save truth values as bit array.')\nparser.add_argument('--fixed_bbox', action='store_true',\n help='96x96x96 bbox')\n\n \ndef main(args):\n input_files = glob.glob(os.path.join(args.in_folder, \"*\"))\n if args.n_proc != 0:\n with Pool(args.n_proc) as p:\n p.map(partial(process_path, args=args), input_files)\n else:\n for p in input_files:\n process_path(p, args)\n \n\ndef process_path(in_path, args):\n modelname = os.path.basename(in_path)\n in_path = os.path.join(in_path, \"model_c.obj\")\n mesh = trimesh.load(in_path, process=False)\n\n # Determine bounding box\n if not args.resize:\n # Standard bounding boux\n loc = np.zeros(3)\n scale = 1.\n else:\n if args.bbox_in_folder is not None:\n in_path_tmp = os.path.join(args.bbox_in_folder, modelname + '.off')\n mesh_tmp = trimesh.load(in_path_tmp, process=False)\n bbox = mesh_tmp.bounding_box.bounds\n elif args.fixed_bbox:\n bbox = np.array([[0, 0, 0], [96, 96, 96]], dtype=np.float32)\n else:\n bbox = mesh.bounding_box.bounds\n\n # Compute location and scale\n loc = (bbox[0] + bbox[1]) / 2\n scale = (bbox[1] - bbox[0]).max() / (1 - args.bbox_padding)\n\n # Transform input mesh\n mesh.apply_translation(-loc)\n mesh.apply_scale(1 / scale)\n\n if 
args.rotate_xz != 0:\n angle = args.rotate_xz / 180 * np.pi\n R = trimesh.transformations.rotation_matrix(angle, [0, 1, 0])\n mesh.apply_transform(R)\n\n # Expert various modalities\n if args.pointcloud_folder is not None:\n export_pointcloud(mesh, modelname, loc, scale, args)\n\n if args.voxels_folder is not None:\n export_voxels(mesh, modelname, loc, scale, args)\n\n if args.points_folder is not None:\n export_points(mesh, modelname, loc, scale, args)\n\n if args.mesh_folder is not None:\n export_mesh(mesh, modelname, loc, scale, args)\n\n\ndef export_pointcloud(mesh, modelname, loc, scale, args):\n filename = os.path.join(args.pointcloud_folder,\n modelname, 'pointcloud.npz')\n if not args.overwrite and os.path.exists(filename):\n print('Pointcloud already exist: %s' % filename)\n return\n elif not os.path.exists(os.path.join(args.pointcloud_folder, modelname)):\n print(f\"folder for {modelname} doesnt exist.. skipping\")\n return\n\n points, face_idx = mesh.sample(args.pointcloud_size, return_index=True)\n normals = mesh.face_normals[face_idx]\n\n # Compress\n if args.float16:\n dtype = np.float16\n else:\n dtype = np.float32\n\n points = points.astype(dtype)\n normals = normals.astype(dtype)\n\n print('Writing pointcloud: %s' % filename)\n np.savez(filename, points=points, normals=normals, loc=loc, scale=scale)\n\n\ndef export_mesh(mesh, modelname, loc, scale, args):\n filename = os.path.join(args.mesh_folder, modelname + '.off') \n if not args.overwrite and os.path.exists(filename):\n print('Mesh already exist: %s' % filename)\n return\n print('Writing mesh: %s' % filename)\n mesh.export(filename)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n main(args)\n"
] | [
[
"numpy.array",
"numpy.savez",
"numpy.zeros"
]
] |
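Note on the texture_fields row above: the normalisation step in process_path (centre the geometry at the bounding-box midpoint, scale its longest side to 1 - bbox_padding) only needs the listed NumPy calls. A small self-contained sketch under assumptions: random points stand in for a trimesh mesh, and the output file name mirrors export_pointcloud.

import numpy as np

def normalize_points(points, bbox_padding=0.0):
    # Equivalent of the loc/scale computation on mesh.bounding_box.bounds.
    bbox = np.stack([points.min(axis=0), points.max(axis=0)])     # shape (2, 3)
    loc = (bbox[0] + bbox[1]) / 2
    scale = (bbox[1] - bbox[0]).max() / (1 - bbox_padding)
    return (points - loc) / scale, loc, scale

points = np.random.rand(1000, 3) * np.array([96.0, 96.0, 96.0])   # toy points (assumed data)
normalized, loc, scale = normalize_points(points)
# Store compressed points plus the values needed to undo the transform,
# in the spirit of export_pointcloud with --float16.
np.savez('pointcloud.npz', points=normalized.astype(np.float16), loc=loc, scale=scale)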
chineseocr/table-detect | [
"92488f30ffaf486d29791aab63802beeb1eaca32"
] | [
"table_line.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 9 23:11:51 2020\ntable line detect\n@author: chineseocr\n\"\"\"\n\nfrom tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, BatchNormalization, UpSampling2D\nfrom tensorflow.keras.layers import LeakyReLU\nfrom tensorflow.keras.models import Model\n\n\ndef table_net(input_shape=(512, 512, 3), num_classes=1):\n inputs = Input(shape=input_shape)\n # 512\n use_bias = False\n down0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(inputs)\n down0a = BatchNormalization()(down0a)\n down0a = LeakyReLU(alpha=0.1)(down0a)\n down0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(down0a)\n down0a = BatchNormalization()(down0a)\n down0a = LeakyReLU(alpha=0.1)(down0a)\n down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)\n # 256\n\n down0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down0a_pool)\n down0 = BatchNormalization()(down0)\n\n down0 = LeakyReLU(alpha=0.1)(down0)\n down0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down0)\n down0 = BatchNormalization()(down0)\n down0 = LeakyReLU(alpha=0.1)(down0)\n down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)\n # 128\n\n down1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down0_pool)\n down1 = BatchNormalization()(down1)\n down1 = LeakyReLU(alpha=0.1)(down1)\n down1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down1)\n down1 = BatchNormalization()(down1)\n down1 = LeakyReLU(alpha=0.1)(down1)\n down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)\n # 64\n\n down2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(down1_pool)\n down2 = BatchNormalization()(down2)\n down2 = LeakyReLU(alpha=0.1)(down2)\n down2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(down2)\n down2 = BatchNormalization()(down2)\n down2 = LeakyReLU(alpha=0.1)(down2)\n down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)\n # 32\n\n down3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(down2_pool)\n down3 = BatchNormalization()(down3)\n down3 = LeakyReLU(alpha=0.1)(down3)\n down3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(down3)\n down3 = BatchNormalization()(down3)\n down3 = LeakyReLU(alpha=0.1)(down3)\n down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)\n # 16\n\n down4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(down3_pool)\n down4 = BatchNormalization()(down4)\n down4 = LeakyReLU(alpha=0.1)(down4)\n down4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(down4)\n down4 = BatchNormalization()(down4)\n down4 = LeakyReLU(alpha=0.1)(down4)\n down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)\n # 8\n\n center = Conv2D(1024, (3, 3), padding='same', use_bias=use_bias)(down4_pool)\n center = BatchNormalization()(center)\n center = LeakyReLU(alpha=0.1)(center)\n center = Conv2D(1024, (3, 3), padding='same', use_bias=use_bias)(center)\n center = BatchNormalization()(center)\n center = LeakyReLU(alpha=0.1)(center)\n # center\n\n up4 = UpSampling2D((2, 2))(center)\n up4 = concatenate([down4, up4], axis=3)\n up4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(up4)\n up4 = BatchNormalization()(up4)\n up4 = LeakyReLU(alpha=0.1)(up4)\n up4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(up4)\n up4 = BatchNormalization()(up4)\n up4 = LeakyReLU(alpha=0.1)(up4)\n up4 = Conv2D(512, (3, 3), padding='same', use_bias=use_bias)(up4)\n up4 = BatchNormalization()(up4)\n up4 = LeakyReLU(alpha=0.1)(up4)\n # 16\n\n up3 = UpSampling2D((2, 2))(up4)\n up3 
= concatenate([down3, up3], axis=3)\n up3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(up3)\n up3 = BatchNormalization()(up3)\n up3 = LeakyReLU(alpha=0.1)(up3)\n up3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(up3)\n up3 = BatchNormalization()(up3)\n up3 = LeakyReLU(alpha=0.1)(up3)\n up3 = Conv2D(256, (3, 3), padding='same', use_bias=use_bias)(up3)\n up3 = BatchNormalization()(up3)\n up3 = LeakyReLU(alpha=0.1)(up3)\n # 32\n\n up2 = UpSampling2D((2, 2))(up3)\n up2 = concatenate([down2, up2], axis=3)\n up2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(up2)\n up2 = BatchNormalization()(up2)\n up2 = LeakyReLU(alpha=0.1)(up2)\n up2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(up2)\n up2 = BatchNormalization()(up2)\n up2 = LeakyReLU(alpha=0.1)(up2)\n up2 = Conv2D(128, (3, 3), padding='same', use_bias=use_bias)(up2)\n up2 = BatchNormalization()(up2)\n up2 = LeakyReLU(alpha=0.1)(up2)\n # 64\n\n up1 = UpSampling2D((2, 2))(up2)\n up1 = concatenate([down1, up1], axis=3)\n up1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up1)\n up1 = BatchNormalization()(up1)\n up1 = LeakyReLU(alpha=0.1)(up1)\n up1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up1)\n up1 = BatchNormalization()(up1)\n up1 = LeakyReLU(alpha=0.1)(up1)\n up1 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up1)\n up1 = BatchNormalization()(up1)\n up1 = LeakyReLU(alpha=0.1)(up1)\n # 128\n\n up0 = UpSampling2D((2, 2))(up1)\n up0 = concatenate([down0, up0], axis=3)\n up0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up0)\n up0 = BatchNormalization()(up0)\n up0 = LeakyReLU(alpha=0.1)(up0)\n up0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up0)\n up0 = BatchNormalization()(up0)\n up0 = LeakyReLU(alpha=0.1)(up0)\n up0 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up0)\n up0 = BatchNormalization()(up0)\n up0 = LeakyReLU(alpha=0.1)(up0)\n # 256\n\n up0a = UpSampling2D((2, 2))(up0)\n up0a = concatenate([down0a, up0a], axis=3)\n up0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0a)\n up0a = BatchNormalization()(up0a)\n up0a = LeakyReLU(alpha=0.1)(up0a)\n up0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0a)\n up0a = BatchNormalization()(up0a)\n up0a = LeakyReLU(alpha=0.1)(up0a)\n up0a = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0a)\n up0a = BatchNormalization()(up0a)\n up0a = LeakyReLU(alpha=0.1)(up0a)\n # 512\n\n classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up0a)\n\n model = Model(inputs=inputs, outputs=classify)\n\n return model\n\n\nfrom config import tableModeLinePath\nfrom utils import letterbox_image, get_table_line, adjust_lines, line_to_line\nimport numpy as np\nimport cv2\n\nmodel = table_net((None, None, 3), 2)\nmodel.load_weights(tableModeLinePath)\n\n\ndef table_line(img, size=(512, 512), hprob=0.5, vprob=0.5, row=50, col=30, alph=15):\n sizew, sizeh = size\n inputBlob, fx, fy = letterbox_image(img[..., ::-1], (sizew, sizeh))\n pred = model.predict(np.array([np.array(inputBlob) / 255.0]))\n pred = pred[0]\n vpred = pred[..., 1] > vprob ##竖线\n hpred = pred[..., 0] > hprob ##横线\n vpred = vpred.astype(int)\n hpred = hpred.astype(int)\n colboxes = get_table_line(vpred, axis=1, lineW=col)\n rowboxes = get_table_line(hpred, axis=0, lineW=row)\n ccolbox = []\n crowlbox = []\n if len(rowboxes) > 0:\n rowboxes = np.array(rowboxes)\n rowboxes[:, [0, 2]] = rowboxes[:, [0, 2]] / fx\n rowboxes[:, [1, 3]] = rowboxes[:, [1, 3]] / fy\n xmin = rowboxes[:, [0, 2]].min()\n xmax = rowboxes[:, [0, 
2]].max()\n ymin = rowboxes[:, [1, 3]].min()\n ymax = rowboxes[:, [1, 3]].max()\n ccolbox = [[xmin, ymin, xmin, ymax], [xmax, ymin, xmax, ymax]]\n rowboxes = rowboxes.tolist()\n\n if len(colboxes) > 0:\n colboxes = np.array(colboxes)\n colboxes[:, [0, 2]] = colboxes[:, [0, 2]] / fx\n colboxes[:, [1, 3]] = colboxes[:, [1, 3]] / fy\n\n xmin = colboxes[:, [0, 2]].min()\n xmax = colboxes[:, [0, 2]].max()\n ymin = colboxes[:, [1, 3]].min()\n ymax = colboxes[:, [1, 3]].max()\n colboxes = colboxes.tolist()\n crowlbox = [[xmin, ymin, xmax, ymin], [xmin, ymax, xmax, ymax]]\n\n rowboxes += crowlbox\n colboxes += ccolbox\n\n rboxes_row_, rboxes_col_ = adjust_lines(rowboxes, colboxes, alph=alph)\n rowboxes += rboxes_row_\n colboxes += rboxes_col_\n nrow = len(rowboxes)\n ncol = len(colboxes)\n for i in range(nrow):\n for j in range(ncol):\n rowboxes[i] = line_to_line(rowboxes[i], colboxes[j], 10)\n colboxes[j] = line_to_line(colboxes[j], rowboxes[i], 10)\n\n return rowboxes, colboxes\n\n\nif __name__ == '__main__':\n import time\n\n p = 'img/table-detect.jpg'\n from utils import draw_lines\n\n img = cv2.imread(p)\n t = time.time()\n rowboxes, colboxes = table_line(img[..., ::-1], size=(512, 512), hprob=0.5, vprob=0.5)\n img = draw_lines(img, rowboxes + colboxes, color=(255, 0, 0), lineW=2)\n\n print(time.time() - t, len(rowboxes), len(colboxes))\n cv2.imwrite('img/table-line.png', img)\n"
] | [
[
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Conv2D",
"numpy.array",
"tensorflow.keras.layers.Input"
]
] |
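Note on the table-detect row above: table_net repeats one pattern dozens of times, Conv2D -> BatchNormalization -> LeakyReLU(0.1), with MaxPooling2D between encoder stages. Below is a compact sketch of a single "down" stage expressed with a helper; the helper name conv_bn_lrelu and the 64-filter toy stage are assumptions for illustration, not a refactor of the stored file.

from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, LeakyReLU, MaxPooling2D
from tensorflow.keras.models import Model

def conv_bn_lrelu(x, filters, use_bias=False):
    # The repeated building block of table_net.
    x = Conv2D(filters, (3, 3), padding='same', use_bias=use_bias)(x)
    x = BatchNormalization()(x)
    return LeakyReLU(alpha=0.1)(x)

inputs = Input(shape=(512, 512, 3))
x = conv_bn_lrelu(inputs, 64)
x = conv_bn_lrelu(x, 64)
x = MaxPooling2D((2, 2), strides=(2, 2))(x)   # one encoder ("down") stage
Model(inputs, x).summary()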
OmerMughal31/RetinaNet_modified | [
"207ec4fba35ef390af42fa0266ae95b86ecb9b08"
] | [
"keras_retinanet/bin/train.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport warnings\n\nimport keras\nimport keras.preprocessing.image\nimport tensorflow as tf\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\", \"..\"))\n import keras_retinanet.bin # noqa: F401\n\n __package__ = \"keras_retinanet.bin\"\n\n# Change these to absolute imports if you copy this script outside the keras_retinanet package.\nfrom .. import layers # noqa: F401\nfrom .. import losses\nfrom .. import models\nfrom ..callbacks import RedirectModel\nfrom ..callbacks.eval import Evaluate\nfrom ..models.retinanet import retinanet_bbox\nfrom ..preprocessing.csv_generator import CSVGenerator\nfrom ..utils.anchors import make_shapes_callback\nfrom ..utils.config import read_config_file, parse_anchor_parameters\nfrom ..utils.gpu import setup_gpu\nfrom ..utils.image import random_visual_effect_generator\nfrom ..utils.keras_version import check_keras_version\nfrom ..utils.model import freeze as freeze_model\nfrom ..utils.tf_version import check_tf_version\nfrom ..utils.transform import random_transform_generator\n\n\ndef makedirs(path):\n # Intended behavior: try to create the directory,\n # pass if the directory exists already, fails otherwise.\n # Meant for Python 2.7/3.n compatibility.\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\n\ndef model_with_weights(model, weights, skip_mismatch):\n \"\"\" Load weights for model.\n\n Args\n model : The model to load weights for.\n weights : The weights to load.\n skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.\n \"\"\"\n if weights is not None:\n model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)\n return model\n\n\ndef create_models(\n backbone_retinanet,\n num_classes,\n weights,\n multi_gpu=0,\n freeze_backbone=False,\n lr=1e-5,\n config=None,\n):\n \"\"\" Creates three models (model, training_model, prediction_model).\n\n Args\n backbone_retinanet : A function to call to create a retinanet model with a given backbone.\n num_classes : The number of classes to train.\n weights : The weights to load into the model.\n multi_gpu : The number of GPUs to use for training.\n freeze_backbone : If True, disables learning for the backbone.\n config : Config parameters, None indicates the default configuration.\n\n Returns\n model : The base model. This is also the model that is saved in snapshots.\n training_model : The training model. 
If multi_gpu=0, this is identical to model.\n prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).\n \"\"\"\n\n modifier = freeze_model if freeze_backbone else None\n\n # load anchor parameters, or pass None (so that defaults will be used)\n anchor_params = None\n num_anchors = None\n if config and \"anchor_parameters\" in config:\n anchor_params = parse_anchor_parameters(config)\n num_anchors = anchor_params.num_anchors()\n\n # Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.\n # optionally wrap in a parallel model\n if multi_gpu > 1:\n from keras.utils import multi_gpu_model\n\n with tf.device(\"/cpu:0\"):\n model = model_with_weights(\n backbone_retinanet(\n num_classes, num_anchors=num_anchors, modifier=modifier\n ),\n weights=weights,\n skip_mismatch=True,\n )\n training_model = multi_gpu_model(model, gpus=multi_gpu)\n else:\n model = model_with_weights(\n backbone_retinanet(num_classes, num_anchors=num_anchors, modifier=modifier),\n weights=weights,\n skip_mismatch=True,\n )\n training_model = model\n\n # make prediction model\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n\n # compile model\n training_model.compile(\n loss={\"regression\": losses.smooth_l1(), \"classification\": losses.focal()},\n optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001),\n metrics=[\"accuracy\"],\n )\n\n return model, training_model, prediction_model\n\n\ndef create_callbacks(\n model, training_model, prediction_model, validation_generator, args\n):\n \"\"\" Creates the callbacks to use during training.\n\n Args\n model: The base model.\n training_model: The model that is used for training.\n prediction_model: The model that should be used for validation.\n validation_generator: The generator for creating validation data.\n args: parseargs args object.\n\n Returns:\n A list of callbacks used for training.\n \"\"\"\n callbacks = []\n\n tensorboard_callback = None\n\n if args.tensorboard_dir:\n makedirs(args.tensorboard_dir)\n tensorboard_callback = keras.callbacks.TensorBoard(\n log_dir=args.tensorboard_dir,\n histogram_freq=0,\n batch_size=args.batch_size,\n write_graph=True,\n write_grads=False,\n write_images=False,\n embeddings_freq=0,\n embeddings_layer_names=None,\n embeddings_metadata=None,\n )\n\n if args.evaluation and validation_generator:\n evaluation = Evaluate(\n validation_generator,\n tensorboard=tensorboard_callback,\n weighted_average=args.weighted_average,\n )\n evaluation = RedirectModel(evaluation, prediction_model)\n callbacks.append(evaluation)\n\n # save the model\n if args.snapshots:\n # ensure directory created first; otherwise h5py will error after epoch.\n makedirs(args.snapshot_path)\n checkpoint = keras.callbacks.ModelCheckpoint(\n os.path.join(\n args.snapshot_path,\n \"{backbone}_{dataset_type}_{{epoch:02d}}.h5\".format(\n backbone=args.backbone, dataset_type=args.dataset_type\n ),\n ),\n verbose=1,\n # save_best_only=True,\n # monitor=\"mAP\",\n # mode='max'\n )\n checkpoint = RedirectModel(checkpoint, model)\n callbacks.append(checkpoint)\n\n callbacks.append(\n keras.callbacks.ReduceLROnPlateau(\n monitor=\"loss\",\n factor=0.1,\n patience=2,\n verbose=1,\n mode=\"auto\",\n min_delta=0.0001,\n cooldown=0,\n min_lr=0,\n )\n )\n\n if args.tensorboard_dir:\n callbacks.append(tensorboard_callback)\n\n return callbacks\n\n\ndef create_generators(args, preprocess_image):\n \"\"\" Create generators for 
training and validation.\n\n Args\n args : parseargs object containing configuration for generators.\n preprocess_image : Function that preprocesses an image for the network.\n \"\"\"\n common_args = {\n \"batch_size\": args.batch_size,\n \"config\": args.config,\n \"image_min_side\": args.image_min_side,\n \"image_max_side\": args.image_max_side,\n \"no_resize\": args.no_resize,\n \"preprocess_image\": preprocess_image,\n }\n\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n visual_effect_generator = random_visual_effect_generator(\n contrast_range=(0.9, 1.1),\n brightness_range=(-0.1, 0.1),\n hue_range=(-0.05, 0.05),\n saturation_range=(0.95, 1.05),\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n visual_effect_generator = None\n\n if args.dataset_type == \"csv\":\n train_generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator,\n visual_effect_generator=visual_effect_generator,\n **common_args\n )\n\n if args.val_annotations:\n validation_generator = CSVGenerator(\n args.val_annotations, args.classes, shuffle_groups=False, **common_args\n )\n else:\n validation_generator = None\n else:\n raise ValueError(\"Invalid data type received: {}\".format(args.dataset_type))\n\n return train_generator, validation_generator\n\n\ndef check_args(parsed_args):\n \"\"\" Function to check for inherent contradictions within parsed arguments.\n For example, batch_size < num_gpus\n Intended to raise errors prior to backend initialisation.\n\n Args\n parsed_args: parser.parse_args()\n\n Returns\n parsed_args\n \"\"\"\n\n if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:\n raise ValueError(\n \"Batch size ({}) must be equal to or higher than the number of GPUs ({})\".format(\n parsed_args.batch_size, parsed_args.multi_gpu\n )\n )\n\n if parsed_args.multi_gpu > 1 and parsed_args.snapshot:\n raise ValueError(\n \"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.\".format(\n parsed_args.multi_gpu, parsed_args.snapshot\n )\n )\n\n if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:\n raise ValueError(\n \"Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.\"\n )\n\n if \"resnet\" not in parsed_args.backbone:\n warnings.warn(\n \"Using experimental backbone {}. 
Only resnet50 has been properly tested.\".format(\n parsed_args.backbone\n )\n )\n\n return parsed_args\n\n\ndef parse_args(args):\n \"\"\" Parse the arguments.\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Simple training script for training a RetinaNet network.\"\n )\n subparsers = parser.add_subparsers(\n help=\"Arguments for specific dataset types.\", dest=\"dataset_type\"\n )\n subparsers.required = True\n\n csv_parser = subparsers.add_parser(\"csv\")\n csv_parser.add_argument(\n \"annotations\", help=\"Path to CSV file containing annotations for training.\"\n )\n csv_parser.add_argument(\n \"classes\", help=\"Path to a CSV file containing class label mapping.\"\n )\n csv_parser.add_argument(\n \"--val-annotations\",\n help=\"Path to CSV file containing annotations for validation (optional).\",\n )\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"--snapshot\", help=\"Resume training from a snapshot.\")\n group.add_argument(\n \"--imagenet-weights\",\n help=\"Initialize the model with pretrained imagenet weights. This is the default behaviour.\",\n action=\"store_const\",\n const=True,\n default=True,\n )\n group.add_argument(\n \"--weights\", help=\"Initialize the model with weights from a file.\"\n )\n group.add_argument(\n \"--no-weights\",\n help=\"Don't initialize the model with any weights.\",\n dest=\"imagenet_weights\",\n action=\"store_const\",\n const=False,\n )\n parser.add_argument(\n \"--backbone\",\n help=\"Backbone model used by retinanet.\",\n default=\"resnet50\",\n type=str,\n )\n parser.add_argument(\n \"--batch-size\", help=\"Size of the batches.\", default=1, type=int\n )\n parser.add_argument(\n \"--gpu\", help=\"Id of the GPU to use (as reported by nvidia-smi).\", type=int\n )\n parser.add_argument(\n \"--multi-gpu\",\n help=\"Number of GPUs to use for parallel processing.\",\n type=int,\n default=0,\n )\n parser.add_argument(\n \"--multi-gpu-force\",\n help=\"Extra flag needed to enable (experimental) multi-gpu support.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--initial-epoch\",\n help=\"Epoch from which to begin the train, useful if resuming from snapshot.\",\n type=int,\n default=0,\n )\n parser.add_argument(\n \"--epochs\", help=\"Number of epochs to train.\", type=int, default=50\n )\n parser.add_argument(\n \"--steps\", help=\"Number of steps per epoch.\", type=int, default=10000\n )\n parser.add_argument(\"--lr\", help=\"Learning rate.\", type=float, default=1e-5)\n parser.add_argument(\n \"--snapshot-path\",\n help=\"Path to store snapshots of models during training (defaults to './snapshots')\",\n default=\"./snapshots\",\n )\n parser.add_argument(\n \"--tensorboard-dir\", help=\"Log directory for Tensorboard output\", default=\"\"\n ) # default='./logs') => https://github.com/tensorflow/tensorflow/pull/34870\n parser.add_argument(\n \"--no-snapshots\",\n help=\"Disable saving snapshots.\",\n dest=\"snapshots\",\n action=\"store_false\",\n )\n parser.add_argument(\n \"--no-evaluation\",\n help=\"Disable per epoch evaluation.\",\n dest=\"evaluation\",\n action=\"store_false\",\n )\n parser.add_argument(\n \"--freeze-backbone\",\n help=\"Freeze training of backbone layers.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--random-transform\",\n help=\"Randomly transform image and annotations.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--image-min-side\",\n help=\"Rescale the image so the smallest side is min_side.\",\n type=int,\n default=800,\n )\n parser.add_argument(\n 
\"--image-max-side\",\n help=\"Rescale the image if the largest side is larger than max_side.\",\n type=int,\n default=1333,\n )\n parser.add_argument(\n \"--no-resize\", help=\"Don\" \"t rescale the image.\", action=\"store_true\"\n )\n parser.add_argument(\n \"--config\", help=\"Path to a configuration parameters .ini file.\"\n )\n parser.add_argument(\n \"--weighted-average\",\n help=\"Compute the mAP using the weighted average of precisions among classes.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--compute-val-loss\",\n help=\"Compute validation loss during training\",\n dest=\"compute_val_loss\",\n action=\"store_true\",\n )\n\n # Fit generator arguments\n parser.add_argument(\n \"--multiprocessing\",\n help=\"Use multiprocessing in fit_generator.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--workers\", help=\"Number of generator workers.\", type=int, default=1\n )\n parser.add_argument(\n \"--max-queue-size\",\n help=\"Queue length for multiprocessing workers in fit_generator.\",\n type=int,\n default=10,\n )\n\n return check_args(parser.parse_args(args))\n\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n # create object that stores backbone information\n backbone = models.backbone(args.backbone)\n\n # make sure keras and tensorflow are the minimum required version\n check_keras_version()\n check_tf_version()\n\n # optionally choose specific GPU\n if args.gpu is not None:\n setup_gpu(args.gpu)\n\n # optionally load config parameters\n if args.config:\n args.config = read_config_file(args.config)\n\n # create the generators\n train_generator, validation_generator = create_generators(\n args, backbone.preprocess_image\n )\n\n # create the model\n if args.snapshot is not None:\n print(\"Loading model, this may take a second...\")\n model = models.load_model(args.snapshot, backbone_name=args.backbone)\n training_model = model\n anchor_params = None\n if args.config and \"anchor_parameters\" in args.config:\n anchor_params = parse_anchor_parameters(args.config)\n prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params)\n else:\n weights = args.weights\n # default to imagenet if nothing else is specified\n if weights is None and args.imagenet_weights:\n weights = backbone.download_imagenet()\n\n print(\"Creating model, this may take a second...\")\n model, training_model, prediction_model = create_models(\n backbone_retinanet=backbone.retinanet,\n num_classes=train_generator.num_classes(),\n weights=weights,\n multi_gpu=args.multi_gpu,\n freeze_backbone=args.freeze_backbone,\n lr=args.lr,\n config=args.config,\n )\n\n # print model summary\n print(model.summary())\n\n # this lets the generator compute backbone layer shapes using the actual backbone model\n if \"vgg\" in args.backbone or \"densenet\" in args.backbone:\n train_generator.compute_shapes = make_shapes_callback(model)\n if validation_generator:\n validation_generator.compute_shapes = train_generator.compute_shapes\n\n # create the callbacks\n callbacks = create_callbacks(\n model, training_model, prediction_model, validation_generator, args,\n )\n\n if not args.compute_val_loss:\n validation_generator = None\n\n # start training\n return training_model.fit_generator(\n generator=train_generator,\n steps_per_epoch=args.steps,\n epochs=args.epochs,\n verbose=1,\n callbacks=callbacks,\n workers=args.workers,\n use_multiprocessing=args.multiprocessing,\n max_queue_size=args.max_queue_size,\n 
validation_data=validation_generator,\n initial_epoch=args.initial_epoch,\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"tensorflow.device"
]
] |
Jie-Yuan/Torchappy | [
"e722db1085fa2ff8e0267f7e6745875531c00f8b"
] | [
"models/lr.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tqdm import tqdm, tqdm_notebook\nfrom ml_metrics import auc\nfrom sklearn.datasets import make_classification\n\n\nclass LogsticRegression(nn.Module):\n def __init__(self, in_dim, n_class):\n super().__init__()\n self.fc1 = nn.Linear(in_dim, in_dim // 2)\n self.fc2 = nn.Linear(in_dim // 2, n_class)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n # return x\n return F.softmax(x, 1)\n\n\nepochs = 5\nbatch_size = 128\nX, y = make_classification(1000000)\nt_X, t_y = map(torch.FloatTensor, (X, y))\n\nnet = LogsticRegression(20, 2)\nloss_func = torch.nn.modules.loss.CrossEntropyLoss()\noptimizer = torch.optim.Adam(net.parameters())\n\nbar_epochs = tqdm_notebook(range(epochs))\nfor e in bar_epochs:\n bar_epochs.set_description(f\"Epoch {e}:\")\n t = tqdm_notebook(range(0, t_X.size(0), batch_size))\n for b in t: # for each training step\n # train your data...\n b_X = t_X[b:b + batch_size]\n b_y = t_y[b:b + batch_size]\n output = net(b_X) # rnn output\n loss = loss_func(\n output,\n b_y.long().view(-1)) # cross entropy loss and y is not one-hotted\n optimizer.zero_grad() # clear gradients for this training step\n loss.backward() # backpropagation, compute gradients\n optimizer.step()\n if b % 10000 == 0:\n t.set_description(\n f\"Epoch {e}:\"\n f\"Loss: {loss.data.numpy():.5f} | \"\n f\"Auc: {auc(b_y.numpy(), output.data.numpy()[:, 1]):.5}\")\n\n_net = net.eval()\nauc(y, _net(t_X).data.numpy()[:, -1])\n"
] | [
[
"torch.nn.Linear",
"torch.nn.functional.softmax",
"torch.nn.modules.loss.CrossEntropyLoss",
"sklearn.datasets.make_classification"
]
] |
Jebediah/libwave | [
"c04998c964f0dc7d414783c6e8cf989a2716ad54"
] | [
"wave_utils/scripts/plot_matrix.py"
] | [
"import sys\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\nif __name__ == \"__main__\":\n file = open(sys.argv[1], \"r\")\n X = np.loadtxt(file)\n X = np.matrix(X)\n print(X.shape)\n\n fig, ax = plt.subplots()\n cax = ax.matshow(X)\n ax.set_xticks(range(0, X.shape[1]))\n ax.set_yticks(range(0, X.shape[0]))\n fig.colorbar(cax)\n plt.show()\n"
] | [
[
"numpy.matrix",
"matplotlib.pylab.show",
"matplotlib.pylab.subplots",
"numpy.loadtxt"
]
] |
jinzhuoran/CogKGE | [
"b0e819a1d34cf61a7d70c33808da3377b73c8fd6"
] | [
"cogkge/modules/gnn/helper.py"
] | [
"import numpy as np, sys, os, random, pdb, json, uuid, time, argparse\nfrom pprint import pprint\nimport logging, logging.config\nfrom collections import defaultdict as ddict\n# from ordered_set import OrderedSet\n\n# PyTorch related imports\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.init import xavier_normal_\nfrom torch.utils.data import DataLoader\nfrom torch.nn import Parameter\n# from torch_scatter import scatter_add\nfrom .util_scatter import scatter_add\n\ntry:\n from torch import irfft\n from torch import rfft\nexcept ImportError:\n from torch.fft import irfft2\n from torch.fft import rfft2\n\n\n def rfft(x, d):\n t = rfft2(x, dim=(-d))\n return torch.stack((t.real, t.imag), -1)\n\n\n def irfft(x, d, signal_sizes):\n return irfft2(torch.complex(x[:, :, 0], x[:, :, 1]), s=signal_sizes, dim=(-d))\n\nnp.set_printoptions(precision=4)\n\n\ndef set_gpu(gpus):\n \"\"\"\n\tSets the GPU to be used for the run\n\n\tParameters\n\t----------\n\tgpus: List of GPUs to be used for the run\n\t\n\tReturns\n\t-------\n\t\t\n\t\"\"\"\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus\n\n\ndef get_logger(name, log_dir, config_dir):\n \"\"\"\n\tCreates a logger object\n\n\tParameters\n\t----------\n\tname: Name of the logger file\n\tlog_dir: Directory where logger file needs to be stored\n\tconfig_dir: Directory from where log_config.json needs to be read\n\t\n\tReturns\n\t-------\n\tA logger object which writes to both file and stdout\n\t\t\n\t\"\"\"\n config_dict = json.load(open(config_dir + 'log_config.json'))\n config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(name)\n\n std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logging.Formatter(std_out_format))\n logger.addHandler(consoleHandler)\n\n return logger\n\n\ndef get_combined_results(left_results, right_results):\n results = {}\n count = float(left_results['count'])\n\n results['left_mr'] = round(left_results['mr'] / count, 5)\n results['left_mrr'] = round(left_results['mrr'] / count, 5)\n results['right_mr'] = round(right_results['mr'] / count, 5)\n results['right_mrr'] = round(right_results['mrr'] / count, 5)\n results['mr'] = round((left_results['mr'] + right_results['mr']) / (2 * count), 5)\n results['mrr'] = round((left_results['mrr'] + right_results['mrr']) / (2 * count), 5)\n\n for k in range(10):\n results['left_hits@{}'.format(k + 1)] = round(left_results['hits@{}'.format(k + 1)] / count, 5)\n results['right_hits@{}'.format(k + 1)] = round(right_results['hits@{}'.format(k + 1)] / count, 5)\n results['hits@{}'.format(k + 1)] = round(\n (left_results['hits@{}'.format(k + 1)] + right_results['hits@{}'.format(k + 1)]) / (2 * count), 5)\n return results\n\n\ndef get_param(shape):\n param = Parameter(torch.Tensor(*shape));\n xavier_normal_(param.data)\n return param\n\n\ndef com_mult(a, b):\n r1, i1 = a[..., 0], a[..., 1]\n r2, i2 = b[..., 0], b[..., 1]\n return torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim=-1)\n\n\ndef conj(a):\n a[..., 1] = -a[..., 1]\n return a\n\n\ndef cconv(a, b):\n return irfft(com_mult(rfft(a, 1), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))\n\n\ndef ccorr(a, b):\n return irfft(com_mult(conj(rfft(a, 1)), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))\n\n\ndef construct_adj(train_dataset, relation_dict_len):\n edge_index, edge_type = [], []\n 
if train_dataset.data.shape[1] == 3: # score_based\n for sub, rel, obj in train_dataset.data:\n edge_index.append((sub, obj))\n edge_type.append(rel)\n\n for sub, rel, obj in train_dataset.data:\n edge_index.append((obj, sub))\n edge_type.append(rel + relation_dict_len)\n else: # classification-based\n label = train_dataset.label_data\n for j,(sub, rel) in enumerate(train_dataset.data):\n for elem in torch.nonzero(label[j]):\n e2_idx = elem.item()\n edge_index.append((sub,e2_idx))\n edge_type.append(rel)\n\n for j,(sub, rel) in enumerate(train_dataset.data):\n for elem in torch.nonzero(label[j]):\n e2_idx = elem.item()\n edge_index.append((e2_idx,sub))\n edge_type.append(rel + relation_dict_len)\n\n return edge_index,edge_type"
] | [
[
"torch.complex",
"torch.stack",
"torch.nonzero",
"torch.nn.init.xavier_normal_",
"numpy.set_printoptions",
"torch.fft.rfft2",
"torch.rfft",
"torch.Tensor"
]
] |
chenxiaoyu523/FEAT3D | [
"ba45ba7c26628a7cc0070b010f4f33893cdac926"
] | [
"train_matchnet.py"
] | [
"import argparse\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom feat.dataloader.samplers import CategoriesSampler\nfrom feat.models.matchnet import MatchNet \nfrom feat.utils import pprint, set_gpu, ensure_path, Averager, Timer, count_acc, euclidean_metric, compute_confidence_interval\nfrom tensorboardX import SummaryWriter\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--max_epoch', type=int, default=200)\n parser.add_argument('--way', type=int, default=5) \n parser.add_argument('--shot', type=int, default=1)\n parser.add_argument('--query', type=int, default=15)\n parser.add_argument('--lr', type=float, default=0.0001)\n parser.add_argument('--lr_mul', type=float, default=1) # lr is the basic learning rate, while lr * lr_mul is the lr for other parts\n parser.add_argument('--step_size', type=int, default=10)\n parser.add_argument('--gamma', type=float, default=0.2) \n parser.add_argument('--temperature', type=float, default=1)\n parser.add_argument('--use_bilstm', type=bool, default=False)\n parser.add_argument('--model_type', type=str, default='ConvNet', choices=['ConvNet', 'ResNet'])\n parser.add_argument('--dataset', type=str, default='MiniImageNet', choices=['MiniImageNet', 'CUB', 'TieredImageNet']) \n # MiniImageNet, ConvNet, './saves/initialization/miniimagenet/con-pre.pth'\n # MiniImageNet, ResNet, './saves/initialization/miniimagenet/res-pre.pth'\n # CUB, ConvNet, './saves/initialization/cub/con-pre.pth' \n parser.add_argument('--init_weights', type=str, default=None) \n parser.add_argument('--gpu', default='0')\n args = parser.parse_args()\n pprint(vars(args))\n\n set_gpu(args.gpu)\n save_path1 = '-'.join([args.dataset, args.model_type, 'MatchNet'])\n save_path2 = '_'.join([str(args.shot), str(args.query), str(args.way), \n str(args.step_size), str(args.gamma), str(args.lr), str(args.temperature)])\n if args.use_bilstm:\n save_path2 = save_path2 + '_' + str(args.lr_mul) + '_BiLSTM'\n args.save_path = osp.join(save_path1, save_path2) \n ensure_path(save_path1, remove=False)\n ensure_path(args.save_path) \n\n if args.dataset == 'MiniImageNet':\n # Handle MiniImageNet\n from feat.dataloader.mini_imagenet import MiniImageNet as Dataset\n elif args.dataset == 'CUB':\n from feat.dataloader.cub import CUB as Dataset\n elif args.dataset == 'TieredImageNet':\n from feat.dataloader.tiered_imagenet import tieredImageNet as Dataset \n else:\n raise ValueError('Non-supported Dataset.')\n \n trainset = Dataset('train', args)\n train_sampler = CategoriesSampler(trainset.label, 100, args.way, args.shot + args.query)\n train_loader = DataLoader(dataset=trainset, batch_sampler=train_sampler, num_workers=0, pin_memory=True)\n\n valset = Dataset('val', args)\n val_sampler = CategoriesSampler(valset.label, 500, args.way, args.shot + args.query)\n val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler, num_workers=0, pin_memory=True)\n \n model = MatchNet(args)\n if args.model_type == 'ConvNet':\n if args.use_bilstm:\n optimizer = torch.optim.Adam([{'params': model.encoder.parameters()},\n {'params': model.bilstm.parameters(), 'lr': args.lr * args.lr_mul}], lr=args.lr) \n else:\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n elif args.model_type == 'ResNet':\n if args.use_bilstm:\n optimizer = torch.optim.SGD([{'params': model.encoder.parameters()},\n {'params': model.bilstm.parameters(), 'lr': args.lr * args.lr_mul}], lr=args.lr, 
momentum=0.9, nesterov=True, weight_decay=0.0005) \n else: \n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, nesterov=True, weight_decay=0.0005) \n else:\n raise ValueError('No Such Encoder')\n \n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) \n \n # load pre-trained model (no FC weights)\n model_dict = model.state_dict()\n if args.init_weights is not None:\n pretrained_dict = torch.load(args.init_weights)['params']\n # remove weights for FC\n pretrained_dict = {'encoder.'+k: v for k, v in pretrained_dict.items()}\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n print(pretrained_dict.keys())\n model_dict.update(pretrained_dict) \n model.load_state_dict(model_dict) \n \n if torch.cuda.is_available():\n torch.backends.cudnn.benchmark = True\n model = model.cuda()\n \n def save_model(name):\n torch.save(dict(params=model.state_dict()), osp.join(args.save_path, name + '.pth'))\n \n trlog = {}\n trlog['args'] = vars(args)\n trlog['train_loss'] = []\n trlog['val_loss'] = []\n trlog['train_acc'] = []\n trlog['val_acc'] = []\n trlog['max_acc'] = 0.0\n trlog['max_acc_epoch'] = 0\n\n timer = Timer()\n global_count = 0\n writer = SummaryWriter(logdir=args.save_path)\n \n label = torch.arange(args.way).repeat(args.query)\n if torch.cuda.is_available():\n label = label.type(torch.cuda.LongTensor)\n else:\n label = label.type(torch.LongTensor)\n \n label_support = torch.arange(args.way).repeat(args.shot)\n label_support = label_support.type(torch.LongTensor)\n # transform to one-hot form\n label_support_onehot = torch.zeros(args.way * args.shot, args.way)\n label_support_onehot.scatter_(1, label_support.unsqueeze(1), 1) \n if torch.cuda.is_available():\n label_support_onehot = label_support_onehot.cuda() # KN x N\n \n for epoch in range(1, args.max_epoch + 1):\n lr_scheduler.step()\n model.train()\n tl = Averager()\n ta = Averager()\n \n for i, batch in enumerate(train_loader, 1):\n global_count = global_count + 1\n if torch.cuda.is_available():\n data, _ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n p = args.shot * args.way\n data_shot, data_query = data[:p], data[p:]\n\n logits = model(data_shot, data_query) # KqN x KN x 1\n # use logits to weights all labels, KN x N\n prediction = torch.sum(torch.mul(logits, label_support_onehot.unsqueeze(0)), 1) # KqN x N\n # compute loss\n loss = F.cross_entropy(prediction, label)\n acc = count_acc(prediction, label)\n writer.add_scalar('data/loss', float(loss), global_count)\n writer.add_scalar('data/acc', float(acc), global_count)\n print('epoch {}, train {}/{}, loss={:.4f} acc={:.4f}'\n .format(epoch, i, len(train_loader), loss.item(), acc))\n\n tl.add(loss.item())\n ta.add(acc)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n tl = tl.item()\n ta = ta.item()\n\n model.eval()\n\n vl = Averager()\n va = Averager()\n\n label = torch.arange(args.way).repeat(args.query)\n if torch.cuda.is_available():\n label = label.type(torch.cuda.LongTensor)\n else:\n label = label.type(torch.LongTensor)\n \n print('best epoch {}, best val acc={:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc']))\n with torch.no_grad():\n for i, batch in enumerate(val_loader, 1):\n if torch.cuda.is_available():\n data, _ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n p = args.shot * args.way\n data_shot, data_query = data[:p], data[p:]\n \n logits = model(data_shot, data_query) # KqN x KN x 1\n # use logits to weights all labels, KN x N\n 
prediction = torch.sum(torch.mul(logits, label_support_onehot.unsqueeze(0)), 1) # KqN x N\n # compute loss\n loss = F.cross_entropy(prediction, label)\n acc = count_acc(prediction, label)\n vl.add(loss.item())\n va.add(acc)\n\n vl = vl.item()\n va = va.item()\n writer.add_scalar('data/val_loss', float(vl), epoch)\n writer.add_scalar('data/val_acc', float(va), epoch) \n print('epoch {}, val, loss={:.4f} acc={:.4f}'.format(epoch, vl, va))\n\n if va > trlog['max_acc']:\n trlog['max_acc'] = va\n trlog['max_acc_epoch'] = epoch\n save_model('max_acc')\n\n trlog['train_loss'].append(tl)\n trlog['train_acc'].append(ta)\n trlog['val_loss'].append(vl)\n trlog['val_acc'].append(va)\n\n torch.save(trlog, osp.join(args.save_path, 'trlog'))\n\n save_model('epoch-last')\n\n print('ETA:{}/{}'.format(timer.measure(), timer.measure(epoch / args.max_epoch)))\n writer.close()\n\n # Test Phase\n trlog = torch.load(osp.join(args.save_path, 'trlog'))\n test_set = Dataset('test', args)\n sampler = CategoriesSampler(test_set.label, 10000, args.way, args.shot + args.query)\n loader = DataLoader(test_set, batch_sampler=sampler, num_workers=0, pin_memory=True)\n test_acc_record = np.zeros((10000,))\n\n model.load_state_dict(torch.load(osp.join(args.save_path, 'max_acc' + '.pth'))['params'])\n model.eval()\n\n ave_acc = Averager()\n \n with torch.no_grad():\n for i, batch in enumerate(loader, 1):\n if torch.cuda.is_available():\n data, _ = [_.cuda() for _ in batch]\n else:\n data = batch[0]\n k = args.way * args.shot\n data_shot, data_query = data[:k], data[k:]\n logits = model(data_shot, data_query) # KqN x KN x 1\n # use logits to weights all labels, KN x N\n prediction = torch.sum(torch.mul(logits, label_support_onehot.unsqueeze(0)), 1) # KqN x N\n acc = count_acc(prediction, label)\n ave_acc.add(acc)\n test_acc_record[i-1] = acc\n print('batch {}: {:.2f}({:.2f})'.format(i, ave_acc.item() * 100, acc * 100))\n \n m, pm = compute_confidence_interval(test_acc_record)\n print('Val Best Acc {:.4f}, Test Acc {:.4f}'.format(trlog['max_acc'], ave_acc.item()))\n print('Test Acc {:.4f} + {:.4f}'.format(m, pm))\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.load",
"numpy.zeros",
"torch.no_grad",
"torch.zeros",
"torch.cuda.is_available",
"torch.arange",
"torch.nn.functional.cross_entropy",
"torch.optim.lr_scheduler.StepLR"
]
] |
AutodidactaMx/cocid_python | [
"11628f465ff362807a692c79ede26bf30dd8e26a",
"11628f465ff362807a692c79ede26bf30dd8e26a"
] | [
"Modulo_3/Semana 4/matplotlib/practica4.py",
"Modulo_3/Semana 4/matplotlib/practica2.py"
] | [
"import tkinter as tk\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n\ndata = {\n 'Basquet': 11,\n 'Futbol': 222,\n 'Natacion': 121,\n 'Esqui': 321,\n 'Tenis': 44\n }\nclave = data.keys()\nvalor = data.values()\n\nventana= tk.Tk() \n \nfigura = plt.Figure(figsize=(6,5), dpi=100)\nlienzo_figura = FigureCanvasTkAgg(figura, ventana)\nlienzo_figura.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH)\n\nax1 = figura.add_subplot()\nax1.set_title('Alumnos')\nax1.plot(clave, valor)\nax1.set_ylabel('Cantidad alumnos')\nax1.set_xlabel('Materias')\n\ntoolbar =NavigationToolbar2Tk(lienzo_figura, ventana)\ntoolbar.update()\ntoolbar.pack(side=tk.BOTTOM, fill=tk.Y)\nventana.mainloop()",
"import tkinter as tk\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n\ndata = {\n '0-9': 5000,\n '10-19': 2000,\n '20-29': 30000,\n '30-39': 43490,\n '40-49': 39898\n }\nclave = data.keys()\nvalor = data.values()\n\nventana= tk.Tk() \n \nfigura = plt.Figure(figsize=(6,5), dpi=100)\nlienzo_figura = FigureCanvasTkAgg(figura, ventana)\nlienzo_figura.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH)\n\nax1 = figura.add_subplot()\nax1.set_title('Habitantes')\nax1.barh(list(clave), list(valor))\nax1.set_ylabel('Rango de edad')\nax1.set_xlabel('Cantidad')\n\ntoolbar =NavigationToolbar2Tk(lienzo_figura, ventana)\ntoolbar.update()\ntoolbar.pack(side=tk.BOTTOM, fill=tk.Y)\nventana.mainloop()"
] | [
[
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"matplotlib.backends.backend_tkagg.NavigationToolbar2Tk",
"matplotlib.pyplot.Figure"
],
[
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"matplotlib.backends.backend_tkagg.NavigationToolbar2Tk",
"matplotlib.pyplot.Figure"
]
] |